Changes in kernel/generic/src/mm/as.c [97bdb4a:c964521] in mainline
- File: kernel/generic/src/mm/as.c (1 edited)
kernel/generic/src/mm/as.c
r97bdb4a rc964521 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space related functions. 36 36 * 37 37 * This file contains address space manipulation functions. … … 86 86 * Each architecture decides what functions will be used to carry out 87 87 * address space operations such as creating or locking page tables. 88 *89 88 */ 90 89 as_operations_t *as_operations = NULL; … … 92 91 /** 93 92 * Slab for as_t objects. 94 *95 93 */ 96 94 static slab_cache_t *as_slab; … … 102 100 * - as->asid for each as of the as_t type 103 101 * - asids_allocated counter 104 *105 102 */ 106 103 SPINLOCK_INITIALIZE(asidlock); … … 109 106 * This list contains address spaces that are not active on any 110 107 * processor and that have valid ASID. 111 *112 108 */ 113 109 LIST_INITIALIZE(inactive_as_with_asid_head); … … 116 112 as_t *AS_KERNEL = NULL; 117 113 118 NO_TRACE static int as_constructor(void *obj, unsigned int flags) 114 static int area_flags_to_page_flags(int); 115 static as_area_t *find_area_and_lock(as_t *, uintptr_t); 116 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); 117 static void sh_info_remove_reference(share_info_t *); 118 119 static int as_constructor(void *obj, int flags) 119 120 { 120 121 as_t *as = (as_t *) obj; 121 122 int rc; 123 122 124 link_initialize(&as->inactive_as_with_asid_link); 123 125 mutex_initialize(&as->lock, MUTEX_PASSIVE); 124 126 125 intrc = as_constructor_arch(as, flags);127 rc = as_constructor_arch(as, flags); 126 128 127 129 return rc; 128 130 } 129 131 130 NO_TRACE static size_t as_destructor(void *obj)132 static int as_destructor(void *obj) 131 133 { 132 134 as_t *as = (as_t *) obj; 135 133 136 return as_destructor_arch(as); 134 137 } … … 138 141 { 139 142 as_arch_init(); 140 143 141 144 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, 142 145 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); … … 154 157 /** Create address space. 155 158 * 156 * @param flags Flags that influence the way in wich the address 157 * space is created. 158 * 159 */ 160 as_t *as_create(unsigned int flags) 161 { 162 as_t *as = (as_t *) slab_alloc(as_slab, 0); 159 * @param flags Flags that influence the way in wich the address space 160 * is created. 161 */ 162 as_t *as_create(int flags) 163 { 164 as_t *as; 165 166 as = (as_t *) slab_alloc(as_slab, 0); 163 167 (void) as_create_arch(as, 0); 164 168 … … 172 176 atomic_set(&as->refcount, 0); 173 177 as->cpu_refcount = 0; 174 175 178 #ifdef AS_PAGE_TABLE 176 179 as->genarch.page_table = page_table_create(flags); … … 189 192 * We know that we don't hold any spinlock. 190 193 * 191 * @param as Address space to be destroyed. 192 * 194 * @param as Address space to be destroyed. 193 195 */ 194 196 void as_destroy(as_t *as) 195 197 { 198 ipl_t ipl; 199 bool cond; 196 200 DEADLOCK_PROBE_INIT(p_asidlock); 197 201 … … 210 214 * disabled to prevent nested context switches. We also depend on the 211 215 * fact that so far no spinlocks are held. 
212 *213 216 */ 214 217 preemption_disable(); 215 ipl_t ipl = interrupts_read(); 216 218 ipl = interrupts_read(); 217 219 retry: 218 220 interrupts_disable(); … … 222 224 goto retry; 223 225 } 224 225 /* Interrupts disabled, enable preemption */ 226 preemption_enable(); 227 228 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) { 226 preemption_enable(); /* Interrupts disabled, enable preemption */ 227 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 229 228 if (as->cpu_refcount == 0) 230 229 list_remove(&as->inactive_as_with_asid_link); 231 232 230 asid_put(as->asid); 233 231 } 234 235 232 spinlock_unlock(&asidlock); 236 interrupts_restore(ipl); 237 238 233 239 234 /* 240 235 * Destroy address space areas of the address space. 241 236 * The B+tree must be walked carefully because it is 242 237 * also being destroyed. 243 * 244 */245 bool cond = true;246 while (cond) { 238 */ 239 for (cond = true; cond; ) { 240 btree_node_t *node; 241 247 242 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 248 249 btree_node_t *node = 250 list_get_instance(as->as_area_btree.leaf_head.next, 243 node = list_get_instance(as->as_area_btree.leaf_head.next, 251 244 btree_node_t, leaf_link); 252 253 if ((cond = node->keys)) 245 246 if ((cond = node->keys)) { 254 247 as_area_destroy(as, node->key[0]); 255 } 256 248 } 249 } 250 257 251 btree_destroy(&as->as_area_btree); 258 259 252 #ifdef AS_PAGE_TABLE 260 253 page_table_destroy(as->genarch.page_table); … … 262 255 page_table_destroy(NULL); 263 256 #endif 264 257 258 interrupts_restore(ipl); 259 265 260 slab_free(as_slab, as); 266 261 } … … 271 266 * space. 272 267 * 273 * @param as Address space to be held. 274 * 275 */ 276 NO_TRACE void as_hold(as_t *as) 268 * @param a Address space to be held. 269 */ 270 void as_hold(as_t *as) 277 271 { 278 272 atomic_inc(&as->refcount); … … 284 278 * space. 285 279 * 286 * @param asAddress space to be released. 287 * 288 */ 289 NO_TRACE void as_release(as_t *as) 280 * @param a Address space to be released. 281 */ 282 void as_release(as_t *as) 290 283 { 291 284 if (atomic_predec(&as->refcount) == 0) … … 293 286 } 294 287 295 /** Check area conflicts with other areas.296 *297 * @param as Address space.298 * @param va Starting virtual address of the area being tested.299 * @param size Size of the area being tested.300 * @param avoid_area Do not touch this area.301 *302 * @return True if there is no conflict, false otherwise.303 *304 */305 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,306 as_area_t *avoid_area)307 {308 ASSERT(mutex_locked(&as->lock));309 310 /*311 * We don't want any area to have conflicts with NULL page.312 *313 */314 if (overlaps(va, size, NULL, PAGE_SIZE))315 return false;316 317 /*318 * The leaf node is found in O(log n), where n is proportional to319 * the number of address space areas belonging to as.320 * The check for conflicts is then attempted on the rightmost321 * record in the left neighbour, the leftmost record in the right322 * neighbour and all records in the leaf node itself.323 *324 */325 btree_node_t *leaf;326 as_area_t *area =327 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);328 if (area) {329 if (area != avoid_area)330 return false;331 }332 333 /* First, check the two border cases. 
*/334 btree_node_t *node =335 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);336 if (node) {337 area = (as_area_t *) node->value[node->keys - 1];338 339 mutex_lock(&area->lock);340 341 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {342 mutex_unlock(&area->lock);343 return false;344 }345 346 mutex_unlock(&area->lock);347 }348 349 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);350 if (node) {351 area = (as_area_t *) node->value[0];352 353 mutex_lock(&area->lock);354 355 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {356 mutex_unlock(&area->lock);357 return false;358 }359 360 mutex_unlock(&area->lock);361 }362 363 /* Second, check the leaf node. */364 btree_key_t i;365 for (i = 0; i < leaf->keys; i++) {366 area = (as_area_t *) leaf->value[i];367 368 if (area == avoid_area)369 continue;370 371 mutex_lock(&area->lock);372 373 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {374 mutex_unlock(&area->lock);375 return false;376 }377 378 mutex_unlock(&area->lock);379 }380 381 /*382 * So far, the area does not conflict with other areas.383 * Check if it doesn't conflict with kernel address space.384 *385 */386 if (!KERNEL_ADDRESS_SPACE_SHADOWED) {387 return !overlaps(va, size,388 KERNEL_ADDRESS_SPACE_START,389 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);390 }391 392 return true;393 }394 395 288 /** Create address space area of common attributes. 396 289 * 397 290 * The created address space area is added to the target address space. 398 291 * 399 * @param as Target address space. 400 * @param flags Flags of the area memory. 401 * @param size Size of area. 402 * @param base Base address of area. 403 * @param attrs Attributes of the area. 404 * @param backend Address space area backend. NULL if no backend is used. 405 * @param backend_data NULL or a pointer to an array holding two void *. 406 * 407 * @return Address space area on success or NULL on failure. 408 * 409 */ 410 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 411 uintptr_t base, unsigned int attrs, mem_backend_t *backend, 412 mem_backend_data_t *backend_data) 413 { 292 * @param as Target address space. 293 * @param flags Flags of the area memory. 294 * @param size Size of area. 295 * @param base Base address of area. 296 * @param attrs Attributes of the area. 297 * @param backend Address space area backend. NULL if no backend is used. 298 * @param backend_data NULL or a pointer to an array holding two void *. 299 * 300 * @return Address space area on success or NULL on failure. 301 */ 302 as_area_t * 303 as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, 304 mem_backend_t *backend, mem_backend_data_t *backend_data) 305 { 306 ipl_t ipl; 307 as_area_t *a; 308 414 309 if (base % PAGE_SIZE) 415 310 return NULL; 416 311 417 312 if (!size) 418 313 return NULL; 419 314 420 315 /* Writeable executable areas are not supported. 
*/ 421 316 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 422 317 return NULL; 423 318 319 ipl = interrupts_disable(); 424 320 mutex_lock(&as->lock); 425 321 426 322 if (!check_area_conflicts(as, base, size, NULL)) { 427 323 mutex_unlock(&as->lock); 324 interrupts_restore(ipl); 428 325 return NULL; 429 326 } 430 327 431 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0); 432 433 mutex_initialize(&area->lock, MUTEX_PASSIVE); 434 435 area->as = as; 436 area->flags = flags; 437 area->attributes = attrs; 438 area->pages = SIZE2FRAMES(size); 439 area->base = base; 440 area->sh_info = NULL; 441 area->backend = backend; 442 328 a = (as_area_t *) malloc(sizeof(as_area_t), 0); 329 330 mutex_initialize(&a->lock, MUTEX_PASSIVE); 331 332 a->as = as; 333 a->flags = flags; 334 a->attributes = attrs; 335 a->pages = SIZE2FRAMES(size); 336 a->base = base; 337 a->sh_info = NULL; 338 a->backend = backend; 443 339 if (backend_data) 444 a rea->backend_data = *backend_data;340 a->backend_data = *backend_data; 445 341 else 446 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 447 448 btree_create(&area->used_space); 449 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 450 342 memsetb(&a->backend_data, sizeof(a->backend_data), 0); 343 344 btree_create(&a->used_space); 345 346 btree_insert(&as->as_area_btree, base, (void *) a, NULL); 347 451 348 mutex_unlock(&as->lock); 452 453 return area; 454 } 455 456 /** Find address space area and lock it. 457 * 458 * @param as Address space. 459 * @param va Virtual address. 460 * 461 * @return Locked address space area containing va on success or 462 * NULL on failure. 463 * 464 */ 465 NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 466 { 467 ASSERT(mutex_locked(&as->lock)); 468 469 btree_node_t *leaf; 470 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 471 if (area) { 472 /* va is the base address of an address space area */ 473 mutex_lock(&area->lock); 474 return area; 475 } 476 477 /* 478 * Search the leaf node and the righmost record of its left neighbour 479 * to find out whether this is a miss or va belongs to an address 480 * space area found there. 481 * 482 */ 483 484 /* First, search the leaf node itself. */ 485 btree_key_t i; 486 487 for (i = 0; i < leaf->keys; i++) { 488 area = (as_area_t *) leaf->value[i]; 489 490 mutex_lock(&area->lock); 491 492 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 493 return area; 494 495 mutex_unlock(&area->lock); 496 } 497 498 /* 499 * Second, locate the left neighbour and test its last record. 500 * Because of its position in the B+tree, it must have base < va. 501 * 502 */ 503 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 504 if (lnode) { 505 area = (as_area_t *) lnode->value[lnode->keys - 1]; 506 507 mutex_lock(&area->lock); 508 509 if (va < area->base + area->pages * PAGE_SIZE) 510 return area; 511 512 mutex_unlock(&area->lock); 513 } 514 515 return NULL; 349 interrupts_restore(ipl); 350 351 return a; 516 352 } 517 353 518 354 /** Find address space area and change it. 519 355 * 520 * @param as Address space. 521 * @param address Virtual address belonging to the area to be changed. 522 * Must be page-aligned. 523 * @param size New size of the virtual memory block starting at 524 * address. 525 * @param flags Flags influencing the remap operation. Currently unused. 526 * 527 * @return Zero on success or a value from @ref errno.h otherwise. 
528 * 529 */ 530 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 531 { 356 * @param as Address space. 357 * @param address Virtual address belonging to the area to be changed. 358 * Must be page-aligned. 359 * @param size New size of the virtual memory block starting at 360 * address. 361 * @param flags Flags influencing the remap operation. Currently unused. 362 * 363 * @return Zero on success or a value from @ref errno.h otherwise. 364 */ 365 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) 366 { 367 as_area_t *area; 368 ipl_t ipl; 369 size_t pages; 370 371 ipl = interrupts_disable(); 532 372 mutex_lock(&as->lock); 533 373 534 374 /* 535 375 * Locate the area. 536 * 537 */ 538 as_area_t *area = find_area_and_lock(as, address); 376 */ 377 area = find_area_and_lock(as, address); 539 378 if (!area) { 540 379 mutex_unlock(&as->lock); 380 interrupts_restore(ipl); 541 381 return ENOENT; 542 382 } 543 383 544 384 if (area->backend == &phys_backend) { 545 385 /* 546 386 * Remapping of address space areas associated 547 387 * with memory mapped devices is not supported. 548 *549 388 */ 550 389 mutex_unlock(&area->lock); 551 390 mutex_unlock(&as->lock); 391 interrupts_restore(ipl); 552 392 return ENOTSUP; 553 393 } 554 555 394 if (area->sh_info) { 556 395 /* 557 * Remapping of shared address space areas 396 * Remapping of shared address space areas 558 397 * is not supported. 559 *560 398 */ 561 399 mutex_unlock(&area->lock); 562 400 mutex_unlock(&as->lock); 401 interrupts_restore(ipl); 563 402 return ENOTSUP; 564 403 } 565 566 size_tpages = SIZE2FRAMES((address - area->base) + size);404 405 pages = SIZE2FRAMES((address - area->base) + size); 567 406 if (!pages) { 568 407 /* 569 408 * Zero size address space areas are not allowed. 570 *571 409 */ 572 410 mutex_unlock(&area->lock); 573 411 mutex_unlock(&as->lock); 412 interrupts_restore(ipl); 574 413 return EPERM; 575 414 } 576 415 577 416 if (pages < area->pages) { 417 bool cond; 578 418 uintptr_t start_free = area->base + pages * PAGE_SIZE; 579 419 580 420 /* 581 421 * Shrinking the area. 582 422 * No need to check for overlaps. 583 * 584 */ 585 423 */ 424 586 425 page_table_lock(as, false); 587 426 588 427 /* 589 428 * Start TLB shootdown sequence. 590 * 591 */ 592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 593 area->base + pages * PAGE_SIZE, area->pages - pages); 594 429 */ 430 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + 431 pages * PAGE_SIZE, area->pages - pages); 432 595 433 /* 596 434 * Remove frames belonging to used space starting from … … 599 437 * is also the right way to remove part of the used_space 600 438 * B+tree leaf list. 
601 * 602 */603 bool cond = true;604 while (cond) {439 */ 440 for (cond = true; cond;) { 441 btree_node_t *node; 442 605 443 ASSERT(!list_empty(&area->used_space.leaf_head)); 606 607 btree_node_t *node = 444 node = 608 445 list_get_instance(area->used_space.leaf_head.prev, 609 446 btree_node_t, leaf_link); 447 if ((cond = (bool) node->keys)) { 448 uintptr_t b = node->key[node->keys - 1]; 449 size_t c = 450 (size_t) node->value[node->keys - 1]; 451 unsigned int i = 0; 610 452 611 if ((cond = (bool) node->keys)) { 612 uintptr_t ptr = node->key[node->keys - 1]; 613 size_t size = 614 (size_t) node->value[node->keys - 1]; 615 size_t i = 0; 616 617 if (overlaps(ptr, size * PAGE_SIZE, area->base, 453 if (overlaps(b, c * PAGE_SIZE, area->base, 618 454 pages * PAGE_SIZE)) { 619 455 620 if ( ptr + size* PAGE_SIZE <= start_free) {456 if (b + c * PAGE_SIZE <= start_free) { 621 457 /* 622 458 * The whole interval fits 623 459 * completely in the resized 624 460 * address space area. 625 *626 461 */ 627 462 break; 628 463 } 629 464 630 465 /* 631 466 * Part of the interval corresponding 632 467 * to b and c overlaps with the resized 633 468 * address space area. 634 *635 469 */ 636 637 /* We are almost done */ 638 cond = false; 639 i = (start_free - ptr) >> PAGE_WIDTH; 470 471 cond = false; /* we are almost done */ 472 i = (start_free - b) >> PAGE_WIDTH; 640 473 if (!used_space_remove(area, start_free, 641 size - i)) 642 panic("Cannot remove used space."); 474 c - i)) 475 panic("Cannot remove used " 476 "space."); 643 477 } else { 644 478 /* … … 646 480 * completely removed. 647 481 */ 648 if (!used_space_remove(area, ptr, size)) 649 panic("Cannot remove used space."); 482 if (!used_space_remove(area, b, c)) 483 panic("Cannot remove used " 484 "space."); 650 485 } 651 652 for (; i < size; i++) { 653 pte_t *pte = page_mapping_find(as, ptr + 486 487 for (; i < c; i++) { 488 pte_t *pte; 489 490 pte = page_mapping_find(as, b + 654 491 i * PAGE_SIZE); 655 656 ASSERT(pte); 657 ASSERT(PTE_VALID(pte)); 658 ASSERT(PTE_PRESENT(pte)); 659 660 if ((area->backend) && 661 (area->backend->frame_free)) { 492 ASSERT(pte && PTE_VALID(pte) && 493 PTE_PRESENT(pte)); 494 if (area->backend && 495 area->backend->frame_free) { 662 496 area->backend->frame_free(area, 663 ptr+ i * PAGE_SIZE,497 b + i * PAGE_SIZE, 664 498 PTE_GET_FRAME(pte)); 665 499 } 666 667 page_mapping_remove(as, ptr + 500 page_mapping_remove(as, b + 668 501 i * PAGE_SIZE); 669 502 } 670 503 } 671 504 } 672 505 673 506 /* 674 507 * Finish TLB shootdown sequence. 675 * 676 */ 677 508 */ 509 678 510 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 679 511 area->pages - pages); 680 512 681 513 /* 682 514 * Invalidate software translation caches (e.g. TSB on sparc64). 683 *684 515 */ 685 516 as_invalidate_translation_cache(as, area->base + 686 517 pages * PAGE_SIZE, area->pages - pages); 687 tlb_shootdown_finalize(ipl); 518 tlb_shootdown_finalize(); 519 520 page_table_unlock(as, false); 688 521 689 page_table_unlock(as, false);690 522 } else { 691 523 /* 692 524 * Growing the area. 693 525 * Check for overlaps with other address space areas. 
694 *695 526 */ 696 527 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 697 528 area)) { 698 529 mutex_unlock(&area->lock); 699 mutex_unlock(&as->lock); 530 mutex_unlock(&as->lock); 531 interrupts_restore(ipl); 700 532 return EADDRNOTAVAIL; 701 533 } 702 } 703 534 } 535 704 536 area->pages = pages; 705 537 706 538 mutex_unlock(&area->lock); 707 539 mutex_unlock(&as->lock); 708 540 interrupts_restore(ipl); 541 709 542 return 0; 710 543 } 711 544 712 /** Remove reference to address space area share info.713 *714 * If the reference count drops to 0, the sh_info is deallocated.715 *716 * @param sh_info Pointer to address space area share info.717 *718 */719 NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)720 {721 bool dealloc = false;722 723 mutex_lock(&sh_info->lock);724 ASSERT(sh_info->refcount);725 726 if (--sh_info->refcount == 0) {727 dealloc = true;728 link_t *cur;729 730 /*731 * Now walk carefully the pagemap B+tree and free/remove732 * reference from all frames found there.733 */734 for (cur = sh_info->pagemap.leaf_head.next;735 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {736 btree_node_t *node737 = list_get_instance(cur, btree_node_t, leaf_link);738 btree_key_t i;739 740 for (i = 0; i < node->keys; i++)741 frame_free((uintptr_t) node->value[i]);742 }743 744 }745 mutex_unlock(&sh_info->lock);746 747 if (dealloc) {748 btree_destroy(&sh_info->pagemap);749 free(sh_info);750 }751 }752 753 545 /** Destroy address space area. 754 546 * 755 * @param as Address space. 756 * @param address Address within the area to be deleted. 757 * 758 * @return Zero on success or a value from @ref errno.h on failure. 759 * 547 * @param as Address space. 548 * @param address Address within the area to be deleted. 549 * 550 * @return Zero on success or a value from @ref errno.h on failure. 760 551 */ 761 552 int as_area_destroy(as_t *as, uintptr_t address) 762 553 { 554 as_area_t *area; 555 uintptr_t base; 556 link_t *cur; 557 ipl_t ipl; 558 559 ipl = interrupts_disable(); 763 560 mutex_lock(&as->lock); 764 765 a s_area_t *area = find_area_and_lock(as, address);561 562 area = find_area_and_lock(as, address); 766 563 if (!area) { 767 564 mutex_unlock(&as->lock); 565 interrupts_restore(ipl); 768 566 return ENOENT; 769 567 } 770 771 uintptr_tbase = area->base;772 568 569 base = area->base; 570 773 571 page_table_lock(as, false); 774 572 775 573 /* 776 574 * Start TLB shootdown sequence. 777 575 */ 778 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 779 area->pages); 780 576 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 577 781 578 /* 782 579 * Visit only the pages mapped by used_space B+tree. 
783 580 */ 784 link_t *cur;785 581 for (cur = area->used_space.leaf_head.next; 786 582 cur != &area->used_space.leaf_head; cur = cur->next) { 787 583 btree_node_t *node; 788 btree_key_t i;584 unsigned int i; 789 585 790 586 node = list_get_instance(cur, btree_node_t, leaf_link); 791 587 for (i = 0; i < node->keys; i++) { 792 uintptr_t ptr = node->key[i]; 793 size_t size; 588 uintptr_t b = node->key[i]; 589 size_t j; 590 pte_t *pte; 794 591 795 for (size = 0; size < (size_t) node->value[i]; size++) { 796 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 797 798 ASSERT(pte); 799 ASSERT(PTE_VALID(pte)); 800 ASSERT(PTE_PRESENT(pte)); 801 802 if ((area->backend) && 803 (area->backend->frame_free)) { 804 area->backend->frame_free(area, 805 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte)); 592 for (j = 0; j < (size_t) node->value[i]; j++) { 593 pte = page_mapping_find(as, b + j * PAGE_SIZE); 594 ASSERT(pte && PTE_VALID(pte) && 595 PTE_PRESENT(pte)); 596 if (area->backend && 597 area->backend->frame_free) { 598 area->backend->frame_free(area, b + 599 j * PAGE_SIZE, PTE_GET_FRAME(pte)); 806 600 } 807 808 page_mapping_remove(as, ptr + size * PAGE_SIZE); 601 page_mapping_remove(as, b + j * PAGE_SIZE); 809 602 } 810 603 } 811 604 } 812 605 813 606 /* 814 607 * Finish TLB shootdown sequence. 815 * 816 */ 817 608 */ 609 818 610 tlb_invalidate_pages(as->asid, area->base, area->pages); 819 611 820 612 /* 821 613 * Invalidate potential software translation caches (e.g. TSB on 822 614 * sparc64). 823 *824 615 */ 825 616 as_invalidate_translation_cache(as, area->base, area->pages); 826 tlb_shootdown_finalize( ipl);827 617 tlb_shootdown_finalize(); 618 828 619 page_table_unlock(as, false); 829 620 830 621 btree_destroy(&area->used_space); 831 622 832 623 area->attributes |= AS_AREA_ATTR_PARTIAL; 833 624 834 625 if (area->sh_info) 835 626 sh_info_remove_reference(area->sh_info); 836 627 837 628 mutex_unlock(&area->lock); 838 629 839 630 /* 840 631 * Remove the empty area from address space. 841 *842 632 */ 843 633 btree_remove(&as->as_area_btree, base, NULL); … … 846 636 847 637 mutex_unlock(&as->lock); 638 interrupts_restore(ipl); 848 639 return 0; 849 640 } … … 856 647 * sh_info of the source area. The process of duplicating the 857 648 * mapping is done through the backend share function. 858 * 859 * @param src_as 860 * @param src_base 861 * @param acc_size 862 * @param dst_as 863 * @param dst_base 649 * 650 * @param src_as Pointer to source address space. 651 * @param src_base Base address of the source address space area. 652 * @param acc_size Expected size of the source area. 653 * @param dst_as Pointer to destination address space. 654 * @param dst_base Target base address. 864 655 * @param dst_flags_mask Destination address space area flags mask. 865 656 * 866 * @return Zero on success. 867 * @return ENOENT if there is no such task or such address space. 868 * @return EPERM if there was a problem in accepting the area. 869 * @return ENOMEM if there was a problem in allocating destination 870 * address space area. 871 * @return ENOTSUP if the address space area backend does not support 872 * sharing. 873 * 657 * @return Zero on success or ENOENT if there is no such task or if 658 * there is no such address space area, EPERM if there was 659 * a problem in accepting the area or ENOMEM if there was a 660 * problem in allocating destination address space area. 661 * ENOTSUP is returned if the address space area backend 662 * does not support sharing. 
874 663 */ 875 664 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 876 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 877 { 665 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) 666 { 667 ipl_t ipl; 668 int src_flags; 669 size_t src_size; 670 as_area_t *src_area, *dst_area; 671 share_info_t *sh_info; 672 mem_backend_t *src_backend; 673 mem_backend_data_t src_backend_data; 674 675 ipl = interrupts_disable(); 878 676 mutex_lock(&src_as->lock); 879 as_area_t *src_area = find_area_and_lock(src_as, src_base);677 src_area = find_area_and_lock(src_as, src_base); 880 678 if (!src_area) { 881 679 /* 882 680 * Could not find the source address space area. 883 *884 681 */ 885 682 mutex_unlock(&src_as->lock); 683 interrupts_restore(ipl); 886 684 return ENOENT; 887 685 } 888 889 if ( (!src_area->backend) || (!src_area->backend->share)) {686 687 if (!src_area->backend || !src_area->backend->share) { 890 688 /* 891 689 * There is no backend or the backend does not 892 690 * know how to share the area. 893 *894 691 */ 895 692 mutex_unlock(&src_area->lock); 896 693 mutex_unlock(&src_as->lock); 694 interrupts_restore(ipl); 897 695 return ENOTSUP; 898 696 } 899 697 900 s ize_t src_size = src_area->pages * PAGE_SIZE;901 unsigned intsrc_flags = src_area->flags;902 mem_backend_t *src_backend = src_area->backend;903 mem_backend_data_tsrc_backend_data = src_area->backend_data;904 698 src_size = src_area->pages * PAGE_SIZE; 699 src_flags = src_area->flags; 700 src_backend = src_area->backend; 701 src_backend_data = src_area->backend_data; 702 905 703 /* Share the cacheable flag from the original mapping */ 906 704 if (src_flags & AS_AREA_CACHEABLE) 907 705 dst_flags_mask |= AS_AREA_CACHEABLE; 908 909 if ( (src_size != acc_size)||910 ( (src_flags & dst_flags_mask) != dst_flags_mask)) {706 707 if (src_size != acc_size || 708 (src_flags & dst_flags_mask) != dst_flags_mask) { 911 709 mutex_unlock(&src_area->lock); 912 710 mutex_unlock(&src_as->lock); 711 interrupts_restore(ipl); 913 712 return EPERM; 914 713 } 915 714 916 715 /* 917 716 * Now we are committed to sharing the area. 918 717 * First, prepare the area for sharing. 919 718 * Then it will be safe to unlock it. 920 * 921 */ 922 share_info_t *sh_info = src_area->sh_info; 719 */ 720 sh_info = src_area->sh_info; 923 721 if (!sh_info) { 924 722 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); … … 927 725 btree_create(&sh_info->pagemap); 928 726 src_area->sh_info = sh_info; 929 930 727 /* 931 728 * Call the backend to setup sharing. 932 *933 729 */ 934 730 src_area->backend->share(src_area); … … 938 734 mutex_unlock(&sh_info->lock); 939 735 } 940 736 941 737 mutex_unlock(&src_area->lock); 942 738 mutex_unlock(&src_as->lock); 943 739 944 740 /* 945 741 * Create copy of the source address space area. … … 949 745 * The flags of the source area are masked against dst_flags_mask 950 746 * to support sharing in less privileged mode. 951 * 952 */ 953 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 954 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 747 */ 748 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, 749 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 955 750 if (!dst_area) { 956 751 /* … … 959 754 sh_info_remove_reference(sh_info); 960 755 756 interrupts_restore(ipl); 961 757 return ENOMEM; 962 758 } 963 759 964 760 /* 965 761 * Now the destination address space area has been 966 762 * fully initialized. 
Clear the AS_AREA_ATTR_PARTIAL 967 763 * attribute and set the sh_info. 968 * 969 */ 970 mutex_lock(&dst_as->lock); 764 */ 765 mutex_lock(&dst_as->lock); 971 766 mutex_lock(&dst_area->lock); 972 767 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 973 768 dst_area->sh_info = sh_info; 974 769 mutex_unlock(&dst_area->lock); 975 mutex_unlock(&dst_as->lock); 770 mutex_unlock(&dst_as->lock); 771 772 interrupts_restore(ipl); 976 773 977 774 return 0; … … 980 777 /** Check access mode for address space area. 981 778 * 982 * @param area Address space area. 983 * @param access Access mode. 984 * 985 * @return False if access violates area's permissions, true 986 * otherwise. 987 * 988 */ 989 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 779 * The address space area must be locked prior to this call. 780 * 781 * @param area Address space area. 782 * @param access Access mode. 783 * 784 * @return False if access violates area's permissions, true 785 * otherwise. 786 */ 787 bool as_area_check_access(as_area_t *area, pf_access_t access) 990 788 { 991 789 int flagmap[] = { … … 995 793 }; 996 794 997 ASSERT(mutex_locked(&area->lock));998 999 795 if (!(area->flags & flagmap[access])) 1000 796 return false; 1001 797 1002 798 return true; 1003 }1004 1005 /** Convert address space area flags to page flags.1006 *1007 * @param aflags Flags of some address space area.1008 *1009 * @return Flags to be passed to page_mapping_insert().1010 *1011 */1012 NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags)1013 {1014 unsigned int flags = PAGE_USER | PAGE_PRESENT;1015 1016 if (aflags & AS_AREA_READ)1017 flags |= PAGE_READ;1018 1019 if (aflags & AS_AREA_WRITE)1020 flags |= PAGE_WRITE;1021 1022 if (aflags & AS_AREA_EXEC)1023 flags |= PAGE_EXEC;1024 1025 if (aflags & AS_AREA_CACHEABLE)1026 flags |= PAGE_CACHEABLE;1027 1028 return flags;1029 799 } 1030 800 … … 1043 813 * 1044 814 */ 1045 int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address) 1046 { 815 int as_area_change_flags(as_t *as, int flags, uintptr_t address) 816 { 817 as_area_t *area; 818 link_t *cur; 819 ipl_t ipl; 820 int page_flags; 821 uintptr_t *old_frame; 822 size_t frame_idx; 823 size_t used_pages; 824 1047 825 /* Flags for the new memory mapping */ 1048 unsigned int page_flags = area_flags_to_page_flags(flags); 1049 826 page_flags = area_flags_to_page_flags(flags); 827 828 ipl = interrupts_disable(); 1050 829 mutex_lock(&as->lock); 1051 1052 a s_area_t *area = find_area_and_lock(as, address);830 831 area = find_area_and_lock(as, address); 1053 832 if (!area) { 1054 833 mutex_unlock(&as->lock); 834 interrupts_restore(ipl); 1055 835 return ENOENT; 1056 836 } 1057 837 1058 838 if ((area->sh_info) || (area->backend != &anon_backend)) { 1059 839 /* Copying shared areas not supported yet */ … … 1061 841 mutex_unlock(&area->lock); 1062 842 mutex_unlock(&as->lock); 843 interrupts_restore(ipl); 1063 844 return ENOTSUP; 1064 845 } 1065 846 1066 847 /* 1067 848 * Compute total number of used pages in the used_space B+tree 1068 * 1069 */ 1070 size_t used_pages = 0; 1071 link_t *cur; 1072 849 */ 850 used_pages = 0; 851 1073 852 for (cur = area->used_space.leaf_head.next; 1074 853 cur != &area->used_space.leaf_head; cur = cur->next) { 1075 btree_node_t *node 1076 = list_get_instance(cur, btree_node_t, leaf_link); 1077 btree_key_t i; 854 btree_node_t *node; 855 unsigned int i; 1078 856 1079 for (i = 0; i < node->keys; i++) 857 node = list_get_instance(cur, btree_node_t, leaf_link); 858 for (i = 0; i < 
node->keys; i++) { 1080 859 used_pages += (size_t) node->value[i]; 1081 } 1082 860 } 861 } 862 1083 863 /* An array for storing frame numbers */ 1084 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0);1085 864 old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 865 1086 866 page_table_lock(as, false); 1087 867 1088 868 /* 1089 869 * Start TLB shootdown sequence. 1090 * 1091 */ 1092 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 1093 area->pages); 1094 870 */ 871 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 872 1095 873 /* 1096 874 * Remove used pages from page tables and remember their frame 1097 875 * numbers. 1098 * 1099 */ 1100 size_t frame_idx = 0; 1101 876 */ 877 frame_idx = 0; 878 1102 879 for (cur = area->used_space.leaf_head.next; 1103 880 cur != &area->used_space.leaf_head; cur = cur->next) { 1104 btree_node_t *node 1105 = list_get_instance(cur, btree_node_t, leaf_link); 1106 btree_key_t i; 881 btree_node_t *node; 882 unsigned int i; 1107 883 884 node = list_get_instance(cur, btree_node_t, leaf_link); 1108 885 for (i = 0; i < node->keys; i++) { 1109 uintptr_t ptr = node->key[i]; 1110 size_t size; 886 uintptr_t b = node->key[i]; 887 size_t j; 888 pte_t *pte; 1111 889 1112 for (size = 0; size < (size_t) node->value[i]; size++) { 1113 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 1114 1115 ASSERT(pte); 1116 ASSERT(PTE_VALID(pte)); 1117 ASSERT(PTE_PRESENT(pte)); 1118 890 for (j = 0; j < (size_t) node->value[i]; j++) { 891 pte = page_mapping_find(as, b + j * PAGE_SIZE); 892 ASSERT(pte && PTE_VALID(pte) && 893 PTE_PRESENT(pte)); 1119 894 old_frame[frame_idx++] = PTE_GET_FRAME(pte); 1120 895 1121 896 /* Remove old mapping */ 1122 page_mapping_remove(as, ptr + size* PAGE_SIZE);897 page_mapping_remove(as, b + j * PAGE_SIZE); 1123 898 } 1124 899 } 1125 900 } 1126 901 1127 902 /* 1128 903 * Finish TLB shootdown sequence. 1129 * 1130 */ 1131 904 */ 905 1132 906 tlb_invalidate_pages(as->asid, area->base, area->pages); 1133 907 … … 1135 909 * Invalidate potential software translation caches (e.g. TSB on 1136 910 * sparc64). 1137 *1138 911 */ 1139 912 as_invalidate_translation_cache(as, area->base, area->pages); 1140 tlb_shootdown_finalize( ipl);1141 913 tlb_shootdown_finalize(); 914 1142 915 page_table_unlock(as, false); 1143 916 1144 917 /* 1145 918 * Set the new flags. 1146 919 */ 1147 920 area->flags = flags; 1148 921 1149 922 /* 1150 923 * Map pages back in with new flags. 
This step is kept separate … … 1153 926 */ 1154 927 frame_idx = 0; 1155 928 1156 929 for (cur = area->used_space.leaf_head.next; 1157 930 cur != &area->used_space.leaf_head; cur = cur->next) { 1158 btree_node_t *node 1159 = list_get_instance(cur, btree_node_t, leaf_link); 1160 btree_key_t i; 931 btree_node_t *node; 932 unsigned int i; 1161 933 934 node = list_get_instance(cur, btree_node_t, leaf_link); 1162 935 for (i = 0; i < node->keys; i++) { 1163 uintptr_t ptr= node->key[i];1164 size_t size;936 uintptr_t b = node->key[i]; 937 size_t j; 1165 938 1166 for ( size = 0; size < (size_t) node->value[i]; size++) {939 for (j = 0; j < (size_t) node->value[i]; j++) { 1167 940 page_table_lock(as, false); 1168 941 1169 942 /* Insert the new mapping */ 1170 page_mapping_insert(as, ptr + size* PAGE_SIZE,943 page_mapping_insert(as, b + j * PAGE_SIZE, 1171 944 old_frame[frame_idx++], page_flags); 1172 945 1173 946 page_table_unlock(as, false); 1174 947 } 1175 948 } 1176 949 } 1177 950 1178 951 free(old_frame); 1179 952 1180 953 mutex_unlock(&area->lock); 1181 954 mutex_unlock(&as->lock); 1182 955 interrupts_restore(ipl); 956 1183 957 return 0; 1184 958 } 959 1185 960 1186 961 /** Handle page fault within the current address space. … … 1192 967 * Interrupts are assumed disabled. 1193 968 * 1194 * @param page Faulting page. 1195 * @param access Access mode that caused the page fault (i.e. 1196 * read/write/exec). 1197 * @param istate Pointer to the interrupted state. 1198 * 1199 * @return AS_PF_FAULT on page fault. 1200 * @return AS_PF_OK on success. 1201 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace() 1202 * or copy_from_uspace(). 1203 * 969 * @param page Faulting page. 970 * @param access Access mode that caused the page fault (i.e. 971 * read/write/exec). 972 * @param istate Pointer to the interrupted state. 973 * 974 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or 975 * AS_PF_DEFER if the fault was caused by copy_to_uspace() 976 * or copy_from_uspace(). 1204 977 */ 1205 978 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 1206 979 { 980 pte_t *pte; 981 as_area_t *area; 982 1207 983 if (!THREAD) 1208 984 return AS_PF_FAULT; … … 1212 988 1213 989 mutex_lock(&AS->lock); 1214 a s_area_t *area = find_area_and_lock(AS, page);990 area = find_area_and_lock(AS, page); 1215 991 if (!area) { 1216 992 /* 1217 993 * No area contained mapping for 'page'. 1218 994 * Signal page fault to low-level handler. 1219 *1220 995 */ 1221 996 mutex_unlock(&AS->lock); 1222 997 goto page_fault; 1223 998 } 1224 999 1225 1000 if (area->attributes & AS_AREA_ATTR_PARTIAL) { 1226 1001 /* … … 1230 1005 mutex_unlock(&area->lock); 1231 1006 mutex_unlock(&AS->lock); 1232 goto page_fault; 1233 } 1234 1235 if ( (!area->backend) || (!area->backend->page_fault)) {1007 goto page_fault; 1008 } 1009 1010 if (!area->backend || !area->backend->page_fault) { 1236 1011 /* 1237 1012 * The address space area is not backed by any backend 1238 1013 * or the backend cannot handle page faults. 1239 *1240 1014 */ 1241 1015 mutex_unlock(&area->lock); 1242 1016 mutex_unlock(&AS->lock); 1243 goto page_fault; 1244 } 1245 1017 goto page_fault; 1018 } 1019 1246 1020 page_table_lock(AS, false); 1247 1021 … … 1249 1023 * To avoid race condition between two page faults on the same address, 1250 1024 * we need to make sure the mapping has not been already inserted. 
1251 * 1252 */ 1253 pte_t *pte; 1025 */ 1254 1026 if ((pte = page_mapping_find(AS, page))) { 1255 1027 if (PTE_PRESENT(pte)) { … … 1267 1039 /* 1268 1040 * Resort to the backend page fault handler. 1269 *1270 1041 */ 1271 1042 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1280 1051 mutex_unlock(&AS->lock); 1281 1052 return AS_PF_OK; 1282 1053 1283 1054 page_fault: 1284 1055 if (THREAD->in_copy_from_uspace) { … … 1293 1064 return AS_PF_FAULT; 1294 1065 } 1295 1066 1296 1067 return AS_PF_DEFER; 1297 1068 } … … 1305 1076 * When this function is enetered, no spinlocks may be held. 1306 1077 * 1307 * @param old Old address space or NULL. 1308 * @param new New address space. 1309 * 1078 * @param old Old address space or NULL. 1079 * @param new New address space. 1310 1080 */ 1311 1081 void as_switch(as_t *old_as, as_t *new_as) … … 1313 1083 DEADLOCK_PROBE_INIT(p_asidlock); 1314 1084 preemption_disable(); 1315 1316 1085 retry: 1317 1086 (void) interrupts_disable(); 1318 1087 if (!spinlock_trylock(&asidlock)) { 1319 /* 1088 /* 1320 1089 * Avoid deadlock with TLB shootdown. 1321 1090 * We can enable interrupts here because 1322 1091 * preemption is disabled. We should not be 1323 1092 * holding any other lock. 1324 *1325 1093 */ 1326 1094 (void) interrupts_enable(); … … 1329 1097 } 1330 1098 preemption_enable(); 1331 1099 1332 1100 /* 1333 1101 * First, take care of the old address space. 1334 */ 1102 */ 1335 1103 if (old_as) { 1336 1104 ASSERT(old_as->cpu_refcount); 1337 1338 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1105 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1339 1106 /* 1340 1107 * The old address space is no longer active on … … 1342 1109 * list of inactive address spaces with assigned 1343 1110 * ASID. 1344 *1345 1111 */ 1346 1112 ASSERT(old_as->asid != ASID_INVALID); 1347 1348 1113 list_append(&old_as->inactive_as_with_asid_link, 1349 1114 &inactive_as_with_asid_head); 1350 1115 } 1351 1116 1352 1117 /* 1353 1118 * Perform architecture-specific tasks when the address space 1354 1119 * is being removed from the CPU. 1355 *1356 1120 */ 1357 1121 as_deinstall_arch(old_as); 1358 1122 } 1359 1123 1360 1124 /* 1361 1125 * Second, prepare the new address space. 1362 *1363 1126 */ 1364 1127 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1368 1131 new_as->asid = asid_get(); 1369 1132 } 1370 1371 1133 #ifdef AS_PAGE_TABLE 1372 1134 SET_PTL0_ADDRESS(new_as->genarch.page_table); … … 1376 1138 * Perform architecture-specific steps. 1377 1139 * (e.g. write ASID to hardware register etc.) 1378 *1379 1140 */ 1380 1141 as_install_arch(new_as); 1381 1142 1382 1143 spinlock_unlock(&asidlock); 1383 1144 … … 1385 1146 } 1386 1147 1148 /** Convert address space area flags to page flags. 1149 * 1150 * @param aflags Flags of some address space area. 1151 * 1152 * @return Flags to be passed to page_mapping_insert(). 1153 */ 1154 int area_flags_to_page_flags(int aflags) 1155 { 1156 int flags; 1157 1158 flags = PAGE_USER | PAGE_PRESENT; 1159 1160 if (aflags & AS_AREA_READ) 1161 flags |= PAGE_READ; 1162 1163 if (aflags & AS_AREA_WRITE) 1164 flags |= PAGE_WRITE; 1165 1166 if (aflags & AS_AREA_EXEC) 1167 flags |= PAGE_EXEC; 1168 1169 if (aflags & AS_AREA_CACHEABLE) 1170 flags |= PAGE_CACHEABLE; 1171 1172 return flags; 1173 } 1174 1387 1175 /** Compute flags for virtual address translation subsytem. 
1388 1176 * 1389 * @param area Address space area.1390 * 1391 * @return Flags to be used in page_mapping_insert().1392 * 1393 * /1394 NO_TRACE unsigned int as_area_get_flags(as_area_t *area) 1395 { 1396 ASSERT(mutex_locked(&area->lock)); 1397 1398 return area_flags_to_page_flags(a rea->flags);1177 * The address space area must be locked. 1178 * Interrupts must be disabled. 1179 * 1180 * @param a Address space area. 1181 * 1182 * @return Flags to be used in page_mapping_insert(). 1183 */ 1184 int as_area_get_flags(as_area_t *a) 1185 { 1186 return area_flags_to_page_flags(a->flags); 1399 1187 } 1400 1188 … … 1404 1192 * table. 1405 1193 * 1406 * @param flags Flags saying whether the page table is for the kernel 1407 * address space. 1408 * 1409 * @return First entry of the page table. 1410 * 1411 */ 1412 NO_TRACE pte_t *page_table_create(unsigned int flags) 1194 * @param flags Flags saying whether the page table is for the kernel 1195 * address space. 1196 * 1197 * @return First entry of the page table. 1198 */ 1199 pte_t *page_table_create(int flags) 1413 1200 { 1414 1201 ASSERT(as_operations); … … 1422 1209 * Destroy page table in architecture specific way. 1423 1210 * 1424 * @param page_table Physical address of PTL0. 1425 * 1426 */ 1427 NO_TRACE void page_table_destroy(pte_t *page_table) 1211 * @param page_table Physical address of PTL0. 1212 */ 1213 void page_table_destroy(pte_t *page_table) 1428 1214 { 1429 1215 ASSERT(as_operations); … … 1437 1223 * This function should be called before any page_mapping_insert(), 1438 1224 * page_mapping_remove() and page_mapping_find(). 1439 * 1225 * 1440 1226 * Locking order is such that address space areas must be locked 1441 1227 * prior to this call. Address space can be locked prior to this 1442 1228 * call in which case the lock argument is false. 1443 1229 * 1444 * @param as Address space. 1445 * @param lock If false, do not attempt to lock as->lock. 1446 * 1447 */ 1448 NO_TRACE void page_table_lock(as_t *as, bool lock) 1230 * @param as Address space. 1231 * @param lock If false, do not attempt to lock as->lock. 1232 */ 1233 void page_table_lock(as_t *as, bool lock) 1449 1234 { 1450 1235 ASSERT(as_operations); … … 1456 1241 /** Unlock page table. 1457 1242 * 1458 * @param as Address space. 1459 * @param unlock If false, do not attempt to unlock as->lock. 1460 * 1461 */ 1462 NO_TRACE void page_table_unlock(as_t *as, bool unlock) 1243 * @param as Address space. 1244 * @param unlock If false, do not attempt to unlock as->lock. 1245 */ 1246 void page_table_unlock(as_t *as, bool unlock) 1463 1247 { 1464 1248 ASSERT(as_operations); … … 1468 1252 } 1469 1253 1470 /** Test whether page tables are locked. 1471 * 1472 * @param as Address space where the page tables belong. 1473 * 1474 * @return True if the page tables belonging to the address soace 1475 * are locked, otherwise false. 1476 */ 1477 NO_TRACE bool page_table_locked(as_t *as) 1478 { 1479 ASSERT(as_operations); 1480 ASSERT(as_operations->page_table_locked); 1481 1482 return as_operations->page_table_locked(as); 1254 1255 /** Find address space area and lock it. 1256 * 1257 * The address space must be locked and interrupts must be disabled. 1258 * 1259 * @param as Address space. 1260 * @param va Virtual address. 1261 * 1262 * @return Locked address space area containing va on success or 1263 * NULL on failure. 
1264 */ 1265 as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 1266 { 1267 as_area_t *a; 1268 btree_node_t *leaf, *lnode; 1269 unsigned int i; 1270 1271 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1272 if (a) { 1273 /* va is the base address of an address space area */ 1274 mutex_lock(&a->lock); 1275 return a; 1276 } 1277 1278 /* 1279 * Search the leaf node and the righmost record of its left neighbour 1280 * to find out whether this is a miss or va belongs to an address 1281 * space area found there. 1282 */ 1283 1284 /* First, search the leaf node itself. */ 1285 for (i = 0; i < leaf->keys; i++) { 1286 a = (as_area_t *) leaf->value[i]; 1287 mutex_lock(&a->lock); 1288 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { 1289 return a; 1290 } 1291 mutex_unlock(&a->lock); 1292 } 1293 1294 /* 1295 * Second, locate the left neighbour and test its last record. 1296 * Because of its position in the B+tree, it must have base < va. 1297 */ 1298 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1299 if (lnode) { 1300 a = (as_area_t *) lnode->value[lnode->keys - 1]; 1301 mutex_lock(&a->lock); 1302 if (va < a->base + a->pages * PAGE_SIZE) { 1303 return a; 1304 } 1305 mutex_unlock(&a->lock); 1306 } 1307 1308 return NULL; 1309 } 1310 1311 /** Check area conflicts with other areas. 1312 * 1313 * The address space must be locked and interrupts must be disabled. 1314 * 1315 * @param as Address space. 1316 * @param va Starting virtual address of the area being tested. 1317 * @param size Size of the area being tested. 1318 * @param avoid_area Do not touch this area. 1319 * 1320 * @return True if there is no conflict, false otherwise. 1321 */ 1322 bool 1323 check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) 1324 { 1325 as_area_t *a; 1326 btree_node_t *leaf, *node; 1327 unsigned int i; 1328 1329 /* 1330 * We don't want any area to have conflicts with NULL page. 1331 */ 1332 if (overlaps(va, size, NULL, PAGE_SIZE)) 1333 return false; 1334 1335 /* 1336 * The leaf node is found in O(log n), where n is proportional to 1337 * the number of address space areas belonging to as. 1338 * The check for conflicts is then attempted on the rightmost 1339 * record in the left neighbour, the leftmost record in the right 1340 * neighbour and all records in the leaf node itself. 1341 */ 1342 1343 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { 1344 if (a != avoid_area) 1345 return false; 1346 } 1347 1348 /* First, check the two border cases. */ 1349 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { 1350 a = (as_area_t *) node->value[node->keys - 1]; 1351 mutex_lock(&a->lock); 1352 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1353 mutex_unlock(&a->lock); 1354 return false; 1355 } 1356 mutex_unlock(&a->lock); 1357 } 1358 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 1359 if (node) { 1360 a = (as_area_t *) node->value[0]; 1361 mutex_lock(&a->lock); 1362 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1363 mutex_unlock(&a->lock); 1364 return false; 1365 } 1366 mutex_unlock(&a->lock); 1367 } 1368 1369 /* Second, check the leaf node. 
*/ 1370 for (i = 0; i < leaf->keys; i++) { 1371 a = (as_area_t *) leaf->value[i]; 1372 1373 if (a == avoid_area) 1374 continue; 1375 1376 mutex_lock(&a->lock); 1377 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1378 mutex_unlock(&a->lock); 1379 return false; 1380 } 1381 mutex_unlock(&a->lock); 1382 } 1383 1384 /* 1385 * So far, the area does not conflict with other areas. 1386 * Check if it doesn't conflict with kernel address space. 1387 */ 1388 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 1389 return !overlaps(va, size, 1390 KERNEL_ADDRESS_SPACE_START, 1391 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 1392 } 1393 1394 return true; 1483 1395 } 1484 1396 1485 1397 /** Return size of the address space area with given base. 1486 1398 * 1487 * @param base Arbitrary address inside the address space area. 1488 * 1489 * @return Size of the address space area in bytes or zero if it 1490 * does not exist. 1491 * 1399 * @param base Arbitrary address insede the address space area. 1400 * 1401 * @return Size of the address space area in bytes or zero if it 1402 * does not exist. 1492 1403 */ 1493 1404 size_t as_area_get_size(uintptr_t base) 1494 1405 { 1406 ipl_t ipl; 1407 as_area_t *src_area; 1495 1408 size_t size; 1496 1497 page_table_lock(AS, true); 1498 as_area_t *src_area = find_area_and_lock(AS, base); 1499 1409 1410 ipl = interrupts_disable(); 1411 src_area = find_area_and_lock(AS, base); 1500 1412 if (src_area) { 1501 1413 size = src_area->pages * PAGE_SIZE; 1502 1414 mutex_unlock(&src_area->lock); 1503 } else 1415 } else { 1504 1416 size = 0; 1505 1506 page_table_unlock(AS, true);1417 } 1418 interrupts_restore(ipl); 1507 1419 return size; 1508 1420 } … … 1512 1424 * The address space area must be already locked. 1513 1425 * 1514 * @param area Address space area. 1515 * @param page First page to be marked. 1516 * @param count Number of page to be marked. 1517 * 1518 * @return Zero on failure and non-zero on success. 1519 * 1520 */ 1521 int used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1522 { 1523 ASSERT(mutex_locked(&area->lock)); 1426 * @param a Address space area. 1427 * @param page First page to be marked. 1428 * @param count Number of page to be marked. 1429 * 1430 * @return Zero on failure and non-zero on success. 1431 */ 1432 int used_space_insert(as_area_t *a, uintptr_t page, size_t count) 1433 { 1434 btree_node_t *leaf, *node; 1435 size_t pages; 1436 unsigned int i; 1437 1524 1438 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1525 1439 ASSERT(count); 1526 1527 btree_node_t *leaf; 1528 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1440 1441 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1529 1442 if (pages) { 1530 1443 /* 1531 1444 * We hit the beginning of some used space. 1532 *1533 1445 */ 1534 1446 return 0; 1535 1447 } 1536 1448 1537 1449 if (!leaf->keys) { 1538 btree_insert(&a rea->used_space, page, (void *) count, leaf);1450 btree_insert(&a->used_space, page, (void *) count, leaf); 1539 1451 return 1; 1540 1452 } 1541 1542 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);1453 1454 node = btree_leaf_node_left_neighbour(&a->used_space, leaf); 1543 1455 if (node) { 1544 1456 uintptr_t left_pg = node->key[node->keys - 1]; … … 1551 1463 * somewhere between the rightmost interval of 1552 1464 * the left neigbour and the first interval of the leaf. 1553 * 1554 */ 1555 1465 */ 1466 1556 1467 if (page >= right_pg) { 1557 1468 /* Do nothing. 
*/ … … 1563 1474 right_cnt * PAGE_SIZE)) { 1564 1475 /* The interval intersects with the right interval. */ 1565 return 0; 1476 return 0; 1566 1477 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1567 1478 (page + count * PAGE_SIZE == right_pg)) { … … 1569 1480 * The interval can be added by merging the two already 1570 1481 * present intervals. 1571 *1572 1482 */ 1573 1483 node->value[node->keys - 1] += count + right_cnt; 1574 btree_remove(&a rea->used_space, right_pg, leaf);1575 return 1; 1484 btree_remove(&a->used_space, right_pg, leaf); 1485 return 1; 1576 1486 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1577 /* 1487 /* 1578 1488 * The interval can be added by simply growing the left 1579 1489 * interval. 1580 *1581 1490 */ 1582 1491 node->value[node->keys - 1] += count; … … 1587 1496 * the right interval down and increasing its size 1588 1497 * accordingly. 1589 *1590 1498 */ 1591 1499 leaf->value[0] += count; … … 1596 1504 * The interval is between both neigbouring intervals, 1597 1505 * but cannot be merged with any of them. 1598 *1599 1506 */ 1600 btree_insert(&a rea->used_space, page, (void *) count,1507 btree_insert(&a->used_space, page, (void *) count, 1601 1508 leaf); 1602 1509 return 1; … … 1605 1512 uintptr_t right_pg = leaf->key[0]; 1606 1513 size_t right_cnt = (size_t) leaf->value[0]; 1607 1514 1608 1515 /* 1609 1516 * Investigate the border case in which the left neighbour does 1610 1517 * not exist but the interval fits from the left. 1611 * 1612 */ 1613 1518 */ 1519 1614 1520 if (overlaps(page, count * PAGE_SIZE, right_pg, 1615 1521 right_cnt * PAGE_SIZE)) { … … 1621 1527 * right interval down and increasing its size 1622 1528 * accordingly. 1623 *1624 1529 */ 1625 1530 leaf->key[0] = page; … … 1630 1535 * The interval doesn't adjoin with the right interval. 1631 1536 * It must be added individually. 1632 *1633 1537 */ 1634 btree_insert(&a rea->used_space, page, (void *) count,1538 btree_insert(&a->used_space, page, (void *) count, 1635 1539 leaf); 1636 1540 return 1; 1637 1541 } 1638 1542 } 1639 1640 node = btree_leaf_node_right_neighbour(&a rea->used_space, leaf);1543 1544 node = btree_leaf_node_right_neighbour(&a->used_space, leaf); 1641 1545 if (node) { 1642 1546 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1649 1553 * somewhere between the leftmost interval of 1650 1554 * the right neigbour and the last interval of the leaf. 1651 * 1652 */ 1653 1555 */ 1556 1654 1557 if (page < left_pg) { 1655 1558 /* Do nothing. */ … … 1661 1564 right_cnt * PAGE_SIZE)) { 1662 1565 /* The interval intersects with the right interval. */ 1663 return 0; 1566 return 0; 1664 1567 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1665 1568 (page + count * PAGE_SIZE == right_pg)) { … … 1667 1570 * The interval can be added by merging the two already 1668 1571 * present intervals. 1669 * 1670 */ 1572 * */ 1671 1573 leaf->value[leaf->keys - 1] += count + right_cnt; 1672 btree_remove(&a rea->used_space, right_pg, node);1673 return 1; 1574 btree_remove(&a->used_space, right_pg, node); 1575 return 1; 1674 1576 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1675 1577 /* 1676 1578 * The interval can be added by simply growing the left 1677 1579 * interval. 1678 * 1679 */ 1580 * */ 1680 1581 leaf->value[leaf->keys - 1] += count; 1681 1582 return 1; … … 1685 1586 * the right interval down and increasing its size 1686 1587 * accordingly. 
1687 *1688 1588 */ 1689 1589 node->value[0] += count; … … 1694 1594 * The interval is between both neigbouring intervals, 1695 1595 * but cannot be merged with any of them. 1696 *1697 1596 */ 1698 btree_insert(&a rea->used_space, page, (void *) count,1597 btree_insert(&a->used_space, page, (void *) count, 1699 1598 leaf); 1700 1599 return 1; … … 1703 1602 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1704 1603 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1705 1604 1706 1605 /* 1707 1606 * Investigate the border case in which the right neighbour 1708 1607 * does not exist but the interval fits from the right. 1709 * 1710 */ 1711 1608 */ 1609 1712 1610 if (overlaps(page, count * PAGE_SIZE, left_pg, 1713 1611 left_cnt * PAGE_SIZE)) { … … 1718 1616 * The interval can be added by growing the left 1719 1617 * interval. 1720 *1721 1618 */ 1722 1619 leaf->value[leaf->keys - 1] += count; … … 1726 1623 * The interval doesn't adjoin with the left interval. 1727 1624 * It must be added individually. 1728 *1729 1625 */ 1730 btree_insert(&a rea->used_space, page, (void *) count,1626 btree_insert(&a->used_space, page, (void *) count, 1731 1627 leaf); 1732 1628 return 1; … … 1738 1634 * only between two other intervals of the leaf. The two border cases 1739 1635 * were already resolved. 1740 * 1741 */ 1742 btree_key_t i; 1636 */ 1743 1637 for (i = 1; i < leaf->keys; i++) { 1744 1638 if (page < leaf->key[i]) { … … 1747 1641 size_t left_cnt = (size_t) leaf->value[i - 1]; 1748 1642 size_t right_cnt = (size_t) leaf->value[i]; 1749 1643 1750 1644 /* 1751 1645 * The interval fits between left_pg and right_pg. 1752 *1753 1646 */ 1754 1647 1755 1648 if (overlaps(page, count * PAGE_SIZE, left_pg, 1756 1649 left_cnt * PAGE_SIZE)) { … … 1758 1651 * The interval intersects with the left 1759 1652 * interval. 1760 *1761 1653 */ 1762 1654 return 0; … … 1766 1658 * The interval intersects with the right 1767 1659 * interval. 1768 *1769 1660 */ 1770 return 0; 1661 return 0; 1771 1662 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1772 1663 (page + count * PAGE_SIZE == right_pg)) { … … 1774 1665 * The interval can be added by merging the two 1775 1666 * already present intervals. 1776 *1777 1667 */ 1778 1668 leaf->value[i - 1] += count + right_cnt; 1779 btree_remove(&a rea->used_space, right_pg, leaf);1780 return 1; 1669 btree_remove(&a->used_space, right_pg, leaf); 1670 return 1; 1781 1671 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1782 1672 /* 1783 1673 * The interval can be added by simply growing 1784 1674 * the left interval. 1785 *1786 1675 */ 1787 1676 leaf->value[i - 1] += count; … … 1789 1678 } else if (page + count * PAGE_SIZE == right_pg) { 1790 1679 /* 1791 1680 * The interval can be addded by simply moving 1792 1681 * base of the right interval down and 1793 1682 * increasing its size accordingly. 1794 * 1795 */ 1683 */ 1796 1684 leaf->value[i] += count; 1797 1685 leaf->key[i] = page; … … 1802 1690 * intervals, but cannot be merged with any of 1803 1691 * them. 1804 *1805 1692 */ 1806 btree_insert(&a rea->used_space, page,1693 btree_insert(&a->used_space, page, 1807 1694 (void *) count, leaf); 1808 1695 return 1; … … 1810 1697 } 1811 1698 } 1812 1699 1813 1700 panic("Inconsistency detected while adding %" PRIs " pages of used " 1814 1701 "space at %p.", count, page); … … 1819 1706 * The address space area must be already locked. 1820 1707 * 1821 * @param area Address space area. 1822 * @param page First page to be marked. 1823 * @param count Number of page to be marked. 
1824 * 1825 * @return Zero on failure and non-zero on success. 1826 * 1827 */ 1828 int used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1829 { 1830 ASSERT(mutex_locked(&area->lock)); 1708 * @param a Address space area. 1709 * @param page First page to be marked. 1710 * @param count Number of page to be marked. 1711 * 1712 * @return Zero on failure and non-zero on success. 1713 */ 1714 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1715 { 1716 btree_node_t *leaf, *node; 1717 size_t pages; 1718 unsigned int i; 1719 1831 1720 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1832 1721 ASSERT(count); 1833 1834 btree_node_t *leaf; 1835 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1722 1723 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1836 1724 if (pages) { 1837 1725 /* 1838 1726 * We are lucky, page is the beginning of some interval. 1839 *1840 1727 */ 1841 1728 if (count > pages) { 1842 1729 return 0; 1843 1730 } else if (count == pages) { 1844 btree_remove(&a rea->used_space, page, leaf);1731 btree_remove(&a->used_space, page, leaf); 1845 1732 return 1; 1846 1733 } else { … … 1848 1735 * Find the respective interval. 1849 1736 * Decrease its size and relocate its start address. 1850 *1851 1737 */ 1852 btree_key_t i;1853 1738 for (i = 0; i < leaf->keys; i++) { 1854 1739 if (leaf->key[i] == page) { … … 1861 1746 } 1862 1747 } 1863 1864 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);1865 if ( (node) && (page < leaf->key[0])) {1748 1749 node = btree_leaf_node_left_neighbour(&a->used_space, leaf); 1750 if (node && page < leaf->key[0]) { 1866 1751 uintptr_t left_pg = node->key[node->keys - 1]; 1867 1752 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1868 1753 1869 1754 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1870 1755 count * PAGE_SIZE)) { … … 1876 1761 * removed by updating the size of the bigger 1877 1762 * interval. 1878 *1879 1763 */ 1880 1764 node->value[node->keys - 1] -= count; … … 1882 1766 } else if (page + count * PAGE_SIZE < 1883 1767 left_pg + left_cnt*PAGE_SIZE) { 1768 size_t new_cnt; 1769 1884 1770 /* 1885 1771 * The interval is contained in the rightmost … … 1888 1774 * the original interval and also inserting a 1889 1775 * new interval. 1890 *1891 1776 */ 1892 size_tnew_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1777 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1893 1778 (page + count*PAGE_SIZE)) >> PAGE_WIDTH; 1894 1779 node->value[node->keys - 1] -= count + new_cnt; 1895 btree_insert(&a rea->used_space, page +1780 btree_insert(&a->used_space, page + 1896 1781 count * PAGE_SIZE, (void *) new_cnt, leaf); 1897 1782 return 1; … … 1899 1784 } 1900 1785 return 0; 1901 } else if (page < leaf->key[0]) 1786 } else if (page < leaf->key[0]) { 1902 1787 return 0; 1788 } 1903 1789 1904 1790 if (page > leaf->key[leaf->keys - 1]) { 1905 1791 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1906 1792 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1907 1793 1908 1794 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1909 1795 count * PAGE_SIZE)) { 1910 if (page + count * PAGE_SIZE == 1796 if (page + count * PAGE_SIZE == 1911 1797 left_pg + left_cnt * PAGE_SIZE) { 1912 1798 /* … … 1914 1800 * interval of the leaf and can be removed by 1915 1801 * updating the size of the bigger interval. 
1916 *1917 1802 */ 1918 1803 leaf->value[leaf->keys - 1] -= count; … … 1920 1805 } else if (page + count * PAGE_SIZE < left_pg + 1921 1806 left_cnt * PAGE_SIZE) { 1807 size_t new_cnt; 1808 1922 1809 /* 1923 1810 * The interval is contained in the rightmost … … 1926 1813 * original interval and also inserting a new 1927 1814 * interval. 1928 *1929 1815 */ 1930 size_tnew_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1816 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1931 1817 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1932 1818 leaf->value[leaf->keys - 1] -= count + new_cnt; 1933 btree_insert(&a rea->used_space, page +1819 btree_insert(&a->used_space, page + 1934 1820 count * PAGE_SIZE, (void *) new_cnt, leaf); 1935 1821 return 1; … … 1937 1823 } 1938 1824 return 0; 1939 } 1825 } 1940 1826 1941 1827 /* … … 1943 1829 * Now the interval can be only between intervals of the leaf. 1944 1830 */ 1945 btree_key_t i;1946 1831 for (i = 1; i < leaf->keys - 1; i++) { 1947 1832 if (page < leaf->key[i]) { 1948 1833 uintptr_t left_pg = leaf->key[i - 1]; 1949 1834 size_t left_cnt = (size_t) leaf->value[i - 1]; 1950 1835 1951 1836 /* 1952 1837 * Now the interval is between intervals corresponding … … 1962 1847 * be removed by updating the size of 1963 1848 * the bigger interval. 1964 *1965 1849 */ 1966 1850 leaf->value[i - 1] -= count; … … 1968 1852 } else if (page + count * PAGE_SIZE < 1969 1853 left_pg + left_cnt * PAGE_SIZE) { 1854 size_t new_cnt; 1855 1970 1856 /* 1971 1857 * The interval is contained in the … … 1975 1861 * also inserting a new interval. 1976 1862 */ 1977 size_tnew_cnt = ((left_pg +1863 new_cnt = ((left_pg + 1978 1864 left_cnt * PAGE_SIZE) - 1979 1865 (page + count * PAGE_SIZE)) >> 1980 1866 PAGE_WIDTH; 1981 1867 leaf->value[i - 1] -= count + new_cnt; 1982 btree_insert(&a rea->used_space, page +1868 btree_insert(&a->used_space, page + 1983 1869 count * PAGE_SIZE, (void *) new_cnt, 1984 1870 leaf); … … 1989 1875 } 1990 1876 } 1991 1877 1992 1878 error: 1993 1879 panic("Inconsistency detected while removing %" PRIs " pages of used " … … 1995 1881 } 1996 1882 1883 /** Remove reference to address space area share info. 1884 * 1885 * If the reference count drops to 0, the sh_info is deallocated. 1886 * 1887 * @param sh_info Pointer to address space area share info. 1888 */ 1889 void sh_info_remove_reference(share_info_t *sh_info) 1890 { 1891 bool dealloc = false; 1892 1893 mutex_lock(&sh_info->lock); 1894 ASSERT(sh_info->refcount); 1895 if (--sh_info->refcount == 0) { 1896 dealloc = true; 1897 link_t *cur; 1898 1899 /* 1900 * Now walk carefully the pagemap B+tree and free/remove 1901 * reference from all frames found there. 1902 */ 1903 for (cur = sh_info->pagemap.leaf_head.next; 1904 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 1905 btree_node_t *node; 1906 unsigned int i; 1907 1908 node = list_get_instance(cur, btree_node_t, leaf_link); 1909 for (i = 0; i < node->keys; i++) 1910 frame_free((uintptr_t) node->value[i]); 1911 } 1912 1913 } 1914 mutex_unlock(&sh_info->lock); 1915 1916 if (dealloc) { 1917 btree_destroy(&sh_info->pagemap); 1918 free(sh_info); 1919 } 1920 } 1921 1997 1922 /* 1998 1923 * Address space related syscalls. … … 2000 1925 2001 1926 /** Wrapper for as_area_create(). 
*/ 2002 unative_t sys_as_area_create(uintptr_t address, size_t size, unsignedint flags)1927 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags) 2003 1928 { 2004 1929 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 2010 1935 2011 1936 /** Wrapper for as_area_resize(). */ 2012 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsignedint flags)1937 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) 2013 1938 { 2014 1939 return (unative_t) as_area_resize(AS, address, size, 0); … … 2016 1941 2017 1942 /** Wrapper for as_area_change_flags(). */ 2018 unative_t sys_as_area_change_flags(uintptr_t address, unsignedint flags)1943 unative_t sys_as_area_change_flags(uintptr_t address, int flags) 2019 1944 { 2020 1945 return (unative_t) as_area_change_flags(AS, flags, address); … … 2029 1954 /** Get list of adress space areas. 2030 1955 * 2031 * @param as Address space. 2032 * @param obuf Place to save pointer to returned buffer. 2033 * @param osize Place to save size of returned buffer. 2034 * 1956 * @param as Address space. 1957 * @param obuf Place to save pointer to returned buffer. 1958 * @param osize Place to save size of returned buffer. 2035 1959 */ 2036 1960 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 2037 1961 { 1962 ipl_t ipl; 1963 size_t area_cnt, area_idx, i; 1964 link_t *cur; 1965 1966 as_area_info_t *info; 1967 size_t isize; 1968 1969 ipl = interrupts_disable(); 2038 1970 mutex_lock(&as->lock); 2039 1971 2040 1972 /* First pass, count number of areas. */ 2041 2042 size_t area_cnt = 0; 2043 link_t *cur; 2044 1973 1974 area_cnt = 0; 1975 2045 1976 for (cur = as->as_area_btree.leaf_head.next; 2046 1977 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2047 btree_node_t *node = 2048 list_get_instance(cur, btree_node_t, leaf_link); 1978 btree_node_t *node; 1979 1980 node = list_get_instance(cur, btree_node_t, leaf_link); 2049 1981 area_cnt += node->keys; 2050 1982 } 2051 2052 size_tisize = area_cnt * sizeof(as_area_info_t);2053 as_area_info_t *info = malloc(isize, 0);2054 1983 1984 isize = area_cnt * sizeof(as_area_info_t); 1985 info = malloc(isize, 0); 1986 2055 1987 /* Second pass, record data. */ 2056 2057 size_tarea_idx = 0;2058 1988 1989 area_idx = 0; 1990 2059 1991 for (cur = as->as_area_btree.leaf_head.next; 2060 1992 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2061 btree_node_t *node =2062 list_get_instance(cur, btree_node_t, leaf_link); 2063 btree_key_t i;2064 1993 btree_node_t *node; 1994 1995 node = list_get_instance(cur, btree_node_t, leaf_link); 1996 2065 1997 for (i = 0; i < node->keys; i++) { 2066 1998 as_area_t *area = node->value[i]; 2067 1999 2068 2000 ASSERT(area_idx < area_cnt); 2069 2001 mutex_lock(&area->lock); 2070 2002 2071 2003 info[area_idx].start_addr = area->base; 2072 2004 info[area_idx].size = FRAMES2SIZE(area->pages); 2073 2005 info[area_idx].flags = area->flags; 2074 2006 ++area_idx; 2075 2007 2076 2008 mutex_unlock(&area->lock); 2077 2009 } 2078 2010 } 2079 2011 2080 2012 mutex_unlock(&as->lock); 2081 2013 interrupts_restore(ipl); 2014 2082 2015 *obuf = info; 2083 2016 *osize = isize; 2084 2017 } 2085 2018 2019 2086 2020 /** Print out information about address space. 2087 2021 * 2088 * @param as Address space. 2089 * 2022 * @param as Address space. 
2090 2023 */ 2091 2024 void as_print(as_t *as) 2092 2025 { 2026 ipl_t ipl; 2027 2028 ipl = interrupts_disable(); 2093 2029 mutex_lock(&as->lock); 2094 2030 … … 2097 2033 for (cur = as->as_area_btree.leaf_head.next; 2098 2034 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2099 btree_node_t *node 2100 = list_get_instance(cur, btree_node_t, leaf_link); 2101 btree_key_t i; 2035 btree_node_t *node; 2102 2036 2037 node = list_get_instance(cur, btree_node_t, leaf_link); 2038 2039 unsigned int i; 2103 2040 for (i = 0; i < node->keys; i++) { 2104 2041 as_area_t *area = node->value[i]; 2105 2042 2106 2043 mutex_lock(&area->lock); 2107 2044 printf("as_area: %p, base=%p, pages=%" PRIs … … 2113 2050 2114 2051 mutex_unlock(&as->lock); 2052 interrupts_restore(ipl); 2115 2053 } 2116 2054
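The largest part of this hunk is the interval bookkeeping in used_space_insert() and used_space_remove(): each address space area records its used (page, count) runs in the used_space B+tree, and a newly inserted run is either merged into an adjacent neighbour, used to bridge two neighbours, or stored as a fresh key, while any overlap is rejected. The stand-alone sketch below models only that merge decision over a small sorted array instead of a B+tree, so the cases are easier to follow; ival_t, the fixed array store and used_space_insert_model() are made-up names for illustration, not HelenOS code.

/*
 * Simplified model of the used_space accounting touched by this hunk.
 * A sorted array of (page, count) runs stands in for the B+tree.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <inttypes.h>

#define PAGE_SIZE  4096UL
#define MAX_IVALS  32

typedef struct {
	uintptr_t page;   /* first page of the interval */
	size_t count;     /* number of pages in the interval */
} ival_t;

static ival_t ivals[MAX_IVALS];
static size_t nivals;

/* Return true iff [s1, s1 + sz1) and [s2, s2 + sz2) overlap. */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
	return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

/* Mark 'count' pages starting at 'page' as used; 1 on success, 0 on conflict. */
static int used_space_insert_model(uintptr_t page, size_t count)
{
	size_t i;

	/* Find the first interval that starts at or after 'page'. */
	for (i = 0; i < nivals && ivals[i].page < page; i++)
		;

	/* Reject any overlap with the left or right neighbour. */
	if (i > 0 && overlaps(page, count * PAGE_SIZE,
	    ivals[i - 1].page, ivals[i - 1].count * PAGE_SIZE))
		return 0;
	if (i < nivals && overlaps(page, count * PAGE_SIZE,
	    ivals[i].page, ivals[i].count * PAGE_SIZE))
		return 0;

	bool glue_left = (i > 0) &&
	    (ivals[i - 1].page + ivals[i - 1].count * PAGE_SIZE == page);
	bool glue_right = (i < nivals) &&
	    (page + count * PAGE_SIZE == ivals[i].page);

	if (glue_left && glue_right) {
		/* The new run bridges both neighbours into one interval. */
		ivals[i - 1].count += count + ivals[i].count;
		for (size_t j = i; j + 1 < nivals; j++)
			ivals[j] = ivals[j + 1];
		nivals--;
	} else if (glue_left) {
		/* Grow the left neighbour. */
		ivals[i - 1].count += count;
	} else if (glue_right) {
		/* Move the right neighbour's base down. */
		ivals[i].page = page;
		ivals[i].count += count;
	} else {
		/* No neighbour adjoins; store the run individually. */
		if (nivals == MAX_IVALS)
			return 0;
		for (size_t j = nivals; j > i; j--)
			ivals[j] = ivals[j - 1];
		ivals[i] = (ival_t) { page, count };
		nivals++;
	}
	return 1;
}

int main(void)
{
	used_space_insert_model(0x10000, 2);
	used_space_insert_model(0x12000, 3);   /* adjoins and grows the left run */
	for (size_t i = 0; i < nivals; i++)
		printf("%#" PRIxPTR ": %zu pages\n", ivals[i].page,
		    ivals[i].count);
	return 0;
}

The two inserts above end up as a single five-page run, which is the same merge behaviour the kernel code implements on the leaf and its neighbouring B+tree nodes.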
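The sh_info_remove_reference() body visible in this hunk also shows a release pattern worth noting: the reference count is decremented and the pagemap torn down while the share-info mutex is held, but the structure itself is freed only after the mutex (which lives inside the structure) has been unlocked. A minimal user-space sketch of the same pattern, assuming pthreads and using made-up shared_t / shared_get() / shared_put() names:

/*
 * Drop a reference under the lock, remember whether we were the last holder,
 * and free the object only after its own mutex has been released.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

typedef struct {
	pthread_mutex_t lock;
	unsigned refcount;
	/* ... payload such as the pagemap would live here ... */
} shared_t;

static shared_t *shared_get(shared_t *sh)
{
	pthread_mutex_lock(&sh->lock);
	sh->refcount++;
	pthread_mutex_unlock(&sh->lock);
	return sh;
}

static void shared_put(shared_t *sh)
{
	bool dealloc = false;

	pthread_mutex_lock(&sh->lock);
	assert(sh->refcount > 0);
	if (--sh->refcount == 0) {
		/* Last reference: tear down payload while still locked. */
		dealloc = true;
	}
	pthread_mutex_unlock(&sh->lock);

	/* Free only after dropping the lock embedded in the object. */
	if (dealloc) {
		pthread_mutex_destroy(&sh->lock);
		free(sh);
	}
}

int main(void)
{
	shared_t *sh = calloc(1, sizeof(*sh));
	pthread_mutex_init(&sh->lock, NULL);
	sh->refcount = 1;

	shared_get(sh);   /* second holder */
	shared_put(sh);   /* still alive */
	shared_put(sh);   /* last reference, freed here */

	puts("done");
	return 0;
}

Compile with -pthread; the point is only the dealloc flag that postpones free() until after the unlock, mirroring how the kernel defers btree_destroy() and free() on the share info.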