Changes in kernel/generic/src/mm/as.c [c964521:97bdb4a] in mainline
kernel/generic/src/mm/as.c
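A recurring change in this diff is that tlb_shootdown_start() now returns the saved interrupt level (ipl_t), which the caller later passes to tlb_shootdown_finalize(), replacing the earlier explicit interrupts_disable()/interrupts_restore() handling around the shootdown sequence. A minimal sketch of the new calling pattern follows (the helper name is illustrative only, not part of the changeset; the real uses appear in as_area_resize(), as_area_destroy() and as_area_change_flags() below):

    /* Illustrative helper, not from the changeset: invalidate all pages of an area. */
    static void invalidate_area_pages(as_t *as, as_area_t *area)
    {
    	page_table_lock(as, false);
    
    	/* Start the TLB shootdown sequence; the interrupt level is captured here. */
    	ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
    	    area->base, area->pages);
    
    	/* ... remove the page mappings here ... */
    
    	tlb_invalidate_pages(as->asid, area->base, area->pages);
    
    	/* Invalidate software translation caches (e.g. TSB on sparc64). */
    	as_invalidate_translation_cache(as, area->base, area->pages);
    
    	/* Finish the sequence, restoring the interrupt level saved above. */
    	tlb_shootdown_finalize(ipl);
    
    	page_table_unlock(as, false);
    }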
rc964521 r97bdb4a 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space related functions. 36 36 * 37 37 * This file contains address space manipulation functions. … … 86 86 * Each architecture decides what functions will be used to carry out 87 87 * address space operations such as creating or locking page tables. 88 * 88 89 */ 89 90 as_operations_t *as_operations = NULL; … … 91 92 /** 92 93 * Slab for as_t objects. 94 * 93 95 */ 94 96 static slab_cache_t *as_slab; … … 100 102 * - as->asid for each as of the as_t type 101 103 * - asids_allocated counter 104 * 102 105 */ 103 106 SPINLOCK_INITIALIZE(asidlock); … … 106 109 * This list contains address spaces that are not active on any 107 110 * processor and that have valid ASID. 111 * 108 112 */ 109 113 LIST_INITIALIZE(inactive_as_with_asid_head); … … 112 116 as_t *AS_KERNEL = NULL; 113 117 114 static int area_flags_to_page_flags(int); 115 static as_area_t *find_area_and_lock(as_t *, uintptr_t); 116 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); 117 static void sh_info_remove_reference(share_info_t *); 118 119 static int as_constructor(void *obj, int flags) 118 NO_TRACE static int as_constructor(void *obj, unsigned int flags) 120 119 { 121 120 as_t *as = (as_t *) obj; 122 int rc; 123 121 124 122 link_initialize(&as->inactive_as_with_asid_link); 125 123 mutex_initialize(&as->lock, MUTEX_PASSIVE); 126 124 127 rc = as_constructor_arch(as, flags);125 int rc = as_constructor_arch(as, flags); 128 126 129 127 return rc; 130 128 } 131 129 132 static int as_destructor(void *obj)130 NO_TRACE static size_t as_destructor(void *obj) 133 131 { 134 132 as_t *as = (as_t *) obj; 135 136 133 return as_destructor_arch(as); 137 134 } … … 141 138 { 142 139 as_arch_init(); 143 140 144 141 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, 145 142 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); … … 157 154 /** Create address space. 158 155 * 159 * @param flags Flags that influence the way in wich the address space 160 * is created. 161 */ 162 as_t *as_create(int flags) 163 { 164 as_t *as; 165 166 as = (as_t *) slab_alloc(as_slab, 0); 156 * @param flags Flags that influence the way in wich the address 157 * space is created. 158 * 159 */ 160 as_t *as_create(unsigned int flags) 161 { 162 as_t *as = (as_t *) slab_alloc(as_slab, 0); 167 163 (void) as_create_arch(as, 0); 168 164 … … 176 172 atomic_set(&as->refcount, 0); 177 173 as->cpu_refcount = 0; 174 178 175 #ifdef AS_PAGE_TABLE 179 176 as->genarch.page_table = page_table_create(flags); … … 192 189 * We know that we don't hold any spinlock. 193 190 * 194 * @param as Address space to be destroyed. 191 * @param as Address space to be destroyed. 192 * 195 193 */ 196 194 void as_destroy(as_t *as) 197 195 { 198 ipl_t ipl;199 bool cond;200 196 DEADLOCK_PROBE_INIT(p_asidlock); 201 197 … … 214 210 * disabled to prevent nested context switches. We also depend on the 215 211 * fact that so far no spinlocks are held. 
212 * 216 213 */ 217 214 preemption_disable(); 218 ipl = interrupts_read(); 215 ipl_t ipl = interrupts_read(); 216 219 217 retry: 220 218 interrupts_disable(); … … 224 222 goto retry; 225 223 } 226 preemption_enable(); /* Interrupts disabled, enable preemption */ 227 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 224 225 /* Interrupts disabled, enable preemption */ 226 preemption_enable(); 227 228 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) { 228 229 if (as->cpu_refcount == 0) 229 230 list_remove(&as->inactive_as_with_asid_link); 231 230 232 asid_put(as->asid); 231 233 } 234 232 235 spinlock_unlock(&asidlock); 233 236 interrupts_restore(ipl); 237 238 234 239 /* 235 240 * Destroy address space areas of the address space. 236 241 * The B+tree must be walked carefully because it is 237 242 * also being destroyed. 238 * /239 for (cond = true; cond; ) {240 btree_node_t *node;241 243 * 244 */ 245 bool cond = true; 246 while (cond) { 242 247 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 243 node = list_get_instance(as->as_area_btree.leaf_head.next, 248 249 btree_node_t *node = 250 list_get_instance(as->as_area_btree.leaf_head.next, 244 251 btree_node_t, leaf_link); 245 246 if ((cond = node->keys)) {252 253 if ((cond = node->keys)) 247 254 as_area_destroy(as, node->key[0]); 248 } 249 } 250 255 } 256 251 257 btree_destroy(&as->as_area_btree); 258 252 259 #ifdef AS_PAGE_TABLE 253 260 page_table_destroy(as->genarch.page_table); … … 255 262 page_table_destroy(NULL); 256 263 #endif 257 258 interrupts_restore(ipl); 259 264 260 265 slab_free(as_slab, as); 261 266 } … … 266 271 * space. 267 272 * 268 * @param a Address space to be held. 269 */ 270 void as_hold(as_t *as) 273 * @param as Address space to be held. 274 * 275 */ 276 NO_TRACE void as_hold(as_t *as) 271 277 { 272 278 atomic_inc(&as->refcount); … … 278 284 * space. 279 285 * 280 * @param a Address space to be released. 281 */ 282 void as_release(as_t *as) 286 * @param asAddress space to be released. 287 * 288 */ 289 NO_TRACE void as_release(as_t *as) 283 290 { 284 291 if (atomic_predec(&as->refcount) == 0) … … 286 293 } 287 294 295 /** Check area conflicts with other areas. 296 * 297 * @param as Address space. 298 * @param va Starting virtual address of the area being tested. 299 * @param size Size of the area being tested. 300 * @param avoid_area Do not touch this area. 301 * 302 * @return True if there is no conflict, false otherwise. 303 * 304 */ 305 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 306 as_area_t *avoid_area) 307 { 308 ASSERT(mutex_locked(&as->lock)); 309 310 /* 311 * We don't want any area to have conflicts with NULL page. 312 * 313 */ 314 if (overlaps(va, size, NULL, PAGE_SIZE)) 315 return false; 316 317 /* 318 * The leaf node is found in O(log n), where n is proportional to 319 * the number of address space areas belonging to as. 320 * The check for conflicts is then attempted on the rightmost 321 * record in the left neighbour, the leftmost record in the right 322 * neighbour and all records in the leaf node itself. 323 * 324 */ 325 btree_node_t *leaf; 326 as_area_t *area = 327 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 328 if (area) { 329 if (area != avoid_area) 330 return false; 331 } 332 333 /* First, check the two border cases. 
*/ 334 btree_node_t *node = 335 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 336 if (node) { 337 area = (as_area_t *) node->value[node->keys - 1]; 338 339 mutex_lock(&area->lock); 340 341 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 342 mutex_unlock(&area->lock); 343 return false; 344 } 345 346 mutex_unlock(&area->lock); 347 } 348 349 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 350 if (node) { 351 area = (as_area_t *) node->value[0]; 352 353 mutex_lock(&area->lock); 354 355 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 356 mutex_unlock(&area->lock); 357 return false; 358 } 359 360 mutex_unlock(&area->lock); 361 } 362 363 /* Second, check the leaf node. */ 364 btree_key_t i; 365 for (i = 0; i < leaf->keys; i++) { 366 area = (as_area_t *) leaf->value[i]; 367 368 if (area == avoid_area) 369 continue; 370 371 mutex_lock(&area->lock); 372 373 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 374 mutex_unlock(&area->lock); 375 return false; 376 } 377 378 mutex_unlock(&area->lock); 379 } 380 381 /* 382 * So far, the area does not conflict with other areas. 383 * Check if it doesn't conflict with kernel address space. 384 * 385 */ 386 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 387 return !overlaps(va, size, 388 KERNEL_ADDRESS_SPACE_START, 389 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 390 } 391 392 return true; 393 } 394 288 395 /** Create address space area of common attributes. 289 396 * 290 397 * The created address space area is added to the target address space. 291 398 * 292 * @param as Target address space. 293 * @param flags Flags of the area memory. 294 * @param size Size of area. 295 * @param base Base address of area. 296 * @param attrs Attributes of the area. 297 * @param backend Address space area backend. NULL if no backend is used. 298 * @param backend_data NULL or a pointer to an array holding two void *. 299 * 300 * @return Address space area on success or NULL on failure. 301 */ 302 as_area_t * 303 as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, 304 mem_backend_t *backend, mem_backend_data_t *backend_data) 305 { 306 ipl_t ipl; 307 as_area_t *a; 308 399 * @param as Target address space. 400 * @param flags Flags of the area memory. 401 * @param size Size of area. 402 * @param base Base address of area. 403 * @param attrs Attributes of the area. 404 * @param backend Address space area backend. NULL if no backend is used. 405 * @param backend_data NULL or a pointer to an array holding two void *. 406 * 407 * @return Address space area on success or NULL on failure. 408 * 409 */ 410 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 411 uintptr_t base, unsigned int attrs, mem_backend_t *backend, 412 mem_backend_data_t *backend_data) 413 { 309 414 if (base % PAGE_SIZE) 310 415 return NULL; 311 416 312 417 if (!size) 313 418 return NULL; 314 419 315 420 /* Writeable executable areas are not supported. 
*/ 316 421 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 317 422 return NULL; 318 423 319 ipl = interrupts_disable();320 424 mutex_lock(&as->lock); 321 425 322 426 if (!check_area_conflicts(as, base, size, NULL)) { 323 427 mutex_unlock(&as->lock); 324 interrupts_restore(ipl);325 428 return NULL; 326 429 } 327 430 328 a = (as_area_t *) malloc(sizeof(as_area_t), 0); 329 330 mutex_initialize(&a->lock, MUTEX_PASSIVE); 331 332 a->as = as; 333 a->flags = flags; 334 a->attributes = attrs; 335 a->pages = SIZE2FRAMES(size); 336 a->base = base; 337 a->sh_info = NULL; 338 a->backend = backend; 431 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0); 432 433 mutex_initialize(&area->lock, MUTEX_PASSIVE); 434 435 area->as = as; 436 area->flags = flags; 437 area->attributes = attrs; 438 area->pages = SIZE2FRAMES(size); 439 area->base = base; 440 area->sh_info = NULL; 441 area->backend = backend; 442 339 443 if (backend_data) 340 a ->backend_data = *backend_data;444 area->backend_data = *backend_data; 341 445 else 342 memsetb(&a->backend_data, sizeof(a->backend_data), 0); 343 344 btree_create(&a->used_space); 345 346 btree_insert(&as->as_area_btree, base, (void *) a, NULL); 347 446 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 447 448 btree_create(&area->used_space); 449 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 450 348 451 mutex_unlock(&as->lock); 349 interrupts_restore(ipl); 350 351 return a; 452 453 return area; 454 } 455 456 /** Find address space area and lock it. 457 * 458 * @param as Address space. 459 * @param va Virtual address. 460 * 461 * @return Locked address space area containing va on success or 462 * NULL on failure. 463 * 464 */ 465 NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 466 { 467 ASSERT(mutex_locked(&as->lock)); 468 469 btree_node_t *leaf; 470 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 471 if (area) { 472 /* va is the base address of an address space area */ 473 mutex_lock(&area->lock); 474 return area; 475 } 476 477 /* 478 * Search the leaf node and the righmost record of its left neighbour 479 * to find out whether this is a miss or va belongs to an address 480 * space area found there. 481 * 482 */ 483 484 /* First, search the leaf node itself. */ 485 btree_key_t i; 486 487 for (i = 0; i < leaf->keys; i++) { 488 area = (as_area_t *) leaf->value[i]; 489 490 mutex_lock(&area->lock); 491 492 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 493 return area; 494 495 mutex_unlock(&area->lock); 496 } 497 498 /* 499 * Second, locate the left neighbour and test its last record. 500 * Because of its position in the B+tree, it must have base < va. 501 * 502 */ 503 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 504 if (lnode) { 505 area = (as_area_t *) lnode->value[lnode->keys - 1]; 506 507 mutex_lock(&area->lock); 508 509 if (va < area->base + area->pages * PAGE_SIZE) 510 return area; 511 512 mutex_unlock(&area->lock); 513 } 514 515 return NULL; 352 516 } 353 517 354 518 /** Find address space area and change it. 355 519 * 356 * @param as Address space. 357 * @param address Virtual address belonging to the area to be changed. 358 * Must be page-aligned. 359 * @param size New size of the virtual memory block starting at 360 * address. 361 * @param flags Flags influencing the remap operation. Currently unused. 362 * 363 * @return Zero on success or a value from @ref errno.h otherwise. 
364 */ 365 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) 366 { 367 as_area_t *area; 368 ipl_t ipl; 369 size_t pages; 370 371 ipl = interrupts_disable(); 520 * @param as Address space. 521 * @param address Virtual address belonging to the area to be changed. 522 * Must be page-aligned. 523 * @param size New size of the virtual memory block starting at 524 * address. 525 * @param flags Flags influencing the remap operation. Currently unused. 526 * 527 * @return Zero on success or a value from @ref errno.h otherwise. 528 * 529 */ 530 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 531 { 372 532 mutex_lock(&as->lock); 373 533 374 534 /* 375 535 * Locate the area. 376 */ 377 area = find_area_and_lock(as, address); 536 * 537 */ 538 as_area_t *area = find_area_and_lock(as, address); 378 539 if (!area) { 379 540 mutex_unlock(&as->lock); 380 interrupts_restore(ipl);381 541 return ENOENT; 382 542 } 383 543 384 544 if (area->backend == &phys_backend) { 385 545 /* 386 546 * Remapping of address space areas associated 387 547 * with memory mapped devices is not supported. 548 * 388 549 */ 389 550 mutex_unlock(&area->lock); 390 551 mutex_unlock(&as->lock); 391 interrupts_restore(ipl);392 552 return ENOTSUP; 393 553 } 554 394 555 if (area->sh_info) { 395 556 /* 396 * Remapping of shared address space areas 557 * Remapping of shared address space areas 397 558 * is not supported. 559 * 398 560 */ 399 561 mutex_unlock(&area->lock); 400 562 mutex_unlock(&as->lock); 401 interrupts_restore(ipl);402 563 return ENOTSUP; 403 564 } 404 405 pages = SIZE2FRAMES((address - area->base) + size);565 566 size_t pages = SIZE2FRAMES((address - area->base) + size); 406 567 if (!pages) { 407 568 /* 408 569 * Zero size address space areas are not allowed. 570 * 409 571 */ 410 572 mutex_unlock(&area->lock); 411 573 mutex_unlock(&as->lock); 412 interrupts_restore(ipl);413 574 return EPERM; 414 575 } 415 576 416 577 if (pages < area->pages) { 417 bool cond;418 578 uintptr_t start_free = area->base + pages * PAGE_SIZE; 419 579 420 580 /* 421 581 * Shrinking the area. 422 582 * No need to check for overlaps. 423 */ 424 583 * 584 */ 585 425 586 page_table_lock(as, false); 426 587 427 588 /* 428 589 * Start TLB shootdown sequence. 429 */ 430 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + 431 pages * PAGE_SIZE, area->pages - pages); 432 590 * 591 */ 592 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 593 area->base + pages * PAGE_SIZE, area->pages - pages); 594 433 595 /* 434 596 * Remove frames belonging to used space starting from … … 437 599 * is also the right way to remove part of the used_space 438 600 * B+tree leaf list. 
439 * /440 for (cond = true; cond;) {441 btree_node_t *node;442 601 * 602 */ 603 bool cond = true; 604 while (cond) { 443 605 ASSERT(!list_empty(&area->used_space.leaf_head)); 444 node = 606 607 btree_node_t *node = 445 608 list_get_instance(area->used_space.leaf_head.prev, 446 609 btree_node_t, leaf_link); 610 447 611 if ((cond = (bool) node->keys)) { 448 uintptr_t b= node->key[node->keys - 1];449 size_t c=612 uintptr_t ptr = node->key[node->keys - 1]; 613 size_t size = 450 614 (size_t) node->value[node->keys - 1]; 451 unsigned int i = 0;452 453 if (overlaps( b, c* PAGE_SIZE, area->base,615 size_t i = 0; 616 617 if (overlaps(ptr, size * PAGE_SIZE, area->base, 454 618 pages * PAGE_SIZE)) { 455 619 456 if ( b + c* PAGE_SIZE <= start_free) {620 if (ptr + size * PAGE_SIZE <= start_free) { 457 621 /* 458 622 * The whole interval fits 459 623 * completely in the resized 460 624 * address space area. 625 * 461 626 */ 462 627 break; 463 628 } 464 629 465 630 /* 466 631 * Part of the interval corresponding 467 632 * to b and c overlaps with the resized 468 633 * address space area. 634 * 469 635 */ 470 471 cond = false; /* we are almost done */ 472 i = (start_free - b) >> PAGE_WIDTH; 636 637 /* We are almost done */ 638 cond = false; 639 i = (start_free - ptr) >> PAGE_WIDTH; 473 640 if (!used_space_remove(area, start_free, 474 c - i)) 475 panic("Cannot remove used " 476 "space."); 641 size - i)) 642 panic("Cannot remove used space."); 477 643 } else { 478 644 /* … … 480 646 * completely removed. 481 647 */ 482 if (!used_space_remove(area, b, c)) 483 panic("Cannot remove used " 484 "space."); 648 if (!used_space_remove(area, ptr, size)) 649 panic("Cannot remove used space."); 485 650 } 486 487 for (; i < c; i++) { 488 pte_t *pte; 489 490 pte = page_mapping_find(as, b + 651 652 for (; i < size; i++) { 653 pte_t *pte = page_mapping_find(as, ptr + 491 654 i * PAGE_SIZE); 492 ASSERT(pte && PTE_VALID(pte) && 493 PTE_PRESENT(pte)); 494 if (area->backend && 495 area->backend->frame_free) { 655 656 ASSERT(pte); 657 ASSERT(PTE_VALID(pte)); 658 ASSERT(PTE_PRESENT(pte)); 659 660 if ((area->backend) && 661 (area->backend->frame_free)) { 496 662 area->backend->frame_free(area, 497 b+ i * PAGE_SIZE,663 ptr + i * PAGE_SIZE, 498 664 PTE_GET_FRAME(pte)); 499 665 } 500 page_mapping_remove(as, b + 666 667 page_mapping_remove(as, ptr + 501 668 i * PAGE_SIZE); 502 669 } 503 670 } 504 671 } 505 672 506 673 /* 507 674 * Finish TLB shootdown sequence. 508 */ 509 675 * 676 */ 677 510 678 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 511 679 area->pages - pages); 512 680 513 681 /* 514 682 * Invalidate software translation caches (e.g. TSB on sparc64). 683 * 515 684 */ 516 685 as_invalidate_translation_cache(as, area->base + 517 686 pages * PAGE_SIZE, area->pages - pages); 518 tlb_shootdown_finalize( );519 687 tlb_shootdown_finalize(ipl); 688 520 689 page_table_unlock(as, false); 521 522 690 } else { 523 691 /* 524 692 * Growing the area. 525 693 * Check for overlaps with other address space areas. 694 * 526 695 */ 527 696 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 528 697 area)) { 529 698 mutex_unlock(&area->lock); 530 mutex_unlock(&as->lock); 531 interrupts_restore(ipl); 699 mutex_unlock(&as->lock); 532 700 return EADDRNOTAVAIL; 533 701 } 534 } 535 702 } 703 536 704 area->pages = pages; 537 705 538 706 mutex_unlock(&area->lock); 539 707 mutex_unlock(&as->lock); 540 interrupts_restore(ipl); 541 708 542 709 return 0; 543 710 } 544 711 712 /** Remove reference to address space area share info. 
713 * 714 * If the reference count drops to 0, the sh_info is deallocated. 715 * 716 * @param sh_info Pointer to address space area share info. 717 * 718 */ 719 NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info) 720 { 721 bool dealloc = false; 722 723 mutex_lock(&sh_info->lock); 724 ASSERT(sh_info->refcount); 725 726 if (--sh_info->refcount == 0) { 727 dealloc = true; 728 link_t *cur; 729 730 /* 731 * Now walk carefully the pagemap B+tree and free/remove 732 * reference from all frames found there. 733 */ 734 for (cur = sh_info->pagemap.leaf_head.next; 735 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 736 btree_node_t *node 737 = list_get_instance(cur, btree_node_t, leaf_link); 738 btree_key_t i; 739 740 for (i = 0; i < node->keys; i++) 741 frame_free((uintptr_t) node->value[i]); 742 } 743 744 } 745 mutex_unlock(&sh_info->lock); 746 747 if (dealloc) { 748 btree_destroy(&sh_info->pagemap); 749 free(sh_info); 750 } 751 } 752 545 753 /** Destroy address space area. 546 754 * 547 * @param as Address space. 548 * @param address Address within the area to be deleted. 549 * 550 * @return Zero on success or a value from @ref errno.h on failure. 755 * @param as Address space. 756 * @param address Address within the area to be deleted. 757 * 758 * @return Zero on success or a value from @ref errno.h on failure. 759 * 551 760 */ 552 761 int as_area_destroy(as_t *as, uintptr_t address) 553 762 { 554 as_area_t *area;555 uintptr_t base;556 link_t *cur;557 ipl_t ipl;558 559 ipl = interrupts_disable();560 763 mutex_lock(&as->lock); 561 562 a rea = find_area_and_lock(as, address);764 765 as_area_t *area = find_area_and_lock(as, address); 563 766 if (!area) { 564 767 mutex_unlock(&as->lock); 565 interrupts_restore(ipl);566 768 return ENOENT; 567 769 } 568 569 base = area->base;570 770 771 uintptr_t base = area->base; 772 571 773 page_table_lock(as, false); 572 774 573 775 /* 574 776 * Start TLB shootdown sequence. 575 777 */ 576 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 577 778 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 779 area->pages); 780 578 781 /* 579 782 * Visit only the pages mapped by used_space B+tree. 580 783 */ 784 link_t *cur; 581 785 for (cur = area->used_space.leaf_head.next; 582 786 cur != &area->used_space.leaf_head; cur = cur->next) { 583 787 btree_node_t *node; 584 unsigned int i;788 btree_key_t i; 585 789 586 790 node = list_get_instance(cur, btree_node_t, leaf_link); 587 791 for (i = 0; i < node->keys; i++) { 588 uintptr_t b = node->key[i]; 589 size_t j; 590 pte_t *pte; 792 uintptr_t ptr = node->key[i]; 793 size_t size; 591 794 592 for (j = 0; j < (size_t) node->value[i]; j++) { 593 pte = page_mapping_find(as, b + j * PAGE_SIZE); 594 ASSERT(pte && PTE_VALID(pte) && 595 PTE_PRESENT(pte)); 596 if (area->backend && 597 area->backend->frame_free) { 598 area->backend->frame_free(area, b + 599 j * PAGE_SIZE, PTE_GET_FRAME(pte)); 795 for (size = 0; size < (size_t) node->value[i]; size++) { 796 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 797 798 ASSERT(pte); 799 ASSERT(PTE_VALID(pte)); 800 ASSERT(PTE_PRESENT(pte)); 801 802 if ((area->backend) && 803 (area->backend->frame_free)) { 804 area->backend->frame_free(area, 805 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte)); 600 806 } 601 page_mapping_remove(as, b + j * PAGE_SIZE); 807 808 page_mapping_remove(as, ptr + size * PAGE_SIZE); 602 809 } 603 810 } 604 811 } 605 812 606 813 /* 607 814 * Finish TLB shootdown sequence. 
608 */ 609 815 * 816 */ 817 610 818 tlb_invalidate_pages(as->asid, area->base, area->pages); 611 819 612 820 /* 613 821 * Invalidate potential software translation caches (e.g. TSB on 614 822 * sparc64). 823 * 615 824 */ 616 825 as_invalidate_translation_cache(as, area->base, area->pages); 617 tlb_shootdown_finalize( );618 826 tlb_shootdown_finalize(ipl); 827 619 828 page_table_unlock(as, false); 620 829 621 830 btree_destroy(&area->used_space); 622 831 623 832 area->attributes |= AS_AREA_ATTR_PARTIAL; 624 833 625 834 if (area->sh_info) 626 835 sh_info_remove_reference(area->sh_info); 627 836 628 837 mutex_unlock(&area->lock); 629 838 630 839 /* 631 840 * Remove the empty area from address space. 841 * 632 842 */ 633 843 btree_remove(&as->as_area_btree, base, NULL); … … 636 846 637 847 mutex_unlock(&as->lock); 638 interrupts_restore(ipl);639 848 return 0; 640 849 } … … 647 856 * sh_info of the source area. The process of duplicating the 648 857 * mapping is done through the backend share function. 649 * 650 * @param src_as 651 * @param src_base 652 * @param acc_size 653 * @param dst_as 654 * @param dst_base 858 * 859 * @param src_as Pointer to source address space. 860 * @param src_base Base address of the source address space area. 861 * @param acc_size Expected size of the source area. 862 * @param dst_as Pointer to destination address space. 863 * @param dst_base Target base address. 655 864 * @param dst_flags_mask Destination address space area flags mask. 656 865 * 657 * @return Zero on success or ENOENT if there is no such task or if 658 * there is no such address space area, EPERM if there was 659 * a problem in accepting the area or ENOMEM if there was a 660 * problem in allocating destination address space area. 661 * ENOTSUP is returned if the address space area backend 662 * does not support sharing. 866 * @return Zero on success. 867 * @return ENOENT if there is no such task or such address space. 868 * @return EPERM if there was a problem in accepting the area. 869 * @return ENOMEM if there was a problem in allocating destination 870 * address space area. 871 * @return ENOTSUP if the address space area backend does not support 872 * sharing. 873 * 663 874 */ 664 875 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 665 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) 666 { 667 ipl_t ipl; 668 int src_flags; 669 size_t src_size; 670 as_area_t *src_area, *dst_area; 671 share_info_t *sh_info; 672 mem_backend_t *src_backend; 673 mem_backend_data_t src_backend_data; 674 675 ipl = interrupts_disable(); 876 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 877 { 676 878 mutex_lock(&src_as->lock); 677 src_area = find_area_and_lock(src_as, src_base);879 as_area_t *src_area = find_area_and_lock(src_as, src_base); 678 880 if (!src_area) { 679 881 /* 680 882 * Could not find the source address space area. 883 * 681 884 */ 682 885 mutex_unlock(&src_as->lock); 683 interrupts_restore(ipl);684 886 return ENOENT; 685 887 } 686 687 if ( !src_area->backend || !src_area->backend->share) {888 889 if ((!src_area->backend) || (!src_area->backend->share)) { 688 890 /* 689 891 * There is no backend or the backend does not 690 892 * know how to share the area. 
893 * 691 894 */ 692 895 mutex_unlock(&src_area->lock); 693 896 mutex_unlock(&src_as->lock); 694 interrupts_restore(ipl);695 897 return ENOTSUP; 696 898 } 697 899 698 s rc_size = src_area->pages * PAGE_SIZE;699 src_flags = src_area->flags;700 src_backend = src_area->backend;701 src_backend_data = src_area->backend_data;702 900 size_t src_size = src_area->pages * PAGE_SIZE; 901 unsigned int src_flags = src_area->flags; 902 mem_backend_t *src_backend = src_area->backend; 903 mem_backend_data_t src_backend_data = src_area->backend_data; 904 703 905 /* Share the cacheable flag from the original mapping */ 704 906 if (src_flags & AS_AREA_CACHEABLE) 705 907 dst_flags_mask |= AS_AREA_CACHEABLE; 706 707 if ( src_size != acc_size||708 ( src_flags & dst_flags_mask) != dst_flags_mask) {908 909 if ((src_size != acc_size) || 910 ((src_flags & dst_flags_mask) != dst_flags_mask)) { 709 911 mutex_unlock(&src_area->lock); 710 912 mutex_unlock(&src_as->lock); 711 interrupts_restore(ipl);712 913 return EPERM; 713 914 } 714 915 715 916 /* 716 917 * Now we are committed to sharing the area. 717 918 * First, prepare the area for sharing. 718 919 * Then it will be safe to unlock it. 719 */ 720 sh_info = src_area->sh_info; 920 * 921 */ 922 share_info_t *sh_info = src_area->sh_info; 721 923 if (!sh_info) { 722 924 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); … … 725 927 btree_create(&sh_info->pagemap); 726 928 src_area->sh_info = sh_info; 929 727 930 /* 728 931 * Call the backend to setup sharing. 932 * 729 933 */ 730 934 src_area->backend->share(src_area); … … 734 938 mutex_unlock(&sh_info->lock); 735 939 } 736 940 737 941 mutex_unlock(&src_area->lock); 738 942 mutex_unlock(&src_as->lock); 739 943 740 944 /* 741 945 * Create copy of the source address space area. … … 745 949 * The flags of the source area are masked against dst_flags_mask 746 950 * to support sharing in less privileged mode. 747 */ 748 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, 749 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 951 * 952 */ 953 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 954 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 750 955 if (!dst_area) { 751 956 /* … … 754 959 sh_info_remove_reference(sh_info); 755 960 756 interrupts_restore(ipl);757 961 return ENOMEM; 758 962 } 759 963 760 964 /* 761 965 * Now the destination address space area has been 762 966 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 763 967 * attribute and set the sh_info. 764 */ 765 mutex_lock(&dst_as->lock); 968 * 969 */ 970 mutex_lock(&dst_as->lock); 766 971 mutex_lock(&dst_area->lock); 767 972 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 768 973 dst_area->sh_info = sh_info; 769 974 mutex_unlock(&dst_area->lock); 770 mutex_unlock(&dst_as->lock); 771 772 interrupts_restore(ipl); 975 mutex_unlock(&dst_as->lock); 773 976 774 977 return 0; … … 777 980 /** Check access mode for address space area. 778 981 * 779 * The address space area must be locked prior to this call. 780 * 781 * @param area Address space area. 782 * @param access Access mode. 783 * 784 * @return False if access violates area's permissions, true 785 * otherwise. 786 */ 787 bool as_area_check_access(as_area_t *area, pf_access_t access) 982 * @param area Address space area. 983 * @param access Access mode. 984 * 985 * @return False if access violates area's permissions, true 986 * otherwise. 
987 * 988 */ 989 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 788 990 { 789 991 int flagmap[] = { … … 793 995 }; 794 996 997 ASSERT(mutex_locked(&area->lock)); 998 795 999 if (!(area->flags & flagmap[access])) 796 1000 return false; 797 1001 798 1002 return true; 1003 } 1004 1005 /** Convert address space area flags to page flags. 1006 * 1007 * @param aflags Flags of some address space area. 1008 * 1009 * @return Flags to be passed to page_mapping_insert(). 1010 * 1011 */ 1012 NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags) 1013 { 1014 unsigned int flags = PAGE_USER | PAGE_PRESENT; 1015 1016 if (aflags & AS_AREA_READ) 1017 flags |= PAGE_READ; 1018 1019 if (aflags & AS_AREA_WRITE) 1020 flags |= PAGE_WRITE; 1021 1022 if (aflags & AS_AREA_EXEC) 1023 flags |= PAGE_EXEC; 1024 1025 if (aflags & AS_AREA_CACHEABLE) 1026 flags |= PAGE_CACHEABLE; 1027 1028 return flags; 799 1029 } 800 1030 … … 813 1043 * 814 1044 */ 815 int as_area_change_flags(as_t *as, int flags, uintptr_t address) 816 { 817 as_area_t *area; 818 link_t *cur; 819 ipl_t ipl; 820 int page_flags; 821 uintptr_t *old_frame; 822 size_t frame_idx; 823 size_t used_pages; 824 1045 int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address) 1046 { 825 1047 /* Flags for the new memory mapping */ 826 page_flags = area_flags_to_page_flags(flags); 827 828 ipl = interrupts_disable(); 1048 unsigned int page_flags = area_flags_to_page_flags(flags); 1049 829 1050 mutex_lock(&as->lock); 830 831 a rea = find_area_and_lock(as, address);1051 1052 as_area_t *area = find_area_and_lock(as, address); 832 1053 if (!area) { 833 1054 mutex_unlock(&as->lock); 834 interrupts_restore(ipl);835 1055 return ENOENT; 836 1056 } 837 1057 838 1058 if ((area->sh_info) || (area->backend != &anon_backend)) { 839 1059 /* Copying shared areas not supported yet */ … … 841 1061 mutex_unlock(&area->lock); 842 1062 mutex_unlock(&as->lock); 843 interrupts_restore(ipl);844 1063 return ENOTSUP; 845 1064 } 846 1065 847 1066 /* 848 1067 * Compute total number of used pages in the used_space B+tree 849 */ 850 used_pages = 0; 851 1068 * 1069 */ 1070 size_t used_pages = 0; 1071 link_t *cur; 1072 852 1073 for (cur = area->used_space.leaf_head.next; 853 1074 cur != &area->used_space.leaf_head; cur = cur->next) { 854 btree_node_t *node ;855 unsigned int i;856 857 node = list_get_instance(cur, btree_node_t, leaf_link);858 for (i = 0; i < node->keys; i++) {1075 btree_node_t *node 1076 = list_get_instance(cur, btree_node_t, leaf_link); 1077 btree_key_t i; 1078 1079 for (i = 0; i < node->keys; i++) 859 1080 used_pages += (size_t) node->value[i]; 860 } 861 } 862 1081 } 1082 863 1083 /* An array for storing frame numbers */ 864 old_frame = malloc(used_pages * sizeof(uintptr_t), 0);865 1084 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 1085 866 1086 page_table_lock(as, false); 867 1087 868 1088 /* 869 1089 * Start TLB shootdown sequence. 870 */ 871 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 872 1090 * 1091 */ 1092 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 1093 area->pages); 1094 873 1095 /* 874 1096 * Remove used pages from page tables and remember their frame 875 1097 * numbers. 
876 */ 877 frame_idx = 0; 878 1098 * 1099 */ 1100 size_t frame_idx = 0; 1101 879 1102 for (cur = area->used_space.leaf_head.next; 880 1103 cur != &area->used_space.leaf_head; cur = cur->next) { 881 btree_node_t *node ;882 unsigned int i;883 884 node = list_get_instance(cur, btree_node_t, leaf_link);1104 btree_node_t *node 1105 = list_get_instance(cur, btree_node_t, leaf_link); 1106 btree_key_t i; 1107 885 1108 for (i = 0; i < node->keys; i++) { 886 uintptr_t b = node->key[i]; 887 size_t j; 888 pte_t *pte; 1109 uintptr_t ptr = node->key[i]; 1110 size_t size; 889 1111 890 for (j = 0; j < (size_t) node->value[i]; j++) { 891 pte = page_mapping_find(as, b + j * PAGE_SIZE); 892 ASSERT(pte && PTE_VALID(pte) && 893 PTE_PRESENT(pte)); 1112 for (size = 0; size < (size_t) node->value[i]; size++) { 1113 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 1114 1115 ASSERT(pte); 1116 ASSERT(PTE_VALID(pte)); 1117 ASSERT(PTE_PRESENT(pte)); 1118 894 1119 old_frame[frame_idx++] = PTE_GET_FRAME(pte); 895 1120 896 1121 /* Remove old mapping */ 897 page_mapping_remove(as, b + j* PAGE_SIZE);1122 page_mapping_remove(as, ptr + size * PAGE_SIZE); 898 1123 } 899 1124 } 900 1125 } 901 1126 902 1127 /* 903 1128 * Finish TLB shootdown sequence. 904 */ 905 1129 * 1130 */ 1131 906 1132 tlb_invalidate_pages(as->asid, area->base, area->pages); 907 1133 … … 909 1135 * Invalidate potential software translation caches (e.g. TSB on 910 1136 * sparc64). 1137 * 911 1138 */ 912 1139 as_invalidate_translation_cache(as, area->base, area->pages); 913 tlb_shootdown_finalize( );914 1140 tlb_shootdown_finalize(ipl); 1141 915 1142 page_table_unlock(as, false); 916 1143 917 1144 /* 918 1145 * Set the new flags. 919 1146 */ 920 1147 area->flags = flags; 921 1148 922 1149 /* 923 1150 * Map pages back in with new flags. This step is kept separate … … 926 1153 */ 927 1154 frame_idx = 0; 928 1155 929 1156 for (cur = area->used_space.leaf_head.next; 930 1157 cur != &area->used_space.leaf_head; cur = cur->next) { 931 btree_node_t *node ;932 unsigned int i;933 934 node = list_get_instance(cur, btree_node_t, leaf_link);1158 btree_node_t *node 1159 = list_get_instance(cur, btree_node_t, leaf_link); 1160 btree_key_t i; 1161 935 1162 for (i = 0; i < node->keys; i++) { 936 uintptr_t b= node->key[i];937 size_t j;1163 uintptr_t ptr = node->key[i]; 1164 size_t size; 938 1165 939 for ( j = 0; j < (size_t) node->value[i]; j++) {1166 for (size = 0; size < (size_t) node->value[i]; size++) { 940 1167 page_table_lock(as, false); 941 1168 942 1169 /* Insert the new mapping */ 943 page_mapping_insert(as, b + j* PAGE_SIZE,1170 page_mapping_insert(as, ptr + size * PAGE_SIZE, 944 1171 old_frame[frame_idx++], page_flags); 945 1172 946 1173 page_table_unlock(as, false); 947 1174 } 948 1175 } 949 1176 } 950 1177 951 1178 free(old_frame); 952 1179 953 1180 mutex_unlock(&area->lock); 954 1181 mutex_unlock(&as->lock); 955 interrupts_restore(ipl); 956 1182 957 1183 return 0; 958 1184 } 959 960 1185 961 1186 /** Handle page fault within the current address space. … … 967 1192 * Interrupts are assumed disabled. 968 1193 * 969 * @param page Faulting page. 970 * @param access Access mode that caused the page fault (i.e. 971 * read/write/exec). 972 * @param istate Pointer to the interrupted state. 973 * 974 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or 975 * AS_PF_DEFER if the fault was caused by copy_to_uspace() 976 * or copy_from_uspace(). 1194 * @param page Faulting page. 1195 * @param access Access mode that caused the page fault (i.e. 
1196 * read/write/exec). 1197 * @param istate Pointer to the interrupted state. 1198 * 1199 * @return AS_PF_FAULT on page fault. 1200 * @return AS_PF_OK on success. 1201 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace() 1202 * or copy_from_uspace(). 1203 * 977 1204 */ 978 1205 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 979 1206 { 980 pte_t *pte;981 as_area_t *area;982 983 1207 if (!THREAD) 984 1208 return AS_PF_FAULT; … … 988 1212 989 1213 mutex_lock(&AS->lock); 990 a rea = find_area_and_lock(AS, page);1214 as_area_t *area = find_area_and_lock(AS, page); 991 1215 if (!area) { 992 1216 /* 993 1217 * No area contained mapping for 'page'. 994 1218 * Signal page fault to low-level handler. 1219 * 995 1220 */ 996 1221 mutex_unlock(&AS->lock); 997 1222 goto page_fault; 998 1223 } 999 1224 1000 1225 if (area->attributes & AS_AREA_ATTR_PARTIAL) { 1001 1226 /* … … 1005 1230 mutex_unlock(&area->lock); 1006 1231 mutex_unlock(&AS->lock); 1007 goto page_fault; 1008 } 1009 1010 if ( !area->backend || !area->backend->page_fault) {1232 goto page_fault; 1233 } 1234 1235 if ((!area->backend) || (!area->backend->page_fault)) { 1011 1236 /* 1012 1237 * The address space area is not backed by any backend 1013 1238 * or the backend cannot handle page faults. 1239 * 1014 1240 */ 1015 1241 mutex_unlock(&area->lock); 1016 1242 mutex_unlock(&AS->lock); 1017 goto page_fault; 1018 } 1019 1243 goto page_fault; 1244 } 1245 1020 1246 page_table_lock(AS, false); 1021 1247 … … 1023 1249 * To avoid race condition between two page faults on the same address, 1024 1250 * we need to make sure the mapping has not been already inserted. 1025 */ 1251 * 1252 */ 1253 pte_t *pte; 1026 1254 if ((pte = page_mapping_find(AS, page))) { 1027 1255 if (PTE_PRESENT(pte)) { … … 1039 1267 /* 1040 1268 * Resort to the backend page fault handler. 1269 * 1041 1270 */ 1042 1271 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1051 1280 mutex_unlock(&AS->lock); 1052 1281 return AS_PF_OK; 1053 1282 1054 1283 page_fault: 1055 1284 if (THREAD->in_copy_from_uspace) { … … 1064 1293 return AS_PF_FAULT; 1065 1294 } 1066 1295 1067 1296 return AS_PF_DEFER; 1068 1297 } … … 1076 1305 * When this function is enetered, no spinlocks may be held. 1077 1306 * 1078 * @param old Old address space or NULL. 1079 * @param new New address space. 1307 * @param old Old address space or NULL. 1308 * @param new New address space. 1309 * 1080 1310 */ 1081 1311 void as_switch(as_t *old_as, as_t *new_as) … … 1083 1313 DEADLOCK_PROBE_INIT(p_asidlock); 1084 1314 preemption_disable(); 1315 1085 1316 retry: 1086 1317 (void) interrupts_disable(); 1087 1318 if (!spinlock_trylock(&asidlock)) { 1088 /* 1319 /* 1089 1320 * Avoid deadlock with TLB shootdown. 1090 1321 * We can enable interrupts here because 1091 1322 * preemption is disabled. We should not be 1092 1323 * holding any other lock. 1324 * 1093 1325 */ 1094 1326 (void) interrupts_enable(); … … 1097 1329 } 1098 1330 preemption_enable(); 1099 1331 1100 1332 /* 1101 1333 * First, take care of the old address space. 1102 */ 1334 */ 1103 1335 if (old_as) { 1104 1336 ASSERT(old_as->cpu_refcount); 1105 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1337 1338 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1106 1339 /* 1107 1340 * The old address space is no longer active on … … 1109 1342 * list of inactive address spaces with assigned 1110 1343 * ASID. 
1344 * 1111 1345 */ 1112 1346 ASSERT(old_as->asid != ASID_INVALID); 1347 1113 1348 list_append(&old_as->inactive_as_with_asid_link, 1114 1349 &inactive_as_with_asid_head); 1115 1350 } 1116 1351 1117 1352 /* 1118 1353 * Perform architecture-specific tasks when the address space 1119 1354 * is being removed from the CPU. 1355 * 1120 1356 */ 1121 1357 as_deinstall_arch(old_as); 1122 1358 } 1123 1359 1124 1360 /* 1125 1361 * Second, prepare the new address space. 1362 * 1126 1363 */ 1127 1364 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1131 1368 new_as->asid = asid_get(); 1132 1369 } 1370 1133 1371 #ifdef AS_PAGE_TABLE 1134 1372 SET_PTL0_ADDRESS(new_as->genarch.page_table); … … 1138 1376 * Perform architecture-specific steps. 1139 1377 * (e.g. write ASID to hardware register etc.) 1378 * 1140 1379 */ 1141 1380 as_install_arch(new_as); 1142 1381 1143 1382 spinlock_unlock(&asidlock); 1144 1383 … … 1146 1385 } 1147 1386 1148 /** Convert address space area flags to page flags.1149 *1150 * @param aflags Flags of some address space area.1151 *1152 * @return Flags to be passed to page_mapping_insert().1153 */1154 int area_flags_to_page_flags(int aflags)1155 {1156 int flags;1157 1158 flags = PAGE_USER | PAGE_PRESENT;1159 1160 if (aflags & AS_AREA_READ)1161 flags |= PAGE_READ;1162 1163 if (aflags & AS_AREA_WRITE)1164 flags |= PAGE_WRITE;1165 1166 if (aflags & AS_AREA_EXEC)1167 flags |= PAGE_EXEC;1168 1169 if (aflags & AS_AREA_CACHEABLE)1170 flags |= PAGE_CACHEABLE;1171 1172 return flags;1173 }1174 1175 1387 /** Compute flags for virtual address translation subsytem. 1176 1388 * 1177 * The address space area must be locked.1178 * Interrupts must be disabled.1179 * 1180 * @param a Address space area.1181 * 1182 * @return Flags to be used in page_mapping_insert(). 1183 */ 1184 int as_area_get_flags(as_area_t *a) 1185 { 1186 return area_flags_to_page_flags(a ->flags);1389 * @param area Address space area. 1390 * 1391 * @return Flags to be used in page_mapping_insert(). 1392 * 1393 */ 1394 NO_TRACE unsigned int as_area_get_flags(as_area_t *area) 1395 { 1396 ASSERT(mutex_locked(&area->lock)); 1397 1398 return area_flags_to_page_flags(area->flags); 1187 1399 } 1188 1400 … … 1192 1404 * table. 1193 1405 * 1194 * @param flags Flags saying whether the page table is for the kernel 1195 * address space. 1196 * 1197 * @return First entry of the page table. 1198 */ 1199 pte_t *page_table_create(int flags) 1406 * @param flags Flags saying whether the page table is for the kernel 1407 * address space. 1408 * 1409 * @return First entry of the page table. 1410 * 1411 */ 1412 NO_TRACE pte_t *page_table_create(unsigned int flags) 1200 1413 { 1201 1414 ASSERT(as_operations); … … 1209 1422 * Destroy page table in architecture specific way. 1210 1423 * 1211 * @param page_table Physical address of PTL0. 1212 */ 1213 void page_table_destroy(pte_t *page_table) 1424 * @param page_table Physical address of PTL0. 1425 * 1426 */ 1427 NO_TRACE void page_table_destroy(pte_t *page_table) 1214 1428 { 1215 1429 ASSERT(as_operations); … … 1223 1437 * This function should be called before any page_mapping_insert(), 1224 1438 * page_mapping_remove() and page_mapping_find(). 1225 * 1439 * 1226 1440 * Locking order is such that address space areas must be locked 1227 1441 * prior to this call. Address space can be locked prior to this 1228 1442 * call in which case the lock argument is false. 1229 1443 * 1230 * @param as Address space. 1231 * @param lock If false, do not attempt to lock as->lock. 
1232 */ 1233 void page_table_lock(as_t *as, bool lock) 1444 * @param as Address space. 1445 * @param lock If false, do not attempt to lock as->lock. 1446 * 1447 */ 1448 NO_TRACE void page_table_lock(as_t *as, bool lock) 1234 1449 { 1235 1450 ASSERT(as_operations); … … 1241 1456 /** Unlock page table. 1242 1457 * 1243 * @param as Address space. 1244 * @param unlock If false, do not attempt to unlock as->lock. 1245 */ 1246 void page_table_unlock(as_t *as, bool unlock) 1458 * @param as Address space. 1459 * @param unlock If false, do not attempt to unlock as->lock. 1460 * 1461 */ 1462 NO_TRACE void page_table_unlock(as_t *as, bool unlock) 1247 1463 { 1248 1464 ASSERT(as_operations); … … 1252 1468 } 1253 1469 1254 1255 /** Find address space area and lock it. 1256 * 1257 * The address space must be locked and interrupts must be disabled. 1258 * 1259 * @param as Address space. 1260 * @param va Virtual address. 1261 * 1262 * @return Locked address space area containing va on success or 1263 * NULL on failure. 1264 */ 1265 as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 1266 { 1267 as_area_t *a; 1268 btree_node_t *leaf, *lnode; 1269 unsigned int i; 1270 1271 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1272 if (a) { 1273 /* va is the base address of an address space area */ 1274 mutex_lock(&a->lock); 1275 return a; 1276 } 1277 1278 /* 1279 * Search the leaf node and the righmost record of its left neighbour 1280 * to find out whether this is a miss or va belongs to an address 1281 * space area found there. 1282 */ 1283 1284 /* First, search the leaf node itself. */ 1285 for (i = 0; i < leaf->keys; i++) { 1286 a = (as_area_t *) leaf->value[i]; 1287 mutex_lock(&a->lock); 1288 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { 1289 return a; 1290 } 1291 mutex_unlock(&a->lock); 1292 } 1293 1294 /* 1295 * Second, locate the left neighbour and test its last record. 1296 * Because of its position in the B+tree, it must have base < va. 1297 */ 1298 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1299 if (lnode) { 1300 a = (as_area_t *) lnode->value[lnode->keys - 1]; 1301 mutex_lock(&a->lock); 1302 if (va < a->base + a->pages * PAGE_SIZE) { 1303 return a; 1304 } 1305 mutex_unlock(&a->lock); 1306 } 1307 1308 return NULL; 1309 } 1310 1311 /** Check area conflicts with other areas. 1312 * 1313 * The address space must be locked and interrupts must be disabled. 1314 * 1315 * @param as Address space. 1316 * @param va Starting virtual address of the area being tested. 1317 * @param size Size of the area being tested. 1318 * @param avoid_area Do not touch this area. 1319 * 1320 * @return True if there is no conflict, false otherwise. 1321 */ 1322 bool 1323 check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) 1324 { 1325 as_area_t *a; 1326 btree_node_t *leaf, *node; 1327 unsigned int i; 1328 1329 /* 1330 * We don't want any area to have conflicts with NULL page. 1331 */ 1332 if (overlaps(va, size, NULL, PAGE_SIZE)) 1333 return false; 1334 1335 /* 1336 * The leaf node is found in O(log n), where n is proportional to 1337 * the number of address space areas belonging to as. 1338 * The check for conflicts is then attempted on the rightmost 1339 * record in the left neighbour, the leftmost record in the right 1340 * neighbour and all records in the leaf node itself. 
1341 */ 1342 1343 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { 1344 if (a != avoid_area) 1345 return false; 1346 } 1347 1348 /* First, check the two border cases. */ 1349 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { 1350 a = (as_area_t *) node->value[node->keys - 1]; 1351 mutex_lock(&a->lock); 1352 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1353 mutex_unlock(&a->lock); 1354 return false; 1355 } 1356 mutex_unlock(&a->lock); 1357 } 1358 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 1359 if (node) { 1360 a = (as_area_t *) node->value[0]; 1361 mutex_lock(&a->lock); 1362 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1363 mutex_unlock(&a->lock); 1364 return false; 1365 } 1366 mutex_unlock(&a->lock); 1367 } 1368 1369 /* Second, check the leaf node. */ 1370 for (i = 0; i < leaf->keys; i++) { 1371 a = (as_area_t *) leaf->value[i]; 1372 1373 if (a == avoid_area) 1374 continue; 1375 1376 mutex_lock(&a->lock); 1377 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1378 mutex_unlock(&a->lock); 1379 return false; 1380 } 1381 mutex_unlock(&a->lock); 1382 } 1383 1384 /* 1385 * So far, the area does not conflict with other areas. 1386 * Check if it doesn't conflict with kernel address space. 1387 */ 1388 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 1389 return !overlaps(va, size, 1390 KERNEL_ADDRESS_SPACE_START, 1391 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 1392 } 1393 1394 return true; 1470 /** Test whether page tables are locked. 1471 * 1472 * @param as Address space where the page tables belong. 1473 * 1474 * @return True if the page tables belonging to the address soace 1475 * are locked, otherwise false. 1476 */ 1477 NO_TRACE bool page_table_locked(as_t *as) 1478 { 1479 ASSERT(as_operations); 1480 ASSERT(as_operations->page_table_locked); 1481 1482 return as_operations->page_table_locked(as); 1395 1483 } 1396 1484 1397 1485 /** Return size of the address space area with given base. 1398 1486 * 1399 * @param base Arbitrary address insede the address space area. 1400 * 1401 * @return Size of the address space area in bytes or zero if it 1402 * does not exist. 1487 * @param base Arbitrary address inside the address space area. 1488 * 1489 * @return Size of the address space area in bytes or zero if it 1490 * does not exist. 1491 * 1403 1492 */ 1404 1493 size_t as_area_get_size(uintptr_t base) 1405 1494 { 1406 ipl_t ipl;1407 as_area_t *src_area;1408 1495 size_t size; 1409 1410 ipl = interrupts_disable(); 1411 src_area = find_area_and_lock(AS, base); 1496 1497 page_table_lock(AS, true); 1498 as_area_t *src_area = find_area_and_lock(AS, base); 1499 1412 1500 if (src_area) { 1413 1501 size = src_area->pages * PAGE_SIZE; 1414 1502 mutex_unlock(&src_area->lock); 1415 } else {1503 } else 1416 1504 size = 0; 1417 }1418 interrupts_restore(ipl);1505 1506 page_table_unlock(AS, true); 1419 1507 return size; 1420 1508 } … … 1424 1512 * The address space area must be already locked. 1425 1513 * 1426 * @param a Address space area. 1427 * @param page First page to be marked. 1428 * @param count Number of page to be marked. 1429 * 1430 * @return Zero on failure and non-zero on success. 1431 */ 1432 int used_space_insert(as_area_t *a, uintptr_t page, size_t count) 1433 { 1434 btree_node_t *leaf, *node; 1435 size_t pages; 1436 unsigned int i; 1437 1514 * @param area Address space area. 1515 * @param page First page to be marked. 1516 * @param count Number of page to be marked. 
1517 * 1518 * @return Zero on failure and non-zero on success. 1519 * 1520 */ 1521 int used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1522 { 1523 ASSERT(mutex_locked(&area->lock)); 1438 1524 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1439 1525 ASSERT(count); 1440 1441 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1526 1527 btree_node_t *leaf; 1528 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1442 1529 if (pages) { 1443 1530 /* 1444 1531 * We hit the beginning of some used space. 1532 * 1445 1533 */ 1446 1534 return 0; 1447 1535 } 1448 1536 1449 1537 if (!leaf->keys) { 1450 btree_insert(&a ->used_space, page, (void *) count, leaf);1538 btree_insert(&area->used_space, page, (void *) count, leaf); 1451 1539 return 1; 1452 1540 } 1453 1454 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1541 1542 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1455 1543 if (node) { 1456 1544 uintptr_t left_pg = node->key[node->keys - 1]; … … 1463 1551 * somewhere between the rightmost interval of 1464 1552 * the left neigbour and the first interval of the leaf. 1465 */ 1466 1553 * 1554 */ 1555 1467 1556 if (page >= right_pg) { 1468 1557 /* Do nothing. */ … … 1474 1563 right_cnt * PAGE_SIZE)) { 1475 1564 /* The interval intersects with the right interval. */ 1476 return 0; 1565 return 0; 1477 1566 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1478 1567 (page + count * PAGE_SIZE == right_pg)) { … … 1480 1569 * The interval can be added by merging the two already 1481 1570 * present intervals. 1571 * 1482 1572 */ 1483 1573 node->value[node->keys - 1] += count + right_cnt; 1484 btree_remove(&a ->used_space, right_pg, leaf);1485 return 1; 1574 btree_remove(&area->used_space, right_pg, leaf); 1575 return 1; 1486 1576 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1487 /* 1577 /* 1488 1578 * The interval can be added by simply growing the left 1489 1579 * interval. 1580 * 1490 1581 */ 1491 1582 node->value[node->keys - 1] += count; … … 1496 1587 * the right interval down and increasing its size 1497 1588 * accordingly. 1589 * 1498 1590 */ 1499 1591 leaf->value[0] += count; … … 1504 1596 * The interval is between both neigbouring intervals, 1505 1597 * but cannot be merged with any of them. 1598 * 1506 1599 */ 1507 btree_insert(&a ->used_space, page, (void *) count,1600 btree_insert(&area->used_space, page, (void *) count, 1508 1601 leaf); 1509 1602 return 1; … … 1512 1605 uintptr_t right_pg = leaf->key[0]; 1513 1606 size_t right_cnt = (size_t) leaf->value[0]; 1514 1607 1515 1608 /* 1516 1609 * Investigate the border case in which the left neighbour does 1517 1610 * not exist but the interval fits from the left. 1518 */ 1519 1611 * 1612 */ 1613 1520 1614 if (overlaps(page, count * PAGE_SIZE, right_pg, 1521 1615 right_cnt * PAGE_SIZE)) { … … 1527 1621 * right interval down and increasing its size 1528 1622 * accordingly. 1623 * 1529 1624 */ 1530 1625 leaf->key[0] = page; … … 1535 1630 * The interval doesn't adjoin with the right interval. 1536 1631 * It must be added individually. 
1632 * 1537 1633 */ 1538 btree_insert(&a ->used_space, page, (void *) count,1634 btree_insert(&area->used_space, page, (void *) count, 1539 1635 leaf); 1540 1636 return 1; 1541 1637 } 1542 1638 } 1543 1544 node = btree_leaf_node_right_neighbour(&a ->used_space, leaf);1639 1640 node = btree_leaf_node_right_neighbour(&area->used_space, leaf); 1545 1641 if (node) { 1546 1642 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1553 1649 * somewhere between the leftmost interval of 1554 1650 * the right neigbour and the last interval of the leaf. 1555 */ 1556 1651 * 1652 */ 1653 1557 1654 if (page < left_pg) { 1558 1655 /* Do nothing. */ … … 1564 1661 right_cnt * PAGE_SIZE)) { 1565 1662 /* The interval intersects with the right interval. */ 1566 return 0; 1663 return 0; 1567 1664 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1568 1665 (page + count * PAGE_SIZE == right_pg)) { … … 1570 1667 * The interval can be added by merging the two already 1571 1668 * present intervals. 1572 * */ 1669 * 1670 */ 1573 1671 leaf->value[leaf->keys - 1] += count + right_cnt; 1574 btree_remove(&a ->used_space, right_pg, node);1575 return 1; 1672 btree_remove(&area->used_space, right_pg, node); 1673 return 1; 1576 1674 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1577 1675 /* 1578 1676 * The interval can be added by simply growing the left 1579 1677 * interval. 1580 * */ 1678 * 1679 */ 1581 1680 leaf->value[leaf->keys - 1] += count; 1582 1681 return 1; … … 1586 1685 * the right interval down and increasing its size 1587 1686 * accordingly. 1687 * 1588 1688 */ 1589 1689 node->value[0] += count; … … 1594 1694 * The interval is between both neigbouring intervals, 1595 1695 * but cannot be merged with any of them. 1696 * 1596 1697 */ 1597 btree_insert(&a ->used_space, page, (void *) count,1698 btree_insert(&area->used_space, page, (void *) count, 1598 1699 leaf); 1599 1700 return 1; … … 1602 1703 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1603 1704 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1604 1705 1605 1706 /* 1606 1707 * Investigate the border case in which the right neighbour 1607 1708 * does not exist but the interval fits from the right. 1608 */ 1609 1709 * 1710 */ 1711 1610 1712 if (overlaps(page, count * PAGE_SIZE, left_pg, 1611 1713 left_cnt * PAGE_SIZE)) { … … 1616 1718 * The interval can be added by growing the left 1617 1719 * interval. 1720 * 1618 1721 */ 1619 1722 leaf->value[leaf->keys - 1] += count; … … 1623 1726 * The interval doesn't adjoin with the left interval. 1624 1727 * It must be added individually. 1728 * 1625 1729 */ 1626 btree_insert(&a ->used_space, page, (void *) count,1730 btree_insert(&area->used_space, page, (void *) count, 1627 1731 leaf); 1628 1732 return 1; … … 1634 1738 * only between two other intervals of the leaf. The two border cases 1635 1739 * were already resolved. 1636 */ 1740 * 1741 */ 1742 btree_key_t i; 1637 1743 for (i = 1; i < leaf->keys; i++) { 1638 1744 if (page < leaf->key[i]) { … … 1641 1747 size_t left_cnt = (size_t) leaf->value[i - 1]; 1642 1748 size_t right_cnt = (size_t) leaf->value[i]; 1643 1749 1644 1750 /* 1645 1751 * The interval fits between left_pg and right_pg. 1752 * 1646 1753 */ 1647 1754 1648 1755 if (overlaps(page, count * PAGE_SIZE, left_pg, 1649 1756 left_cnt * PAGE_SIZE)) { … … 1651 1758 * The interval intersects with the left 1652 1759 * interval. 1760 * 1653 1761 */ 1654 1762 return 0; … … 1658 1766 * The interval intersects with the right 1659 1767 * interval. 
1768 * 1660 1769 */ 1661 return 0; 1770 return 0; 1662 1771 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1663 1772 (page + count * PAGE_SIZE == right_pg)) { … … 1665 1774 * The interval can be added by merging the two 1666 1775 * already present intervals. 1776 * 1667 1777 */ 1668 1778 leaf->value[i - 1] += count + right_cnt; 1669 btree_remove(&a ->used_space, right_pg, leaf);1670 return 1; 1779 btree_remove(&area->used_space, right_pg, leaf); 1780 return 1; 1671 1781 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1672 1782 /* 1673 1783 * The interval can be added by simply growing 1674 1784 * the left interval. 1785 * 1675 1786 */ 1676 1787 leaf->value[i - 1] += count; … … 1678 1789 } else if (page + count * PAGE_SIZE == right_pg) { 1679 1790 /* 1680 1791 * The interval can be addded by simply moving 1681 1792 * base of the right interval down and 1682 1793 * increasing its size accordingly. 1683 */ 1794 * 1795 */ 1684 1796 leaf->value[i] += count; 1685 1797 leaf->key[i] = page; … … 1690 1802 * intervals, but cannot be merged with any of 1691 1803 * them. 1804 * 1692 1805 */ 1693 btree_insert(&a ->used_space, page,1806 btree_insert(&area->used_space, page, 1694 1807 (void *) count, leaf); 1695 1808 return 1; … … 1697 1810 } 1698 1811 } 1699 1812 1700 1813 panic("Inconsistency detected while adding %" PRIs " pages of used " 1701 1814 "space at %p.", count, page); … … 1706 1819 * The address space area must be already locked. 1707 1820 * 1708 * @param a Address space area. 1709 * @param page First page to be marked. 1710 * @param count Number of page to be marked. 1711 * 1712 * @return Zero on failure and non-zero on success. 1713 */ 1714 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1715 { 1716 btree_node_t *leaf, *node; 1717 size_t pages; 1718 unsigned int i; 1719 1821 * @param area Address space area. 1822 * @param page First page to be marked. 1823 * @param count Number of page to be marked. 1824 * 1825 * @return Zero on failure and non-zero on success. 1826 * 1827 */ 1828 int used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1829 { 1830 ASSERT(mutex_locked(&area->lock)); 1720 1831 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1721 1832 ASSERT(count); 1722 1723 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1833 1834 btree_node_t *leaf; 1835 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1724 1836 if (pages) { 1725 1837 /* 1726 1838 * We are lucky, page is the beginning of some interval. 1839 * 1727 1840 */ 1728 1841 if (count > pages) { 1729 1842 return 0; 1730 1843 } else if (count == pages) { 1731 btree_remove(&a ->used_space, page, leaf);1844 btree_remove(&area->used_space, page, leaf); 1732 1845 return 1; 1733 1846 } else { … … 1735 1848 * Find the respective interval. 1736 1849 * Decrease its size and relocate its start address. 
1850 * 1737 1851 */ 1852 btree_key_t i; 1738 1853 for (i = 0; i < leaf->keys; i++) { 1739 1854 if (leaf->key[i] == page) { … … 1746 1861 } 1747 1862 } 1748 1749 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1750 if ( node && page < leaf->key[0]) {1863 1864 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1865 if ((node) && (page < leaf->key[0])) { 1751 1866 uintptr_t left_pg = node->key[node->keys - 1]; 1752 1867 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1753 1868 1754 1869 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1755 1870 count * PAGE_SIZE)) { … … 1761 1876 * removed by updating the size of the bigger 1762 1877 * interval. 1878 * 1763 1879 */ 1764 1880 node->value[node->keys - 1] -= count; … … 1766 1882 } else if (page + count * PAGE_SIZE < 1767 1883 left_pg + left_cnt*PAGE_SIZE) { 1768 size_t new_cnt;1769 1770 1884 /* 1771 1885 * The interval is contained in the rightmost … … 1774 1888 * the original interval and also inserting a 1775 1889 * new interval. 1890 * 1776 1891 */ 1777 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1892 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1778 1893 (page + count*PAGE_SIZE)) >> PAGE_WIDTH; 1779 1894 node->value[node->keys - 1] -= count + new_cnt; 1780 btree_insert(&a ->used_space, page +1895 btree_insert(&area->used_space, page + 1781 1896 count * PAGE_SIZE, (void *) new_cnt, leaf); 1782 1897 return 1; … … 1784 1899 } 1785 1900 return 0; 1786 } else if (page < leaf->key[0]) {1901 } else if (page < leaf->key[0]) 1787 1902 return 0; 1788 }1789 1903 1790 1904 if (page > leaf->key[leaf->keys - 1]) { 1791 1905 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1792 1906 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1793 1907 1794 1908 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1795 1909 count * PAGE_SIZE)) { 1796 if (page + count * PAGE_SIZE == 1910 if (page + count * PAGE_SIZE == 1797 1911 left_pg + left_cnt * PAGE_SIZE) { 1798 1912 /* … … 1800 1914 * interval of the leaf and can be removed by 1801 1915 * updating the size of the bigger interval. 1916 * 1802 1917 */ 1803 1918 leaf->value[leaf->keys - 1] -= count; … … 1805 1920 } else if (page + count * PAGE_SIZE < left_pg + 1806 1921 left_cnt * PAGE_SIZE) { 1807 size_t new_cnt;1808 1809 1922 /* 1810 1923 * The interval is contained in the rightmost … … 1813 1926 * original interval and also inserting a new 1814 1927 * interval. 1928 * 1815 1929 */ 1816 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1930 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1817 1931 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1818 1932 leaf->value[leaf->keys - 1] -= count + new_cnt; 1819 btree_insert(&a ->used_space, page +1933 btree_insert(&area->used_space, page + 1820 1934 count * PAGE_SIZE, (void *) new_cnt, leaf); 1821 1935 return 1; … … 1823 1937 } 1824 1938 return 0; 1825 } 1939 } 1826 1940 1827 1941 /* … … 1829 1943 * Now the interval can be only between intervals of the leaf. 1830 1944 */ 1945 btree_key_t i; 1831 1946 for (i = 1; i < leaf->keys - 1; i++) { 1832 1947 if (page < leaf->key[i]) { 1833 1948 uintptr_t left_pg = leaf->key[i - 1]; 1834 1949 size_t left_cnt = (size_t) leaf->value[i - 1]; 1835 1950 1836 1951 /* 1837 1952 * Now the interval is between intervals corresponding … … 1847 1962 * be removed by updating the size of 1848 1963 * the bigger interval. 
1964 * 1849 1965 */ 1850 1966 leaf->value[i - 1] -= count; … … 1852 1968 } else if (page + count * PAGE_SIZE < 1853 1969 left_pg + left_cnt * PAGE_SIZE) { 1854 size_t new_cnt;1855 1856 1970 /* 1857 1971 * The interval is contained in the … … 1861 1975 * also inserting a new interval. 1862 1976 */ 1863 new_cnt = ((left_pg +1977 size_t new_cnt = ((left_pg + 1864 1978 left_cnt * PAGE_SIZE) - 1865 1979 (page + count * PAGE_SIZE)) >> 1866 1980 PAGE_WIDTH; 1867 1981 leaf->value[i - 1] -= count + new_cnt; 1868 btree_insert(&a ->used_space, page +1982 btree_insert(&area->used_space, page + 1869 1983 count * PAGE_SIZE, (void *) new_cnt, 1870 1984 leaf); … … 1875 1989 } 1876 1990 } 1877 1991 1878 1992 error: 1879 1993 panic("Inconsistency detected while removing %" PRIs " pages of used " … … 1881 1995 } 1882 1996 1883 /** Remove reference to address space area share info.1884 *1885 * If the reference count drops to 0, the sh_info is deallocated.1886 *1887 * @param sh_info Pointer to address space area share info.1888 */1889 void sh_info_remove_reference(share_info_t *sh_info)1890 {1891 bool dealloc = false;1892 1893 mutex_lock(&sh_info->lock);1894 ASSERT(sh_info->refcount);1895 if (--sh_info->refcount == 0) {1896 dealloc = true;1897 link_t *cur;1898 1899 /*1900 * Now walk carefully the pagemap B+tree and free/remove1901 * reference from all frames found there.1902 */1903 for (cur = sh_info->pagemap.leaf_head.next;1904 cur != &sh_info->pagemap.leaf_head; cur = cur->next) {1905 btree_node_t *node;1906 unsigned int i;1907 1908 node = list_get_instance(cur, btree_node_t, leaf_link);1909 for (i = 0; i < node->keys; i++)1910 frame_free((uintptr_t) node->value[i]);1911 }1912 1913 }1914 mutex_unlock(&sh_info->lock);1915 1916 if (dealloc) {1917 btree_destroy(&sh_info->pagemap);1918 free(sh_info);1919 }1920 }1921 1922 1997 /* 1923 1998 * Address space related syscalls. … … 1925 2000 1926 2001 /** Wrapper for as_area_create(). */ 1927 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)2002 unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 1928 2003 { 1929 2004 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 1935 2010 1936 2011 /** Wrapper for as_area_resize(). */ 1937 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)2012 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1938 2013 { 1939 2014 return (unative_t) as_area_resize(AS, address, size, 0); … … 1941 2016 1942 2017 /** Wrapper for as_area_change_flags(). */ 1943 unative_t sys_as_area_change_flags(uintptr_t address, int flags)2018 unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1944 2019 { 1945 2020 return (unative_t) as_area_change_flags(AS, flags, address); … … 1954 2029 /** Get list of adress space areas. 1955 2030 * 1956 * @param as Address space. 1957 * @param obuf Place to save pointer to returned buffer. 1958 * @param osize Place to save size of returned buffer. 2031 * @param as Address space. 2032 * @param obuf Place to save pointer to returned buffer. 2033 * @param osize Place to save size of returned buffer. 2034 * 1959 2035 */ 1960 2036 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 1961 2037 { 1962 ipl_t ipl; 1963 size_t area_cnt, area_idx, i; 2038 mutex_lock(&as->lock); 2039 2040 /* First pass, count number of areas. 
*/ 2041 2042 size_t area_cnt = 0; 1964 2043 link_t *cur; 1965 1966 as_area_info_t *info; 1967 size_t isize; 1968 1969 ipl = interrupts_disable(); 1970 mutex_lock(&as->lock); 1971 1972 /* First pass, count number of areas. */ 1973 1974 area_cnt = 0; 1975 2044 1976 2045 for (cur = as->as_area_btree.leaf_head.next; 1977 2046 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1978 btree_node_t *node; 1979 1980 node = list_get_instance(cur, btree_node_t, leaf_link); 2047 btree_node_t *node = 2048 list_get_instance(cur, btree_node_t, leaf_link); 1981 2049 area_cnt += node->keys; 1982 2050 } 1983 1984 1985 info = malloc(isize, 0);1986 2051 2052 size_t isize = area_cnt * sizeof(as_area_info_t); 2053 as_area_info_t *info = malloc(isize, 0); 2054 1987 2055 /* Second pass, record data. */ 1988 1989 area_idx = 0;1990 2056 2057 size_t area_idx = 0; 2058 1991 2059 for (cur = as->as_area_btree.leaf_head.next; 1992 2060 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1993 btree_node_t *node ;1994 1995 node = list_get_instance(cur, btree_node_t, leaf_link);1996 2061 btree_node_t *node = 2062 list_get_instance(cur, btree_node_t, leaf_link); 2063 btree_key_t i; 2064 1997 2065 for (i = 0; i < node->keys; i++) { 1998 2066 as_area_t *area = node->value[i]; 1999 2067 2000 2068 ASSERT(area_idx < area_cnt); 2001 2069 mutex_lock(&area->lock); 2002 2070 2003 2071 info[area_idx].start_addr = area->base; 2004 2072 info[area_idx].size = FRAMES2SIZE(area->pages); 2005 2073 info[area_idx].flags = area->flags; 2006 2074 ++area_idx; 2007 2075 2008 2076 mutex_unlock(&area->lock); 2009 2077 } 2010 2078 } 2011 2079 2012 2080 mutex_unlock(&as->lock); 2013 interrupts_restore(ipl); 2014 2081 2015 2082 *obuf = info; 2016 2083 *osize = isize; 2017 2084 } 2018 2085 2019 2020 2086 /** Print out information about address space. 2021 2087 * 2022 * @param as Address space. 2088 * @param as Address space. 2089 * 2023 2090 */ 2024 2091 void as_print(as_t *as) 2025 2092 { 2026 ipl_t ipl;2027 2028 ipl = interrupts_disable();2029 2093 mutex_lock(&as->lock); 2030 2094 … … 2033 2097 for (cur = as->as_area_btree.leaf_head.next; 2034 2098 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2035 btree_node_t *node; 2036 2037 node = list_get_instance(cur, btree_node_t, leaf_link); 2038 2039 unsigned int i; 2099 btree_node_t *node 2100 = list_get_instance(cur, btree_node_t, leaf_link); 2101 btree_key_t i; 2102 2040 2103 for (i = 0; i < node->keys; i++) { 2041 2104 as_area_t *area = node->value[i]; 2042 2105 2043 2106 mutex_lock(&area->lock); 2044 2107 printf("as_area: %p, base=%p, pages=%" PRIs … … 2050 2113 2051 2114 mutex_unlock(&as->lock); 2052 interrupts_restore(ipl);2053 2115 } 2054 2116
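
The largest part of this hunk is the cleanup of used_space_insert() and used_space_remove(), which account for the pages actually backing an address space area as (page, count) intervals keyed by starting page in the area's used_space B+tree; a new range is merged with the left and/or right neighbouring interval whenever it adjoins one of them, and rejected when it overlaps either. The sketch below is only an illustration of that merge logic under simplified assumptions, not the kernel code: interval_t, used_insert(), MAX_INTERVALS and the plain sorted array are invented for the example and stand in for the B+tree.

/* Illustrative sketch only: the "merge with adjacent interval" rules that
 * used_space_insert() applies to the used_space B+tree, demonstrated on a
 * small sorted array. interval_t, used_insert() and MAX_INTERVALS are
 * hypothetical names invented for this example. */

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define PAGE_SIZE     4096
#define MAX_INTERVALS 32

typedef struct {
	uintptr_t page;   /* starting page address */
	size_t count;     /* number of consecutive pages */
} interval_t;

static interval_t map[MAX_INTERVALS];
static size_t nmap = 0;

/* Insert a (page, count) range, merging with neighbours when they adjoin.
 * Returns false if the range overlaps an existing interval. */
static bool used_insert(uintptr_t page, size_t count)
{
	size_t i = 0;

	/* Find the first interval that starts at or after page. */
	while ((i < nmap) && (map[i].page < page))
		i++;

	/* Reject overlaps with the left and right neighbours. */
	if ((i > 0) &&
	    (map[i - 1].page + map[i - 1].count * PAGE_SIZE > page))
		return false;
	if ((i < nmap) &&
	    (page + count * PAGE_SIZE > map[i].page))
		return false;

	bool merge_left = (i > 0) &&
	    (map[i - 1].page + map[i - 1].count * PAGE_SIZE == page);
	bool merge_right = (i < nmap) &&
	    (page + count * PAGE_SIZE == map[i].page);

	if (merge_left && merge_right) {
		/* The new range bridges both neighbours: grow the left one
		 * and drop the right one. */
		map[i - 1].count += count + map[i].count;
		for (size_t j = i; j + 1 < nmap; j++)
			map[j] = map[j + 1];
		nmap--;
	} else if (merge_left) {
		/* Grow the left interval. */
		map[i - 1].count += count;
	} else if (merge_right) {
		/* Move the right interval's base down and grow it. */
		map[i].page = page;
		map[i].count += count;
	} else {
		/* Stand-alone interval: make room and insert it. */
		if (nmap == MAX_INTERVALS)
			return false;
		for (size_t j = nmap; j > i; j--)
			map[j] = map[j - 1];
		map[i].page = page;
		map[i].count = count;
		nmap++;
	}

	return true;
}

int main(void)
{
	used_insert(0x10000, 2);    /* [0x10000, 0x12000) */
	used_insert(0x14000, 1);    /* [0x14000, 0x15000) */
	used_insert(0x12000, 2);    /* adjoins both -> one merged interval */

	for (size_t i = 0; i < nmap; i++)
		printf("interval: %#lx, %zu pages\n",
		    (unsigned long) map[i].page, map[i].count);

	return 0;
}

Running the sketch leaves a single 5-page interval starting at 0x10000, because the third insertion adjoins both earlier ranges; this mirrors the "merging the two already present intervals" branch in the changeset, while the overlap cases correspond to the branches that return 0.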
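The reworked as_get_area_info() follows a two-pass pattern: with the address space locked it first counts the areas stored in the B+tree leaves, then allocates one buffer of that size and fills it with each area's base, size and flags before unlocking. The stand-alone sketch below shows the same count-allocate-fill snapshot idea under simplified assumptions; area_t, area_info_t and area_snapshot() are hypothetical names, and a linked list replaces the kernel's B+tree and locking.

/* Illustrative sketch only: count the items, allocate one buffer, then copy
 * the data out, as as_get_area_info() does for address space areas. */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct area {
	uintptr_t base;
	size_t pages;
	struct area *next;
} area_t;

typedef struct {
	uintptr_t base;
	size_t pages;
} area_info_t;

/* Walk the list twice: once to size the buffer, once to record the data. */
static area_info_t *area_snapshot(area_t *head, size_t *cnt)
{
	size_t n = 0;
	for (area_t *cur = head; cur != NULL; cur = cur->next)
		n++;

	area_info_t *info = malloc(n * sizeof(area_info_t));
	if (info == NULL) {
		*cnt = 0;
		return NULL;
	}

	size_t i = 0;
	for (area_t *cur = head; cur != NULL; cur = cur->next) {
		info[i].base = cur->base;
		info[i].pages = cur->pages;
		i++;
	}

	*cnt = n;
	return info;
}

int main(void)
{
	area_t a2 = { 0x80000000, 16, NULL };
	area_t a1 = { 0x40000000, 4, &a2 };

	size_t cnt;
	area_info_t *info = area_snapshot(&a1, &cnt);

	for (size_t i = 0; i < cnt; i++)
		printf("area %#lx: %zu pages\n",
		    (unsigned long) info[i].base, info[i].pages);

	free(info);
	return 0;
}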