Changes in kernel/generic/src/mm/as.c [98000fb:fc47885] in mainline
- File: kernel/generic/src/mm/as.c (1 edited)
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/mm/as.c
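The central API change in this diff is explicit reference counting of address spaces: the new as_hold() and as_release() helpers replace direct manipulation of as->refcount (for example, atomic_set(&AS_KERNEL->refcount, 1) becomes as_hold(AS_KERNEL)), and the thread that drops the last reference is the one that ends up calling as_destroy(). The following is a minimal, self-contained sketch of that hold/release pattern; it uses C11 atomics and simplified stand-in names rather than the kernel's own atomic_inc()/atomic_predec() primitives and as_t definition.

#include <stdatomic.h>
#include <stdlib.h>

/* Simplified stand-in for as_t: only the reference count matters here. */
typedef struct {
	atomic_int refcount;
} as_t;

/* Like as_hold(): holding a reference prevents destruction. */
static void as_hold(as_t *as)
{
	atomic_fetch_add(&as->refcount, 1);
}

/* Like as_release(): whoever drops the last reference destroys the
 * address space (as_destroy() in the kernel, free() in this sketch). */
static void as_release(as_t *as)
{
	if (atomic_fetch_sub(&as->refcount, 1) == 1)
		free(as);
}

int main(void)
{
	as_t *as = malloc(sizeof(as_t));
	atomic_init(&as->refcount, 0);

	as_hold(as);		/* first user of the address space */
	as_hold(as);		/* second user */
	as_release(as);		/* refcount drops back to 1 */
	as_release(as);		/* last reference gone: destroyed */

	return 0;
}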
r98000fb rfc47885 1 1 /* 2 * Copyright (c) 20 01-2006Jakub Jermar2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space related functions. 36 36 * 37 37 * This file contains address space manipulation functions. … … 75 75 #include <config.h> 76 76 #include <align.h> 77 #include < arch/types.h>77 #include <typedefs.h> 78 78 #include <syscall/copy.h> 79 79 #include <arch/interrupt.h> … … 89 89 as_operations_t *as_operations = NULL; 90 90 91 /** 92 * Slab for as_t objects.91 /** Slab for as_t objects. 92 * 93 93 */ 94 94 static slab_cache_t *as_slab; 95 95 96 /** 97 * This lock serializes access to the ASID subsystem.98 * Itprotects:96 /** ASID subsystem lock. 97 * 98 * This lock protects: 99 99 * - inactive_as_with_asid_head list 100 100 * - as->asid for each as of the as_t type 101 101 * - asids_allocated counter 102 * 102 103 */ 103 104 SPINLOCK_INITIALIZE(asidlock); 104 105 105 106 /** 106 * This list contains address spaces that are not active on any107 * processor andthat have valid ASID.107 * Inactive address spaces (on all processors) 108 * that have valid ASID. 108 109 */ 109 110 LIST_INITIALIZE(inactive_as_with_asid_head); … … 112 113 as_t *AS_KERNEL = NULL; 113 114 114 static int area_flags_to_page_flags(int); 115 static as_area_t *find_area_and_lock(as_t *, uintptr_t); 116 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); 117 static void sh_info_remove_reference(share_info_t *); 118 119 static int as_constructor(void *obj, int flags) 115 NO_TRACE static int as_constructor(void *obj, unsigned int flags) 120 116 { 121 117 as_t *as = (as_t *) obj; 122 int rc; 123 118 124 119 link_initialize(&as->inactive_as_with_asid_link); 125 120 mutex_initialize(&as->lock, MUTEX_PASSIVE); 126 121 127 rc = as_constructor_arch(as, flags); 128 129 return rc; 130 } 131 132 static int as_destructor(void *obj) 133 { 134 as_t *as = (as_t *) obj; 135 136 return as_destructor_arch(as); 122 return as_constructor_arch(as, flags); 123 } 124 125 NO_TRACE static size_t as_destructor(void *obj) 126 { 127 return as_destructor_arch((as_t *) obj); 137 128 } 138 129 … … 141 132 { 142 133 as_arch_init(); 143 134 144 135 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, 145 136 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); … … 149 140 panic("Cannot create kernel address space."); 150 141 151 /* Make sure the kernel address space 142 /* 143 * Make sure the kernel address space 152 144 * reference count never drops to zero. 153 145 */ 154 a tomic_set(&AS_KERNEL->refcount, 1);146 as_hold(AS_KERNEL); 155 147 } 156 148 157 149 /** Create address space. 158 150 * 159 * @param flags Flags that influence the way in wich the address space 160 * is created. 161 */ 162 as_t *as_create(int flags) 163 { 164 as_t *as; 165 166 as = (as_t *) slab_alloc(as_slab, 0); 151 * @param flags Flags that influence the way in wich the address 152 * space is created. 153 * 154 */ 155 as_t *as_create(unsigned int flags) 156 { 157 as_t *as = (as_t *) slab_alloc(as_slab, 0); 167 158 (void) as_create_arch(as, 0); 168 159 … … 176 167 atomic_set(&as->refcount, 0); 177 168 as->cpu_refcount = 0; 169 178 170 #ifdef AS_PAGE_TABLE 179 171 as->genarch.page_table = page_table_create(flags); … … 192 184 * We know that we don't hold any spinlock. 193 185 * 194 * @param as Address space to be destroyed. 186 * @param as Address space to be destroyed. 
187 * 195 188 */ 196 189 void as_destroy(as_t *as) 197 190 { 198 ipl_t ipl;199 bool cond;200 191 DEADLOCK_PROBE_INIT(p_asidlock); 201 192 193 ASSERT(as != AS); 202 194 ASSERT(atomic_get(&as->refcount) == 0); 203 195 204 196 /* 205 * Since there is no reference to this a rea,206 * it is safe not tolock its mutex.207 */ 208 197 * Since there is no reference to this address space, it is safe not to 198 * lock its mutex. 199 */ 200 209 201 /* 210 202 * We need to avoid deadlock between TLB shootdown and asidlock. … … 215 207 */ 216 208 preemption_disable(); 217 ipl = interrupts_read(); 209 ipl_t ipl = interrupts_read(); 210 218 211 retry: 219 212 interrupts_disable(); … … 223 216 goto retry; 224 217 } 225 preemption_enable(); /* Interrupts disabled, enable preemption */ 226 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 227 if (as != AS && as->cpu_refcount == 0) 218 219 /* Interrupts disabled, enable preemption */ 220 preemption_enable(); 221 222 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) { 223 if (as->cpu_refcount == 0) 228 224 list_remove(&as->inactive_as_with_asid_link); 225 229 226 asid_put(as->asid); 230 227 } 228 231 229 spinlock_unlock(&asidlock); 232 230 interrupts_restore(ipl); 231 232 233 233 /* 234 234 * Destroy address space areas of the address space. 235 235 * The B+tree must be walked carefully because it is 236 236 * also being destroyed. 237 */ 238 for (cond = true; cond; ) { 239 btree_node_t *node; 240 237 */ 238 bool cond = true; 239 while (cond) { 241 240 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 242 node = list_get_instance(as->as_area_btree.leaf_head.next, 241 242 btree_node_t *node = 243 list_get_instance(as->as_area_btree.leaf_head.next, 243 244 btree_node_t, leaf_link); 244 245 if ((cond = node->keys)) {245 246 if ((cond = node->keys)) 246 247 as_area_destroy(as, node->key[0]); 247 } 248 } 249 248 } 249 250 250 btree_destroy(&as->as_area_btree); 251 251 252 #ifdef AS_PAGE_TABLE 252 253 page_table_destroy(as->genarch.page_table); … … 254 255 page_table_destroy(NULL); 255 256 #endif 256 257 interrupts_restore(ipl); 258 257 259 258 slab_free(as_slab, as); 260 259 } 261 260 261 /** Hold a reference to an address space. 262 * 263 * Holding a reference to an address space prevents destruction 264 * of that address space. 265 * 266 * @param as Address space to be held. 267 * 268 */ 269 NO_TRACE void as_hold(as_t *as) 270 { 271 atomic_inc(&as->refcount); 272 } 273 274 /** Release a reference to an address space. 275 * 276 * The last one to release a reference to an address space 277 * destroys the address space. 278 * 279 * @param asAddress space to be released. 280 * 281 */ 282 NO_TRACE void as_release(as_t *as) 283 { 284 if (atomic_predec(&as->refcount) == 0) 285 as_destroy(as); 286 } 287 288 /** Check area conflicts with other areas. 289 * 290 * @param as Address space. 291 * @param va Starting virtual address of the area being tested. 292 * @param size Size of the area being tested. 293 * @param avoid_area Do not touch this area. 294 * 295 * @return True if there is no conflict, false otherwise. 296 * 297 */ 298 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 299 as_area_t *avoid_area) 300 { 301 ASSERT(mutex_locked(&as->lock)); 302 303 /* 304 * We don't want any area to have conflicts with NULL page. 305 */ 306 if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE)) 307 return false; 308 309 /* 310 * The leaf node is found in O(log n), where n is proportional to 311 * the number of address space areas belonging to as. 
312 * The check for conflicts is then attempted on the rightmost 313 * record in the left neighbour, the leftmost record in the right 314 * neighbour and all records in the leaf node itself. 315 */ 316 btree_node_t *leaf; 317 as_area_t *area = 318 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 319 if (area) { 320 if (area != avoid_area) 321 return false; 322 } 323 324 /* First, check the two border cases. */ 325 btree_node_t *node = 326 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 327 if (node) { 328 area = (as_area_t *) node->value[node->keys - 1]; 329 330 mutex_lock(&area->lock); 331 332 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 333 mutex_unlock(&area->lock); 334 return false; 335 } 336 337 mutex_unlock(&area->lock); 338 } 339 340 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 341 if (node) { 342 area = (as_area_t *) node->value[0]; 343 344 mutex_lock(&area->lock); 345 346 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 347 mutex_unlock(&area->lock); 348 return false; 349 } 350 351 mutex_unlock(&area->lock); 352 } 353 354 /* Second, check the leaf node. */ 355 btree_key_t i; 356 for (i = 0; i < leaf->keys; i++) { 357 area = (as_area_t *) leaf->value[i]; 358 359 if (area == avoid_area) 360 continue; 361 362 mutex_lock(&area->lock); 363 364 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 365 mutex_unlock(&area->lock); 366 return false; 367 } 368 369 mutex_unlock(&area->lock); 370 } 371 372 /* 373 * So far, the area does not conflict with other areas. 374 * Check if it doesn't conflict with kernel address space. 375 */ 376 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 377 return !overlaps(va, size, 378 KERNEL_ADDRESS_SPACE_START, 379 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 380 } 381 382 return true; 383 } 384 262 385 /** Create address space area of common attributes. 263 386 * 264 387 * The created address space area is added to the target address space. 265 388 * 266 * @param as Target address space. 267 * @param flags Flags of the area memory. 268 * @param size Size of area. 269 * @param base Base address of area. 270 * @param attrs Attributes of the area. 271 * @param backend Address space area backend. NULL if no backend is used. 272 * @param backend_data NULL or a pointer to an array holding two void *. 273 * 274 * @return Address space area on success or NULL on failure. 275 */ 276 as_area_t * 277 as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, 278 mem_backend_t *backend, mem_backend_data_t *backend_data) 279 { 280 ipl_t ipl; 281 as_area_t *a; 282 389 * @param as Target address space. 390 * @param flags Flags of the area memory. 391 * @param size Size of area. 392 * @param base Base address of area. 393 * @param attrs Attributes of the area. 394 * @param backend Address space area backend. NULL if no backend is used. 395 * @param backend_data NULL or a pointer to an array holding two void *. 396 * 397 * @return Address space area on success or NULL on failure. 398 * 399 */ 400 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 401 uintptr_t base, unsigned int attrs, mem_backend_t *backend, 402 mem_backend_data_t *backend_data) 403 { 283 404 if (base % PAGE_SIZE) 284 405 return NULL; 285 406 286 407 if (!size) 287 408 return NULL; 288 409 289 410 /* Writeable executable areas are not supported. 
*/ 290 411 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 291 412 return NULL; 292 413 293 ipl = interrupts_disable();294 414 mutex_lock(&as->lock); 295 415 296 416 if (!check_area_conflicts(as, base, size, NULL)) { 297 417 mutex_unlock(&as->lock); 298 interrupts_restore(ipl);299 418 return NULL; 300 419 } 301 420 302 a = (as_area_t *) malloc(sizeof(as_area_t), 0); 303 304 mutex_initialize(&a->lock, MUTEX_PASSIVE); 305 306 a->as = as; 307 a->flags = flags; 308 a->attributes = attrs; 309 a->pages = SIZE2FRAMES(size); 310 a->base = base; 311 a->sh_info = NULL; 312 a->backend = backend; 421 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0); 422 423 mutex_initialize(&area->lock, MUTEX_PASSIVE); 424 425 area->as = as; 426 area->flags = flags; 427 area->attributes = attrs; 428 area->pages = SIZE2FRAMES(size); 429 area->resident = 0; 430 area->base = base; 431 area->sh_info = NULL; 432 area->backend = backend; 433 313 434 if (backend_data) 314 a ->backend_data = *backend_data;435 area->backend_data = *backend_data; 315 436 else 316 memsetb(&a->backend_data, sizeof(a->backend_data), 0); 317 318 btree_create(&a->used_space); 319 320 btree_insert(&as->as_area_btree, base, (void *) a, NULL); 321 437 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 438 439 btree_create(&area->used_space); 440 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 441 322 442 mutex_unlock(&as->lock); 323 interrupts_restore(ipl); 324 325 return a; 443 444 return area; 445 } 446 447 /** Find address space area and lock it. 448 * 449 * @param as Address space. 450 * @param va Virtual address. 451 * 452 * @return Locked address space area containing va on success or 453 * NULL on failure. 454 * 455 */ 456 NO_TRACE static as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 457 { 458 ASSERT(mutex_locked(&as->lock)); 459 460 btree_node_t *leaf; 461 as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 462 if (area) { 463 /* va is the base address of an address space area */ 464 mutex_lock(&area->lock); 465 return area; 466 } 467 468 /* 469 * Search the leaf node and the righmost record of its left neighbour 470 * to find out whether this is a miss or va belongs to an address 471 * space area found there. 472 */ 473 474 /* First, search the leaf node itself. */ 475 btree_key_t i; 476 477 for (i = 0; i < leaf->keys; i++) { 478 area = (as_area_t *) leaf->value[i]; 479 480 mutex_lock(&area->lock); 481 482 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 483 return area; 484 485 mutex_unlock(&area->lock); 486 } 487 488 /* 489 * Second, locate the left neighbour and test its last record. 490 * Because of its position in the B+tree, it must have base < va. 491 */ 492 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 493 if (lnode) { 494 area = (as_area_t *) lnode->value[lnode->keys - 1]; 495 496 mutex_lock(&area->lock); 497 498 if (va < area->base + area->pages * PAGE_SIZE) 499 return area; 500 501 mutex_unlock(&area->lock); 502 } 503 504 return NULL; 326 505 } 327 506 328 507 /** Find address space area and change it. 329 508 * 330 * @param as Address space. 331 * @param address Virtual address belonging to the area to be changed. 332 * Must be page-aligned. 333 * @param size New size of the virtual memory block starting at 334 * address. 335 * @param flags Flags influencing the remap operation. Currently unused. 336 * 337 * @return Zero on success or a value from @ref errno.h otherwise. 
338 */ 339 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) 340 { 341 as_area_t *area; 342 ipl_t ipl; 343 size_t pages; 344 345 ipl = interrupts_disable(); 509 * @param as Address space. 510 * @param address Virtual address belonging to the area to be changed. 511 * Must be page-aligned. 512 * @param size New size of the virtual memory block starting at 513 * address. 514 * @param flags Flags influencing the remap operation. Currently unused. 515 * 516 * @return Zero on success or a value from @ref errno.h otherwise. 517 * 518 */ 519 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 520 { 346 521 mutex_lock(&as->lock); 347 522 … … 349 524 * Locate the area. 350 525 */ 351 a rea = find_area_and_lock(as, address);526 as_area_t *area = find_area_and_lock(as, address); 352 527 if (!area) { 353 528 mutex_unlock(&as->lock); 354 interrupts_restore(ipl);355 529 return ENOENT; 356 530 } 357 531 358 532 if (area->backend == &phys_backend) { 359 533 /* … … 363 537 mutex_unlock(&area->lock); 364 538 mutex_unlock(&as->lock); 365 interrupts_restore(ipl);366 539 return ENOTSUP; 367 540 } 541 368 542 if (area->sh_info) { 369 543 /* 370 * Remapping of shared address space areas 544 * Remapping of shared address space areas 371 545 * is not supported. 372 546 */ 373 547 mutex_unlock(&area->lock); 374 548 mutex_unlock(&as->lock); 375 interrupts_restore(ipl);376 549 return ENOTSUP; 377 550 } 378 379 pages = SIZE2FRAMES((address - area->base) + size);551 552 size_t pages = SIZE2FRAMES((address - area->base) + size); 380 553 if (!pages) { 381 554 /* … … 384 557 mutex_unlock(&area->lock); 385 558 mutex_unlock(&as->lock); 386 interrupts_restore(ipl);387 559 return EPERM; 388 560 } 389 561 390 562 if (pages < area->pages) { 391 bool cond;392 563 uintptr_t start_free = area->base + pages * PAGE_SIZE; 393 564 394 565 /* 395 566 * Shrinking the area. 396 567 * No need to check for overlaps. 397 568 */ 398 569 570 page_table_lock(as, false); 571 399 572 /* 400 573 * Start TLB shootdown sequence. 401 574 */ 402 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base +403 pages * PAGE_SIZE, area->pages - pages);404 575 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, 576 area->base + pages * PAGE_SIZE, area->pages - pages); 577 405 578 /* 406 579 * Remove frames belonging to used space starting from … … 409 582 * is also the right way to remove part of the used_space 410 583 * B+tree leaf list. 411 */ 412 for (cond = true; cond;) { 413 btree_node_t *node; 414 584 */ 585 bool cond = true; 586 while (cond) { 415 587 ASSERT(!list_empty(&area->used_space.leaf_head)); 416 node = 588 589 btree_node_t *node = 417 590 list_get_instance(area->used_space.leaf_head.prev, 418 591 btree_node_t, leaf_link); 592 419 593 if ((cond = (bool) node->keys)) { 420 uintptr_t b= node->key[node->keys - 1];421 size_t c=594 uintptr_t ptr = node->key[node->keys - 1]; 595 size_t size = 422 596 (size_t) node->value[node->keys - 1]; 423 unsigned int i = 0;424 425 if (overlaps( b, c* PAGE_SIZE, area->base,597 size_t i = 0; 598 599 if (overlaps(ptr, size * PAGE_SIZE, area->base, 426 600 pages * PAGE_SIZE)) { 427 601 428 if ( b + c* PAGE_SIZE <= start_free) {602 if (ptr + size * PAGE_SIZE <= start_free) { 429 603 /* 430 604 * The whole interval fits … … 434 608 break; 435 609 } 436 610 437 611 /* 438 612 * Part of the interval corresponding … … 440 614 * address space area. 
441 615 */ 442 443 cond = false; /* we are almost done */ 444 i = (start_free - b) >> PAGE_WIDTH; 616 617 /* We are almost done */ 618 cond = false; 619 i = (start_free - ptr) >> PAGE_WIDTH; 445 620 if (!used_space_remove(area, start_free, 446 c - i)) 447 panic("Cannot remove used " 448 "space."); 621 size - i)) 622 panic("Cannot remove used space."); 449 623 } else { 450 624 /* … … 452 626 * completely removed. 453 627 */ 454 if (!used_space_remove(area, b, c)) 455 panic("Cannot remove used " 456 "space."); 628 if (!used_space_remove(area, ptr, size)) 629 panic("Cannot remove used space."); 457 630 } 458 459 for (; i < c; i++) { 460 pte_t *pte; 461 462 page_table_lock(as, false); 463 pte = page_mapping_find(as, b + 631 632 for (; i < size; i++) { 633 pte_t *pte = page_mapping_find(as, ptr + 464 634 i * PAGE_SIZE); 465 ASSERT(pte && PTE_VALID(pte) && 466 PTE_PRESENT(pte)); 467 if (area->backend && 468 area->backend->frame_free) { 635 636 ASSERT(pte); 637 ASSERT(PTE_VALID(pte)); 638 ASSERT(PTE_PRESENT(pte)); 639 640 if ((area->backend) && 641 (area->backend->frame_free)) { 469 642 area->backend->frame_free(area, 470 b+ i * PAGE_SIZE,643 ptr + i * PAGE_SIZE, 471 644 PTE_GET_FRAME(pte)); 472 645 } 473 page_mapping_remove(as, b + 646 647 page_mapping_remove(as, ptr + 474 648 i * PAGE_SIZE); 475 page_table_unlock(as, false);476 649 } 477 650 } 478 651 } 479 652 480 653 /* 481 654 * Finish TLB shootdown sequence. 482 655 */ 483 656 484 657 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 485 658 area->pages - pages); 659 486 660 /* 487 661 * Invalidate software translation caches (e.g. TSB on sparc64). … … 489 663 as_invalidate_translation_cache(as, area->base + 490 664 pages * PAGE_SIZE, area->pages - pages); 491 tlb_shootdown_finalize(); 492 665 tlb_shootdown_finalize(ipl); 666 667 page_table_unlock(as, false); 493 668 } else { 494 669 /* … … 499 674 area)) { 500 675 mutex_unlock(&area->lock); 501 mutex_unlock(&as->lock); 502 interrupts_restore(ipl); 676 mutex_unlock(&as->lock); 503 677 return EADDRNOTAVAIL; 504 678 } 505 } 506 679 } 680 507 681 area->pages = pages; 508 682 509 683 mutex_unlock(&area->lock); 510 684 mutex_unlock(&as->lock); 511 interrupts_restore(ipl); 512 685 513 686 return 0; 514 687 } 515 688 689 /** Remove reference to address space area share info. 690 * 691 * If the reference count drops to 0, the sh_info is deallocated. 692 * 693 * @param sh_info Pointer to address space area share info. 694 * 695 */ 696 NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info) 697 { 698 bool dealloc = false; 699 700 mutex_lock(&sh_info->lock); 701 ASSERT(sh_info->refcount); 702 703 if (--sh_info->refcount == 0) { 704 dealloc = true; 705 link_t *cur; 706 707 /* 708 * Now walk carefully the pagemap B+tree and free/remove 709 * reference from all frames found there. 710 */ 711 for (cur = sh_info->pagemap.leaf_head.next; 712 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 713 btree_node_t *node 714 = list_get_instance(cur, btree_node_t, leaf_link); 715 btree_key_t i; 716 717 for (i = 0; i < node->keys; i++) 718 frame_free((uintptr_t) node->value[i]); 719 } 720 721 } 722 mutex_unlock(&sh_info->lock); 723 724 if (dealloc) { 725 btree_destroy(&sh_info->pagemap); 726 free(sh_info); 727 } 728 } 729 516 730 /** Destroy address space area. 517 731 * 518 * @param as Address space. 519 * @param address Address within the area to be deleted. 520 * 521 * @return Zero on success or a value from @ref errno.h on failure. 732 * @param as Address space. 
733 * @param address Address within the area to be deleted. 734 * 735 * @return Zero on success or a value from @ref errno.h on failure. 736 * 522 737 */ 523 738 int as_area_destroy(as_t *as, uintptr_t address) 524 739 { 525 as_area_t *area;526 uintptr_t base;527 link_t *cur;528 ipl_t ipl;529 530 ipl = interrupts_disable();531 740 mutex_lock(&as->lock); 532 533 a rea = find_area_and_lock(as, address);741 742 as_area_t *area = find_area_and_lock(as, address); 534 743 if (!area) { 535 744 mutex_unlock(&as->lock); 536 interrupts_restore(ipl);537 745 return ENOENT; 538 746 } 539 540 base = area->base; 541 747 748 uintptr_t base = area->base; 749 750 page_table_lock(as, false); 751 542 752 /* 543 753 * Start TLB shootdown sequence. 544 754 */ 545 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 546 755 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 756 area->pages); 757 547 758 /* 548 759 * Visit only the pages mapped by used_space B+tree. 549 760 */ 761 link_t *cur; 550 762 for (cur = area->used_space.leaf_head.next; 551 763 cur != &area->used_space.leaf_head; cur = cur->next) { 552 764 btree_node_t *node; 553 unsigned int i;765 btree_key_t i; 554 766 555 767 node = list_get_instance(cur, btree_node_t, leaf_link); 556 768 for (i = 0; i < node->keys; i++) { 557 uintptr_t b = node->key[i]; 558 size_t j; 559 pte_t *pte; 769 uintptr_t ptr = node->key[i]; 770 size_t size; 560 771 561 for (j = 0; j < (size_t) node->value[i]; j++) { 562 page_table_lock(as, false); 563 pte = page_mapping_find(as, b + j * PAGE_SIZE); 564 ASSERT(pte && PTE_VALID(pte) && 565 PTE_PRESENT(pte)); 566 if (area->backend && 567 area->backend->frame_free) { 568 area->backend->frame_free(area, b + 569 j * PAGE_SIZE, PTE_GET_FRAME(pte)); 772 for (size = 0; size < (size_t) node->value[i]; size++) { 773 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 774 775 ASSERT(pte); 776 ASSERT(PTE_VALID(pte)); 777 ASSERT(PTE_PRESENT(pte)); 778 779 if ((area->backend) && 780 (area->backend->frame_free)) { 781 area->backend->frame_free(area, 782 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte)); 570 783 } 571 page_mapping_remove(as, b + j * PAGE_SIZE);572 page_ table_unlock(as, false);784 785 page_mapping_remove(as, ptr + size * PAGE_SIZE); 573 786 } 574 787 } 575 788 } 576 789 577 790 /* 578 791 * Finish TLB shootdown sequence. 579 792 */ 580 793 581 794 tlb_invalidate_pages(as->asid, area->base, area->pages); 795 582 796 /* 583 797 * Invalidate potential software translation caches (e.g. TSB on … … 585 799 */ 586 800 as_invalidate_translation_cache(as, area->base, area->pages); 587 tlb_shootdown_finalize(); 801 tlb_shootdown_finalize(ipl); 802 803 page_table_unlock(as, false); 588 804 589 805 btree_destroy(&area->used_space); 590 806 591 807 area->attributes |= AS_AREA_ATTR_PARTIAL; 592 808 593 809 if (area->sh_info) 594 810 sh_info_remove_reference(area->sh_info); 595 811 596 812 mutex_unlock(&area->lock); 597 813 598 814 /* 599 815 * Remove the empty area from address space. … … 604 820 605 821 mutex_unlock(&as->lock); 606 interrupts_restore(ipl);607 822 return 0; 608 823 } … … 615 830 * sh_info of the source area. The process of duplicating the 616 831 * mapping is done through the backend share function. 617 * 618 * @param src_as 619 * @param src_base 620 * @param acc_size 621 * @param dst_as 622 * @param dst_base 832 * 833 * @param src_as Pointer to source address space. 834 * @param src_base Base address of the source address space area. 
835 * @param acc_size Expected size of the source area. 836 * @param dst_as Pointer to destination address space. 837 * @param dst_base Target base address. 623 838 * @param dst_flags_mask Destination address space area flags mask. 624 839 * 625 * @return Zero on success or ENOENT if there is no such task or if 626 * there is no such address space area, EPERM if there was 627 * a problem in accepting the area or ENOMEM if there was a 628 * problem in allocating destination address space area. 629 * ENOTSUP is returned if the address space area backend 630 * does not support sharing. 840 * @return Zero on success. 841 * @return ENOENT if there is no such task or such address space. 842 * @return EPERM if there was a problem in accepting the area. 843 * @return ENOMEM if there was a problem in allocating destination 844 * address space area. 845 * @return ENOTSUP if the address space area backend does not support 846 * sharing. 847 * 631 848 */ 632 849 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 633 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) 634 { 635 ipl_t ipl; 636 int src_flags; 637 size_t src_size; 638 as_area_t *src_area, *dst_area; 639 share_info_t *sh_info; 640 mem_backend_t *src_backend; 641 mem_backend_data_t src_backend_data; 642 643 ipl = interrupts_disable(); 850 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 851 { 644 852 mutex_lock(&src_as->lock); 645 src_area = find_area_and_lock(src_as, src_base);853 as_area_t *src_area = find_area_and_lock(src_as, src_base); 646 854 if (!src_area) { 647 855 /* … … 649 857 */ 650 858 mutex_unlock(&src_as->lock); 651 interrupts_restore(ipl);652 859 return ENOENT; 653 860 } 654 655 if ( !src_area->backend || !src_area->backend->share) {861 862 if ((!src_area->backend) || (!src_area->backend->share)) { 656 863 /* 657 864 * There is no backend or the backend does not … … 660 867 mutex_unlock(&src_area->lock); 661 868 mutex_unlock(&src_as->lock); 662 interrupts_restore(ipl);663 869 return ENOTSUP; 664 870 } 665 871 666 s rc_size = src_area->pages * PAGE_SIZE;667 src_flags = src_area->flags;668 src_backend = src_area->backend;669 src_backend_data = src_area->backend_data;670 872 size_t src_size = src_area->pages * PAGE_SIZE; 873 unsigned int src_flags = src_area->flags; 874 mem_backend_t *src_backend = src_area->backend; 875 mem_backend_data_t src_backend_data = src_area->backend_data; 876 671 877 /* Share the cacheable flag from the original mapping */ 672 878 if (src_flags & AS_AREA_CACHEABLE) 673 879 dst_flags_mask |= AS_AREA_CACHEABLE; 674 675 if ( src_size != acc_size||676 ( src_flags & dst_flags_mask) != dst_flags_mask) {880 881 if ((src_size != acc_size) || 882 ((src_flags & dst_flags_mask) != dst_flags_mask)) { 677 883 mutex_unlock(&src_area->lock); 678 884 mutex_unlock(&src_as->lock); 679 interrupts_restore(ipl);680 885 return EPERM; 681 886 } 682 887 683 888 /* 684 889 * Now we are committed to sharing the area. … … 686 891 * Then it will be safe to unlock it. 687 892 */ 688 sh _info = src_area->sh_info;893 share_info_t *sh_info = src_area->sh_info; 689 894 if (!sh_info) { 690 895 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); … … 693 898 btree_create(&sh_info->pagemap); 694 899 src_area->sh_info = sh_info; 900 695 901 /* 696 902 * Call the backend to setup sharing. … … 702 908 mutex_unlock(&sh_info->lock); 703 909 } 704 910 705 911 mutex_unlock(&src_area->lock); 706 912 mutex_unlock(&src_as->lock); 707 913 708 914 /* 709 915 * Create copy of the source address space area. 
… … 714 920 * to support sharing in less privileged mode. 715 921 */ 716 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,717 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);922 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 923 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 718 924 if (!dst_area) { 719 925 /* … … 722 928 sh_info_remove_reference(sh_info); 723 929 724 interrupts_restore(ipl);725 930 return ENOMEM; 726 931 } 727 932 728 933 /* 729 934 * Now the destination address space area has been 730 935 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 731 936 * attribute and set the sh_info. 732 */ 733 mutex_lock(&dst_as->lock); 937 */ 938 mutex_lock(&dst_as->lock); 734 939 mutex_lock(&dst_area->lock); 735 940 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 736 941 dst_area->sh_info = sh_info; 737 942 mutex_unlock(&dst_area->lock); 738 mutex_unlock(&dst_as->lock); 739 740 interrupts_restore(ipl); 943 mutex_unlock(&dst_as->lock); 741 944 742 945 return 0; … … 745 948 /** Check access mode for address space area. 746 949 * 747 * The address space area must be locked prior to this call. 748 * 749 * @param area Address space area. 750 * @param access Access mode. 751 * 752 * @return False if access violates area's permissions, true 753 * otherwise. 754 */ 755 bool as_area_check_access(as_area_t *area, pf_access_t access) 756 { 950 * @param area Address space area. 951 * @param access Access mode. 952 * 953 * @return False if access violates area's permissions, true 954 * otherwise. 955 * 956 */ 957 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access) 958 { 959 ASSERT(mutex_locked(&area->lock)); 960 757 961 int flagmap[] = { 758 962 [PF_ACCESS_READ] = AS_AREA_READ, … … 760 964 [PF_ACCESS_EXEC] = AS_AREA_EXEC 761 965 }; 762 966 763 967 if (!(area->flags & flagmap[access])) 764 968 return false; 765 969 766 970 return true; 971 } 972 973 /** Convert address space area flags to page flags. 974 * 975 * @param aflags Flags of some address space area. 976 * 977 * @return Flags to be passed to page_mapping_insert(). 
978 * 979 */ 980 NO_TRACE static unsigned int area_flags_to_page_flags(unsigned int aflags) 981 { 982 unsigned int flags = PAGE_USER | PAGE_PRESENT; 983 984 if (aflags & AS_AREA_READ) 985 flags |= PAGE_READ; 986 987 if (aflags & AS_AREA_WRITE) 988 flags |= PAGE_WRITE; 989 990 if (aflags & AS_AREA_EXEC) 991 flags |= PAGE_EXEC; 992 993 if (aflags & AS_AREA_CACHEABLE) 994 flags |= PAGE_CACHEABLE; 995 996 return flags; 767 997 } 768 998 … … 781 1011 * 782 1012 */ 783 int as_area_change_flags(as_t *as, int flags, uintptr_t address) 784 { 785 as_area_t *area; 786 uintptr_t base; 787 link_t *cur; 788 ipl_t ipl; 789 int page_flags; 790 uintptr_t *old_frame; 791 size_t frame_idx; 792 size_t used_pages; 793 1013 int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address) 1014 { 794 1015 /* Flags for the new memory mapping */ 795 page_flags = area_flags_to_page_flags(flags); 796 797 ipl = interrupts_disable(); 1016 unsigned int page_flags = area_flags_to_page_flags(flags); 1017 798 1018 mutex_lock(&as->lock); 799 800 a rea = find_area_and_lock(as, address);1019 1020 as_area_t *area = find_area_and_lock(as, address); 801 1021 if (!area) { 802 1022 mutex_unlock(&as->lock); 803 interrupts_restore(ipl);804 1023 return ENOENT; 805 1024 } 806 1025 807 1026 if ((area->sh_info) || (area->backend != &anon_backend)) { 808 1027 /* Copying shared areas not supported yet */ … … 810 1029 mutex_unlock(&area->lock); 811 1030 mutex_unlock(&as->lock); 812 interrupts_restore(ipl);813 1031 return ENOTSUP; 814 1032 } 815 816 base = area->base; 817 1033 818 1034 /* 819 1035 * Compute total number of used pages in the used_space B+tree 820 1036 */ 821 used_pages = 0; 822 1037 size_t used_pages = 0; 1038 link_t *cur; 1039 823 1040 for (cur = area->used_space.leaf_head.next; 824 1041 cur != &area->used_space.leaf_head; cur = cur->next) { 825 btree_node_t *node ;826 unsigned int i;827 828 node = list_get_instance(cur, btree_node_t, leaf_link);829 for (i = 0; i < node->keys; i++) {1042 btree_node_t *node 1043 = list_get_instance(cur, btree_node_t, leaf_link); 1044 btree_key_t i; 1045 1046 for (i = 0; i < node->keys; i++) 830 1047 used_pages += (size_t) node->value[i]; 831 } 832 } 833 1048 } 1049 834 1050 /* An array for storing frame numbers */ 835 old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 836 1051 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 1052 1053 page_table_lock(as, false); 1054 837 1055 /* 838 1056 * Start TLB shootdown sequence. 839 1057 */ 840 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 841 1058 ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, 1059 area->pages); 1060 842 1061 /* 843 1062 * Remove used pages from page tables and remember their frame 844 1063 * numbers. 
845 1064 */ 846 frame_idx = 0;847 1065 size_t frame_idx = 0; 1066 848 1067 for (cur = area->used_space.leaf_head.next; 849 1068 cur != &area->used_space.leaf_head; cur = cur->next) { 850 btree_node_t *node ;851 unsigned int i;852 853 node = list_get_instance(cur, btree_node_t, leaf_link);1069 btree_node_t *node 1070 = list_get_instance(cur, btree_node_t, leaf_link); 1071 btree_key_t i; 1072 854 1073 for (i = 0; i < node->keys; i++) { 855 uintptr_t b = node->key[i]; 856 size_t j; 857 pte_t *pte; 1074 uintptr_t ptr = node->key[i]; 1075 size_t size; 858 1076 859 for (j = 0; j < (size_t) node->value[i]; j++) { 860 page_table_lock(as, false); 861 pte = page_mapping_find(as, b + j * PAGE_SIZE); 862 ASSERT(pte && PTE_VALID(pte) && 863 PTE_PRESENT(pte)); 1077 for (size = 0; size < (size_t) node->value[i]; size++) { 1078 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 1079 1080 ASSERT(pte); 1081 ASSERT(PTE_VALID(pte)); 1082 ASSERT(PTE_PRESENT(pte)); 1083 864 1084 old_frame[frame_idx++] = PTE_GET_FRAME(pte); 865 1085 866 1086 /* Remove old mapping */ 867 page_mapping_remove(as, b + j * PAGE_SIZE); 868 page_table_unlock(as, false); 1087 page_mapping_remove(as, ptr + size * PAGE_SIZE); 869 1088 } 870 1089 } 871 1090 } 872 1091 873 1092 /* 874 1093 * Finish TLB shootdown sequence. 875 1094 */ 876 1095 877 1096 tlb_invalidate_pages(as->asid, area->base, area->pages); 878 1097 … … 882 1101 */ 883 1102 as_invalidate_translation_cache(as, area->base, area->pages); 884 tlb_shootdown_finalize(); 885 1103 tlb_shootdown_finalize(ipl); 1104 1105 page_table_unlock(as, false); 1106 886 1107 /* 887 1108 * Set the new flags. 888 1109 */ 889 1110 area->flags = flags; 890 1111 891 1112 /* 892 1113 * Map pages back in with new flags. This step is kept separate … … 895 1116 */ 896 1117 frame_idx = 0; 897 1118 898 1119 for (cur = area->used_space.leaf_head.next; 899 1120 cur != &area->used_space.leaf_head; cur = cur->next) { 900 btree_node_t *node ;901 unsigned int i;902 903 node = list_get_instance(cur, btree_node_t, leaf_link);1121 btree_node_t *node 1122 = list_get_instance(cur, btree_node_t, leaf_link); 1123 btree_key_t i; 1124 904 1125 for (i = 0; i < node->keys; i++) { 905 uintptr_t b= node->key[i];906 size_t j;1126 uintptr_t ptr = node->key[i]; 1127 size_t size; 907 1128 908 for ( j = 0; j < (size_t) node->value[i]; j++) {1129 for (size = 0; size < (size_t) node->value[i]; size++) { 909 1130 page_table_lock(as, false); 910 1131 911 1132 /* Insert the new mapping */ 912 page_mapping_insert(as, b + j* PAGE_SIZE,1133 page_mapping_insert(as, ptr + size * PAGE_SIZE, 913 1134 old_frame[frame_idx++], page_flags); 914 1135 915 1136 page_table_unlock(as, false); 916 1137 } 917 1138 } 918 1139 } 919 1140 920 1141 free(old_frame); 921 1142 922 1143 mutex_unlock(&area->lock); 923 1144 mutex_unlock(&as->lock); 924 interrupts_restore(ipl); 925 1145 926 1146 return 0; 927 1147 } 928 929 1148 930 1149 /** Handle page fault within the current address space. … … 936 1155 * Interrupts are assumed disabled. 937 1156 * 938 * @param page Faulting page. 939 * @param access Access mode that caused the page fault (i.e. 940 * read/write/exec). 941 * @param istate Pointer to the interrupted state. 942 * 943 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or 944 * AS_PF_DEFER if the fault was caused by copy_to_uspace() 945 * or copy_from_uspace(). 1157 * @param page Faulting page. 1158 * @param access Access mode that caused the page fault (i.e. 1159 * read/write/exec). 
1160 * @param istate Pointer to the interrupted state. 1161 * 1162 * @return AS_PF_FAULT on page fault. 1163 * @return AS_PF_OK on success. 1164 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace() 1165 * or copy_from_uspace(). 1166 * 946 1167 */ 947 1168 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 948 1169 { 949 pte_t *pte;950 as_area_t *area;951 952 1170 if (!THREAD) 953 1171 return AS_PF_FAULT; 954 955 ASSERT(AS); 956 1172 1173 if (!AS) 1174 return AS_PF_FAULT; 1175 957 1176 mutex_lock(&AS->lock); 958 a rea = find_area_and_lock(AS, page);1177 as_area_t *area = find_area_and_lock(AS, page); 959 1178 if (!area) { 960 1179 /* … … 965 1184 goto page_fault; 966 1185 } 967 1186 968 1187 if (area->attributes & AS_AREA_ATTR_PARTIAL) { 969 1188 /* … … 973 1192 mutex_unlock(&area->lock); 974 1193 mutex_unlock(&AS->lock); 975 goto page_fault; 976 } 977 978 if ( !area->backend || !area->backend->page_fault) {1194 goto page_fault; 1195 } 1196 1197 if ((!area->backend) || (!area->backend->page_fault)) { 979 1198 /* 980 1199 * The address space area is not backed by any backend … … 983 1202 mutex_unlock(&area->lock); 984 1203 mutex_unlock(&AS->lock); 985 goto page_fault; 986 } 987 1204 goto page_fault; 1205 } 1206 988 1207 page_table_lock(AS, false); 989 1208 … … 992 1211 * we need to make sure the mapping has not been already inserted. 993 1212 */ 1213 pte_t *pte; 994 1214 if ((pte = page_mapping_find(AS, page))) { 995 1215 if (PTE_PRESENT(pte)) { … … 1019 1239 mutex_unlock(&AS->lock); 1020 1240 return AS_PF_OK; 1021 1241 1022 1242 page_fault: 1023 1243 if (THREAD->in_copy_from_uspace) { … … 1032 1252 return AS_PF_FAULT; 1033 1253 } 1034 1254 1035 1255 return AS_PF_DEFER; 1036 1256 } … … 1044 1264 * When this function is enetered, no spinlocks may be held. 1045 1265 * 1046 * @param old Old address space or NULL. 1047 * @param new New address space. 1266 * @param old Old address space or NULL. 1267 * @param new New address space. 1268 * 1048 1269 */ 1049 1270 void as_switch(as_t *old_as, as_t *new_as) … … 1051 1272 DEADLOCK_PROBE_INIT(p_asidlock); 1052 1273 preemption_disable(); 1274 1053 1275 retry: 1054 1276 (void) interrupts_disable(); 1055 1277 if (!spinlock_trylock(&asidlock)) { 1056 /* 1278 /* 1057 1279 * Avoid deadlock with TLB shootdown. 1058 1280 * We can enable interrupts here because … … 1065 1287 } 1066 1288 preemption_enable(); 1067 1289 1068 1290 /* 1069 1291 * First, take care of the old address space. 1070 */ 1292 */ 1071 1293 if (old_as) { 1072 1294 ASSERT(old_as->cpu_refcount); 1073 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1295 1296 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1074 1297 /* 1075 1298 * The old address space is no longer active on … … 1079 1302 */ 1080 1303 ASSERT(old_as->asid != ASID_INVALID); 1304 1081 1305 list_append(&old_as->inactive_as_with_asid_link, 1082 1306 &inactive_as_with_asid_head); 1083 1307 } 1084 1308 1085 1309 /* 1086 1310 * Perform architecture-specific tasks when the address space … … 1089 1313 as_deinstall_arch(old_as); 1090 1314 } 1091 1315 1092 1316 /* 1093 1317 * Second, prepare the new address space. 
… … 1099 1323 new_as->asid = asid_get(); 1100 1324 } 1325 1101 1326 #ifdef AS_PAGE_TABLE 1102 1327 SET_PTL0_ADDRESS(new_as->genarch.page_table); … … 1108 1333 */ 1109 1334 as_install_arch(new_as); 1110 1335 1111 1336 spinlock_unlock(&asidlock); 1112 1337 … … 1114 1339 } 1115 1340 1116 /** Convert address space area flags to page flags.1117 *1118 * @param aflags Flags of some address space area.1119 *1120 * @return Flags to be passed to page_mapping_insert().1121 */1122 int area_flags_to_page_flags(int aflags)1123 {1124 int flags;1125 1126 flags = PAGE_USER | PAGE_PRESENT;1127 1128 if (aflags & AS_AREA_READ)1129 flags |= PAGE_READ;1130 1131 if (aflags & AS_AREA_WRITE)1132 flags |= PAGE_WRITE;1133 1134 if (aflags & AS_AREA_EXEC)1135 flags |= PAGE_EXEC;1136 1137 if (aflags & AS_AREA_CACHEABLE)1138 flags |= PAGE_CACHEABLE;1139 1140 return flags;1141 }1142 1143 1341 /** Compute flags for virtual address translation subsytem. 1144 1342 * 1145 * The address space area must be locked.1146 * Interrupts must be disabled.1147 * 1148 * @param a Address space area.1149 * 1150 * @return Flags to be used in page_mapping_insert(). 1151 */ 1152 int as_area_get_flags(as_area_t *a) 1153 { 1154 return area_flags_to_page_flags(a ->flags);1343 * @param area Address space area. 1344 * 1345 * @return Flags to be used in page_mapping_insert(). 1346 * 1347 */ 1348 NO_TRACE unsigned int as_area_get_flags(as_area_t *area) 1349 { 1350 ASSERT(mutex_locked(&area->lock)); 1351 1352 return area_flags_to_page_flags(area->flags); 1155 1353 } 1156 1354 … … 1160 1358 * table. 1161 1359 * 1162 * @param flags Flags saying whether the page table is for the kernel 1163 * address space. 1164 * 1165 * @return First entry of the page table. 1166 */ 1167 pte_t *page_table_create(int flags) 1360 * @param flags Flags saying whether the page table is for the kernel 1361 * address space. 1362 * 1363 * @return First entry of the page table. 1364 * 1365 */ 1366 NO_TRACE pte_t *page_table_create(unsigned int flags) 1168 1367 { 1169 1368 ASSERT(as_operations); … … 1177 1376 * Destroy page table in architecture specific way. 1178 1377 * 1179 * @param page_table Physical address of PTL0. 1180 */ 1181 void page_table_destroy(pte_t *page_table) 1378 * @param page_table Physical address of PTL0. 1379 * 1380 */ 1381 NO_TRACE void page_table_destroy(pte_t *page_table) 1182 1382 { 1183 1383 ASSERT(as_operations); … … 1191 1391 * This function should be called before any page_mapping_insert(), 1192 1392 * page_mapping_remove() and page_mapping_find(). 1193 * 1393 * 1194 1394 * Locking order is such that address space areas must be locked 1195 1395 * prior to this call. Address space can be locked prior to this 1196 1396 * call in which case the lock argument is false. 1197 1397 * 1198 * @param as Address space. 1199 * @param lock If false, do not attempt to lock as->lock. 1200 */ 1201 void page_table_lock(as_t *as, bool lock) 1398 * @param as Address space. 1399 * @param lock If false, do not attempt to lock as->lock. 1400 * 1401 */ 1402 NO_TRACE void page_table_lock(as_t *as, bool lock) 1202 1403 { 1203 1404 ASSERT(as_operations); … … 1209 1410 /** Unlock page table. 1210 1411 * 1211 * @param as Address space. 1212 * @param unlock If false, do not attempt to unlock as->lock. 1213 */ 1214 void page_table_unlock(as_t *as, bool unlock) 1412 * @param as Address space. 1413 * @param unlock If false, do not attempt to unlock as->lock. 
1414 * 1415 */ 1416 NO_TRACE void page_table_unlock(as_t *as, bool unlock) 1215 1417 { 1216 1418 ASSERT(as_operations); … … 1220 1422 } 1221 1423 1222 1223 /** Find address space area and lock it. 1224 * 1225 * The address space must be locked and interrupts must be disabled. 1226 * 1227 * @param as Address space. 1228 * @param va Virtual address. 1229 * 1230 * @return Locked address space area containing va on success or 1231 * NULL on failure. 1232 */ 1233 as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 1234 { 1235 as_area_t *a; 1236 btree_node_t *leaf, *lnode; 1237 unsigned int i; 1238 1239 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1240 if (a) { 1241 /* va is the base address of an address space area */ 1242 mutex_lock(&a->lock); 1243 return a; 1244 } 1245 1246 /* 1247 * Search the leaf node and the righmost record of its left neighbour 1248 * to find out whether this is a miss or va belongs to an address 1249 * space area found there. 1250 */ 1251 1252 /* First, search the leaf node itself. */ 1253 for (i = 0; i < leaf->keys; i++) { 1254 a = (as_area_t *) leaf->value[i]; 1255 mutex_lock(&a->lock); 1256 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { 1257 return a; 1258 } 1259 mutex_unlock(&a->lock); 1260 } 1261 1262 /* 1263 * Second, locate the left neighbour and test its last record. 1264 * Because of its position in the B+tree, it must have base < va. 1265 */ 1266 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1267 if (lnode) { 1268 a = (as_area_t *) lnode->value[lnode->keys - 1]; 1269 mutex_lock(&a->lock); 1270 if (va < a->base + a->pages * PAGE_SIZE) { 1271 return a; 1272 } 1273 mutex_unlock(&a->lock); 1274 } 1275 1276 return NULL; 1277 } 1278 1279 /** Check area conflicts with other areas. 1280 * 1281 * The address space must be locked and interrupts must be disabled. 1282 * 1283 * @param as Address space. 1284 * @param va Starting virtual address of the area being tested. 1285 * @param size Size of the area being tested. 1286 * @param avoid_area Do not touch this area. 1287 * 1288 * @return True if there is no conflict, false otherwise. 1289 */ 1290 bool 1291 check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) 1292 { 1293 as_area_t *a; 1294 btree_node_t *leaf, *node; 1295 unsigned int i; 1296 1297 /* 1298 * We don't want any area to have conflicts with NULL page. 1299 */ 1300 if (overlaps(va, size, NULL, PAGE_SIZE)) 1301 return false; 1302 1303 /* 1304 * The leaf node is found in O(log n), where n is proportional to 1305 * the number of address space areas belonging to as. 1306 * The check for conflicts is then attempted on the rightmost 1307 * record in the left neighbour, the leftmost record in the right 1308 * neighbour and all records in the leaf node itself. 1309 */ 1310 1311 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { 1312 if (a != avoid_area) 1313 return false; 1314 } 1315 1316 /* First, check the two border cases. 
*/ 1317 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { 1318 a = (as_area_t *) node->value[node->keys - 1]; 1319 mutex_lock(&a->lock); 1320 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1321 mutex_unlock(&a->lock); 1322 return false; 1323 } 1324 mutex_unlock(&a->lock); 1325 } 1326 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 1327 if (node) { 1328 a = (as_area_t *) node->value[0]; 1329 mutex_lock(&a->lock); 1330 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1331 mutex_unlock(&a->lock); 1332 return false; 1333 } 1334 mutex_unlock(&a->lock); 1335 } 1336 1337 /* Second, check the leaf node. */ 1338 for (i = 0; i < leaf->keys; i++) { 1339 a = (as_area_t *) leaf->value[i]; 1340 1341 if (a == avoid_area) 1342 continue; 1343 1344 mutex_lock(&a->lock); 1345 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1346 mutex_unlock(&a->lock); 1347 return false; 1348 } 1349 mutex_unlock(&a->lock); 1350 } 1351 1352 /* 1353 * So far, the area does not conflict with other areas. 1354 * Check if it doesn't conflict with kernel address space. 1355 */ 1356 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 1357 return !overlaps(va, size, 1358 KERNEL_ADDRESS_SPACE_START, 1359 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 1360 } 1361 1362 return true; 1424 /** Test whether page tables are locked. 1425 * 1426 * @param as Address space where the page tables belong. 1427 * 1428 * @return True if the page tables belonging to the address soace 1429 * are locked, otherwise false. 1430 */ 1431 NO_TRACE bool page_table_locked(as_t *as) 1432 { 1433 ASSERT(as_operations); 1434 ASSERT(as_operations->page_table_locked); 1435 1436 return as_operations->page_table_locked(as); 1363 1437 } 1364 1438 1365 1439 /** Return size of the address space area with given base. 1366 1440 * 1367 * @param base Arbitrary address insede the address space area. 1368 * 1369 * @return Size of the address space area in bytes or zero if it 1370 * does not exist. 1441 * @param base Arbitrary address inside the address space area. 1442 * 1443 * @return Size of the address space area in bytes or zero if it 1444 * does not exist. 1445 * 1371 1446 */ 1372 1447 size_t as_area_get_size(uintptr_t base) 1373 1448 { 1374 ipl_t ipl;1375 as_area_t *src_area;1376 1449 size_t size; 1377 1378 ipl = interrupts_disable(); 1379 src_area = find_area_and_lock(AS, base); 1450 1451 page_table_lock(AS, true); 1452 as_area_t *src_area = find_area_and_lock(AS, base); 1453 1380 1454 if (src_area) { 1381 1455 size = src_area->pages * PAGE_SIZE; 1382 1456 mutex_unlock(&src_area->lock); 1383 } else {1457 } else 1384 1458 size = 0; 1385 }1386 interrupts_restore(ipl);1459 1460 page_table_unlock(AS, true); 1387 1461 return size; 1388 1462 } … … 1392 1466 * The address space area must be already locked. 1393 1467 * 1394 * @param a Address space area. 1395 * @param page First page to be marked. 1396 * @param count Number of page to be marked. 1397 * 1398 * @return Zero on failure and non-zero on success. 1399 */ 1400 int used_space_insert(as_area_t *a, uintptr_t page, size_t count) 1401 { 1402 btree_node_t *leaf, *node; 1403 size_t pages; 1404 unsigned int i; 1405 1468 * @param area Address space area. 1469 * @param page First page to be marked. 1470 * @param count Number of page to be marked. 1471 * 1472 * @return False on failure or true on success. 
1473 * 1474 */ 1475 bool used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1476 { 1477 ASSERT(mutex_locked(&area->lock)); 1406 1478 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1407 1479 ASSERT(count); 1408 1409 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1480 1481 btree_node_t *leaf; 1482 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1410 1483 if (pages) { 1411 1484 /* 1412 1485 * We hit the beginning of some used space. 1413 1486 */ 1414 return 0;1415 } 1416 1487 return false; 1488 } 1489 1417 1490 if (!leaf->keys) { 1418 btree_insert(&a ->used_space, page, (void *) count, leaf);1419 return 1;1420 } 1421 1422 node = btree_leaf_node_left_neighbour(&a->used_space, leaf);1491 btree_insert(&area->used_space, page, (void *) count, leaf); 1492 goto success; 1493 } 1494 1495 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf); 1423 1496 if (node) { 1424 1497 uintptr_t left_pg = node->key[node->keys - 1]; … … 1432 1505 * the left neigbour and the first interval of the leaf. 1433 1506 */ 1434 1507 1435 1508 if (page >= right_pg) { 1436 1509 /* Do nothing. */ … … 1438 1511 left_cnt * PAGE_SIZE)) { 1439 1512 /* The interval intersects with the left interval. */ 1440 return 0;1513 return false; 1441 1514 } else if (overlaps(page, count * PAGE_SIZE, right_pg, 1442 1515 right_cnt * PAGE_SIZE)) { 1443 1516 /* The interval intersects with the right interval. */ 1444 return 0;1517 return false; 1445 1518 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1446 1519 (page + count * PAGE_SIZE == right_pg)) { … … 1450 1523 */ 1451 1524 node->value[node->keys - 1] += count + right_cnt; 1452 btree_remove(&a ->used_space, right_pg, leaf);1453 return 1;1525 btree_remove(&area->used_space, right_pg, leaf); 1526 goto success; 1454 1527 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1455 /* 1528 /* 1456 1529 * The interval can be added by simply growing the left 1457 1530 * interval. 1458 1531 */ 1459 1532 node->value[node->keys - 1] += count; 1460 return 1;1533 goto success; 1461 1534 } else if (page + count * PAGE_SIZE == right_pg) { 1462 1535 /* … … 1467 1540 leaf->value[0] += count; 1468 1541 leaf->key[0] = page; 1469 return 1;1542 goto success; 1470 1543 } else { 1471 1544 /* … … 1473 1546 * but cannot be merged with any of them. 1474 1547 */ 1475 btree_insert(&a ->used_space, page, (void *) count,1548 btree_insert(&area->used_space, page, (void *) count, 1476 1549 leaf); 1477 return 1;1550 goto success; 1478 1551 } 1479 1552 } else if (page < leaf->key[0]) { 1480 1553 uintptr_t right_pg = leaf->key[0]; 1481 1554 size_t right_cnt = (size_t) leaf->value[0]; 1482 1555 1483 1556 /* 1484 1557 * Investigate the border case in which the left neighbour does 1485 1558 * not exist but the interval fits from the left. 1486 1559 */ 1487 1560 1488 1561 if (overlaps(page, count * PAGE_SIZE, right_pg, 1489 1562 right_cnt * PAGE_SIZE)) { 1490 1563 /* The interval intersects with the right interval. */ 1491 return 0;1564 return false; 1492 1565 } else if (page + count * PAGE_SIZE == right_pg) { 1493 1566 /* … … 1498 1571 leaf->key[0] = page; 1499 1572 leaf->value[0] += count; 1500 return 1;1573 goto success; 1501 1574 } else { 1502 1575 /* … … 1504 1577 * It must be added individually. 
1505 1578 */ 1506 btree_insert(&a ->used_space, page, (void *) count,1579 btree_insert(&area->used_space, page, (void *) count, 1507 1580 leaf); 1508 return 1;1509 } 1510 } 1511 1512 node = btree_leaf_node_right_neighbour(&a ->used_space, leaf);1581 goto success; 1582 } 1583 } 1584 1585 node = btree_leaf_node_right_neighbour(&area->used_space, leaf); 1513 1586 if (node) { 1514 1587 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1522 1595 * the right neigbour and the last interval of the leaf. 1523 1596 */ 1524 1597 1525 1598 if (page < left_pg) { 1526 1599 /* Do nothing. */ … … 1528 1601 left_cnt * PAGE_SIZE)) { 1529 1602 /* The interval intersects with the left interval. */ 1530 return 0;1603 return false; 1531 1604 } else if (overlaps(page, count * PAGE_SIZE, right_pg, 1532 1605 right_cnt * PAGE_SIZE)) { 1533 1606 /* The interval intersects with the right interval. */ 1534 return 0;1607 return false; 1535 1608 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1536 1609 (page + count * PAGE_SIZE == right_pg)) { … … 1538 1611 * The interval can be added by merging the two already 1539 1612 * present intervals. 1540 * */1613 */ 1541 1614 leaf->value[leaf->keys - 1] += count + right_cnt; 1542 btree_remove(&a ->used_space, right_pg, node);1543 return 1;1615 btree_remove(&area->used_space, right_pg, node); 1616 goto success; 1544 1617 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1545 1618 /* 1546 1619 * The interval can be added by simply growing the left 1547 1620 * interval. 1548 * */1549 leaf->value[leaf->keys - 1] += 1550 return 1;1621 */ 1622 leaf->value[leaf->keys - 1] += count; 1623 goto success; 1551 1624 } else if (page + count * PAGE_SIZE == right_pg) { 1552 1625 /* … … 1557 1630 node->value[0] += count; 1558 1631 node->key[0] = page; 1559 return 1;1632 goto success; 1560 1633 } else { 1561 1634 /* … … 1563 1636 * but cannot be merged with any of them. 1564 1637 */ 1565 btree_insert(&a ->used_space, page, (void *) count,1638 btree_insert(&area->used_space, page, (void *) count, 1566 1639 leaf); 1567 return 1;1640 goto success; 1568 1641 } 1569 1642 } else if (page >= leaf->key[leaf->keys - 1]) { 1570 1643 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1571 1644 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1572 1645 1573 1646 /* 1574 1647 * Investigate the border case in which the right neighbour 1575 1648 * does not exist but the interval fits from the right. 1576 1649 */ 1577 1650 1578 1651 if (overlaps(page, count * PAGE_SIZE, left_pg, 1579 1652 left_cnt * PAGE_SIZE)) { 1580 1653 /* The interval intersects with the left interval. */ 1581 return 0;1654 return false; 1582 1655 } else if (left_pg + left_cnt * PAGE_SIZE == page) { 1583 1656 /* … … 1586 1659 */ 1587 1660 leaf->value[leaf->keys - 1] += count; 1588 return 1;1661 goto success; 1589 1662 } else { 1590 1663 /* … … 1592 1665 * It must be added individually. 1593 1666 */ 1594 btree_insert(&a ->used_space, page, (void *) count,1667 btree_insert(&area->used_space, page, (void *) count, 1595 1668 leaf); 1596 return 1;1669 goto success; 1597 1670 } 1598 1671 } … … 1603 1676 * were already resolved. 1604 1677 */ 1678 btree_key_t i; 1605 1679 for (i = 1; i < leaf->keys; i++) { 1606 1680 if (page < leaf->key[i]) { … … 1609 1683 size_t left_cnt = (size_t) leaf->value[i - 1]; 1610 1684 size_t right_cnt = (size_t) leaf->value[i]; 1611 1685 1612 1686 /* 1613 1687 * The interval fits between left_pg and right_pg. 
1614 1688 */ 1615 1689 1616 1690 if (overlaps(page, count * PAGE_SIZE, left_pg, 1617 1691 left_cnt * PAGE_SIZE)) { … … 1620 1694 * interval. 1621 1695 */ 1622 return 0;1696 return false; 1623 1697 } else if (overlaps(page, count * PAGE_SIZE, right_pg, 1624 1698 right_cnt * PAGE_SIZE)) { … … 1627 1701 * interval. 1628 1702 */ 1629 return 0;1703 return false; 1630 1704 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1631 1705 (page + count * PAGE_SIZE == right_pg)) { … … 1635 1709 */ 1636 1710 leaf->value[i - 1] += count + right_cnt; 1637 btree_remove(&a ->used_space, right_pg, leaf);1638 return 1;1711 btree_remove(&area->used_space, right_pg, leaf); 1712 goto success; 1639 1713 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1640 1714 /* … … 1643 1717 */ 1644 1718 leaf->value[i - 1] += count; 1645 return 1;1719 goto success; 1646 1720 } else if (page + count * PAGE_SIZE == right_pg) { 1647 1721 /* 1648 1722 * The interval can be addded by simply moving 1649 1723 * base of the right interval down and 1650 1724 * increasing its size accordingly. 1651 1725 */ 1652 1726 leaf->value[i] += count; 1653 1727 leaf->key[i] = page; 1654 return 1;1728 goto success; 1655 1729 } else { 1656 1730 /* … … 1659 1733 * them. 1660 1734 */ 1661 btree_insert(&a ->used_space, page,1735 btree_insert(&area->used_space, page, 1662 1736 (void *) count, leaf); 1663 return 1;1737 goto success; 1664 1738 } 1665 1739 } 1666 1740 } 1667 1668 panic("Inconsistency detected while adding %" PRIs " pages of used " 1669 "space at %p.", count, page); 1741 1742 panic("Inconsistency detected while adding %zu pages of used " 1743 "space at %p.", count, (void *) page); 1744 1745 success: 1746 area->resident += count; 1747 return true; 1670 1748 } 1671 1749 … … 1674 1752 * The address space area must be already locked. 1675 1753 * 1676 * @param a Address space area. 1677 * @param page First page to be marked. 1678 * @param count Number of page to be marked. 1679 * 1680 * @return Zero on failure and non-zero on success. 1681 */ 1682 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1683 { 1684 btree_node_t *leaf, *node; 1685 size_t pages; 1686 unsigned int i; 1687 1754 * @param area Address space area. 1755 * @param page First page to be marked. 1756 * @param count Number of page to be marked. 1757 * 1758 * @return False on failure or true on success. 1759 * 1760 */ 1761 bool used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1762 { 1763 ASSERT(mutex_locked(&area->lock)); 1688 1764 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1689 1765 ASSERT(count); 1690 1691 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1766 1767 btree_node_t *leaf; 1768 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1692 1769 if (pages) { 1693 1770 /* … … 1695 1772 */ 1696 1773 if (count > pages) { 1697 return 0;1774 return false; 1698 1775 } else if (count == pages) { 1699 btree_remove(&a ->used_space, page, leaf);1700 return 1;1776 btree_remove(&area->used_space, page, leaf); 1777 goto success; 1701 1778 } else { 1702 1779 /* … … 1704 1781 * Decrease its size and relocate its start address. 
             */
+           btree_key_t i;
            for (i = 0; i < leaf->keys; i++) {
                if (leaf->key[i] == page) {
                    leaf->key[i] += count * PAGE_SIZE;
                    leaf->value[i] -= count;
-                   return 1;
+                   goto success;
                }
            }
+
            goto error;
        }
    }
-
-   node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
-   if (node && page < leaf->key[0]) {
+
+   btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
+   if ((node) && (page < leaf->key[0])) {
        uintptr_t left_pg = node->key[node->keys - 1];
        size_t left_cnt = (size_t) node->value[node->keys - 1];
 
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
…
                 */
                node->value[node->keys - 1] -= count;
-               return 1;
+               goto success;
            } else if (page + count * PAGE_SIZE <
                left_pg + left_cnt*PAGE_SIZE) {
-               size_t new_cnt;
-
                /*
                 * The interval is contained in the rightmost
…
                 * new interval.
                 */
-               new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+               size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                node->value[node->keys - 1] -= count + new_cnt;
-               btree_insert(&a->used_space, page +
+               btree_insert(&area->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-               return 1;
+               goto success;
            }
        }
-       return 0;
-   } else if (page < leaf->key[0]) {
-       return 0;
-   }
+
+       return false;
+   } else if (page < leaf->key[0])
+       return false;
 
    if (page > leaf->key[leaf->keys - 1]) {
        uintptr_t left_pg = leaf->key[leaf->keys - 1];
        size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
            count * PAGE_SIZE)) {
            if (page + count * PAGE_SIZE ==
                left_pg + left_cnt * PAGE_SIZE) {
…
                 */
                leaf->value[leaf->keys - 1] -= count;
-               return 1;
+               goto success;
            } else if (page + count * PAGE_SIZE < left_pg +
                left_cnt * PAGE_SIZE) {
-               size_t new_cnt;
-
                /*
                 * The interval is contained in the rightmost
…
                 * interval.
                 */
-               new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+               size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                leaf->value[leaf->keys - 1] -= count + new_cnt;
-               btree_insert(&a->used_space, page +
+               btree_insert(&area->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-               return 1;
+               goto success;
            }
        }
-       return 0;
-   }
+
+       return false;
+   }
 
    /*
     * The border cases have been already resolved.
     * Now the interval can be only between intervals of the leaf.
     */
+   btree_key_t i;
    for (i = 1; i < leaf->keys - 1; i++) {
        if (page < leaf->key[i]) {
            uintptr_t left_pg = leaf->key[i - 1];
            size_t left_cnt = (size_t) leaf->value[i - 1];
 
            /*
             * Now the interval is between intervals corresponding
…
                     */
                    leaf->value[i - 1] -= count;
-                   return 1;
+                   goto success;
                } else if (page + count * PAGE_SIZE <
                    left_pg + left_cnt * PAGE_SIZE) {
-                   size_t new_cnt;
-
                    /*
                     * The interval is contained in the
…
                     * also inserting a new interval.
                     */
-                   new_cnt = ((left_pg +
+                   size_t new_cnt = ((left_pg +
                        left_cnt * PAGE_SIZE) -
                        (page + count * PAGE_SIZE)) >>
                        PAGE_WIDTH;
                    leaf->value[i - 1] -= count + new_cnt;
-                   btree_insert(&a->used_space, page +
+                   btree_insert(&area->used_space, page +
                        count * PAGE_SIZE, (void *) new_cnt,
                        leaf);
-                   return 1;
+                   goto success;
                }
            }
-           return 0;
-       }
-   }
-
+
+           return false;
+       }
+   }
+
 error:
-   panic("Inconsistency detected while removing %" PRIs " pages of used "
-       "space from %p.", count, page);
-}
-
-/** Remove reference to address space area share info.
- *
- * If the reference count drops to 0, the sh_info is deallocated.
- *
- * @param sh_info Pointer to address space area share info.
- */
-void sh_info_remove_reference(share_info_t *sh_info)
-{
-   bool dealloc = false;
-
-   mutex_lock(&sh_info->lock);
-   ASSERT(sh_info->refcount);
-   if (--sh_info->refcount == 0) {
-       dealloc = true;
-       link_t *cur;
-
-       /*
-        * Now walk carefully the pagemap B+tree and free/remove
-        * reference from all frames found there.
-        */
-       for (cur = sh_info->pagemap.leaf_head.next;
-           cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
-           btree_node_t *node;
-           unsigned int i;
-
-           node = list_get_instance(cur, btree_node_t, leaf_link);
-           for (i = 0; i < node->keys; i++)
-               frame_free((uintptr_t) node->value[i]);
-       }
-
-   }
-   mutex_unlock(&sh_info->lock);
-
-   if (dealloc) {
-       btree_destroy(&sh_info->pagemap);
-       free(sh_info);
-   }
+   panic("Inconsistency detected while removing %zu pages of used "
+       "space from %p.", count, (void *) page);
+
+success:
+   area->resident -= count;
+   return true;
 }
 
…
 
 /** Wrapper for as_area_create(). */
-unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
+sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
 {
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
-       return (unative_t) address;
+       return (sysarg_t) address;
    else
-       return (unative_t) -1;
+       return (sysarg_t) -1;
 }
 
 /** Wrapper for as_area_resize(). */
-unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)
-{
-   return (unative_t) as_area_resize(AS, address, size, 0);
+sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
+{
+   return (sysarg_t) as_area_resize(AS, address, size, 0);
 }
 
 /** Wrapper for as_area_change_flags().
  */
-unative_t sys_as_area_change_flags(uintptr_t address, int flags)
-{
-   return (unative_t) as_area_change_flags(AS, flags, address);
+sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
+{
+   return (sysarg_t) as_area_change_flags(AS, flags, address);
 }
 
 /** Wrapper for as_area_destroy(). */
-unative_t sys_as_area_destroy(uintptr_t address)
-{
-   return (unative_t) as_area_destroy(AS, address);
+sysarg_t sys_as_area_destroy(uintptr_t address)
+{
+   return (sysarg_t) as_area_destroy(AS, address);
+}
+
+/** Get list of adress space areas.
+ *
+ * @param as    Address space.
+ * @param obuf  Place to save pointer to returned buffer.
+ * @param osize Place to save size of returned buffer.
+ *
+ */
+void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize)
+{
+   mutex_lock(&as->lock);
+
+   /* First pass, count number of areas. */
+
+   size_t area_cnt = 0;
+   link_t *cur;
+
+   for (cur = as->as_area_btree.leaf_head.next;
+       cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+       btree_node_t *node =
+           list_get_instance(cur, btree_node_t, leaf_link);
+       area_cnt += node->keys;
+   }
+
+   size_t isize = area_cnt * sizeof(as_area_info_t);
+   as_area_info_t *info = malloc(isize, 0);
+
+   /* Second pass, record data. */
+
+   size_t area_idx = 0;
+
+   for (cur = as->as_area_btree.leaf_head.next;
+       cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+       btree_node_t *node =
+           list_get_instance(cur, btree_node_t, leaf_link);
+       btree_key_t i;
+
+       for (i = 0; i < node->keys; i++) {
+           as_area_t *area = node->value[i];
+
+           ASSERT(area_idx < area_cnt);
+           mutex_lock(&area->lock);
+
+           info[area_idx].start_addr = area->base;
+           info[area_idx].size = FRAMES2SIZE(area->pages);
+           info[area_idx].flags = area->flags;
+           ++area_idx;
+
+           mutex_unlock(&area->lock);
+       }
+   }
+
+   mutex_unlock(&as->lock);
+
+   *obuf = info;
+   *osize = isize;
 }
 
 /** Print out information about address space.
  *
  * @param as Address space.
+ *
  */
 void as_print(as_t *as)
 {
-   ipl_t ipl;
-
-   ipl = interrupts_disable();
    mutex_lock(&as->lock);
 
…
    for (cur = as->as_area_btree.leaf_head.next;
        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-       btree_node_t *node;
-
-       node = list_get_instance(cur, btree_node_t, leaf_link);
-
-       unsigned int i;
+       btree_node_t *node
+           = list_get_instance(cur, btree_node_t, leaf_link);
+       btree_key_t i;
+
        for (i = 0; i < node->keys; i++) {
            as_area_t *area = node->value[i];
 
            mutex_lock(&area->lock);
-           printf("as_area: %p, base=%p, pages=%" PRIs
-               " (%p - %p)\n", area, area->base, area->pages,
-               area->base, area->base + FRAMES2SIZE(area->pages));
+           printf("as_area: %p, base=%p, pages=%zu"
+               " (%p - %p)\n", area, (void *) area->base,
+               area->pages, (void *) area->base,
+               (void *) (area->base + FRAMES2SIZE(area->pages)));
            mutex_unlock(&area->lock);
        }
…
 
    mutex_unlock(&as->lock);
-   interrupts_restore(ipl);
 }
 
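
Most of the churn in used_space_insert() above is mechanical (the parameter rename a -> area, return 0/1 becoming return false / goto success), but the interval arithmetic it runs through is easy to lose in the diff. The following stand-alone sketch models only that decision: how a new run of pages relates to its nearest used intervals on the left and right. The overlaps() definition, the PAGE_SIZE value and the case labels are assumptions made for illustration; only the shape of the else-if ladder is taken from the code shown above.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Half-open interval overlap test, modelled on the overlaps() calls above. */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

/* How can [page, page + count pages) be recorded, given the closest used
 * interval on the left (left_pg, left_cnt pages) and on the right
 * (right_pg, right_cnt pages)?  Mirrors the ladder in used_space_insert(). */
static const char *classify(uintptr_t page, size_t count,
    uintptr_t left_pg, size_t left_cnt, uintptr_t right_pg, size_t right_cnt)
{
    if (overlaps(page, count * PAGE_SIZE, left_pg, left_cnt * PAGE_SIZE))
        return "fail: intersects the left interval";
    if (overlaps(page, count * PAGE_SIZE, right_pg, right_cnt * PAGE_SIZE))
        return "fail: intersects the right interval";
    if ((page == left_pg + left_cnt * PAGE_SIZE) &&
        (page + count * PAGE_SIZE == right_pg))
        return "merge: join left and right into one interval";
    if (page == left_pg + left_cnt * PAGE_SIZE)
        return "grow: extend the left interval by count";
    if (page + count * PAGE_SIZE == right_pg)
        return "grow: move the right interval's base down";
    return "insert: record a new independent interval";
}

int main(void)
{
    /* Left interval: 2 pages at 0x10000; right interval: 1 page at 0x14000. */
    uintptr_t left_pg = 0x10000, right_pg = 0x14000;
    size_t left_cnt = 2, right_cnt = 1;

    /* Two pages at 0x12000 exactly fill the gap: both neighbours merge. */
    printf("%s\n", classify(0x12000, 2, left_pg, left_cnt, right_pg, right_cnt));
    /* One page at 0x13000 only adjoins the right neighbour. */
    printf("%s\n", classify(0x13000, 1, left_pg, left_cnt, right_pg, right_cnt));
    return 0;
}

This also shows why every successful branch in the kernel function can funnel into a single success: label: whichever of the merge/grow/insert cases applies, exactly count pages become used, so the new area->resident counter is adjusted once, in one place.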
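
as_get_area_info() is new in this changeset: it snapshots the area list in two passes (count, then copy) while holding the address space and per-area locks, and hands ownership of the allocated buffer to the caller through obuf/osize. A hedged sketch of how a caller might consume that contract follows; dump_areas() and the printf formats are hypothetical, only as_get_area_info() and the start_addr/size/flags fields come from the diff above.

/* Hypothetical in-kernel debugging helper, not part of the changeset. */
static void dump_areas(as_t *as)
{
    as_area_info_t *info;
    size_t size;

    /* The callee allocates the buffer; the caller is expected to free it. */
    as_get_area_info(as, &info, &size);

    size_t count = size / sizeof(as_area_info_t);
    for (size_t i = 0; i < count; i++) {
        printf("area %zu: base=%p size=%zu flags=%u\n", i,
            (void *) info[i].start_addr, (size_t) info[i].size,
            (unsigned int) info[i].flags);
    }

    free(info);
}

The two passes avoid guessing the buffer size up front, and because the address space lock is held across both passes, the count obtained in the first pass cannot go stale before the second pass fills the buffer.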