Changeset b5e68c8 in mainline for kernel/generic/src/mm/as.c

- Timestamp: 2011-05-12T16:49:44Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f36787d7
- Parents: e80329d6 (diff), 750636a (diff)
- Files: 1 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Legend:
- Unmodified (context, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (parent re80329d6)
+++ kernel/generic/src/mm/as.c (rb5e68c8)

@@ -71 +71 @@
 #include <memstr.h>
 #include <macros.h>
+#include <bitops.h>
 #include <arch.h>
 #include <errno.h>
@@ -79 +80 @@
 #include <arch/interrupt.h>

-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-
 /**
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
- *
  */
 as_operations_t *as_operations = NULL;

-/**
- * Slab for as_t objects.
+/** Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;

-/**
- * This lock serializes access to the ASID subsystem.
- * It protects:
+/** ASID subsystem lock.
+ *
+ * This lock protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
@@ -107 +102 @@

 /**
- * This list contains address spaces that are not active on any
- * processor and that have valid ASID.
- *
+ * Inactive address spaces (on all processors)
+ * that have valid ASID.
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
@@ -123 +117 @@
     mutex_initialize(&as->lock, MUTEX_PASSIVE);

-    int rc = as_constructor_arch(as, flags);
-
-    return rc;
+    return as_constructor_arch(as, flags);
 }

 NO_TRACE static size_t as_destructor(void *obj)
 {
-    as_t *as = (as_t *) obj;
-    return as_destructor_arch(as);
+    return as_destructor_arch((as_t *) obj);
 }

@@ -146 +137 @@
         panic("Cannot create kernel address space.");

-    /* Make sure the kernel address space
+    /*
+     * Make sure the kernel address space
      * reference count never drops to zero.
      */
@@ -195 +187 @@
 {
     DEADLOCK_PROBE_INIT(p_asidlock);

     ASSERT(as != AS);
     ASSERT(atomic_get(&as->refcount) == 0);
@@ -203 +195 @@
      * lock its mutex.
      */

     /*
      * We need to avoid deadlock between TLB shootdown and asidlock.
@@ -210 +202 @@
      * disabled to prevent nested context switches. We also depend on the
      * fact that so far no spinlocks are held.
-     *
      */
     preemption_disable();
@@ -235 +226 @@
     spinlock_unlock(&asidlock);
     interrupts_restore(ipl);


     /*
…
      * The B+tree must be walked carefully because it is
      * also being destroyed.
-     *
      */
     bool cond = true;
@@ -268 +258 @@
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction of that address
- * space.
+ * Holding a reference to an address space prevents destruction
+ * of that address space.
  *
  * @param as Address space to be held.
@@ -281 +271 @@
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space destroys the address
- * space.
+ * The last one to release a reference to an address space
+ * destroys the address space.
  *
  * @param as Address space to be released.
@@ -295 +285 @@
 /** Check area conflicts with other areas.
  *
- * @param as         Address space.
- * @param va         Starting virtual address of the area being tested.
- * @param size       Size of the area being tested.
- * @param avoid_area Do not touch this area.
+ * @param as    Address space.
+ * @param addr  Starting virtual address of the area being tested.
+ * @param count Number of pages in the area being tested.
+ * @param avoid Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
  *
  */
-NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
-    as_area_t *avoid_area)
-{
+NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
+    size_t count, as_area_t *avoid)
+{
+    ASSERT((addr % PAGE_SIZE) == 0);
     ASSERT(mutex_locked(&as->lock));

     /*
      * We don't want any area to have conflicts with NULL page.
-     *
-     */
-    if (overlaps(va, size, NULL, PAGE_SIZE))
+     */
+    if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
         return false;

@@ -321 +311 @@
      * record in the left neighbour, the leftmost record in the right
      * neighbour and all records in the leaf node itself.
-     *
      */
     btree_node_t *leaf;
     as_area_t *area =
-        (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
     if (area) {
-        if (area != avoid_area)
+        if (area != avoid)
             return false;
     }
@@ -337 +326 @@
         area = (as_area_t *) node->value[node->keys - 1];

-        mutex_lock(&area->lock);
-
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
             mutex_unlock(&area->lock);
-            return false;
-        }
-
-        mutex_unlock(&area->lock);
+        }
     }
@@ -351 +343 @@
         area = (as_area_t *) node->value[0];

-        mutex_lock(&area->lock);
-
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
             mutex_unlock(&area->lock);
-            return false;
-        }
-
-        mutex_unlock(&area->lock);
+        }
     }
@@ -366 +361 @@
         area = (as_area_t *) leaf->value[i];

-        if (area == avoid_area)
+        if (area == avoid)
             continue;

         mutex_lock(&area->lock);

-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (overlaps(addr, count << PAGE_WIDTH,
+            area->base, area->pages << PAGE_WIDTH)) {
             mutex_unlock(&area->lock);
             return false;
@@ -382 +378 @@
      * So far, the area does not conflict with other areas.
      * Check if it doesn't conflict with kernel address space.
-     *
      */
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(va, size,
+        return !overlaps(addr, count << PAGE_WIDTH,
             KERNEL_ADDRESS_SPACE_START,
             KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
@@ -412 +407 @@
     mem_backend_data_t *backend_data)
 {
-    if (base % PAGE_SIZE)
+    if ((base % PAGE_SIZE) != 0)
         return NULL;

-    if (!size)
+    if (size == 0)
         return NULL;
+
+    size_t pages = SIZE2FRAMES(size);

     /* Writeable executable areas are not supported.
      */
@@ -424 +421 @@
     mutex_lock(&as->lock);

-    if (!check_area_conflicts(as, base, size, NULL)) {
+    if (!check_area_conflicts(as, base, pages, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
@@ -436 +433 @@
     area->flags = flags;
     area->attributes = attrs;
-    area->pages = SIZE2FRAMES(size);
+    area->pages = pages;
+    area->resident = 0;
     area->base = base;
     area->sh_info = NULL;
@@ -445 +443 @@
     else
         memsetb(&area->backend_data, sizeof(area->backend_data), 0);
+
+    if (area->backend && area->backend->create) {
+        if (!area->backend->create(area)) {
+            free(area);
+            mutex_unlock(&as->lock);
+            return NULL;
+        }
+    }

     btree_create(&area->used_space);
@@ -479 +485 @@
      * to find out whether this is a miss or va belongs to an address
      * space area found there.
-     *
      */

@@ -490 +495 @@
         mutex_lock(&area->lock);

-        if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
+        if ((area->base <= va) &&
+            (va < area->base + (area->pages << PAGE_WIDTH)))
             return area;

@@ -499 +505 @@
      * Second, locate the left neighbour and test its last record.
      * Because of its position in the B+tree, it must have base < va.
-     *
      */
     btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
@@ -507 +512 @@
         mutex_lock(&area->lock);

-        if (va < area->base + area->pages * PAGE_SIZE)
+        if (va < area->base + (area->pages << PAGE_WIDTH))
             return area;

@@ -534 +539 @@
     /*
      * Locate the area.
-     *
      */
     as_area_t *area = find_area_and_lock(as, address);
@@ -546 +550 @@
          * Remapping of address space areas associated
          * with memory mapped devices is not supported.
-         *
          */
         mutex_unlock(&area->lock);
@@ -557 +560 @@
          * Remapping of shared address space areas
          * is not supported.
-         *
          */
         mutex_unlock(&area->lock);
@@ -568 +570 @@
         /*
          * Zero size address space areas are not allowed.
-         *
          */
         mutex_unlock(&area->lock);
@@ -576 +577 @@

     if (pages < area->pages) {
-        uintptr_t start_free = area->base + pages * PAGE_SIZE;
+        uintptr_t start_free = area->base + (pages << PAGE_WIDTH);

         /*
          * Shrinking the area.
          * No need to check for overlaps.
-         *
          */

@@ -588 +588 @@
         /*
          * Start TLB shootdown sequence.
-         *
          */
         ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-            area->base + pages * PAGE_SIZE, area->pages - pages);
+            area->base + (pages << PAGE_WIDTH), area->pages - pages);

         /*
@@ -599 +598 @@
          * is also the right way to remove part of the used_space
          * B+tree leaf list.
-         *
          */
         bool cond = true;
@@ -615 +613 @@
             size_t i = 0;

-            if (overlaps(ptr, size * PAGE_SIZE, area->base,
-                pages * PAGE_SIZE)) {
+            if (overlaps(ptr, size << PAGE_WIDTH, area->base,
+                pages << PAGE_WIDTH)) {

-                if (ptr + size * PAGE_SIZE <= start_free) {
+                if (ptr + (size << PAGE_WIDTH) <= start_free) {
                     /*
                      * The whole interval fits
                      * completely in the resized
                      * address space area.
-                     *
                      */
                     break;
@@ -632 +629 @@
                  * to b and c overlaps with the resized
                  * address space area.
-                 *
                  */

@@ -652 +648 @@
             for (; i < size; i++) {
                 pte_t *pte = page_mapping_find(as, ptr +
-                    i * PAGE_SIZE);
+                    (i << PAGE_WIDTH));

                 ASSERT(pte);
@@ -661 +657 @@
                     (area->backend->frame_free)) {
                     area->backend->frame_free(area,
-                        ptr + i * PAGE_SIZE,
+                        ptr + (i << PAGE_WIDTH),
                         PTE_GET_FRAME(pte));
                 }

                 page_mapping_remove(as, ptr +
-                    i * PAGE_SIZE);
+                    (i << PAGE_WIDTH));
             }
         }
@@ -673 +669 @@
         /*
          * Finish TLB shootdown sequence.
-         *
-         */
-
-        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
+         */
+
+        tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
             area->pages - pages);

         /*
          * Invalidate software translation caches (e.g. TSB on sparc64).
-         *
          */
         as_invalidate_translation_cache(as, area->base +
-            pages * PAGE_SIZE, area->pages - pages);
+            (pages << PAGE_WIDTH), area->pages - pages);
         tlb_shootdown_finalize(ipl);

@@ -692 +686 @@
          * Growing the area.
          * Check for overlaps with other address space areas.
-         *
-         */
-        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
-            area)) {
+         */
+        if (!check_area_conflicts(as, address, pages, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
             return EADDRNOTAVAIL;
+        }
+    }
+
+    if (area->backend && area->backend->resize) {
+        if (!area->backend->resize(area, pages)) {
+            mutex_unlock(&area->lock);
+            mutex_unlock(&as->lock);
+            return ENOMEM;
         }
     }
@@ -768 +768 @@
         return ENOENT;
     }
+
+    if (area->backend && area->backend->destroy)
+        area->backend->destroy(area);

     uintptr_t base = area->base;
@@ -794 +797 @@

             for (size = 0; size < (size_t) node->value[i]; size++) {
-                pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));

                 ASSERT(pte);
@@ -803 +807 @@
                     (area->backend->frame_free)) {
                     area->backend->frame_free(area,
-                        ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
+                        ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
                 }

-                page_mapping_remove(as, ptr + size * PAGE_SIZE);
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
         }
@@ -813 +817 @@
     /*
      * Finish TLB shootdown sequence.
-     *
      */

@@ -821 +824 @@
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
-     *
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
@@ -839 +841 @@
     /*
      * Remove the empty area from address space.
-     *
      */
     btree_remove(&as->as_area_btree, base, NULL);
@@ -881 +882 @@
         /*
          * Could not find the source address space area.
-         *
          */
         mutex_unlock(&src_as->lock);
@@ -891 +891 @@
          * There is no backend or the backend does not
          * know how to share the area.
-         *
          */
         mutex_unlock(&src_area->lock);
@@ -898 +897 @@
     }

-    size_t src_size = src_area->pages * PAGE_SIZE;
+    size_t src_size = src_area->pages << PAGE_WIDTH;
     unsigned int src_flags = src_area->flags;
     mem_backend_t *src_backend = src_area->backend;
@@ -918 +917 @@
      * First, prepare the area for sharing.
      * Then it will be safe to unlock it.
-     *
      */
     share_info_t *sh_info = src_area->sh_info;
@@ -930 +928 @@
         /*
          * Call the backend to setup sharing.
-         *
          */
         src_area->backend->share(src_area);
@@ -949 +946 @@
      * The flags of the source area are masked against dst_flags_mask
      * to support sharing in less privileged mode.
-     *
      */
     as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
@@ -966 +962 @@
      * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
      * attribute and set the sh_info.
-     *
      */
     mutex_lock(&dst_as->lock);
@@ -989 +984 @@
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
+    ASSERT(mutex_locked(&area->lock));
+
     int flagmap[] = {
         [PF_ACCESS_READ] = AS_AREA_READ,
         [PF_ACCESS_WRITE] = AS_AREA_WRITE,
         [PF_ACCESS_EXEC] = AS_AREA_EXEC
     };
-
-    ASSERT(mutex_locked(&area->lock));

     if (!(area->flags & flagmap[access]))
@@ -1066 +1061 @@
     /*
      * Compute total number of used pages in the used_space B+tree
-     *
      */
     size_t used_pages = 0;
@@ -1088 +1082 @@
     /*
      * Start TLB shootdown sequence.
-     *
      */
     ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
@@ -1096 +1089 @@
      * Remove used pages from page tables and remember their frame
      * numbers.
-     *
      */
     size_t frame_idx = 0;
@@ -1111 +1103 @@

             for (size = 0; size < (size_t) node->value[i]; size++) {
-                pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));

                 ASSERT(pte);
@@ -1120 +1113 @@

                 /* Remove old mapping */
-                page_mapping_remove(as, ptr + size * PAGE_SIZE);
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
         }
@@ -1127 +1120 @@
     /*
      * Finish TLB shootdown sequence.
-     *
      */

@@ -1135 +1127 @@
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
-     *
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
@@ -1168 +1159 @@

             /* Insert the new mapping */
-            page_mapping_insert(as, ptr + size * PAGE_SIZE,
+            page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
                 old_frame[frame_idx++], page_flags);

@@ -1217 +1208 @@
          * No area contained mapping for 'page'.
          * Signal page fault to low-level handler.
-         *
          */
         mutex_unlock(&AS->lock);
@@ -1237 +1227 @@
          * The address space area is not backed by any backend
          * or the backend cannot handle page faults.
-         *
          */
         mutex_unlock(&area->lock);
@@ -1249 +1238 @@
      * To avoid race condition between two page faults on the same address,
      * we need to make sure the mapping has not been already inserted.
-     *
      */
     pte_t *pte;
@@ -1267 +1255 @@
     /*
      * Resort to the backend page fault handler.
-     *
      */
     if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
@@ -1322 +1309 @@
          * preemption is disabled. We should not be
          * holding any other lock.
-         *
          */
         (void) interrupts_enable();
@@ -1342 +1328 @@
          * list of inactive address spaces with assigned
          * ASID.
-         *
          */
         ASSERT(old_as->asid != ASID_INVALID);
@@ -1353 +1338 @@
          * Perform architecture-specific tasks when the address space
          * is being removed from the CPU.
-         *
          */
         as_deinstall_arch(old_as);

@@ -1360 +1344 @@
     /*
      * Second, prepare the new address space.
-     *
      */
     if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
@@ -1376 +1359 @@
      * Perform architecture-specific steps.
      * (e.g. write ASID to hardware register etc.)
-     *
      */
     as_install_arch(new_as);
@@ -1395 +1377 @@
 {
     ASSERT(mutex_locked(&area->lock));

     return area_flags_to_page_flags(area->flags);
 }
@@ -1499 +1481 @@

     if (src_area) {
-        size = src_area->pages * PAGE_SIZE;
+        size = src_area->pages << PAGE_WIDTH;
         mutex_unlock(&src_area->lock);
     } else
@@ -1516 +1498 @@
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
     ASSERT(mutex_locked(&area->lock));
@@ -1530 +1512 @@
         /*
          * We hit the beginning of some used space.
-         *
-         */
-        return 0;
+         */
+        return false;
     }

     if (!leaf->keys) {
         btree_insert(&area->used_space, page, (void *) count, leaf);
-        return 1;
+        goto success;
     }

@@ -1551 +1532 @@
          * somewhere between the rightmost interval of
          * the left neigbour and the first interval of the leaf.
-         *
          */

         if (page >= right_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+            return false;
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+            return false;
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
-             *
              */
             node->value[node->keys - 1] += count + right_cnt;
             btree_remove(&area->used_space, right_pg, leaf);
-            return 1;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            goto success;
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
              * interval.
-             *
              */
             node->value[node->keys - 1] += count;
-            return 1;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            goto success;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
              * the right interval down and increasing its size
              * accordingly.
-             *
              */
             leaf->value[0] += count;
             leaf->key[0] = page;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval is between both neigbouring intervals,
              * but cannot be merged with any of them.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     } else if (page < leaf->key[0]) {
@@ -1609 +1585 @@
          * Investigate the border case in which the left neighbour does
          * not exist but the interval fits from the left.
-         *
-         */
-
-        if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+         */
+
+        if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            return false;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be added by moving the base of the
              * right interval down and increasing its size
              * accordingly.
-             *
              */
             leaf->key[0] = page;
             leaf->value[0] += count;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval doesn't adjoin with the right interval.
              * It must be added individually.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     }
@@ -1649 +1622 @@
          * somewhere between the leftmost interval of
          * the right neigbour and the last interval of the leaf.
-         *
          */

         if (page < left_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+            return false;
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
-            return 0;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+            return false;
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
-             *
              */
             leaf->value[leaf->keys - 1] += count + right_cnt;
             btree_remove(&area->used_space, right_pg, node);
-            return 1;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            goto success;
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
              * interval.
-             *
              */
-            leaf->value[leaf->keys - 1] += count;
-            return 1;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+            leaf->value[leaf->keys - 1] += count;
+            goto success;
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
              * the right interval down and increasing its size
              * accordingly.
-             *
              */
             node->value[0] += count;
             node->key[0] = page;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval is between both neigbouring intervals,
              * but cannot be merged with any of them.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     } else if (page >= leaf->key[leaf->keys - 1]) {
@@ -1707 +1675 @@
          * Investigate the border case in which the right neighbour
          * does not exist but the interval fits from the right.
-         *
-         */
-
-        if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+         */
+
+        if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
-            return 0;
-        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
+            return false;
+        } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
             /*
              * The interval can be added by growing the left
              * interval.
-             *
              */
             leaf->value[leaf->keys - 1] += count;
-            return 1;
+            goto success;
         } else {
             /*
              * The interval doesn't adjoin with the left interval.
              * It must be added individually.
-             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            return 1;
+            goto success;
         }
     }
@@ -1738 +1703 @@
      * only between two other intervals of the leaf. The two border cases
      * were already resolved.
-     *
      */
     btree_key_t i;
@@ -1750 +1714 @@
             /*
              * The interval fits between left_pg and right_pg.
-             *
              */

-            if (overlaps(page, count * PAGE_SIZE, left_pg,
-                left_cnt * PAGE_SIZE)) {
+            if (overlaps(page, count << PAGE_WIDTH, left_pg,
+                left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the left
                  * interval.
-                 *
                  */
-                return 0;
-            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-                right_cnt * PAGE_SIZE)) {
+                return false;
+            } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+                right_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the right
                  * interval.
-                 *
                  */
-                return 0;
-            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-                (page + count * PAGE_SIZE == right_pg)) {
+                return false;
+            } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+                (page + (count << PAGE_WIDTH) == right_pg)) {
                 /*
                  * The interval can be added by merging the two
                  * already present intervals.
-                 *
                  */
                 leaf->value[i - 1] += count + right_cnt;
                 btree_remove(&area->used_space, right_pg, leaf);
-                return 1;
-            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+                goto success;
+            } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval can be added by simply growing
                  * the left interval.
-                 *
                  */
                 leaf->value[i - 1] += count;
-                return 1;
-            } else if (page + count * PAGE_SIZE == right_pg) {
+                goto success;
+            } else if (page + (count << PAGE_WIDTH) == right_pg) {
                 /*
                  * The interval can be addded by simply moving
                  * base of the right interval down and
                  * increasing its size accordingly.
-                 *
                  */
                 leaf->value[i] += count;
                 leaf->key[i] = page;
-                return 1;
+                goto success;
             } else {
                 /*
                  * The interval is between both neigbouring
                  * intervals, but cannot be merged with any of
                  * them.
-                 *
                  */
                 btree_insert(&area->used_space, page,
                     (void *) count, leaf);
-                return 1;
+                goto success;
             }
         }
     }

-    panic("Inconsistency detected while adding %" PRIs " pages of used "
-        "space at %p.", count, page);
+    panic("Inconsistency detected while adding %zu pages of used "
+        "space at %p.", count, (void *) page);
+
+success:
+    area->resident += count;
+    return true;
 }

@@ -1823 +1784 @@
  * @param count Number of page to be marked.
  *
- * @return Zero on failure and non-zero on success.
- *
- */
-int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+ * @return False on failure or true on success.
+ *
+ */
+bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
     ASSERT(mutex_locked(&area->lock));
@@ -1837 +1798 @@
         /*
          * We are lucky, page is the beginning of some interval.
-         *
          */
         if (count > pages) {
-            return 0;
+            return false;
         } else if (count == pages) {
             btree_remove(&area->used_space, page, leaf);
-            return 1;
+            goto success;
         } else {
             /*
              * Find the respective interval.
              * Decrease its size and relocate its start address.
-             *
              */
             btree_key_t i;
             for (i = 0; i < leaf->keys; i++) {
                 if (leaf->key[i] == page) {
-                    leaf->key[i] += count * PAGE_SIZE;
+                    leaf->key[i] += count << PAGE_WIDTH;
                     leaf->value[i] -= count;
-                    return 1;
+                    goto success;
                 }
             }
+
             goto error;
         }
@@ -1867 +1827 @@
         size_t left_cnt = (size_t) node->value[node->keys - 1];

-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt * PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * removed by updating the size of the bigger
                  * interval.
-                 *
                  */
                 node->value[node->keys - 1] -= count;
-                return 1;
-            } else if (page + count * PAGE_SIZE <
-                left_pg + left_cnt*PAGE_SIZE) {
+                goto success;
+            } else if (page + (count << PAGE_WIDTH) <
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * the original interval and also inserting a
                  * new interval.
-                 *
                  */
-                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
                 node->value[node->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-                return 1;
+                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+                goto success;
             }
         }
-        return 0;
+
+        return false;
     } else if (page < leaf->key[0])
-        return 0;
+        return false;

     if (page > leaf->key[leaf->keys - 1]) {
@@ -1906 +1865 @@
         size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];

-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt * PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
                  * interval of the leaf and can be removed by
                  * updating the size of the bigger interval.
-                 *
                  */
                 leaf->value[leaf->keys - 1] -= count;
-                return 1;
-            } else if (page + count * PAGE_SIZE < left_pg +
-                left_cnt * PAGE_SIZE) {
+                goto success;
+            } else if (page + (count << PAGE_WIDTH) < left_pg +
+                (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * original interval and also inserting a new
                  * interval.
-                 *
                  */
-                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
                 leaf->value[leaf->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-                return 1;
+                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+                goto success;
             }
         }
-        return 0;
+
+        return false;
     }

     /*
      * The border cases have been already resolved.
-     * Now the interval can be only between intervals of the leaf. 
+     * Now the interval can be only between intervals of the leaf.
      */
     btree_key_t i;
@@ -1953 +1911 @@
              * to (i - 1) and i.
              */
-            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-                count * PAGE_SIZE)) {
-                if (page + count * PAGE_SIZE ==
-                    left_pg + left_cnt*PAGE_SIZE) {
+            if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+                count << PAGE_WIDTH)) {
+                if (page + (count << PAGE_WIDTH) ==
+                    left_pg + (left_cnt << PAGE_WIDTH)) {
                     /*
                      * The interval is contained in the
…
                      * be removed by updating the size of
                      * the bigger interval.
-                     *
                      */
                     leaf->value[i - 1] -= count;
-                    return 1;
-                } else if (page + count * PAGE_SIZE <
-                    left_pg + left_cnt * PAGE_SIZE) {
+                    goto success;
+                } else if (page + (count << PAGE_WIDTH) <
+                    left_pg + (left_cnt << PAGE_WIDTH)) {
                     /*
                      * The interval is contained in the
…
                      */
                     size_t new_cnt = ((left_pg +
-                        left_cnt * PAGE_SIZE) -
-                        (page + count * PAGE_SIZE)) >>
+                        (left_cnt << PAGE_WIDTH)) -
+                        (page + (count << PAGE_WIDTH))) >>
                         PAGE_WIDTH;
                     leaf->value[i - 1] -= count + new_cnt;
                     btree_insert(&area->used_space, page +
-                        count * PAGE_SIZE, (void *) new_cnt,
+                        (count << PAGE_WIDTH), (void *) new_cnt,
                         leaf);
-                    return 1;
+                    goto success;
                 }
             }
-            return 0;
+
+            return false;
         }
     }

 error:
-    panic("Inconsistency detected while removing %" PRIs " pages of used "
-        "space from %p.", count, page);
+    panic("Inconsistency detected while removing %zu pages of used "
+        "space from %p.", count, (void *) page);
+
+success:
+    area->resident -= count;
+    return true;
 }

@@ -2000 +1962 @@

 /** Wrapper for as_area_create(). */
-unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
+sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
 {
     if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
         AS_AREA_ATTR_NONE, &anon_backend, NULL))
-        return (unative_t) address;
+        return (sysarg_t) address;
     else
-        return (unative_t) -1;
+        return (sysarg_t) -1;
 }

 /** Wrapper for as_area_resize(). */
-unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
-{
-    return (unative_t) as_area_resize(AS, address, size, 0);
+sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
+{
+    return (sysarg_t) as_area_resize(AS, address, size, 0);
 }

 /** Wrapper for as_area_change_flags(). */
-unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
-{
-    return (unative_t) as_area_change_flags(AS, flags, address);
+sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
+{
+    return (sysarg_t) as_area_change_flags(AS, flags, address);
 }

 /** Wrapper for as_area_destroy(). */
-unative_t sys_as_area_destroy(uintptr_t address)
-{
-    return (unative_t) as_area_destroy(AS, address);
+sysarg_t sys_as_area_destroy(uintptr_t address)
+{
+    return (sysarg_t) as_area_destroy(AS, address);
+}
+
+/** Return pointer to unmapped address space area
+ *
+ * @param base Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Pointer to the beginning of unmapped address space area.
+ *
+ */
+sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
+{
+    if (size == 0)
+        return 0;
+
+    /*
+     * Make sure we allocate from page-aligned
+     * address. Check for possible overflow in
+     * each step.
+     */
+
+    size_t pages = SIZE2FRAMES(size);
+    uintptr_t ret = 0;
+
+    /*
+     * Find the lowest unmapped address aligned on the sz
+     * boundary, not smaller than base and of the required size.
+     */
+
+    mutex_lock(&AS->lock);
+
+    /* First check the base address itself */
+    uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
+    if ((addr >= base) &&
+        (check_area_conflicts(AS, addr, pages, NULL)))
+        ret = addr;
+
+    /* Eventually check the addresses behind each area */
+    link_t *cur;
+    for (cur = AS->as_area_btree.leaf_head.next;
+        (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
+        cur = cur->next) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
+
+        btree_key_t i;
+        for (i = 0; (ret == 0) && (i < node->keys); i++) {
+            as_area_t *area = (as_area_t *) node->value[i];
+
+            mutex_lock(&area->lock);
+
+            uintptr_t addr =
+                ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
+                PAGE_SIZE);
+
+            if ((addr >= base) && (addr >= area->base) &&
+                (check_area_conflicts(AS, addr, pages, area)))
+                ret = addr;
+
+            mutex_unlock(&area->lock);
+        }
+    }
+
+    mutex_unlock(&AS->lock);
+
+    return (sysarg_t) ret;
 }

@@ -2093 +2121 @@
     mutex_lock(&as->lock);

-    /* print out info about address space areas */
+    /* Print out info about address space areas */
     link_t *cur;
     for (cur = as->as_area_btree.leaf_head.next;
@@ -2105 +2133 @@

         mutex_lock(&area->lock);
-        printf("as_area: %p, base=%p, pages=%" PRIs
-            " (%p - %p)\n", area, area->base, area->pages,
-            area->base, area->base + FRAMES2SIZE(area->pages));
+        printf("as_area: %p, base=%p, pages=%zu"
+            " (%p - %p)\n", area, (void *) area->base,
+            area->pages, (void *) area->base,
+            (void *) (area->base + FRAMES2SIZE(area->pages)));
         mutex_unlock(&area->lock);
     }
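Two ideas recur throughout this merge: byte sizes are now derived from page counts with shifts by PAGE_WIDTH instead of multiplications by PAGE_SIZE, and the new sys_as_get_unmapped_area() performs a first-fit scan over the existing areas. Both can be modelled outside the kernel. The sketch below is illustrative only: it assumes 4 KiB pages, replaces the kernel's as_area_btree with a plain sorted array, and the names area_t and find_unmapped() are hypothetical stand-ins, not HelenOS API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_WIDTH  12                     /* assumption: 4 KiB pages */
    #define PAGE_SIZE   ((uintptr_t) 1 << PAGE_WIDTH)

    /* Hypothetical stand-in for as_area_t: base address plus size in pages. */
    typedef struct {
        uintptr_t base;
        size_t pages;
    } area_t;

    /* The pattern the merge introduces: pages << PAGE_WIDTH instead of
     * pages * PAGE_SIZE when converting a page count to a byte size. */
    static size_t area_bytes(const area_t *a)
    {
        return a->pages << PAGE_WIDTH;
    }

    /* Half-open interval overlap test in the spirit of the kernel's overlaps(). */
    static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
    {
        return (s1 < s2 + sz2) && (s2 < s1 + sz1);
    }

    /* First-fit search modelled on sys_as_get_unmapped_area(): try the
     * page-aligned base first, then the address just past each existing
     * area. 'areas' is sorted by base; returns 0 when nothing fits. */
    static uintptr_t find_unmapped(const area_t *areas, size_t n,
        uintptr_t base, size_t pages)
    {
        uintptr_t addr = (base + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

        for (size_t i = 0; i <= n; i++) {
            bool conflict = false;
            for (size_t j = 0; j < n; j++) {
                if (overlaps(addr, pages << PAGE_WIDTH,
                    areas[j].base, area_bytes(&areas[j]))) {
                    conflict = true;
                    break;
                }
            }

            if ((!conflict) && (addr >= base))
                return addr;

            if (i < n)  /* next candidate: just past the i-th area */
                addr = areas[i].base + area_bytes(&areas[i]);
        }

        return 0;
    }

    int main(void)
    {
        area_t areas[] = {
            { 0x1000, 4 },   /* 0x1000 - 0x4fff */
            { 0x8000, 2 }    /* 0x8000 - 0x9fff */
        };

        /* Three pages at or above 0x1000: the base itself conflicts,
         * so the first fit is 0x5000, just past the first area. */
        printf("%#lx\n", (unsigned long) find_unmapped(areas, 2, 0x1000, 3));
        return 0;
    }

Compiled as an ordinary C99 program, this prints 0x5000: the aligned base collides with the first area, so the winner is the address just past it. That is the same candidate order the kernel routine walks under AS->lock, which is why it needs only one pass over the area B+tree.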