Changes in kernel/generic/src/mm/as.c [e394b736:97bdb4a] in mainline
- File: 1 edited
Legend:
- Unmodified (prefixed with a space)
- Added (prefixed with "+")
- Removed (prefixed with "-")
kernel/generic/src/mm/as.c
--- re394b736
+++ r97bdb4a

 #include <memstr.h>
 #include <macros.h>
-#include <bitops.h>
 #include <arch.h>
 #include <errno.h>
…
 #include <arch/interrupt.h>
 
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif /* CONFIG_VIRT_IDX_DCACHE */
+
 /**
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
+ *
  */
 as_operations_t *as_operations = NULL;
 
-/** Slab for as_t objects.
+/**
+ * Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;
 
-/** ASID subsystem lock.
- *
- * This lock protects:
+/**
+ * This lock serializes access to the ASID subsystem.
+ * It protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
…
 
 /**
- * Inactive address spaces (on all processors)
- * that have valid ASID.
+ * This list contains address spaces that are not active on any
+ * processor and that have valid ASID.
+ *
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
…
     mutex_initialize(&as->lock, MUTEX_PASSIVE);
 
-    return as_constructor_arch(as, flags);
+    int rc = as_constructor_arch(as, flags);
+
+    return rc;
 }
 
 NO_TRACE static size_t as_destructor(void *obj)
 {
-    return as_destructor_arch((as_t *) obj);
+    as_t *as = (as_t *) obj;
+    return as_destructor_arch(as);
 }
…
         panic("Cannot create kernel address space.");
 
-    /*
-     * Make sure the kernel address space
+    /* Make sure the kernel address space
      * reference count never drops to zero.
      */
…
 {
     DEADLOCK_PROBE_INIT(p_asidlock);
 
     ASSERT(as != AS);
     ASSERT(atomic_get(&as->refcount) == 0);
…
      * lock its mutex.
      */
 
     /*
      * We need to avoid deadlock between TLB shootdown and asidlock.
…
      * disabled to prevent nested context switches. We also depend on the
      * fact that so far no spinlocks are held.
+     *
      */
     preemption_disable();
…
     spinlock_unlock(&asidlock);
     interrupts_restore(ipl);
 
 
     /*
…
      * The B+tree must be walked carefully because it is
      * also being destroyed.
+     *
      */
     bool cond = true;
…
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction
- * of that address space.
+ * Holding a reference to an address space prevents destruction of that address
+ * space.
  *
  * @param as Address space to be held.
…
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space
- * destroys the address space.
+ * The last one to release a reference to an address space destroys the address
+ * space.
  *
  * @param as Address space to be released.
…
 /** Check area conflicts with other areas.
  *
- * @param as    Address space.
- * @param addr  Starting virtual address of the area being tested.
- * @param count Number of pages in the area being tested.
- * @param avoid Do not touch this area.
+ * @param as         Address space.
+ * @param va         Starting virtual address of the area being tested.
+ * @param size       Size of the area being tested.
+ * @param avoid_area Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
  *
  */
-NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-    size_t count, as_area_t *avoid)
-{
-    ASSERT((addr % PAGE_SIZE) == 0);
+NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
+    as_area_t *avoid_area)
+{
     ASSERT(mutex_locked(&as->lock));
 
     /*
      * We don't want any area to have conflicts with NULL page.
-     */
-    if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
+     *
+     */
+    if (overlaps(va, size, NULL, PAGE_SIZE))
         return false;
…
      * record in the left neighbour, the leftmost record in the right
      * neighbour and all records in the leaf node itself.
+     *
      */
     btree_node_t *leaf;
     as_area_t *area =
-        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
+        (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
     if (area) {
-        if (area != avoid)
+        if (area != avoid_area)
             return false;
     }
…
         area = (as_area_t *) node->value[node->keys - 1];
 
-        if (area != avoid) {
-            mutex_lock(&area->lock);
-
-            if (overlaps(addr, count << PAGE_WIDTH,
-                area->base, area->pages << PAGE_WIDTH)) {
-                mutex_unlock(&area->lock);
-                return false;
-            }
-
+        mutex_lock(&area->lock);
+
+        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
             mutex_unlock(&area->lock);
+            return false;
         }
+
+        mutex_unlock(&area->lock);
     }
…
         area = (as_area_t *) node->value[0];
 
-        if (area != avoid) {
-            mutex_lock(&area->lock);
-
-            if (overlaps(addr, count << PAGE_WIDTH,
-                area->base, area->pages << PAGE_WIDTH)) {
-                mutex_unlock(&area->lock);
-                return false;
-            }
-
+        mutex_lock(&area->lock);
+
+        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
             mutex_unlock(&area->lock);
+            return false;
         }
+
+        mutex_unlock(&area->lock);
     }
…
         area = (as_area_t *) leaf->value[i];
 
-        if (area == avoid)
+        if (area == avoid_area)
             continue;
 
         mutex_lock(&area->lock);
 
-        if (overlaps(addr, count << PAGE_WIDTH,
-            area->base, area->pages << PAGE_WIDTH)) {
+        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
             mutex_unlock(&area->lock);
             return false;
…
      * So far, the area does not conflict with other areas.
      * Check if it doesn't conflict with kernel address space.
+     *
      */
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(addr, count << PAGE_WIDTH,
+        return !overlaps(va, size,
             KERNEL_ADDRESS_SPACE_START,
             KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
…
     mem_backend_data_t *backend_data)
 {
-    if ((base % PAGE_SIZE) != 0)
+    if (base % PAGE_SIZE)
         return NULL;
 
-    if (size == 0)
+    if (!size)
         return NULL;
-
-    size_t pages = SIZE2FRAMES(size);
 
     /* Writeable executable areas are not supported.
      */
…
     mutex_lock(&as->lock);
 
-    if (!check_area_conflicts(as, base, pages, NULL)) {
+    if (!check_area_conflicts(as, base, size, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
…
     area->flags = flags;
     area->attributes = attrs;
-    area->pages = pages;
-    area->resident = 0;
+    area->pages = SIZE2FRAMES(size);
     area->base = base;
     area->sh_info = NULL;
…
     else
         memsetb(&area->backend_data, sizeof(area->backend_data), 0);
-
-    if (area->backend && area->backend->create) {
-        if (!area->backend->create(area)) {
-            free(area);
-            mutex_unlock(&as->lock);
-            return NULL;
-        }
-    }
 
     btree_create(&area->used_space);
…
      * to find out whether this is a miss or va belongs to an address
      * space area found there.
+     *
      */
 
…
         mutex_lock(&area->lock);
 
-        if ((area->base <= va) &&
-            (va < area->base + (area->pages << PAGE_WIDTH)))
+        if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
             return area;
…
      * Second, locate the left neighbour and test its last record.
      * Because of its position in the B+tree, it must have base < va.
+     *
      */
     btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
…
         mutex_lock(&area->lock);
 
-        if (va < area->base + (area->pages << PAGE_WIDTH))
+        if (va < area->base + area->pages * PAGE_SIZE)
             return area;
…
     /*
      * Locate the area.
+     *
      */
     as_area_t *area = find_area_and_lock(as, address);
…
          * Remapping of address space areas associated
          * with memory mapped devices is not supported.
+         *
          */
         mutex_unlock(&area->lock);
…
          * Remapping of shared address space areas
          * is not supported.
+         *
          */
         mutex_unlock(&area->lock);
…
         /*
          * Zero size address space areas are not allowed.
+         *
          */
         mutex_unlock(&area->lock);
…
 
     if (pages < area->pages) {
-        uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
+        uintptr_t start_free = area->base + pages * PAGE_SIZE;
 
         /*
          * Shrinking the area.
          * No need to check for overlaps.
+         *
          */
…
         /*
          * Start TLB shootdown sequence.
+         *
          */
         ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-            area->base + (pages << PAGE_WIDTH), area->pages - pages);
+            area->base + pages * PAGE_SIZE, area->pages - pages);
 
         /*
…
          * is also the right way to remove part of the used_space
          * B+tree leaf list.
+         *
          */
         bool cond = true;
…
                 size_t i = 0;
 
-                if (overlaps(ptr, size << PAGE_WIDTH, area->base,
-                    pages << PAGE_WIDTH)) {
+                if (overlaps(ptr, size * PAGE_SIZE, area->base,
+                    pages * PAGE_SIZE)) {
 
-                    if (ptr + (size << PAGE_WIDTH) <= start_free) {
+                    if (ptr + size * PAGE_SIZE <= start_free) {
                         /*
                          * The whole interval fits
                          * completely in the resized
                          * address space area.
+                         *
                          */
                         break;
…
                      * to b and c overlaps with the resized
                      * address space area.
+                     *
                      */
 
…
                 for (; i < size; i++) {
                     pte_t *pte = page_mapping_find(as, ptr +
-                        (i << PAGE_WIDTH));
+                        i * PAGE_SIZE);
 
                     ASSERT(pte);
…
                         (area->backend->frame_free)) {
                         area->backend->frame_free(area,
-                            ptr + (i << PAGE_WIDTH),
+                            ptr + i * PAGE_SIZE,
                             PTE_GET_FRAME(pte));
                     }
 
                     page_mapping_remove(as, ptr +
-                        (i << PAGE_WIDTH));
+                        i * PAGE_SIZE);
                 }
             }
…
         /*
          * Finish TLB shootdown sequence.
-         */
-
-        tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
+         *
+         */
+
+        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
             area->pages - pages);
 
         /*
          * Invalidate software translation caches (e.g. TSB on sparc64).
+         *
          */
         as_invalidate_translation_cache(as, area->base +
-            (pages << PAGE_WIDTH), area->pages - pages);
+            pages * PAGE_SIZE, area->pages - pages);
         tlb_shootdown_finalize(ipl);
 
…
          * Growing the area.
          * Check for overlaps with other address space areas.
-         */
-        if (!check_area_conflicts(as, address, pages, area)) {
+         *
+         */
+        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
+            area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
             return EADDRNOTAVAIL;
-        }
-    }
-
-    if (area->backend && area->backend->resize) {
-        if (!area->backend->resize(area, pages)) {
-            mutex_unlock(&area->lock);
-            mutex_unlock(&as->lock);
-            return ENOMEM;
         }
     }
…
         return ENOENT;
     }
-
-    if (area->backend && area->backend->destroy)
-        area->backend->destroy(area);
 
     uintptr_t base = area->base;
…
 
         for (size = 0; size < (size_t) node->value[i]; size++) {
-            pte_t *pte =
-                page_mapping_find(as, ptr + (size << PAGE_WIDTH));
+            pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
 
             ASSERT(pte);
…
                 (area->backend->frame_free)) {
                 area->backend->frame_free(area,
-                    ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
+                    ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
             }
 
-            page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
+            page_mapping_remove(as, ptr + size * PAGE_SIZE);
         }
     }
…
     /*
      * Finish TLB shootdown sequence.
+     *
      */
 
…
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
+     *
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
…
     /*
      * Remove the empty area from address space.
+     *
      */
     btree_remove(&as->as_area_btree, base, NULL);
…
         /*
          * Could not find the source address space area.
+         *
          */
         mutex_unlock(&src_as->lock);
…
          * There is no backend or the backend does not
          * know how to share the area.
+         *
          */
         mutex_unlock(&src_area->lock);
…
     }
 
-    size_t src_size = src_area->pages << PAGE_WIDTH;
+    size_t src_size = src_area->pages * PAGE_SIZE;
     unsigned int src_flags = src_area->flags;
     mem_backend_t *src_backend = src_area->backend;
…
      * First, prepare the area for sharing.
      * Then it will be safe to unlock it.
+     *
      */
     share_info_t *sh_info = src_area->sh_info;
…
         /*
          * Call the backend to setup sharing.
+         *
          */
         src_area->backend->share(src_area);
…
      * The flags of the source area are masked against dst_flags_mask
      * to support sharing in less privileged mode.
+     *
      */
     as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
…
      * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
      * attribute and set the sh_info.
+     *
      */
     mutex_lock(&dst_as->lock);
…
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
-    ASSERT(mutex_locked(&area->lock));
-
     int flagmap[] = {
         [PF_ACCESS_READ] = AS_AREA_READ,
         [PF_ACCESS_WRITE] = AS_AREA_WRITE,
         [PF_ACCESS_EXEC] = AS_AREA_EXEC
     };
+
+    ASSERT(mutex_locked(&area->lock));
 
     if (!(area->flags & flagmap[access]))
…
     /*
      * Compute total number of used pages in the used_space B+tree
+     *
      */
     size_t used_pages = 0;
…
     /*
      * Start TLB shootdown sequence.
+     *
      */
     ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
…
      * Remove used pages from page tables and remember their frame
      * numbers.
+     *
      */
     size_t frame_idx = 0;
…
 
         for (size = 0; size < (size_t) node->value[i]; size++) {
-            pte_t *pte =
-                page_mapping_find(as, ptr + (size << PAGE_WIDTH));
+            pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
 
             ASSERT(pte);
…
 
             /* Remove old mapping */
-            page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
+            page_mapping_remove(as, ptr + size * PAGE_SIZE);
         }
     }
…
     /*
      * Finish TLB shootdown sequence.
+     *
      */
 
…
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
+     *
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
…
 
         /* Insert the new mapping */
-        page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
+        page_mapping_insert(as, ptr + size * PAGE_SIZE,
             old_frame[frame_idx++], page_flags);
…
          * No area contained mapping for 'page'.
          * Signal page fault to low-level handler.
+         *
          */
         mutex_unlock(&AS->lock);
…
          * The address space area is not backed by any backend
          * or the backend cannot handle page faults.
+         *
          */
         mutex_unlock(&area->lock);
…
      * To avoid race condition between two page faults on the same address,
      * we need to make sure the mapping has not been already inserted.
+     *
      */
     pte_t *pte;
…
     /*
      * Resort to the backend page fault handler.
+     *
      */
     if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
…
          * preemption is disabled. We should not be
          * holding any other lock.
+         *
          */
         (void) interrupts_enable();
…
          * list of inactive address spaces with assigned
          * ASID.
+         *
          */
         ASSERT(old_as->asid != ASID_INVALID);
…
          * Perform architecture-specific tasks when the address space
          * is being removed from the CPU.
+         *
          */
         as_deinstall_arch(old_as);
…
     /*
      * Second, prepare the new address space.
+     *
      */
     if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
…
      * Perform architecture-specific steps.
      * (e.g. write ASID to hardware register etc.)
+     *
      */
     as_install_arch(new_as);
…
 {
     ASSERT(mutex_locked(&area->lock));
 
     return area_flags_to_page_flags(area->flags);
 }
…
 
     if (src_area) {
-        size = src_area->pages << PAGE_WIDTH;
+        size = src_area->pages * PAGE_SIZE;
         mutex_unlock(&src_area->lock);
     } else
…
  * @param count Number of page to be marked.
  *
- * @return False on failure or true on success.
- *
- */
-bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+ * @return Zero on failure and non-zero on success.
+ *
+ */
+int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
     ASSERT(mutex_locked(&area->lock));
…
         /*
          * We hit the beginning of some used space.
-         */
-        return false;
+         *
+         */
+        return 0;
     }
 
     if (!leaf->keys) {
         btree_insert(&area->used_space, page, (void *) count, leaf);
-        goto success;
+        return 1;
     }
…
          * somewhere between the rightmost interval of
          * the left neigbour and the first interval of the leaf.
+         *
          */
 
         if (page >= right_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
-            left_cnt << PAGE_WIDTH)) {
+        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
+            left_cnt * PAGE_SIZE)) {
             /* The interval intersects with the left interval. */
-            return false;
-        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
-            right_cnt << PAGE_WIDTH)) {
+            return 0;
+        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+            right_cnt * PAGE_SIZE)) {
             /* The interval intersects with the right interval. */
-            return false;
-        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
-            (page + (count << PAGE_WIDTH) == right_pg)) {
+            return 0;
+        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+            (page + count * PAGE_SIZE == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
+             *
              */
             node->value[node->keys - 1] += count + right_cnt;
             btree_remove(&area->used_space, right_pg, leaf);
-            goto success;
-        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+            return 1;
+        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
             /*
              * The interval can be added by simply growing the left
              * interval.
+             *
              */
             node->value[node->keys - 1] += count;
-            goto success;
-        } else if (page + (count << PAGE_WIDTH) == right_pg) {
+            return 1;
+        } else if (page + count * PAGE_SIZE == right_pg) {
             /*
              * The interval can be addded by simply moving base of
              * the right interval down and increasing its size
              * accordingly.
+             *
              */
             leaf->value[0] += count;
             leaf->key[0] = page;
-            goto success;
+            return 1;
         } else {
             /*
              * The interval is between both neigbouring intervals,
              * but cannot be merged with any of them.
+             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            goto success;
+            return 1;
         }
     } else if (page < leaf->key[0]) {
…
          * Investigate the border case in which the left neighbour does
          * not exist but the interval fits from the left.
-         */
-
-        if (overlaps(page, count << PAGE_WIDTH, right_pg,
-            right_cnt << PAGE_WIDTH)) {
+         *
+         */
+
+        if (overlaps(page, count * PAGE_SIZE, right_pg,
+            right_cnt * PAGE_SIZE)) {
             /* The interval intersects with the right interval. */
-            return false;
-        } else if (page + (count << PAGE_WIDTH) == right_pg) {
+            return 0;
+        } else if (page + count * PAGE_SIZE == right_pg) {
             /*
              * The interval can be added by moving the base of the
              * right interval down and increasing its size
              * accordingly.
+             *
              */
             leaf->key[0] = page;
             leaf->value[0] += count;
-            goto success;
+            return 1;
         } else {
             /*
              * The interval doesn't adjoin with the right interval.
              * It must be added individually.
+             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            goto success;
+            return 1;
         }
     }
…
          * somewhere between the leftmost interval of
          * the right neigbour and the last interval of the leaf.
+         *
          */
 
         if (page < left_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
-            left_cnt << PAGE_WIDTH)) {
+        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
+            left_cnt * PAGE_SIZE)) {
             /* The interval intersects with the left interval. */
-            return false;
-        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
-            right_cnt << PAGE_WIDTH)) {
+            return 0;
+        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+            right_cnt * PAGE_SIZE)) {
             /* The interval intersects with the right interval. */
-            return false;
-        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
-            (page + (count << PAGE_WIDTH) == right_pg)) {
+            return 0;
+        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+            (page + count * PAGE_SIZE == right_pg)) {
             /*
              * The interval can be added by merging the two already
              * present intervals.
+             *
              */
             leaf->value[leaf->keys - 1] += count + right_cnt;
             btree_remove(&area->used_space, right_pg, node);
-            goto success;
-        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+            return 1;
+        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
             /*
              * The interval can be added by simply growing the left
              * interval.
+             *
              */
             leaf->value[leaf->keys - 1] += count;
-            goto success;
-        } else if (page + (count << PAGE_WIDTH) == right_pg) {
+            return 1;
+        } else if (page + count * PAGE_SIZE == right_pg) {
             /*
              * The interval can be addded by simply moving base of
              * the right interval down and increasing its size
              * accordingly.
+             *
              */
             node->value[0] += count;
             node->key[0] = page;
-            goto success;
+            return 1;
         } else {
             /*
              * The interval is between both neigbouring intervals,
              * but cannot be merged with any of them.
+             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            goto success;
+            return 1;
         }
     } else if (page >= leaf->key[leaf->keys - 1]) {
…
          * Investigate the border case in which the right neighbour
          * does not exist but the interval fits from the right.
-         */
-
-        if (overlaps(page, count << PAGE_WIDTH, left_pg,
-            left_cnt << PAGE_WIDTH)) {
+         *
+         */
+
+        if (overlaps(page, count * PAGE_SIZE, left_pg,
+            left_cnt * PAGE_SIZE)) {
             /* The interval intersects with the left interval. */
-            return false;
-        } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
+            return 0;
+        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
             /*
              * The interval can be added by growing the left
              * interval.
+             *
              */
             leaf->value[leaf->keys - 1] += count;
-            goto success;
+            return 1;
         } else {
             /*
              * The interval doesn't adjoin with the left interval.
              * It must be added individually.
+             *
              */
             btree_insert(&area->used_space, page, (void *) count,
                 leaf);
-            goto success;
+            return 1;
         }
     }
…
      * only between two other intervals of the leaf. The two border cases
      * were already resolved.
+     *
      */
     btree_key_t i;
…
             /*
              * The interval fits between left_pg and right_pg.
+             *
              */
 
-            if (overlaps(page, count << PAGE_WIDTH, left_pg,
-                left_cnt << PAGE_WIDTH)) {
+            if (overlaps(page, count * PAGE_SIZE, left_pg,
+                left_cnt * PAGE_SIZE)) {
                 /*
                  * The interval intersects with the left
                  * interval.
+                 *
                  */
-                return false;
-            } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
-                right_cnt << PAGE_WIDTH)) {
+                return 0;
+            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+                right_cnt * PAGE_SIZE)) {
                 /*
                  * The interval intersects with the right
                  * interval.
+                 *
                  */
-                return false;
-            } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
-                (page + (count << PAGE_WIDTH) == right_pg)) {
+                return 0;
+            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+                (page + count * PAGE_SIZE == right_pg)) {
                 /*
                  * The interval can be added by merging the two
                  * already present intervals.
+                 *
                  */
                 leaf->value[i - 1] += count + right_cnt;
                 btree_remove(&area->used_space, right_pg, leaf);
-                goto success;
-            } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+                return 1;
+            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
                 /*
                  * The interval can be added by simply growing
                  * the left interval.
+                 *
                  */
                 leaf->value[i - 1] += count;
-                goto success;
-            } else if (page + (count << PAGE_WIDTH) == right_pg) {
+                return 1;
+            } else if (page + count * PAGE_SIZE == right_pg) {
                 /*
                  * The interval can be addded by simply moving
                  * base of the right interval down and
                  * increasing its size accordingly.
+                 *
                  */
                 leaf->value[i] += count;
                 leaf->key[i] = page;
-                goto success;
+                return 1;
             } else {
                 /*
                  * The interval is between both neighbouring
                  * intervals, but cannot be merged with any of
                  * them.
+                 *
                  */
                 btree_insert(&area->used_space, page,
                     (void *) count, leaf);
-                goto success;
+                return 1;
             }
         }
     }
 
-    panic("Inconsistency detected while adding %zu pages of used "
-        "space at %p.", count, (void *) page);
-
-success:
-    area->resident += count;
-    return true;
+    panic("Inconsistency detected while adding %" PRIs " pages of used "
+        "space at %p.", count, page);
 }
…
  * @param count Number of page to be marked.
  *
- * @return False on failure or true on success.
- *
- */
-bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+ * @return Zero on failure and non-zero on success.
+ *
+ */
+int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
     ASSERT(mutex_locked(&area->lock));
…
         /*
          * We are lucky, page is the beginning of some interval.
+         *
          */
         if (count > pages) {
-            return false;
+            return 0;
         } else if (count == pages) {
             btree_remove(&area->used_space, page, leaf);
-            goto success;
+            return 1;
         } else {
             /*
              * Find the respective interval.
              * Decrease its size and relocate its start address.
+             *
              */
             btree_key_t i;
             for (i = 0; i < leaf->keys; i++) {
                 if (leaf->key[i] == page) {
-                    leaf->key[i] += count << PAGE_WIDTH;
+                    leaf->key[i] += count * PAGE_SIZE;
                     leaf->value[i] -= count;
-                    goto success;
+                    return 1;
                 }
             }
-
             goto error;
         }
…
         size_t left_cnt = (size_t) node->value[node->keys - 1];
 
-        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
-            count << PAGE_WIDTH)) {
-            if (page + (count << PAGE_WIDTH) ==
-                left_pg + (left_cnt << PAGE_WIDTH)) {
+        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+            count * PAGE_SIZE)) {
+            if (page + count * PAGE_SIZE ==
+                left_pg + left_cnt * PAGE_SIZE) {
                 /*
                  * The interval is contained in the rightmost
…
                  * removed by updating the size of the bigger
                  * interval.
+                 *
                  */
                 node->value[node->keys - 1] -= count;
-                goto success;
-            } else if (page + (count << PAGE_WIDTH) <
-                left_pg + (left_cnt << PAGE_WIDTH)) {
+                return 1;
+            } else if (page + count * PAGE_SIZE <
+                left_pg + left_cnt * PAGE_SIZE) {
                 /*
                  * The interval is contained in the rightmost
…
                  * the original interval and also inserting a
                  * new interval.
+                 *
                  */
-                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
-                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                 node->value[node->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
-                goto success;
+                    count * PAGE_SIZE, (void *) new_cnt, leaf);
+                return 1;
             }
         }
-
-        return false;
+        return 0;
     } else if (page < leaf->key[0])
-        return false;
+        return 0;
 
     if (page > leaf->key[leaf->keys - 1]) {
…
         size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
-        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
-            count << PAGE_WIDTH)) {
-            if (page + (count << PAGE_WIDTH) ==
-                left_pg + (left_cnt << PAGE_WIDTH)) {
+        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+            count * PAGE_SIZE)) {
+            if (page + count * PAGE_SIZE ==
+                left_pg + left_cnt * PAGE_SIZE) {
                 /*
                  * The interval is contained in the rightmost
                  * interval of the leaf and can be removed by
                  * updating the size of the bigger interval.
+                 *
                  */
                 leaf->value[leaf->keys - 1] -= count;
-                goto success;
-            } else if (page + (count << PAGE_WIDTH) < left_pg +
-                (left_cnt << PAGE_WIDTH)) {
+                return 1;
+            } else if (page + count * PAGE_SIZE < left_pg +
+                left_cnt * PAGE_SIZE) {
                 /*
                  * The interval is contained in the rightmost
…
                  * original interval and also inserting a new
                  * interval.
+                 *
                  */
-                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
-                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                 leaf->value[leaf->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
-                goto success;
+                    count * PAGE_SIZE, (void *) new_cnt, leaf);
+                return 1;
             }
         }
-
-        return false;
+        return 0;
     }
 
     /*
      * The border cases have been already resolved.
      * Now the interval can be only between intervals of the leaf.
      */
     btree_key_t i;
…
              * to (i - 1) and i.
              */
-            if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
-                count << PAGE_WIDTH)) {
-                if (page + (count << PAGE_WIDTH) ==
-                    left_pg + (left_cnt << PAGE_WIDTH)) {
+            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+                count * PAGE_SIZE)) {
+                if (page + count * PAGE_SIZE ==
+                    left_pg + left_cnt * PAGE_SIZE) {
                     /*
                      * The interval is contained in the
…
                      * be removed by updating the size of
                      * the bigger interval.
+                     *
                      */
                     leaf->value[i - 1] -= count;
-                    goto success;
-                } else if (page + (count << PAGE_WIDTH) <
-                    left_pg + (left_cnt << PAGE_WIDTH)) {
+                    return 1;
+                } else if (page + count * PAGE_SIZE <
+                    left_pg + left_cnt * PAGE_SIZE) {
                     /*
                      * The interval is contained in the
…
                      */
                     size_t new_cnt = ((left_pg +
-                        (left_cnt << PAGE_WIDTH)) -
-                        (page + (count << PAGE_WIDTH))) >>
+                        left_cnt * PAGE_SIZE) -
+                        (page + count * PAGE_SIZE)) >>
                         PAGE_WIDTH;
                     leaf->value[i - 1] -= count + new_cnt;
                     btree_insert(&area->used_space, page +
-                        (count << PAGE_WIDTH), (void *) new_cnt,
+                        count * PAGE_SIZE, (void *) new_cnt,
                         leaf);
-                    goto success;
+                    return 1;
                 }
             }
-
-            return false;
+            return 0;
         }
     }
 
 error:
-    panic("Inconsistency detected while removing %zu pages of used "
-        "space from %p.", count, (void *) page);
-
-success:
-    area->resident -= count;
-    return true;
+    panic("Inconsistency detected while removing %" PRIs " pages of used "
+        "space from %p.", count, page);
 }
…
 
 /** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
+unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
 {
     if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
         AS_AREA_ATTR_NONE, &anon_backend, NULL))
-        return (sysarg_t) address;
+        return (unative_t) address;
     else
-        return (sysarg_t) -1;
+        return (unative_t) -1;
 }
 
 /** Wrapper for as_area_resize(). */
-sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
-{
-    return (sysarg_t) as_area_resize(AS, address, size, 0);
+unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
+{
+    return (unative_t) as_area_resize(AS, address, size, 0);
 }
 
 /** Wrapper for as_area_change_flags(). */
-sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
-{
-    return (sysarg_t) as_area_change_flags(AS, flags, address);
+unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
+{
+    return (unative_t) as_area_change_flags(AS, flags, address);
 }
 
 /** Wrapper for as_area_destroy(). */
-sysarg_t sys_as_area_destroy(uintptr_t address)
-{
-    return (sysarg_t) as_area_destroy(AS, address);
-}
-
-/** Return pointer to unmapped address space area
- *
- * @param base Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Pointer to the beginning of unmapped address space area.
- *
- */
-sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-{
-    if (size == 0)
-        return 0;
-
-    /*
-     * Make sure we allocate from page-aligned
-     * address. Check for possible overflow in
-     * each step.
-     */
-
-    size_t pages = SIZE2FRAMES(size);
-    uintptr_t ret = 0;
-
-    /*
-     * Find the lowest unmapped address aligned on the sz
-     * boundary, not smaller than base and of the required size.
-     */
-
-    mutex_lock(&AS->lock);
-
-    /* First check the base address itself */
-    uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-    if ((addr >= base) &&
-        (check_area_conflicts(AS, addr, pages, NULL)))
-        ret = addr;
-
-    /* Eventually check the addresses behind each area */
-    link_t *cur;
-    for (cur = AS->as_area_btree.leaf_head.next;
-        (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
-        cur = cur->next) {
-        btree_node_t *node =
-            list_get_instance(cur, btree_node_t, leaf_link);
-
-        btree_key_t i;
-        for (i = 0; (ret == 0) && (i < node->keys); i++) {
-            as_area_t *area = (as_area_t *) node->value[i];
-
-            mutex_lock(&area->lock);
-
-            uintptr_t addr =
-                ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
-                PAGE_SIZE);
-
-            if ((addr >= base) && (addr >= area->base) &&
-                (check_area_conflicts(AS, addr, pages, area)))
-                ret = addr;
-
-            mutex_unlock(&area->lock);
-        }
-    }
-
-    mutex_unlock(&AS->lock);
-
-    return (sysarg_t) ret;
+unative_t sys_as_area_destroy(uintptr_t address)
+{
+    return (unative_t) as_area_destroy(AS, address);
 }
…
     mutex_lock(&as->lock);
 
-    /* Print out info about address space areas */
+    /* print out info about address space areas */
     link_t *cur;
     for (cur = as->as_area_btree.leaf_head.next;
…
 
         mutex_lock(&area->lock);
-        printf("as_area: %p, base=%p, pages=%zu"
-            " (%p - %p)\n", area, (void *) area->base,
-            area->pages, (void *) area->base,
-            (void *) (area->base + FRAMES2SIZE(area->pages)));
+        printf("as_area: %p, base=%p, pages=%" PRIs
+            " (%p - %p)\n", area, area->base, area->pages,
+            area->base, area->base + FRAMES2SIZE(area->pages));
         mutex_unlock(&area->lock);
     }
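A note for readers comparing the two revisions: the bulk of the mechanical churn above swaps expressions of the form count << PAGE_WIDTH (side e394b736) for count * PAGE_SIZE (side 97bdb4a). The two spellings are interchangeable whenever PAGE_SIZE is defined as 1 << PAGE_WIDTH, which is the usual arrangement in this kernel. The minimal standalone sketch below demonstrates the identity; it is not part of the changeset, and the PAGE_WIDTH value of 12 (4 KiB pages) is an assumption chosen purely for illustration.

#include <assert.h>
#include <stddef.h>

/* Hypothetical values for illustration; the kernel defines these per arch. */
#define PAGE_WIDTH  12
#define PAGE_SIZE   (1 << PAGE_WIDTH)

int main(void)
{
    size_t count;

    for (count = 0; count < 4096; count++) {
        /* shift form, as used on the e394b736 side of the diff */
        size_t bytes_shift = count << PAGE_WIDTH;

        /* multiplication form, as used on the 97bdb4a side */
        size_t bytes_mul = count * PAGE_SIZE;

        assert(bytes_shift == bytes_mul);
    }

    return 0;
}

Since PAGE_SIZE is a compile-time power of two, a compiler can be expected to reduce the multiplication to the same shift, so the difference between the two sides here is stylistic rather than behavioural.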