Changes in / [0c968a17:aa7dc64] in mainline
Files:
- 3 added
- 37 edited
kernel/arch/mips32/src/mm/tlb.c
r0c968a17 → raa7dc64: in the ASID invalidation routine, the early return

    if (asid == ASID_INVALID)
        return;

is replaced by an assertion:

    ASSERT(asid != ASID_INVALID);

so an invalid ASID is now treated as a caller error rather than being silently ignored (contrasted in the sketch below).
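For context, a minimal standalone contrast of the two behaviours; plain assert() stands in for the kernel ASSERT() macro, ASID_INVALID is given a placeholder value, and the actual TLB walk is elided:

    #include <assert.h>
    #include <stdio.h>

    #define ASID_INVALID 0  /* placeholder value for this sketch only */

    /* r0c968a17 behaviour: an invalid ASID is silently ignored. */
    static void invalidate_lenient(unsigned asid)
    {
        if (asid == ASID_INVALID)
            return;
        printf("invalidating TLB entries for ASID %u\n", asid);
    }

    /* raa7dc64 behaviour: an invalid ASID is treated as a caller bug. */
    static void invalidate_strict(unsigned asid)
    {
        assert(asid != ASID_INVALID);
        printf("invalidating TLB entries for ASID %u\n", asid);
    }

    int main(void)
    {
        invalidate_lenient(ASID_INVALID);  /* quietly does nothing */
        invalidate_strict(5);              /* fine */
        /* invalidate_strict(ASID_INVALID) would abort a debug build */
        return 0;
    }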
kernel/generic/include/mm/as.h
r0c968a17 → raa7dc64:

- The doc comments for cpu_refcount, asid, refcount and several as_area_t fields are reflowed and reworded (the raa7dc64 wording reads "Number of processors on wich is this address space active").
- The comment on as_area_t.pages changes from "Number of pages in the area" to "Size of this area in multiples of PAGE_SIZE".
- The field size_t resident ("Number of resident pages in the area"), present in r0c968a17, is removed.
- used_space_insert() and used_space_remove() change their return type from bool to int.
- The declaration of sys_as_get_unmapped_area(uintptr_t, size_t) is removed.
kernel/generic/include/syscall/syscall.h
r0c968a17 → raa7dc64: the SYS_AS_GET_UNMAPPED_AREA entry is removed from the syscall number enumeration.
kernel/generic/src/mm/as.c
r0c968a17 → raa7dc64:

- The #include <bitops.h> is dropped; many block comments gain an extra blank comment line and minor rewording throughout the file.
- as_constructor() stores the result of as_constructor_arch() in a local rc before returning it, and as_destructor() introduces a local as variable.
- check_area_conflicts() changes its signature from (as, addr, count, avoid), where count is a number of pages, to (as, va, size, avoid_area), where size is in bytes. The ASSERT((addr % PAGE_SIZE) == 0) is dropped, the overlap tests use size and area->pages * PAGE_SIZE instead of count << PAGE_WIDTH and area->pages << PAGE_WIDTH, and the left/right neighbour checks no longer skip avoid_area.
- as_area_create(): the parameter checks become "if (base % PAGE_SIZE)" and "if (!size)", the pages local disappears (area->pages is set directly from SIZE2FRAMES(size)), and the area->resident = 0 initialization is gone.
- Throughout as_area_resize(), as_area_destroy(), as_area_share(), as_area_change_flags(), as_page_fault() and as_area_get_size(), page arithmetic of the form x << PAGE_WIDTH is rewritten as x * PAGE_SIZE (see the note below).
- In as_area_check_access() the ASSERT(mutex_locked(&area->lock)) moves below the flagmap definition.
- used_space_insert() and used_space_remove() change their return type from bool to int (returning 0/1), and the shared "success:" epilogue that maintained area->resident is removed, so the resident page counter is no longer kept up to date.
- The entire sys_as_get_unmapped_area() implementation (find the lowest page-aligned unmapped address not below a given base, checking the base itself and then the end of each existing area) is removed, matching the header and syscall-table changes.
- A comment changes case: "Print out info about address space areas" becomes "print out info about address space areas".
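Most of the mechanical churn above swaps shifts for multiplications. Assuming PAGE_SIZE is defined as (1 << PAGE_WIDTH), as the kernel headers do (the width 12 below is just an example value), the two spellings are interchangeable; a minimal standalone check:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_WIDTH  12                 /* example value for this sketch */
    #define PAGE_SIZE   (1 << PAGE_WIDTH)

    int main(void)
    {
        for (size_t pages = 0; pages < 100000; pages++)
            assert((pages << PAGE_WIDTH) == pages * PAGE_SIZE);

        printf("pages << PAGE_WIDTH == pages * PAGE_SIZE for all tested counts\n");
        return 0;
    }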
kernel/generic/src/proc/program.c
r0c968a17 → raa7dc64:

- When no program loader is registered, the as_destroy(as) call before the "Cannot spawn loader as none was registered" message is removed.
- On elf_load() failure, the diagnostic printf("Cannot spawn loader (%s)\n", elf_error(rc)) is removed; as_destroy(as) and the ENOENT return remain.
kernel/generic/src/syscall/syscall.c
r0c968a17 → raa7dc64: sys_as_get_unmapped_area is removed from the syscall dispatch table, matching the syscall.h change above.
kernel/generic/src/sysinfo/stats.c
r0c968a17 → raa7dc64:

- get_task_virtmem(): the page counter becomes a result variable initialized up front, the function returns result * PAGE_SIZE instead of pages << PAGE_WIDTH, and the comment gains a note that the address space code could maintain these statistics incrementally instead of recomputing them here.
- get_task_resmem(): r0c968a17 simply summed the per-area resident counters (area->resident). Since raa7dc64 drops that field, the function now walks each area's used_space B+tree and sums the interval lengths stored in its leaf nodes (note that the inner loop as shown adds rnode->value[i], indexing with the outer loop variable rather than j). A toy model of the two statistics follows below.
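A toy, user-space model of the two statistics (flat arrays stand in for the kernel's B+ trees; names are invented for the sketch): virtmem sums the declared size of every area, while resmem sums only the intervals recorded as used.

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    typedef struct {
        size_t start_page;   /* first page of a used interval */
        size_t page_count;   /* length of the interval in pages */
    } used_interval_t;

    typedef struct {
        size_t pages;                  /* pages covered by the area */
        const used_interval_t *used;   /* stand-in for area->used_space */
        size_t used_intervals;
    } toy_area_t;

    static size_t virtmem(const toy_area_t *areas, size_t count)
    {
        size_t pages = 0;
        for (size_t i = 0; i < count; i++)
            pages += areas[i].pages;
        return pages * PAGE_SIZE;
    }

    static size_t resmem(const toy_area_t *areas, size_t count)
    {
        size_t pages = 0;
        for (size_t i = 0; i < count; i++)
            for (size_t j = 0; j < areas[i].used_intervals; j++)
                pages += areas[i].used[j].page_count;
        return pages * PAGE_SIZE;
    }

    int main(void)
    {
        const used_interval_t text_used[] = { { 0, 3 } };
        const used_interval_t heap_used[] = { { 0, 1 }, { 4, 2 } };
        const toy_area_t areas[] = {
            { 4, text_used, 1 },    /* 4-page area, 3 pages faulted in */
            { 16, heap_used, 2 },   /* 16-page area, 3 pages faulted in */
        };

        printf("virtmem: %zu bytes\n", virtmem(areas, 2));  /* 20 pages */
        printf("resmem:  %zu bytes\n", resmem(areas, 2));   /*  6 pages */
        return 0;
    }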
uspace/app/taskdump/taskdump.c
r0c968a17 → raa7dc64: the fallback that built "/drv/%s/%s" with asprintf() and retried symtab_load() from there is removed; after the first attempt fails, the code now just frees file_name and prints "Failed autoloading symbol table."
uspace/lib/block/libblock.c
r0c968a17 → raa7dc64:

- In cache initialization, the free(cache) before returning ENOTSUP on an unsupported logical/physical block size ratio is removed.
- In block allocation, the b = NULL reset before goto recycle is dropped, as is the assert(block->refcnt >= 1) in the put path.
- When evicting a cached block, the order of the frees changes: r0c968a17 ran free(block->data) and then free(block), raa7dc64 runs free(block) and then free(block->data) (see the note below).
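For reference, the r0c968a17 ordering reads block->data while the containing structure is still valid and only then releases the container; a minimal sketch of that pattern with simplified names:

    #include <stdlib.h>

    typedef struct {
        void *data;
        /* ... other bookkeeping ... */
    } toy_block_t;

    static void toy_block_fini(toy_block_t *block)
    {
        /* Release the payload first, the container last. */
        free(block->data);
        free(block);
    }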
uspace/lib/c/arch/abs32le/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x1000); and _heap = .; before the /DISCARD/ section.
uspace/lib/c/arch/amd64/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x1000); _heap = .; after the data sections (before the CONFIG_LINE_DEBUG block), plus a blank line before the closing brace.
uspace/lib/c/arch/arm32/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x1000); _heap = .; after the .bss section; the rest is blank-line churn inside the .tdata/.bss sections and spacing tweaks around "} :text".
uspace/lib/c/arch/ia32/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x1000); _heap = .; before the CONFIG_LINE_DEBUG block.
uspace/lib/c/arch/ia64/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x4000); _heap = .; before the /DISCARD/ section; the rest is blank-line churn and spacing tweaks around "} :text" and "} :data".
uspace/lib/c/arch/mips32/_link.ld.in
r0c968a17 → raa7dc64: folds the .tbss input rules into the .tdata output section, changes _tls_alignment from MAX(ALIGNOF(.tdata), ALIGNOF(.tbss)) to ALIGNOF(.tdata), and adds . = ALIGN(0x4000); _heap = .; before /DISCARD/; plus whitespace churn.
uspace/lib/c/arch/mips32/src/entry.s
r0c968a17 → raa7dc64: appends a data-section alignment directive at the end of the entry code (a "# Alignment of output section data to 0x4000" comment followed by .section .data and .align 14) and drops a blank line before the .global directives.
uspace/lib/c/arch/ppc32/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x1000); _heap = .; after the .bss section; blank-line churn.
uspace/lib/c/arch/sparc64/_link.ld.in
r0c968a17 → raa7dc64: adds . = ALIGN(0x4000); _heap = .; after the .bss section; blank-line churn.
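All of the _link.ld.in changes above export the same _heap symbol at the (aligned) end of the loaded image. A sketch of how such a linker-provided symbol is consumed from C; only the symbol's address is meaningful, and the snippet links only where the script actually defines _heap, as the scripts above now do (compare the extern char _heap in the malloc.c entry further down):

    #include <stdint.h>
    #include <stdio.h>

    /* Defined by the linker script via "_heap = .;". */
    extern char _heap;

    int main(void)
    {
        uintptr_t heap_start = (uintptr_t) &_heap;
        printf("initial heap break at %p\n", (void *) heap_start);
        return 0;
    }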
uspace/lib/c/generic/as.c
r0c968a17 → raa7dc64:

- The include of "private/libc.h" is dropped and a file-local static uintptr_t last_allocated = 0 ("Last position allocated by as_get_mappable_page") is added.
- as_get_mappable_page(size) no longer wraps the SYS_AS_GET_UNMAPPED_AREA syscall (with __entry as the lower bound). It is now computed entirely in user space: the request is rounded up to the next power of two with 1 << (fnzb(size - 1) + 1), last_allocated is seeded from get_max_heap_addr() on first use, the result is ALIGN_UP(last_allocated, sz), and last_allocated is advanced by ALIGN_UP(size, PAGE_SIZE). The doc comment changes to "Return pointer to some unmapped area, where fits new as_area". A standalone model of this bump allocator follows below.
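A user-space model of this bump allocator, with the libc pieces spelled out as stand-ins (next_pow2() replaces 1 << (fnzb(size - 1) + 1), fake_max_heap_addr() replaces get_max_heap_addr(), and ALIGN_UP assumes power-of-two alignments):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define ALIGN_UP(addr, align) \
        (((addr) + ((uintptr_t) (align) - 1)) & ~((uintptr_t) (align) - 1))

    static uintptr_t last_allocated = 0;

    /* Stand-in for get_max_heap_addr(): first address past the maximum heap. */
    static uintptr_t fake_max_heap_addr(void)
    {
        return 0x40000000;
    }

    /* Smallest power of two not below size, like 1 << (fnzb(size - 1) + 1). */
    static uintptr_t next_pow2(uintptr_t size)
    {
        uintptr_t sz = 1;
        while (sz < size)
            sz <<= 1;
        return sz;
    }

    static void *get_mappable_page(size_t size)
    {
        if (size == 0)
            return NULL;

        uintptr_t sz = next_pow2(size);
        if (last_allocated == 0)
            last_allocated = fake_max_heap_addr();

        /* Naturally aligned start, then bump the cursor past the request. */
        uintptr_t res = ALIGN_UP(last_allocated, sz);
        last_allocated = res + ALIGN_UP((uintptr_t) size, PAGE_SIZE);

        return (void *) res;
    }

    int main(void)
    {
        printf("%p\n", get_mappable_page(5000));    /* 8 KiB aligned   */
        printf("%p\n", get_mappable_page(100000));  /* 128 KiB aligned */
        return 0;
    }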
uspace/lib/c/generic/async.c
r0c968a17 → raa7dc64: conn_remove(), the connection hash table removal callback, gains a doc comment and now frees the connection_t obtained via hash_table_get_instance(item, connection_t, link); correspondingly, the free(FIBRIL_connection) at the end of the connection fibril is removed, so ownership of the structure passes to the hash table callback (sketched below).
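The ownership pattern introduced here (the element is freed by the table's removal callback rather than by the code that created it) in a generic, self-contained form; this is a toy list-based table, not the libc hash_table API:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct conn {
        int phone;
        struct conn *next;
    } conn_t;

    typedef struct {
        conn_t *head;
        void (*remove_cb)(conn_t *);   /* owns the element once unlinked */
    } conn_table_t;

    /* Counterpart of the raa7dc64 conn_remove(): the callback frees the
     * element, so no separate free at the end of the fibril is needed. */
    static void conn_remove_cb(conn_t *conn)
    {
        free(conn);
    }

    static void table_remove(conn_table_t *table, int phone)
    {
        conn_t **link = &table->head;
        while (*link != NULL) {
            if ((*link)->phone == phone) {
                conn_t *victim = *link;
                *link = victim->next;
                table->remove_cb(victim);   /* unlink, then hand over */
                return;
            }
            link = &(*link)->next;
        }
    }

    int main(void)
    {
        conn_table_t table = { NULL, conn_remove_cb };
        conn_t *conn = malloc(sizeof(conn_t));
        conn->phone = 42;
        conn->next = NULL;
        table.head = conn;

        table_remove(&table, 42);   /* unlinks and frees in one step */
        printf("table is %s\n", table.head == NULL ? "empty" : "non-empty");
        return 0;
    }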
uspace/lib/c/generic/malloc.c
r0c968a17 → raa7dc64:

- The magic constants lose their UINT32_C() wrappers, HEAP_AREA_MAGIC disappears, and a MAX_HEAP_SIZE cap of (sizeof(uintptr_t) << 28) bytes is introduced (commented as "4 * 256M on 32-bit architecures or 16 * 256M on 64-bit architectures").
- The whole multi-area heap machinery from r0c968a17 is removed: the heap_area_t descriptor (start/end/next/magic), the area pointer in heap_block_head_t, the AREA_FIRST_BLOCK()/BLOCK_FOOT() helpers, area_check(), area_create(), area_grow(), heap_grow(), heap_shrink(), the first/last_heap_area list and the next-fit pointer.
- In their place raa7dc64 manages a single contiguous heap anchored at the linker symbol _heap (extern char _heap): heap_start, heap_end, heap_pages and max_heap_size globals, grow_heap() resizing the area with as_area_resize((void *) &_heap, (heap_pages + pages) * PAGE_SIZE, 0), a shrink_heap() TODO stub, and get_max_heap_addr() (used by as_get_mappable_page() above) returning heap_start + max_heap_size under the malloc futex.
- __malloc_init() creates the heap address space area at &_heap with as_area_create() and turns it into one large free block; block_init() and block_check() lose the heap_area parameter.
- malloc_internal() scans the single heap from heap_start (with a grown flag for a retry after growing) instead of the per-area malloc_area() walk that remembered a next pointer; the alignment-excess handling inside the scan is essentially unchanged apart from the dropped area parameter. The listing is cut off mid-hunk at this point in the page. A self-contained sketch of the common block layout follows below.
362 We have to make sure that the alignment excess 363 is large enough to fit a new free block just 364 before the current block */ 488 365 while (excess < STRUCT_OVERHEAD) { 489 366 aligned += falign; … … 494 371 if (cur->size >= real_size + excess) { 495 372 size_t reduced_size = cur->size - excess; 496 cur = (heap_block_head_t *) 497 (AREA_FIRST_BLOCK(area) + excess); 373 cur = (heap_block_head_t *) (heap_start + excess); 498 374 499 block_init((void *) AREA_FIRST_BLOCK(area), excess, 500 true, area); 501 block_init(cur, reduced_size, true, area); 375 block_init(heap_start, excess, true); 376 block_init(cur, reduced_size, true); 502 377 split_mark(cur, real_size); 503 504 next = cur; 505 return aligned; 378 result = aligned; 506 379 } 507 380 } … … 509 382 } 510 383 } 511 } 512 513 return NULL; 514 } 515 516 /** Allocate a memory block 517 * 518 * Should be called only inside the critical section. 519 * 520 * @param size The size of the block to allocate. 521 * @param align Memory address alignment. 522 * 523 * @return Address of the allocated block or NULL on not enough memory. 524 * 525 */ 526 static void *malloc_internal(const size_t size, const size_t align) 527 { 528 assert(first_heap_area != NULL); 529 530 if (align == 0) 531 return NULL; 532 533 size_t falign = lcm(align, BASE_ALIGN); 534 size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign)); 535 536 bool retry = false; 537 heap_block_head_t *split; 538 539 loop: 540 541 /* Try the next fit approach */ 542 split = next; 543 544 if (split != NULL) { 545 void *addr = malloc_area(split->area, split, NULL, real_size, 546 falign); 547 548 if (addr != NULL) 549 return addr; 550 } 551 552 /* Search the entire heap */ 553 heap_area_t *area; 554 for (area = first_heap_area; area != NULL; area = area->next) { 555 heap_block_head_t *first = (heap_block_head_t *) 556 AREA_FIRST_BLOCK(area); 557 558 void *addr = malloc_area(area, first, split, real_size, 559 falign); 560 561 if (addr != NULL) 562 return addr; 563 } 564 565 if (!retry) { 566 /* Try to grow the heap space */ 567 if (heap_grow(real_size)) { 568 retry = true; 384 385 /* Advance to the next block. */ 386 cur = (heap_block_head_t *) (((void *) cur) + cur->size); 387 } 388 389 if ((result == NULL) && (!grown)) { 390 if (grow_heap(real_size)) { 391 grown = true; 569 392 goto loop; 570 393 } 571 394 } 572 395 573 return NULL;396 return result; 574 397 } 575 398 … … 650 473 (heap_block_head_t *) (addr - sizeof(heap_block_head_t)); 651 474 475 assert((void *) head >= heap_start); 476 assert((void *) head < heap_end); 477 652 478 block_check(head); 653 479 assert(!head->free); 654 655 heap_area_t *area = head->area;656 657 area_check(area);658 assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));659 assert((void *) head < area->end);660 480 661 481 void *ptr = NULL; … … 667 487 /* Shrink */ 668 488 if (orig_size - real_size >= STRUCT_OVERHEAD) { 669 /* 670 * Split the original block to a full block 671 * and a trailing free block. 672 */ 673 block_init((void *) head, real_size, false, area); 489 /* Split the original block to a full block 490 and a trailing free block */ 491 block_init((void *) head, real_size, false); 674 492 block_init((void *) head + real_size, 675 orig_size - real_size, true , area);676 heap_shrink();493 orig_size - real_size, true); 494 shrink_heap(); 677 495 } 678 496 679 497 ptr = ((void *) head) + sizeof(heap_block_head_t); 680 498 } else { 681 /* 682 * Look at the next block. If it is free and the size is 683 * sufficient then merge the two. 
Otherwise just allocate 684 * a new block, copy the original data into it and 685 * free the original block. 686 */ 499 /* Look at the next block. If it is free and the size is 500 sufficient then merge the two. Otherwise just allocate 501 a new block, copy the original data into it and 502 free the original block. */ 687 503 heap_block_head_t *next_head = 688 504 (heap_block_head_t *) (((void *) head) + head->size); 689 505 690 if (((void *) next_head < area->end) &&506 if (((void *) next_head < heap_end) && 691 507 (head->size + next_head->size >= real_size) && 692 508 (next_head->free)) { 693 509 block_check(next_head); 694 block_init(head, head->size + next_head->size, false , area);510 block_init(head, head->size + next_head->size, false); 695 511 split_mark(head, real_size); 696 512 697 513 ptr = ((void *) head) + sizeof(heap_block_head_t); 698 next = NULL;699 514 } else 700 515 reloc = true; … … 727 542 = (heap_block_head_t *) (addr - sizeof(heap_block_head_t)); 728 543 544 assert((void *) head >= heap_start); 545 assert((void *) head < heap_end); 546 729 547 block_check(head); 730 548 assert(!head->free); 731 732 heap_area_t *area = head->area;733 734 area_check(area);735 assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));736 assert((void *) head < area->end);737 549 738 550 /* Mark the block itself as free. */ … … 743 555 = (heap_block_head_t *) (((void *) head) + head->size); 744 556 745 if ((void *) next_head < area->end) {557 if ((void *) next_head < heap_end) { 746 558 block_check(next_head); 747 559 if (next_head->free) 748 block_init(head, head->size + next_head->size, true , area);560 block_init(head, head->size + next_head->size, true); 749 561 } 750 562 751 563 /* Look at the previous block. If it is free, merge the two. */ 752 if ((void *) head > (void *) AREA_FIRST_BLOCK(area)) {564 if ((void *) head > heap_start) { 753 565 heap_block_foot_t *prev_foot = 754 566 (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t)); … … 760 572 761 573 if (prev_head->free) 762 block_init(prev_head, prev_head->size + head->size, true, 763 area); 764 } 765 766 heap_shrink(); 574 block_init(prev_head, prev_head->size + head->size, true); 575 } 576 577 shrink_heap(); 767 578 768 579 futex_up(&malloc_futex); -
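Both sides of the malloc.c hunks above keep the same boundary-tag scheme: every block carries a header and a mirrored footer so that free() can coalesce with both neighbours, and split_mark() splits an oversized free block in place. Below is a minimal, self-contained sketch of that layout; the magic constants, the alignment attribute and the main() driver are illustrative assumptions rather than the HelenOS definitions, and the left-hand revision additionally stores a heap_area_t pointer in the header, which is omitted here.

/* Minimal sketch of the boundary-tag block layout shared by both revisions.
 * Magic values and the arena in main() are assumptions for illustration. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define HEAP_BLOCK_HEAD_MAGIC  UINT32_C(0xbeef0101)  /* assumed value */
#define HEAP_BLOCK_FOOT_MAGIC  UINT32_C(0xbeef0202)  /* assumed value */

typedef struct {
	size_t size;     /* gross size: header + payload + footer */
	bool free;
	uint32_t magic;  /* detects overwrites of the header */
} heap_block_head_t;

typedef struct {
	size_t size;     /* mirrors the header so free() can walk backwards */
	uint32_t magic;  /* detects overwrites of the footer */
} heap_block_foot_t;

/* The footer sits at the very end of the block; its fixed size lets the
 * allocator find the previous block's header when coalescing. */
static heap_block_foot_t *block_foot(heap_block_head_t *head)
{
	return (heap_block_foot_t *)
	    ((uint8_t *) head + head->size - sizeof(heap_block_foot_t));
}

static void block_init(void *addr, size_t size, bool free)
{
	heap_block_head_t *head = addr;

	head->size = size;
	head->free = free;
	head->magic = HEAP_BLOCK_HEAD_MAGIC;

	heap_block_foot_t *foot = block_foot(head);
	foot->size = size;
	foot->magic = HEAP_BLOCK_FOOT_MAGIC;
}

static void block_check(void *addr)
{
	heap_block_head_t *head = addr;
	assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

	heap_block_foot_t *foot = block_foot(head);
	assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
	assert(foot->size == head->size);
}

int main(void)
{
	/* Carve a single free block out of a static arena and verify it. */
	static uint8_t arena[256] __attribute__((aligned(16)));

	block_init(arena, sizeof(arena), true);
	block_check(arena);
	return 0;
}

The allocation paths in the hunks above then simply walk these headers forward, using the stored sizes to step from block to block and checking the free flag as they go.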
uspace/lib/c/generic/private/libc.h
r0c968a17 raa7dc64 36 36 #define LIBC_PRIVATE_LIBC_H_ 37 37 38 extern void __entry(void);38 extern int main(int, char *[]); 39 39 extern void __main(void *) __attribute__((noreturn)); 40 extern int main(int, char *[]);41 40 42 41 #endif -
uspace/lib/c/include/as.h
r0c968a17 raa7dc64 41 41 #include <libarch/config.h> 42 42 43 static inline size_t SIZE2PAGES(size_t size)44 {45 if (size == 0)46 return 0;47 48 return (size_t) ((size - 1) >> PAGE_WIDTH) + 1;49 }50 51 static inline size_t PAGES2SIZE(size_t pages)52 {53 return (size_t) (pages << PAGE_WIDTH);54 }55 56 43 extern void *as_area_create(void *address, size_t size, int flags); 57 44 extern int as_area_resize(void *address, size_t size, int flags); -
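The left-hand side of the as.h hunk above introduces SIZE2PAGES() and PAGES2SIZE() as inline helpers. The following standalone check illustrates the intended rounding behaviour; PAGE_WIDTH of 12 (4 KiB pages) is an assumption here, the real value comes from <libarch/config.h>.

#include <assert.h>
#include <stddef.h>

#define PAGE_WIDTH  12  /* assumed; architecture-specific in HelenOS */

static size_t SIZE2PAGES(size_t size)
{
	if (size == 0)
		return 0;

	return ((size - 1) >> PAGE_WIDTH) + 1;  /* round up to whole pages */
}

static size_t PAGES2SIZE(size_t pages)
{
	return pages << PAGE_WIDTH;
}

int main(void)
{
	assert(SIZE2PAGES(0) == 0);
	assert(SIZE2PAGES(1) == 1);      /* any non-zero size occupies a page */
	assert(SIZE2PAGES(4096) == 1);   /* exactly one page */
	assert(SIZE2PAGES(4097) == 2);   /* spills into a second page */
	assert(PAGES2SIZE(2) == 8192);
	return 0;
}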
uspace/lib/c/include/malloc.h
r0c968a17 raa7dc64 38 38 #include <sys/types.h> 39 39 40 extern uintptr_t get_max_heap_addr(void); 41 40 42 extern void *malloc(const size_t size) 41 43 __attribute__((malloc)); -
uspace/lib/c/include/unistd.h
r0c968a17 raa7dc64 44 44 #endif 45 45 46 #define getpagesize() (PAGE_SIZE) 47 46 48 #ifndef SEEK_SET 47 49 #define SEEK_SET 0 … … 55 57 #define SEEK_END 2 56 58 #endif 57 58 #define getpagesize() (PAGE_SIZE)59 59 60 60 extern int dup2(int oldfd, int newfd); -
uspace/srv/devman/devman.c
r0c968a17 raa7dc64 1215 1215 if (info != NULL) { 1216 1216 memset(info, 0, sizeof(dev_class_info_t)); 1217 li nk_initialize(&info->dev_classes);1218 li nk_initialize(&info->devmap_link);1219 li nk_initialize(&info->link);1217 list_initialize(&info->dev_classes); 1218 list_initialize(&info->devmap_link); 1219 list_initialize(&info->link); 1220 1220 } 1221 1221 -
uspace/srv/devmap/devmap.c
r0c968a17 raa7dc64 423 423 */ 424 424 list_initialize(&driver->devices); 425 426 link_initialize(&driver->drivers); 425 list_initialize(&(driver->drivers)); 427 426 428 427 fibril_mutex_lock(&drivers_list_mutex); … … 539 538 } 540 539 541 li nk_initialize(&device->devices);542 li nk_initialize(&device->driver_devices);540 list_initialize(&(device->devices)); 541 list_initialize(&(device->driver_devices)); 543 542 544 543 /* Check that device is not already registered */ … … 943 942 } 944 943 945 li nk_initialize(&device->devices);946 li nk_initialize(&device->driver_devices);944 list_initialize(&(device->devices)); 945 list_initialize(&(device->driver_devices)); 947 946 948 947 /* Get unique device handle */ -
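The devman.c and devmap.c hunks above differ only in whether structure members are set up with link_initialize() or list_initialize(). The sketch below illustrates why the two calls are not interchangeable; the types and function bodies are simplified stand-ins for illustration, not the actual <adt/list.h> definitions.

#include <stddef.h>

/* Simplified circular doubly-linked list in the spirit of <adt/list.h>. */
typedef struct link {
	struct link *prev;
	struct link *next;
} link_t;

/* A list head is initialized to point at itself: an empty circular list. */
static void list_initialize(link_t *head)
{
	head->prev = head;
	head->next = head;
}

/* A membership link starts out disconnected; it becomes valid only once
 * the owning item is appended to some list. */
static void link_initialize(link_t *link)
{
	link->prev = NULL;
	link->next = NULL;
}

/* Hypothetical driver record mirroring the shape used in devmap.c. */
typedef struct {
	link_t devices;   /* head of the list of devices this driver owns */
	link_t drivers;   /* link placing this driver in a global list */
} driver_t;

int main(void)
{
	driver_t drv;

	list_initialize(&drv.devices);   /* the structure owns this list */
	link_initialize(&drv.drivers);   /* the structure is a member of another */

	/* An empty head points back at itself; an unlinked member does not. */
	return (drv.devices.next == &drv.devices && drv.drivers.next == NULL) ? 0 : 1;
}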
uspace/srv/fs/devfs/devfs_ops.c
r0c968a17 raa7dc64 130 130 { 131 131 devfs_node_t *node = (devfs_node_t *) pfn->data; 132 int ret;133 132 134 133 if (node->handle == 0) { … … 146 145 147 146 if (str_cmp(devs[pos].name, component) == 0) { 148 ret = devfs_node_get_internal(rfn, DEV_HANDLE_NAMESPACE, devs[pos].handle);149 147 free(devs); 150 return ret;148 return devfs_node_get_internal(rfn, DEV_HANDLE_NAMESPACE, devs[pos].handle); 151 149 } 152 150 } … … 164 162 for (pos = 0; pos < count; pos++) { 165 163 if (str_cmp(devs[pos].name, component) == 0) { 166 ret = devfs_node_get_internal(rfn, DEV_HANDLE_DEVICE, devs[pos].handle);167 164 free(devs); 168 return ret;165 return devfs_node_get_internal(rfn, DEV_HANDLE_DEVICE, devs[pos].handle); 169 166 } 170 167 } … … 187 184 for (pos = 0; pos < count; pos++) { 188 185 if (str_cmp(devs[pos].name, component) == 0) { 189 ret = devfs_node_get_internal(rfn, DEV_HANDLE_DEVICE, devs[pos].handle);190 186 free(devs); 191 return ret;187 return devfs_node_get_internal(rfn, DEV_HANDLE_DEVICE, devs[pos].handle); 192 188 } 193 189 } -
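In the devfs_ops.c hunks above, the two revisions differ in whether devs[pos].handle is read before or after free(devs): one side captures the result of devfs_node_get_internal() into ret while the array is still valid, the other frees the array first and only then evaluates the call's arguments. A minimal sketch of the capture-before-free pattern, with node_get() as a hypothetical stand-in for the real call:

#include <stdlib.h>

typedef struct {
	int handle;
} dev_desc_t;

/* Hypothetical stand-in for devfs_node_get_internal(). */
static int node_get(int handle)
{
	return handle;
}

static int lookup(dev_desc_t *devs, size_t pos)
{
	/* Read the needed element while the array is still alive ... */
	int ret = node_get(devs[pos].handle);

	/* ... then release the backing storage and return the captured value. */
	free(devs);
	return ret;
}

int main(void)
{
	dev_desc_t *devs = calloc(4, sizeof(dev_desc_t));
	if (devs == NULL)
		return 1;

	devs[2].handle = 42;
	return (lookup(devs, 2) == 42) ? 0 : 1;
}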
uspace/srv/loader/arch/abs32le/_link.ld.in
r0c968a17 raa7dc64 3 3 * is the base address and the special interp section. 4 4 */ 5 6 5 STARTUP(LIBC_PREFIX/arch/UARCH/src/entry.o) 7 6 ENTRY(__entry) … … 55 54 } :data 56 55 56 . = ALIGN(0x1000); 57 58 _heap = .; 59 57 60 /DISCARD/ : { 58 61 *(*); -
uspace/srv/loader/arch/amd64/_link.ld.in
r0c968a17 raa7dc64 54 54 } :data 55 55 56 . = ALIGN(0x1000); 57 _heap = .; 58 56 59 #ifdef CONFIG_LINE_DEBUG 57 60 .comment 0 : { *(.comment); } :debug -
uspace/srv/loader/arch/arm32/_link.ld.in
r0c968a17 raa7dc64 3 3 * is the base address. 4 4 */ 5 6 5 STARTUP(LIBC_PREFIX/arch/UARCH/src/entry.o) 7 6 ENTRY(__entry) … … 17 16 *(.interp); 18 17 } : interp 19 18 20 19 . = 0x70001000; 21 20 22 21 .init ALIGN(0x1000): SUBALIGN(0x1000) { 23 22 *(.init); 24 } :text 25 23 } : text 26 24 .text : { 27 25 *(.text); 28 26 *(.rodata*); 29 27 } :text 30 28 … … 34 32 *(.sdata); 35 33 } :data 36 37 34 .tdata : { 38 35 _tdata_start = .; … … 40 37 _tdata_end = .; 41 38 } :data 42 43 39 .tbss : { 44 40 _tbss_start = .; … … 46 42 _tbss_end = .; 47 43 } :data 48 49 44 _tls_alignment = MAX(ALIGNOF(.tdata), ALIGNOF(.tbss)); 50 51 45 .bss : { 52 46 *(.sbss); 53 47 *(.scommon); 54 55 48 *(COMMON); 49 *(.bss); 56 50 } :data 51 52 . = ALIGN(0x1000); 53 _heap = .; 57 54 58 55 /DISCARD/ : { 59 56 *(*); 60 57 } 58 61 59 } -
uspace/srv/loader/arch/ia32/_link.ld.in
r0c968a17 raa7dc64 54 54 } :data 55 55 56 . = ALIGN(0x1000); 57 _heap = .; 58 56 59 #ifdef CONFIG_LINE_DEBUG 57 60 .comment 0 : { *(.comment); } :debug -
uspace/srv/loader/arch/ia64/_link.ld.in
r0c968a17 raa7dc64 12 12 *(.interp); 13 13 } :interp 14 14 15 15 /* On Itanium code sections must be aligned to 16 bytes. */ 16 16 . = ALIGN(0x800000000 + SIZEOF_HEADERS, 16); 17 17 18 18 .init : { 19 19 *(.init); 20 } :text 21 20 } : text 22 21 .text : { 23 22 *(.text); 24 23 *(.rodata*); 25 24 } :text 26 25 27 26 . = . + 0x4000; 28 27 29 28 .got : { 30 29 _gp = .; 31 30 *(.got*); 32 } :data 33 31 } :data 34 32 .data : { 35 33 *(.opd); … … 37 35 *(.sdata); 38 36 } :data 39 40 37 .tdata : { 41 38 _tdata_start = .; … … 43 40 _tdata_end = .; 44 41 } :data 45 46 42 .tbss : { 47 43 _tbss_start = .; … … 49 45 _tbss_end = .; 50 46 } :data 51 52 47 _tls_alignment = MAX(ALIGNOF(.tdata), ALIGNOF(.tbss)); 53 54 48 .bss : { 55 49 *(.sbss); … … 58 52 *(.bss); 59 53 } :data 60 54 55 . = ALIGN(0x4000); 56 _heap = .; 57 61 58 /DISCARD/ : { 62 59 *(*); 63 60 } 64 61 } -
uspace/srv/loader/arch/mips32/_link.ld.in
r0c968a17 raa7dc64 3 3 * is the base address. 4 4 */ 5 6 5 STARTUP(LIBC_PREFIX/arch/UARCH/src/entry.o) 7 6 ENTRY(__entry) … … 17 16 *(.interp); 18 17 } :interp 19 18 20 19 . = 0x70004000; 21 20 … … 23 22 *(.init); 24 23 } :text 25 26 24 .text : { 27 25 *(.text); 28 26 *(.rodata*); 29 27 } :text 30 31 . = . + 0x4000; 32 28 33 29 .data : { 34 30 *(.data); 35 31 *(.data.rel*); 36 32 } :data 37 33 38 34 .got : { 39 35 _gp = .; 40 36 *(.got); 41 37 } :data 42 38 43 39 .tdata : { 44 40 _tdata_start = .; … … 46 42 _tdata_end = .; 47 43 } :data 48 49 44 .tbss : { 50 45 _tbss_start = .; … … 52 47 _tbss_end = .; 53 48 } :data 54 55 49 _tls_alignment = MAX(ALIGNOF(.tdata), ALIGNOF(.tbss)); 56 50 57 51 .sbss : { 58 52 *(.scommon); 59 53 *(.sbss); 60 } 61 54 } 62 55 .bss : { 63 56 *(.bss); 64 57 *(COMMON); 65 58 } :data 66 59 60 . = ALIGN(0x4000); 61 _heap = .; 62 67 63 /DISCARD/ : { 68 64 *(*); -
uspace/srv/loader/arch/ppc32/_link.ld.in
r0c968a17 raa7dc64 3 3 * is the base address. 4 4 */ 5 6 5 STARTUP(LIBC_PREFIX/arch/UARCH/src/entry.o) 7 6 ENTRY(__entry) … … 17 16 *(.interp); 18 17 } :interp 19 18 20 19 . = 0x70001000; 21 20 22 21 .init ALIGN(0x1000) : SUBALIGN(0x1000) { 23 22 *(.init); 24 23 } :text 25 26 24 .text : { 27 25 *(.text); … … 33 31 *(.sdata); 34 32 } :data 35 36 33 .tdata : { 37 34 _tdata_start = .; … … 39 36 _tdata_end = .; 40 37 } :data 41 42 38 .tbss : { 43 39 _tbss_start = .; … … 45 41 _tbss_end = .; 46 42 } :data 47 48 43 _tls_alignment = MAX(ALIGNOF(.tdata), ALIGNOF(.tbss)); 49 50 44 .bss : { 51 45 *(.sbss); … … 53 47 *(.bss); 54 48 } :data 49 50 . = ALIGN(0x1000); 51 _heap = .; 55 52 56 53 /DISCARD/ : { 57 54 *(*); 58 55 } 56 59 57 } -
uspace/srv/loader/arch/sparc64/_link.ld.in
r0c968a17 raa7dc64 12 12 *(.interp); 13 13 } :interp 14 14 15 15 . = 0x70004000 + SIZEOF_HEADERS; 16 16 17 17 .init : { 18 18 *(.init); 19 19 } :text 20 21 20 .text : { 22 21 *(.text); 23 22 *(.rodata*); 24 23 } :text 25 24 26 25 . = . + 0x4000; 27 26 28 27 .got : { 29 28 _gp = .; 30 29 *(.got*); 31 30 } :data 32 33 31 .data : { 34 32 *(.data); 35 33 *(.sdata); 36 34 } :data 37 38 35 .tdata : { 39 36 _tdata_start = .; … … 41 38 _tdata_end = .; 42 39 } :data 43 44 40 .tbss : { 45 41 _tbss_start = .; … … 47 43 _tbss_end = .; 48 44 } :data 49 50 45 _tls_alignment = MAX(ALIGNOF(.tdata), ALIGNOF(.tbss)); 51 52 46 .bss : { 53 47 *(.sbss); … … 55 49 *(.bss); 56 50 } :data 51 52 . = ALIGN(0x4000); 53 _heap = .; 57 54 58 55 /DISCARD/ : { 59 56 *(*); 60 57 } 58 61 59 } -
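Each loader linker script in the hunks above gains a page-aligned "_heap = .;" symbol on the right-hand side, which the malloc.c hunk consumes via "extern char _heap;". The sketch below shows how such a linker-defined symbol is used from C; it links only against a script that actually defines _heap, and the printf() is purely illustrative.

/* Only the symbol's address is meaningful; it has no storage of its own,
 * so it is declared as a plain char and never read or written. */
#include <stdint.h>
#include <stdio.h>

extern char _heap;   /* placed by the "_heap = .;" line in the linker script */

int main(void)
{
	uintptr_t heap_base = (uintptr_t) &_heap;

	printf("heap starts at %p\n", (void *) heap_base);
	return 0;
}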
uspace/srv/loader/elf_load.c
r0c968a17 raa7dc64 109 109 int fd; 110 110 int rc; 111 111 112 112 fd = open(file_name, O_RDONLY); 113 113 if (fd < 0) { … … 344 344 seg_ptr = (void *) seg_addr; 345 345 346 DPRINTF("Load segment at addr %p, size 0x%x\n", (void *)seg_addr,346 DPRINTF("Load segment at addr %p, size 0x%x\n", seg_addr, 347 347 entry->p_memsz); 348 348 … … 372 372 mem_sz = entry->p_memsz + (entry->p_vaddr - base); 373 373 374 DPRINTF("Map to seg_addr=%p-%p.\n", (void *) seg_addr, 375 (void *) (entry->p_vaddr + bias + 376 ALIGN_UP(entry->p_memsz, PAGE_SIZE))); 374 DPRINTF("Map to seg_addr=%p-%p.\n", seg_addr, 375 entry->p_vaddr + bias + ALIGN_UP(entry->p_memsz, PAGE_SIZE)); 377 376 378 377 /* … … 387 386 } 388 387 389 DPRINTF("as_area_create(%p, %#zx, %d) -> %p\n",390 (void *) (base + bias), mem_sz, flags, (void *)a);388 DPRINTF("as_area_create(%p, 0x%x, %d) -> 0x%lx\n", 389 base + bias, mem_sz, flags, (uintptr_t)a); 391 390 392 391 /* … … 465 464 (void *)((uint8_t *)entry->sh_addr + elf->bias); 466 465 DPRINTF("Dynamic section found at %p.\n", 467 (void *)elf->info->dynamic);466 (uintptr_t)elf->info->dynamic); 468 467 break; 469 468 default: