Changes in [d88218b:0b37882] in mainline

Files: 27 edited
kernel/generic/include/mm/as.h
(diff d88218b → 0b37882)
 extern sysarg_t sys_as_area_change_flags(uintptr_t, unsigned int);
 extern sysarg_t sys_as_area_destroy(uintptr_t);
+extern sysarg_t sys_as_get_unmapped_area(uintptr_t, size_t);
 
 /* Introspection functions. */
kernel/generic/include/syscall/syscall.h
(diff d88218b → 0b37882)
     SYS_AS_AREA_CHANGE_FLAGS,
     SYS_AS_AREA_DESTROY,
+    SYS_AS_GET_UNMAPPED_AREA,
 
     SYS_IPC_CALL_SYNC_FAST,
kernel/generic/src/mm/as.c
(diff d88218b → 0b37882)
 #include <memstr.h>
 #include <macros.h>
+#include <bitops.h>
 #include <arch.h>
 #include <errno.h>
…
 /** Check area conflicts with other areas.
  *
- * @param as
- * @param va         Starting virtual address of the area being tested.
- * @param size       Size of the area being tested.
- * @param avoid_area Do not touch this area.
+ * @param as    Address space.
+ * @param addr  Starting virtual address of the area being tested.
+ * @param count Number of pages in the area being tested.
+ * @param avoid Do not touch this area.
  *
  * @return True if there is no conflict, false otherwise.
  *
  */
-NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
-    as_area_t *avoid_area)
-{
+NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
+    size_t count, as_area_t *avoid)
+{
+    ASSERT((addr % PAGE_SIZE) == 0);
     ASSERT(mutex_locked(&as->lock));
…
     /*
      * We don't want any area to have conflicts with NULL page.
      */
-    if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
+    if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
         return false;
…
     btree_node_t *leaf;
     as_area_t *area =
-        (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
+        (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
     if (area) {
-        if (area != avoid_area)
+        if (area != avoid)
             return false;
     }
…
         area = (as_area_t *) node->value[node->keys - 1];
 
-        mutex_lock(&area->lock);
-
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
             mutex_unlock(&area->lock);
-            return false;
         }
-
-        mutex_unlock(&area->lock);
     }
…
         area = (as_area_t *) node->value[0];
 
-        mutex_lock(&area->lock);
-
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (area != avoid) {
+            mutex_lock(&area->lock);
+
+            if (overlaps(addr, count << PAGE_WIDTH,
+                area->base, area->pages << PAGE_WIDTH)) {
+                mutex_unlock(&area->lock);
+                return false;
+            }
+
             mutex_unlock(&area->lock);
-            return false;
         }
-
-        mutex_unlock(&area->lock);
     }
…
         area = (as_area_t *) leaf->value[i];
 
-        if (area == avoid_area)
+        if (area == avoid)
             continue;
 
         mutex_lock(&area->lock);
 
-        if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+        if (overlaps(addr, count << PAGE_WIDTH,
+            area->base, area->pages << PAGE_WIDTH)) {
             mutex_unlock(&area->lock);
             return false;
…
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-        return !overlaps(va, size,
+        return !overlaps(addr, count << PAGE_WIDTH,
             KERNEL_ADDRESS_SPACE_START,
             KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
…
     mem_backend_data_t *backend_data)
 {
-    if (base % PAGE_SIZE)
+    if ((base % PAGE_SIZE) != 0)
         return NULL;
 
-    if (!size)
+    if (size == 0)
         return NULL;
+
+    size_t pages = SIZE2FRAMES(size);
 
     /* Writeable executable areas are not supported.
      */
…
     mutex_lock(&as->lock);
 
-    if (!check_area_conflicts(as, base, size, NULL)) {
+    if (!check_area_conflicts(as, base, pages, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
…
     area->flags = flags;
     area->attributes = attrs;
-    area->pages = SIZE2FRAMES(size);
+    area->pages = pages;
     area->resident = 0;
     area->base = base;
…
     mutex_lock(&area->lock);
 
-    if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
+    if ((area->base <= va) &&
+        (va < area->base + (area->pages << PAGE_WIDTH)))
         return area;
…
     mutex_lock(&area->lock);
 
-    if (va < area->base + area->pages * PAGE_SIZE)
+    if (va < area->base + (area->pages << PAGE_WIDTH))
         return area;
…
     if (pages < area->pages) {
-        uintptr_t start_free = area->base + pages * PAGE_SIZE;
+        uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
…
         ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-            area->base + pages * PAGE_SIZE, area->pages - pages);
+            area->base + (pages << PAGE_WIDTH), area->pages - pages);
…
                 size_t i = 0;
 
-                if (overlaps(ptr, size * PAGE_SIZE, area->base,
-                    pages * PAGE_SIZE)) {
+                if (overlaps(ptr, size << PAGE_WIDTH, area->base,
+                    pages << PAGE_WIDTH)) {
 
-                    if (ptr + size * PAGE_SIZE <= start_free) {
+                    if (ptr + (size << PAGE_WIDTH) <= start_free) {
                         /*
                          * The whole interval fits
…
                 for (; i < size; i++) {
                     pte_t *pte = page_mapping_find(as, ptr +
-                        i * PAGE_SIZE);
+                        (i << PAGE_WIDTH));
 
                     ASSERT(pte);
…
                         (area->backend->frame_free)) {
                         area->backend->frame_free(area,
-                            ptr + i * PAGE_SIZE,
+                            ptr + (i << PAGE_WIDTH),
                             PTE_GET_FRAME(pte));
                     }
 
                     page_mapping_remove(as, ptr +
-                        i * PAGE_SIZE);
+                        (i << PAGE_WIDTH));
                 }
…
-        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
+        tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
             area->pages - pages);
…
         as_invalidate_translation_cache(as, area->base +
-            pages * PAGE_SIZE, area->pages - pages);
+            (pages << PAGE_WIDTH), area->pages - pages);
         tlb_shootdown_finalize(ipl);
…
          * Check for overlaps with other address space areas.
          */
-        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
-            area)) {
+        if (!check_area_conflicts(as, address, pages, area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
…
             for (size = 0; size < (size_t) node->value[i]; size++) {
-                pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
                 ASSERT(pte);
…
                     (area->backend->frame_free)) {
                     area->backend->frame_free(area,
-                        ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
+                        ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
                 }
 
-                page_mapping_remove(as, ptr + size * PAGE_SIZE);
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
…
-    size_t src_size = src_area->pages * PAGE_SIZE;
+    size_t src_size = src_area->pages << PAGE_WIDTH;
     unsigned int src_flags = src_area->flags;
     mem_backend_t *src_backend = src_area->backend;
…
             for (size = 0; size < (size_t) node->value[i]; size++) {
-                pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);
+                pte_t *pte =
+                    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
                 ASSERT(pte);
…
                 /* Remove old mapping */
-                page_mapping_remove(as, ptr + size * PAGE_SIZE);
+                page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
             }
…
             /* Insert the new mapping */
-            page_mapping_insert(as, ptr + size * PAGE_SIZE,
+            page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
                 old_frame[frame_idx++], page_flags);
…
     if (src_area) {
-        size = src_area->pages * PAGE_SIZE;
+        size = src_area->pages << PAGE_WIDTH;
         mutex_unlock(&src_area->lock);
     } else
…
         if (page >= right_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
             return false;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
             return false;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
…
             btree_remove(&area->used_space, right_pg, leaf);
             goto success;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
…
             node->value[node->keys - 1] += count;
             goto success;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
…
-        if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+        if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
             return false;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be added by moving the base of the
…
         if (page < left_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
             return false;
-        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-            right_cnt * PAGE_SIZE)) {
+        } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+            right_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the right interval. */
             return false;
-        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-            (page + count * PAGE_SIZE == right_pg)) {
+        } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+            (page + (count << PAGE_WIDTH) == right_pg)) {
             /*
              * The interval can be added by merging the two already
…
             btree_remove(&area->used_space, right_pg, node);
             goto success;
-        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+        } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
             /*
              * The interval can be added by simply growing the left
…
             leaf->value[leaf->keys - 1] += count;
             goto success;
-        } else if (page + count * PAGE_SIZE == right_pg) {
+        } else if (page + (count << PAGE_WIDTH) == right_pg) {
             /*
              * The interval can be addded by simply moving base of
…
-        if (overlaps(page, count * PAGE_SIZE, left_pg,
-            left_cnt * PAGE_SIZE)) {
+        if (overlaps(page, count << PAGE_WIDTH, left_pg,
+            left_cnt << PAGE_WIDTH)) {
             /* The interval intersects with the left interval. */
             return false;
-        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
+        } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
             /*
              * The interval can be added by growing the left
…
-            if (overlaps(page, count * PAGE_SIZE, left_pg,
-                left_cnt * PAGE_SIZE)) {
+            if (overlaps(page, count << PAGE_WIDTH, left_pg,
+                left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the left
                  * interval.
                  */
                 return false;
-            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
-                right_cnt * PAGE_SIZE)) {
+            } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+                right_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval intersects with the right
                  * interval.
                  */
                 return false;
-            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
-                (page + count * PAGE_SIZE == right_pg)) {
+            } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+                (page + (count << PAGE_WIDTH) == right_pg)) {
                 /*
                  * The interval can be added by merging the two
…
                 btree_remove(&area->used_space, right_pg, leaf);
                 goto success;
-            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval can be added by simply growing
…
                 leaf->value[i - 1] += count;
                 goto success;
-            } else if (page + count * PAGE_SIZE == right_pg) {
+            } else if (page + (count << PAGE_WIDTH) == right_pg) {
                 /*
                  * The interval can be addded by simply moving
…
     for (i = 0; i < leaf->keys; i++) {
         if (leaf->key[i] == page) {
-            leaf->key[i] += count * PAGE_SIZE;
+            leaf->key[i] += count << PAGE_WIDTH;
             leaf->value[i] -= count;
             goto success;
…
         size_t left_cnt = (size_t) node->value[node->keys - 1];
 
-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt * PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                 node->value[node->keys - 1] -= count;
                 goto success;
-            } else if (page + count * PAGE_SIZE <
-                left_pg + left_cnt*PAGE_SIZE) {
+            } else if (page + (count << PAGE_WIDTH) <
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * new interval.
                  */
-                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
                 node->value[node->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt, leaf);
+                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
                 goto success;
…
         size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt * PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                 leaf->value[leaf->keys - 1] -= count;
                 goto success;
-            } else if (page + count * PAGE_SIZE < left_pg +
-                left_cnt * PAGE_SIZE) {
+            } else if (page + (count << PAGE_WIDTH) < left_pg +
+                (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the rightmost
…
                  * interval.
                  */
-                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
-                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
+                size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
                 leaf->value[leaf->keys - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt, leaf);
+                    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
                 goto success;
…
          * to (i - 1) and i.
          */
-        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
-            count * PAGE_SIZE)) {
-            if (page + count * PAGE_SIZE ==
-                left_pg + left_cnt*PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+            count << PAGE_WIDTH)) {
+            if (page + (count << PAGE_WIDTH) ==
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the
…
                 leaf->value[i - 1] -= count;
                 goto success;
-            } else if (page + count * PAGE_SIZE <
-                left_pg + left_cnt * PAGE_SIZE) {
+            } else if (page + (count << PAGE_WIDTH) <
+                left_pg + (left_cnt << PAGE_WIDTH)) {
                 /*
                  * The interval is contained in the
…
                  */
                 size_t new_cnt = ((left_pg +
-                    left_cnt * PAGE_SIZE) -
-                    (page + count * PAGE_SIZE)) >>
+                    (left_cnt << PAGE_WIDTH)) -
+                    (page + (count << PAGE_WIDTH))) >>
                     PAGE_WIDTH;
                 leaf->value[i - 1] -= count + new_cnt;
                 btree_insert(&area->used_space, page +
-                    count * PAGE_SIZE, (void *) new_cnt,
+                    (count << PAGE_WIDTH), (void *) new_cnt,
                     leaf);
                 goto success;
…
 }
 
+/** Return pointer to unmapped address space area
+ *
+ * @param base Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Pointer to the beginning of unmapped address space area.
+ *
+ */
+sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
+{
+    if (size == 0)
+        return 0;
+
+    /*
+     * Make sure we allocate from page-aligned
+     * address. Check for possible overflow in
+     * each step.
+     */
+
+    size_t pages = SIZE2FRAMES(size);
+    uintptr_t ret = 0;
+
+    /*
+     * Find the lowest unmapped address aligned on the sz
+     * boundary, not smaller than base and of the required size.
+     */
+
+    mutex_lock(&AS->lock);
+
+    /* First check the base address itself */
+    uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
+    if ((addr >= base) &&
+        (check_area_conflicts(AS, addr, pages, NULL)))
+        ret = addr;
+
+    /* Eventually check the addresses behind each area */
+    link_t *cur;
+    for (cur = AS->as_area_btree.leaf_head.next;
+        (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
+        cur = cur->next) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
+
+        btree_key_t i;
+        for (i = 0; (ret == 0) && (i < node->keys); i++) {
+            as_area_t *area = (as_area_t *) node->value[i];
+
+            mutex_lock(&area->lock);
+
+            uintptr_t addr =
+                ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
+                PAGE_SIZE);
+
+            if ((addr >= base) && (addr >= area->base) &&
+                (check_area_conflicts(AS, addr, pages, area)))
+                ret = addr;
+
+            mutex_unlock(&area->lock);
+        }
+    }
+
+    mutex_unlock(&AS->lock);
+
+    return (sysarg_t) ret;
+}
+
 /** Get list of adress space areas.
…
     mutex_lock(&as->lock);
 
-    /* print out info about address space areas */
+    /* Print out info about address space areas */
     link_t *cur;
     for (cur = as->as_area_btree.leaf_head.next;
kernel/generic/src/syscall/syscall.c
(diff d88218b → 0b37882)
     (syshandler_t) sys_as_area_change_flags,
     (syshandler_t) sys_as_area_destroy,
+    (syshandler_t) sys_as_get_unmapped_area,
 
     /* IPC related syscalls. */
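Note that the syscall number is simply the position of the enum entry in syscall.h, so the handler has to be inserted at the matching index of this table; a misplaced entry silently shifts every later syscall. A stand-alone illustration (all names here are made up) of guarding that invariant with a compile-time check:

    #include <assert.h>

    typedef int (*handler_t)(void);

    typedef enum {
        DEMO_AS_AREA_CHANGE_FLAGS,
        DEMO_AS_AREA_DESTROY,
        DEMO_AS_GET_UNMAPPED_AREA,  /* new entry */
        DEMO_LAST
    } demo_syscall_t;

    static int change_flags(void) { return 0; }
    static int destroy(void) { return 0; }
    static int get_unmapped_area(void) { return 0; }

    /* Must stay in the same order as the enum above. */
    static handler_t demo_table[] = {
        change_flags,
        destroy,
        get_unmapped_area
    };

    /* Catch a table/enum mismatch at compile time (C11). */
    static_assert(sizeof(demo_table) / sizeof(demo_table[0]) == DEMO_LAST,
        "syscall table out of sync with syscall numbers");

    int main(void)
    {
        return demo_table[DEMO_AS_GET_UNMAPPED_AREA]();
    }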
uspace/lib/c/arch/abs32le/_link.ld.in
(diff d88218b → 0b37882)
     } :data
 
-    . = ALIGN(0x1000);
-
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
uspace/lib/c/arch/amd64/_link.ld.in
(diff d88218b → 0b37882)
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
 #ifdef CONFIG_LINE_DEBUG
     .comment 0 : { *(.comment); } :debug
…
         *(*);
     }
-
 }
uspace/lib/c/arch/arm32/_link.ld.in
(diff d88218b → 0b37882)
     .init : {
         *(.init);
-    } : text
+    } :text
 
     .text : {
         *(.text);
         *(.rodata*);
     } :text
…
     .bss : {
         *(.sbss);
         *(.scommon);
-        *(.bss);
-        *(COMMON);
+        *(COMMON);
+        *(.bss);
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
-
 }
(the remaining hunks in this file only add blank lines between sections)
uspace/lib/c/arch/ia32/_link.ld.in
(diff d88218b → 0b37882)
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
 #ifdef CONFIG_LINE_DEBUG
     .comment 0 : { *(.comment); } :debug
uspace/lib/c/arch/ia64/_link.ld.in
(diff d88218b → 0b37882)
     .init : {
         *(.init);
-    } : text
+    } :text
 
     .text : {
         *(.text);
         *(.rodata*);
     } :text
…
         *(.bss);
     } :data
 
-    . = ALIGN(0x4000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
 }
(the remaining hunks in this file only add blank lines and strip trailing whitespace)
uspace/lib/c/arch/mips32/_link.ld.in
(diff d88218b → 0b37882)
     .tdata : {
         _tdata_start = .;
         *(.tdata);
         _tdata_end = .;
+    } :data
+
+    .tbss : {
         _tbss_start = .;
         *(.tbss);
         _tbss_end = .;
     } :data
-    _tls_alignment = ALIGNOF(.tdata);
 
+    _tls_alignment = MAX(ALIGNOF(.tdata), ALIGNOF(.tbss));
+
     .sbss : {
         *(.scommon);
         *(.sbss);
     }
+
     .bss : {
         *(.bss);
         *(COMMON);
     } :data
 
-    . = ALIGN(0x4000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
(the remaining hunks in this file only add blank lines and strip trailing whitespace)
uspace/lib/c/arch/mips32/src/entry.s
(diff d88218b → 0b37882)
 .text
 .section .init, "ax"
 
 .global __entry
+.global __entry_driver
 .set noreorder
 .option pic2
…
     nop
 .end
 
-# Alignment of output section data to 0x4000
-.section .data
-.align 14
uspace/lib/c/arch/ppc32/_link.ld.in
(diff d88218b → 0b37882)
         *(.bss);
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
-
 }
(the remaining hunks in this file only add blank lines between sections)
uspace/lib/c/arch/sparc64/_link.ld.in
(diff d88218b → 0b37882)
         *(.bss);
     } :data
 
-    . = ALIGN(0x4000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
-
 }
(the remaining hunks in this file only add blank lines between sections)
uspace/lib/c/generic/as.c
(diff d88218b → 0b37882)
 #include <bitops.h>
 #include <malloc.h>
-
-/** Last position allocated by as_get_mappable_page */
-static uintptr_t last_allocated = 0;
+#include "private/libc.h"
 
 /** Create address space area.
…
 }
 
-/** Return pointer to some unmapped area, where fits new as_area
+/** Return pointer to unmapped address space area
  *
  * @param size Requested size of the allocation.
  *
- * @return pointer to the beginning
+ * @return Pointer to the beginning of unmapped address space area.
  *
  */
 void *as_get_mappable_page(size_t size)
 {
-    if (size == 0)
-        return NULL;
-
-    size_t sz = 1 << (fnzb(size - 1) + 1);
-    if (last_allocated == 0)
-        last_allocated = get_max_heap_addr();
-
-    /*
-     * Make sure we allocate from naturally aligned address.
-     */
-    uintptr_t res = ALIGN_UP(last_allocated, sz);
-    last_allocated = res + ALIGN_UP(size, PAGE_SIZE);
-
-    return ((void *) res);
+    return (void *) __SYSCALL2(SYS_AS_GET_UNMAPPED_AREA,
+        (sysarg_t) __entry, (sysarg_t) size);
 }
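With this change as_get_mappable_page() no longer guesses addresses above a bumped last_allocated pointer; the kernel performs the search, with the program entry point __entry serving as the lower bound so that returned areas land above the binary image. A sketch of the typical calling pattern, mirroring what area_create() in malloc.c (below) does; error handling is kept minimal and the headers and return conventions are those of this source tree:

    #include <as.h>

    /* Map one page of anonymous read/write memory at a
     * kernel-chosen address. Returns NULL on failure. */
    static void *map_scratch_page(void)
    {
        void *addr = as_get_mappable_page(PAGE_SIZE);
        if (addr == NULL)
            return NULL;

        void *area = as_area_create(addr, PAGE_SIZE,
            AS_AREA_READ | AS_AREA_WRITE);
        if (area == (void *) -1)
            return NULL;

        return area;
    }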
uspace/lib/c/generic/malloc.c
(diff d88218b → 0b37882)
 #include "private/malloc.h"
 
-/* Magic used in heap headers. */
-#define HEAP_BLOCK_HEAD_MAGIC 0xBEEF0101
-
-/* Magic used in heap footers. */
-#define HEAP_BLOCK_FOOT_MAGIC 0xBEEF0202
-
-/** Allocation alignment (this also covers the alignment of fields
-    in the heap header and footer) */
+/** Magic used in heap headers. */
+#define HEAP_BLOCK_HEAD_MAGIC UINT32_C(0xBEEF0101)
+
+/** Magic used in heap footers. */
+#define HEAP_BLOCK_FOOT_MAGIC UINT32_C(0xBEEF0202)
+
+/** Magic used in heap descriptor. */
+#define HEAP_AREA_MAGIC UINT32_C(0xBEEFCAFE)
+
+/** Allocation alignment.
+ *
+ * This also covers the alignment of fields
+ * in the heap header and footer.
+ *
+ */
 #define BASE_ALIGN 16
 
-/**
- * Either 4 * 256M on 32-bit architecures or 16 * 256M on 64-bit architectures
- */
-#define MAX_HEAP_SIZE (sizeof(uintptr_t) << 28)
-
-/**
- *
- */
-#define STRUCT_OVERHEAD (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
-
-/**
- * Calculate real size of a heap block (with header and footer)
+/** Overhead of each heap block. */
+#define STRUCT_OVERHEAD \
+    (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
+
+/** Calculate real size of a heap block.
+ *
+ * Add header and footer size.
+ *
  */
 #define GROSS_SIZE(size) ((size) + STRUCT_OVERHEAD)
 
-/**
- * Calculate net size of a heap block (without header and footer)
+/** Calculate net size of a heap block.
+ *
+ * Subtract header and footer size.
+ *
  */
 #define NET_SIZE(size) ((size) - STRUCT_OVERHEAD)
+
+/** Get first block in heap area.
+ *
+ */
+#define AREA_FIRST_BLOCK(area) \
+    (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
+
+/** Get footer in heap block.
+ *
+ */
+#define BLOCK_FOOT(head) \
+    ((heap_block_foot_t *) \
+        (((uintptr_t) head) + head->size - sizeof(heap_block_foot_t)))
+
+/** Heap area.
+ *
+ * The memory managed by the heap allocator is divided into
+ * multiple discontinuous heaps. Each heap is represented
+ * by a separate address space area which has this structure
+ * at its very beginning.
+ *
+ */
+typedef struct heap_area {
+    /** Start of the heap area (including this structure)
+     *
+     * Aligned on page boundary.
+     *
+     */
+    void *start;
+
+    /** End of the heap area (aligned on page boundary) */
+    void *end;
+
+    /** Next heap area */
+    struct heap_area *next;
+
+    /** A magic value */
+    uint32_t magic;
+} heap_area_t;
 
 /** Header of a heap block
…
     bool free;
 
+    /** Heap area this block belongs to */
+    heap_area_t *area;
+
     /* A magic value to detect overwrite of heap header */
     uint32_t magic;
…
 } heap_block_foot_t;
 
-/** Linker heap symbol */
-extern char _heap;
+/** First heap area */
+static heap_area_t *first_heap_area = NULL;
+
+/** Last heap area */
+static heap_area_t *last_heap_area = NULL;
+
+/** Next heap block to examine (next fit algorithm) */
+static heap_block_head_t *next = NULL;
 
 /** Futex for thread-safe heap manipulation */
 static futex_t malloc_futex = FUTEX_INITIALIZER;
 
-/** Address of heap start */
-static void *heap_start = 0;
-
-/** Address of heap end */
-static void *heap_end = 0;
-
-/** Maximum heap size */
-static size_t max_heap_size = (size_t) -1;
-
-/** Current number of pages of heap area */
-static size_t heap_pages = 0;
-
 /** Initialize a heap block
  *
- * Fills in the structures related to a heap block.
+ * Fill in the structures related to a heap block.
  * Should be called only inside the critical section.
  *
…
  * @param size Size of the block including the header and the footer.
  * @param free Indication of a free block.
- *
- */
-static void block_init(void *addr, size_t size, bool free)
+ * @param area Heap area the block belongs to.
+ *
+ */
+static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
 {
     /* Calculate the position of the header and the footer */
     heap_block_head_t *head = (heap_block_head_t *) addr;
-    heap_block_foot_t *foot =
-        (heap_block_foot_t *) (addr + size - sizeof(heap_block_foot_t));
 
     head->size = size;
     head->free = free;
+    head->area = area;
     head->magic = HEAP_BLOCK_HEAD_MAGIC;
+
+    heap_block_foot_t *foot = BLOCK_FOOT(head);
 
     foot->size = size;
…
     assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
 
-    heap_block_foot_t *foot =
-        (heap_block_foot_t *) (addr + head->size - sizeof(heap_block_foot_t));
+    heap_block_foot_t *foot = BLOCK_FOOT(head);
 
     assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
…
-/** Increase the heap area size
- *
- * Should be called only inside the critical section.
- *
- * @param size Number of bytes to grow the heap by.
- *
- */
-static bool grow_heap(size_t size)
-{
-    if (size == 0)
-        return false;
-
-    if ((heap_start + size < heap_start) || (heap_end + size < heap_end))
-        return false;
-
-    size_t heap_size = (size_t) (heap_end - heap_start);
-
-    if ((max_heap_size != (size_t) -1) && (heap_size + size > max_heap_size))
-        return false;
-
-    size_t pages = (size - 1) / PAGE_SIZE + 1;
-
-    if (as_area_resize((void *) &_heap, (heap_pages + pages) * PAGE_SIZE, 0)
-        == EOK) {
-        void *end = (void *) ALIGN_DOWN(((uintptr_t) &_heap) +
-            (heap_pages + pages) * PAGE_SIZE, BASE_ALIGN);
-        block_init(heap_end, end - heap_end, true);
-        heap_pages += pages;
-        heap_end = end;
-        return true;
-    }
-
-    return false;
-}
-
-/** Decrease the heap area
- *
- * Should be called only inside the critical section.
- *
- * @param size Number of bytes to shrink the heap by.
- *
- */
-static void shrink_heap(void)
-{
-    // TODO
+/** Check a heap area structure
+ *
+ * @param addr Address of the heap area.
+ *
+ */
+static void area_check(void *addr)
+{
+    heap_area_t *area = (heap_area_t *) addr;
+
+    assert(area->magic == HEAP_AREA_MAGIC);
+    assert(area->start < area->end);
+    assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
+    assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
+}
+
+/** Create new heap area
+ *
+ * @param start Preffered starting address of the new area.
+ * @param size  Size of the area.
+ *
+ */
+static bool area_create(size_t size)
+{
+    void *start = as_get_mappable_page(size);
+    if (start == NULL)
+        return false;
+
+    /* Align the heap area on page boundary */
+    void *astart = (void *) ALIGN_UP((uintptr_t) start, PAGE_SIZE);
+    size_t asize = ALIGN_UP(size, PAGE_SIZE);
+
+    astart = as_area_create(astart, asize, AS_AREA_WRITE | AS_AREA_READ);
+    if (astart == (void *) -1)
+        return false;
+
+    heap_area_t *area = (heap_area_t *) astart;
+
+    area->start = astart;
+    area->end = (void *)
+        ALIGN_DOWN((uintptr_t) astart + asize, BASE_ALIGN);
+    area->next = NULL;
+    area->magic = HEAP_AREA_MAGIC;
+
+    void *block = (void *) AREA_FIRST_BLOCK(area);
+    size_t bsize = (size_t) (area->end - block);
+
+    block_init(block, bsize, true, area);
+
+    if (last_heap_area == NULL) {
+        first_heap_area = area;
+        last_heap_area = area;
+    } else {
+        last_heap_area->next = area;
+        last_heap_area = area;
+    }
+
+    return true;
+}
+
+/** Try to enlarge a heap area
+ *
+ * @param area Heap area to grow.
+ * @param size Gross size of item to allocate (bytes).
+ *
+ */
+static bool area_grow(heap_area_t *area, size_t size)
+{
+    if (size == 0)
+        return true;
+
+    area_check(area);
+
+    size_t asize = ALIGN_UP((size_t) (area->end - area->start) + size,
+        PAGE_SIZE);
+
+    /* New heap area size */
+    void *end = (void *)
+        ALIGN_DOWN((uintptr_t) area->start + asize, BASE_ALIGN);
+
+    /* Check for overflow */
+    if (end < area->start)
+        return false;
+
+    /* Resize the address space area */
+    int ret = as_area_resize(area->start, asize, 0);
+    if (ret != EOK)
+        return false;
+
+    /* Add new free block */
+    block_init(area->end, (size_t) (end - area->end), true, area);
+
+    /* Update heap area parameters */
+    area->end = end;
+
+    return true;
+}
+
+/** Try to enlarge any of the heap areas
+ *
+ * @param size Gross size of item to allocate (bytes).
+ *
+ */
+static bool heap_grow(size_t size)
+{
+    if (size == 0)
+        return true;
+
+    /* First try to enlarge some existing area */
+    heap_area_t *area;
+    for (area = first_heap_area; area != NULL; area = area->next) {
+        if (area_grow(area, size))
+            return true;
+    }
+
+    /* Eventually try to create a new area */
+    return area_create(AREA_FIRST_BLOCK(size));
+}
+
+/** Try to shrink heap space
+ *
+ * In all cases the next pointer is reset.
+ *
+ */
+static void heap_shrink(void)
+{
+    next = NULL;
 }
…
 void __malloc_init(void)
 {
-    if (!as_area_create((void *) &_heap, PAGE_SIZE,
-        AS_AREA_WRITE | AS_AREA_READ))
+    if (!area_create(PAGE_SIZE))
         abort();
-
-    heap_pages = 1;
-    heap_start = (void *) ALIGN_UP((uintptr_t) &_heap, BASE_ALIGN);
-    heap_end =
-        (void *) ALIGN_DOWN(((uintptr_t) &_heap) + PAGE_SIZE, BASE_ALIGN);
-
-    /* Make the entire area one large block. */
-    block_init(heap_start, heap_end - heap_start, true);
-}
-
-/** Get maximum heap address
- *
- */
-uintptr_t get_max_heap_addr(void)
-{
-    futex_down(&malloc_futex);
-
-    if (max_heap_size == (size_t) -1)
-        max_heap_size =
-            max((size_t) (heap_end - heap_start), MAX_HEAP_SIZE);
-
-    uintptr_t max_heap_addr = (uintptr_t) heap_start + max_heap_size;
-
-    futex_up(&malloc_futex);
-
-    return max_heap_addr;
 }
…
         /* Block big enough -> split. */
         void *next = ((void *) cur) + size;
-        block_init(next, cur->size - size, true);
-        block_init(cur, size, false);
+        block_init(next, cur->size - size, true, cur->area);
+        block_init(cur, size, false, cur->area);
     } else {
         /* Block too small -> use as is. */
…
-/** Allocate a memory block
+/** Allocate memory from heap area starting from given block
  *
  * Should be called only inside the critical section.
- *
- * @param size  The size of the block to allocate.
- * @param align Memory address alignment.
- *
- * @return the address of the block or NULL when not enough memory.
- *
- */
-static void *malloc_internal(const size_t size, const size_t align)
-{
-    if (align == 0)
-        return NULL;
-
-    size_t falign = lcm(align, BASE_ALIGN);
-    size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
-
-    bool grown = false;
-    void *result;
-
-loop:
-    result = NULL;
-    heap_block_head_t *cur = (heap_block_head_t *) heap_start;
-
-    while ((result == NULL) && ((void *) cur < heap_end)) {
+ * As a side effect this function also sets the current
+ * pointer on successful allocation.
+ *
+ * @param area        Heap area where to allocate from.
+ * @param first_block Starting heap block.
+ * @param final_block Heap block where to finish the search
+ *                    (may be NULL).
+ * @param real_size   Gross number of bytes to allocate.
+ * @param falign      Physical alignment of the block.
+ *
+ * @return Address of the allocated block or NULL on not enough memory.
+ *
+ */
+static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
+    heap_block_head_t *final_block, size_t real_size, size_t falign)
+{
+    area_check((void *) area);
+    assert((void *) first_block >= (void *) AREA_FIRST_BLOCK(area));
+    assert((void *) first_block < area->end);
+
+    heap_block_head_t *cur;
+    for (cur = first_block; (void *) cur < area->end;
+        cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
         block_check(cur);
+
+        /* Finish searching on the final block */
+        if ((final_block != NULL) && (cur == final_block))
+            break;
 
         /* Try to find a block that is free and large enough. */
         if ((cur->free) && (cur->size >= real_size)) {
-            /* We have found a suitable block.
-               Check for alignment properties. */
-            void *addr = ((void *) cur) + sizeof(heap_block_head_t);
-            void *aligned = (void *) ALIGN_UP(addr, falign);
+            /*
+             * We have found a suitable block.
+             * Check for alignment properties.
+             */
+            void *addr = (void *)
+                ((uintptr_t) cur + sizeof(heap_block_head_t));
+            void *aligned = (void *)
+                ALIGN_UP((uintptr_t) addr, falign);
 
             if (addr == aligned) {
                 /* Exact block start including alignment. */
                 split_mark(cur, real_size);
-                result = addr;
+
+                next = cur;
+                return addr;
             } else {
                 /* Block start has to be aligned */
…
                 if (cur->size >= real_size + excess) {
-                    /* The current block is large enough to fit
-                       data in including alignment */
-                    if ((void *) cur > heap_start) {
-                        /* There is a block before the current block.
-                           This previous block can be enlarged to compensate
-                           for the alignment excess */
-                        heap_block_foot_t *prev_foot =
-                            ((void *) cur) - sizeof(heap_block_foot_t);
+                    /*
+                     * The current block is large enough to fit
+                     * data in (including alignment).
+                     */
+                    if ((void *) cur > (void *) AREA_FIRST_BLOCK(area)) {
+                        /*
+                         * There is a block before the current block.
+                         * This previous block can be enlarged to
+                         * compensate for the alignment excess.
+                         */
+                        heap_block_foot_t *prev_foot = (heap_block_foot_t *)
+                            ((void *) cur - sizeof(heap_block_foot_t));
 
-                        heap_block_head_t *prev_head =
-                            (heap_block_head_t *) (((void *) cur) - prev_foot->size);
+                        heap_block_head_t *prev_head = (heap_block_head_t *)
+                            ((void *) cur - prev_foot->size);
 
                         block_check(prev_head);
…
                         heap_block_head_t *next_head = ((void *) cur) + excess;
 
-                        if ((!prev_head->free) && (excess >= STRUCT_OVERHEAD)) {
-                            /* The previous block is not free and there is enough
-                               space to fill in a new free block between the previous
-                               and current block */
-                            block_init(cur, excess, true);
+                        if ((!prev_head->free) &&
+                            (excess >= STRUCT_OVERHEAD)) {
+                            /*
+                             * The previous block is not free and there
+                             * is enough free space left to fill in
+                             * a new free block between the previous
+                             * and current block.
+                             */
+                            block_init(cur, excess, true, area);
                         } else {
-                            /* The previous block is free (thus there is no need to
-                               induce additional fragmentation to the heap) or the
-                               excess is small, thus just enlarge the previous block */
-                            block_init(prev_head, prev_head->size + excess, prev_head->free);
+                            /*
+                             * The previous block is free (thus there
+                             * is no need to induce additional
+                             * fragmentation to the heap) or the
+                             * excess is small. Therefore just enlarge
+                             * the previous block.
+                             */
+                            block_init(prev_head, prev_head->size + excess,
+                                prev_head->free, area);
                         }
 
-                        block_init(next_head, reduced_size, true);
+                        block_init(next_head, reduced_size, true, area);
                         split_mark(next_head, real_size);
-                        result = aligned;
-                        cur = next_head;
+
+                        next = next_head;
+                        return aligned;
                     } else {
-                        /* The current block is the first block on the heap.
-                           We have to make sure that the alignment excess
-                           is large enough to fit a new free block just
-                           before the current block */
+                        /*
+                         * The current block is the first block
+                         * in the heap area. We have to make sure
+                         * that the alignment excess is large enough
+                         * to fit a new free block just before the
+                         * current block.
+                         */
                         while (excess < STRUCT_OVERHEAD) {
                             aligned += falign;
…
                         if (cur->size >= real_size + excess) {
                             size_t reduced_size = cur->size - excess;
-                            cur = (heap_block_head_t *) (heap_start + excess);
+                            cur = (heap_block_head_t *)
+                                (AREA_FIRST_BLOCK(area) + excess);
 
-                            block_init(heap_start, excess, true);
-                            block_init(cur, reduced_size, true);
+                            block_init((void *) AREA_FIRST_BLOCK(area), excess,
+                                true, area);
+                            block_init(cur, reduced_size, true, area);
                             split_mark(cur, real_size);
-                            result = aligned;
+
+                            next = cur;
+                            return aligned;
                         }
                     }
…
             }
         }
-
-        /* Advance to the next block. */
-        cur = (heap_block_head_t *) (((void *) cur) + cur->size);
-    }
-
-    if ((result == NULL) && (!grown)) {
-        if (grow_heap(real_size)) {
-            grown = true;
+    }
+
+    return NULL;
+}
+
+/** Allocate a memory block
+ *
+ * Should be called only inside the critical section.
+ *
+ * @param size  The size of the block to allocate.
+ * @param align Memory address alignment.
+ *
+ * @return Address of the allocated block or NULL on not enough memory.
+ *
+ */
+static void *malloc_internal(const size_t size, const size_t align)
+{
+    assert(first_heap_area != NULL);
+
+    if (align == 0)
+        return NULL;
+
+    size_t falign = lcm(align, BASE_ALIGN);
+    size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
+
+    bool retry = false;
+    heap_block_head_t *split;
+
+loop:
+
+    /* Try the next fit approach */
+    split = next;
+
+    if (split != NULL) {
+        void *addr = malloc_area(split->area, split, NULL, real_size,
+            falign);
+
+        if (addr != NULL)
+            return addr;
+    }
+
+    /* Search the entire heap */
+    heap_area_t *area;
+    for (area = first_heap_area; area != NULL; area = area->next) {
+        heap_block_head_t *first = (heap_block_head_t *)
+            AREA_FIRST_BLOCK(area);
+
+        void *addr = malloc_area(area, first, split, real_size,
+            falign);
+
+        if (addr != NULL)
+            return addr;
+    }
+
+    if (!retry) {
+        /* Try to grow the heap space */
+        if (heap_grow(real_size)) {
+            retry = true;
             goto loop;
         }
     }
 
-    return result;
+    return NULL;
 }
…
     (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
 
-    assert((void *) head >= heap_start);
-    assert((void *) head < heap_end);
-
     block_check(head);
     assert(!head->free);
+
+    heap_area_t *area = head->area;
+
+    area_check(area);
+    assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
+    assert((void *) head < area->end);
 
     void *ptr = NULL;
…
         /* Shrink */
         if (orig_size - real_size >= STRUCT_OVERHEAD) {
-            /* Split the original block to a full block
-               and a trailing free block */
-            block_init((void *) head, real_size, false);
+            /*
+             * Split the original block to a full block
+             * and a trailing free block.
+             */
+            block_init((void *) head, real_size, false, area);
             block_init((void *) head + real_size,
-                orig_size - real_size, true);
-            shrink_heap();
+                orig_size - real_size, true, area);
+            heap_shrink();
         }
 
         ptr = ((void *) head) + sizeof(heap_block_head_t);
     } else {
-        /* Look at the next block. If it is free and the size is
-           sufficient then merge the two. Otherwise just allocate
-           a new block, copy the original data into it and
-           free the original block. */
+        /*
+         * Look at the next block. If it is free and the size is
+         * sufficient then merge the two. Otherwise just allocate
+         * a new block, copy the original data into it and
+         * free the original block.
+         */
         heap_block_head_t *next_head =
             (heap_block_head_t *) (((void *) head) + head->size);
 
-        if (((void *) next_head < heap_end) &&
+        if (((void *) next_head < area->end) &&
             (head->size + next_head->size >= real_size) &&
             (next_head->free)) {
             block_check(next_head);
-            block_init(head, head->size + next_head->size, false);
+            block_init(head, head->size + next_head->size, false, area);
             split_mark(head, real_size);
 
             ptr = ((void *) head) + sizeof(heap_block_head_t);
+            next = NULL;
         } else
             reloc = true;
…
         = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));
 
-    assert((void *) head >= heap_start);
-    assert((void *) head < heap_end);
-
     block_check(head);
     assert(!head->free);
+
+    heap_area_t *area = head->area;
+
+    area_check(area);
+    assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
+    assert((void *) head < area->end);
 
     /* Mark the block itself as free. */
…
         = (heap_block_head_t *) (((void *) head) + head->size);
 
-    if ((void *) next_head < heap_end) {
+    if ((void *) next_head < area->end) {
         block_check(next_head);
         if (next_head->free)
-            block_init(head, head->size + next_head->size, true);
+            block_init(head, head->size + next_head->size, true, area);
     }
 
     /* Look at the previous block. If it is free, merge the two. */
-    if ((void *) head > heap_start) {
+    if ((void *) head > (void *) AREA_FIRST_BLOCK(area)) {
         heap_block_foot_t *prev_foot =
             (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
…
         if (prev_head->free)
-            block_init(prev_head, prev_head->size + head->size, true);
-    }
-
-    shrink_heap();
+            block_init(prev_head, prev_head->size + head->size, true,
+                area);
+    }
+
+    heap_shrink();
 
     futex_up(&malloc_futex);
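Each heap block is bracketed by a header and a footer carrying the same size field; the footer is what lets free() locate the header of the physically preceding block in constant time and coalesce with it. A reduced, self-contained model of this boundary-tag arithmetic (the real heap_block_head_t additionally stores the owning heap_area_t pointer and magic values):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        size_t size;   /* whole block: header + payload + footer */
        bool free;
    } head_t;

    typedef struct {
        size_t size;   /* copy of head_t.size */
    } foot_t;

    /* Footer sits at the end of the block (cf. BLOCK_FOOT()). */
    static foot_t *block_foot(head_t *head)
    {
        return (foot_t *) ((uintptr_t) head + head->size - sizeof(foot_t));
    }

    /* Header of the block immediately preceding 'head': read the
     * previous footer, then step back by the size stored in it. */
    static head_t *prev_head(head_t *head)
    {
        foot_t *pf = (foot_t *) ((uintptr_t) head - sizeof(foot_t));
        return (head_t *) ((uintptr_t) head - pf->size);
    }

    int main(void)
    {
        /* Two adjacent blocks carved out of one buffer. */
        static uint8_t heap[256];

        head_t *a = (head_t *) heap;
        a->size = 128;
        a->free = true;
        block_foot(a)->size = a->size;

        head_t *b = (head_t *) (heap + 128);
        b->size = 128;
        b->free = false;
        block_foot(b)->size = b->size;

        assert(prev_head(b) == a);  /* O(1) backward navigation */
        return 0;
    }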
uspace/lib/c/generic/private/libc.h
(diff d88218b → 0b37882)
 #define LIBC_PRIVATE_LIBC_H_
 
+extern void __entry(void);
+extern void __main(void *) __attribute__((noreturn));
 extern int main(int, char *[]);
-extern void __main(void *) __attribute__((noreturn));
 
 #endif
uspace/lib/c/include/as.h
(diff d88218b → 0b37882)
 #include <libarch/config.h>
 
+static inline size_t SIZE2PAGES(size_t size)
+{
+    if (size == 0)
+        return 0;
+
+    return (size_t) ((size - 1) >> PAGE_WIDTH) + 1;
+}
+
+static inline size_t PAGES2SIZE(size_t pages)
+{
+    return (size_t) (pages << PAGE_WIDTH);
+}
+
 extern void *as_area_create(void *address, size_t size, int flags);
 extern int as_area_resize(void *address, size_t size, int flags);
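SIZE2PAGES rounds a byte count up to whole pages and maps 0 to 0 without underflowing. A small self-checking example; a PAGE_WIDTH of 12 (4 KiB pages) is assumed here, while the real value comes from <libarch/config.h>:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_WIDTH  12               /* assumed for illustration */
    #define PAGE_SIZE   (1 << PAGE_WIDTH)

    static size_t size2pages(size_t size)
    {
        if (size == 0)
            return 0;

        return (size_t) ((size - 1) >> PAGE_WIDTH) + 1;
    }

    int main(void)
    {
        assert(size2pages(0) == 0);
        assert(size2pages(1) == 1);
        assert(size2pages(PAGE_SIZE) == 1);
        assert(size2pages(PAGE_SIZE + 1) == 2);
        return 0;
    }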
uspace/lib/c/include/malloc.h
(diff d88218b → 0b37882)
 #include <sys/types.h>
 
-extern uintptr_t get_max_heap_addr(void);
-
 extern void *malloc(const size_t size)
     __attribute__((malloc));
uspace/lib/c/include/unistd.h
(diff d88218b → 0b37882)
 #endif
 
-#define getpagesize()  (PAGE_SIZE)
-
 #ifndef SEEK_SET
     #define SEEK_SET  0
…
     #define SEEK_END  2
 #endif
+
+#define getpagesize()  (PAGE_SIZE)
 
 extern int dup2(int oldfd, int newfd);
uspace/srv/loader/arch/abs32le/_link.ld.in
(diff d88218b → 0b37882)
     } :data
 
-    . = ALIGN(0x1000);
-
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
(the remaining hunk only adds a blank line after the introductory comment)
uspace/srv/loader/arch/amd64/_link.ld.in
(diff d88218b → 0b37882)
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
 #ifdef CONFIG_LINE_DEBUG
     .comment 0 : { *(.comment); } :debug
uspace/srv/loader/arch/arm32/_link.ld.in
(diff d88218b → 0b37882)
     .init ALIGN(0x1000): SUBALIGN(0x1000) {
         *(.init);
-    } : text
+    } :text
 
     .text : {
         *(.text);
         *(.rodata*);
     } :text
…
     .bss : {
         *(.sbss);
         *(.scommon);
-        *(.bss);
-        *(COMMON);
+        *(COMMON);
+        *(.bss);
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
-
 }
(the remaining hunks in this file only add blank lines between sections)
uspace/srv/loader/arch/ia32/_link.ld.in
(diff d88218b → 0b37882)
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
 #ifdef CONFIG_LINE_DEBUG
     .comment 0 : { *(.comment); } :debug
uspace/srv/loader/arch/ia64/_link.ld.in
(diff d88218b → 0b37882)
     .init : {
         *(.init);
-    } : text
+    } :text
 
     .text : {
         *(.text);
         *(.rodata*);
     } :text
…
         *(.bss);
     } :data
 
-    . = ALIGN(0x4000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
 }
(the remaining hunks in this file only add blank lines and strip trailing whitespace)
uspace/srv/loader/arch/mips32/_link.ld.in
(diff d88218b → 0b37882)
     .text : {
         *(.text);
         *(.rodata*);
     } :text
 
+    . = . + 0x4000;
+
     .data : {
         *(.data);
         *(.data.rel*);
     } :data
…
     .sbss : {
         *(.scommon);
         *(.sbss);
     }
+
     .bss : {
         *(.bss);
         *(COMMON);
     } :data
 
-    . = ALIGN(0x4000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
(the remaining hunks in this file only add blank lines and strip trailing whitespace)
uspace/srv/loader/arch/ppc32/_link.ld.in
(diff d88218b → 0b37882)
         *(.bss);
     } :data
 
-    . = ALIGN(0x1000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
-
 }
(the remaining hunks in this file only add blank lines between sections)
uspace/srv/loader/arch/sparc64/_link.ld.in
(diff d88218b → 0b37882)
         *(.bss);
     } :data
 
-    . = ALIGN(0x4000);
-    _heap = .;
-
     /DISCARD/ : {
         *(*);
     }
-
 }
(the remaining hunks in this file only add blank lines between sections)