Changes in [0b37882:ae6f303] in mainline

Files: 8 edited
kernel/generic/include/mm/as.h
(r0b37882 → rae6f303)

    extern sysarg_t sys_as_area_change_flags(uintptr_t, unsigned int);
    extern sysarg_t sys_as_area_destroy(uintptr_t);
-   extern sysarg_t sys_as_get_unmapped_area(uintptr_t, size_t);

    /* Introspection functions. */
kernel/generic/include/syscall/syscall.h
(r0b37882 → rae6f303)

    SYS_AS_AREA_CHANGE_FLAGS,
    SYS_AS_AREA_DESTROY,
-   SYS_AS_GET_UNMAPPED_AREA,

    SYS_IPC_CALL_SYNC_FAST,
kernel/generic/src/mm/as.c
(r0b37882 → rae6f303)

    #include <memstr.h>
    #include <macros.h>
-   #include <bitops.h>
    #include <arch.h>
    #include <errno.h>
…
    /** Check area conflicts with other areas.
     *
-    * @param as    Address space.
-    * @param addr  Starting virtual address of the area being tested.
-    * @param count Number of pages in the area being tested.
-    * @param avoid Do not touch this area.
+    * @param as         Address space.
+    * @param va         Starting virtual address of the area being tested.
+    * @param size       Size of the area being tested.
+    * @param avoid_area Do not touch this area.
     *
     * @return True if there is no conflict, false otherwise.
     *
     */
-   NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr,
-       size_t count, as_area_t *avoid)
-   {
-       ASSERT((addr % PAGE_SIZE) == 0);
+   NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
+       as_area_t *avoid_area)
+   {
        ASSERT(mutex_locked(&as->lock));
…
         * We don't want any area to have conflicts with NULL page.
         */
-       if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
+       if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
            return false;
…
        btree_node_t *leaf;
        as_area_t *area =
-           (as_area_t *) btree_search(&as->as_area_btree, addr, &leaf);
+           (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
        if (area) {
-           if (area != avoid)
+           if (area != avoid_area)
                return false;
        }
…
        area = (as_area_t *) node->value[node->keys - 1];

-       if (area != avoid) {
-           mutex_lock(&area->lock);
-
-           if (overlaps(addr, count << PAGE_WIDTH,
-               area->base, area->pages << PAGE_WIDTH)) {
-               mutex_unlock(&area->lock);
-               return false;
-           }
-
-           mutex_unlock(&area->lock);
-       }
+       mutex_lock(&area->lock);
+
+       if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+           mutex_unlock(&area->lock);
+           return false;
+       }
+
+       mutex_unlock(&area->lock);
    }
…
        area = (as_area_t *) node->value[0];

-       if (area != avoid) {
-           mutex_lock(&area->lock);
-
-           if (overlaps(addr, count << PAGE_WIDTH,
-               area->base, area->pages << PAGE_WIDTH)) {
-               mutex_unlock(&area->lock);
-               return false;
-           }
-
-           mutex_unlock(&area->lock);
-       }
+       mutex_lock(&area->lock);
+
+       if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
+           mutex_unlock(&area->lock);
+           return false;
+       }
+
+       mutex_unlock(&area->lock);
    }
…
        area = (as_area_t *) leaf->value[i];

-       if (area == avoid)
+       if (area == avoid_area)
            continue;

        mutex_lock(&area->lock);

-       if (overlaps(addr, count << PAGE_WIDTH,
-           area->base, area->pages << PAGE_WIDTH)) {
+       if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) {
            mutex_unlock(&area->lock);
            return false;
…
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-       return !overlaps(addr, count << PAGE_WIDTH,
+       return !overlaps(va, size,
            KERNEL_ADDRESS_SPACE_START,
            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
…
        mem_backend_data_t *backend_data)
    {
-       if ((base % PAGE_SIZE) != 0)
+       if (base % PAGE_SIZE)
            return NULL;

-       if (size == 0)
+       if (!size)
            return NULL;
-
-       size_t pages = SIZE2FRAMES(size);

        /* Writeable executable areas are not supported.
         */
…
    mutex_lock(&as->lock);

-   if (!check_area_conflicts(as, base, pages, NULL)) {
+   if (!check_area_conflicts(as, base, size, NULL)) {
        mutex_unlock(&as->lock);
        return NULL;
…
    area->flags = flags;
    area->attributes = attrs;
-   area->pages = pages;
+   area->pages = SIZE2FRAMES(size);
    area->resident = 0;
    area->base = base;
…
        mutex_lock(&area->lock);

-       if ((area->base <= va) &&
-           (va < area->base + (area->pages << PAGE_WIDTH)))
+       if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE))
            return area;
…
        mutex_lock(&area->lock);

-       if (va < area->base + (area->pages << PAGE_WIDTH))
+       if (va < area->base + area->pages * PAGE_SIZE)
            return area;
…
    if (pages < area->pages) {
-       uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
+       uintptr_t start_free = area->base + pages * PAGE_SIZE;
…
        ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-           area->base + (pages << PAGE_WIDTH), area->pages - pages);
+           area->base + pages * PAGE_SIZE, area->pages - pages);
…
            size_t i = 0;

-           if (overlaps(ptr, size << PAGE_WIDTH, area->base,
-               pages << PAGE_WIDTH)) {
+           if (overlaps(ptr, size * PAGE_SIZE, area->base,
+               pages * PAGE_SIZE)) {

-               if (ptr + (size << PAGE_WIDTH) <= start_free) {
+               if (ptr + size * PAGE_SIZE <= start_free) {
                    /*
                     * The whole interval fits
…
            for (; i < size; i++) {
                pte_t *pte = page_mapping_find(as, ptr +
-                   (i << PAGE_WIDTH));
+                   i * PAGE_SIZE);

                ASSERT(pte);
…
                    (area->backend->frame_free)) {
                    area->backend->frame_free(area,
-                       ptr + (i << PAGE_WIDTH),
+                       ptr + i * PAGE_SIZE,
                        PTE_GET_FRAME(pte));
                }

                page_mapping_remove(as, ptr +
-                   (i << PAGE_WIDTH));
+                   i * PAGE_SIZE);
            }
…
-       tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
+       tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
            area->pages - pages);
…
        as_invalidate_translation_cache(as, area->base +
-           (pages << PAGE_WIDTH), area->pages - pages);
+           pages * PAGE_SIZE, area->pages - pages);
        tlb_shootdown_finalize(ipl);
…
         * Check for overlaps with other address space areas.
         */
-       if (!check_area_conflicts(as, address, pages, area)) {
+       if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
+           area)) {
            mutex_unlock(&area->lock);
            mutex_unlock(&as->lock);
…
        for (size = 0; size < (size_t) node->value[i]; size++) {
-           pte_t *pte =
-               page_mapping_find(as, ptr + (size << PAGE_WIDTH));
+           pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);

            ASSERT(pte);
…
                (area->backend->frame_free)) {
                area->backend->frame_free(area,
-                   ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
+                   ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte));
            }

-           page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
+           page_mapping_remove(as, ptr + size * PAGE_SIZE);
        }
…
-   size_t src_size = src_area->pages << PAGE_WIDTH;
+   size_t src_size = src_area->pages * PAGE_SIZE;
    unsigned int src_flags = src_area->flags;
    mem_backend_t *src_backend = src_area->backend;
…
        for (size = 0; size < (size_t) node->value[i]; size++) {
-           pte_t *pte =
-               page_mapping_find(as, ptr + (size << PAGE_WIDTH));
+           pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE);

            ASSERT(pte);
…
            /* Remove old mapping */
-           page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
+           page_mapping_remove(as, ptr + size * PAGE_SIZE);
        }
…
        /* Insert the new mapping */
-       page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
+       page_mapping_insert(as, ptr + size * PAGE_SIZE,
            old_frame[frame_idx++], page_flags);
…
    if (src_area) {
-       size = src_area->pages << PAGE_WIDTH;
+       size = src_area->pages * PAGE_SIZE;
        mutex_unlock(&src_area->lock);
    } else
…
    if (page >= right_pg) {
        /* Do nothing. */
-   } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
-       left_cnt << PAGE_WIDTH)) {
+   } else if (overlaps(page, count * PAGE_SIZE, left_pg,
+       left_cnt * PAGE_SIZE)) {
        /* The interval intersects with the left interval. */
        return false;
-   } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
-       right_cnt << PAGE_WIDTH)) {
+   } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+       right_cnt * PAGE_SIZE)) {
        /* The interval intersects with the right interval. */
        return false;
-   } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
-       (page + (count << PAGE_WIDTH) == right_pg)) {
+   } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+       (page + count * PAGE_SIZE == right_pg)) {
…
        btree_remove(&area->used_space, right_pg, leaf);
        goto success;
-   } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+   } else if (page == left_pg + left_cnt * PAGE_SIZE) {
…
        node->value[node->keys - 1] += count;
        goto success;
-   } else if (page + (count << PAGE_WIDTH) == right_pg) {
+   } else if (page + count * PAGE_SIZE == right_pg) {
…
-   if (overlaps(page, count << PAGE_WIDTH, right_pg,
-       right_cnt << PAGE_WIDTH)) {
+   if (overlaps(page, count * PAGE_SIZE, right_pg,
+       right_cnt * PAGE_SIZE)) {
        /* The interval intersects with the right interval. */
        return false;
-   } else if (page + (count << PAGE_WIDTH) == right_pg) {
+   } else if (page + count * PAGE_SIZE == right_pg) {
…
    if (page < left_pg) {
        /* Do nothing. */
-   } else if (overlaps(page, count << PAGE_WIDTH, left_pg,
-       left_cnt << PAGE_WIDTH)) {
+   } else if (overlaps(page, count * PAGE_SIZE, left_pg,
+       left_cnt * PAGE_SIZE)) {
        /* The interval intersects with the left interval. */
        return false;
-   } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
-       right_cnt << PAGE_WIDTH)) {
+   } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+       right_cnt * PAGE_SIZE)) {
        /* The interval intersects with the right interval. */
        return false;
-   } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
-       (page + (count << PAGE_WIDTH) == right_pg)) {
+   } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+       (page + count * PAGE_SIZE == right_pg)) {
…
        btree_remove(&area->used_space, right_pg, node);
        goto success;
-   } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+   } else if (page == left_pg + left_cnt * PAGE_SIZE) {
…
        leaf->value[leaf->keys - 1] += count;
        goto success;
-   } else if (page + (count << PAGE_WIDTH) == right_pg) {
+   } else if (page + count * PAGE_SIZE == right_pg) {
…
-   if (overlaps(page, count << PAGE_WIDTH, left_pg,
-       left_cnt << PAGE_WIDTH)) {
+   if (overlaps(page, count * PAGE_SIZE, left_pg,
+       left_cnt * PAGE_SIZE)) {
        /* The interval intersects with the left interval. */
        return false;
-   } else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
+   } else if (left_pg + left_cnt * PAGE_SIZE == page) {
…
-   if (overlaps(page, count << PAGE_WIDTH, left_pg,
-       left_cnt << PAGE_WIDTH)) {
+   if (overlaps(page, count * PAGE_SIZE, left_pg,
+       left_cnt * PAGE_SIZE)) {
        return false;
-   } else if (overlaps(page, count << PAGE_WIDTH, right_pg,
-       right_cnt << PAGE_WIDTH)) {
+   } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+       right_cnt * PAGE_SIZE)) {
        return false;
-   } else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
-       (page + (count << PAGE_WIDTH) == right_pg)) {
+   } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+       (page + count * PAGE_SIZE == right_pg)) {
…
        btree_remove(&area->used_space, right_pg, leaf);
        goto success;
-   } else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
+   } else if (page == left_pg + left_cnt * PAGE_SIZE) {
…
        leaf->value[i - 1] += count;
        goto success;
-   } else if (page + (count << PAGE_WIDTH) == right_pg) {
+   } else if (page + count * PAGE_SIZE == right_pg) {
…
    for (i = 0; i < leaf->keys; i++) {
        if (leaf->key[i] == page) {
-           leaf->key[i] += count << PAGE_WIDTH;
+           leaf->key[i] += count * PAGE_SIZE;
            leaf->value[i] -= count;
            goto success;
…
-   if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
-       count << PAGE_WIDTH)) {
-       if (page + (count << PAGE_WIDTH) ==
-           left_pg + (left_cnt << PAGE_WIDTH)) {
+   if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+       count * PAGE_SIZE)) {
+       if (page + count * PAGE_SIZE ==
+           left_pg + left_cnt * PAGE_SIZE) {
…
            node->value[node->keys - 1] -= count;
            goto success;
-       } else if (page + (count << PAGE_WIDTH) <
-           left_pg + (left_cnt << PAGE_WIDTH)) {
+       } else if (page + count * PAGE_SIZE <
+           left_pg + left_cnt * PAGE_SIZE) {
…
-           size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
-               (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
+           size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+               (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
            node->value[node->keys - 1] -= count + new_cnt;
            btree_insert(&area->used_space, page +
-               (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+               count * PAGE_SIZE, (void *) new_cnt, leaf);
            goto success;
…
-   if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
-       count << PAGE_WIDTH)) {
-       if (page + (count << PAGE_WIDTH) ==
-           left_pg + (left_cnt << PAGE_WIDTH)) {
+   if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+       count * PAGE_SIZE)) {
+       if (page + count * PAGE_SIZE ==
+           left_pg + left_cnt * PAGE_SIZE) {
…
            leaf->value[leaf->keys - 1] -= count;
            goto success;
-       } else if (page + (count << PAGE_WIDTH) < left_pg +
-           (left_cnt << PAGE_WIDTH)) {
+       } else if (page + count * PAGE_SIZE < left_pg +
+           left_cnt * PAGE_SIZE) {
…
-           size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
-               (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
+           size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+               (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
            leaf->value[leaf->keys - 1] -= count + new_cnt;
            btree_insert(&area->used_space, page +
-               (count << PAGE_WIDTH), (void *) new_cnt, leaf);
+               count * PAGE_SIZE, (void *) new_cnt, leaf);
            goto success;
…
-       if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
-           count << PAGE_WIDTH)) {
-           if (page + (count << PAGE_WIDTH) ==
-               left_pg + (left_cnt << PAGE_WIDTH)) {
+       if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+           count * PAGE_SIZE)) {
+           if (page + count * PAGE_SIZE ==
+               left_pg + left_cnt * PAGE_SIZE) {
…
                leaf->value[i - 1] -= count;
                goto success;
-           } else if (page + (count << PAGE_WIDTH) <
-               left_pg + (left_cnt << PAGE_WIDTH)) {
+           } else if (page + count * PAGE_SIZE <
+               left_pg + left_cnt * PAGE_SIZE) {
…
                size_t new_cnt = ((left_pg +
-                   (left_cnt << PAGE_WIDTH)) -
-                   (page + (count << PAGE_WIDTH))) >>
+                   left_cnt * PAGE_SIZE) -
+                   (page + count * PAGE_SIZE)) >>
                    PAGE_WIDTH;
                leaf->value[i - 1] -= count + new_cnt;
                btree_insert(&area->used_space, page +
-                   (count << PAGE_WIDTH), (void *) new_cnt,
+                   count * PAGE_SIZE, (void *) new_cnt,
                    leaf);
                goto success;
…
    {
        return (sysarg_t) as_area_destroy(AS, address);
-   }
-
-   /** Return pointer to unmapped address space area
-    *
-    * @param base Lowest address bound.
-    * @param size Requested size of the allocation.
-    *
-    * @return Pointer to the beginning of unmapped address space area.
-    *
-    */
-   sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-   {
-       if (size == 0)
-           return 0;
-
-       /*
-        * Make sure we allocate from page-aligned
-        * address. Check for possible overflow in
-        * each step.
-        */
-
-       size_t pages = SIZE2FRAMES(size);
-       uintptr_t ret = 0;
-
-       /*
-        * Find the lowest unmapped address aligned on the sz
-        * boundary, not smaller than base and of the required size.
-        */
-
-       mutex_lock(&AS->lock);
-
-       /* First check the base address itself */
-       uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-       if ((addr >= base) &&
-           (check_area_conflicts(AS, addr, pages, NULL)))
-           ret = addr;
-
-       /* Eventually check the addresses behind each area */
-       link_t *cur;
-       for (cur = AS->as_area_btree.leaf_head.next;
-           (ret == 0) && (cur != &AS->as_area_btree.leaf_head);
-           cur = cur->next) {
-           btree_node_t *node =
-               list_get_instance(cur, btree_node_t, leaf_link);
-
-           btree_key_t i;
-           for (i = 0; (ret == 0) && (i < node->keys); i++) {
-               as_area_t *area = (as_area_t *) node->value[i];
-
-               mutex_lock(&area->lock);
-
-               uintptr_t addr =
-                   ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
-                   PAGE_SIZE);
-
-               if ((addr >= base) && (addr >= area->base) &&
-                   (check_area_conflicts(AS, addr, pages, area)))
-                   ret = addr;
-
-               mutex_unlock(&area->lock);
-           }
-       }
-
-       mutex_unlock(&AS->lock);
-
-       return (sysarg_t) ret;
    }
…
    mutex_lock(&as->lock);

-   /* Print out info about address space areas */
+   /* print out info about address space areas */
    link_t *cur;
    for (cur = as->as_area_btree.leaf_head.next;
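Most of the as.c hunks above are the same mechanical change: check_area_conflicts() goes back to taking a byte size (va, size) instead of a page count (addr, count), so every `count << PAGE_WIDTH` at the call sites becomes `count * PAGE_SIZE`. A minimal illustrative sketch (not part of the changeset; the PAGE_WIDTH value is an assumption) of why the two spellings describe the same interval length:

/* Illustration only: with PAGE_SIZE defined as (1 << PAGE_WIDTH),
 * a page count and a byte length are interchangeable. */
#include <assert.h>
#include <stddef.h>

#define PAGE_WIDTH  12
#define PAGE_SIZE   (1 << PAGE_WIDTH)

int main(void)
{
	size_t count = 5;                       /* interval length in pages */
	size_t size = count * PAGE_SIZE;        /* the same length in bytes */

	assert(size == (count << PAGE_WIDTH));  /* identical interval */
	return 0;
}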
kernel/generic/src/syscall/syscall.c
(r0b37882 → rae6f303)

    (syshandler_t) sys_as_area_change_flags,
    (syshandler_t) sys_as_area_destroy,
-   (syshandler_t) sys_as_get_unmapped_area,

    /* IPC related syscalls. */
uspace/lib/c/generic/as.c
(r0b37882 → rae6f303)

    #include <bitops.h>
    #include <malloc.h>
-   #include "private/libc.h"
+
+   /** Last position allocated by as_get_mappable_page */
+   static uintptr_t last_allocated = 0;

    /** Create address space area.
…
    }

-   /** Return pointer to unmapped address space area
+   /** Return pointer to some unmapped area, where fits new as_area
     *
     * @param size Requested size of the allocation.
     *
-    * @return Pointer to the beginning of unmapped address space area.
+    * @return pointer to the beginning
     *
     */
    void *as_get_mappable_page(size_t size)
    {
-       return (void *) __SYSCALL2(SYS_AS_GET_UNMAPPED_AREA,
-           (sysarg_t) __entry, (sysarg_t) size);
+       if (size == 0)
+           return NULL;
+
+       size_t sz = 1 << (fnzb(size - 1) + 1);
+       if (last_allocated == 0)
+           last_allocated = get_max_heap_addr();
+
+       /*
+        * Make sure we allocate from naturally aligned address.
+        */
+       uintptr_t res = ALIGN_UP(last_allocated, sz);
+       last_allocated = res + ALIGN_UP(size, PAGE_SIZE);
+
+       return ((void *) res);
    }
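The restored userspace as_get_mappable_page() rounds the requested size up to the next power of two and aligns its bump pointer to that boundary ("naturally aligned"). A minimal sketch of that rounding step, assuming fnzb() behaves like the HelenOS <bitops.h> helper and returns the index of the most significant set bit (the stand-in name msb_index is hypothetical):

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for fnzb(): index of the highest set bit. */
static inline unsigned msb_index(size_t arg)
{
	unsigned n = 0;
	while (arg >>= 1)
		n++;
	return n;
}

int main(void)
{
	size_t size = 5000;
	/* Same expression as in the diff: next power of two >= size. */
	size_t sz = (size_t) 1 << (msb_index(size - 1) + 1);

	assert(sz == 8192);
	return 0;
}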
uspace/lib/c/generic/malloc.c
(r0b37882 → rae6f303; long unchanged stretches elided with …)

    #include "private/malloc.h"

-   /** Magic used in heap headers. */
-   #define HEAP_BLOCK_HEAD_MAGIC  UINT32_C(0xBEEF0101)
-
-   /** Magic used in heap footers. */
-   #define HEAP_BLOCK_FOOT_MAGIC  UINT32_C(0xBEEF0202)
-
-   /** Magic used in heap descriptor. */
-   #define HEAP_AREA_MAGIC  UINT32_C(0xBEEFCAFE)
-
-   /** Allocation alignment.
-    *
-    * This also covers the alignment of fields
-    * in the heap header and footer.
-    *
-    */
+   /* Magic used in heap headers. */
+   #define HEAP_BLOCK_HEAD_MAGIC  0xBEEF0101
+
+   /* Magic used in heap footers. */
+   #define HEAP_BLOCK_FOOT_MAGIC  0xBEEF0202
+
+   /** Allocation alignment (this also covers the alignment of fields
+       in the heap header and footer) */
    #define BASE_ALIGN  16

-   /** Overhead of each heap block. */
-   #define STRUCT_OVERHEAD \
-       (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
-
-   /** Calculate real size of a heap block.
-    *
-    * Add header and footer size.
-    *
+   /**
+    * Either 4 * 256M on 32-bit architecures or 16 * 256M on 64-bit architectures
+    */
+   #define MAX_HEAP_SIZE  (sizeof(uintptr_t) << 28)
+
+   /**
+    *
+    */
+   #define STRUCT_OVERHEAD  (sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
+
+   /**
+    * Calculate real size of a heap block (with header and footer)
     */
    #define GROSS_SIZE(size)  ((size) + STRUCT_OVERHEAD)

-   /** Calculate net size of a heap block.
-    *
-    * Subtract header and footer size.
-    *
+   /**
+    * Calculate net size of a heap block (without header and footer)
     */
    #define NET_SIZE(size)  ((size) - STRUCT_OVERHEAD)
-
-   /** Get first block in heap area.
-    *
-    */
-   #define AREA_FIRST_BLOCK(area) \
-       (ALIGN_UP(((uintptr_t) (area)) + sizeof(heap_area_t), BASE_ALIGN))
-
-   /** Get footer in heap block.
-    *
-    */
-   #define BLOCK_FOOT(head) \
-       ((heap_block_foot_t *) \
-           (((uintptr_t) head) + head->size - sizeof(heap_block_foot_t)))
-
-   /** Heap area.
-    *
-    * The memory managed by the heap allocator is divided into
-    * multiple discontinuous heaps. Each heap is represented
-    * by a separate address space area which has this structure
-    * at its very beginning.
-    *
-    */
-   typedef struct heap_area {
-       /** Start of the heap area (including this structure)
-        *
-        * Aligned on page boundary.
-        *
-        */
-       void *start;
-
-       /** End of the heap area (aligned on page boundary) */
-       void *end;
-
-       /** Next heap area */
-       struct heap_area *next;
-
-       /** A magic value */
-       uint32_t magic;
-   } heap_area_t;

    /** Header of a heap block
…
        bool free;

-       /** Heap area this block belongs to */
-       heap_area_t *area;
-
        /* A magic value to detect overwrite of heap header */
        uint32_t magic;
…
    } heap_block_foot_t;

-   /** First heap area */
-   static heap_area_t *first_heap_area = NULL;
-
-   /** Last heap area */
-   static heap_area_t *last_heap_area = NULL;
-
-   /** Next heap block to examine (next fit algorithm) */
-   static heap_block_head_t *next = NULL;
+   /** Linker heap symbol */
+   extern char _heap;

    /** Futex for thread-safe heap manipulation */
    static futex_t malloc_futex = FUTEX_INITIALIZER;

+   /** Address of heap start */
+   static void *heap_start = 0;
+
+   /** Address of heap end */
+   static void *heap_end = 0;
+
+   /** Maximum heap size */
+   static size_t max_heap_size = (size_t) -1;
+
+   /** Current number of pages of heap area */
+   static size_t heap_pages = 0;
+
    /** Initialize a heap block
     *
-    * Fill in the structures related to a heap block.
+    * Fills in the structures related to a heap block.
     * Should be called only inside the critical section.
…
     * @param size Size of the block including the header and the footer.
     * @param free Indication of a free block.
-    * @param area Heap area the block belongs to.
     *
     */
-   static void block_init(void *addr, size_t size, bool free, heap_area_t *area)
+   static void block_init(void *addr, size_t size, bool free)
    {
        /* Calculate the position of the header and the footer */
        heap_block_head_t *head = (heap_block_head_t *) addr;
+       heap_block_foot_t *foot =
+           (heap_block_foot_t *) (addr + size - sizeof(heap_block_foot_t));

        head->size = size;
        head->free = free;
-       head->area = area;
        head->magic = HEAP_BLOCK_HEAD_MAGIC;
-
-       heap_block_foot_t *foot = BLOCK_FOOT(head);

        foot->size = size;
…
        assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);

-       heap_block_foot_t *foot = BLOCK_FOOT(head);
+       heap_block_foot_t *foot =
+           (heap_block_foot_t *) (addr + head->size - sizeof(heap_block_foot_t));

        assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
…
-   /** Check a heap area structure
-    *
-    * @param addr Address of the heap area.
-    *
-    */
-   static void area_check(void *addr)
-   {
-       heap_area_t *area = (heap_area_t *) addr;
-
-       assert(area->magic == HEAP_AREA_MAGIC);
-       assert(area->start < area->end);
-       assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
-       assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
-   }
-
-   /** Create new heap area
-    *
-    * @param start Preffered starting address of the new area.
-    * @param size Size of the area.
-    *
-    */
-   static bool area_create(size_t size)
-   {
-       void *start = as_get_mappable_page(size);
-       if (start == NULL)
-           return false;
…
-       return true;
-   }
-
-   /** Try to enlarge a heap area
-    *
-    * @param area Heap area to grow.
-    * @param size Gross size of item to allocate (bytes).
-    *
-    */
-   static bool area_grow(heap_area_t *area, size_t size)
-   {
-       if (size == 0)
-           return true;
-
-       area_check(area);
…
-       /* Resize the address space area */
-       int ret = as_area_resize(area->start, asize, 0);
-       if (ret != EOK)
-           return false;
-
-       /* Add new free block */
-       block_init(area->end, (size_t) (end - area->end), true, area);
-
-       /* Update heap area parameters */
-       area->end = end;
-
-       return true;
-   }
-
-   /** Try to enlarge any of the heap areas
-    *
-    * @param size Gross size of item to allocate (bytes).
-    *
-    */
-   static bool heap_grow(size_t size)
-   {
-       if (size == 0)
-           return true;
-
-       /* First try to enlarge some existing area */
-       heap_area_t *area;
-       for (area = first_heap_area; area != NULL; area = area->next) {
-           if (area_grow(area, size))
-               return true;
-       }
-
-       /* Eventually try to create a new area */
-       return area_create(AREA_FIRST_BLOCK(size));
-   }
-
-   /** Try to shrink heap space
-    *
-    * In all cases the next pointer is reset.
-    *
-    */
-   static void heap_shrink(void)
-   {
-       next = NULL;
+   /** Increase the heap area size
+    *
+    * Should be called only inside the critical section.
+    *
+    * @param size Number of bytes to grow the heap by.
+    *
+    */
+   static bool grow_heap(size_t size)
+   {
+       if (size == 0)
+           return false;
+
+       if ((heap_start + size < heap_start) || (heap_end + size < heap_end))
+           return false;
+
+       size_t heap_size = (size_t) (heap_end - heap_start);
+
+       if ((max_heap_size != (size_t) -1) && (heap_size + size > max_heap_size))
+           return false;
+
+       size_t pages = (size - 1) / PAGE_SIZE + 1;
+
+       if (as_area_resize((void *) &_heap, (heap_pages + pages) * PAGE_SIZE, 0)
+           == EOK) {
+           void *end = (void *) ALIGN_DOWN(((uintptr_t) &_heap) +
+               (heap_pages + pages) * PAGE_SIZE, BASE_ALIGN);
+           block_init(heap_end, end - heap_end, true);
+           heap_pages += pages;
+           heap_end = end;
+           return true;
+       }
+
+       return false;
+   }
+
+   /** Decrease the heap area
+    *
+    * Should be called only inside the critical section.
+    *
+    * @param size Number of bytes to shrink the heap by.
+    *
+    */
+   static void shrink_heap(void)
+   {
+       // TODO
    }
…
    void __malloc_init(void)
    {
-       if (!area_create(PAGE_SIZE))
+       if (!as_area_create((void *) &_heap, PAGE_SIZE,
+           AS_AREA_WRITE | AS_AREA_READ))
            abort();
+
+       heap_pages = 1;
+       heap_start = (void *) ALIGN_UP((uintptr_t) &_heap, BASE_ALIGN);
+       heap_end =
+           (void *) ALIGN_DOWN(((uintptr_t) &_heap) + PAGE_SIZE, BASE_ALIGN);
+
+       /* Make the entire area one large block. */
+       block_init(heap_start, heap_end - heap_start, true);
+   }
+
+   /** Get maximum heap address
+    *
+    */
+   uintptr_t get_max_heap_addr(void)
+   {
+       futex_down(&malloc_futex);
+
+       if (max_heap_size == (size_t) -1)
+           max_heap_size =
+               max((size_t) (heap_end - heap_start), MAX_HEAP_SIZE);
+
+       uintptr_t max_heap_addr = (uintptr_t) heap_start + max_heap_size;
+
+       futex_up(&malloc_futex);
+
+       return max_heap_addr;
    }
…
        /* Block big enough -> split. */
        void *next = ((void *) cur) + size;
-       block_init(next, cur->size - size, true, cur->area);
-       block_init(cur, size, false, cur->area);
+       block_init(next, cur->size - size, true);
+       block_init(cur, size, false);
    } else {
        /* Block too small -> use as is. */
…
-   /** Allocate memory from heap area starting from given block
+   /** Allocate a memory block
     *
     * Should be called only inside the critical section.
-    * As a side effect this function also sets the current
-    * pointer on successful allocation.
-    *
-    * @param area        Heap area where to allocate from.
-    * @param first_block Starting heap block.
-    * @param final_block Heap block where to finish the search
-    *                    (may be NULL).
-    * @param real_size   Gross number of bytes to allocate.
-    * @param falign      Physical alignment of the block.
-    *
-    * @return Address of the allocated block or NULL on not enough memory.
-    *
-    */
-   static void *malloc_area(heap_area_t *area, heap_block_head_t *first_block,
-       heap_block_head_t *final_block, size_t real_size, size_t falign)
-   {
-       area_check((void *) area);
-       assert((void *) first_block >= (void *) AREA_FIRST_BLOCK(area));
-       assert((void *) first_block < area->end);
-
-       heap_block_head_t *cur;
-       for (cur = first_block; (void *) cur < area->end;
-           cur = (heap_block_head_t *) (((void *) cur) + cur->size)) {
+    *
+    * @param size  The size of the block to allocate.
+    * @param align Memory address alignment.
+    *
+    * @return the address of the block or NULL when not enough memory.
+    *
+    */
+   static void *malloc_internal(const size_t size, const size_t align)
+   {
+       if (align == 0)
+           return NULL;
+
+       size_t falign = lcm(align, BASE_ALIGN);
+       size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
+
+       bool grown = false;
+       void *result;
+
+   loop:
+       result = NULL;
+       heap_block_head_t *cur = (heap_block_head_t *) heap_start;
+
+       while ((result == NULL) && ((void *) cur < heap_end)) {
            block_check(cur);
-
-           /* Finish searching on the final block */
-           if ((final_block != NULL) && (cur == final_block))
-               break;

            /* Try to find a block that is free and large enough. */
            if ((cur->free) && (cur->size >= real_size)) {
-               /*
-                * We have found a suitable block.
-                * Check for alignment properties.
-                */
-               void *addr = (void *)
-                   ((uintptr_t) cur + sizeof(heap_block_head_t));
-               void *aligned = (void *)
-                   ALIGN_UP((uintptr_t) addr, falign);
+               /* We have found a suitable block.
+                  Check for alignment properties. */
+               void *addr = ((void *) cur) + sizeof(heap_block_head_t);
+               void *aligned = (void *) ALIGN_UP(addr, falign);

                if (addr == aligned) {
                    /* Exact block start including alignment. */
                    split_mark(cur, real_size);
-
-                   next = cur;
-                   return addr;
+                   result = addr;
                } else {
                    /* Block start has to be aligned */
…
                    if (cur->size >= real_size + excess) {
-                       /*
-                        * The current block is large enough to fit
-                        * data in (including alignment).
-                        */
-                       if ((void *) cur > (void *) AREA_FIRST_BLOCK(area)) {
-                           /*
-                            * There is a block before the current block.
-                            * This previous block can be enlarged to
-                            * compensate for the alignment excess.
-                            */
-                           heap_block_foot_t *prev_foot = (heap_block_foot_t *)
-                               ((void *) cur - sizeof(heap_block_foot_t));
+                       /* The current block is large enough to fit
+                          data in including alignment */
+                       if ((void *) cur > heap_start) {
+                           /* There is a block before the current block.
+                              This previous block can be enlarged to compensate
+                              for the alignment excess */
+                           heap_block_foot_t *prev_foot =
+                               ((void *) cur) - sizeof(heap_block_foot_t);

-                           heap_block_head_t *prev_head = (heap_block_head_t *)
-                               ((void *) cur - prev_foot->size);
+                           heap_block_head_t *prev_head =
+                               (heap_block_head_t *) (((void *) cur) - prev_foot->size);

                            block_check(prev_head);
…
                            heap_block_head_t *next_head = ((void *) cur) + excess;

-                           if ((!prev_head->free) &&
-                               (excess >= STRUCT_OVERHEAD)) {
-                               /*
-                                * The previous block is not free and there
-                                * is enough free space left to fill in
-                                * a new free block between the previous
-                                * and current block.
-                                */
-                               block_init(cur, excess, true, area);
+                           if ((!prev_head->free) && (excess >= STRUCT_OVERHEAD)) {
+                               /* The previous block is not free and there is enough
+                                  space to fill in a new free block between the previous
+                                  and current block */
+                               block_init(cur, excess, true);
                            } else {
-                               /*
-                                * The previous block is free (thus there
-                                * is no need to induce additional
-                                * fragmentation to the heap) or the
-                                * excess is small. Therefore just enlarge
-                                * the previous block.
-                                */
-                               block_init(prev_head, prev_head->size + excess,
-                                   prev_head->free, area);
+                               /* The previous block is free (thus there is no need to
+                                  induce additional fragmentation to the heap) or the
+                                  excess is small, thus just enlarge the previous block */
+                               block_init(prev_head, prev_head->size + excess, prev_head->free);
                            }

-                           block_init(next_head, reduced_size, true, area);
+                           block_init(next_head, reduced_size, true);
                            split_mark(next_head, real_size);
-
-                           next = next_head;
-                           return aligned;
+                           result = aligned;
+                           cur = next_head;
                        } else {
-                           /*
-                            * The current block is the first block
-                            * in the heap area. We have to make sure
-                            * that the alignment excess is large enough
-                            * to fit a new free block just before the
-                            * current block.
-                            */
+                           /* The current block is the first block on the heap.
+                              We have to make sure that the alignment excess
+                              is large enough to fit a new free block just
+                              before the current block */
                            while (excess < STRUCT_OVERHEAD) {
                                aligned += falign;
…
                            if (cur->size >= real_size + excess) {
                                size_t reduced_size = cur->size - excess;
-                               cur = (heap_block_head_t *)
-                                   (AREA_FIRST_BLOCK(area) + excess);
+                               cur = (heap_block_head_t *) (heap_start + excess);

-                               block_init((void *) AREA_FIRST_BLOCK(area), excess,
-                                   true, area);
-                               block_init(cur, reduced_size, true, area);
+                               block_init(heap_start, excess, true);
+                               block_init(cur, reduced_size, true);
                                split_mark(cur, real_size);
-
-                               next = cur;
-                               return aligned;
+                               result = aligned;
                            }
                        }
…
            }
        }
-   }
-
-   return NULL;
-   }
-
-   /** Allocate a memory block
-    *
-    * Should be called only inside the critical section.
-    *
-    * @param size  The size of the block to allocate.
-    * @param align Memory address alignment.
-    *
-    * @return Address of the allocated block or NULL on not enough memory.
-    *
-    */
-   static void *malloc_internal(const size_t size, const size_t align)
-   {
-       assert(first_heap_area != NULL);
-
-       if (align == 0)
-           return NULL;
-
-       size_t falign = lcm(align, BASE_ALIGN);
-       size_t real_size = GROSS_SIZE(ALIGN_UP(size, falign));
-
-       bool retry = false;
-       heap_block_head_t *split;
-
-   loop:
-
-       /* Try the next fit approach */
-       split = next;
-
-       if (split != NULL) {
-           void *addr = malloc_area(split->area, split, NULL, real_size,
-               falign);
-
-           if (addr != NULL)
-               return addr;
-       }
-
-       /* Search the entire heap */
-       heap_area_t *area;
-       for (area = first_heap_area; area != NULL; area = area->next) {
-           heap_block_head_t *first = (heap_block_head_t *)
-               AREA_FIRST_BLOCK(area);
-
-           void *addr = malloc_area(area, first, split, real_size,
-               falign);
-
-           if (addr != NULL)
-               return addr;
-       }
-
-       if (!retry) {
-           /* Try to grow the heap space */
-           if (heap_grow(real_size)) {
-               retry = true;
+
+           /* Advance to the next block. */
+           cur = (heap_block_head_t *) (((void *) cur) + cur->size);
+       }
+
+       if ((result == NULL) && (!grown)) {
+           if (grow_heap(real_size)) {
+               grown = true;
                goto loop;
            }
        }

-       return NULL;
+       return result;
    }
…
        (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

+   assert((void *) head >= heap_start);
+   assert((void *) head < heap_end);
+
    block_check(head);
    assert(!head->free);
-
-   heap_area_t *area = head->area;
-
-   area_check(area);
-   assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
-   assert((void *) head < area->end);

    void *ptr = NULL;
…
        /* Shrink */
        if (orig_size - real_size >= STRUCT_OVERHEAD) {
-           /*
-            * Split the original block to a full block
-            * and a trailing free block.
-            */
-           block_init((void *) head, real_size, false, area);
+           /* Split the original block to a full block
+              and a trailing free block */
+           block_init((void *) head, real_size, false);
            block_init((void *) head + real_size,
-               orig_size - real_size, true, area);
-           heap_shrink();
+               orig_size - real_size, true);
+           shrink_heap();
        }

        ptr = ((void *) head) + sizeof(heap_block_head_t);
    } else {
-       /*
-        * Look at the next block. If it is free and the size is
-        * sufficient then merge the two. Otherwise just allocate
-        * a new block, copy the original data into it and
-        * free the original block.
-        */
+       /* Look at the next block. If it is free and the size is
+          sufficient then merge the two. Otherwise just allocate
+          a new block, copy the original data into it and
+          free the original block. */
        heap_block_head_t *next_head =
            (heap_block_head_t *) (((void *) head) + head->size);

-       if (((void *) next_head < area->end) &&
+       if (((void *) next_head < heap_end) &&
            (head->size + next_head->size >= real_size) &&
            (next_head->free)) {
            block_check(next_head);
-           block_init(head, head->size + next_head->size, false, area);
+           block_init(head, head->size + next_head->size, false);
            split_mark(head, real_size);

            ptr = ((void *) head) + sizeof(heap_block_head_t);
-           next = NULL;
        } else
            reloc = true;
…
        = (heap_block_head_t *) (addr - sizeof(heap_block_head_t));

+   assert((void *) head >= heap_start);
+   assert((void *) head < heap_end);
+
    block_check(head);
    assert(!head->free);
-
-   heap_area_t *area = head->area;
-
-   area_check(area);
-   assert((void *) head >= (void *) AREA_FIRST_BLOCK(area));
-   assert((void *) head < area->end);

    /* Mark the block itself as free. */
…
        = (heap_block_head_t *) (((void *) head) + head->size);

-   if ((void *) next_head < area->end) {
+   if ((void *) next_head < heap_end) {
        block_check(next_head);
        if (next_head->free)
-           block_init(head, head->size + next_head->size, true, area);
+           block_init(head, head->size + next_head->size, true);
    }

    /* Look at the previous block. If it is free, merge the two. */
-   if ((void *) head > (void *) AREA_FIRST_BLOCK(area)) {
+   if ((void *) head > heap_start) {
        heap_block_foot_t *prev_foot =
            (heap_block_foot_t *) (((void *) head) - sizeof(heap_block_foot_t));
…
        if (prev_head->free)
-           block_init(prev_head, prev_head->size + head->size, true,
-               area);
-   }
-
-   heap_shrink();
+           block_init(prev_head, prev_head->size + head->size, true);
+   }
+
+   shrink_heap();

    futex_up(&malloc_futex);
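Both sides of the malloc.c diff keep the same basic block layout: every allocation is bracketed by a header and a footer that both record the block size, which is what lets free() find the previous block's header when coalescing. A minimal sketch of that address arithmetic, using simplified stand-in structs rather than the exact HelenOS definitions:

#include <stdint.h>
#include <stddef.h>

typedef struct { size_t size; int free; uint32_t magic; } head_t;
typedef struct { size_t size; uint32_t magic; } foot_t;

#define STRUCT_OVERHEAD   (sizeof(head_t) + sizeof(foot_t))
#define GROSS_SIZE(size)  ((size) + STRUCT_OVERHEAD)  /* payload -> whole block */
#define NET_SIZE(size)    ((size) - STRUCT_OVERHEAD)  /* whole block -> payload */

/* Footer of the block that starts at head (same expression as in block_init). */
static inline foot_t *block_foot(head_t *head)
{
	return (foot_t *) ((uintptr_t) head + head->size - sizeof(foot_t));
}

/* Header of the block immediately before head, located via the previous
 * block's footer; this is how free() coalesces with its left neighbour. */
static inline head_t *prev_head(head_t *head)
{
	foot_t *prev_foot = (foot_t *) ((uintptr_t) head - sizeof(foot_t));
	return (head_t *) ((uintptr_t) head - prev_foot->size);
}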
uspace/lib/c/generic/private/libc.h
(r0b37882 → rae6f303)

    #define LIBC_PRIVATE_LIBC_H_

-   extern void __entry(void);
+   extern int main(int, char *[]);
    extern void __main(void *) __attribute__((noreturn));
-   extern int main(int, char *[]);

    #endif
uspace/lib/c/include/malloc.h
(r0b37882 → rae6f303)

    #include <sys/types.h>

+   extern uintptr_t get_max_heap_addr(void);
+
    extern void *malloc(const size_t size)
        __attribute__((malloc));