Changes in kernel/generic/src/mm/as.c [fc47885:7e752b2] in mainline
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (fc47885)
+++ kernel/generic/src/mm/as.c (7e752b2)
@@ -86,15 +86,17 @@
  * Each architecture decides what functions will be used to carry out
  * address space operations such as creating or locking page tables.
+ *
  */
 as_operations_t *as_operations = NULL;
 
-/** Slab for as_t objects.
+/**
+ * Slab for as_t objects.
  *
  */
 static slab_cache_t *as_slab;
 
-/** ASID subsystem lock.
- *
- * This lock protects:
+/**
+ * This lock serializes access to the ASID subsystem.
+ * It protects:
  * - inactive_as_with_asid_head list
  * - as->asid for each as of the as_t type
@@ -105,6 +107,7 @@
 
 /**
- * Inactive address spaces (on all processors)
- * that have valid ASID.
+ * This list contains address spaces that are not active on any
+ * processor and that have valid ASID.
+ *
  */
 LIST_INITIALIZE(inactive_as_with_asid_head);
@@ -120,10 +123,13 @@
    mutex_initialize(&as->lock, MUTEX_PASSIVE);
 
-   return as_constructor_arch(as, flags);
+   int rc = as_constructor_arch(as, flags);
+
+   return rc;
 }
 
 NO_TRACE static size_t as_destructor(void *obj)
 {
-   return as_destructor_arch((as_t *) obj);
+   as_t *as = (as_t *) obj;
+   return as_destructor_arch(as);
 }
 
@@ -140,6 +146,5 @@
        panic("Cannot create kernel address space.");
 
-   /*
-    * Make sure the kernel address space
+   /* Make sure the kernel address space
    * reference count never drops to zero.
    */
@@ -190,5 +195,5 @@
 {
    DEADLOCK_PROBE_INIT(p_asidlock);
 
    ASSERT(as != AS);
    ASSERT(atomic_get(&as->refcount) == 0);
@@ -198,5 +203,5 @@
    * lock its mutex.
    */
 
   /*
    * We need to avoid deadlock between TLB shootdown and asidlock.
@@ -205,4 +210,5 @@
    * disabled to prevent nested context switches. We also depend on the
    * fact that so far no spinlocks are held.
+   *
    */
    preemption_disable();
@@ -229,5 +235,5 @@
    spinlock_unlock(&asidlock);
    interrupts_restore(ipl);
 
 
   /*
@@ -235,4 +241,5 @@
    * The B+tree must be walked carefully because it is
    * also being destroyed.
+   *
    */
    bool cond = true;
@@ -261,6 +268,6 @@
 /** Hold a reference to an address space.
  *
- * Holding a reference to an address space prevents destruction
- * of that address space.
+ * Holding a reference to an address space prevents destruction of that address
+ * space.
  *
  * @param as Address space to be held.
@@ -274,6 +281,6 @@
 /** Release a reference to an address space.
  *
- * The last one to release a reference to an address space
- * destroys the address space.
+ * The last one to release a reference to an address space destroys the address
+ * space.
  *
  * @param as Address space to be released.
@@ -303,4 +310,5 @@
    /*
    * We don't want any area to have conflicts with NULL page.
+   *
    */
    if (overlaps(va, size, (uintptr_t) NULL, PAGE_SIZE))
@@ -313,4 +321,5 @@
    * record in the left neighbour, the leftmost record in the right
    * neighbour and all records in the leaf node itself.
+   *
    */
    btree_node_t *leaf;
@@ -373,4 +382,5 @@
    * So far, the area does not conflict with other areas.
    * Check if it doesn't conflict with kernel address space.
+   *
    */
    if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
@@ -427,5 +437,4 @@
    area->attributes = attrs;
    area->pages = SIZE2FRAMES(size);
-   area->resident = 0;
    area->base = base;
    area->sh_info = NULL;
@@ -470,4 +479,5 @@
    * to find out whether this is a miss or va belongs to an address
    * space area found there.
+   *
    */
 
@@ -489,4 +499,5 @@
    * Second, locate the left neighbour and test its last record.
    * Because of its position in the B+tree, it must have base < va.
+   *
    */
    btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
@@ -523,4 +534,5 @@
    /*
    * Locate the area.
+   *
    */
    as_area_t *area = find_area_and_lock(as, address);
@@ -534,4 +546,5 @@
        * Remapping of address space areas associated
        * with memory mapped devices is not supported.
+       *
        */
        mutex_unlock(&area->lock);
@@ -544,4 +557,5 @@
        * Remapping of shared address space areas
        * is not supported.
+       *
        */
        mutex_unlock(&area->lock);
@@ -554,4 +568,5 @@
        /*
        * Zero size address space areas are not allowed.
+       *
        */
        mutex_unlock(&area->lock);
@@ -566,4 +581,5 @@
        * Shrinking the area.
        * No need to check for overlaps.
+       *
        */
 
@@ -572,4 +588,5 @@
        /*
        * Start TLB shootdown sequence.
+       *
        */
        ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
@@ -582,4 +599,5 @@
        * is also the right way to remove part of the used_space
        * B+tree leaf list.
+       *
        */
        bool cond = true;
@@ -605,4 +623,5 @@
                    * completely in the resized
                    * address space area.
+                   *
                    */
                    break;
@@ -613,4 +632,5 @@
                    * to b and c overlaps with the resized
                    * address space area.
+                   *
                    */
 
@@ -653,4 +673,5 @@
        /*
        * Finish TLB shootdown sequence.
+       *
        */
 
@@ -660,4 +681,5 @@
        /*
        * Invalidate software translation caches (e.g. TSB on sparc64).
+       *
        */
        as_invalidate_translation_cache(as, area->base +
@@ -670,4 +692,5 @@
        * Growing the area.
        * Check for overlaps with other address space areas.
+       *
        */
        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
@@ -790,4 +813,5 @@
    /*
    * Finish TLB shootdown sequence.
+   *
    */
 
@@ -797,4 +821,5 @@
    * Invalidate potential software translation caches (e.g. TSB on
    * sparc64).
+   *
    */
    as_invalidate_translation_cache(as, area->base, area->pages);
@@ -814,4 +839,5 @@
    /*
    * Remove the empty area from address space.
+   *
    */
    btree_remove(&as->as_area_btree, base, NULL);
@@ -855,4 +881,5 @@
        /*
        * Could not find the source address space area.
+       *
        */
        mutex_unlock(&src_as->lock);
@@ -864,4 +891,5 @@
        * There is no backend or the backend does not
        * know how to share the area.
+       *
        */
        mutex_unlock(&src_area->lock);
@@ -890,4 +918,5 @@
    * First, prepare the area for sharing.
    * Then it will be safe to unlock it.
+   *
    */
    share_info_t *sh_info = src_area->sh_info;
@@ -901,4 +930,5 @@
        /*
        * Call the backend to setup sharing.
+       *
        */
        src_area->backend->share(src_area);
@@ -919,4 +949,5 @@
    * The flags of the source area are masked against dst_flags_mask
    * to support sharing in less privileged mode.
+   *
    */
    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
@@ -935,4 +966,5 @@
    * fully initialized. Clear the AS_AREA_ATTR_PARTIAL
    * attribute and set the sh_info.
+   *
    */
    mutex_lock(&dst_as->lock);
@@ -957,6 +989,4 @@
 NO_TRACE bool as_area_check_access(as_area_t *area, pf_access_t access)
 {
-   ASSERT(mutex_locked(&area->lock));
-
    int flagmap[] = {
        [PF_ACCESS_READ] = AS_AREA_READ,
@@ -964,4 +994,6 @@
        [PF_ACCESS_EXEC] = AS_AREA_EXEC
    };
+
+   ASSERT(mutex_locked(&area->lock));
 
    if (!(area->flags & flagmap[access]))
@@ -1034,4 +1066,5 @@
    /*
    * Compute total number of used pages in the used_space B+tree
+   *
    */
    size_t used_pages = 0;
@@ -1055,4 +1088,5 @@
    /*
    * Start TLB shootdown sequence.
+   *
    */
    ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base,
@@ -1062,4 +1096,5 @@
    * Remove used pages from page tables and remember their frame
    * numbers.
+   *
    */
    size_t frame_idx = 0;
@@ -1092,4 +1127,5 @@
    /*
    * Finish TLB shootdown sequence.
+   *
    */
 
@@ -1099,4 +1135,5 @@
    * Invalidate potential software translation caches (e.g. TSB on
    * sparc64).
+   *
    */
    as_invalidate_translation_cache(as, area->base, area->pages);
@@ -1180,4 +1217,5 @@
        * No area contained mapping for 'page'.
        * Signal page fault to low-level handler.
+       *
        */
        mutex_unlock(&AS->lock);
@@ -1199,4 +1237,5 @@
        * The address space area is not backed by any backend
        * or the backend cannot handle page faults.
+       *
        */
        mutex_unlock(&area->lock);
@@ -1210,4 +1249,5 @@
    * To avoid race condition between two page faults on the same address,
    * we need to make sure the mapping has not been already inserted.
+   *
    */
    pte_t *pte;
@@ -1227,4 +1267,5 @@
    /*
    * Resort to the backend page fault handler.
+   *
    */
    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
@@ -1281,4 +1322,5 @@
        * preemption is disabled. We should not be
        * holding any other lock.
+       *
        */
        (void) interrupts_enable();
@@ -1300,4 +1342,5 @@
            * list of inactive address spaces with assigned
            * ASID.
+           *
            */
            ASSERT(old_as->asid != ASID_INVALID);
@@ -1310,4 +1353,5 @@
        * Perform architecture-specific tasks when the address space
        * is being removed from the CPU.
+       *
        */
        as_deinstall_arch(old_as);
@@ -1316,4 +1360,5 @@
    /*
    * Second, prepare the new address space.
+   *
    */
    if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
@@ -1331,4 +1376,5 @@
    * Perform architecture-specific steps.
    * (e.g. write ASID to hardware register etc.)
+   *
    */
    as_install_arch(new_as);
@@ -1349,5 +1395,5 @@
 {
    ASSERT(mutex_locked(&area->lock));
 
    return area_flags_to_page_flags(area->flags);
 }
@@ -1470,8 +1516,8 @@
  * @param count Number of page to be marked.
  *
- * @return False on failure or true on success.
- *
- */
-bool used_space_insert(as_area_t *area, uintptr_t page, size_t count)
+ * @return Zero on failure and non-zero on success.
+ *
+ */
+int used_space_insert(as_area_t *area, uintptr_t page, size_t count)
 {
    ASSERT(mutex_locked(&area->lock));
@@ -1484,10 +1530,11 @@
        /*
        * We hit the beginning of some used space.
-       */
-       return false;
+       *
+       */
+       return 0;
    }
 
   if (!leaf->keys) {
        btree_insert(&area->used_space, page, (void *) count, leaf);
-       goto success;
+       return 1;
    }
@@ -1504,4 +1551,5 @@
        * somewhere between the rightmost interval of
        * the left neigbour and the first interval of the leaf.
+       *
        */
 
@@ -1511,9 +1559,9 @@
            left_cnt * PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
-           return false;
+           return 0;
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
            right_cnt * PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
-           return false;
+           return 0;
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
            (page + count * PAGE_SIZE == right_pg)) {
@@ -1521,15 +1569,17 @@
            * The interval can be added by merging the two already
            * present intervals.
+           *
            */
            node->value[node->keys - 1] += count + right_cnt;
            btree_remove(&area->used_space, right_pg, leaf);
-           goto success;
+           return 1;
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
            /*
            * The interval can be added by simply growing the left
            * interval.
+           *
            */
            node->value[node->keys - 1] += count;
-           goto success;
+           return 1;
        } else if (page + count * PAGE_SIZE == right_pg) {
            /*
@@ -1537,16 +1587,18 @@
            * the right interval down and increasing its size
            * accordingly.
+           *
            */
            leaf->value[0] += count;
            leaf->key[0] = page;
-           goto success;
+           return 1;
        } else {
            /*
            * The interval is between both neigbouring intervals,
            * but cannot be merged with any of them.
+           *
            */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
-           goto success;
+           return 1;
        }
    } else if (page < leaf->key[0]) {
@@ -1557,4 +1609,5 @@
        * Investigate the border case in which the left neighbour does
        * not exist but the interval fits from the left.
+       *
        */
 
@@ -1562,5 +1615,5 @@
            right_cnt * PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
-           return false;
+           return 0;
        } else if (page + count * PAGE_SIZE == right_pg) {
            /*
@@ -1568,16 +1621,18 @@
            * right interval down and increasing its size
            * accordingly.
+           *
            */
            leaf->key[0] = page;
            leaf->value[0] += count;
-           goto success;
+           return 1;
        } else {
            /*
            * The interval doesn't adjoin with the right interval.
            * It must be added individually.
+           *
            */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
-           goto success;
+           return 1;
        }
    }
@@ -1594,4 +1649,5 @@
        * somewhere between the leftmost interval of
        * the right neigbour and the last interval of the leaf.
+       *
        */
 
@@ -1601,9 +1657,9 @@
            left_cnt * PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
-           return false;
+           return 0;
        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
            right_cnt * PAGE_SIZE)) {
            /* The interval intersects with the right interval. */
-           return false;
+           return 0;
        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
            (page + count * PAGE_SIZE == right_pg)) {
@@ -1611,15 +1667,17 @@
            * The interval can be added by merging the two already
            * present intervals.
+           *
            */
            leaf->value[leaf->keys - 1] += count + right_cnt;
            btree_remove(&area->used_space, right_pg, node);
-           goto success;
+           return 1;
        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
            /*
            * The interval can be added by simply growing the left
            * interval.
+           *
            */
-           leaf->value[leaf->keys - 1] += count;
-           goto success;
+           leaf->value[leaf->keys - 1] += count;
+           return 1;
        } else if (page + count * PAGE_SIZE == right_pg) {
            /*
@@ -1627,16 +1685,18 @@
            * the right interval down and increasing its size
            * accordingly.
+           *
            */
            node->value[0] += count;
            node->key[0] = page;
-           goto success;
+           return 1;
        } else {
            /*
            * The interval is between both neigbouring intervals,
            * but cannot be merged with any of them.
+           *
            */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
-           goto success;
+           return 1;
        }
    } else if (page >= leaf->key[leaf->keys - 1]) {
@@ -1647,4 +1707,5 @@
        * Investigate the border case in which the right neighbour
        * does not exist but the interval fits from the right.
+       *
        */
 
@@ -1652,20 +1713,22 @@
            left_cnt * PAGE_SIZE)) {
            /* The interval intersects with the left interval. */
-           return false;
+           return 0;
        } else if (left_pg + left_cnt * PAGE_SIZE == page) {
            /*
            * The interval can be added by growing the left
            * interval.
+           *
            */
            leaf->value[leaf->keys - 1] += count;
-           goto success;
+           return 1;
        } else {
            /*
            * The interval doesn't adjoin with the left interval.
            * It must be added individually.
+           *
            */
            btree_insert(&area->used_space, page, (void *) count,
                leaf);
-           goto success;
+           return 1;
        }
    }
@@ -1675,4 +1738,5 @@
    * only between two other intervals of the leaf. The two border cases
    * were already resolved.
+   *
    */
    btree_key_t i;
@@ -1686,4 +1750,5 @@
            /*
            * The interval fits between left_pg and right_pg.
+           *
            */
 
@@ -1693,6 +1758,7 @@
                * The interval intersects with the left
                * interval.
+               *
                */
-               return false;
+               return 0;
            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
                right_cnt * PAGE_SIZE)) {
@@ -1700,6 +1766,7 @@
                * The interval intersects with the right
                * interval.
+               *
                */
-               return false;
+               return 0;
            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
                (page + count * PAGE_SIZE == right_pg)) {
@@ -1707,15 +1774,17 @@
                * The interval can be added by merging the two
                * already present intervals.
+               *
                */
                leaf->value[i - 1] += count + right_cnt;
                btree_remove(&area->used_space, right_pg, leaf);
-               goto success;
+               return 1;
            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
                /*
                * The interval can be added by simply growing
                * the left interval.
+               *
                */
                leaf->value[i - 1] += count;
-               goto success;
+               return 1;
            } else if (page + count * PAGE_SIZE == right_pg) {
                /*
@@ -1723,8 +1792,9 @@
                * base of the right interval down and
                * increasing its size accordingly.
+               *
                */
                leaf->value[i] += count;
                leaf->key[i] = page;
-               goto success;
+               return 1;
            } else {
                /*
@@ -1732,8 +1802,9 @@
                * intervals, but cannot be merged with any of
                * them.
+               *
                */
                btree_insert(&area->used_space, page,
                    (void *) count, leaf);
-               goto success;
+               return 1;
            }
        }
@@ -1742,8 +1813,4 @@
    panic("Inconsistency detected while adding %zu pages of used "
        "space at %p.", count, (void *) page);
-
-success:
-   area->resident += count;
-   return true;
 }
 
@@ -1756,8 +1823,8 @@
  * @param count Number of page to be marked.
  *
- * @return False on failure or true on success.
- *
- */
-bool used_space_remove(as_area_t *area, uintptr_t page, size_t count)
+ * @return Zero on failure and non-zero on success.
+ *
+ */
+int used_space_remove(as_area_t *area, uintptr_t page, size_t count)
 {
    ASSERT(mutex_locked(&area->lock));
@@ -1770,14 +1837,17 @@
        /*
        * We are lucky, page is the beginning of some interval.
+       *
        */
        if (count > pages) {
-           return false;
+           return 0;
        } else if (count == pages) {
            btree_remove(&area->used_space, page, leaf);
-           goto success;
+           return 1;
        } else {
            /*
            * Find the respective interval.
            * Decrease its size and relocate its start address.
+           *
            */
            btree_key_t i;
@@ -1786,8 +1855,7 @@
                    leaf->key[i] += count * PAGE_SIZE;
                    leaf->value[i] -= count;
-                   goto success;
+                   return 1;
                }
            }
-
        goto error;
    }
@@ -1808,7 +1876,8 @@
                * removed by updating the size of the bigger
                * interval.
+               *
                */
                node->value[node->keys - 1] -= count;
-               goto success;
+               return 1;
            } else if (page + count * PAGE_SIZE <
                left_pg + left_cnt*PAGE_SIZE) {
@@ -1819,4 +1888,5 @@
                * the original interval and also inserting a
                * new interval.
+               *
                */
                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
@@ -1825,10 +1895,9 @@
                btree_insert(&area->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-               goto success;
+               return 1;
            }
        }
-
-       return false;
+       return 0;
    } else if (page < leaf->key[0])
-       return false;
+       return 0;
 
@@ -1845,7 +1914,8 @@
                * interval of the leaf and can be removed by
                * updating the size of the bigger interval.
+               *
                */
                leaf->value[leaf->keys - 1] -= count;
-               goto success;
+               return 1;
            } else if (page + count * PAGE_SIZE < left_pg +
                left_cnt * PAGE_SIZE) {
@@ -1856,4 +1926,5 @@
                * original interval and also inserting a new
                * interval.
+               *
                */
                size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
@@ -1862,14 +1933,13 @@
                btree_insert(&area->used_space, page +
                    count * PAGE_SIZE, (void *) new_cnt, leaf);
-               goto success;
+               return 1;
            }
        }
-
-       return false;
+       return 0;
    }
 
   /*
    * The border cases have been already resolved.
-   * Now the interval can be only between intervals of the leaf. 
+   * Now the interval can be only between intervals of the leaf.
    */
    btree_key_t i;
@@ -1892,7 +1962,8 @@
                    * be removed by updating the size of
                    * the bigger interval.
+                   *
                    */
                    leaf->value[i - 1] -= count;
-                   goto success;
+                   return 1;
                } else if (page + count * PAGE_SIZE <
                    left_pg + left_cnt * PAGE_SIZE) {
@@ -1912,9 +1983,8 @@
                        count * PAGE_SIZE, (void *) new_cnt,
                        leaf);
-                   goto success;
+                   return 1;
                }
            }
-
-           return false;
+           return 0;
        }
    }
@@ -1923,8 +1993,4 @@
    panic("Inconsistency detected while removing %zu pages of used "
        "space from %p.", count, (void *) page);
-
-success:
-   area->resident -= count;
-   return true;
 }
 
@@ -1934,29 +2000,29 @@
 
 /** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
+unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
 {
    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
        AS_AREA_ATTR_NONE, &anon_backend, NULL))
-       return (sysarg_t) address;
+       return (unative_t) address;
    else
-       return (sysarg_t) -1;
+       return (unative_t) -1;
 }
 
 /** Wrapper for as_area_resize(). */
-sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
-{
-   return (sysarg_t) as_area_resize(AS, address, size, 0);
+unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
+{
+   return (unative_t) as_area_resize(AS, address, size, 0);
 }
 
 /** Wrapper for as_area_change_flags(). */
-sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
-{
-   return (sysarg_t) as_area_change_flags(AS, flags, address);
+unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
+{
+   return (unative_t) as_area_change_flags(AS, flags, address);
 }
 
 /** Wrapper for as_area_destroy(). */
-sysarg_t sys_as_area_destroy(uintptr_t address)
-{
-   return (sysarg_t) as_area_destroy(AS, address);
+unative_t sys_as_area_destroy(uintptr_t address)
+{
+   return (unative_t) as_area_destroy(AS, address);
 }
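The used_space_insert()/used_space_remove() hunks above all follow the same pattern: reject the new interval if overlaps() reports an intersection with an existing one, otherwise merge it into an adjoining interval or insert it as a fresh B+tree record. Below is a minimal, self-contained sketch of that interval arithmetic, not the kernel code itself; the overlaps() helper mirrors the semantics of the generic macro used by as.c, and the 4 KiB PAGE_SIZE and the scenario values are assumptions chosen for illustration.

/* Sketch of the interval check-and-merge logic from used_space_insert(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096  /* assumed page size for this example */

/* True iff the half-open ranges [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

int main(void)
{
    uintptr_t left_pg = 0x10000;  /* existing used interval: 4 pages */
    size_t left_cnt = 4;
    uintptr_t page = left_pg + left_cnt * PAGE_SIZE;  /* adjoins on the right */
    size_t count = 2;

    /* The insert path first rejects intervals that intersect a neighbour... */
    printf("intersects left: %d\n",
        overlaps(page, count * PAGE_SIZE, left_pg, left_cnt * PAGE_SIZE));

    /* ...and then merges an adjoining interval by growing the left one,
     * which corresponds to leaf->value[...] += count; in the kernel. */
    if (page == left_pg + left_cnt * PAGE_SIZE)
        left_cnt += count;

    printf("merged length: %zu pages\n", left_cnt);
    return 0;
}

With the values above, overlaps() returns false (the ranges merely touch, half-open) and the adjoining interval is merged, growing the tracked interval from 4 to 6 pages, exactly the "grow the left interval" branch of the insert path.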