Changes in kernel/generic/src/mm/as.c [560b81c:59fb782] in mainline
File: 1 edited
Legend:
- Unmodified: context lines (prefixed with a space)
- Added: lines prefixed with +
- Removed: lines prefixed with -
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c (r560b81c)
+++ kernel/generic/src/mm/as.c (r59fb782)
@@ -488 +488 @@
 
     /* Eventually check the addresses behind each area */
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t, node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
 
         for (btree_key_t i = 0; i < node->keys; i++) {
@@ -520 +522 @@
     }
 
-/** Remove reference to address space area share info.
- *
- * If the reference count drops to 0, the sh_info is deallocated.
- *
- * @param sh_info Pointer to address space area share info.
- *
- */
-NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
-{
-    bool dealloc = false;
-
-    mutex_lock(&sh_info->lock);
-    ASSERT(sh_info->refcount);
-
-    if (--sh_info->refcount == 0) {
-        dealloc = true;
-
-        /*
-         * Now walk carefully the pagemap B+tree and free/remove
-         * reference from all frames found there.
-         */
-        list_foreach(sh_info->pagemap.leaf_list, leaf_link,
-            btree_node_t, node) {
-            btree_key_t i;
-
-            for (i = 0; i < node->keys; i++)
-                frame_free((uintptr_t) node->value[i], 1);
-        }
-
-    }
-    mutex_unlock(&sh_info->lock);
-
-    if (dealloc) {
-        if (sh_info->backend && sh_info->backend->destroy_shared_data) {
-            sh_info->backend->destroy_shared_data(
-                sh_info->backend_shared_data);
-        }
-        btree_destroy(&sh_info->pagemap);
-        free(sh_info);
-    }
-}
-
-
 /** Create address space area of common attributes.
  *
@@ -572 +531 @@
  * @param attrs        Attributes of the area.
  * @param backend      Address space area backend. NULL if no backend is used.
- * @param backend_data NULL or a pointer to custom backend data.
+ * @param backend_data NULL or a pointer to an array holding two void *.
  * @param base         Starting virtual address of the area.
- *                     If set to AS_AREA_ANY, a suitable mappable area is
- *                     found.
- * @param bound        Lowest address bound if base is set to AS_AREA_ANY.
+ *                     If set to -1, a suitable mappable area is found.
+ * @param bound        Lowest address bound if base is set to -1.
  *                     Otherwise ignored.
  *
@@ -586 +544 @@
     mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-    if ((*base != (uintptr_t) AS_AREA_ANY) && !IS_ALIGNED(*base, PAGE_SIZE))
+    if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
         return NULL;
 
@@ -602 +560 @@
     mutex_lock(&as->lock);
 
-    if (*base == (uintptr_t) AS_AREA_ANY) {
+    if (*base == (uintptr_t) -1) {
         *base = as_get_unmapped_area(as, bound, size, guarded);
         if (*base == (uintptr_t) -1) {
@@ -610 +568 @@
     }
 
-    if (overflows_into_positive(*base, size)) {
-        mutex_unlock(&as->lock);
+    if (overflows_into_positive(*base, size))
         return NULL;
-    }
 
     if (!check_area_conflicts(as, *base, pages, guarded, NULL)) {
@@ -630 +586 @@
     area->resident = 0;
     area->base = *base;
+    area->sh_info = NULL;
     area->backend = backend;
-    area->sh_info = NULL;
 
     if (backend_data)
@@ -637 +593 @@
     else
         memsetb(&area->backend_data, sizeof(area->backend_data), 0);
-
-    share_info_t *si = NULL;
-
-    /*
-     * Create the sharing info structure.
-     * We do this in advance for every new area, even if it is not going
-     * to be shared.
-     */
-    if (!(attrs & AS_AREA_ATTR_PARTIAL)) {
-        si = (share_info_t *) malloc(sizeof(share_info_t), 0);
-        mutex_initialize(&si->lock, MUTEX_PASSIVE);
-        si->refcount = 1;
-        si->shared = false;
-        si->backend_shared_data = NULL;
-        si->backend = backend;
-        btree_create(&si->pagemap);
-
-        area->sh_info = si;
-
-        if (area->backend && area->backend->create_shared_data) {
-            if (!area->backend->create_shared_data(area)) {
-                free(area);
-                mutex_unlock(&as->lock);
-                sh_info_remove_reference(si);
-                return NULL;
-            }
-        }
-    }
 
     if (area->backend && area->backend->create) {
         if (!area->backend->create(area)) {
             free(area);
             mutex_unlock(&as->lock);
-            if (!(attrs & AS_AREA_ATTR_PARTIAL))
-                sh_info_remove_reference(si);
             return NULL;
         }
     }
 
     btree_create(&area->used_space);
     btree_insert(&as->as_area_btree, *base, (void *) area,
@@ -785 +711 @@
     }
 
-    mutex_lock(&area->sh_info->lock);
-    if (area->sh_info->shared) {
+    if (area->sh_info) {
         /*
          * Remapping of shared address space areas
          * is not supported.
         */
-        mutex_unlock(&area->sh_info->lock);
         mutex_unlock(&area->lock);
         mutex_unlock(&as->lock);
         return ENOTSUP;
     }
-    mutex_unlock(&area->sh_info->lock);
 
     size_t pages = SIZE2FRAMES((address - area->base) + size);
@@ -835 +758 @@
         if ((cond = (bool) node->keys)) {
             uintptr_t ptr = node->key[node->keys - 1];
-            size_t node_size =
+            size_t size =
                 (size_t) node->value[node->keys - 1];
             size_t i = 0;
 
-            if (overlaps(ptr, P2SZ(node_size), area->base,
+            if (overlaps(ptr, P2SZ(size), area->base,
                 P2SZ(pages))) {
 
-                if (ptr + P2SZ(node_size) <= start_free) {
+                if (ptr + P2SZ(size) <= start_free) {
                     /*
                      * The whole interval fits
@@ -861 +784 @@
                     i = (start_free - ptr) >> PAGE_WIDTH;
                     if (!used_space_remove(area, start_free,
-                        node_size - i))
+                        size - i))
                         panic("Cannot remove used space.");
                 } else {
@@ -868 +791 @@
                      * completely removed.
                      */
-                    if (!used_space_remove(area, ptr, node_size))
+                    if (!used_space_remove(area, ptr, size))
                         panic("Cannot remove used space.");
                 }
@@ -888 +811 @@
                     area->pages - pages);
 
-                for (; i < node_size; i++) {
-                    pte_t pte;
-                    bool found = page_mapping_find(as,
-                        ptr + P2SZ(i), false, &pte);
+                for (; i < size; i++) {
+                    pte_t *pte = page_mapping_find(as,
+                        ptr + P2SZ(i), false);
 
-                    ASSERT(found);
-                    ASSERT(PTE_VALID(&pte));
-                    ASSERT(PTE_PRESENT(&pte));
+                    ASSERT(pte);
+                    ASSERT(PTE_VALID(pte));
+                    ASSERT(PTE_PRESENT(pte));
 
                     if ((area->backend) &&
@@ -901 +823 @@
                         area->backend->frame_free(area,
                             ptr + P2SZ(i),
-                            PTE_GET_FRAME(&pte));
+                            PTE_GET_FRAME(pte));
                     }
 
@@ -961 +883 @@
 }
 
+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ *
+ */
+NO_TRACE static void sh_info_remove_reference(share_info_t *sh_info)
+{
+    bool dealloc = false;
+
+    mutex_lock(&sh_info->lock);
+    ASSERT(sh_info->refcount);
+
+    if (--sh_info->refcount == 0) {
+        dealloc = true;
+
+        /*
+         * Now walk carefully the pagemap B+tree and free/remove
+         * reference from all frames found there.
+         */
+        list_foreach(sh_info->pagemap.leaf_list, cur) {
+            btree_node_t *node
+                = list_get_instance(cur, btree_node_t, leaf_link);
+            btree_key_t i;
+
+            for (i = 0; i < node->keys; i++)
+                frame_free((uintptr_t) node->value[i]);
+        }
+
+    }
+    mutex_unlock(&sh_info->lock);
+
+    if (dealloc) {
+        btree_destroy(&sh_info->pagemap);
+        free(sh_info);
+    }
+}
+
 /** Destroy address space area.
  *
@@ -995 +956 @@
      * Visit only the pages mapped by used_space B+tree.
      */
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node;
         btree_key_t i;
 
+        node = list_get_instance(cur, btree_node_t, leaf_link);
         for (i = 0; i < node->keys; i++) {
             uintptr_t ptr = node->key[i];
@@ -1004 +966 @@
 
             for (size = 0; size < (size_t) node->value[i]; size++) {
-                pte_t pte;
-                bool found = page_mapping_find(as,
-                    ptr + P2SZ(size), false, &pte);
+                pte_t *pte = page_mapping_find(as,
+                    ptr + P2SZ(size), false);
 
-                ASSERT(found);
-                ASSERT(PTE_VALID(&pte));
-                ASSERT(PTE_PRESENT(&pte));
+                ASSERT(pte);
+                ASSERT(PTE_VALID(pte));
+                ASSERT(PTE_PRESENT(pte));
 
                 if ((area->backend) &&
@@ -1016 +977 @@
                     area->backend->frame_free(area,
                         ptr + P2SZ(size),
-                        PTE_GET_FRAME(&pte));
+                        PTE_GET_FRAME(pte));
                 }
 
@@ -1043 +1004 @@
     area->attributes |= AS_AREA_ATTR_PARTIAL;
 
-    sh_info_remove_reference(area->sh_info);
+    if (area->sh_info)
+        sh_info_remove_reference(area->sh_info);
 
     mutex_unlock(&area->lock);
@@ -1130 +1092 @@
      */
     share_info_t *sh_info = src_area->sh_info;
-
-    mutex_lock(&sh_info->lock);
-    sh_info->refcount++;
-    bool shared = sh_info->shared;
-    sh_info->shared = true;
-    mutex_unlock(&sh_info->lock);
-
-    if (!shared) {
+    if (!sh_info) {
+        sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
+        mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
+        sh_info->refcount = 2;
+        btree_create(&sh_info->pagemap);
+        src_area->sh_info = sh_info;
+
         /*
          * Call the backend to setup sharing.
-         * This only happens once for each sh_info.
          */
         src_area->backend->share(src_area);
+    } else {
+        mutex_lock(&sh_info->lock);
+        sh_info->refcount++;
+        mutex_unlock(&sh_info->lock);
     }
 
@@ -1261 +1225 @@
     }
 
-    if (area->backend != &anon_backend) {
+    if ((area->sh_info) || (area->backend != &anon_backend)) {
+        /* Copying shared areas not supported yet */
         /* Copying non-anonymous memory not supported yet */
         mutex_unlock(&area->lock);
@@ -1267 +1232 @@
         return ENOTSUP;
     }
-
-    mutex_lock(&area->sh_info->lock);
-    if (area->sh_info->shared) {
-        /* Copying shared areas not supported yet */
-        mutex_unlock(&area->sh_info->lock);
-        mutex_unlock(&area->lock);
-        mutex_unlock(&as->lock);
-        return ENOTSUP;
-    }
-    mutex_unlock(&area->sh_info->lock);
 
     /*
@@ -1283 +1238 @@
     size_t used_pages = 0;
 
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
@@ -1308 +1264 @@
     size_t frame_idx = 0;
 
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node = list_get_instance(cur, btree_node_t,
+            leaf_link);
         btree_key_t i;
 
@@ -1317 +1274 @@
 
         for (size = 0; size < (size_t) node->value[i]; size++) {
-            pte_t pte;
-            bool found = page_mapping_find(as,
-                ptr + P2SZ(size), false, &pte);
+            pte_t *pte = page_mapping_find(as,
+                ptr + P2SZ(size), false);
 
-            ASSERT(found);
-            ASSERT(PTE_VALID(&pte));
-            ASSERT(PTE_PRESENT(&pte));
+            ASSERT(pte);
+            ASSERT(PTE_VALID(pte));
+            ASSERT(PTE_PRESENT(pte));
 
-            old_frame[frame_idx++] = PTE_GET_FRAME(&pte);
+            old_frame[frame_idx++] = PTE_GET_FRAME(pte);
 
             /* Remove old mapping */
@@ -1360 +1316 @@
     frame_idx = 0;
 
-    list_foreach(area->used_space.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(area->used_space.leaf_list, cur) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
@@ -1455 +1412 @@
      * we need to make sure the mapping has not been already inserted.
      */
-    pte_t pte;
-    bool found = page_mapping_find(AS, page, false, &pte);
-    if (found && PTE_PRESENT(&pte)) {
-        if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) ||
-            (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) ||
-            (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) {
-            page_table_unlock(AS, false);
-            mutex_unlock(&area->lock);
-            mutex_unlock(&AS->lock);
-            return AS_PF_OK;
+    pte_t *pte;
+    if ((pte = page_mapping_find(AS, page, false))) {
+        if (PTE_PRESENT(pte)) {
+            if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
+                (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
+                (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
+                page_table_unlock(AS, false);
+                mutex_unlock(&area->lock);
+                mutex_unlock(&AS->lock);
+                return AS_PF_OK;
+            }
         }
     }
@@ -1728 +1686 @@
     ASSERT(count);
 
-    btree_node_t *leaf = NULL;
+    btree_node_t *leaf;
     size_t pages = (size_t) btree_search(&area->used_space, page, &leaf);
     if (pages) {
@@ -1736 +1694 @@
         return false;
     }
-
-    ASSERT(leaf != NULL);
 
     if (!leaf->keys) {
@@ -2185 +2141 @@
 
 sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
-    uintptr_t bound, as_area_pager_info_t *pager_info)
+    uintptr_t bound)
 {
     uintptr_t virt = base;
-    mem_backend_t *backend;
-    mem_backend_data_t backend_data;
-
-    if (pager_info == AS_AREA_UNPAGED)
-        backend = &anon_backend;
-    else {
-        backend = &user_backend;
-        if (copy_from_uspace(&backend_data.pager_info, pager_info,
-            sizeof(as_area_pager_info_t)) != EOK) {
-            return (sysarg_t) AS_MAP_FAILED;
-        }
-    }
     as_area_t *area = as_area_create(AS, flags, size,
-        AS_AREA_ATTR_NONE, backend, &backend_data, &virt, bound);
+        AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
     if (area == NULL)
-        return (sysarg_t) AS_MAP_FAILED;
+        return (sysarg_t) -1;
 
     return (sysarg_t) virt;
@@ -2238 +2182 @@
     size_t area_cnt = 0;
 
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
         area_cnt += node->keys;
     }
@@ -2250 +2195 @@
     size_t area_idx = 0;
 
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
         btree_key_t i;
 
@@ -2285 +2231 @@
 
     /* Print out info about address space areas */
-    list_foreach(as->as_area_btree.leaf_list, leaf_link, btree_node_t,
-        node) {
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node
+            = list_get_instance(cur, btree_node_t, leaf_link);
        btree_key_t i;
 
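Many of the hunks above swap the four-argument list_foreach(list, member, itype, iterator) form for the two-argument form followed by an explicit list_get_instance() call. The standalone sketch below illustrates the underlying container-of pattern; the link_t/list_t types, the two macros, and the helper functions are simplified stand-ins written for this example only, not the actual definitions from HelenOS <adt/list.h>.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for HelenOS link_t / list_t (illustration only). */
typedef struct link {
    struct link *prev;
    struct link *next;
} link_t;

typedef struct {
    link_t head;  /* sentinel node */
} list_t;

/* Recover the enclosing structure from an embedded link (container-of). */
#define list_get_instance(link, type, member) \
    ((type *) (((char *) (link)) - offsetof(type, member)))

/* Two-argument iteration: the loop variable is a raw link_t pointer. */
#define list_foreach(list, iterator) \
    for (link_t *iterator = (list).head.next; \
        iterator != &(list).head; iterator = iterator->next)

typedef struct {
    int key;
    link_t leaf_link;  /* embedded link, as in btree_node_t */
} node_t;

static void list_init(list_t *list)
{
    list->head.prev = &list->head;
    list->head.next = &list->head;
}

static void list_append(list_t *list, link_t *link)
{
    link->prev = list->head.prev;
    link->next = &list->head;
    list->head.prev->next = link;
    list->head.prev = link;
}

int main(void)
{
    list_t leaf_list;
    node_t a = { .key = 1 }, b = { .key = 2 };

    list_init(&leaf_list);
    list_append(&leaf_list, &a.leaf_link);
    list_append(&leaf_list, &b.leaf_link);

    /* The idiom used on the r59fb782 side of the diff: iterate over
       links and recover the containing node explicitly. */
    list_foreach(leaf_list, cur) {
        node_t *node = list_get_instance(cur, node_t, leaf_link);
        printf("key = %d\n", node->key);
    }

    return 0;
}

The four-argument form seen on the r560b81c side folds the list_get_instance() call into the macro itself, so the loop variable is already the typed node rather than a raw link pointer.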