Changeset 544a2e4 in mainline for kernel/generic/src/mm/backend_anon.c
- Timestamp: 2011-05-30T21:37:43Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 7b712b60
- Parents: 18ba2e4f (diff), 0743493a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
Legend:
- Unmodified (no marker)
- Added (+)
- Removed (-)
kernel/generic/src/mm/backend_anon.c
r18ba2e4f → r544a2e4

 #include <mm/as.h>
 #include <mm/page.h>
+#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
…
 #include <typedefs.h>
 #include <align.h>
+#include <memstr.h>
 #include <arch.h>

-#ifdef CONFIG_VIRT_IDX_DCACHE
-#include <arch/mm/cache.h>
-#endif
-
-static int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
-static void anon_share(as_area_t *area);
+static bool anon_create(as_area_t *);
+static bool anon_resize(as_area_t *, size_t);
+static void anon_share(as_area_t *);
+static void anon_destroy(as_area_t *);
+
+static int anon_page_fault(as_area_t *, uintptr_t, pf_access_t);
+static void anon_frame_free(as_area_t *, uintptr_t, uintptr_t);

 mem_backend_t anon_backend = {
+    .create = anon_create,
+    .resize = anon_resize,
+    .share = anon_share,
+    .destroy = anon_destroy,
+
     .page_fault = anon_page_fault,
     .frame_free = anon_frame_free,
-    .share = anon_share
 };
+
+bool anon_create(as_area_t *area)
+{
+    return reserve_try_alloc(area->pages);
+}
+
+bool anon_resize(as_area_t *area, size_t new_pages)
+{
+    if (new_pages > area->pages)
+        return reserve_try_alloc(new_pages - area->pages);
+    else if (new_pages < area->pages)
+        reserve_free(area->pages - new_pages);
+
+    return true;
+}
+
+/** Share the anonymous address space area.
+ *
+ * Sharing of anonymous area is done by duplicating its entire mapping
+ * to the pagemap. Page faults will primarily search for frames there.
+ *
+ * The address space and address space area must be already locked.
+ *
+ * @param area Address space area to be shared.
+ */
+void anon_share(as_area_t *area)
+{
+    link_t *cur;
+
+    ASSERT(mutex_locked(&area->as->lock));
+    ASSERT(mutex_locked(&area->lock));
+
+    /*
+     * Copy used portions of the area to sh_info's page map.
+     */
+    mutex_lock(&area->sh_info->lock);
+    for (cur = area->used_space.leaf_head.next;
+        cur != &area->used_space.leaf_head; cur = cur->next) {
+        btree_node_t *node;
+        unsigned int i;
+
+        node = list_get_instance(cur, btree_node_t, leaf_link);
+        for (i = 0; i < node->keys; i++) {
+            uintptr_t base = node->key[i];
+            size_t count = (size_t) node->value[i];
+            unsigned int j;
+
+            for (j = 0; j < count; j++) {
+                pte_t *pte;
+
+                page_table_lock(area->as, false);
+                pte = page_mapping_find(area->as,
+                    base + P2SZ(j), false);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                btree_insert(&area->sh_info->pagemap,
+                    (base + P2SZ(j)) - area->base,
+                    (void *) PTE_GET_FRAME(pte), NULL);
+                page_table_unlock(area->as, false);
+
+                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
+                frame_reference_add(pfn);
+            }
+
+        }
+    }
+    mutex_unlock(&area->sh_info->lock);
+}
+
+void anon_destroy(as_area_t *area)
+{
+    reserve_free(area->pages);
+}
+

 /** Service a page fault in the anonymous memory address space area.
…
         }
         if (allocate) {
-            frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+            frame = (uintptr_t) frame_alloc_noreserve(
+                ONE_FRAME, 0);
             memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
…
          * the different causes
          */
-        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
         memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
     }
…
     ASSERT(mutex_locked(&area->lock));

-    frame_free(frame);
-}
-
-/** Share the anonymous address space area.
- *
- * Sharing of anonymous area is done by duplicating its entire mapping
- * to the pagemap. Page faults will primarily search for frames there.
- *
- * The address space and address space area must be already locked.
- *
- * @param area Address space area to be shared.
- */
-void anon_share(as_area_t *area)
-{
-    link_t *cur;
-
-    ASSERT(mutex_locked(&area->as->lock));
-    ASSERT(mutex_locked(&area->lock));
-
-    /*
-     * Copy used portions of the area to sh_info's page map.
-     */
-    mutex_lock(&area->sh_info->lock);
-    for (cur = area->used_space.leaf_head.next;
-        cur != &area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        unsigned int i;
-
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
-            uintptr_t base = node->key[i];
-            size_t count = (size_t) node->value[i];
-            unsigned int j;
-
-            for (j = 0; j < count; j++) {
-                pte_t *pte;
-
-                page_table_lock(area->as, false);
-                pte = page_mapping_find(area->as,
-                    base + j * PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) &&
-                    PTE_PRESENT(pte));
-                btree_insert(&area->sh_info->pagemap,
-                    (base + j * PAGE_SIZE) - area->base,
-                    (void *) PTE_GET_FRAME(pte), NULL);
-                page_table_unlock(area->as, false);
-
-                pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
-                frame_reference_add(pfn);
-            }
-
-        }
-    }
-    mutex_unlock(&area->sh_info->lock);
+    frame_free_noreserve(frame);
 }

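The merge moves the anonymous backend onto the physical-memory reservation scheme: an area's frames are reserved up front in anon_create() and anon_resize(), released again in anon_destroy(), and the page-fault path then allocates with the *_noreserve variants because the reservation has already been charged. The standalone C sketch below is illustrative only and is not part of the changeset; all demo_* names are hypothetical, and it merely models the accounting pattern visible in the diff.

/*
 * Illustrative sketch of the reservation pattern introduced by this merge.
 * Hypothetical demo_* names; not the HelenOS reserve.c implementation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t demo_reserve = 64;        /* pages currently reservable */

/* Analogue of reserve_try_alloc(): claim pages up front, may fail. */
static bool demo_reserve_try_alloc(size_t pages)
{
    if (pages > demo_reserve)
        return false;
    demo_reserve -= pages;
    return true;
}

/* Analogue of reserve_free(): return previously claimed pages. */
static void demo_reserve_free(size_t pages)
{
    demo_reserve += pages;
}

typedef struct {
    size_t pages;
} demo_area_t;

/* Mirrors anon_create(): the whole area is reserved when it is created. */
static bool demo_area_create(demo_area_t *area, size_t pages)
{
    if (!demo_reserve_try_alloc(pages))
        return false;
    area->pages = pages;
    return true;
}

/* Mirrors anon_resize(): growing needs more reservation, shrinking returns it. */
static bool demo_area_resize(demo_area_t *area, size_t new_pages)
{
    if (new_pages > area->pages) {
        if (!demo_reserve_try_alloc(new_pages - area->pages))
            return false;
    } else if (new_pages < area->pages) {
        demo_reserve_free(area->pages - new_pages);
    }
    area->pages = new_pages;
    return true;
}

/* Mirrors anon_destroy(): the reservation is released with the area. */
static void demo_area_destroy(demo_area_t *area)
{
    demo_reserve_free(area->pages);
    area->pages = 0;
}

int main(void)
{
    demo_area_t area;

    if (demo_area_create(&area, 16))
        printf("created, %zu pages left in reserve\n", demo_reserve);
    demo_area_resize(&area, 8);
    demo_area_destroy(&area);
    printf("destroyed, %zu pages left in reserve\n", demo_reserve);
    return 0;
}

As the diff suggests, the benefit of this split is that reserve_try_alloc() can fail gracefully when an area is created or resized, while the later frame_alloc_noreserve() calls in the fault handler draw on memory that has already been accounted for.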
Note: See TracChangeset for help on using the changeset viewer.