Changes in kernel/generic/src/mm/backend_elf.c [8f6c6264:8f80c77] in mainline
Files: 1 edited
Legend: unmodified lines are prefixed with a space, lines removed in r8f80c77 (i.e. present only in r8f6c6264) with '-', and added lines with '+'.
kernel/generic/src/mm/backend_elf.c
--- kernel/generic/src/mm/backend_elf.c	(r8f6c6264)
+++ kernel/generic/src/mm/backend_elf.c	(r8f80c77)
@@ -43,5 +43,4 @@
 #include <mm/slab.h>
 #include <mm/page.h>
-#include <mm/reserve.h>
 #include <genarch/mm/page_pt.h>
 #include <genarch/mm/page_ht.h>
@@ -52,67 +51,215 @@
 
 #include <arch/barrier.h>
-static bool elf_create(as_area_t *);
-static bool elf_resize(as_area_t *, size_t);
-static void elf_share(as_area_t *);
-static void elf_destroy(as_area_t *);
-
-static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
-static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);
+#ifdef CONFIG_VIRT_IDX_DCACHE
+#include <arch/mm/cache.h>
+#endif
+
+static int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access);
+static void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame);
+static void elf_share(as_area_t *area);
 
 mem_backend_t elf_backend = {
-        .create = elf_create,
-        .resize = elf_resize,
-        .share = elf_share,
-        .destroy = elf_destroy,
-
         .page_fault = elf_page_fault,
         .frame_free = elf_frame_free,
+        .share = elf_share
 };
 
-static size_t elf_nonanon_pages_get(as_area_t *area)
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area   Pointer to the address space area.
+ * @param addr   Faulting virtual address.
+ * @param access Access mode that caused the fault (i.e.
+ *               read/write/exec).
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
+ *         on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+{
+        elf_header_t *elf = area->backend_data.elf;
+        elf_segment_header_t *entry = area->backend_data.segment;
+        btree_node_t *leaf;
+        uintptr_t base, frame, page, start_anon;
+        size_t i;
+        bool dirty = false;
+
+        ASSERT(page_table_locked(AS));
+        ASSERT(mutex_locked(&area->lock));
+
+        if (!as_area_check_access(area, access))
+                return AS_PF_FAULT;
+
+        ASSERT((addr >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) &&
+            (addr < entry->p_vaddr + entry->p_memsz));
+        i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+        base = (uintptr_t)
+            (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
+
+        /* Virtual address of faulting page */
+        page = ALIGN_DOWN(addr, PAGE_SIZE);
+
+        /* Virtual address of the end of initialized part of segment */
+        start_anon = entry->p_vaddr + entry->p_filesz;
+
+        if (area->sh_info) {
+                bool found = false;
+
+                /*
+                 * The address space area is shared.
+                 */
+
+                mutex_lock(&area->sh_info->lock);
+                frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
+                    page - area->base, &leaf);
+                if (!frame) {
+                        unsigned int i;
+
+                        /*
+                         * Workaround for valid NULL address.
+                         */
+
+                        for (i = 0; i < leaf->keys; i++) {
+                                if (leaf->key[i] == page - area->base) {
+                                        found = true;
+                                        break;
+                                }
+                        }
+                }
+                if (frame || found) {
+                        frame_reference_add(ADDR2PFN(frame));
+                        page_mapping_insert(AS, addr, frame,
+                            as_area_get_flags(area));
+                        if (!used_space_insert(area, page, 1))
+                                panic("Cannot insert used space.");
+                        mutex_unlock(&area->sh_info->lock);
+                        return AS_PF_OK;
+                }
+        }
+
+        /*
+         * The area is either not shared or the pagemap does not contain the
+         * mapping.
+         */
+        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+                /*
+                 * Initialized portion of the segment. The memory is backed
+                 * directly by the content of the ELF image. Pages are
+                 * only copied if the segment is writable so that there
+                 * can be more instantiations of the same memory ELF image
+                 * used at a time. Note that this could be later done
+                 * as COW.
+                 */
+                if (entry->p_flags & PF_W) {
+                        frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+                        memcpy((void *) PA2KA(frame),
+                            (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
+                        if (entry->p_flags & PF_X) {
+                                smc_coherence_block((void *) PA2KA(frame),
+                                    FRAME_SIZE);
+                        }
+                        dirty = true;
+                } else {
+                        frame = KA2PA(base + i * FRAME_SIZE);
+                }
+        } else if (page >= start_anon) {
+                /*
+                 * This is the uninitialized portion of the segment.
+                 * It is not physically present in the ELF image.
+                 * To resolve the situation, a frame must be allocated
+                 * and cleared.
+                 */
+                frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+                memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
+                dirty = true;
+        } else {
+                size_t pad_lo, pad_hi;
+                /*
+                 * The mixed case.
+                 *
+                 * The middle part is backed by the ELF image and
+                 * the lower and upper parts are anonymous memory.
+                 * (The segment can be and often is shorter than 1 page).
+                 */
+                if (page < entry->p_vaddr)
+                        pad_lo = entry->p_vaddr - page;
+                else
+                        pad_lo = 0;
+
+                if (start_anon < page + PAGE_SIZE)
+                        pad_hi = page + PAGE_SIZE - start_anon;
+                else
+                        pad_hi = 0;
+
+                frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
+                memcpy((void *) (PA2KA(frame) + pad_lo),
+                    (void *) (base + i * FRAME_SIZE + pad_lo),
+                    FRAME_SIZE - pad_lo - pad_hi);
+                if (entry->p_flags & PF_X) {
+                        smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
+                            FRAME_SIZE - pad_lo - pad_hi);
+                }
+                memsetb((void *) PA2KA(frame), pad_lo, 0);
+                memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
+                    0);
+                dirty = true;
+        }
+
+        if (dirty && area->sh_info) {
+                frame_reference_add(ADDR2PFN(frame));
+                btree_insert(&area->sh_info->pagemap, page - area->base,
+                    (void *) frame, leaf);
+        }
+
+        if (area->sh_info)
+                mutex_unlock(&area->sh_info->lock);
+
+        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+        if (!used_space_insert(area, page, 1))
+                panic("Cannot insert used space.");
+
+        return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area  Pointer to the address space area.
+ * @param page  Page that is mapped to frame. Must be aligned to
+ *              PAGE_SIZE.
+ * @param frame Frame to be released.
+ *
+ */
+void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
 {
         elf_segment_header_t *entry = area->backend_data.segment;
-        uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE);
-        uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz,
-            PAGE_SIZE);
-
-        if (entry->p_flags & PF_W)
-                return 0;
-
-        if (last < first)
-                return 0;
-
-        return last - first;
-}
-
-bool elf_create(as_area_t *area)
-{
-        size_t nonanon_pages = elf_nonanon_pages_get(area);
-
-        if (area->pages <= nonanon_pages)
-                return true;
-
-        return reserve_try_alloc(area->pages - nonanon_pages);
-}
-
-bool elf_resize(as_area_t *area, size_t new_pages)
-{
-        size_t nonanon_pages = elf_nonanon_pages_get(area);
-
-        if (new_pages > area->pages) {
-                /* The area is growing. */
-                if (area->pages >= nonanon_pages)
-                        return reserve_try_alloc(new_pages - area->pages);
-                else if (new_pages > nonanon_pages)
-                        return reserve_try_alloc(new_pages - nonanon_pages);
-        } else if (new_pages < area->pages) {
-                /* The area is shrinking. */
-                if (new_pages >= nonanon_pages)
-                        reserve_free(area->pages - new_pages);
-                else if (area->pages > nonanon_pages)
-                        reserve_free(nonanon_pages - new_pages);
-        }
-
-        return true;
+        uintptr_t start_anon;
+
+        ASSERT(page_table_locked(area->as));
+        ASSERT(mutex_locked(&area->lock));
+
+        ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
+        ASSERT(page < entry->p_vaddr + entry->p_memsz);
+
+        start_anon = entry->p_vaddr + entry->p_filesz;
+
+        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
+                if (entry->p_flags & PF_W) {
+                        /*
+                         * Free the frame with the copy of writable segment
+                         * data.
+                         */
+                        frame_free(frame);
+                }
+        } else {
+                /*
+                 * The frame is either anonymous memory or the mixed case (i.e.
+                 * lower part is backed by the ELF image and the upper is
+                 * anonymous). In any case, a frame needs to be freed.
+                 */
+                frame_free(frame);
+        }
 }
 
@@ -205,215 +352,4 @@
 }
 
-void elf_destroy(as_area_t *area)
-{
-        size_t nonanon_pages = elf_nonanon_pages_get(area);
-
-        if (area->pages > nonanon_pages)
-                reserve_free(area->pages - nonanon_pages);
-}
-
-/** Service a page fault in the ELF backend address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area   Pointer to the address space area.
- * @param addr   Faulting virtual address.
- * @param access Access mode that caused the fault (i.e.
- *               read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
- *         on success (i.e. serviced).
- */
-int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-        elf_header_t *elf = area->backend_data.elf;
-        elf_segment_header_t *entry = area->backend_data.segment;
-        btree_node_t *leaf;
-        uintptr_t base, frame, page, start_anon;
-        size_t i;
-        bool dirty = false;
-
-        ASSERT(page_table_locked(AS));
-        ASSERT(mutex_locked(&area->lock));
-
-        if (!as_area_check_access(area, access))
-                return AS_PF_FAULT;
-
-        if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
-                return AS_PF_FAULT;
-
-        if (addr >= entry->p_vaddr + entry->p_memsz)
-                return AS_PF_FAULT;
-
-        i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
-        base = (uintptr_t)
-            (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-        /* Virtual address of faulting page */
-        page = ALIGN_DOWN(addr, PAGE_SIZE);
-
-        /* Virtual address of the end of initialized part of segment */
-        start_anon = entry->p_vaddr + entry->p_filesz;
-
-        if (area->sh_info) {
-                bool found = false;
-
-                /*
-                 * The address space area is shared.
-                 */
-
-                mutex_lock(&area->sh_info->lock);
-                frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
-                    page - area->base, &leaf);
-                if (!frame) {
-                        unsigned int i;
-
-                        /*
-                         * Workaround for valid NULL address.
-                         */
-
-                        for (i = 0; i < leaf->keys; i++) {
-                                if (leaf->key[i] == page - area->base) {
-                                        found = true;
-                                        break;
-                                }
-                        }
-                }
-                if (frame || found) {
-                        frame_reference_add(ADDR2PFN(frame));
-                        page_mapping_insert(AS, addr, frame,
-                            as_area_get_flags(area));
-                        if (!used_space_insert(area, page, 1))
-                                panic("Cannot insert used space.");
-                        mutex_unlock(&area->sh_info->lock);
-                        return AS_PF_OK;
-                }
-        }
-
-        /*
-         * The area is either not shared or the pagemap does not contain the
-         * mapping.
-         */
-        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-                /*
-                 * Initialized portion of the segment. The memory is backed
-                 * directly by the content of the ELF image. Pages are
-                 * only copied if the segment is writable so that there
-                 * can be more instantiations of the same memory ELF image
-                 * used at a time. Note that this could be later done
-                 * as COW.
-                 */
-                if (entry->p_flags & PF_W) {
-                        frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-                        memcpy((void *) PA2KA(frame),
-                            (void *) (base + i * FRAME_SIZE), FRAME_SIZE);
-                        if (entry->p_flags & PF_X) {
-                                smc_coherence_block((void *) PA2KA(frame),
-                                    FRAME_SIZE);
-                        }
-                        dirty = true;
-                } else {
-                        frame = KA2PA(base + i * FRAME_SIZE);
-                }
-        } else if (page >= start_anon) {
-                /*
-                 * This is the uninitialized portion of the segment.
-                 * It is not physically present in the ELF image.
-                 * To resolve the situation, a frame must be allocated
-                 * and cleared.
-                 */
-                frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-                memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
-                dirty = true;
-        } else {
-                size_t pad_lo, pad_hi;
-                /*
-                 * The mixed case.
-                 *
-                 * The middle part is backed by the ELF image and
-                 * the lower and upper parts are anonymous memory.
-                 * (The segment can be and often is shorter than 1 page).
-                 */
-                if (page < entry->p_vaddr)
-                        pad_lo = entry->p_vaddr - page;
-                else
-                        pad_lo = 0;
-
-                if (start_anon < page + PAGE_SIZE)
-                        pad_hi = page + PAGE_SIZE - start_anon;
-                else
-                        pad_hi = 0;
-
-                frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0);
-                memcpy((void *) (PA2KA(frame) + pad_lo),
-                    (void *) (base + i * FRAME_SIZE + pad_lo),
-                    FRAME_SIZE - pad_lo - pad_hi);
-                if (entry->p_flags & PF_X) {
-                        smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
-                            FRAME_SIZE - pad_lo - pad_hi);
-                }
-                memsetb((void *) PA2KA(frame), pad_lo, 0);
-                memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
-                    0);
-                dirty = true;
-        }
-
-        if (dirty && area->sh_info) {
-                frame_reference_add(ADDR2PFN(frame));
-                btree_insert(&area->sh_info->pagemap, page - area->base,
-                    (void *) frame, leaf);
-        }
-
-        if (area->sh_info)
-                mutex_unlock(&area->sh_info->lock);
-
-        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-        if (!used_space_insert(area, page, 1))
-                panic("Cannot insert used space.");
-
-        return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the ELF backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area  Pointer to the address space area.
- * @param page  Page that is mapped to frame. Must be aligned to
- *              PAGE_SIZE.
- * @param frame Frame to be released.
- *
- */
-void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
-{
-        elf_segment_header_t *entry = area->backend_data.segment;
-        uintptr_t start_anon;
-
-        ASSERT(page_table_locked(area->as));
-        ASSERT(mutex_locked(&area->lock));
-
-        ASSERT(page >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
-        ASSERT(page < entry->p_vaddr + entry->p_memsz);
-
-        start_anon = entry->p_vaddr + entry->p_filesz;
-
-        if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) {
-                if (entry->p_flags & PF_W) {
-                        /*
-                         * Free the frame with the copy of writable segment
-                         * data.
-                         */
-                        frame_free_noreserve(frame);
-                }
-        } else {
-                /*
-                 * The frame is either anonymous memory or the mixed case (i.e.
-                 * lower part is backed by the ELF image and the upper is
-                 * anonymous). In any case, a frame needs to be freed.
-                 */
-                frame_free_noreserve(frame);
-        }
-}
-
 /** @}
  */
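Aside for readers of the mixed-case branch in elf_page_fault() above: the pad_lo/pad_hi arithmetic decides how much of the faulting page is copied from the ELF image and how much is zero-filled. The standalone sketch below reproduces that arithmetic; the 4 KiB PAGE_SIZE and the sample p_vaddr/p_filesz values are assumptions for illustration, not values taken from the changeset.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096 /* assumed page size for the example */

    int main(void)
    {
            /* Hypothetical segment: starts 0x100 bytes into a page and its
             * initialized part ends 0x300 bytes into the same page. */
            uintptr_t p_vaddr = 0x40000100;
            uintptr_t p_filesz = 0x200;
            uintptr_t start_anon = p_vaddr + p_filesz;  /* 0x40000300 */
            uintptr_t page = 0x40000000;                /* faulting page base */

            /* Same logic as the mixed case in elf_page_fault(). */
            uintptr_t pad_lo = (page < p_vaddr) ? p_vaddr - page : 0;
            uintptr_t pad_hi = (start_anon < page + PAGE_SIZE) ?
                page + PAGE_SIZE - start_anon : 0;

            printf("pad_lo=%#lx pad_hi=%#lx copied=%#lx\n",
                (unsigned long) pad_lo, (unsigned long) pad_hi,
                (unsigned long) (PAGE_SIZE - pad_lo - pad_hi));
            return 0;
    }

With these numbers only the middle 0x200 bytes of the frame come from the image; the leading 0x100 and trailing 0xd00 bytes are cleared, matching the two memsetb() calls in the kernel code.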
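The elf_create()/elf_resize()/elf_destroy() functions removed on the r8f6c6264 side tie the area lifecycle to memory reservations: only the anonymous part of the area (pages not permanently backed by the read-only ELF image) needs reserved physical memory. Below is a minimal user-space sketch of that accounting under stated assumptions: reserve_try_alloc()/reserve_free() are stand-in stubs for the kernel's <mm/reserve.h> API, the segment values are hypothetical, and the helper here returns a page count, whereas the removed elf_nonanon_pages_get() returns last - first in bytes even though its callers compare against page counts.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define ALIGN_UP(a, b)   (((a) + ((b) - 1)) & ~((uintptr_t)(b) - 1))
    #define ALIGN_DOWN(a, b) ((a) & ~((uintptr_t)(b) - 1))
    #define PF_W 0x2

    /* Stand-ins for the kernel's reserve API (hypothetical stubs). */
    static bool reserve_try_alloc(size_t pages) { (void) pages; return true; }
    static void reserve_free(size_t pages) { (void) pages; }

    /* Pages that stay backed by the ELF image itself: read-only pages lying
     * entirely within the initialized part of the segment. Writable segments
     * are always copied, so every page of them is anonymous. */
    static size_t nonanon_pages(uintptr_t p_vaddr, size_t p_filesz,
        unsigned p_flags)
    {
            uintptr_t first = ALIGN_UP(p_vaddr, PAGE_SIZE);
            uintptr_t last = ALIGN_DOWN(p_vaddr + p_filesz, PAGE_SIZE);

            if (p_flags & PF_W)
                    return 0;
            if (last < first)
                    return 0;
            return (last - first) / PAGE_SIZE;
    }

    int main(void)
    {
            /* Hypothetical read-only segment covering 10 pages of file data. */
            size_t nonanon = nonanon_pages(0x40000000, 10 * PAGE_SIZE, 0);
            size_t area_pages = 16;

            /* Only the anonymous tail needs a reservation, as in elf_create(). */
            if (area_pages > nonanon && !reserve_try_alloc(area_pages - nonanon))
                    return 1;
            printf("nonanon=%zu reserved=%zu\n", nonanon, area_pages - nonanon);
            reserve_free(area_pages - nonanon);  /* as in elf_destroy() */
            return 0;
    }

Switching from this reserve-aware scheme back to plain frame_alloc()/frame_free() is exactly what the r8f80c77 side of the diff does.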