Changes in / [3fe58d3c:321052f7] in mainline
- Location: kernel
- Files: 10 edited
Legend:
- Unmodified
- Added
- Removed
kernel/arch/arm32/include/mm/km.h
r3fe58d3c r321052f7 39 39 40 40 #define KM_ARM32_IDENTITY_START UINT32_C(0x80000000) 41 #define KM_ARM32_IDENTITY_SIZE UINT32_C(0x 70000000)41 #define KM_ARM32_IDENTITY_SIZE UINT32_C(0x40000000) 42 42 43 #define KM_ARM32_NON_IDENTITY_START UINT32_C(0xf0000000) 44 /* 45 * The last virtual megabyte contains the high exception vectors (0xFFFF0000). 46 * Do not include this range into kernel non-identity. 47 */ 48 #define KM_ARM32_NON_IDENTITY_SIZE UINT32_C(0x0ff00000) 43 #define KM_ARM32_NON_IDENTITY_START UINT32_C(0xc0000000) 44 #define KM_ARM32_NON_IDENTITY_SIZE UINT32_C(0x40000000) 49 45 50 46 extern void km_identity_arch_init(void); -
kernel/arch/arm32/src/mm/page.c
r3fe58d3c r321052f7 65 65 page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags); 66 66 67 /* Create mapping for exception table at high offset */ 67 68 #ifdef HIGH_EXCEPTION_VECTORS 68 /* Create mapping for exception table at high offset */ 69 uintptr_t ev_frame = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_NONE); 70 page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, ev_frame, flags); 69 // XXX: fixme to use proper non-identity page 70 void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA); 71 page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), 72 flags); 71 73 #else 72 74 #error "Only high exception vector supported now" 73 75 #endif 76 cur = ALIGN_DOWN(0x50008010, FRAME_SIZE); 77 page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags); 74 78 75 79 page_table_unlock(AS_KERNEL, true); -
kernel/arch/ia32/src/mm/frame.c
r3fe58d3c r321052f7 54 54 55 55 for (i = 0; i < e820counter; i++) { 56 uint64_t base64 = e820table[i].base_address; 57 uint64_t size64 = e820table[i].size; 58 59 #ifdef KARCH_ia32 60 /* 61 * Restrict the e820 table entries to 32-bits. 62 */ 63 if (base64 >= 0x100000000ULL) 64 continue; 65 if (base64 + size64 > 0x100000000ULL) 66 size64 -= base64 + size64 - 0x100000000ULL; 67 #endif 68 69 uintptr_t base = (uintptr_t) base64; 70 size_t size = (size_t) size64; 56 uintptr_t base = (uintptr_t) e820table[i].base_address; 57 size_t size = (size_t) e820table[i].size; 71 58 72 59 if (!frame_adjust_zone_bounds(low, &base, &size)) -
kernel/genarch/src/mm/page_pt.c
r3fe58d3c r321052f7 322 322 323 323 ASSERT(ispwr2(ptl0step)); 324 ASSERT(size > 0); 325 326 for (addr = ALIGN_DOWN(base, ptl0step); addr - 1 < base + size - 1; 324 325 for (addr = ALIGN_DOWN(base, ptl0step); addr < base + size; 327 326 addr += ptl0step) { 328 327 uintptr_t l1; -
kernel/generic/include/mm/frame.h
r3fe58d3c r321052f7 83 83 #define FRAME_TO_ZONE_FLAGS(ff) \ 84 84 ((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \ 85 (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : \ 86 ZONE_LOWMEM /* | ZONE_HIGHMEM */)) | \ 87 ZONE_AVAILABLE) 85 (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : ZONE_NONE)) | \ 86 (ZONE_AVAILABLE | ZONE_LOWMEM /* | ZONE_HIGHMEM */)) 88 87 89 88 #define ZONE_FLAGS_MATCH(zf, f) \ -
kernel/generic/include/mm/km.h
r3fe58d3c r321052f7 37 37 38 38 #include <typedefs.h> 39 #include <mm/frame.h>40 39 41 40 extern void km_identity_init(void); … … 49 48 extern bool km_is_non_identity(uintptr_t); 50 49 51 extern uintptr_t km_temporary_page_get(uintptr_t *, frame_flags_t);52 extern void km_temporary_page_put(uintptr_t);53 54 50 #endif 55 51 -
kernel/generic/src/mm/backend_anon.c
r3fe58d3c r321052f7 44 44 #include <mm/frame.h> 45 45 #include <mm/slab.h> 46 #include <mm/km.h>47 46 #include <synch/mutex.h> 48 47 #include <adt/list.h> … … 156 155 int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access) 157 156 { 158 uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE);159 uintptr_t kpage;160 157 uintptr_t frame; 161 158 … … 178 175 mutex_lock(&area->sh_info->lock); 179 176 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 180 upage- area->base, &leaf);177 ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf); 181 178 if (!frame) { 182 179 bool allocate = true; … … 188 185 */ 189 186 for (i = 0; i < leaf->keys; i++) { 190 if (leaf->key[i] == upage - area->base) { 187 if (leaf->key[i] == 188 ALIGN_DOWN(addr, PAGE_SIZE) - area->base) { 191 189 allocate = false; 192 190 break; … … 194 192 } 195 193 if (allocate) { 196 kpage = km_temporary_page_get(&frame, 197 FRAME_NO_RESERVE); 198 memsetb((void *) kpage, PAGE_SIZE, 0); 199 km_temporary_page_put(kpage); 194 frame = (uintptr_t) frame_alloc_noreserve( 195 ONE_FRAME, 0); 196 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 200 197 201 198 /* … … 204 201 */ 205 202 btree_insert(&area->sh_info->pagemap, 206 upage - area->base, (void *) frame, leaf); 203 ALIGN_DOWN(addr, PAGE_SIZE) - area->base, 204 (void *) frame, leaf); 207 205 } 208 206 } … … 225 223 * the different causes 226 224 */ 227 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 228 memsetb((void *) kpage, PAGE_SIZE, 0); 229 km_temporary_page_put(kpage); 225 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 226 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 230 227 } 231 228 232 229 /* 233 * Map ' upage' to 'frame'.230 * Map 'page' to 'frame'. 234 231 * Note that TLB shootdown is not attempted as only new information is 235 232 * being inserted into page tables. 
236 233 */ 237 page_mapping_insert(AS, upage, frame, as_area_get_flags(area));238 if (!used_space_insert(area, upage, 1))234 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 235 if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1)) 239 236 panic("Cannot insert used space."); 240 237 -
kernel/generic/src/mm/backend_elf.c
r3fe58d3c r321052f7 44 44 #include <mm/page.h> 45 45 #include <mm/reserve.h> 46 #include <mm/km.h>47 46 #include <genarch/mm/page_pt.h> 48 47 #include <genarch/mm/page_ht.h> … … 230 229 elf_segment_header_t *entry = area->backend_data.segment; 231 230 btree_node_t *leaf; 232 uintptr_t base; 233 uintptr_t frame; 234 uintptr_t kpage; 235 uintptr_t upage; 236 uintptr_t start_anon; 231 uintptr_t base, frame, page, start_anon; 237 232 size_t i; 238 233 bool dirty = false; … … 254 249 (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE)); 255 250 256 /* Virtual address of faulting page 257 upage = ALIGN_DOWN(addr, PAGE_SIZE);251 /* Virtual address of faulting page*/ 252 page = ALIGN_DOWN(addr, PAGE_SIZE); 258 253 259 254 /* Virtual address of the end of initialized part of segment */ … … 269 264 mutex_lock(&area->sh_info->lock); 270 265 frame = (uintptr_t) btree_search(&area->sh_info->pagemap, 271 upage - area->base, &leaf);266 page - area->base, &leaf); 272 267 if (!frame) { 273 268 unsigned int i; … … 278 273 279 274 for (i = 0; i < leaf->keys; i++) { 280 if (leaf->key[i] == upage - area->base) {275 if (leaf->key[i] == page - area->base) { 281 276 found = true; 282 277 break; … … 286 281 if (frame || found) { 287 282 frame_reference_add(ADDR2PFN(frame)); 288 page_mapping_insert(AS, upage, frame,283 page_mapping_insert(AS, addr, frame, 289 284 as_area_get_flags(area)); 290 if (!used_space_insert(area, upage, 1))285 if (!used_space_insert(area, page, 1)) 291 286 panic("Cannot insert used space."); 292 287 mutex_unlock(&area->sh_info->lock); … … 299 294 * mapping. 300 295 */ 301 if ( upage >= entry->p_vaddr && upage + PAGE_SIZE <= start_anon) {296 if (page >= entry->p_vaddr && page + PAGE_SIZE <= start_anon) { 302 297 /* 303 298 * Initialized portion of the segment. 
The memory is backed … … 309 304 */ 310 305 if (entry->p_flags & PF_W) { 311 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);312 memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE),313 PAGE_SIZE);306 frame = (uintptr_t)frame_alloc_noreserve(ONE_FRAME, 0); 307 memcpy((void *) PA2KA(frame), 308 (void *) (base + i * FRAME_SIZE), FRAME_SIZE); 314 309 if (entry->p_flags & PF_X) { 315 smc_coherence_block((void *) kpage, PAGE_SIZE); 310 smc_coherence_block((void *) PA2KA(frame), 311 FRAME_SIZE); 316 312 } 317 km_temporary_page_put(kpage);318 313 dirty = true; 319 314 } else { 320 315 frame = KA2PA(base + i * FRAME_SIZE); 321 316 } 322 } else if ( upage >= start_anon) {317 } else if (page >= start_anon) { 323 318 /* 324 319 * This is the uninitialized portion of the segment. … … 327 322 * and cleared. 328 323 */ 329 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE); 330 memsetb((void *) kpage, PAGE_SIZE, 0); 331 km_temporary_page_put(kpage); 324 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 325 memsetb((void *) PA2KA(frame), FRAME_SIZE, 0); 332 326 dirty = true; 333 327 } else { … … 340 334 * (The segment can be and often is shorter than 1 page). 
341 335 */ 342 if ( upage < entry->p_vaddr)343 pad_lo = entry->p_vaddr - upage;336 if (page < entry->p_vaddr) 337 pad_lo = entry->p_vaddr - page; 344 338 else 345 339 pad_lo = 0; 346 340 347 if (start_anon < upage + PAGE_SIZE)348 pad_hi = upage + PAGE_SIZE - start_anon;341 if (start_anon < page + PAGE_SIZE) 342 pad_hi = page + PAGE_SIZE - start_anon; 349 343 else 350 344 pad_hi = 0; 351 345 352 kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);353 memcpy((void *) ( kpage+ pad_lo),354 (void *) (base + i * PAGE_SIZE + pad_lo),355 PAGE_SIZE - pad_lo - pad_hi);346 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME, 0); 347 memcpy((void *) (PA2KA(frame) + pad_lo), 348 (void *) (base + i * FRAME_SIZE + pad_lo), 349 FRAME_SIZE - pad_lo - pad_hi); 356 350 if (entry->p_flags & PF_X) { 357 smc_coherence_block((void *) ( kpage+ pad_lo),358 PAGE_SIZE - pad_lo - pad_hi);351 smc_coherence_block((void *) (PA2KA(frame) + pad_lo), 352 FRAME_SIZE - pad_lo - pad_hi); 359 353 } 360 memsetb((void *) kpage, pad_lo, 0);361 memsetb((void *) ( kpage + PAGE_SIZE - pad_hi), pad_hi, 0);362 km_temporary_page_put(kpage);354 memsetb((void *) PA2KA(frame), pad_lo, 0); 355 memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi, 356 0); 363 357 dirty = true; 364 358 } … … 366 360 if (dirty && area->sh_info) { 367 361 frame_reference_add(ADDR2PFN(frame)); 368 btree_insert(&area->sh_info->pagemap, upage - area->base,362 btree_insert(&area->sh_info->pagemap, page - area->base, 369 363 (void *) frame, leaf); 370 364 } … … 373 367 mutex_unlock(&area->sh_info->lock); 374 368 375 page_mapping_insert(AS, upage, frame, as_area_get_flags(area));376 if (!used_space_insert(area, upage, 1))369 page_mapping_insert(AS, addr, frame, as_area_get_flags(area)); 370 if (!used_space_insert(area, page, 1)) 377 371 panic("Cannot insert used space."); 378 372 -
kernel/generic/src/mm/km.c
r3fe58d3c r321052f7 39 39 #include <arch/mm/km.h> 40 40 #include <mm/page.h> 41 #include <mm/frame.h>42 #include <mm/asid.h>43 41 #include <config.h> 44 42 #include <typedefs.h> 45 43 #include <lib/ra.h> 46 44 #include <debug.h> 47 #include <arch.h>48 45 49 46 static ra_arena_t *km_ni_arena; 50 51 #define DEFERRED_PAGES_MAX (PAGE_SIZE / sizeof(uintptr_t))52 53 /** Number of freed pages in the deferred buffer. */54 static volatile unsigned deferred_pages;55 /** Buffer of deferred freed pages. */56 static uintptr_t deferred_page[DEFERRED_PAGES_MAX];57 58 /** Flush the buffer of deferred freed pages.59 *60 * @return Number of freed pages.61 */62 static unsigned km_flush_deferred(void)63 {64 unsigned i = 0;65 ipl_t ipl;66 67 ipl = tlb_shootdown_start(TLB_INVL_ASID, ASID_KERNEL, 0, 0);68 69 for (i = 0; i < deferred_pages; i++) {70 page_mapping_remove(AS_KERNEL, deferred_page[i]);71 km_page_free(deferred_page[i], PAGE_SIZE);72 }73 74 tlb_invalidate_asid(ASID_KERNEL);75 76 as_invalidate_translation_cache(AS_KERNEL, 0, -1);77 tlb_shootdown_finalize(ipl);78 79 return i;80 }81 47 82 48 /** Architecture dependent setup of identity-mapped kernel memory. */ … … 121 87 } 122 88 123 /** Unmap kernen non-identity page.124 *125 * @param[in] page Non-identity page to be unmapped.126 */127 static void km_unmap_deferred(uintptr_t page)128 {129 page_table_lock(AS_KERNEL, true);130 131 if (deferred_pages == DEFERRED_PAGES_MAX) {132 (void) km_flush_deferred();133 deferred_pages = 0;134 }135 136 deferred_page[deferred_pages++] = page;137 138 page_table_unlock(AS_KERNEL, true);139 }140 141 /** Create a temporary page.142 *143 * The page is mapped read/write to a newly allocated frame of physical memory.144 * The page must be returned back to the system by a call to145 * km_temporary_page_put().146 *147 * @param[inout] framep Pointer to a variable which will receive the physical148 * address of the allocated frame.149 * @param[in] flags Frame allocation flags. 
FRAME_NONE or FRAME_NO_RESERVE.150 * @return Virtual address of the allocated frame.151 */152 uintptr_t km_temporary_page_get(uintptr_t *framep, frame_flags_t flags)153 {154 uintptr_t frame;155 uintptr_t page;156 157 ASSERT(THREAD);158 ASSERT(framep);159 ASSERT(!(flags & ~FRAME_NO_RESERVE));160 161 /*162 * Allocate a frame, preferably from high memory.163 */164 frame = (uintptr_t) frame_alloc(ONE_FRAME,165 FRAME_HIGHMEM | FRAME_ATOMIC | flags);166 if (frame) {167 page = km_page_alloc(PAGE_SIZE, PAGE_SIZE);168 ASSERT(page); // FIXME169 page_table_lock(AS_KERNEL, true);170 page_mapping_insert(AS_KERNEL, page, frame,171 PAGE_CACHEABLE | PAGE_READ | PAGE_WRITE);172 page_table_unlock(AS_KERNEL, true);173 } else {174 frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME,175 FRAME_LOWMEM);176 page = PA2KA(frame);177 }178 179 *framep = frame;180 return page;181 }182 183 /** Destroy a temporary page.184 *185 * This function destroys a temporary page previously created by186 * km_temporary_page_get(). The page destruction may be immediate or deferred.187 * The frame mapped by the destroyed page is not freed.188 *189 * @param[in] page Temporary page to be destroyed.190 */191 void km_temporary_page_put(uintptr_t page)192 {193 ASSERT(THREAD);194 195 if (km_is_non_identity(page))196 km_unmap_deferred(page);197 }198 89 199 90 /** @} -
kernel/generic/src/mm/page.c
r3fe58d3c r321052f7 202 202 asize = ALIGN_UP(size, PAGE_SIZE); 203 203 align = ispwr2(size) ? size : (1U << (fnzb(size) + 1)); 204 virtaddr = km_page_alloc(asize, max(PAGE_SIZE, align));204 virtaddr = km_page_alloc(asize, align); 205 205 206 206 page_table_lock(AS_KERNEL, true);
Note: See TracChangeset for help on using the changeset viewer.