Changes in kernel/generic/src/mm/km.c [f7f47a7:03cdd2b] in mainline
kernel/generic/src/mm/km.c
--- kernel/generic/src/mm/km.c	(revision f7f47a7)
+++ kernel/generic/src/mm/km.c	(revision 03cdd2b)
@@ -46,4 +46,7 @@
 #include <debug.h>
 #include <arch.h>
+#include <align.h>
+#include <macros.h>
+#include <bitops.h>
 
 static ra_arena_t *km_ni_arena;
@@ -121,5 +124,87 @@
 }
 
-/** Unmap kernen non-identity page.
+static uintptr_t
+km_map_aligned(uintptr_t paddr, size_t size, unsigned int flags)
+{
+	uintptr_t vaddr;
+	size_t align;
+	uintptr_t offs;
+
+	ASSERT(ALIGN_DOWN(paddr, FRAME_SIZE) == paddr);
+	ASSERT(ALIGN_UP(size, FRAME_SIZE) == size);
+
+	align = ispwr2(size) ? size : (1U << (fnzb(size) + 1));
+	vaddr = km_page_alloc(size, max(PAGE_SIZE, align));
+
+	page_table_lock(AS_KERNEL, true);
+	for (offs = 0; offs < size; offs += PAGE_SIZE) {
+		page_mapping_insert(AS_KERNEL, vaddr + offs, paddr + offs,
+		    flags);
+	}
+	page_table_unlock(AS_KERNEL, true);
+
+	return vaddr;
+}
+
+static void km_unmap_aligned(uintptr_t vaddr, size_t size)
+{
+	uintptr_t offs;
+	ipl_t ipl;
+
+	ASSERT(ALIGN_DOWN(vaddr, PAGE_SIZE) == vaddr);
+	ASSERT(ALIGN_UP(size, PAGE_SIZE) == size);
+
+	page_table_lock(AS_KERNEL, true);
+
+	ipl = tlb_shootdown_start(TLB_INVL_ASID, ASID_KERNEL, 0, 0);
+
+	for (offs = 0; offs < size; offs += PAGE_SIZE)
+		page_mapping_remove(AS_KERNEL, vaddr + offs);
+
+	tlb_invalidate_asid(ASID_KERNEL);
+
+	as_invalidate_translation_cache(AS_KERNEL, 0, -1);
+	tlb_shootdown_finalize(ipl);
+	page_table_unlock(AS_KERNEL, true);
+
+	km_page_free(vaddr, size);
+}
+
+/** Map a piece of physical address space into the virtual address space.
+ *
+ * @param paddr	Physical address to be mapped. May be unaligned.
+ * @param size	Size of area starting at paddr to be mapped.
+ * @param flags	Protection flags to be used for the mapping.
+ *
+ * @return	New virtual address mapped to paddr.
+ */
+uintptr_t km_map(uintptr_t paddr, size_t size, unsigned int flags)
+{
+	uintptr_t page;
+	size_t offs;
+
+	offs = paddr - ALIGN_DOWN(paddr, FRAME_SIZE);
+	page = km_map_aligned(ALIGN_DOWN(paddr, FRAME_SIZE),
+	    ALIGN_UP(size + offs, FRAME_SIZE), flags);
+
+	return page + offs;
+}
+
+/** Unmap a piece of virtual address space.
+ *
+ * @param vaddr	Virtual address to be unmapped. May be unaligned, but
+ *		it must be a value previously returned by km_map().
+ * @param size	Size of area starting at vaddr to be unmapped.
+ */
+void km_unmap(uintptr_t vaddr, size_t size)
+{
+	size_t offs;
+
+	offs = vaddr - ALIGN_DOWN(vaddr, PAGE_SIZE);
+	km_unmap_aligned(ALIGN_DOWN(vaddr, PAGE_SIZE),
+	    ALIGN_UP(size + offs, PAGE_SIZE));
+}
+
+/** Unmap kernel non-identity page.
  *
  * @param[in] page	Non-identity page to be unmapped.
@@ -165,10 +250,7 @@
 	    FRAME_HIGHMEM | FRAME_ATOMIC | flags);
 	if (frame) {
-		page = km_page_alloc(PAGE_SIZE, PAGE_SIZE);
+		page = km_map(frame, PAGE_SIZE,
+		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
 		ASSERT(page);	// FIXME
-		page_table_lock(AS_KERNEL, true);
-		page_mapping_insert(AS_KERNEL, page, frame,
-		    PAGE_CACHEABLE | PAGE_READ | PAGE_WRITE);
-		page_table_unlock(AS_KERNEL, true);
 	} else {
 		frame = (uintptr_t) frame_alloc_noreserve(ONE_FRAME,
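The changeset introduces a public km_map()/km_unmap() pair built on the aligned helpers: km_map() rounds the physical address down and the size up to frame boundaries, maps whole frames with the requested protection flags, and returns a virtual address carrying the same in-page offset, so callers no longer need to lock the kernel page table and call page_mapping_insert() themselves (as the last hunk shows for the high-memory frame path). The caller sketch below is not part of the changeset; it assumes the new interface is declared in <mm/km.h> and that the PAGE_* flags come from <mm/page.h>, and the device address, block size, and function name are invented purely for illustration.

	/*
	 * Hypothetical caller sketch (not from the changeset): map a small,
	 * deliberately unaligned MMIO register block, use it, and unmap it.
	 */
	#include <typedefs.h>
	#include <mm/km.h>
	#include <mm/page.h>

	#define DEV_REGS_PADDR	0xfe0001a0	/* hypothetical MMIO base, unaligned on purpose */
	#define DEV_REGS_SIZE	72		/* hypothetical register block size in bytes */

	static void dev_regs_demo(void)
	{
		/*
		 * km_map() accepts an unaligned physical address and size; it
		 * maps whole frames internally and returns a virtual address
		 * with the same in-page offset as DEV_REGS_PADDR.
		 */
		uintptr_t regs = km_map(DEV_REGS_PADDR, DEV_REGS_SIZE,
		    PAGE_READ | PAGE_WRITE);

		/* ... access the device registers through 'regs' here ... */

		/*
		 * km_unmap() takes the value returned by km_map() and the same
		 * size; it rounds back to page boundaries itself.
		 */
		km_unmap(regs, DEV_REGS_SIZE);
	}

Note how km_map_aligned() also picks the mapping's virtual alignment from the size (the next power of two, at least PAGE_SIZE), and km_unmap_aligned() performs a full ASID-wide TLB shootdown before returning the pages to the km arena.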