Changes in / [9ea8fdb4:eaeb056] in mainline
Location: kernel
Files: 14 edited
Legend: unmodified lines are shown as context, removed lines are prefixed with "-", added lines with "+"; "…" marks skipped lines between hunks.
kernel/arch/ia64/src/mm/tlb.c
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     if (t) {
…
     page_table_lock(AS, true);
-    pte_t *entry = page_mapping_find(AS, va);
+    pte_t *entry = page_mapping_find(AS, va, true);
     if (entry) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
     if ((t) && (t->p) && (t->w)) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
     if ((t) && (t->p) && (t->x)) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
     if ((t) && (t->p)) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
     ASSERT(!t->w);
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     ASSERT(t);
kernel/arch/mips32/src/mm/tlb.c
     mutex_unlock(&AS->lock);
 
-    page_table_lock(AS, true);
-
     pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
     if (!pte) {
…
          * or copy_to_uspace().
          */
-        page_table_unlock(AS, true);
         return;
     default:
…
     tlbwr();
 
-    page_table_unlock(AS, true);
     return;
 
 fail:
-    page_table_unlock(AS, true);
     tlb_refill_fail(istate);
 }
…
     index.value = cp0_index_read();
 
-    page_table_lock(AS, true);
-
     /*
      * Fail if the entry is not in TLB.
…
          * or copy_to_uspace().
          */
-        page_table_unlock(AS, true);
         return;
     default:
…
     tlbwi();
 
-    page_table_unlock(AS, true);
     return;
 
 fail:
-    page_table_unlock(AS, true);
     tlb_invalid_fail(istate);
 }
…
     index.value = cp0_index_read();
 
-    page_table_lock(AS, true);
-
     /*
      * Fail if the entry is not in TLB.
…
          * or copy_to_uspace().
          */
-        page_table_unlock(AS, true);
         return;
     default:
…
     tlbwi();
 
-    page_table_unlock(AS, true);
     return;
 
 fail:
-    page_table_unlock(AS, true);
     tlb_modified_fail(istate);
 }
…
     pte_t *pte;
 
-    ASSERT(mutex_locked(&AS->lock));
-
     hi.value = cp0_entry_hi_read();
…
      * Check if the mapping exists in page tables.
      */
-    pte = page_mapping_find(AS, badvaddr);
+    pte = page_mapping_find(AS, badvaddr, true);
     if (pte && pte->p && (pte->w || access != PF_ACCESS_WRITE)) {
…
      * Resort to higher-level page fault handler.
      */
-    page_table_unlock(AS, true);
     switch (rc = as_page_fault(badvaddr, access, istate)) {
     case AS_PF_OK:
…
          * The mapping ought to be in place.
          */
-        page_table_lock(AS, true);
-        pte = page_mapping_find(AS, badvaddr);
+        pte = page_mapping_find(AS, badvaddr, true);
         ASSERT(pte && pte->p);
         ASSERT(pte->w || access != PF_ACCESS_WRITE);
…
         break;
     case AS_PF_DEFER:
-        page_table_lock(AS, true);
         *pfrc = AS_PF_DEFER;
         return NULL;
         break;
     case AS_PF_FAULT:
-        page_table_lock(AS, true);
         *pfrc = AS_PF_FAULT;
         return NULL;
kernel/arch/ppc32/src/mm/tlb.c
  *
  * @param as Address space.
- * @param lock Lock/unlock the address space.
  * @param badvaddr Faulting virtual address.
  * @param access Access mode that caused the fault.
…
     istate_t *istate, int *pfrc)
 {
-    ASSERT(mutex_locked(&as->lock));
-
     /*
      * Check if the mapping exists in page tables.
      */
-    pte_t *pte = page_mapping_find(as, badvaddr);
+    pte_t *pte = page_mapping_find(as, badvaddr, true);
     if ((pte) && (pte->present)) {
…
      * Resort to higher-level page fault handler.
      */
-    page_table_unlock(as, true);
-
     int rc = as_page_fault(badvaddr, access, istate);
     switch (rc) {
…
          * The mapping ought to be in place.
          */
-        page_table_lock(as, true);
-        pte = page_mapping_find(as, badvaddr);
+        pte = page_mapping_find(as, badvaddr, true);
         ASSERT((pte) && (pte->present));
         *pfrc = 0;
         return pte;
     case AS_PF_DEFER:
-        page_table_lock(as, true);
         *pfrc = rc;
         return NULL;
     case AS_PF_FAULT:
-        page_table_lock(as, true);
         *pfrc = rc;
         return NULL;
…
         badvaddr = istate->pc;
 
-    page_table_lock(as, true);
-
     int pfrc;
     pte_t *pte = find_mapping_and_check(as, badvaddr,
…
     switch (pfrc) {
     case AS_PF_FAULT:
-        page_table_unlock(as, true);
         pht_refill_fail(badvaddr, istate);
         return;
…
          * or copy_to_uspace().
          */
-        page_table_unlock(as, true);
         return;
     default:
…
     pte->accessed = 1;
     pht_insert(badvaddr, pte);
-
-    page_table_unlock(as, true);
 }
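Taken together, the hunks above leave ppc32's find_mapping_and_check() without any page table locking of its own: the first lookup is done unlocked, and the re-lookup after as_page_fault() likewise passes true. A rough sketch of the resulting control flow, reconstructed from these hunks only, with unrelated lines elided (not verbatim from the source file):

    static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access,
        istate_t *istate, int *pfrc)
    {
        /* Check if the mapping exists in page tables (unlocked lookup). */
        pte_t *pte = page_mapping_find(as, badvaddr, true);
        if ((pte) && (pte->present)) {
            /* Mapping found; body elided in this sketch. */
        }

        /* Resort to the higher-level page fault handler. */
        int rc = as_page_fault(badvaddr, access, istate);
        switch (rc) {
        case AS_PF_OK:
            /* The mapping ought to be in place now. */
            pte = page_mapping_find(as, badvaddr, true);
            ASSERT((pte) && (pte->present));
            *pfrc = 0;
            return pte;
        case AS_PF_DEFER:
            *pfrc = rc;
            return NULL;
        case AS_PF_FAULT:
            *pfrc = rc;
            return NULL;
        default:
            /* Remaining cases elided. */
            return NULL;
        }
    }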
kernel/arch/sparc64/src/mm/sun4u/tlb.c
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k);
+    t = page_mapping_find(AS, page_16k, true);
     if (t && PTE_EXECUTABLE(t)) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k);
+    t = page_mapping_find(AS, page_16k, true);
     if (t) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, page_16k);
+    t = page_mapping_find(AS, page_16k, true);
     if (t && PTE_WRITABLE(t)) {
kernel/arch/sparc64/src/mm/sun4v/tlb.c
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
 
     if (t && PTE_EXECUTABLE(t)) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     if (t) {
…
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, va);
+    t = page_mapping_find(AS, va, true);
     if (t && PTE_WRITABLE(t)) {
kernel/genarch/include/mm/page_pt.h
 extern void page_mapping_insert_pt(as_t *, uintptr_t, uintptr_t, unsigned int);
-extern pte_t *page_mapping_find_pt(as_t *, uintptr_t);
+extern pte_t *page_mapping_find_pt(as_t *, uintptr_t, bool);
 
 #endif
kernel/genarch/src/mm/page_ht.c
 static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
 static void ht_mapping_remove(as_t *, uintptr_t);
-static pte_t *ht_mapping_find(as_t *, uintptr_t);
+static pte_t *ht_mapping_find(as_t *, uintptr_t, bool);
…
  * this call visible.
  *
- * @param as Address space to wich page belongs.
+ * @param as Address space to which page belongs.
  * @param page Virtual address of the page to be demapped.
  *
…
 /** Find mapping for virtual page in page hash table.
  *
- * Find mapping for virtual page.
- *
- * @param as Address space to wich page belongs.
- * @param page Virtual page.
+ * @param as Address space to which page belongs.
+ * @param page Virtual page.
+ * @param nolock True if the page tables need not be locked.
  *
  * @return NULL if there is no such mapping; requested mapping otherwise.
  *
  */
-pte_t *ht_mapping_find(as_t *as, uintptr_t page)
+pte_t *ht_mapping_find(as_t *as, uintptr_t page, bool nolock)
 {
     sysarg_t key[2] = {
…
     };
 
-    ASSERT(page_table_locked(as));
+    ASSERT(nolock || page_table_locked(as));
 
     link_t *cur = hash_table_find(&page_ht, key);
kernel/genarch/src/mm/page_pt.c
 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
 static void pt_mapping_remove(as_t *, uintptr_t);
-static pte_t *pt_mapping_find(as_t *, uintptr_t);
+static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
 
 page_mapping_operations_t pt_mapping_operations = {
…
 /** Find mapping for virtual page in hierarchical page tables.
  *
- * Find mapping for virtual page.
- *
- * @param as Address space to which page belongs.
- * @param page Virtual page.
+ * @param as Address space to which page belongs.
+ * @param page Virtual page.
+ * @param nolock True if the page tables need not be locked.
  *
  * @return NULL if there is no such mapping; entry from PTL3 describing
…
  *
  */
-pte_t *pt_mapping_find(as_t *as, uintptr_t page)
+pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
 {
-    ASSERT(page_table_locked(as));
+    ASSERT(nolock || page_table_locked(as));
 
     pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
kernel/generic/include/mm/page.h
     void (* mapping_insert)(as_t *, uintptr_t, uintptr_t, unsigned int);
     void (* mapping_remove)(as_t *, uintptr_t);
-    pte_t *(* mapping_find)(as_t *, uintptr_t);
+    pte_t *(* mapping_find)(as_t *, uintptr_t, bool);
 } page_mapping_operations_t;
…
 extern void page_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
 extern void page_mapping_remove(as_t *, uintptr_t);
-extern pte_t *page_mapping_find(as_t *, uintptr_t);
+extern pte_t *page_mapping_find(as_t *, uintptr_t, bool);
 extern pte_t *page_table_create(unsigned int);
 extern void page_table_destroy(pte_t *);
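For orientation, each genarch backend supplies these operations through an instance of this structure; the declarations changed in the page_pt.c hunk above suggest wiring roughly like the following (initializer shown for illustration only, not verbatim from the changeset):

    page_mapping_operations_t pt_mapping_operations = {
        .mapping_insert = pt_mapping_insert,  /* (as_t *, uintptr_t, uintptr_t, unsigned int) */
        .mapping_remove = pt_mapping_remove,  /* (as_t *, uintptr_t) */
        .mapping_find = pt_mapping_find       /* (as_t *, uintptr_t, bool) - new third parameter */
    };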
kernel/generic/src/mm/as.c
     for (; i < size; i++) {
         pte_t *pte = page_mapping_find(as,
-            ptr + P2SZ(i));
+            ptr + P2SZ(i), false);
 
         ASSERT(pte);
…
     for (size = 0; size < (size_t) node->value[i]; size++) {
         pte_t *pte = page_mapping_find(as,
-            ptr + P2SZ(size));
+            ptr + P2SZ(size), false);
 
         ASSERT(pte);
…
     for (size = 0; size < (size_t) node->value[i]; size++) {
         pte_t *pte = page_mapping_find(as,
-            ptr + P2SZ(size));
+            ptr + P2SZ(size), false);
 
         ASSERT(pte);
…
      */
     pte_t *pte;
-    if ((pte = page_mapping_find(AS, page))) {
+    if ((pte = page_mapping_find(AS, page, false))) {
         if (PTE_PRESENT(pte)) {
             if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
kernel/generic/src/mm/backend_anon.c
     page_table_lock(area->as, false);
     pte = page_mapping_find(area->as,
-        base + j * PAGE_SIZE);
+        base + P2SZ(j), false);
     ASSERT(pte && PTE_VALID(pte) &&
         PTE_PRESENT(pte));
     btree_insert(&area->sh_info->pagemap,
-        (base + j * PAGE_SIZE) - area->base,
+        (base + P2SZ(j)) - area->base,
         (void *) PTE_GET_FRAME(pte), NULL);
     page_table_unlock(area->as, false);
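Besides passing false to page_mapping_find(), this hunk (and the backend_elf.c one below) replaces open-coded j * PAGE_SIZE arithmetic with the P2SZ() helper. Assuming the usual HelenOS-style definition (a page count shifted by the page width), the two forms are equivalent; a minimal sketch of that assumption:

    /* Assumed definition, for illustration only; the authoritative macro
     * lives in the kernel's memory management headers. */
    #define P2SZ(pages)  ((pages) << PAGE_WIDTH)

    /* With PAGE_SIZE == (1 << PAGE_WIDTH), the rewritten expressions are equal:
     *   base + j * PAGE_SIZE      ==  base + P2SZ(j)
     *   base + count * PAGE_SIZE  ==  base + P2SZ(count)
     */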
kernel/generic/src/mm/backend_elf.c
     if (!(area->flags & AS_AREA_WRITE))
         if (base >= entry->p_vaddr &&
-            base + count * PAGE_SIZE <= start_anon)
+            base + P2SZ(count) <= start_anon)
             continue;
…
     if (!(area->flags & AS_AREA_WRITE))
         if (base >= entry->p_vaddr &&
-            base + (j + 1) * PAGE_SIZE <=
-            start_anon)
+            base + P2SZ(j + 1) <= start_anon)
             continue;
 
     page_table_lock(area->as, false);
     pte = page_mapping_find(area->as,
-        base + j * PAGE_SIZE);
+        base + P2SZ(j), false);
     ASSERT(pte && PTE_VALID(pte) &&
         PTE_PRESENT(pte));
     btree_insert(&area->sh_info->pagemap,
-        (base + j * PAGE_SIZE) - area->base,
+        (base + P2SZ(j)) - area->base,
         (void *) PTE_GET_FRAME(pte), NULL);
     page_table_unlock(area->as, false);
kernel/generic/src/mm/page.c
  * using flags. Allocate and setup any missing page tables.
  *
- * @param as Address space to wich page belongs.
+ * @param as Address space to which page belongs.
  * @param page Virtual address of the page to be mapped.
  * @param frame Physical address of memory frame to which the mapping is
…
  * this call visible.
  *
- * @param as Address space to wich page belongs.
+ * @param as Address space to which page belongs.
  * @param page Virtual address of the page to be demapped.
  *
…
 }
 
-/** Find mapping for virtual page
+/** Find mapping for virtual page.
  *
- * Find mapping for virtual page.
- *
- * @param as Address space to wich page belongs.
- * @param page Virtual page.
+ * @param as Address space to which page belongs.
+ * @param page Virtual page.
+ * @param nolock True if the page tables need not be locked.
  *
  * @return NULL if there is no such mapping; requested mapping
…
  *
  */
-NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page)
+NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock)
 {
-    ASSERT(page_table_locked(as));
+    ASSERT(nolock || page_table_locked(as));
 
     ASSERT(page_mapping_operations);
     ASSERT(page_mapping_operations->mapping_find);
 
-    return page_mapping_operations->mapping_find(as, page);
+    return page_mapping_operations->mapping_find(as, page, nolock);
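The generic wrapper now only asserts that the page tables are locked when the caller passes false; callers that can safely look up a mapping without holding the lock (the TLB and PHT miss handlers above) pass true. A minimal usage sketch of the two conventions, in kernel context, with hypothetical helper names and illustrative variables:

    /* Hypothetical helpers, for illustration only. */
    static void example_locked_lookup(as_t *as, uintptr_t page)
    {
        /* Locked lookup: the caller holds the page table lock and passes
         * false, so ASSERT(nolock || page_table_locked(as)) still applies. */
        page_table_lock(as, true);
        pte_t *pte = page_mapping_find(as, page, false);
        /* ... inspect or modify the mapping ... */
        (void) pte;
        page_table_unlock(as, true);
    }

    static void example_lockfree_lookup(as_t *as, uintptr_t page)
    {
        /* Lock-free lookup: a low-level fault handler that knows the lookup
         * is safe without the lock passes true, which skips the assertion. */
        pte_t *pte = page_mapping_find(as, page, true);
        (void) pte;
    }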
kernel/generic/src/synch/futex.c
      */
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
+    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
     if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
         page_table_unlock(AS, true);
…
      */
     page_table_lock(AS, true);
-    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
+    t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
     if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
         page_table_unlock(AS, true);