Changeset e32720ff in mainline
- Timestamp: 2012-11-22T22:20:39Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 3f6c16fe
- Parents: 0ab362c (diff), 908bb96 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: kernel
- Files: 14 edited
kernel/arch/amd64/src/mm/page.c
--- r0ab362c
+++ re32720ff

         access = PF_ACCESS_READ;

-    if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
-        panic_memtrap(istate, access, page, NULL);
-    }
+    as_page_fault(page, access, istate);
 }
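The same simplification recurs in every architecture-specific handler in this changeset: the per-architecture reporting (fault_if_from_uspace() followed by panic_memtrap()) goes away and the handler just forwards to as_page_fault(), which now reports, kills the task or panics on its own (see kernel/generic/src/mm/as.c further down). A minimal sketch of the before/after shape of such a handler; the function name and arguments are illustrative and not taken from any of the files below:

/* Illustrative handler shape only -- the names here are made up. */
static void arch_handle_data_fault(uintptr_t badvaddr, pf_access_t access,
    istate_t *istate)
{
#if 0   /* Before: each architecture reported the failure itself. */
	if (as_page_fault(badvaddr, access, istate) == AS_PF_FAULT) {
		fault_if_from_uspace(istate, "Page fault: %p.",
		    (void *) badvaddr);
		panic_memtrap(istate, access, badvaddr, NULL);
	}
#else   /* After: as_page_fault() reports, kills or panics by itself. */
	as_page_fault(badvaddr, access, istate);
#endif
}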
kernel/arch/arm32/src/mm/page_fault.c
--- r0ab362c
+++ re32720ff

 #error "Unsupported architecture"
 #endif
-    const int ret = as_page_fault(badvaddr, access, istate);
-
-    if (ret == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault: %#x.", badvaddr);
-        panic_memtrap(istate, access, badvaddr, NULL);
-    }
+    as_page_fault(badvaddr, access, istate);
 }

…

 void prefetch_abort(unsigned int exc_no, istate_t *istate)
 {
-    /* NOTE: We should use IFAR and IFSR here. */
-    int ret = as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
-
-    if (ret == AS_PF_FAULT) {
-        fault_if_from_uspace(istate,
-            "Page fault - prefetch_abort: %#x.", istate->pc);
-        panic_memtrap(istate, PF_ACCESS_EXEC, istate->pc, NULL);
-    }
+    as_page_fault(istate->pc, PF_ACCESS_EXEC, istate);
 }
kernel/arch/ia64/src/mm/tlb.c
--- r0ab362c
+++ re32720ff

      * Forward the page fault to address space page fault handler.
      */
-    if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault at %p.",
-            (void *) va);
-        panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);
-    }
+    as_page_fault(va, PF_ACCESS_EXEC, istate);
     }
 }

…

      * handler.
      */
-    if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault at %p.",
-            (void *) va);
-        panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
-    }
+    as_page_fault(va, PF_ACCESS_READ, istate);
     }
 }

…

         dtc_pte_copy(t);
     } else {
-        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.",
-                (void *) va);
-            panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
-        }
+        as_page_fault(va, PF_ACCESS_WRITE, istate);
     }
 }

…

         itc_pte_copy(t);
     } else {
-        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.",
-                (void *) va);
-            panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);
-        }
+        as_page_fault(va, PF_ACCESS_EXEC, istate);
     }
 }

…

     ASSERT((t) && (t->p));
     ASSERT(!t->w);
-    if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault at %p.",
-            (void *) va);
-        panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
-    }
+    as_page_fault(va, PF_ACCESS_WRITE, istate);

…

         dtc_pte_copy(t);
     } else {
-        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.",
-                (void *) va);
-            panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
-        }
+        as_page_fault(va, PF_ACCESS_READ, istate);
     }
 }
kernel/arch/mips32/src/mm/tlb.c
--- r0ab362c
+++ re32720ff

 #include <symtab.h>

-static void tlb_refill_fail(istate_t *);
-static void tlb_invalid_fail(istate_t *);
-static void tlb_modified_fail(istate_t *);
-
-static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *, int *);
+static pte_t *find_mapping_and_check(uintptr_t, int, istate_t *);

 /** Initialize TLB.

…

     uintptr_t badvaddr;
     pte_t *pte;
-    int pfrc;

     badvaddr = cp0_badvaddr_read();
     asid = AS->asid;

-    pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            goto fail;
-            break;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
+    pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
+    if (pte) {
+        /*
+         * Record access to PTE.
+         */
+        pte->a = 1;
+
+        tlb_prepare_entry_hi(&hi, asid, badvaddr);
+        tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d,
+            pte->cacheable, pte->pfn);
+
+        /*
+         * New entry is to be inserted into TLB
+         */
+        cp0_entry_hi_write(hi.value);
+        if ((badvaddr / PAGE_SIZE) % 2 == 0) {
+            cp0_entry_lo0_write(lo.value);
+            cp0_entry_lo1_write(0);
+        } else {
+            cp0_entry_lo0_write(0);
+            cp0_entry_lo1_write(lo.value);
         }
-    }
-
-    /*
-     * Record access to PTE.
-     */
-    pte->a = 1;
-
-    tlb_prepare_entry_hi(&hi, asid, badvaddr);
-    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable,
-        pte->pfn);
-
-    /*
-     * New entry is to be inserted into TLB
-     */
-    cp0_entry_hi_write(hi.value);
-    if ((badvaddr / PAGE_SIZE) % 2 == 0) {
-        cp0_entry_lo0_write(lo.value);
-        cp0_entry_lo1_write(0);
-    }
-    else {
-        cp0_entry_lo0_write(0);
-        cp0_entry_lo1_write(lo.value);
-    }
-    cp0_pagemask_write(TLB_PAGE_MASK_16K);
-    tlbwr();
-
-    return;
-
-fail:
-    tlb_refill_fail(istate);
+        cp0_pagemask_write(TLB_PAGE_MASK_16K);
+        tlbwr();
+    }
 }

…

     entry_hi_t hi;
     pte_t *pte;
-    int pfrc;

     badvaddr = cp0_badvaddr_read();

…

     index.value = cp0_index_read();

-    /*
-     * Fail if the entry is not in TLB.
-     */
-    if (index.p) {
-        printf("TLB entry not found.\n");
-        goto fail;
-    }
-
-    pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            goto fail;
-            break;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
-        }
-    }
-
-    /*
-     * Read the faulting TLB entry.
-     */
-    tlbr();
-
-    /*
-     * Record access to PTE.
-     */
-    pte->a = 1;
-
-    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable,
-        pte->pfn);
-
-    /*
-     * The entry is to be updated in TLB.
-     */
-    if ((badvaddr / PAGE_SIZE) % 2 == 0)
-        cp0_entry_lo0_write(lo.value);
-    else
-        cp0_entry_lo1_write(lo.value);
-    cp0_pagemask_write(TLB_PAGE_MASK_16K);
-    tlbwi();
-
-    return;
-
-fail:
-    tlb_invalid_fail(istate);
+    ASSERT(!index.p);
+
+    pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
+    if (pte) {
+        /*
+         * Read the faulting TLB entry.
+         */
+        tlbr();
+
+        /*
+         * Record access to PTE.
+         */
+        pte->a = 1;
+
+        tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d,
+            pte->cacheable, pte->pfn);
+
+        /*
+         * The entry is to be updated in TLB.
+         */
+        if ((badvaddr / PAGE_SIZE) % 2 == 0)
+            cp0_entry_lo0_write(lo.value);
+        else
+            cp0_entry_lo1_write(lo.value);
+        cp0_pagemask_write(TLB_PAGE_MASK_16K);
+        tlbwi();
+    }
 }

…

     entry_hi_t hi;
     pte_t *pte;
-    int pfrc;

     badvaddr = cp0_badvaddr_read();

…

      * Fail if the entry is not in TLB.
      */
-    if (index.p) {
-        printf("TLB entry not found.\n");
-        goto fail;
-    }
-
-    pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc);
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            goto fail;
-            break;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
-        }
-    }
-
-    /*
-     * Read the faulting TLB entry.
-     */
-    tlbr();
-
-    /*
-     * Record access and write to PTE.
-     */
-    pte->a = 1;
-    pte->d = 1;
-
-    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable,
-        pte->pfn);
-
-    /*
-     * The entry is to be updated in TLB.
-     */
-    if ((badvaddr / PAGE_SIZE) % 2 == 0)
-        cp0_entry_lo0_write(lo.value);
-    else
-        cp0_entry_lo1_write(lo.value);
-    cp0_pagemask_write(TLB_PAGE_MASK_16K);
-    tlbwi();
-
-    return;
-
-fail:
-    tlb_modified_fail(istate);
-}
-
-void tlb_refill_fail(istate_t *istate)
-{
-    uintptr_t va = cp0_badvaddr_read();
-
-    fault_if_from_uspace(istate, "TLB Refill Exception on %p.",
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");
-}
-
-
-void tlb_invalid_fail(istate_t *istate)
-{
-    uintptr_t va = cp0_badvaddr_read();
-
-    fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");
-}
-
-void tlb_modified_fail(istate_t *istate)
-{
-    uintptr_t va = cp0_badvaddr_read();
-
-    fault_if_from_uspace(istate, "TLB Modified Exception on %p.",
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");
+    ASSERT(!index.p);
+
+    pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate);
+    if (pte) {
+        /*
+         * Read the faulting TLB entry.
+         */
+        tlbr();
+
+        /*
+         * Record access and write to PTE.
+         */
+        pte->a = 1;
+        pte->d = 1;
+
+        tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w,
+            pte->cacheable, pte->pfn);
+
+        /*
+         * The entry is to be updated in TLB.
+         */
+        if ((badvaddr / PAGE_SIZE) % 2 == 0)
+            cp0_entry_lo0_write(lo.value);
+        else
+            cp0_entry_lo1_write(lo.value);
+        cp0_pagemask_write(TLB_PAGE_MASK_16K);
+        tlbwi();
+    }
 }

…

  * @param access Access mode that caused the fault.
  * @param istate Pointer to interrupted state.
- * @param pfrc Pointer to variable where as_page_fault() return code
- *     will be stored.
  *
  * @return PTE on success, NULL otherwise.
  */
-pte_t *
-find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate,
-    int *pfrc)
+pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate)
 {
     entry_hi_t hi;

…

     hi.value = cp0_entry_hi_read();

-    /*
-     * Handler cannot succeed if the ASIDs don't match.
-     */
-    if (hi.asid != AS->asid) {
-        printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
-        return NULL;
-    }
+    ASSERT(hi.asid == AS->asid);

     /*

…

          */
         return pte;
-    } else {
-        int rc;
-
-        /*
-         * Mapping not found in page tables.
-         * Resort to higher-level page fault handler.
-         */
-        switch (rc = as_page_fault(badvaddr, access, istate)) {
-        case AS_PF_OK:
-            /*
-             * The higher-level page fault handler succeeded,
-             * The mapping ought to be in place.
-             */
-            pte = page_mapping_find(AS, badvaddr, true);
-            ASSERT(pte && pte->p);
-            ASSERT(pte->w || access != PF_ACCESS_WRITE);
-            return pte;
-        case AS_PF_DEFER:
-            *pfrc = AS_PF_DEFER;
-            return NULL;
-        case AS_PF_FAULT:
-            *pfrc = AS_PF_FAULT;
-            return NULL;
-        default:
-            panic("Unexpected rc (%d).", rc);
-        }
-
-    }
+    }
+
+    /*
+     * Mapping not found in page tables.
+     * Resort to higher-level page fault handler.
+     */
+    if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) {
+        pte = page_mapping_find(AS, badvaddr, true);
+        ASSERT(pte && pte->p);
+        ASSERT(pte->w || access != PF_ACCESS_WRITE);
+        return pte;
+    }
+
+    return NULL;
 }
kernel/arch/mips64/src/mm/tlb.c
--- r0ab362c
+++ re32720ff

  * @param access Access mode that caused the fault.
  * @param istate Pointer to interrupted state.
- * @param pfrc Pointer to variable where as_page_fault()
- *     return code will be stored.
  *
  * @return PTE on success, NULL otherwise.

…

  */
 static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access,
-    istate_t *istate, int *pfrc)
+    istate_t *istate)
 {
     entry_hi_t hi;
     hi.value = cp0_entry_hi_read();

-    /*
-     * Handler cannot succeed if the ASIDs don't match.
-     */
-    if (hi.asid != AS->asid) {
-        printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
-        return NULL;
-    }
+    ASSERT(hi.asid == AS->asid);

     /*

…

          */
         return pte;
-    } else {
-        int rc;
-
-        /*
-         * Mapping not found in page tables.
-         * Resort to higher-level page fault handler.
-         */
-        switch (rc = as_page_fault(badvaddr, access, istate)) {
-        case AS_PF_OK:
-            /*
-             * The higher-level page fault handler succeeded,
-             * The mapping ought to be in place.
-             */
-            pte = page_mapping_find(AS, badvaddr, true);
-            ASSERT(pte);
-            ASSERT(pte->p);
-            ASSERT((pte->w) || (access != PF_ACCESS_WRITE));
-            return pte;
-        case AS_PF_DEFER:
-            *pfrc = AS_PF_DEFER;
-            return NULL;
-        case AS_PF_FAULT:
-            *pfrc = AS_PF_FAULT;
-            return NULL;
-        default:
-            panic("Unexpected return code (%d).", rc);
-        }
-    }
+    }
+
+    /*
+     * Mapping not found in page tables.
+     * Resort to higher-level page fault handler.
+     */
+    if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) {
+        /*
+         * The higher-level page fault handler succeeded,
+         * The mapping ought to be in place.
+         */
+        pte = page_mapping_find(AS, badvaddr, true);
+        ASSERT(pte);
+        ASSERT(pte->p);
+        ASSERT((pte->w) || (access != PF_ACCESS_WRITE));
+        return pte;
+    }
+
+    return NULL;
 }

…

 }

-static void tlb_refill_fail(istate_t *istate)
-{
-    uintptr_t va = cp0_badvaddr_read();
-
-    fault_if_from_uspace(istate, "TLB Refill Exception on %p.",
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Refill Exception.");
-}
-
-static void tlb_invalid_fail(istate_t *istate)
-{
-    uintptr_t va = cp0_badvaddr_read();
-
-    fault_if_from_uspace(istate, "TLB Invalid Exception on %p.",
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, "TLB Invalid Exception.");
-}
-
-static void tlb_modified_fail(istate_t *istate)
-{
-    uintptr_t va = cp0_badvaddr_read();
-
-    fault_if_from_uspace(istate, "TLB Modified Exception on %p.",
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_WRITE, va, "TLB Modified Exception.");
-}
-
 /** Process TLB Refill Exception.
  *

…

     mutex_unlock(&AS->lock);

-    int pfrc;
-    pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ,
-        istate, &pfrc);
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            goto fail;
-            break;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
+    pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
+    if (pte) {
+        /*
+         * Record access to PTE.
+         */
+        pte->a = 1;
+
+        entry_lo_t lo;
+        entry_hi_t hi;
+
+        tlb_prepare_entry_hi(&hi, asid, badvaddr);
+        tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
+            pte->frame);
+
+        /*
+         * New entry is to be inserted into TLB
+         */
+        cp0_entry_hi_write(hi.value);
+
+        if ((badvaddr / PAGE_SIZE) % 2 == 0) {
+            cp0_entry_lo0_write(lo.value);
+            cp0_entry_lo1_write(0);
+        } else {
+            cp0_entry_lo0_write(0);
+            cp0_entry_lo1_write(lo.value);
         }
-    }
-
-    /*
-     * Record access to PTE.
-     */
-    pte->a = 1;
-
-    entry_lo_t lo;
-    entry_hi_t hi;
-
-    tlb_prepare_entry_hi(&hi, asid, badvaddr);
-    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
-        pte->frame);
-
-    /*
-     * New entry is to be inserted into TLB
-     */
-    cp0_entry_hi_write(hi.value);
-
-    if ((badvaddr / PAGE_SIZE) % 2 == 0) {
-        cp0_entry_lo0_write(lo.value);
-        cp0_entry_lo1_write(0);
-    } else {
-        cp0_entry_lo0_write(0);
-        cp0_entry_lo1_write(lo.value);
-    }
-
-    cp0_pagemask_write(TLB_PAGE_MASK_16K);
-    tlbwr();
-
-    return;
-
-fail:
-    tlb_refill_fail(istate);
+
+        cp0_pagemask_write(TLB_PAGE_MASK_16K);
+        tlbwr();
+    }
 }

…

     index.value = cp0_index_read();

-    /*
-     * Fail if the entry is not in TLB.
-     */
-    if (index.p) {
-        printf("TLB entry not found.\n");
-        goto fail;
-    }
-
-    int pfrc;
-    pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ,
-        istate, &pfrc);
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            goto fail;
-            break;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
-        }
-    }
-
-    /*
-     * Read the faulting TLB entry.
-     */
-    tlbr();
-
-    /*
-     * Record access to PTE.
-     */
-    pte->a = 1;
-
-    entry_lo_t lo;
-    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
-        pte->frame);
-
-    /*
-     * The entry is to be updated in TLB.
-     */
-    if ((badvaddr / PAGE_SIZE) % 2 == 0)
-        cp0_entry_lo0_write(lo.value);
-    else
-        cp0_entry_lo1_write(lo.value);
-
-    cp0_pagemask_write(TLB_PAGE_MASK_16K);
-    tlbwi();
-
-    return;
-
-fail:
-    tlb_invalid_fail(istate);
+    ASSERT(!index.p);
+
+    pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate);
+    if (pte) {
+        /*
+         * Read the faulting TLB entry.
+         */
+        tlbr();
+
+        /*
+         * Record access to PTE.
+         */
+        pte->a = 1;
+
+        entry_lo_t lo;
+        tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->c,
+            pte->frame);
+
+        /*
+         * The entry is to be updated in TLB.
+         */
+        if ((badvaddr / PAGE_SIZE) % 2 == 0)
+            cp0_entry_lo0_write(lo.value);
+        else
+            cp0_entry_lo1_write(lo.value);
+
+        cp0_pagemask_write(TLB_PAGE_MASK_16K);
+        tlbwi();
+    }
+
 }

…

     index.value = cp0_index_read();

-    /*
-     * Fail if the entry is not in TLB.
-     */
-    if (index.p) {
-        printf("TLB entry not found.\n");
-        goto fail;
-    }
-
-    int pfrc;
-    pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE,
-        istate, &pfrc);
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            goto fail;
-            break;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
-        }
-    }
-
-    /*
-     * Read the faulting TLB entry.
-     */
-    tlbr();
-
-    /*
-     * Record access and write to PTE.
-     */
-    pte->a = 1;
-    pte->d = 1;
-
-    entry_lo_t lo;
-    tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c,
-        pte->frame);
-
-    /*
-     * The entry is to be updated in TLB.
-     */
-    if ((badvaddr / PAGE_SIZE) % 2 == 0)
-        cp0_entry_lo0_write(lo.value);
-    else
-        cp0_entry_lo1_write(lo.value);
-
-    cp0_pagemask_write(TLB_PAGE_MASK_16K);
-    tlbwi();
-
-    return;
-
-fail:
-    tlb_modified_fail(istate);
+    ASSERT(!index.p);
+
+    pte_t *pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate);
+    if (pte) {
+        /*
+         * Read the faulting TLB entry.
+         */
+        tlbr();
+
+        /*
+         * Record access and write to PTE.
+         */
+        pte->a = 1;
+        pte->d = 1;
+
+        entry_lo_t lo;
+        tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->c,
+            pte->frame);
+
+        /*
+         * The entry is to be updated in TLB.
+         */
+        if ((badvaddr / PAGE_SIZE) % 2 == 0)
+            cp0_entry_lo0_write(lo.value);
+        else
+            cp0_entry_lo1_write(lo.value);
+
+        cp0_pagemask_write(TLB_PAGE_MASK_16K);
+        tlbwi();
+    }
 }
kernel/arch/ppc32/src/mm/pht.c
--- r0ab362c
+++ re32720ff

  * @param access Access mode that caused the fault.
  * @param istate Pointer to interrupted state.
- * @param pfrc Pointer to variable where as_page_fault() return code
- *     will be stored.
  *
  * @return PTE on success, NULL otherwise.

…

  */
 static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access,
-    istate_t *istate, int *pfrc)
+    istate_t *istate)
 {
     /*

…

          */
         return pte;
-    } else {
+    }
+    /*
+     * Mapping not found in page tables.
+     * Resort to higher-level page fault handler.
+     */
+    if (as_page_fault(badvaddr, access, istate) == AS_PF_OK) {
         /*
-         * Mapping not found in page tables.
-         * Resort to higher-level page fault handler.
+         * The higher-level page fault handler succeeded,
+         * The mapping ought to be in place.
         */
-        int rc = as_page_fault(badvaddr, access, istate);
-        switch (rc) {
-        case AS_PF_OK:
-            /*
-             * The higher-level page fault handler succeeded,
-             * The mapping ought to be in place.
-             */
-            pte = page_mapping_find(as, badvaddr, true);
-            ASSERT((pte) && (pte->present));
-            *pfrc = 0;
-            return pte;
-        case AS_PF_DEFER:
-            *pfrc = rc;
-            return NULL;
-        case AS_PF_FAULT:
-            *pfrc = rc;
-            return NULL;
-        default:
-            panic("Unexpected rc (%d).", rc);
-        }
-    }
-}
-
-static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
-{
-    fault_if_from_uspace(istate, "PHT Refill Exception on %p.",
-        (void *) badvaddr);
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, badvaddr,
-        "PHT Refill Exception.");
+        pte = page_mapping_find(as, badvaddr, true);
+        ASSERT((pte) && (pte->present));
+        return pte;
+    }
+
+    return NULL;
 }

…

     badvaddr = istate->pc;

-    int pfrc;
     pte_t *pte = find_mapping_and_check(AS, badvaddr,
-        PF_ACCESS_READ /* FIXME */, istate, &pfrc);
-
-    if (!pte) {
-        switch (pfrc) {
-        case AS_PF_FAULT:
-            pht_refill_fail(badvaddr, istate);
-            return;
-        case AS_PF_DEFER:
-            /*
-             * The page fault came during copy_from_uspace()
-             * or copy_to_uspace().
-             */
-            return;
-        default:
-            panic("Unexpected pfrc (%d).", pfrc);
-        }
-    }
-
-    /* Record access to PTE */
-    pte->accessed = 1;
-    pht_insert(badvaddr, pte);
+        PF_ACCESS_READ /* FIXME */, istate);
+
+    if (pte) {
+        /* Record access to PTE */
+        pte->accessed = 1;
+        pht_insert(badvaddr, pte);
+    }
 }
kernel/arch/sparc64/src/mm/sun4u/tlb.c
--- r0ab362c
+++ re32720ff

 static void dtlb_pte_copy(pte_t *, size_t, bool);
 static void itlb_pte_copy(pte_t *, size_t);
-static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
-    const char *);
-static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t,
-    const char *);
-static void do_fast_data_access_protection_fault(istate_t *,
-    tlb_tag_access_reg_t, const char *);

 const char *context_encoding[] = {

…

      * handler.
      */
-    if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
-        AS_PF_FAULT) {
-        do_fast_instruction_access_mmu_miss_fault(istate,
-            istate->tpc, __func__);
-    }
+    as_page_fault(page_16k, PF_ACCESS_EXEC, istate);
     }
 }

…

     if (!tag.vpn) {
         /* NULL access in kernel */
-        do_fast_data_access_mmu_miss_fault(istate, tag,
-            "Dereferencing NULL pointer.");
+        panic("NULL pointer dereference.");
     } else if (page_8k >= end_of_identity) {
         /* Kernel non-identity. */
         as = AS_KERNEL;
     } else {
-        do_fast_data_access_mmu_miss_fault(istate, tag,
-            "Unexpected kernel page fault.");
+        panic("Unexpected kernel page fault.");
     }
 }

…

      * handler.
      */
-    if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
-        AS_PF_FAULT) {
-        do_fast_data_access_mmu_miss_fault(istate, tag,
-            __func__);
-    }
+    as_page_fault(page_16k, PF_ACCESS_READ, istate);
     }
 }

…

      * handler.
      */
-    if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
-        AS_PF_FAULT) {
-        do_fast_data_access_protection_fault(istate, tag,
-            __func__);
-    }
+    as_page_fault(page_16k, PF_ACCESS_WRITE, istate);
     }
 }

…

 #endif
-
-void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
-    uintptr_t va, const char *str)
-{
-    fault_if_from_uspace(istate, "%s, address=%p.", str, (void *) va);
-    panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
-}
-
-void do_fast_data_access_mmu_miss_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
-{
-    uintptr_t va;
-
-    va = tag.vpn << MMU_PAGE_WIDTH;
-    fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
-        (void *) va, tag.context);
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, str);
-}
-
-void do_fast_data_access_protection_fault(istate_t *istate,
-    tlb_tag_access_reg_t tag, const char *str)
-{
-    uintptr_t va;
-
-    va = tag.vpn << MMU_PAGE_WIDTH;
-    fault_if_from_uspace(istate, "%s, page=%p (asid=%u).", str,
-        (void *) va, tag.context);
-    panic_memtrap(istate, PF_ACCESS_WRITE, va, str);
-}

 void describe_dmmu_fault(void)
kernel/arch/sparc64/src/mm/sun4v/tlb.c
--- r0ab362c
+++ re32720ff

 static void itlb_pte_copy(pte_t *);
 static void dtlb_pte_copy(pte_t *, bool);
-static void do_fast_instruction_access_mmu_miss_fault(istate_t *, uintptr_t,
-    const char *);
-static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
-    const char *);
-static void do_fast_data_access_protection_fault(istate_t *,
-    uint64_t, const char *);

 /*

…

      * handler.
      */
-    if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
-        do_fast_instruction_access_mmu_miss_fault(istate,
-            istate->tpc, __func__);
-    }
+    as_page_fault(va, PF_ACCESS_EXEC, istate);
     }
 }

…

     if (va == 0) {
         /* NULL access in kernel */
-        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
-            __func__);
+        panic("NULL pointer dereference.");
     }
-    do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
-        "kernel page fault.");
+    panic("Unexpected kernel page fault.");
 }

…

      * handler.
      */
-    if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
-            __func__);
-    }
+    as_page_fault(va, PF_ACCESS_READ, istate);
     }
 }

…

      * handler.
      */
-    if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-        do_fast_data_access_protection_fault(istate, page_and_ctx,
-            __func__);
-    }
+    as_page_fault(va, PF_ACCESS_WRITE, istate);
     }
 }

…

 }

-void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, uintptr_t va,
-    const char *str)
-{
-    fault_if_from_uspace(istate, "%s, address=%p.", str,
-        (void *) va);
-    panic_memtrap(istate, PF_ACCESS_EXEC, va, str);
-}
-
-void do_fast_data_access_mmu_miss_fault(istate_t *istate,
-    uint64_t page_and_ctx, const char *str)
-{
-    fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRId64 ").", str,
-        (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
-    panic_memtrap(istate, PF_ACCESS_UNKNOWN, DMISS_ADDRESS(page_and_ctx),
-        str);
-}
-
-void do_fast_data_access_protection_fault(istate_t *istate,
-    uint64_t page_and_ctx, const char *str)
-{
-    fault_if_from_uspace(istate, "%s, page=%p (asid=%" PRId64 ").", str,
-        (void *) DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
-    panic_memtrap(istate, PF_ACCESS_WRITE, DMISS_ADDRESS(page_and_ctx),
-        str);
-}
-
 /**
  * Describes the exact condition which caused the last DMMU fault.
kernel/generic/include/mm/as.h
--- r0ab362c
+++ re32720ff

 #define AS_AREA_ATTR_PARTIAL  1  /**< Not fully initialized area. */

+/** The page fault was resolved by as_page_fault(). */
+#define AS_PF_OK      0
+
+/** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
+#define AS_PF_DEFER   1
+
 /** The page fault was not resolved by as_page_fault(). */
-#define AS_PF_FAULT   0
-
-/** The page fault was resolved by as_page_fault(). */
-#define AS_PF_OK      1
-
-/** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
-#define AS_PF_DEFER   2
+#define AS_PF_FAULT   2
+
+/** The page fault was not resolved by as_page_fault(). Non-verbose version. */
+#define AS_PF_SILENT  3

 /** Address space structure.
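The renumbered AS_PF_* constants above form a single result convention shared by as_page_fault() and the memory-area backends, with AS_PF_SILENT as the new "kill without a report" case. A small illustrative helper summarising what each value means for the caller; this function does not exist in the tree, the real dispatch lives in as_page_fault() in kernel/generic/src/mm/as.c:

/* Illustrative only -- maps each AS_PF_* result to its intended handling. */
static const char *as_pf_result_str(int rc)
{
	switch (rc) {
	case AS_PF_OK:
		return "resolved; retry the faulting access";
	case AS_PF_DEFER:
		return "fault came from copy_{from,to}_uspace(); defer to the failover address";
	case AS_PF_SILENT:
		return "unresolvable; kill the task without a verbose report";
	case AS_PF_FAULT:
	default:
		return "unresolvable; report, then kill the task or panic";
	}
}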
kernel/generic/include/proc/task.h
--- r0ab362c
+++ re32720ff

     uint64_t ucycles;
     uint64_t kcycles;
-
-    /** If true, do not attempt to print a verbose kill message. */
-    bool silent_kill;
 } task_t;
kernel/generic/src/interrupt/interrupt.c
--- r0ab362c
+++ re32720ff

 }

-static NO_TRACE void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
-{
-    if (!TASK->silent_kill) {
-        printf("Task %s (%" PRIu64 ") killed due to an exception at "
-            "program counter %p.\n", TASK->name, TASK->taskid,
-            (void *) istate_get_pc(istate));
-
-        istate_decode(istate);
-        stack_trace_istate(istate);
-
-        printf("Kill message: ");
-        vprintf(fmt, args);
-        printf("\n");
-    }
+static NO_TRACE
+void fault_from_uspace_core(istate_t *istate, const char *fmt, va_list args)
+{
+    printf("Task %s (%" PRIu64 ") killed due to an exception at "
+        "program counter %p.\n", TASK->name, TASK->taskid,
+        (void *) istate_get_pc(istate));
+
+    istate_decode(istate);
+    stack_trace_istate(istate);
+
+    printf("Kill message: ");
+    vprintf(fmt, args);
+    printf("\n");

     task_kill_self(true);
kernel/generic/src/mm/as.c
--- r0ab362c
+++ re32720ff

 #include <syscall/copy.h>
 #include <arch/interrupt.h>
+#include <interrupt.h>

 /**

…

 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
 {
+    int rc = AS_PF_FAULT;
+
     if (!THREAD)
-        return AS_PF_FAULT;
+        goto page_fault;

     if (!AS)
-        return AS_PF_FAULT;
+        goto page_fault;

     mutex_lock(&AS->lock);

…

      * Resort to the backend page fault handler.
      */
-    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
+    rc = area->backend->page_fault(area, page, access);
+    if (rc != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);

…

         istate_set_retaddr(istate,
             (uintptr_t) &memcpy_to_uspace_failover_address);
+    } else if (rc == AS_PF_SILENT) {
+        printf("Killing task %" PRIu64 " due to a "
+            "failed late reservation request.\n", TASK->taskid);
+        task_kill_self(true);
     } else {
-        return AS_PF_FAULT;
+        fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
+        panic_memtrap(istate, access, page, NULL);
     }
kernel/generic/src/mm/backend_anon.c
--- r0ab362c
+++ re32720ff

      * Reserve the memory for this page now.
      */
-    if (!reserve_try_alloc(1)) {
-        printf("Killing task %" PRIu64 " due to a "
-            "failed late reservation request.\n",
-            TASK->taskid);
-        TASK->silent_kill = true;
-        return AS_PF_FAULT;
-    }
+    if (!reserve_try_alloc(1))
+        return AS_PF_SILENT;
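This is the counterpart of the AS_PF_SILENT handling added to as_page_fault() above: the backend only signals that the fault cannot be satisfied and should be killed quietly, while the generic code prints the message and calls task_kill_self(). An abridged sketch of the resulting division of labor on the backend side; the function name and the omitted mapping step are illustrative, only the reservation check is taken from the hunk above:

/* Backend side: signal the condition, do not print or kill here. */
static int example_anon_page_fault(as_area_t *area, uintptr_t upage,
    pf_access_t access)
{
	if (!reserve_try_alloc(1))
		return AS_PF_SILENT;   /* out of reservable memory */

	/* ... allocate and map a new anonymous frame (omitted) ... */

	return AS_PF_OK;
}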
kernel/generic/src/proc/task.c
--- r0ab362c
+++ re32720ff

     task->kcycles = 0;

-    task->silent_kill = false;
-
     task->ipc_info.call_sent = 0;
     task->ipc_info.call_received = 0;