Changes in kernel/arch/ia64/src/mm/tlb.c [7e752b2:925be4e] in mainline
Files changed: 1 edited
Legend:
- Unmodified
- Added
- Removed
kernel/arch/ia64/src/mm/tlb.c
r7e752b2 r925be4e 27 27 */ 28 28 29 /** @addtogroup ia64mm 29 /** @addtogroup ia64mm 30 30 * @{ 31 31 */ … … 53 53 #include <interrupt.h> 54 54 55 #define IO_FRAME_BASE 0xFFFFC00000056 57 55 /** Invalidate all TLB entries. */ 58 56 void tlb_invalidate_all(void) … … 61 59 uintptr_t adr; 62 60 uint32_t count1, count2, stride1, stride2; 63 61 64 62 unsigned int i, j; 65 63 66 64 adr = PAL_PTCE_INFO_BASE(); 67 65 count1 = PAL_PTCE_INFO_COUNT1(); … … 69 67 stride1 = PAL_PTCE_INFO_STRIDE1(); 70 68 stride2 = PAL_PTCE_INFO_STRIDE2(); 71 69 72 70 ipl = interrupts_disable(); 73 71 74 72 for (i = 0; i < count1; i++) { 75 73 for (j = 0; j < count2; j++) { 76 74 asm volatile ( 77 "ptc.e %[adr] ;;" 78 :: [adr] "r" (adr) 75 "ptc.e %0 ;;" 76 : 77 : "r" (adr) 79 78 ); 80 79 adr += stride2; … … 82 81 adr += stride1; 83 82 } 84 83 85 84 interrupts_restore(ipl); 86 85 87 86 srlz_d(); 88 87 srlz_i(); 89 90 88 #ifdef CONFIG_VHPT 91 89 vhpt_invalidate_all(); 92 #endif 90 #endif 93 91 } 94 92 95 93 /** Invalidate entries belonging to an address space. 96 94 * 97 * @param asid Address space identifier. 98 * 95 * @param asid Address space identifier. 99 96 */ 100 97 void tlb_invalidate_asid(asid_t asid) … … 106 103 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt) 107 104 { 108 region_register _trr;105 region_register rr; 109 106 bool restore_rr = false; 110 107 int b = 0; 111 108 int c = cnt; 112 109 113 110 uintptr_t va; 114 111 va = page; 115 112 116 113 rr.word = rr_read(VA2VRN(va)); 117 114 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { … … 120 117 * Save the old content of the register and replace the RID. 
121 118 */ 122 region_register _trr0;123 119 region_register rr0; 120 124 121 rr0 = rr; 125 122 rr0.map.rid = ASID2RID(asid, VA2VRN(va)); … … 129 126 } 130 127 131 while 128 while(c >>= 1) 132 129 b++; 133 130 b >>= 1; … … 172 169 break; 173 170 } 174 175 for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) 176 asm volatile ( 177 "ptc.l %[va], %[ps] ;;" 178 :: [va]"r" (va), 179 [ps] "r" (ps << 2) 180 ); 181 171 for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) 172 asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2)); 182 173 srlz_d(); 183 174 srlz_i(); … … 192 183 /** Insert data into data translation cache. 193 184 * 194 * @param va Virtual page address. 195 * @param asid Address space identifier. 196 * @param entry The rest of TLB entry as required by TLB insertion 197 * format. 198 * 185 * @param va Virtual page address. 186 * @param asid Address space identifier. 187 * @param entry The rest of TLB entry as required by TLB insertion 188 * format. 199 189 */ 200 190 void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) … … 205 195 /** Insert data into instruction translation cache. 206 196 * 207 * @param va 208 * @param asid 209 * @param entry 210 * 197 * @param va Virtual page address. 198 * @param asid Address space identifier. 199 * @param entry The rest of TLB entry as required by TLB insertion 200 * format. 211 201 */ 212 202 void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) … … 217 207 /** Insert data into instruction or data translation cache. 218 208 * 219 * @param va Virtual page address. 220 * @param asid Address space identifier. 221 * @param entry The rest of TLB entry as required by TLB insertion 222 * format. 223 * @param dtc If true, insert into data translation cache, use 224 * instruction translation cache otherwise. 225 * 209 * @param va Virtual page address. 210 * @param asid Address space identifier. 211 * @param entry The rest of TLB entry as required by TLB insertion 212 * format. 
213 * @param dtc If true, insert into data translation cache, use 214 * instruction translation cache otherwise. 226 215 */ 227 216 void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc) 228 217 { 229 region_register _trr;218 region_register rr; 230 219 bool restore_rr = false; 231 220 232 221 rr.word = rr_read(VA2VRN(va)); 233 222 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { … … 236 225 * Save the old content of the register and replace the RID. 237 226 */ 238 region_register _trr0;239 227 region_register rr0; 228 240 229 rr0 = rr; 241 230 rr0.map.rid = ASID2RID(asid, VA2VRN(va)); … … 246 235 247 236 asm volatile ( 248 "mov r8 = psr ;;\n" 249 "rsm %[mask] ;;\n" /* PSR_IC_MASK */ 250 "srlz.d ;;\n" 251 "srlz.i ;;\n" 252 "mov cr.ifa = %[va]\n" /* va */ 253 "mov cr.itir = %[word1] ;;\n" /* entry.word[1] */ 254 "cmp.eq p6, p7 = %[dtc], r0 ;;\n" /* decide between itc and dtc */ 255 "(p6) itc.i %[word0] ;;\n" 256 "(p7) itc.d %[word0] ;;\n" 257 "mov psr.l = r8 ;;\n" 258 "srlz.d ;;\n" 259 :: [mask] "i" (PSR_IC_MASK), 260 [va] "r" (va), 261 [word0] "r" (entry.word[0]), 262 [word1] "r" (entry.word[1]), 263 [dtc] "r" (dtc) 237 "mov r8 = psr;;\n" 238 "rsm %0;;\n" /* PSR_IC_MASK */ 239 "srlz.d;;\n" 240 "srlz.i;;\n" 241 "mov cr.ifa = %1\n" /* va */ 242 "mov cr.itir = %2;;\n" /* entry.word[1] */ 243 "cmp.eq p6,p7 = %4,r0;;\n" /* decide between itc and dtc */ 244 "(p6) itc.i %3;;\n" 245 "(p7) itc.d %3;;\n" 246 "mov psr.l = r8;;\n" 247 "srlz.d;;\n" 248 : 249 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), 250 "r" (entry.word[0]), "r" (dtc) 264 251 : "p6", "p7", "r8" 265 252 ); … … 274 261 /** Insert data into instruction translation register. 275 262 * 276 * @param va 277 * @param asid 278 * @param entry 279 * 280 * @param tr 281 * 282 */ 283 voiditr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)263 * @param va Virtual page address. 264 * @param asid Address space identifier. 
265 * @param entry The rest of TLB entry as required by TLB insertion 266 * format. 267 * @param tr Translation register. 268 */ 269 void 270 itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr) 284 271 { 285 272 tr_mapping_insert(va, asid, entry, false, tr); … … 288 275 /** Insert data into data translation register. 289 276 * 290 * @param va 291 * @param asid 292 * @param entry 293 * 294 * @param tr 295 * 296 */ 297 voiddtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)277 * @param va Virtual page address. 278 * @param asid Address space identifier. 279 * @param entry The rest of TLB entry as required by TLB insertion 280 * format. 281 * @param tr Translation register. 282 */ 283 void 284 dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr) 298 285 { 299 286 tr_mapping_insert(va, asid, entry, true, tr); … … 302 289 /** Insert data into instruction or data translation register. 303 290 * 304 * @param va 305 * @param asid 306 * @param entry 307 * 308 * @param dtr 309 * 310 * @param tr 311 * 312 */ 313 voidtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,291 * @param va Virtual page address. 292 * @param asid Address space identifier. 293 * @param entry The rest of TLB entry as required by TLB insertion 294 * format. 295 * @param dtr If true, insert into data translation register, use 296 * instruction translation register otherwise. 297 * @param tr Translation register. 298 */ 299 void 300 tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, 314 301 size_t tr) 315 302 { 316 region_register _trr;303 region_register rr; 317 304 bool restore_rr = false; 318 305 319 306 rr.word = rr_read(VA2VRN(va)); 320 307 if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) { … … 323 310 * Save the old content of the register and replace the RID. 
324 311 */ 325 region_register _trr0;326 312 region_register rr0; 313 327 314 rr0 = rr; 328 315 rr0.map.rid = ASID2RID(asid, VA2VRN(va)); … … 331 318 srlz_i(); 332 319 } 333 320 334 321 asm volatile ( 335 "mov r8 = psr ;;\n" 336 "rsm %[mask] ;;\n" /* PSR_IC_MASK */ 337 "srlz.d ;;\n" 338 "srlz.i ;;\n" 339 "mov cr.ifa = %[va]\n" /* va */ 340 "mov cr.itir = %[word1] ;;\n" /* entry.word[1] */ 341 "cmp.eq p6, p7 = %[dtr], r0 ;;\n" /* decide between itr and dtr */ 342 "(p6) itr.i itr[%[tr]] = %[word0] ;;\n" 343 "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n" 344 "mov psr.l = r8 ;;\n" 345 "srlz.d ;;\n" 346 :: [mask] "i" (PSR_IC_MASK), 347 [va] "r" (va), 348 [word1] "r" (entry.word[1]), 349 [word0] "r" (entry.word[0]), 350 [tr] "r" (tr), 351 [dtr] "r" (dtr) 322 "mov r8 = psr;;\n" 323 "rsm %0;;\n" /* PSR_IC_MASK */ 324 "srlz.d;;\n" 325 "srlz.i;;\n" 326 "mov cr.ifa = %1\n" /* va */ 327 "mov cr.itir = %2;;\n" /* entry.word[1] */ 328 "cmp.eq p6,p7 = %5,r0;;\n" /* decide between itr and dtr */ 329 "(p6) itr.i itr[%4] = %3;;\n" 330 "(p7) itr.d dtr[%4] = %3;;\n" 331 "mov psr.l = r8;;\n" 332 "srlz.d;;\n" 333 : 334 : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), 335 "r" (entry.word[0]), "r" (tr), "r" (dtr) 352 336 : "p6", "p7", "r8" 353 337 ); … … 362 346 /** Insert data into DTLB. 363 347 * 364 * @param page 365 * @param frame 366 * @param dtr 367 * 368 * @param tr 369 * 370 */ 371 voiddtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,348 * @param page Virtual page address including VRN bits. 349 * @param frame Physical frame address. 350 * @param dtr If true, insert into data translation register, use data 351 * translation cache otherwise. 352 * @param tr Translation register if dtr is true, ignored otherwise. 
353 */ 354 void 355 dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, 372 356 size_t tr) 373 357 { … … 377 361 entry.word[1] = 0; 378 362 379 entry.p = true; 363 entry.p = true; /* present */ 380 364 entry.ma = MA_WRITEBACK; 381 entry.a = true; 382 entry.d = true; 365 entry.a = true; /* already accessed */ 366 entry.d = true; /* already dirty */ 383 367 entry.pl = PL_KERNEL; 384 368 entry.ar = AR_READ | AR_WRITE; … … 396 380 * Purge DTR entries used by the kernel. 397 381 * 398 * @param page Virtual page address including VRN bits. 399 * @param width Width of the purge in bits. 400 * 382 * @param page Virtual page address including VRN bits. 383 * @param width Width of the purge in bits. 401 384 */ 402 385 void dtr_purge(uintptr_t page, size_t width) 403 386 { 404 asm volatile ( 405 "ptr.d %[page], %[width]\n" 406 :: [page] "r" (page), 407 [width] "r" (width << 2) 408 ); 387 asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2)); 409 388 } 410 389 … … 412 391 /** Copy content of PTE into data translation cache. 413 392 * 414 * @param t PTE. 415 * 393 * @param t PTE. 416 394 */ 417 395 void dtc_pte_copy(pte_t *t) 418 396 { 419 397 tlb_entry_t entry; 420 398 421 399 entry.word[0] = 0; 422 400 entry.word[1] = 0; … … 432 410 433 411 dtc_mapping_insert(t->page, t->as->asid, entry); 434 435 412 #ifdef CONFIG_VHPT 436 413 vhpt_mapping_insert(t->page, t->as->asid, entry); 437 #endif 414 #endif 438 415 } 439 416 440 417 /** Copy content of PTE into instruction translation cache. 441 418 * 442 * @param t PTE. 443 * 419 * @param t PTE. 444 420 */ 445 421 void itc_pte_copy(pte_t *t) 446 422 { 447 423 tlb_entry_t entry; 448 424 449 425 entry.word[0] = 0; 450 426 entry.word[1] = 0; … … 461 437 462 438 itc_mapping_insert(t->page, t->as->asid, entry); 463 464 439 #ifdef CONFIG_VHPT 465 440 vhpt_mapping_insert(t->page, t->as->asid, entry); 466 #endif 441 #endif 467 442 } 468 443 469 444 /** Instruction TLB fault handler for faults with VHPT turned off. 
470 445 * 471 * @param vector Interruption vector. 472 * @param istate Structure with saved interruption state. 473 * 446 * @param vector Interruption vector. 447 * @param istate Structure with saved interruption state. 474 448 */ 475 449 void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate) 476 450 { 477 region_register _trr;451 region_register rr; 478 452 rid_t rid; 479 453 uintptr_t va; 480 454 pte_t *t; 481 455 482 va = istate->cr_ifa; 456 va = istate->cr_ifa; /* faulting address */ 483 457 rr.word = rr_read(VA2VRN(va)); 484 458 rid = rr.map.rid; 485 459 486 460 page_table_lock(AS, true); 487 461 t = page_mapping_find(AS, va); … … 499 473 page_table_unlock(AS, true); 500 474 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 501 fault_if_from_uspace(istate, "Page fault at %p.",502 (void *) va);503 panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);475 fault_if_from_uspace(istate,"Page fault at %p.",va); 476 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 477 istate->cr_iip); 504 478 } 505 479 } … … 514 488 } 515 489 490 #define IO_FRAME_BASE 0xFFFFC000000 491 516 492 /** 517 493 * There is special handling of memory mapped legacy io, because of 4KB sized 518 494 * access for userspace. 519 495 * 520 * @param va Virtual address of page fault. 521 * @param istate Structure with saved interruption state. 522 * 523 * @return One on success, zero on failure. 524 * 496 * @param va Virtual address of page fault. 497 * @param istate Structure with saved interruption state. 498 * 499 * @return One on success, zero on failure. 
525 500 */ 526 501 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate) … … 530 505 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >> 531 506 USPACE_IO_PAGE_WIDTH; 532 507 533 508 if (is_io_page_accessible(io_page)) { 534 509 uint64_t page, frame; 535 510 536 511 page = IO_OFFSET + 537 512 (1 << USPACE_IO_PAGE_WIDTH) * io_page; 538 513 frame = IO_FRAME_BASE + 539 514 (1 << USPACE_IO_PAGE_WIDTH) * io_page; 540 515 541 516 tlb_entry_t entry; 542 517 543 518 entry.word[0] = 0; 544 519 entry.word[1] = 0; 545 546 entry.p = true; 547 entry.ma = MA_UNCACHEABLE; 548 entry.a = true; 549 entry.d = true; 520 521 entry.p = true; /* present */ 522 entry.ma = MA_UNCACHEABLE; 523 entry.a = true; /* already accessed */ 524 entry.d = true; /* already dirty */ 550 525 entry.pl = PL_USER; 551 526 entry.ar = AR_READ | AR_WRITE; 552 527 entry.ppn = frame >> PPN_SHIFT; 553 528 entry.ps = USPACE_IO_PAGE_WIDTH; 554 529 555 530 dtc_mapping_insert(page, TASK->as->asid, entry); 556 531 return 1; 557 532 } else { 558 533 fault_if_from_uspace(istate, 559 "IO access fault at %p.", (void *)va);534 "IO access fault at %p.", va); 560 535 } 561 536 } 562 537 } 563 538 564 539 return 0; 565 540 } … … 567 542 /** Data TLB fault handler for faults with VHPT turned off. 568 543 * 569 * @param vector Interruption vector. 570 * @param istate Structure with saved interruption state. 571 * 544 * @param vector Interruption vector. 545 * @param istate Structure with saved interruption state. 572 546 */ 573 547 void alternate_data_tlb_fault(uint64_t vector, istate_t *istate) 574 548 { 575 if (istate->cr_isr.sp) { 576 /* Speculative load. Deffer the exception 577 until a more clever approach can be used. 578 579 Currently if we try to find the mapping 580 for the speculative load while in the kernel, 581 we might introduce a livelock because of 582 the possibly invalid values of the address. 
*/ 583 istate->cr_ipsr.ed = true; 584 return; 585 } 586 587 uintptr_t va = istate->cr_ifa; /* faulting address */ 588 589 region_register_t rr; 590 rr.word = rr_read(VA2VRN(va)); 591 rid_t rid = rr.map.rid; 549 region_register rr; 550 rid_t rid; 551 uintptr_t va; 552 pte_t *t; 553 554 va = istate->cr_ifa; /* faulting address */ 555 rr.word = rr_read(VA2VRN(va)); 556 rid = rr.map.rid; 592 557 if (RID2ASID(rid) == ASID_KERNEL) { 593 558 if (VA2VRN(va) == VRN_KERNEL) { … … 600 565 } 601 566 } 602 603 567 604 568 page_table_lock(AS, true); 605 pte_t *entry= page_mapping_find(AS, va);606 if ( entry) {569 t = page_mapping_find(AS, va); 570 if (t) { 607 571 /* 608 572 * The mapping was found in the software page hash table. 609 573 * Insert it into data translation cache. 610 574 */ 611 dtc_pte_copy( entry);575 dtc_pte_copy(t); 612 576 page_table_unlock(AS, true); 613 577 } else { … … 615 579 if (try_memmap_io_insertion(va, istate)) 616 580 return; 617 618 /* 619 * Forward the page fault to the address space page fault 581 /* 582 * Forward the page fault to the address space page fault 620 583 * handler. 621 584 */ 622 585 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 623 fault_if_from_uspace(istate, "Page fault at %p.",624 (void *) va);625 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);586 fault_if_from_uspace(istate,"Page fault at %p.",va); 587 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 588 istate->cr_iip); 626 589 } 627 590 } … … 632 595 * This fault should not occur. 633 596 * 634 * @param vector Interruption vector. 635 * @param istate Structure with saved interruption state. 636 * 597 * @param vector Interruption vector. 598 * @param istate Structure with saved interruption state. 637 599 */ 638 600 void data_nested_tlb_fault(uint64_t vector, istate_t *istate) 639 601 { 640 ASSERT(false);602 panic("%s.", __func__); 641 603 } 642 604 643 605 /** Data Dirty bit fault handler. 644 606 * 645 * @param vector Interruption vector. 
646 * @param istate Structure with saved interruption state. 647 * 607 * @param vector Interruption vector. 608 * @param istate Structure with saved interruption state. 648 609 */ 649 610 void data_dirty_bit_fault(uint64_t vector, istate_t *istate) 650 611 { 651 region_register _trr;612 region_register rr; 652 613 rid_t rid; 653 614 uintptr_t va; 654 615 pte_t *t; 655 616 656 va = istate->cr_ifa; 617 va = istate->cr_ifa; /* faulting address */ 657 618 rr.word = rr_read(VA2VRN(va)); 658 619 rid = rr.map.rid; 659 620 660 621 page_table_lock(AS, true); 661 622 t = page_mapping_find(AS, va); 662 ASSERT( (t) && (t->p));663 if ( (t) && (t->p) && (t->w)) {623 ASSERT(t && t->p); 624 if (t && t->p && t->w) { 664 625 /* 665 626 * Update the Dirty bit in page tables and reinsert … … 670 631 } else { 671 632 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 672 fault_if_from_uspace(istate, "Page fault at %p.",673 (void *) va);674 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);633 fault_if_from_uspace(istate,"Page fault at %p.",va); 634 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 635 istate->cr_iip); 675 636 } 676 637 } … … 680 641 /** Instruction access bit fault handler. 681 642 * 682 * @param vector Interruption vector. 683 * @param istate Structure with saved interruption state. 684 * 643 * @param vector Interruption vector. 644 * @param istate Structure with saved interruption state. 
685 645 */ 686 646 void instruction_access_bit_fault(uint64_t vector, istate_t *istate) 687 647 { 688 region_register _trr;648 region_register rr; 689 649 rid_t rid; 690 650 uintptr_t va; 691 pte_t *t; 692 693 va = istate->cr_ifa; 651 pte_t *t; 652 653 va = istate->cr_ifa; /* faulting address */ 694 654 rr.word = rr_read(VA2VRN(va)); 695 655 rid = rr.map.rid; 696 656 697 657 page_table_lock(AS, true); 698 658 t = page_mapping_find(AS, va); 699 ASSERT( (t) && (t->p));700 if ( (t) && (t->p) && (t->x)) {659 ASSERT(t && t->p); 660 if (t && t->p && t->x) { 701 661 /* 702 662 * Update the Accessed bit in page tables and reinsert … … 707 667 } else { 708 668 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 709 fault_if_from_uspace(istate, "Page fault at %p.", 710 (void *) va);711 panic_memtrap(istate, PF_ACCESS_EXEC, va, NULL);669 fault_if_from_uspace(istate, "Page fault at %p.", va); 670 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 671 istate->cr_iip); 712 672 } 713 673 } … … 719 679 * @param vector Interruption vector. 720 680 * @param istate Structure with saved interruption state. 
721 *722 681 */ 723 682 void data_access_bit_fault(uint64_t vector, istate_t *istate) 724 683 { 725 region_register _trr;684 region_register rr; 726 685 rid_t rid; 727 686 uintptr_t va; 728 687 pte_t *t; 729 730 va = istate->cr_ifa; 688 689 va = istate->cr_ifa; /* faulting address */ 731 690 rr.word = rr_read(VA2VRN(va)); 732 691 rid = rr.map.rid; 733 692 734 693 page_table_lock(AS, true); 735 694 t = page_mapping_find(AS, va); 736 ASSERT( (t) && (t->p));737 if ( (t) && (t->p)) {695 ASSERT(t && t->p); 696 if (t && t->p) { 738 697 /* 739 698 * Update the Accessed bit in page tables and reinsert … … 744 703 } else { 745 704 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 746 fault_if_from_uspace(istate, "Page fault at %p.", 747 (void *) va);748 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);705 fault_if_from_uspace(istate, "Page fault at %p.", va); 706 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 707 istate->cr_iip); 749 708 } 750 709 } … … 756 715 * @param vector Interruption vector. 757 716 * @param istate Structure with saved interruption state. 758 *759 717 */ 760 718 void data_access_rights_fault(uint64_t vector, istate_t *istate) 761 719 { 762 region_register _trr;720 region_register rr; 763 721 rid_t rid; 764 722 uintptr_t va; 765 723 pte_t *t; 766 767 va = istate->cr_ifa; 724 725 va = istate->cr_ifa; /* faulting address */ 768 726 rr.word = rr_read(VA2VRN(va)); 769 727 rid = rr.map.rid; 770 728 771 729 /* 772 730 * Assume a write to a read-only page. 
… … 774 732 page_table_lock(AS, true); 775 733 t = page_mapping_find(AS, va); 776 ASSERT( (t) && (t->p));734 ASSERT(t && t->p); 777 735 ASSERT(!t->w); 778 736 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 779 fault_if_from_uspace(istate, "Page fault at %p.", 780 (void *) va);781 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);737 fault_if_from_uspace(istate, "Page fault at %p.", va); 738 panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid, 739 istate->cr_iip); 782 740 } 783 741 page_table_unlock(AS, true); … … 788 746 * @param vector Interruption vector. 789 747 * @param istate Structure with saved interruption state. 790 *791 748 */ 792 749 void page_not_present(uint64_t vector, istate_t *istate) 793 750 { 794 region_register _trr;751 region_register rr; 795 752 rid_t rid; 796 753 uintptr_t va; 797 754 pte_t *t; 798 755 799 va = istate->cr_ifa; 756 va = istate->cr_ifa; /* faulting address */ 800 757 rr.word = rr_read(VA2VRN(va)); 801 758 rid = rr.map.rid; 802 759 803 760 page_table_lock(AS, true); 804 761 t = page_mapping_find(AS, va); … … 818 775 page_table_unlock(AS, true); 819 776 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 820 fault_if_from_uspace(istate, "Page fault at %p.", 821 (void *) va); 822 panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL); 777 fault_if_from_uspace(istate, "Page fault at %p.", va); 778 panic("%s: va=%p, rid=%d.", __func__, va, rid); 823 779 } 824 780 }
Note: See TracChangeset for help on using the changeset viewer.