Changes in kernel/arch/ia64/src/mm/tlb.c [22f0561:0ff03f3] in mainline
File: 1 edited
Legend:
  (no marker)  Unmodified
  +            Added
  -            Removed
kernel/arch/ia64/src/mm/tlb.c
r22f0561 r0ff03f3 52 52 #include <arch.h> 53 53 #include <interrupt.h> 54 #include <arch/legacyio.h> 54 55 #define IO_FRAME_BASE 0xFFFFC000000 55 56 56 57 /** Invalidate all TLB entries. */ … … 466 467 } 467 468 468 static bool is_kernel_fault(uintptr_t va)469 {470 region_register_t rr;471 472 rr.word = rr_read(VA2VRN(va));473 rid_t rid = rr.map.rid;474 return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL);475 }476 477 469 /** Instruction TLB fault handler for faults with VHPT turned off. 478 470 * … … 488 480 va = istate->cr_ifa; /* faulting address */ 489 481 490 ASSERT(!is_kernel_fault(va)); 491 482 page_table_lock(AS, true); 492 483 t = page_mapping_find(AS, va, true); 493 484 if (t) { … … 497 488 */ 498 489 itc_pte_copy(t); 490 page_table_unlock(AS, true); 499 491 } else { 500 492 /* 501 493 * Forward the page fault to address space page fault handler. 502 494 */ 495 page_table_unlock(AS, true); 503 496 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 504 497 fault_if_from_uspace(istate, "Page fault at %p.", … … 529 522 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate) 530 523 { 531 if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) {524 if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) { 532 525 if (TASK) { 533 uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>534 LEGACYIO_SINGLE_PAGE_WIDTH;526 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >> 527 USPACE_IO_PAGE_WIDTH; 535 528 536 529 if (is_io_page_accessible(io_page)) { 537 530 uint64_t page, frame; 538 531 539 page = LEGACYIO_USER_BASE+540 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;541 frame = LEGACYIO_PHYS_BASE +542 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;532 page = IO_OFFSET + 533 (1 << USPACE_IO_PAGE_WIDTH) * io_page; 534 frame = IO_FRAME_BASE + 535 (1 << USPACE_IO_PAGE_WIDTH) * io_page; 543 536 544 537 tlb_entry_t entry; … … 554 547 entry.ar = AR_READ | AR_WRITE; 555 548 entry.ppn = frame >> PPN_SHIFT; 556 entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH;549 entry.ps = USPACE_IO_PAGE_WIDTH; 557 550 558 551 dtc_mapping_insert(page, TASK->as->asid, entry); … … 577 570 { 578 571 if (istate->cr_isr.sp) { 579 /* 580 * Speculative load. Deffer the exception until a more clever581 * approach can be used. Currently if we try to find the582 * mapping for the speculative load while in the kernel, we583 * might introduce a livelock because of the possibly invalid584 * values of the address.585 */572 /* Speculative load. Deffer the exception 573 until a more clever approach can be used. 574 575 Currently if we try to find the mapping 576 for the speculative load while in the kernel, 577 we might introduce a livelock because of 578 the possibly invalid values of the address. */ 586 579 istate->cr_ipsr.ed = true; 587 580 return; … … 589 582 590 583 uintptr_t va = istate->cr_ifa; /* faulting address */ 591 as_t *as = AS; 592 593 if (is_kernel_fault(va)) { 594 if (va < end_of_identity) { 584 585 region_register_t rr; 586 rr.word = rr_read(VA2VRN(va)); 587 rid_t rid = rr.map.rid; 588 if (RID2ASID(rid) == ASID_KERNEL) { 589 if (VA2VRN(va) == VRN_KERNEL) { 595 590 /* 596 * Create kernel identity mapping for low memory. 591 * Provide KA2PA(identity) mapping for faulting piece of 592 * kernel address space. 
597 593 */ 598 594 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0); 599 595 return; 600 } else {601 as = AS_KERNEL;602 596 } 603 597 } 604 598 605 599 606 pte_t *entry = page_mapping_find(as, va, true); 600 page_table_lock(AS, true); 601 pte_t *entry = page_mapping_find(AS, va, true); 607 602 if (entry) { 608 603 /* … … 611 606 */ 612 607 dtc_pte_copy(entry); 608 page_table_unlock(AS, true); 613 609 } else { 610 page_table_unlock(AS, true); 614 611 if (try_memmap_io_insertion(va, istate)) 615 612 return; … … 650 647 uintptr_t va; 651 648 pte_t *t; 652 as_t *as = AS;653 649 654 650 va = istate->cr_ifa; /* faulting address */ 655 651 656 if (is_kernel_fault(va)) 657 as = AS_KERNEL; 658 659 t = page_mapping_find(as, va, true); 652 page_table_lock(AS, true); 653 t = page_mapping_find(AS, va, true); 660 654 ASSERT((t) && (t->p)); 661 655 if ((t) && (t->p) && (t->w)) { … … 673 667 } 674 668 } 669 page_table_unlock(AS, true); 675 670 } 676 671 … … 687 682 688 683 va = istate->cr_ifa; /* faulting address */ 689 690 ASSERT(!is_kernel_fault(va)); 691 684 685 page_table_lock(AS, true); 692 686 t = page_mapping_find(AS, va, true); 693 687 ASSERT((t) && (t->p)); … … 706 700 } 707 701 } 702 page_table_unlock(AS, true); 708 703 } 709 704 … … 718 713 uintptr_t va; 719 714 pte_t *t; 720 as_t *as = AS;721 715 722 716 va = istate->cr_ifa; /* faulting address */ 723 717 724 if (is_kernel_fault(va)) 725 as = AS_KERNEL; 726 727 t = page_mapping_find(as, va, true); 718 page_table_lock(AS, true); 719 t = page_mapping_find(AS, va, true); 728 720 ASSERT((t) && (t->p)); 729 721 if ((t) && (t->p)) { … … 741 733 } 742 734 } 735 page_table_unlock(AS, true); 743 736 } 744 737 … … 755 748 756 749 va = istate->cr_ifa; /* faulting address */ 757 758 ASSERT(!is_kernel_fault(va));759 750 760 751 /* 761 752 * Assume a write to a read-only page. 762 753 */ 754 page_table_lock(AS, true); 763 755 t = page_mapping_find(AS, va, true); 764 756 ASSERT((t) && (t->p)); … … 769 761 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL); 770 762 } 763 page_table_unlock(AS, true); 771 764 } 772 765 … … 784 777 va = istate->cr_ifa; /* faulting address */ 785 778 786 ASSERT(!is_kernel_fault(va)); 787 779 page_table_lock(AS, true); 788 780 t = page_mapping_find(AS, va, true); 789 781 ASSERT(t); … … 798 790 else 799 791 dtc_pte_copy(t); 792 page_table_unlock(AS, true); 800 793 } else { 794 page_table_unlock(AS, true); 801 795 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 802 796 fault_if_from_uspace(istate, "Page fault at %p.",
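For orientation, both sides of this change compute the legacy I/O mapping in try_memmap_io_insertion() the same way and only differ in macro names: the faulting address is reduced to an I/O page index, from which the user-space page and the physical frame of that I/O page are derived. The stand-alone sketch below reproduces that arithmetic; only the IO_FRAME_BASE value appears in this changeset, while the IO_OFFSET, IO_PAGE_WIDTH and USPACE_IO_PAGE_WIDTH values and the sample address are illustrative assumptions.

/*
 * Stand-alone sketch of the try_memmap_io_insertion() arithmetic shown in
 * the diff above.  Only IO_FRAME_BASE carries the value from this changeset;
 * the other constants and the sample address are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define IO_FRAME_BASE         UINT64_C(0xFFFFC000000)      /* value from the diff */
#define IO_OFFSET             UINT64_C(0x0001000000000000) /* assumed window base */
#define IO_PAGE_WIDTH         26   /* assumed width of the whole legacy I/O window */
#define USPACE_IO_PAGE_WIDTH  12   /* assumed width of a single I/O page */

int main(void)
{
	/* Hypothetical faulting address inside the user-space I/O window. */
	uint64_t va = IO_OFFSET + 0x3f8;

	if ((va >= IO_OFFSET) && (va < IO_OFFSET + (UINT64_C(1) << IO_PAGE_WIDTH))) {
		/* Index of the I/O page the address falls into. */
		uint64_t io_page = (va & ((UINT64_C(1) << IO_PAGE_WIDTH) - 1)) >>
		    USPACE_IO_PAGE_WIDTH;

		/* Virtual page and physical frame that a DTC entry would map. */
		uint64_t page = IO_OFFSET +
		    (UINT64_C(1) << USPACE_IO_PAGE_WIDTH) * io_page;
		uint64_t frame = IO_FRAME_BASE +
		    (UINT64_C(1) << USPACE_IO_PAGE_WIDTH) * io_page;

		printf("io_page=%" PRIu64 " page=0x%" PRIx64 " frame=0x%" PRIx64 "\n",
		    io_page, page, frame);
	}

	return 0;
}

In the kernel itself, the resulting page/frame pair is what gets packed into a tlb_entry_t and handed to dtc_mapping_insert() under the task's ASID, as the diff shows.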