Changeset c520034 in mainline for kernel/arch/ia64/src/mm/tlb.c
- Timestamp:
- 2011-12-31T18:19:35Z (13 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 295f658, 77c2b02, 96cd5b4
- Parents:
- 852052d (diff), 22f0561 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/ia64/src/mm/tlb.c
r852052d rc520034 52 52 #include <arch.h> 53 53 #include <interrupt.h> 54 55 #define IO_FRAME_BASE 0xFFFFC000000 54 #include <arch/legacyio.h> 56 55 57 56 /** Invalidate all TLB entries. */ … … 467 466 } 468 467 468 static bool is_kernel_fault(uintptr_t va) 469 { 470 region_register_t rr; 471 472 rr.word = rr_read(VA2VRN(va)); 473 rid_t rid = rr.map.rid; 474 return (RID2ASID(rid) == ASID_KERNEL) && (VA2VRN(va) == VRN_KERNEL); 475 } 476 469 477 /** Instruction TLB fault handler for faults with VHPT turned off. 470 478 * … … 480 488 va = istate->cr_ifa; /* faulting address */ 481 489 482 page_table_lock(AS, true); 490 ASSERT(!is_kernel_fault(va)); 491 483 492 t = page_mapping_find(AS, va, true); 484 493 if (t) { … … 488 497 */ 489 498 itc_pte_copy(t); 490 page_table_unlock(AS, true);491 499 } else { 492 500 /* 493 501 * Forward the page fault to address space page fault handler. 494 502 */ 495 page_table_unlock(AS, true);496 503 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 497 504 fault_if_from_uspace(istate, "Page fault at %p.", … … 522 529 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate) 523 530 { 524 if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 <<IO_PAGE_WIDTH))) {531 if ((va >= LEGACYIO_USER_BASE) && (va < LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH))) { 525 532 if (TASK) { 526 uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>527 USPACE_IO_PAGE_WIDTH;533 uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >> 534 LEGACYIO_SINGLE_PAGE_WIDTH; 528 535 529 536 if (is_io_page_accessible(io_page)) { 530 537 uint64_t page, frame; 531 538 532 page = IO_OFFSET+533 (1 << USPACE_IO_PAGE_WIDTH) * io_page;534 frame = IO_FRAME_BASE +535 (1 << USPACE_IO_PAGE_WIDTH) * io_page;539 page = LEGACYIO_USER_BASE + 540 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page; 541 frame = LEGACYIO_PHYS_BASE + 542 (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page; 536 543 537 544 tlb_entry_t entry; … … 547 554 entry.ar = AR_READ | AR_WRITE; 548 555 
entry.ppn = frame >> PPN_SHIFT; 549 entry.ps = USPACE_IO_PAGE_WIDTH;556 entry.ps = LEGACYIO_SINGLE_PAGE_WIDTH; 550 557 551 558 dtc_mapping_insert(page, TASK->as->asid, entry); … … 570 577 { 571 578 if (istate->cr_isr.sp) { 572 /* Speculative load. Deffer the exception573 until a more clever approach can be used.574 575 Currently if we try to find the mapping576 for the speculative load while in the kernel,577 we might introduce a livelock because of578 the possibly invalid values of the address.*/579 /* 580 * Speculative load. Deffer the exception until a more clever 581 * approach can be used. Currently if we try to find the 582 * mapping for the speculative load while in the kernel, we 583 * might introduce a livelock because of the possibly invalid 584 * values of the address. 585 */ 579 586 istate->cr_ipsr.ed = true; 580 587 return; … … 582 589 583 590 uintptr_t va = istate->cr_ifa; /* faulting address */ 584 585 region_register_t rr; 586 rr.word = rr_read(VA2VRN(va)); 587 rid_t rid = rr.map.rid; 588 if (RID2ASID(rid) == ASID_KERNEL) { 589 if (VA2VRN(va) == VRN_KERNEL) { 591 as_t *as = AS; 592 593 if (is_kernel_fault(va)) { 594 if (va < end_of_identity) { 590 595 /* 591 * Provide KA2PA(identity) mapping for faulting piece of 592 * kernel address space. 596 * Create kernel identity mapping for low memory. 
593 597 */ 594 598 dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0); 595 599 return; 600 } else { 601 as = AS_KERNEL; 596 602 } 597 603 } 598 604 599 605 600 page_table_lock(AS, true); 601 pte_t *entry = page_mapping_find(AS, va, true); 606 pte_t *entry = page_mapping_find(as, va, true); 602 607 if (entry) { 603 608 /* … … 606 611 */ 607 612 dtc_pte_copy(entry); 608 page_table_unlock(AS, true);609 613 } else { 610 page_table_unlock(AS, true);611 614 if (try_memmap_io_insertion(va, istate)) 612 615 return; … … 647 650 uintptr_t va; 648 651 pte_t *t; 652 as_t *as = AS; 649 653 650 654 va = istate->cr_ifa; /* faulting address */ 651 655 652 page_table_lock(AS, true); 653 t = page_mapping_find(AS, va, true); 656 if (is_kernel_fault(va)) 657 as = AS_KERNEL; 658 659 t = page_mapping_find(as, va, true); 654 660 ASSERT((t) && (t->p)); 655 661 if ((t) && (t->p) && (t->w)) { … … 667 673 } 668 674 } 669 page_table_unlock(AS, true);670 675 } 671 676 … … 682 687 683 688 va = istate->cr_ifa; /* faulting address */ 684 685 page_table_lock(AS, true); 689 690 ASSERT(!is_kernel_fault(va)); 691 686 692 t = page_mapping_find(AS, va, true); 687 693 ASSERT((t) && (t->p)); … … 700 706 } 701 707 } 702 page_table_unlock(AS, true);703 708 } 704 709 … … 713 718 uintptr_t va; 714 719 pte_t *t; 720 as_t *as = AS; 715 721 716 722 va = istate->cr_ifa; /* faulting address */ 717 723 718 page_table_lock(AS, true); 719 t = page_mapping_find(AS, va, true); 724 if (is_kernel_fault(va)) 725 as = AS_KERNEL; 726 727 t = page_mapping_find(as, va, true); 720 728 ASSERT((t) && (t->p)); 721 729 if ((t) && (t->p)) { … … 733 741 } 734 742 } 735 page_table_unlock(AS, true);736 743 } 737 744 … … 748 755 749 756 va = istate->cr_ifa; /* faulting address */ 757 758 ASSERT(!is_kernel_fault(va)); 750 759 751 760 /* 752 761 * Assume a write to a read-only page. 
753 762 */ 754 page_table_lock(AS, true);755 763 t = page_mapping_find(AS, va, true); 756 764 ASSERT((t) && (t->p)); … … 761 769 panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL); 762 770 } 763 page_table_unlock(AS, true);764 771 } 765 772 … … 777 784 va = istate->cr_ifa; /* faulting address */ 778 785 779 page_table_lock(AS, true); 786 ASSERT(!is_kernel_fault(va)); 787 780 788 t = page_mapping_find(AS, va, true); 781 789 ASSERT(t); … … 790 798 else 791 799 dtc_pte_copy(t); 792 page_table_unlock(AS, true);793 800 } else { 794 page_table_unlock(AS, true);795 801 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 796 802 fault_if_from_uspace(istate, "Page fault at %p.",
Note:
See TracChangeset
for help on using the changeset viewer.