Changeset 2a2fbc8 in mainline
- Timestamp: 2016-09-01T17:05:13Z (8 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 42d08592
- Parents: f126c87 (diff), fb63c06 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 24 edited
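Taken together, the hunks below make one interface change and propagate it through the tree: page_mapping_find() no longer hands out a pointer into the live page tables, it fills in a caller-supplied pte_t copy and reports whether a mapping was found, and a new page_mapping_update() operation writes modified accessed/dirty bits back. The before/after prototypes, as they appear in the kernel/generic/include/mm/page.h hunk further down:

    /* Parent f126c87 */
    extern pte_t *page_mapping_find(as_t *, uintptr_t, bool);

    /* This changeset (2a2fbc8) */
    extern bool page_mapping_find(as_t *, uintptr_t, bool, pte_t *);
    extern void page_mapping_update(as_t *, uintptr_t, bool, pte_t *);

The hash-table backend additionally trades the external page_ht_lock mutex for an IRQ spinlock held inside page_ht.c (see the as_ht.c and page_ht.c hunks below).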
kernel/arch/abs32le/include/arch/mm/page.h
rf126c87 r2a2fbc8 (lines 115-119)
  /* Macros for querying the last level entries. */
  #define PTE_VALID_ARCH(p) \
-     (*((uint32_t *) (p)) != 0)
+     ((p)->soft_valid != 0)
  #define PTE_PRESENT_ARCH(p) \
      ((p)->present != 0)
kernel/arch/amd64/include/arch/mm/page.h
rf126c87 r2a2fbc8 (lines 131-135)
  /* Macros for querying the last-level PTE entries. */
  #define PTE_VALID_ARCH(p) \
-     (*((uint64_t *) (p)) != 0)
+     ((p)->soft_valid != 0)
  #define PTE_PRESENT_ARCH(p) \
      ((p)->present != 0)
kernel/arch/arm32/include/arch/mm/page_armv4.h
rf126c87 r2a2fbc8 (lines 44-48)
  /* Macros for querying the last-level PTE entries. */
  #define PTE_VALID_ARCH(pte) \
-     (*((uint32_t *) (pte)) != 0)
+     (((pte_t *) (pte))->l0.should_be_zero != 0 || PTE_PRESENT_ARCH(pte))
  #define PTE_PRESENT_ARCH(pte) \
      (((pte_t *) (pte))->l0.descriptor_type != 0)
kernel/arch/arm32/include/arch/mm/page_armv6.h
rf126c87 r2a2fbc8 (lines 44-48)
  /* Macros for querying the last-level PTE entries. */
  #define PTE_VALID_ARCH(pte) \
-     (*((uint32_t *) (pte)) != 0)
+     (((pte_t *) (pte))->l0.should_be_zero_0 != 0 || PTE_PRESENT_ARCH(pte))
  #define PTE_PRESENT_ARCH(pte) \
      (((pte_t *) (pte))->l0.descriptor_type != 0)
kernel/arch/ia32/include/arch/mm/page.h
rf126c87 r2a2fbc8 (lines 132-136)
  /* Macros for querying the last level entries. */
  #define PTE_VALID_ARCH(p) \
-     (*((uint32_t *) (p)) != 0)
+     ((p)->soft_valid != 0)
  #define PTE_PRESENT_ARCH(p) \
      ((p)->present != 0)
kernel/arch/ia64/src/mm/tlb.c
rf126c87 r2a2fbc8 484 484 { 485 485 uintptr_t va; 486 pte_t *t;486 pte_t t; 487 487 488 488 va = istate->cr_ifa; /* faulting address */ … … 490 490 ASSERT(!is_kernel_fault(va)); 491 491 492 t = page_mapping_find(AS, va, true);493 if ( t) {492 bool found = page_mapping_find(AS, va, true, &t); 493 if (found) { 494 494 /* 495 495 * The mapping was found in software page hash table. 496 496 * Insert it into data translation cache. 497 497 */ 498 itc_pte_copy( t);498 itc_pte_copy(&t); 499 499 } else { 500 500 /* … … 600 600 601 601 602 pte_t *entry = page_mapping_find(as, va, true); 603 if (entry) { 602 pte_t t; 603 bool found = page_mapping_find(as, va, true, &t); 604 if (found) { 604 605 /* 605 606 * The mapping was found in the software page hash table. 606 607 * Insert it into data translation cache. 607 608 */ 608 dtc_pte_copy( entry);609 dtc_pte_copy(&t); 609 610 } else { 610 611 if (try_memmap_io_insertion(va, istate)) … … 641 642 { 642 643 uintptr_t va; 643 pte_t *t;644 pte_t t; 644 645 as_t *as = AS; 645 646 … … 649 650 as = AS_KERNEL; 650 651 651 t = page_mapping_find(as, va, true); 652 ASSERT((t) && (t->p)); 653 if ((t) && (t->p) && (t->w)) { 652 bool found = page_mapping_find(as, va, true, &t); 653 654 ASSERT(found); 655 ASSERT(t.p); 656 657 if (found && t.p && t.w) { 654 658 /* 655 659 * Update the Dirty bit in page tables and reinsert 656 660 * the mapping into DTC. 657 661 */ 658 t->d = true; 659 dtc_pte_copy(t); 662 t.d = true; 663 dtc_pte_copy(&t); 664 page_mapping_update(as, va, true, &t); 660 665 } else { 661 666 as_page_fault(va, PF_ACCESS_WRITE, istate); … … 672 677 { 673 678 uintptr_t va; 674 pte_t *t;679 pte_t t; 675 680 676 681 va = istate->cr_ifa; /* faulting address */ … … 678 683 ASSERT(!is_kernel_fault(va)); 679 684 680 t = page_mapping_find(AS, va, true); 681 ASSERT((t) && (t->p)); 682 if ((t) && (t->p) && (t->x)) { 685 bool found = page_mapping_find(AS, va, true, &t); 686 687 ASSERT(found); 688 ASSERT(t.p); 689 690 if (found && t.p && t.x) { 683 691 /* 684 692 * Update the Accessed bit in page tables and reinsert 685 693 * the mapping into ITC. 686 694 */ 687 t->a = true; 688 itc_pte_copy(t); 695 t.a = true; 696 itc_pte_copy(&t); 697 page_mapping_update(AS, va, true, &t); 689 698 } else { 690 699 as_page_fault(va, PF_ACCESS_EXEC, istate); … … 701 710 { 702 711 uintptr_t va; 703 pte_t *t;712 pte_t t; 704 713 as_t *as = AS; 705 714 … … 709 718 as = AS_KERNEL; 710 719 711 t = page_mapping_find(as, va, true); 712 ASSERT((t) && (t->p)); 713 if ((t) && (t->p)) { 720 bool found = page_mapping_find(as, va, true, &t); 721 722 ASSERT(found); 723 ASSERT(t.p); 724 725 if (found && t.p) { 714 726 /* 715 727 * Update the Accessed bit in page tables and reinsert 716 728 * the mapping into DTC. 717 729 */ 718 t->a = true; 719 dtc_pte_copy(t); 730 t.a = true; 731 dtc_pte_copy(&t); 732 page_mapping_update(as, va, true, &t); 720 733 } else { 721 734 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { … … 736 749 { 737 750 uintptr_t va; 738 pte_t *t;751 pte_t t; 739 752 740 753 va = istate->cr_ifa; /* faulting address */ … … 745 758 * Assume a write to a read-only page. 
746 759 */ 747 t = page_mapping_find(AS, va, true); 748 ASSERT((t) && (t->p)); 749 ASSERT(!t->w); 760 bool found = page_mapping_find(AS, va, true, &t); 761 762 ASSERT(found); 763 ASSERT(t.p); 764 ASSERT(!t.w); 765 750 766 as_page_fault(va, PF_ACCESS_WRITE, istate); 751 767 } … … 760 776 { 761 777 uintptr_t va; 762 pte_t *t;778 pte_t t; 763 779 764 780 va = istate->cr_ifa; /* faulting address */ … … 766 782 ASSERT(!is_kernel_fault(va)); 767 783 768 t = page_mapping_find(AS, va, true); 769 ASSERT(t); 770 771 if (t->p) { 784 bool found = page_mapping_find(AS, va, true, &t); 785 786 ASSERT(found); 787 788 if (t.p) { 772 789 /* 773 790 * If the Present bit is set in page hash table, just copy it 774 791 * and update ITC/DTC. 775 792 */ 776 if (t ->x)777 itc_pte_copy( t);793 if (t.x) 794 itc_pte_copy(&t); 778 795 else 779 dtc_pte_copy( t);796 dtc_pte_copy(&t); 780 797 } else { 781 798 as_page_fault(va, PF_ACCESS_READ, istate); -
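The fault handlers above (and the mips32, ppc32 and sparc64 handlers below) are converted to one and the same pattern: obtain a copy of the PTE, test and modify the copy, and write the accessed/dirty bits back explicitly. A condensed before/after sketch of that pattern, distilled from the dirty-bit fault hunk above (assertions omitted):

    /* Parent f126c87: modify the PTE in place through the returned pointer. */
    pte_t *t = page_mapping_find(as, va, true);
    if (t && t->p && t->w) {
        t->d = true;          /* dirty bit updated directly in the page table */
        dtc_pte_copy(t);
    }

    /* This changeset: work on a copy and push the change back. */
    pte_t t;
    bool found = page_mapping_find(as, va, true, &t);
    if (found && t.p && t.w) {
        t.d = true;           /* dirty bit set on the local copy */
        dtc_pte_copy(&t);
        page_mapping_update(as, va, true, &t);
    }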
kernel/arch/mips32/include/arch/mm/page.h
rf126c87 r2a2fbc8 (lines 138-143)
  /* Last-level info macros. */
- #define PTE_VALID_ARCH(pte)        (*((uint32_t *) (pte)) != 0)
- #define PTE_PRESENT_ARCH(pte)      …
- #define PTE_GET_FRAME_ARCH(pte)    …
- #define PTE_WRITABLE_ARCH(pte)     …
- #define PTE_EXECUTABLE_ARCH(pte)   …
+ #define PTE_VALID_ARCH(pte)        ((pte)->soft_valid != 0)
+ #define PTE_PRESENT_ARCH(pte)      ((pte)->p != 0)
+ #define PTE_GET_FRAME_ARCH(pte)    ((pte)->pfn << 12)
+ #define PTE_WRITABLE_ARCH(pte)     ((pte)->w != 0)
+ #define PTE_EXECUTABLE_ARCH(pte)   1
kernel/arch/mips32/src/mm/tlb.c
rf126c87 r2a2fbc8 97 97 entry_lo_t lo; 98 98 uintptr_t badvaddr; 99 pte_t *pte;99 pte_t pte; 100 100 101 101 badvaddr = cp0_badvaddr_read(); 102 102 103 pte = page_mapping_find(AS, badvaddr, true);104 if ( pte && pte->p) {103 bool found = page_mapping_find(AS, badvaddr, true, &pte); 104 if (found && pte.p) { 105 105 /* 106 106 * Record access to PTE. 107 107 */ 108 pte->a = 1; 109 110 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, 111 pte->cacheable, pte->pfn); 108 pte.a = 1; 109 110 tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.d, 111 pte.cacheable, pte.pfn); 112 113 page_mapping_update(AS, badvaddr, true, &pte); 112 114 113 115 /* … … 138 140 tlb_index_t index; 139 141 uintptr_t badvaddr; 140 pte_t *pte;142 pte_t pte; 141 143 142 144 /* … … 162 164 badvaddr = cp0_badvaddr_read(); 163 165 164 pte = page_mapping_find(AS, badvaddr, true);165 if ( pte && pte->p) {166 bool found = page_mapping_find(AS, badvaddr, true, &pte); 167 if (found && pte.p) { 166 168 /* 167 169 * Read the faulting TLB entry. … … 172 174 * Record access to PTE. 173 175 */ 174 pte->a = 1; 175 176 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->d, 177 pte->cacheable, pte->pfn); 176 pte.a = 1; 177 178 tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.d, 179 pte.cacheable, pte.pfn); 180 181 page_mapping_update(AS, badvaddr, true, &pte); 178 182 179 183 /* … … 200 204 tlb_index_t index; 201 205 uintptr_t badvaddr; 202 pte_t *pte;206 pte_t pte; 203 207 204 208 badvaddr = cp0_badvaddr_read(); … … 224 228 } 225 229 226 pte = page_mapping_find(AS, badvaddr, true);227 if ( pte && pte->p && pte->w) {230 bool found = page_mapping_find(AS, badvaddr, true, &pte); 231 if (found && pte.p && pte.w) { 228 232 /* 229 233 * Read the faulting TLB entry. … … 234 238 * Record access and write to PTE. 235 239 */ 236 pte->a = 1; 237 pte->d = 1; 238 239 tlb_prepare_entry_lo(&lo, pte->g, pte->p, pte->w, 240 pte->cacheable, pte->pfn); 240 pte.a = 1; 241 pte.d = 1; 242 243 tlb_prepare_entry_lo(&lo, pte.g, pte.p, pte.w, 244 pte.cacheable, pte.pfn); 245 246 page_mapping_update(AS, badvaddr, true, &pte); 241 247 242 248 /* -
kernel/arch/ppc32/include/arch/mm/page.h
rf126c87 r2a2fbc8 (lines 141-144)
  /* Macros for querying the last-level PTEs. */
- #define PTE_VALID_ARCH(pte)      (*((uint32_t *) (pte)) != 0)
+ #define PTE_VALID_ARCH(pte)      ((pte)->valid != 0)
  #define PTE_PRESENT_ARCH(pte)    ((pte)->present != 0)
  #define PTE_GET_FRAME_ARCH(pte)  ((pte)->pfn << 12)
kernel/arch/ppc32/src/mm/pht.c
rf126c87 r2a2fbc8 49 49 * @param access Access mode that caused the fault. 50 50 * @param istate Pointer to interrupted state. 51 * 52 * @return PTE on success, NULL otherwise. 53 * 54 */ 55 static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access, 56 istate_t *istate) 51 * @param[out] pte Structure that will receive a copy of the found PTE. 52 * 53 * @return True if the mapping was found, false otherwise. 54 * 55 */ 56 static bool find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access, 57 istate_t *istate, pte_t *pte) 57 58 { 58 59 /* 59 60 * Check if the mapping exists in page tables. 60 61 */ 61 pte_t *pte = page_mapping_find(as, badvaddr, true);62 if ( (pte) && (pte->present)) {62 bool found = page_mapping_find(as, badvaddr, true, pte); 63 if (found && pte->present) { 63 64 /* 64 65 * Mapping found in page tables. 65 66 * Immediately succeed. 66 67 */ 67 return pte;68 return true; 68 69 } 69 70 /* … … 76 77 * The mapping ought to be in place. 77 78 */ 78 pte = page_mapping_find(as, badvaddr, true); 79 ASSERT((pte) && (pte->present)); 80 return pte; 81 } 82 83 return NULL; 79 found = page_mapping_find(as, badvaddr, true, pte); 80 81 ASSERT(found); 82 ASSERT(pte->present); 83 84 return found; 85 } 86 87 return false; 84 88 } 85 89 … … 182 186 badvaddr = istate->pc; 183 187 184 pte_t *pte = find_mapping_and_check(AS, badvaddr, 185 PF_ACCESS_READ /* FIXME */, istate); 186 187 if (pte) { 188 pte_t pte; 189 bool found = find_mapping_and_check(AS, badvaddr, 190 PF_ACCESS_READ /* FIXME */, istate, &pte); 191 192 if (found) { 188 193 /* Record access to PTE */ 189 pte ->accessed = 1;190 pht_insert(badvaddr, pte);194 pte.accessed = 1; 195 pht_insert(badvaddr, &pte); 191 196 } 192 197 } -
kernel/arch/sparc32/include/arch/mm/page.h
rf126c87 r2a2fbc8 (lines 129-133)
  /* Macros for querying the last level entries. */
  #define PTE_VALID_ARCH(p) \
-     (*((uint32_t *) (p)) != 0)
+     ((p)->et != PTE_ET_INVALID)
  #define PTE_PRESENT_ARCH(p) \
      ((p)->et != 0)
kernel/arch/sparc64/src/mm/sun4u/tlb.c
rf126c87 r2a2fbc8 197 197 { 198 198 size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE; 199 pte_t *t;200 201 t = page_mapping_find(AS, istate->tpc, true);202 if ( t && PTE_EXECUTABLE(t)) {199 pte_t t; 200 201 bool found = page_mapping_find(AS, istate->tpc, true, &t); 202 if (found && PTE_EXECUTABLE(&t)) { 203 203 /* 204 204 * The mapping was found in the software page hash table. 205 205 * Insert it into ITLB. 206 206 */ 207 t ->a = true;208 itlb_pte_copy( t, index);207 t.a = true; 208 itlb_pte_copy(&t, index); 209 209 #ifdef CONFIG_TSB 210 itsb_pte_copy(t, index); 211 #endif 210 itsb_pte_copy(&t, index); 211 #endif 212 page_mapping_update(AS, istate->tpc, true, &t); 212 213 } else { 213 214 /* … … 233 234 uintptr_t page_16k; 234 235 size_t index; 235 pte_t *t;236 pte_t t; 236 237 as_t *as = AS; 237 238 … … 253 254 } 254 255 255 t = page_mapping_find(as, page_16k, true);256 if ( t) {256 bool found = page_mapping_find(as, page_16k, true, &t); 257 if (found) { 257 258 /* 258 259 * The mapping was found in the software page hash table. 259 260 * Insert it into DTLB. 260 261 */ 261 t ->a = true;262 dtlb_pte_copy( t, index, true);262 t.a = true; 263 dtlb_pte_copy(&t, index, true); 263 264 #ifdef CONFIG_TSB 264 dtsb_pte_copy(t, index, true); 265 #endif 265 dtsb_pte_copy(&t, index, true); 266 #endif 267 page_mapping_update(as, page_16k, true, &t); 266 268 } else { 267 269 /* … … 283 285 uintptr_t page_16k; 284 286 size_t index; 285 pte_t *t;287 pte_t t; 286 288 as_t *as = AS; 287 289 … … 293 295 as = AS_KERNEL; 294 296 295 t = page_mapping_find(as, page_16k, true);296 if ( t && PTE_WRITABLE(t)) {297 bool found = page_mapping_find(as, page_16k, true, &t); 298 if (found && PTE_WRITABLE(&t)) { 297 299 /* 298 300 * The mapping was found in the software page hash table and is … … 300 302 * into DTLB. 301 303 */ 302 t ->a = true;303 t ->d = true;304 t.a = true; 305 t.d = true; 304 306 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, 305 307 page_16k + index * MMU_PAGE_SIZE); 306 dtlb_pte_copy( t, index, false);308 dtlb_pte_copy(&t, index, false); 307 309 #ifdef CONFIG_TSB 308 dtsb_pte_copy(t, index, false); 309 #endif 310 dtsb_pte_copy(&t, index, false); 311 #endif 312 page_mapping_update(as, page_16k, true, &t); 310 313 } else { 311 314 /* -
kernel/arch/sparc64/src/mm/sun4v/tlb.c
rf126c87 r2a2fbc8 211 211 { 212 212 uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE); 213 pte_t *t; 214 215 t = page_mapping_find(AS, va, true); 216 217 if (t && PTE_EXECUTABLE(t)) { 213 pte_t t; 214 215 bool found = page_mapping_find(AS, va, true, &t); 216 if (found && PTE_EXECUTABLE(&t)) { 218 217 /* 219 218 * The mapping was found in the software page hash table. 220 219 * Insert it into ITLB. 221 220 */ 222 t ->a = true;223 itlb_pte_copy( t);221 t.a = true; 222 itlb_pte_copy(&t); 224 223 #ifdef CONFIG_TSB 225 itsb_pte_copy(t); 226 #endif 224 itsb_pte_copy(&t); 225 #endif 226 page_mapping_update(AS, va, true, &t); 227 227 } else { 228 228 /* … … 244 244 void fast_data_access_mmu_miss(unsigned int tt, istate_t *istate) 245 245 { 246 pte_t *t;246 pte_t t; 247 247 uintptr_t va = DMISS_ADDRESS(istate->tlb_tag_access); 248 248 uint16_t ctx = DMISS_CONTEXT(istate->tlb_tag_access); … … 261 261 } 262 262 263 t = page_mapping_find(as, va, true);264 if ( t) {263 bool found = page_mapping_find(as, va, true, &t); 264 if (found) { 265 265 /* 266 266 * The mapping was found in the software page hash table. 267 267 * Insert it into DTLB. 268 268 */ 269 t ->a = true;270 dtlb_pte_copy( t, true);269 t.a = true; 270 dtlb_pte_copy(&t, true); 271 271 #ifdef CONFIG_TSB 272 dtsb_pte_copy(t, true); 273 #endif 272 dtsb_pte_copy(&t, true); 273 #endif 274 page_mapping_update(as, va, true, &t); 274 275 } else { 275 276 /* … … 288 289 void fast_data_access_protection(unsigned int tt, istate_t *istate) 289 290 { 290 pte_t *t;291 pte_t t; 291 292 uintptr_t va = DMISS_ADDRESS(istate->tlb_tag_access); 292 293 uint16_t ctx = DMISS_CONTEXT(istate->tlb_tag_access); … … 296 297 as = AS_KERNEL; 297 298 298 t = page_mapping_find(as, va, true);299 if ( t && PTE_WRITABLE(t)) {299 bool found = page_mapping_find(as, va, true, &t); 300 if (found && PTE_WRITABLE(&t)) { 300 301 /* 301 302 * The mapping was found in the software page hash table and is … … 303 304 * into DTLB. 304 305 */ 305 t ->a = true;306 t ->d = true;306 t.a = true; 307 t.d = true; 307 308 mmu_demap_page(va, ctx, MMU_FLAG_DTLB); 308 dtlb_pte_copy( t, false);309 dtlb_pte_copy(&t, false); 309 310 #ifdef CONFIG_TSB 310 dtsb_pte_copy(t, false); 311 #endif 311 dtsb_pte_copy(&t, false); 312 #endif 313 page_mapping_update(as, va, true, &t); 312 314 } else { 313 315 /* -
kernel/genarch/include/genarch/mm/page_ht.h
rf126c87 r2a2fbc8 (lines 44-47)
  #include <mm/page.h>
  #include <mm/slab.h>
- #include <synch/mutex.h>
  #include <adt/hash_table.h>
…
(lines 55-59)
  /* Macros for querying page hash table PTEs. */
- #define PTE_VALID(pte)      ((pte) != NULL)
+ #define PTE_VALID(pte)      ((void *) (pte) != NULL)
  #define PTE_PRESENT(pte)    ((pte)->p != 0)
  #define PTE_GET_FRAME(pte)  ((pte)->frame)
…
(lines 66-70)
  extern slab_cache_t *pte_cache;
- extern mutex_t page_ht_lock;
  extern hash_table_t page_ht;
  extern hash_table_operations_t ht_operations;
kernel/genarch/src/mm/as_ht.c
rf126c87 r2a2fbc8 77 77 if (flags & FLAG_AS_KERNEL) { 78 78 hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations); 79 mutex_initialize(&page_ht_lock, MUTEX_PASSIVE);80 79 pte_cache = slab_cache_create("pte_t", sizeof(pte_t), 0, 81 80 NULL, NULL, SLAB_CACHE_MAGDEFERRED); … … 99 98 /** Lock page table. 100 99 * 101 * Lock address space and page hash table.100 * Lock address space. 102 101 * Interrupts must be disabled. 103 102 * … … 110 109 if (lock) 111 110 mutex_lock(&as->lock); 112 113 mutex_lock(&page_ht_lock);114 111 } 115 112 116 113 /** Unlock page table. 117 114 * 118 * Unlock address space and page hash table.115 * Unlock address space. 119 116 * Interrupts must be disabled. 120 117 * … … 125 122 void ht_unlock(as_t *as, bool unlock) 126 123 { 127 mutex_unlock(&page_ht_lock);128 129 124 if (unlock) 130 125 mutex_unlock(&as->lock); … … 140 135 bool ht_locked(as_t *as) 141 136 { 142 return (mutex_locked(&page_ht_lock) && mutex_locked(&as->lock));137 return mutex_locked(&as->lock); 143 138 } 144 139 -
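After this change the ht_lock()/ht_unlock()/ht_locked() helpers only deal with the address-space mutex; serialization of the hash table itself moves into page_ht.c. A condensed sketch of the helpers as they stand after the hunks above:

    void ht_lock(as_t *as, bool lock)
    {
        if (lock)
            mutex_lock(&as->lock);
    }

    void ht_unlock(as_t *as, bool unlock)
    {
        if (unlock)
            mutex_unlock(&as->lock);
    }

    bool ht_locked(as_t *as)
    {
        return mutex_locked(&as->lock);
    }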
kernel/genarch/src/mm/page_ht.c
rf126c87 r2a2fbc8 59 59 static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 60 60 static void ht_mapping_remove(as_t *, uintptr_t); 61 static pte_t *ht_mapping_find(as_t *, uintptr_t, bool); 61 static bool ht_mapping_find(as_t *, uintptr_t, bool, pte_t *); 62 static void ht_mapping_update(as_t *, uintptr_t, bool, pte_t *); 62 63 static void ht_mapping_make_global(uintptr_t, size_t); 63 64 … … 70 71 * 71 72 */ 72 mutex_t page_ht_lock;73 IRQ_SPINLOCK_STATIC_INITIALIZE(page_ht_lock); 73 74 74 75 /** Page hash table. … … 91 92 .mapping_remove = ht_mapping_remove, 92 93 .mapping_find = ht_mapping_find, 94 .mapping_update = ht_mapping_update, 93 95 .mapping_make_global = ht_mapping_make_global 94 96 }; … … 191 193 192 194 ASSERT(page_table_locked(as)); 195 196 irq_spinlock_lock(&page_ht_lock, true); 193 197 194 198 if (!hash_table_find(&page_ht, key)) { … … 217 221 hash_table_insert(&page_ht, key, &pte->link); 218 222 } 223 224 irq_spinlock_unlock(&page_ht_lock, true); 219 225 } 220 226 … … 238 244 ASSERT(page_table_locked(as)); 239 245 246 irq_spinlock_lock(&page_ht_lock, true); 247 240 248 /* 241 249 * Note that removed PTE's will be freed … … 243 251 */ 244 252 hash_table_remove(&page_ht, key, 2); 245 } 246 247 248 /** Find mapping for virtual page in page hash table. 249 * 250 * @param as Address space to which page belongs. 251 * @param page Virtual page. 252 * @param nolock True if the page tables need not be locked. 253 * 254 * @return NULL if there is no such mapping; requested mapping otherwise. 255 * 256 */ 257 pte_t *ht_mapping_find(as_t *as, uintptr_t page, bool nolock) 253 254 irq_spinlock_unlock(&page_ht_lock, true); 255 } 256 257 static pte_t *ht_mapping_find_internal(as_t *as, uintptr_t page, bool nolock) 258 258 { 259 259 sysarg_t key[2] = { … … 263 263 264 264 ASSERT(nolock || page_table_locked(as)); 265 265 266 266 link_t *cur = hash_table_find(&page_ht, key); 267 267 if (cur) … … 271 271 } 272 272 273 /** Find mapping for virtual page in page hash table. 274 * 275 * @param as Address space to which page belongs. 276 * @param page Virtual page. 277 * @param nolock True if the page tables need not be locked. 278 * @param[out] pte Structure that will receive a copy of the found PTE. 279 * 280 * @return True if the mapping was found, false otherwise. 281 */ 282 bool ht_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte) 283 { 284 irq_spinlock_lock(&page_ht_lock, true); 285 286 pte_t *t = ht_mapping_find_internal(as, page, nolock); 287 if (t) 288 *pte = *t; 289 290 irq_spinlock_unlock(&page_ht_lock, true); 291 292 return t != NULL; 293 } 294 295 /** Update mapping for virtual page in page hash table. 296 * 297 * @param as Address space to which page belongs. 298 * @param page Virtual page. 299 * @param nolock True if the page tables need not be locked. 300 * @param pte New PTE. 
301 */ 302 void ht_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte) 303 { 304 irq_spinlock_lock(&page_ht_lock, true); 305 306 pte_t *t = ht_mapping_find_internal(as, page, nolock); 307 if (!t) 308 panic("Updating non-existent PTE"); 309 310 ASSERT(pte->as == t->as); 311 ASSERT(pte->page == t->page); 312 ASSERT(pte->frame == t->frame); 313 ASSERT(pte->g == t->g); 314 ASSERT(pte->x == t->x); 315 ASSERT(pte->w == t->w); 316 ASSERT(pte->k == t->k); 317 ASSERT(pte->c == t->c); 318 ASSERT(pte->p == t->p); 319 320 t->a = pte->a; 321 t->d = pte->d; 322 323 irq_spinlock_unlock(&page_ht_lock, true); 324 } 325 273 326 void ht_mapping_make_global(uintptr_t base, size_t size) 274 327 { -
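In the hash-table backend the external mutex is replaced by an IRQ spinlock taken inside the operations themselves, and callers now receive (and pass back) PTE copies. The two central entry points from the hunks above, condensed into a sketch (assertions and @param documentation abbreviated):

    bool ht_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
    {
        irq_spinlock_lock(&page_ht_lock, true);

        pte_t *t = ht_mapping_find_internal(as, page, nolock);
        if (t)
            *pte = *t;      /* hand out a copy, not a pointer into the table */

        irq_spinlock_unlock(&page_ht_lock, true);

        return t != NULL;
    }

    void ht_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
    {
        irq_spinlock_lock(&page_ht_lock, true);

        pte_t *t = ht_mapping_find_internal(as, page, nolock);
        if (!t)
            panic("Updating non-existent PTE");

        /* Only the accessed and dirty bits may differ from the stored PTE. */
        t->a = pte->a;
        t->d = pte->d;

        irq_spinlock_unlock(&page_ht_lock, true);
    }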
kernel/genarch/src/mm/page_pt.c
rf126c87 r2a2fbc8 53 53 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int); 54 54 static void pt_mapping_remove(as_t *, uintptr_t); 55 static pte_t *pt_mapping_find(as_t *, uintptr_t, bool); 55 static bool pt_mapping_find(as_t *, uintptr_t, bool, pte_t *pte); 56 static void pt_mapping_update(as_t *, uintptr_t, bool, pte_t *pte); 56 57 static void pt_mapping_make_global(uintptr_t, size_t); 57 58 … … 60 61 .mapping_remove = pt_mapping_remove, 61 62 .mapping_find = pt_mapping_find, 63 .mapping_update = pt_mapping_update, 62 64 .mapping_make_global = pt_mapping_make_global 63 65 }; … … 289 291 } 290 292 291 /** Find mapping for virtual page in hierarchical page tables. 292 * 293 * @param as Address space to which page belongs. 294 * @param page Virtual page. 295 * @param nolock True if the page tables need not be locked. 296 * 297 * @return NULL if there is no such mapping; entry from PTL3 describing 298 * the mapping otherwise. 299 * 300 */ 301 pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock) 293 static pte_t *pt_mapping_find_internal(as_t *as, uintptr_t page, bool nolock) 302 294 { 303 295 ASSERT(nolock || page_table_locked(as)); … … 334 326 335 327 return &ptl3[PTL3_INDEX(page)]; 328 } 329 330 /** Find mapping for virtual page in hierarchical page tables. 331 * 332 * @param as Address space to which page belongs. 333 * @param page Virtual page. 334 * @param nolock True if the page tables need not be locked. 335 * @param[out] pte Structure that will receive a copy of the found PTE. 336 * 337 * @return True if the mapping was found, false otherwise. 338 */ 339 bool pt_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte) 340 { 341 pte_t *t = pt_mapping_find_internal(as, page, nolock); 342 if (t) 343 *pte = *t; 344 return t != NULL; 345 } 346 347 /** Update mapping for virtual page in hierarchical page tables. 348 * 349 * @param as Address space to which page belongs. 350 * @param page Virtual page. 351 * @param nolock True if the page tables need not be locked. 352 * @param[in] pte New PTE. 353 */ 354 void pt_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte) 355 { 356 pte_t *t = pt_mapping_find_internal(as, page, nolock); 357 if (!t) 358 panic("Updating non-existent PTE"); 359 360 ASSERT(PTE_VALID(t) == PTE_VALID(pte)); 361 ASSERT(PTE_PRESENT(t) == PTE_PRESENT(pte)); 362 ASSERT(PTE_GET_FRAME(t) == PTE_GET_FRAME(pte)); 363 ASSERT(PTE_WRITABLE(t) == PTE_WRITABLE(pte)); 364 ASSERT(PTE_EXECUTABLE(t) == PTE_EXECUTABLE(pte)); 365 366 *t = *pte; 336 367 } 337 368 -
kernel/generic/include/mm/page.h
rf126c87 r2a2fbc8 (lines 48-53)
  void (* mapping_insert)(as_t *, uintptr_t, uintptr_t, unsigned int);
  void (* mapping_remove)(as_t *, uintptr_t);
- pte_t *(* mapping_find)(as_t *, uintptr_t, bool);
+ bool (* mapping_find)(as_t *, uintptr_t, bool, pte_t *);
+ void (* mapping_update)(as_t *, uintptr_t, bool, pte_t *);
  void (* mapping_make_global)(uintptr_t, size_t);
  } page_mapping_operations_t;
…
(lines 60-66)
  extern void page_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
  extern void page_mapping_remove(as_t *, uintptr_t);
- extern pte_t *page_mapping_find(as_t *, uintptr_t, bool);
+ extern bool page_mapping_find(as_t *, uintptr_t, bool, pte_t *);
+ extern void page_mapping_update(as_t *, uintptr_t, bool, pte_t *);
  extern void page_mapping_make_global(uintptr_t, size_t);
  extern pte_t *page_table_create(unsigned int);
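Each page-table backend supplies these operations through a page_mapping_operations_t table, and both the hash-table and the hierarchical backend gain a mapping_update entry in this changeset. A sketch of the wiring for the hash-table backend (the hook assignments are taken from the page_ht.c hunk above; the variable name used here is illustrative, the real definition lives in page_ht.c):

    static page_mapping_operations_t ht_ops_sketch = {
        .mapping_insert = ht_mapping_insert,
        .mapping_remove = ht_mapping_remove,
        .mapping_find = ht_mapping_find,
        .mapping_update = ht_mapping_update,
        .mapping_make_global = ht_mapping_make_global
    };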
kernel/generic/src/ipc/ops/pagein.c
rf126c87 r2a2fbc8 (lines 47-58)
  {
      if (!IPC_GET_RETVAL(answer->data)) {
-         pte_t *pte;
+         pte_t pte;
          uintptr_t frame;

          page_table_lock(AS, true);
-         pte = page_mapping_find(AS, IPC_GET_ARG1(answer->data), false);
-         if (pte) {
-             frame = PTE_GET_FRAME(pte);
+         bool found = page_mapping_find(AS, IPC_GET_ARG1(answer->data),
+             false, &pte);
+         if (found) {
+             frame = PTE_GET_FRAME(&pte);
              pfn_t pfn = ADDR2PFN(frame);
              if (find_zone(pfn, 1, 0) != (size_t) -1) {
kernel/generic/src/mm/as.c
rf126c87 r2a2fbc8 889 889 890 890 for (; i < node_size; i++) { 891 pte_t *pte = page_mapping_find(as, 892 ptr + P2SZ(i), false); 891 pte_t pte; 892 bool found = page_mapping_find(as, 893 ptr + P2SZ(i), false, &pte); 893 894 894 ASSERT( pte);895 ASSERT(PTE_VALID( pte));896 ASSERT(PTE_PRESENT( pte));895 ASSERT(found); 896 ASSERT(PTE_VALID(&pte)); 897 ASSERT(PTE_PRESENT(&pte)); 897 898 898 899 if ((area->backend) && … … 900 901 area->backend->frame_free(area, 901 902 ptr + P2SZ(i), 902 PTE_GET_FRAME( pte));903 PTE_GET_FRAME(&pte)); 903 904 } 904 905 … … 1003 1004 1004 1005 for (size = 0; size < (size_t) node->value[i]; size++) { 1005 pte_t *pte = page_mapping_find(as, 1006 ptr + P2SZ(size), false); 1006 pte_t pte; 1007 bool found = page_mapping_find(as, 1008 ptr + P2SZ(size), false, &pte); 1007 1009 1008 ASSERT( pte);1009 ASSERT(PTE_VALID( pte));1010 ASSERT(PTE_PRESENT( pte));1010 ASSERT(found); 1011 ASSERT(PTE_VALID(&pte)); 1012 ASSERT(PTE_PRESENT(&pte)); 1011 1013 1012 1014 if ((area->backend) && … … 1014 1016 area->backend->frame_free(area, 1015 1017 ptr + P2SZ(size), 1016 PTE_GET_FRAME( pte));1018 PTE_GET_FRAME(&pte)); 1017 1019 } 1018 1020 … … 1315 1317 1316 1318 for (size = 0; size < (size_t) node->value[i]; size++) { 1317 pte_t *pte = page_mapping_find(as, 1318 ptr + P2SZ(size), false); 1319 pte_t pte; 1320 bool found = page_mapping_find(as, 1321 ptr + P2SZ(size), false, &pte); 1319 1322 1320 ASSERT( pte);1321 ASSERT(PTE_VALID( pte));1322 ASSERT(PTE_PRESENT( pte));1323 ASSERT(found); 1324 ASSERT(PTE_VALID(&pte)); 1325 ASSERT(PTE_PRESENT(&pte)); 1323 1326 1324 old_frame[frame_idx++] = PTE_GET_FRAME( pte);1327 old_frame[frame_idx++] = PTE_GET_FRAME(&pte); 1325 1328 1326 1329 /* Remove old mapping */ … … 1452 1455 * we need to make sure the mapping has not been already inserted. 1453 1456 */ 1454 pte_t *pte; 1455 if ((pte = page_mapping_find(AS, page, false))) { 1456 if (PTE_PRESENT(pte)) { 1457 if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) || 1458 (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) || 1459 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) { 1457 pte_t pte; 1458 bool found = page_mapping_find(AS, page, false, &pte); 1459 if (found) { 1460 if (PTE_PRESENT(&pte)) { 1461 if (((access == PF_ACCESS_READ) && PTE_READABLE(&pte)) || 1462 (access == PF_ACCESS_WRITE && PTE_WRITABLE(&pte)) || 1463 (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(&pte))) { 1460 1464 page_table_unlock(AS, false); 1461 1465 mutex_unlock(&area->lock); -
kernel/generic/src/mm/backend_anon.c
rf126c87 r2a2fbc8 131 131 132 132 for (j = 0; j < count; j++) { 133 pte_t *pte; 133 pte_t pte; 134 bool found; 134 135 135 136 page_table_lock(area->as, false); 136 pte = page_mapping_find(area->as, 137 base + P2SZ(j), false); 138 ASSERT(pte && PTE_VALID(pte) && 139 PTE_PRESENT(pte)); 137 found = page_mapping_find(area->as, 138 base + P2SZ(j), false, &pte); 139 140 ASSERT(found); 141 ASSERT(PTE_VALID(&pte)); 142 ASSERT(PTE_PRESENT(&pte)); 143 140 144 btree_insert(&area->sh_info->pagemap, 141 145 (base + P2SZ(j)) - area->base, 142 (void *) PTE_GET_FRAME( pte), NULL);146 (void *) PTE_GET_FRAME(&pte), NULL); 143 147 page_table_unlock(area->as, false); 144 148 145 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME( pte));149 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 146 150 frame_reference_add(pfn); 147 151 } -
kernel/generic/src/mm/backend_elf.c
rf126c87 r2a2fbc8 184 184 185 185 for (j = 0; j < count; j++) { 186 pte_t *pte; 186 pte_t pte; 187 bool found; 187 188 188 189 /* … … 196 197 197 198 page_table_lock(area->as, false); 198 pte = page_mapping_find(area->as, 199 base + P2SZ(j), false); 200 ASSERT(pte && PTE_VALID(pte) && 201 PTE_PRESENT(pte)); 199 found = page_mapping_find(area->as, 200 base + P2SZ(j), false, &pte); 201 202 ASSERT(found); 203 ASSERT(PTE_VALID(&pte)); 204 ASSERT(PTE_PRESENT(&pte)); 205 202 206 btree_insert(&area->sh_info->pagemap, 203 207 (base + P2SZ(j)) - area->base, 204 (void *) PTE_GET_FRAME( pte), NULL);208 (void *) PTE_GET_FRAME(&pte), NULL); 205 209 page_table_unlock(area->as, false); 206 210 207 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME( pte));211 pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte)); 208 212 frame_reference_add(pfn); 209 213 } … … 335 339 dirty = true; 336 340 } else { 337 pte_t *pte = page_mapping_find(AS_KERNEL, 338 base + i * FRAME_SIZE, true); 339 340 ASSERT(pte); 341 ASSERT(PTE_PRESENT(pte)); 342 343 frame = PTE_GET_FRAME(pte); 341 pte_t pte; 342 bool found; 343 344 found = page_mapping_find(AS_KERNEL, 345 base + i * FRAME_SIZE, true, &pte); 346 347 ASSERT(found); 348 ASSERT(PTE_PRESENT(&pte)); 349 350 frame = PTE_GET_FRAME(&pte); 344 351 } 345 352 } else if (upage >= start_anon) { -
kernel/generic/src/mm/page.c
rf126c87 r2a2fbc8 137 137 /** Find mapping for virtual page. 138 138 * 139 * @param as Address space to which page belongs.140 * @param page Virtual page.141 * @param nolock True if the page tables need not be locked.142 * 143 * @return NULL if there is no such mapping; requested mapping144 * 145 * 146 */ 147 NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock)139 * @param as Address space to which page belongs. 140 * @param page Virtual page. 141 * @param nolock True if the page tables need not be locked. 142 * @param[out] pte Structure that will receive a copy of the found PTE. 143 * 144 * @return True if the mapping was found, false otherwise. 145 */ 146 NO_TRACE bool page_mapping_find(as_t *as, uintptr_t page, bool nolock, 147 pte_t *pte) 148 148 { 149 149 ASSERT(nolock || page_table_locked(as)); … … 153 153 154 154 return page_mapping_operations->mapping_find(as, 155 ALIGN_DOWN(page, PAGE_SIZE), nolock); 155 ALIGN_DOWN(page, PAGE_SIZE), nolock, pte); 156 } 157 158 /** Update mapping for virtual page. 159 * 160 * Use only to update accessed and modified/dirty bits. 161 * 162 * @param as Address space to which page belongs. 163 * @param page Virtual page. 164 * @param nolock True if the page tables need not be locked. 165 * @param pte New PTE. 166 */ 167 NO_TRACE void page_mapping_update(as_t *as, uintptr_t page, bool nolock, 168 pte_t *pte) 169 { 170 ASSERT(nolock || page_table_locked(as)); 171 172 ASSERT(page_mapping_operations); 173 ASSERT(page_mapping_operations->mapping_find); 174 175 page_mapping_operations->mapping_update(as, 176 ALIGN_DOWN(page, PAGE_SIZE), nolock, pte); 156 177 } 157 178 … … 173 194 page_table_lock(AS, true); 174 195 175 pte_t *pte = page_mapping_find(AS, virt, false); 176 if ((!PTE_VALID(pte)) || (!PTE_PRESENT(pte))) { 196 pte_t pte; 197 bool found = page_mapping_find(AS, virt, false, &pte); 198 if (!found || !PTE_VALID(&pte) || !PTE_PRESENT(&pte)) { 177 199 page_table_unlock(AS, true); 178 200 return ENOENT; 179 201 } 180 202 181 *phys = PTE_GET_FRAME( pte) +203 *phys = PTE_GET_FRAME(&pte) + 182 204 (virt - ALIGN_DOWN(virt, PAGE_SIZE)); 183 205 -
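For read-only users the conversion is mechanical: request a copy, check it, and read the frame address from the copy. A hypothetical caller, modelled on the virtual-to-physical translation hunk above (the function name is illustrative, not part of the changeset):

    int virt_to_phys_sketch(uintptr_t virt, uintptr_t *phys)
    {
        page_table_lock(AS, true);

        pte_t pte;
        bool found = page_mapping_find(AS, virt, false, &pte);
        if (!found || !PTE_VALID(&pte) || !PTE_PRESENT(&pte)) {
            page_table_unlock(AS, true);
            return ENOENT;
        }

        /* Frame base plus the offset of virt within its page. */
        *phys = PTE_GET_FRAME(&pte) + (virt - ALIGN_DOWN(virt, PAGE_SIZE));

        page_table_unlock(AS, true);
        return EOK;
    }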
kernel/generic/src/synch/futex.c
rf126c87 r2a2fbc8 (lines 291-309)
  spinlock_lock(&futex_ht_lock);

- bool found = false;
- pte_t *t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true);
-
- if (t && PTE_VALID(t) && PTE_PRESENT(t)) {
-     found = true;
-     *paddr = PTE_GET_FRAME(t) + (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
+ bool success = false;
+
+ pte_t t;
+ bool found;
+
+ found = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), true, &t);
+ if (found && PTE_VALID(&t) && PTE_PRESENT(&t)) {
+     success = true;
+     *paddr = PTE_GET_FRAME(&t) +
+         (uaddr - ALIGN_DOWN(uaddr, PAGE_SIZE));
  }
…
  page_table_unlock(AS, false);

- return found;
+ return success;