Changeset f47fd19 in mainline

Timestamp: 2006-08-21T13:36:34Z (18 years ago)
Branches:  lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:  a796127
Parents:   ee289cf0
Location:  kernel
Files:     23 edited
Legend (line prefixes in the diffs below):
  ' '  unmodified
  '+'  added
  '-'  removed
kernel/arch/ia64/src/mm/tlb.c
@@ -507,5 +507,5 @@
 	if (t) {
 		/*
-		 * The mapping was found in software page hash table.
+		 * The mapping was found in the software page hash table.
 		 * Insert it into data translation cache.
 		 */
@@ -514,5 +514,5 @@
 	} else {
 		/*
-		 * Forward the page fault to address space page fault handler.
+		 * Forward the page fault to the address space page fault handler.
 		 */
 		page_table_unlock(AS, true);
kernel/arch/sparc64/include/context.h
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup sparc64
  * @{
  */
@@ -90,5 +90,4 @@
 #endif
 
 /** @}
  */
-
kernel/arch/sparc64/include/context_offset.h
@@ -21,5 +21,2 @@
 #define OFFSET_CLEANWIN	0x98
 
-/** @}
- */
-
kernel/arch/sparc64/include/interrupt.h
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup sparc64interrupt sparc64
  * @ingroup interrupt
  * @{
@@ -53,4 +53,8 @@
 
 struct istate {
+	uint64_t pstate;
+	uint64_t tnpc;
+	uint64_t tpc;
+	uint64_t tstate;
 };
 
@@ -75,5 +79,4 @@
 #endif
 
 /** @}
  */
-
kernel/arch/sparc64/include/mm/tlb.h
@@ -67,4 +67,5 @@
 /* TLB Tag Access shifts */
 #define TLB_TAG_ACCESS_CONTEXT_SHIFT	0
+#define TLB_TAG_ACCESS_CONTEXT_MASK	((1<<13)-1)
 #define TLB_TAG_ACCESS_VPN_SHIFT	13
 
@@ -108,5 +109,5 @@
 	struct {
 		uint64_t vpn : 51;	/**< Virtual Address bits 63:13. */
-		unsigned context : 13;	/**< Context identifier. */
+		unsigned context : 13;	/**< Context identifier. */
 	} __attribute__ ((packed));
 };
@@ -119,5 +120,5 @@
 	uint64_t value;
 	struct {
-		uint64_t vpn: 51;
+		uint64_t vpn: 51;	/**< Virtual Address bits 63:13. */
 		unsigned : 6;		/**< Ignored. */
 		unsigned type : 1;	/**< The type of demap operation. */
@@ -132,9 +133,7 @@
 	uint64_t value;
 	struct {
-		unsigned long : 39;	/**< Implementation dependent. */
-		unsigned nf : 1;	/**< Nonfaulting load. */
+		unsigned long : 40;	/**< Implementation dependent. */
 		unsigned asi : 8;	/**< ASI. */
-		unsigned tm : 1;	/**< TLB miss. */
-		unsigned : 1;
+		unsigned : 2;
 		unsigned ft : 7;	/**< Fault type. */
 		unsigned e : 1;		/**< Side-effect bit. */
@@ -426,7 +425,7 @@
 }
 
-extern void fast_instruction_access_mmu_miss(void);
-extern void fast_data_access_mmu_miss(void);
-extern void fast_data_access_protection(void);
+extern void fast_instruction_access_mmu_miss(int n, istate_t *istate);
+extern void fast_data_access_mmu_miss(int n, istate_t *istate);
+extern void fast_data_access_protection(int n, istate_t *istate);
 
 extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable);
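The new TLB_TAG_ACCESS_CONTEXT_MASK complements TLB_TAG_ACCESS_VPN_SHIFT: together they split the DMMU Tag Access register into its two fields. A minimal C sketch of that split (the helper names are illustrative only; the changeset itself reads the fields through the tlb_tag_access_reg_t bitfields defined in this header):

    #include <arch/mm/tlb.h>

    /* Illustrative helpers; not part of the changeset. */
    static inline uint64_t tag_access_context(uint64_t tag)
    {
            /* Bits 12:0 hold the context identifier. */
            return tag & TLB_TAG_ACCESS_CONTEXT_MASK;
    }

    static inline uint64_t tag_access_vpn(uint64_t tag)
    {
            /* Bits 63:13 hold the virtual page number. */
            return tag >> TLB_TAG_ACCESS_VPN_SHIFT;
    }

The same mask is what lets the new assembly fast path in trap/mmu.h below separate the Context from the page address with a single andcc/andncc pair.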
kernel/arch/sparc64/include/regdef.h
@@ -39,4 +39,8 @@
 #define PSTATE_AM_BIT	8
 
+#define PSTATE_AG_BIT	(1<<0)
+#define PSTATE_IG_BIT	(1<<11)
+#define PSTATE_MG_BIT	(1<<10)
+
 #endif
 
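The three new bits name the PSTATE selectors for the alternate, interrupt and MM global register sets. They are used in trap_table.S below, which previously masked only bit 0 (AG) and now clears all three selectors at once. A hedged sketch of the equivalent C-level operation (the helper name is illustrative, not part of the changeset):

    #include <arch/regdef.h>

    /* Clearing AG, IG and MG in a saved copy of PSTATE selects the
     * normal global register set; illustrative helper only. */
    static inline uint64_t pstate_normal_globals(uint64_t pstate)
    {
            return pstate & ~(PSTATE_AG_BIT | PSTATE_IG_BIT | PSTATE_MG_BIT);
    }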
kernel/arch/sparc64/include/trap/interrupt.h
@@ -82,5 +82,4 @@
 	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
 	mov \n - 1, %o0
-	mov %fp, %o1
 	PREEMPTIBLE_HANDLER exc_dispatch
 .endm
kernel/arch/sparc64/include/trap/mmu.h
@@ -39,4 +39,7 @@
 
 #include <arch/stack.h>
+#include <arch/mm/tlb.h>
+#include <arch/mm/mmu.h>
+#include <arch/mm/tte.h>
 
 #define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS	0x64
@@ -56,8 +59,34 @@
 
 .macro FAST_DATA_ACCESS_MMU_MISS_HANDLER
-	save %sp, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
-	call fast_data_access_mmu_miss
-	nop
-	restore
+	/*
+	 * First, test if it is the portion of the kernel address space
+	 * which is faulting. If that is the case, immediately create
+	 * identity mapping for that page in DTLB. VPN 0 is excluded from
+	 * this treatment.
+	 *
+	 * Note that branch-delay slots are used in order to save space.
+	 */
+	mov VA_DMMU_TAG_ACCESS, %g1
+	ldxa [%g1] ASI_DMMU, %g1		! read the faulting Context and VPN
+	set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
+	andcc %g1, %g2, %g3			! get Context
+	bnz 0f					! Context is non-zero
+	andncc %g1, %g2, %g3			! get page address into %g3
+	bz 0f					! page address is zero
+
+	/*
+	 * Create and insert the identity-mapped entry for
+	 * the faulting kernel page.
+	 */
+
+	or %g3, (TTE_CP|TTE_P|TTE_W), %g2	! 8K pages are the default (encoded as 0)
+	set 1, %g3
+	sllx %g3, TTE_V_SHIFT, %g3
+	or %g2, %g3, %g2
+	stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG	! identity map the kernel page
 	retry
+
+0:
+	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
 .endm
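For clarity, here is the control flow of the new low-level handler rendered as C. This is an illustration only: dtlb_tag_access_read() is the real accessor used in src/mm/tlb.c below, but dtlb_data_in_write() is an assumed name for the stxa to ASI_DTLB_DATA_IN_REG, and the real code must run in the trap context without touching memory that could fault:

    /* C rendering of the assembly fast path above; illustration only. */
    static void fast_dtlb_miss_sketch(void)
    {
            uint64_t tag = dtlb_tag_access_read();
            uint64_t context = tag & TLB_TAG_ACCESS_CONTEXT_MASK;
            uint64_t page = tag & ~(uint64_t) TLB_TAG_ACCESS_CONTEXT_MASK;

            if (context == 0 && page != 0) {
                    /* Kernel fault: build a valid, privileged, writable,
                     * cacheable 8K TTE that identity-maps the page, ... */
                    uint64_t tte = page | TTE_CP | TTE_P | TTE_W |
                        ((uint64_t) 1 << TTE_V_SHIFT);
                    dtlb_data_in_write(tte);    /* assumed accessor name */
                    return;                     /* ... then RETRY in hardware */
            }
            /* Otherwise: PREEMPTIBLE_HANDLER fast_data_access_mmu_miss. */
    }

OR-ing the page address directly into the data word mirrors what the or instruction above does: since VPN occupies bits 63:13 of the tag, the masked tag is already the page-aligned virtual address used as the identity-mapped physical page.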
kernel/arch/sparc64/include/trap/trap_table.h
@@ -78,4 +78,8 @@
 .endm
 
+/*
+ * The following needs to be in sync with the
+ * definition of the istate structure.
+ */
 #define PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE	(STACK_WINDOW_SAVE_AREA_SIZE+(4*8))
 #define SAVED_TSTATE	-(1*8)
kernel/arch/sparc64/src/mm/tlb.c
@@ -35,8 +35,11 @@
 #include <arch/mm/tlb.h>
 #include <mm/tlb.h>
+#include <mm/as.h>
+#include <mm/asid.h>
 #include <arch/mm/frame.h>
 #include <arch/mm/page.h>
 #include <arch/mm/mmu.h>
-#include <mm/asid.h>
+#include <arch/interrupt.h>
+#include <arch.h>
 #include <print.h>
 #include <arch/types.h>
@@ -47,4 +50,7 @@
 #include <arch/asm.h>
 #include <symtab.h>
+
+static void dtlb_pte_copy(pte_t *t);
+static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str);
 
 char *context_encoding[] = {
@@ -100,35 +106,58 @@
 }
 
+void dtlb_pte_copy(pte_t *t)
+{
+}
+
 /** ITLB miss handler. */
-void fast_instruction_access_mmu_miss(void)
+void fast_instruction_access_mmu_miss(int n, istate_t *istate)
 {
 	panic("%s\n", __FUNCTION__);
 }
 
-/** DTLB miss handler. */
-void fast_data_access_mmu_miss(void)
+/** DTLB miss handler.
+ *
+ * Note that some faults (e.g. kernel faults) were already resolved
+ * by the low-level, assembly language part of the fast_data_access_mmu_miss
+ * handler.
+ */
+void fast_data_access_mmu_miss(int n, istate_t *istate)
 {
 	tlb_tag_access_reg_t tag;
-	uintptr_t tpc;
-	char *tpc_str;
+	uintptr_t va;
+	pte_t *t;
 
 	tag.value = dtlb_tag_access_read();
-	if (tag.context != ASID_KERNEL || tag.vpn == 0) {
-		tpc = tpc_read();
-		tpc_str = get_symtab_entry(tpc);
-
-		printf("Faulting page: %p, ASID=%d\n", tag.vpn * PAGE_SIZE, tag.context);
-		printf("TPC=%p, (%s)\n", tpc, tpc_str ? tpc_str : "?");
-		panic("%s\n", __FUNCTION__);
-	}
-
-	/*
-	 * Identity map piece of faulting kernel address space.
-	 */
-	dtlb_insert_mapping(tag.vpn * PAGE_SIZE, tag.vpn * FRAME_SIZE, PAGESIZE_8K, false, true);
+	va = tag.vpn * PAGE_SIZE;
+	if (tag.context == ASID_KERNEL) {
+		if (!tag.vpn) {
+			/* NULL access in kernel */
+			do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
+		}
+		do_fast_data_access_mmu_miss_fault(istate, "Unexpected kernel page fault.");
+	}
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, va);
+	if (t) {
+		/*
+		 * The mapping was found in the software page hash table.
+		 * Insert it into DTLB.
+		 */
+		dtlb_pte_copy(t);
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault handler.
+		 */
+		page_table_unlock(AS, true);
+		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
+			do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
+		}
+	}
 }
 
 /** DTLB protection fault handler. */
-void fast_data_access_protection(void)
+void fast_data_access_protection(int n, istate_t *istate)
 {
 	panic("%s\n", __FUNCTION__);
@@ -162,4 +191,18 @@
 }
 
+void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str)
+{
+	tlb_tag_access_reg_t tag;
+	uintptr_t va;
+	char *tpc_str = get_symtab_entry(istate->tpc);
+
+	tag.value = dtlb_tag_access_read();
+	va = tag.vpn * PAGE_SIZE;
+
+	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
+	printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
+	panic("%s\n", str);
+}
+
 /** Invalidate all unlocked ITLB and DTLB entries. */
 void tlb_invalidate_all(void)
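dtlb_pte_copy() is introduced here only as an empty placeholder: the hash-table path is wired up, but the actual TLB insertion is left for a follow-up. A minimal sketch of what the function is expected to do, assuming the generic pte_t carries page, frame and c (cacheable) fields and reusing dtlb_insert_mapping(), which is declared in arch/mm/tlb.h above:

    /* Sketch only; the changeset leaves the body empty. Field names
     * page, frame and c are assumptions about the generic pte_t. */
    void dtlb_pte_copy(pte_t *t)
    {
            /* Insert an unlocked 8K translation for the page described
             * by the software PTE. */
            dtlb_insert_mapping(t->page, t->frame, PAGESIZE_8K, false, t->c);
    }

Note also that in the kernel-context branch of fast_data_access_mmu_miss() both paths end in do_fast_data_access_mmu_miss_fault(), which panics, so control never returns from that block.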
kernel/arch/sparc64/src/trap/trap_table.S
@@ -44,4 +44,5 @@
 #include <arch/trap/mmu.h>
 #include <arch/stack.h>
+#include <arch/regdef.h>
 
 #define TABLE_SIZE	TRAP_TABLE_SIZE
@@ -276,12 +277,22 @@
 
 
-/* Preemptible trap handler.
- *
- * This trap handler makes arrangements to
- * make calling scheduler() possible.
- *
- * The caller is responsible for doing save
- * and allocating PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE
- * bytes on stack.
+/* Preemptible trap handler for TL=1.
+ *
+ * This trap handler makes arrangements to make calling of scheduler() from
+ * within a trap context possible. It is guaranteed to function only when traps
+ * are not nested (i.e. for TL=1).
+ *
+ * Every trap handler on TL=1 that makes a call to the scheduler needs to
+ * be based on this function. The reason behind it is that the nested
+ * trap levels and the automatic saving of the interrupted context by hardware
+ * does not work well together with scheduling (i.e. a thread cannot be rescheduled
+ * with TL>0). Therefore it is necessary to eliminate the effect of trap levels
+ * by software and save the necessary state on the kernel stack.
+ *
+ * Note that for traps with TL>1, more state needs to be saved. This function
+ * is therefore not going to work when TL>1.
+ *
+ * The caller is responsible for doing SAVE and allocating
+ * PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack.
  *
  * Input registers:
@@ -300,4 +311,9 @@
 	rdpr %pstate, %g4
 
+	/*
+	 * The following memory accesses will not fault
+	 * because special provisions are made to have
+	 * the kernel stack of THREAD locked in DTLB.
+	 */
 	stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE]
 	stx %g2, [%fp + STACK_BIAS + SAVED_TPC]
@@ -314,5 +330,5 @@
 	 * - switch to normal globals.
 	 */
-	and %g4, ~1, %g4				! mask alternate globals
+	and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4
 	wrpr %g4, 0, %pstate
 
@@ -325,9 +341,10 @@
 	 * Call the higher-level handler.
 	 */
+	mov %fp, %o1					! calculate istate address
 	call %l0
-	nop
-
-	/*
-	 * Restore the normal global register set.
+	add %o1, STACK_BIAS + SAVED_PSTATE, %o1		! calculate istate address
+
+	/*
+	 * Restore the normal global register set.
 	 */
 	RESTORE_GLOBALS
@@ -335,5 +352,5 @@
 	/*
 	 * Restore PSTATE from saved copy.
-	 * Alternate globals become active.
+	 * Alternate/Interrupt/MM globals become active.
 	 */
 	ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4
@@ -358,8 +375,8 @@
 
 	/*
-	 * On execution of retry instruction, CWP will be restored from TSTATE register.
-	 * However, because of scheduling, it is possible that CWP in saved TSTATE
-	 * is different from current CWP. The following chunk of code fixes CWP
-	 * in the saved copy of TSTATE.
+	 * On execution of the RETRY instruction, CWP will be restored from the TSTATE
+	 * register. However, because of scheduling, it is possible that CWP in the saved
+	 * TSTATE is different from the current CWP. The following chunk of code fixes
+	 * CWP in the saved copy of TSTATE.
	 */
 	rdpr %cwp, %g4					! read current CWP
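The istate address handed to the higher-level handler is computed as %fp + STACK_BIAS + SAVED_PSTATE, which only works because struct istate (see arch/sparc64/include/interrupt.h above) mirrors, member for member, the save area the handler builds below the biased frame pointer. A sketch of the intended correspondence; only SAVED_TSTATE -(1*8) is visible in this changeset, so the other three offsets are assumptions continuing the same pattern:

    /* Offsets relative to %fp + STACK_BIAS; all but SAVED_TSTATE assumed. */
    #define SAVED_TSTATE    -(1*8)
    #define SAVED_TPC       -(2*8)  /* assumed */
    #define SAVED_TNPC      -(3*8)  /* assumed */
    #define SAVED_PSTATE    -(4*8)  /* assumed; lowest address */

    /* Reading upward from %fp + STACK_BIAS + SAVED_PSTATE then yields
     * the members of istate in declaration order: */
    typedef struct istate {
            uint64_t pstate;        /* at %fp + STACK_BIAS - 4*8 */
            uint64_t tnpc;          /* at %fp + STACK_BIAS - 3*8 */
            uint64_t tpc;           /* at %fp + STACK_BIAS - 2*8 */
            uint64_t tstate;        /* at %fp + STACK_BIAS - 1*8 */
    } istate_t;

This is also why trap_table.h gains the comment that PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, defined as STACK_WINDOW_SAVE_AREA_SIZE+(4*8), must stay in sync with istate: adding a member means growing the frame and adjusting the offsets together.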
kernel/genarch/include/mm/as_ht.h
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -42,5 +42,4 @@
 #endif
 
 /** @}
  */
-
kernel/genarch/include/mm/as_pt.h
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -42,5 +42,4 @@
 #endif
 
 /** @}
  */
-
kernel/genarch/include/mm/page_ht.h
@@ -29,10 +29,8 @@
 /** @addtogroup genarchmm
  * @{
  */
-/** @file
- */
-
-/*
- * This is the generic page hash table interface.
+/**
+ * @file
+ * @brief This is the generic page hash table interface.
  */
 
@@ -88,5 +86,4 @@
 #endif
 
 /** @}
  */
-
kernel/genarch/include/mm/page_pt.h
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -117,5 +117,4 @@
 #endif
 
 /** @}
  */
-
kernel/genarch/src/mm/as_ht.c
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -119,5 +119,4 @@
 }
 
 /** @}
  */
-
kernel/genarch/src/mm/as_pt.c
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -140,5 +140,5 @@
 }
 
 /** @}
  */
 
kernel/genarch/src/mm/asid.c
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -175,5 +175,4 @@
 }
 
 /** @}
  */
-
kernel/genarch/src/mm/asid_fifo.c
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -95,5 +95,4 @@
 }
 
 /** @}
  */
-
kernel/genarch/src/mm/page_ht.c
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -245,5 +245,4 @@
 }
 
-/** @}
- */
-
+/** @}
+ */
kernel/genarch/src/mm/page_pt.c
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genarchmm
  * @{
  */
@@ -266,5 +266,4 @@
 }
 
-/** @}
- */
-
+/** @}
+ */
kernel/generic/include/mm/as.h
@@ -27,5 +27,5 @@
  */
 
 /** @addtogroup genericmm
  * @{
  */
@@ -207,5 +207,4 @@
 #endif
 
-/** @}
- */
-
+/** @}
+ */
kernel/generic/src/mm/as.c
@@ -544,5 +544,5 @@
 	if (!src_area->backend || !src_area->backend->share) {
 		/*
-		 * There is no wbackend or the backend does not
+		 * There is no backend or the backend does not
 		 * know how to share the area.
 		 */