Changeset 5a01f7d in mainline
- Timestamp:
- 2018-03-26T12:35:30Z (7 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 48974d6
- Parents:
- 064e0fd
- git-author:
- Jiri Svoboda <jiri@…> (2018-03-25 18:34:49)
- git-committer:
- Jiri Svoboda <jiri@…> (2018-03-26 12:35:30)
- Location:
- kernel/arch/sparc64/include/arch/trap
- Files:
- 6 added
- 6 edited
Legend:
- Unmodified (no marker)
- Added (+)
- Removed (-)
kernel/arch/sparc64/include/arch/trap/regwin.h
r064e0fd → r5a01f7d

  #ifdef __ASSEMBLER__
-
- /*
-  * Macro used by the nucleus and the primary context 0 during normal and other spills.
-  */
- .macro SPILL_NORMAL_HANDLER_KERNEL
-     stx %l0, [%sp + STACK_BIAS + L0_OFFSET]
-     stx %l1, [%sp + STACK_BIAS + L1_OFFSET]
-     stx %l2, [%sp + STACK_BIAS + L2_OFFSET]
-     stx %l3, [%sp + STACK_BIAS + L3_OFFSET]
-     stx %l4, [%sp + STACK_BIAS + L4_OFFSET]
-     stx %l5, [%sp + STACK_BIAS + L5_OFFSET]
-     stx %l6, [%sp + STACK_BIAS + L6_OFFSET]
-     stx %l7, [%sp + STACK_BIAS + L7_OFFSET]
-     stx %i0, [%sp + STACK_BIAS + I0_OFFSET]
-     stx %i1, [%sp + STACK_BIAS + I1_OFFSET]
-     stx %i2, [%sp + STACK_BIAS + I2_OFFSET]
-     stx %i3, [%sp + STACK_BIAS + I3_OFFSET]
-     stx %i4, [%sp + STACK_BIAS + I4_OFFSET]
-     stx %i5, [%sp + STACK_BIAS + I5_OFFSET]
-     stx %i6, [%sp + STACK_BIAS + I6_OFFSET]
-     stx %i7, [%sp + STACK_BIAS + I7_OFFSET]
-     saved
-     retry
- .endm
-
- /*
-  * Macro used by the userspace during normal spills.
-  */
- .macro SPILL_NORMAL_HANDLER_USERSPACE
-     wr %g0, ASI_AIUP, %asi
-     stxa %l0, [%sp + STACK_BIAS + L0_OFFSET] %asi
-     stxa %l1, [%sp + STACK_BIAS + L1_OFFSET] %asi
-     stxa %l2, [%sp + STACK_BIAS + L2_OFFSET] %asi
-     stxa %l3, [%sp + STACK_BIAS + L3_OFFSET] %asi
-     stxa %l4, [%sp + STACK_BIAS + L4_OFFSET] %asi
-     stxa %l5, [%sp + STACK_BIAS + L5_OFFSET] %asi
-     stxa %l6, [%sp + STACK_BIAS + L6_OFFSET] %asi
-     stxa %l7, [%sp + STACK_BIAS + L7_OFFSET] %asi
-     stxa %i0, [%sp + STACK_BIAS + I0_OFFSET] %asi
-     stxa %i1, [%sp + STACK_BIAS + I1_OFFSET] %asi
-     stxa %i2, [%sp + STACK_BIAS + I2_OFFSET] %asi
-     stxa %i3, [%sp + STACK_BIAS + I3_OFFSET] %asi
-     stxa %i4, [%sp + STACK_BIAS + I4_OFFSET] %asi
-     stxa %i5, [%sp + STACK_BIAS + I5_OFFSET] %asi
-     stxa %i6, [%sp + STACK_BIAS + I6_OFFSET] %asi
-     stxa %i7, [%sp + STACK_BIAS + I7_OFFSET] %asi
-     saved
-     retry
- .endm
-
- /*
-  * Macro used by the nucleus and the primary context 0 during normal fills.
-  */
- .macro FILL_NORMAL_HANDLER_KERNEL
-     ldx [%sp + STACK_BIAS + L0_OFFSET], %l0
-     ldx [%sp + STACK_BIAS + L1_OFFSET], %l1
-     ldx [%sp + STACK_BIAS + L2_OFFSET], %l2
-     ldx [%sp + STACK_BIAS + L3_OFFSET], %l3
-     ldx [%sp + STACK_BIAS + L4_OFFSET], %l4
-     ldx [%sp + STACK_BIAS + L5_OFFSET], %l5
-     ldx [%sp + STACK_BIAS + L6_OFFSET], %l6
-     ldx [%sp + STACK_BIAS + L7_OFFSET], %l7
-     ldx [%sp + STACK_BIAS + I0_OFFSET], %i0
-     ldx [%sp + STACK_BIAS + I1_OFFSET], %i1
-     ldx [%sp + STACK_BIAS + I2_OFFSET], %i2
-     ldx [%sp + STACK_BIAS + I3_OFFSET], %i3
-     ldx [%sp + STACK_BIAS + I4_OFFSET], %i4
-     ldx [%sp + STACK_BIAS + I5_OFFSET], %i5
-     ldx [%sp + STACK_BIAS + I6_OFFSET], %i6
-     ldx [%sp + STACK_BIAS + I7_OFFSET], %i7
-     restored
-     retry
- .endm
-
- /*
-  * Macro used by the userspace during normal fills.
-  */
- .macro FILL_NORMAL_HANDLER_USERSPACE
-     wr %g0, ASI_AIUP, %asi
-     ldxa [%sp + STACK_BIAS + L0_OFFSET] %asi, %l0
-     ldxa [%sp + STACK_BIAS + L1_OFFSET] %asi, %l1
-     ldxa [%sp + STACK_BIAS + L2_OFFSET] %asi, %l2
-     ldxa [%sp + STACK_BIAS + L3_OFFSET] %asi, %l3
-     ldxa [%sp + STACK_BIAS + L4_OFFSET] %asi, %l4
-     ldxa [%sp + STACK_BIAS + L5_OFFSET] %asi, %l5
-     ldxa [%sp + STACK_BIAS + L6_OFFSET] %asi, %l6
-     ldxa [%sp + STACK_BIAS + L7_OFFSET] %asi, %l7
-     ldxa [%sp + STACK_BIAS + I0_OFFSET] %asi, %i0
-     ldxa [%sp + STACK_BIAS + I1_OFFSET] %asi, %i1
-     ldxa [%sp + STACK_BIAS + I2_OFFSET] %asi, %i2
-     ldxa [%sp + STACK_BIAS + I3_OFFSET] %asi, %i3
-     ldxa [%sp + STACK_BIAS + I4_OFFSET] %asi, %i4
-     ldxa [%sp + STACK_BIAS + I5_OFFSET] %asi, %i5
-     ldxa [%sp + STACK_BIAS + I6_OFFSET] %asi, %i6
-     ldxa [%sp + STACK_BIAS + I7_OFFSET] %asi, %i7
-     restored
-     retry
- .endm
-
- .macro CLEAN_WINDOW_HANDLER
-     rdpr %cleanwin, %l0
-     add %l0, 1, %l0
-     wrpr %l0, 0, %cleanwin
- #if defined(SUN4U)
-     mov %r0, %l0
-     mov %r0, %l1
-     mov %r0, %l2
-     mov %r0, %l3
-     mov %r0, %l4
-     mov %r0, %l5
-     mov %r0, %l6
-     mov %r0, %l7
-     mov %r0, %o0
-     mov %r0, %o1
-     mov %r0, %o2
-     mov %r0, %o3
-     mov %r0, %o4
-     mov %r0, %o5
-     mov %r0, %o6
-     mov %r0, %o7
- #endif
-     retry
- .endm
+ #include <arch/trap/regwin.S>
  #endif /* __ASSEMBLER__ */
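The kernel and userspace spill/fill variants removed above differ only in how their stores and loads are translated. A minimal side-by-side, reusing the instructions quoted in the diff (the "as if user, primary" reading of ASI_AIUP is my gloss, not text from the changeset):

    ! kernel variant: ordinary store, translated in the nucleus context
    stx  %l0, [%sp + STACK_BIAS + L0_OFFSET]

    ! userspace variant: prime %asi once, then use alternate-space stores
    ! so the accesses are translated in the user's primary context
    wr   %g0, ASI_AIUP, %asi
    stxa %l0, [%sp + STACK_BIAS + L0_OFFSET] %asi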
kernel/arch/sparc64/include/arch/trap/sun4u/mmu.h
r064e0fd → r5a01f7d

  #ifdef __ASSEMBLER__
-
- .macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
-     /*
-      * First, try to refill TLB from TSB.
-      */
- #ifdef CONFIG_TSB
-     ldxa [%g0] ASI_IMMU, %g1                     ! read TSB Tag Target Register
-     ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2     ! read TSB 8K Pointer
-     ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4         ! 16-byte atomic load into %g4 and %g5
-     cmp %g1, %g4                                 ! is this the entry we are looking for?
-     bne,pn %xcc, 0f
-     nop
-     stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG         ! copy mapping from ITSB to ITLB
-     retry
- #endif
-
- 0:
-     wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
-     mov TT_FAST_INSTRUCTION_ACCESS_MMU_MISS, %g2
-     mov VA_IMMU_TAG_ACCESS, %g5
-     ldxa [%g5] ASI_IMMU, %g5                     ! read the faulting Context and VPN
-     PREEMPTIBLE_HANDLER exc_dispatch
- .endm
-
- .macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
-     /*
-      * First, try to refill TLB from TSB.
-      */
-
- #ifdef CONFIG_TSB
-     ldxa [%g0] ASI_DMMU, %g1                     ! read TSB Tag Target Register
-     srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2  ! is this a kernel miss?
-     brz,pn %g2, 0f
-     ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3     ! read TSB 8K Pointer
-     ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4         ! 16-byte atomic load into %g4 and %g5
-     cmp %g1, %g4                                 ! is this the entry we are looking for?
-     bne,pn %xcc, 0f
-     nop
-     stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG         ! copy mapping from DTSB to DTLB
-     retry
- #endif
-
-     /*
-      * Second, test if it is the portion of the kernel address space
-      * which is faulting. If that is the case, immediately create
-      * identity mapping for that page in DTLB. VPN 0 is excluded from
-      * this treatment.
-      *
-      * Note that branch-delay slots are used in order to save space.
-      */
- 0:
-     sethi %hi(fast_data_access_mmu_miss_data_hi), %g7
-     wr %g0, ASI_DMMU, %asi
-     ldxa [VA_DMMU_TAG_ACCESS] %asi, %g1          ! read the faulting Context and VPN
-     ldx [%g7 + %lo(tlb_tag_access_context_mask)], %g2
-     andcc %g1, %g2, %g3                          ! get Context
-     bnz %xcc, 0f                                 ! Context is non-zero
-     andncc %g1, %g2, %g3                         ! get page address into %g3
-     bz %xcc, 0f                                  ! page address is zero
-     ldx [%g7 + %lo(end_of_identity)], %g4
-     cmp %g3, %g4
-     bgeu %xcc, 0f
-
-     ldx [%g7 + %lo(kernel_8k_tlb_data_template)], %g2
-     add %g3, %g2, %g2
-     stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG         ! identity map the kernel page
-     retry
-
-     /*
-      * Third, catch and handle special cases when the trap is caused by
-      * the userspace register window spill or fill handler. In case
-      * one of these two traps caused this trap, we just lower the trap
-      * level and service the DTLB miss. In the end, we restart
-      * the offending SAVE or RESTORE.
-      */
- 0:
-     .if (\tl > 0)
-     wrpr %g0, 1, %tl
-     .endif
-
-     /*
-      * Switch from the MM globals.
-      */
-     wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
-
-     mov TT_FAST_DATA_ACCESS_MMU_MISS, %g2
-     ldxa [VA_DMMU_TAG_ACCESS] %asi, %g5          ! read the faulting Context and VPN
-     PREEMPTIBLE_HANDLER exc_dispatch
- .endm
-
- .macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
-     /*
-      * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
-      */
-
-     .if (\tl > 0)
-     wrpr %g0, 1, %tl
-     .endif
-
-     /*
-      * Switch from the MM globals.
-      */
-     wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
-
-     mov TT_FAST_DATA_ACCESS_PROTECTION, %g2
-     mov VA_DMMU_TAG_ACCESS, %g5
-     ldxa [%g5] ASI_DMMU, %g5                     ! read the faulting Context and VPN
-     PREEMPTIBLE_HANDLER exc_dispatch
- .endm
-
+ #include <arch/trap/sun4u/mmu.S>
  #endif /* __ASSEMBLER__ */
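The fast path of the removed FAST_DATA_ACCESS_MMU_MISS_HANDLER installs a kernel identity mapping only when the context is zero, the page address is non-zero, and the page lies below end_of_identity. A rough C rendering of that test, for illustration only (the function name is hypothetical; the operands correspond to tlb_tag_access_context_mask and end_of_identity above):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical C equivalent of the fast-path test in the handler above. */
    static bool take_identity_fast_path(uint64_t tag_access, uint64_t context_mask,
        uint64_t end_of_identity)
    {
        uint64_t context = tag_access & context_mask;  /* andcc %g1, %g2, %g3 */
        uint64_t page = tag_access & ~context_mask;    /* andncc %g1, %g2, %g3 */

        /* Kernel (context 0) fault, non-zero page, inside the identity region. */
        return context == 0 && page != 0 && page < end_of_identity;
    }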
kernel/arch/sparc64/include/arch/trap/sun4u/regwin.h
r064e0fd → r5a01f7d

  #ifdef __ASSEMBLER__
-
- /*
-  * Macro used to spill userspace window to userspace window buffer.
-  * It can be either triggered from preemptible_handler doing SAVE
-  * at (TL=1) or from normal kernel code doing SAVE when OTHERWIN>0
-  * at (TL=0).
-  */
- .macro SPILL_TO_USPACE_WINDOW_BUFFER
-     stx %l0, [%g7 + L0_OFFSET]
-     stx %l1, [%g7 + L1_OFFSET]
-     stx %l2, [%g7 + L2_OFFSET]
-     stx %l3, [%g7 + L3_OFFSET]
-     stx %l4, [%g7 + L4_OFFSET]
-     stx %l5, [%g7 + L5_OFFSET]
-     stx %l6, [%g7 + L6_OFFSET]
-     stx %l7, [%g7 + L7_OFFSET]
-     stx %i0, [%g7 + I0_OFFSET]
-     stx %i1, [%g7 + I1_OFFSET]
-     stx %i2, [%g7 + I2_OFFSET]
-     stx %i3, [%g7 + I3_OFFSET]
-     stx %i4, [%g7 + I4_OFFSET]
-     stx %i5, [%g7 + I5_OFFSET]
-     stx %i6, [%g7 + I6_OFFSET]
-     stx %i7, [%g7 + I7_OFFSET]
-     add %g7, STACK_WINDOW_SAVE_AREA_SIZE, %g7
-     saved
-     retry
- .endm
-
+ #include <arch/trap/sun4u/regwin.S>
  #endif
kernel/arch/sparc64/include/arch/trap/sun4v/mmu.h
r064e0fd → r5a01f7d

  #ifdef __ASSEMBLER__
-
- /* MMU fault status area data fault offset */
- #define FSA_DFA_OFFSET 0x48
-
- /* MMU fault status area data context */
- #define FSA_DFC_OFFSET 0x50
-
- /* offset of the target address within the TTE Data entry */
- #define TTE_DATA_TADDR_OFFSET 13
-
- .macro FAST_INSTRUCTION_ACCESS_MMU_MISS_HANDLER
-     mov TT_FAST_INSTRUCTION_ACCESS_MMU_MISS, %g2
-     clr %g5                                      ! XXX
-     PREEMPTIBLE_HANDLER exc_dispatch
- .endm
-
- /*
-  * Handler of the Fast Data Access MMU Miss trap. If the trap occurred in the kernel
-  * (context 0), an identity mapping (with displacement) is installed. Otherwise
-  * a higher level service routine is called.
-  */
- .macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl
-     mov SCRATCHPAD_MMU_FSA, %g1
-     ldxa [%g1] ASI_SCRATCHPAD, %g1               ! g1 <= RA of MMU fault status area
-
-     /* read faulting context */
-     add %g1, FSA_DFC_OFFSET, %g2                 ! g2 <= RA of data fault context
-     ldxa [%g2] ASI_REAL, %g3                     ! read the fault context
-
-     /* read the faulting address */
-     add %g1, FSA_DFA_OFFSET, %g2                 ! g2 <= RA of data fault address
-     ldxa [%g2] ASI_REAL, %g1                     ! read the fault address
-     srlx %g1, TTE_DATA_TADDR_OFFSET, %g1         ! truncate it to page boundary
-     sllx %g1, TTE_DATA_TADDR_OFFSET, %g1
-
-     /* service by higher-level routine when context != 0 */
-     brnz %g3, 0f
-     nop
-     /* exclude page number 0 from installing the identity mapping */
-     brz %g1, 0f
-     nop
-
-     /* exclude pages beyond the end of memory from the identity mapping */
-     sethi %hi(end_of_identity), %g4
-     ldx [%g4 + %lo(end_of_identity)], %g4
-     cmp %g1, %g4
-     bgeu %xcc, 0f
-     nop
-
-     /*
-      * Installing the identity does not fit into 32 instructions, call
-      * a separate routine. The routine performs RETRY, hence the call never
-      * returns.
-      */
-     ba,a %xcc, install_identity_mapping
-
- 0:
-
-     /*
-      * One of the scenarios in which this trap can occur is when the
-      * register window spill/fill handler accesses a memory which is not
-      * mapped. In such a case, this handler will be called from TL = 1.
-      * We handle the situation by pretending that the MMU miss occurred
-      * on TL = 0. Once the MMU miss trap is serviced, the instruction which
-      * caused the spill/fill trap is restarted, the spill/fill trap occurs,
-      * but this time its handler accesses memory which is mapped.
-      */
-     .if (\tl > 0)
-     wrpr %g0, 1, %tl
-     .endif
-
-     mov TT_FAST_DATA_ACCESS_MMU_MISS, %g2
-
-     /*
-      * Save the faulting virtual page and faulting context to the %g5
-      * register. The most significant 51 bits of the %g5 register will
-      * contain the virtual address which caused the fault truncated to the
-      * page boundary. The least significant 13 bits of the %g5 register
-      * will contain the number of the context in which the fault occurred.
-      * The value of the %g5 register will be stored in the istate structure
-      * for inspection by the higher level service routine.
-      */
-     or %g1, %g3, %g5
-
-     PREEMPTIBLE_HANDLER exc_dispatch
- .endm
-
- /*
-  * Handler of the Fast Data MMU Protection trap. Finds the trapping address
-  * and context and calls higher level service routine.
-  */
- .macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl
-     /*
-      * The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER.
-      */
-     .if (\tl > 0)
-     wrpr %g0, 1, %tl
-     .endif
-
-     mov SCRATCHPAD_MMU_FSA, %g1
-     ldxa [%g1] ASI_SCRATCHPAD, %g1               ! g1 <= RA of MMU fault status area
-
-     /* read faulting context */
-     add %g1, FSA_DFC_OFFSET, %g2                 ! g2 <= RA of data fault context
-     ldxa [%g2] ASI_REAL, %g3                     ! read the fault context
-
-     /* read the faulting address */
-     add %g1, FSA_DFA_OFFSET, %g2                 ! g2 <= RA of data fault address
-     ldxa [%g2] ASI_REAL, %g1                     ! read the fault address
-     srlx %g1, TTE_DATA_TADDR_OFFSET, %g1         ! truncate it to page boundary
-     sllx %g1, TTE_DATA_TADDR_OFFSET, %g1
-
-     mov TT_FAST_DATA_ACCESS_PROTECTION, %g2
-
-     /* the same as for FAST_DATA_ACCESS_MMU_MISS_HANDLER */
-     or %g1, %g3, %g5
-
-     PREEMPTIBLE_HANDLER exc_dispatch
- .endm
+ #include <arch/trap/sun4v/mmu.S>
  #endif /* __ASSEMBLER__ */
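The removed sun4v handlers pack the fault information into %g5 exactly as the comment above describes: bits 63:13 hold the faulting virtual address truncated to the page boundary, bits 12:0 the context. A hedged sketch of how a higher-level service routine could split that value (helper names invented for illustration; the shift of 13 matches TTE_DATA_TADDR_OFFSET above):

    #include <stdint.h>

    #define FAULT_PAGE_SHIFT 13   /* TTE_DATA_TADDR_OFFSET in the removed code */

    /* Hypothetical helpers; not part of the changeset. */
    static uint64_t fault_page(uint64_t g5)
    {
        return g5 & ~((UINT64_C(1) << FAULT_PAGE_SHIFT) - 1);  /* VA truncated to the page */
    }

    static uint64_t fault_context(uint64_t g5)
    {
        return g5 & ((UINT64_C(1) << FAULT_PAGE_SHIFT) - 1);   /* 13-bit context number */
    }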
kernel/arch/sparc64/include/arch/trap/sun4v/regwin.h
r064e0fd → r5a01f7d

  #ifdef __ASSEMBLER__
-
- /*
-  * Saves the contents of the current window to the userspace window buffer.
-  * Does not modify any register window registers, but updates pointer to the
-  * top of the userspace window buffer.
-  *
-  * Parameters:
-  *   \tmpreg1  global register to be used for scratching purposes
-  *   \tmpreg2  global register to be used for scratching purposes
-  */
- .macro SAVE_TO_USPACE_WBUF tmpreg1, tmpreg2
-     set SCRATCHPAD_WBUF, \tmpreg2
-     ldxa [\tmpreg2] ASI_SCRATCHPAD, \tmpreg1
-     stx %l0, [\tmpreg1 + L0_OFFSET]
-     stx %l1, [\tmpreg1 + L1_OFFSET]
-     stx %l2, [\tmpreg1 + L2_OFFSET]
-     stx %l3, [\tmpreg1 + L3_OFFSET]
-     stx %l4, [\tmpreg1 + L4_OFFSET]
-     stx %l5, [\tmpreg1 + L5_OFFSET]
-     stx %l6, [\tmpreg1 + L6_OFFSET]
-     stx %l7, [\tmpreg1 + L7_OFFSET]
-     stx %i0, [\tmpreg1 + I0_OFFSET]
-     stx %i1, [\tmpreg1 + I1_OFFSET]
-     stx %i2, [\tmpreg1 + I2_OFFSET]
-     stx %i3, [\tmpreg1 + I3_OFFSET]
-     stx %i4, [\tmpreg1 + I4_OFFSET]
-     stx %i5, [\tmpreg1 + I5_OFFSET]
-     stx %i6, [\tmpreg1 + I6_OFFSET]
-     stx %i7, [\tmpreg1 + I7_OFFSET]
-     add \tmpreg1, STACK_WINDOW_SAVE_AREA_SIZE, \tmpreg1
-     stxa \tmpreg1, [\tmpreg2] ASI_SCRATCHPAD
- .endm
-
- /*
-  * Macro used to spill userspace window to userspace window buffer.
-  * It is triggered from normal kernel code doing SAVE when
-  * OTHERWIN>0 at (TL=0).
-  */
- .macro SPILL_TO_USPACE_WINDOW_BUFFER
-     SAVE_TO_USPACE_WBUF %g7, %g4
-     saved
-     retry
- .endm
-
+ #include <arch/trap/sun4v/regwin.S>
  #endif
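Unlike the sun4u variant, the sun4v macros above keep the window-buffer cursor in the SCRATCHPAD_WBUF scratchpad slot, so each spill reads the pointer, stores the sixteen window registers, and writes the advanced pointer back. A loose C sketch of that round trip, assuming hypothetical scratchpad accessors (scratchpad_read/scratchpad_write do not exist under these names in the source):

    #include <stdint.h>

    #define WINDOW_REGS 16   /* %l0-%l7 and %i0-%i7, 8 bytes each */

    extern uint64_t scratchpad_read(uint64_t slot);
    extern void scratchpad_write(uint64_t slot, uint64_t value);

    /* Hypothetical C equivalent of SAVE_TO_USPACE_WBUF's bookkeeping. */
    static void save_to_uspace_wbuf(uint64_t slot, const uint64_t window[WINDOW_REGS])
    {
        uint64_t *wbuf = (uint64_t *) scratchpad_read(slot);  /* ldxa [tmpreg2] ASI_SCRATCHPAD */

        for (int i = 0; i < WINDOW_REGS; i++)
            wbuf[i] = window[i];                              /* stx %lN/%iN, [tmpreg1 + ...] */

        /* advance by STACK_WINDOW_SAVE_AREA_SIZE and publish the new top */
        scratchpad_write(slot, (uint64_t) (wbuf + WINDOW_REGS));
    }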
kernel/arch/sparc64/include/arch/trap/trap_table.h
r064e0fd → r5a01f7d

  #define TRAP_TABLE_SIZE (TRAP_TABLE_ENTRY_COUNT * TRAP_TABLE_ENTRY_SIZE)

- #ifndef __ASSEMBLER__
-
- #include <stdint.h>
-
- struct trap_table_entry {
-     uint8_t octets[TRAP_TABLE_ENTRY_SIZE];
- } __attribute__ ((packed));
-
- typedef struct trap_table_entry trap_table_entry_t;
-
- extern trap_table_entry_t trap_table[TRAP_TABLE_ENTRY_COUNT];
- extern trap_table_entry_t trap_table_save[TRAP_TABLE_ENTRY_COUNT];
-
- #endif /* !__ASSEMBLER__ */
-
  #ifdef __ASSEMBLER__
- .macro SAVE_GLOBALS
-     mov %g1, %l1
-     mov %g2, %l2
-     mov %g3, %l3
-     mov %g4, %l4
-     mov %g5, %l5
-     mov %g6, %l6
-     mov %g7, %l7
- .endm
-
- .macro RESTORE_GLOBALS
-     mov %l1, %g1
-     mov %l2, %g2
-     mov %l3, %g3
-     mov %l4, %g4
-     mov %l5, %g5
-     mov %l6, %g6
-     mov %l7, %g7
- .endm
-
- .macro PREEMPTIBLE_HANDLER f
-     sethi %hi(\f), %g1
-     ba %xcc, preemptible_handler
-     or %g1, %lo(\f), %g1
- .endm
-
+ #include <arch/trap/trap_table.S>
  #endif /* __ASSEMBLER__ */
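PREEMPTIBLE_HANDLER, removed above, is a thin trampoline: it builds the address of the requested higher-level routine in %g1, finishing it in the branch-delay slot, and jumps to the common preemptible_handler code. Expanding it for the exc_dispatch case used by the MMU handlers in this changeset is plain macro substitution:

    sethi %hi(exc_dispatch), %g1
    ba %xcc, preemptible_handler
    or %g1, %lo(exc_dispatch), %g1    ! executed in the branch-delay slot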