Changeset a796127 in mainline
Timestamp: 2006-08-26T18:42:11Z (18 years ago)
Branches:  lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:  c8ea4a8b
Parents:   f47fd19
Location:  kernel
Files:     13 edited
Legend: lines prefixed with "+" were added, lines prefixed with "-" were removed, unprefixed lines are unchanged context, and "…" marks omitted unchanged lines.
kernel/arch/sparc64/_link.ld.in
rf47fd19 → ra796127
 	*(.sdata2);
 	*(.sbss);
+	. = ALIGN(8);
 	hardcoded_ktext_size = .;
 	QUAD(ktext_end - ktext_start);
kernel/arch/sparc64/include/arch.h
rf47fd19 → ra796127
 #define __sparc64_ARCH_H__
 
+#define ASI_AIUP	0x10	/** Access to primary context with user privileges. */
+#define ASI_AIUS	0x11	/** Access to secondary context with user privileges. */
+
 #endif
 
kernel/arch/sparc64/include/interrupt.h
rf47fd19 → ra796127
 #define IVT_FIRST	1
 
-/* Dummy macros. */
-#define IRQ_KBD		2
-#define VECTOR_KBD	IRQ_KBD
-
-#define trap_virtual_enable_irqs(x)
-#define trap_virtual_eoi()
-
 struct istate {
 	uint64_t pstate;
kernel/arch/sparc64/include/regdef.h
rf47fd19 → ra796127
 #define KERN_sparc64_REGDEF_H_
 
-#define PSTATE_IE_BIT	2
-#define PSTATE_AM_BIT	8
+#define PSTATE_IE_BIT	(1<<1)
+#define PSTATE_AM_BIT	(1<<3)
 
 #define PSTATE_AG_BIT	(1<<0)
…
 #define PSTATE_MG_BIT	(1<<10)
 
+#define PSTATE_PRIV_BIT	(1<<2)
+
+#define TSTATE_PSTATE_SHIFT	8
+#define TSTATE_PRIV_BIT	(PSTATE_PRIV_BIT<<TSTATE_PSTATE_SHIFT)
+
+#define TSTATE_CWP_MASK	0x1f
+
 #endif
 
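As a side note, the new masks are plain bit arithmetic; the TSTATE values follow from PSTATE being stored shifted left by TSTATE_PSTATE_SHIFT, as the definitions above state. The stand-alone C snippet below is not part of the changeset; it only evaluates the new definitions so the resulting values are easy to verify.

#include <stdio.h>

/* The PSTATE/TSTATE masks introduced in regdef.h by this changeset. */
#define PSTATE_IE_BIT       (1 << 1)
#define PSTATE_AM_BIT       (1 << 3)
#define PSTATE_PRIV_BIT     (1 << 2)
#define TSTATE_PSTATE_SHIFT 8
#define TSTATE_PRIV_BIT     (PSTATE_PRIV_BIT << TSTATE_PSTATE_SHIFT)
#define TSTATE_CWP_MASK     0x1f

int main(void)
{
    /* PSTATE.PRIV is bit 2; inside TSTATE the PSTATE copy sits 8 bits higher. */
    printf("PSTATE_PRIV_BIT = 0x%x\n", PSTATE_PRIV_BIT);   /* prints 0x4 */
    printf("TSTATE_PRIV_BIT = 0x%x\n", TSTATE_PRIV_BIT);   /* prints 0x400 */
    printf("TSTATE_CWP_MASK = 0x%x\n", TSTATE_CWP_MASK);   /* prints 0x1f */
    return 0;
}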
kernel/arch/sparc64/include/trap/interrupt.h
rf47fd19 → ra796127
 #ifdef __ASM__
 .macro INTERRUPT_LEVEL_N_HANDLER n
-	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
-	mov \n - 1, %o0
+	mov \n - 1, %g2
 	PREEMPTIBLE_HANDLER exc_dispatch
 .endm
kernel/arch/sparc64/include/trap/mmu.h
rf47fd19 → ra796127
 
 #include <arch/stack.h>
+#include <arch/regdef.h>
 #include <arch/mm/tlb.h>
 #include <arch/mm/mmu.h>
…
 .macro FAST_DATA_ACCESS_MMU_MISS_HANDLER
 	/*
-	 * First, test if it is the portion of the kernel address space
+	 * First, try to refill TLB from TSB.
+	 */
+	! TODO
+
+	/*
+	 * Second, test if it is the portion of the kernel address space
 	 * which is faulting. If that is the case, immediately create
 	 * identity mapping for that page in DTLB. VPN 0 is excluded from
…
 	 * Note that branch-delay slots are used in order to save space.
 	 */
+0:
 	mov VA_DMMU_TAG_ACCESS, %g1
 	ldxa [%g1] ASI_DMMU, %g1		! read the faulting Context and VPN
…
 	bz 0f					! page address is zero
 
-	/*
-	 * Create and insert the identity-mapped entry for
-	 * the faulting kernel page.
-	 */
-
 	or %g3, (TTE_CP|TTE_P|TTE_W), %g2	! 8K pages are the default (encoded as 0)
 	set 1, %g3
…
 	retry
 
+	/*
+	 * Third, catch and handle special cases when the trap is caused by
+	 * some register window trap handler.
+	 */
 0:
-	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+	! TODO
+
+0:
+	wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
 	PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
 .endm
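As a side note, the decision this macro's comments describe can be restated in a few lines of C. The stand-alone sketch below is not kernel code; the helper name and the plain integer parameters are invented for illustration, and it assumes (per the comments above and the start.S changes in this changeset) that the kernel runs in context 0. A miss on a kernel page with a non-zero VPN is served by an identity mapping inserted directly into the DTLB; everything else is handed to fast_data_access_mmu_miss() via PREEMPTIBLE_HANDLER.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the branch the macro takes after reading the
 * faulting Context and VPN from the D-MMU tag access register. */
static bool serviced_by_identity_mapping(uint64_t context, uint64_t vpn)
{
    return context == 0 && vpn != 0;
}

int main(void)
{
    printf("%d\n", serviced_by_identity_mapping(0, 0x1234)); /* 1: kernel page, map in place */
    printf("%d\n", serviced_by_identity_mapping(0, 0));      /* 0: VPN 0 is excluded */
    printf("%d\n", serviced_by_identity_mapping(7, 0x1234)); /* 0: userspace context */
    return 0;
}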
kernel/arch/sparc64/include/trap/regwin.h
rf47fd19 → ra796127
 
 #include <arch/stack.h>
+#include <arch/arch.h>
 
 #define TT_CLEAN_WINDOW		0x24
…
 #define FILL_HANDLER_SIZE	REGWIN_HANDLER_SIZE
 
-/** Window Save Area offsets. */
+#define NWINDOW	8
+
+/* Window Save Area offsets. */
 #define L0_OFFSET	0
 #define L1_OFFSET	8
…
 
 #ifdef __ASM__
-.macro SPILL_NORMAL_HANDLER
+
+/*
+ * Macro used by the nucleus and the primary context 0 during normal and other spills.
+ */
+.macro SPILL_NORMAL_HANDLER_KERNEL
 	stx %l0, [%sp + STACK_BIAS + L0_OFFSET]
 	stx %l1, [%sp + STACK_BIAS + L1_OFFSET]
…
 .endm
 
-.macro FILL_NORMAL_HANDLER
+/*
+ * Macro used by the userspace during normal spills.
+ */
+.macro SPILL_NORMAL_HANDLER_USERSPACE
+	wr ASI_AIUP, %asi
+	stxa %l0, [%sp + STACK_BIAS + L0_OFFSET] %asi
+	stxa %l1, [%sp + STACK_BIAS + L1_OFFSET] %asi
+	stxa %l2, [%sp + STACK_BIAS + L2_OFFSET] %asi
+	stxa %l3, [%sp + STACK_BIAS + L3_OFFSET] %asi
+	stxa %l4, [%sp + STACK_BIAS + L4_OFFSET] %asi
+	stxa %l5, [%sp + STACK_BIAS + L5_OFFSET] %asi
+	stxa %l6, [%sp + STACK_BIAS + L6_OFFSET] %asi
+	stxa %l7, [%sp + STACK_BIAS + L7_OFFSET] %asi
+	stxa %i0, [%sp + STACK_BIAS + I0_OFFSET] %asi
+	stxa %i1, [%sp + STACK_BIAS + I1_OFFSET] %asi
+	stxa %i2, [%sp + STACK_BIAS + I2_OFFSET] %asi
+	stxa %i3, [%sp + STACK_BIAS + I3_OFFSET] %asi
+	stxa %i4, [%sp + STACK_BIAS + I4_OFFSET] %asi
+	stxa %i5, [%sp + STACK_BIAS + I5_OFFSET] %asi
+	stxa %i6, [%sp + STACK_BIAS + I6_OFFSET] %asi
+	stxa %i7, [%sp + STACK_BIAS + I7_OFFSET] %asi
+	saved
+	retry
+.endm
+
+/*
+ * Macro used by the userspace during other spills.
+ */
+.macro SPILL_OTHER_HANDLER_USERSPACE
+	wr ASI_AIUS, %asi
+	stxa %l0, [%sp + STACK_BIAS + L0_OFFSET] %asi
+	stxa %l1, [%sp + STACK_BIAS + L1_OFFSET] %asi
+	stxa %l2, [%sp + STACK_BIAS + L2_OFFSET] %asi
+	stxa %l3, [%sp + STACK_BIAS + L3_OFFSET] %asi
+	stxa %l4, [%sp + STACK_BIAS + L4_OFFSET] %asi
+	stxa %l5, [%sp + STACK_BIAS + L5_OFFSET] %asi
+	stxa %l6, [%sp + STACK_BIAS + L6_OFFSET] %asi
+	stxa %l7, [%sp + STACK_BIAS + L7_OFFSET] %asi
+	stxa %i0, [%sp + STACK_BIAS + I0_OFFSET] %asi
+	stxa %i1, [%sp + STACK_BIAS + I1_OFFSET] %asi
+	stxa %i2, [%sp + STACK_BIAS + I2_OFFSET] %asi
+	stxa %i3, [%sp + STACK_BIAS + I3_OFFSET] %asi
+	stxa %i4, [%sp + STACK_BIAS + I4_OFFSET] %asi
+	stxa %i5, [%sp + STACK_BIAS + I5_OFFSET] %asi
+	stxa %i6, [%sp + STACK_BIAS + I6_OFFSET] %asi
+	stxa %i7, [%sp + STACK_BIAS + I7_OFFSET] %asi
+	saved
+	retry
+.endm
+
+
+/*
+ * Macro used by the nucleus and the primary context 0 during normal fills.
+ */
+.macro FILL_NORMAL_HANDLER_KERNEL
 	ldx [%sp + STACK_BIAS + L0_OFFSET], %l0
 	ldx [%sp + STACK_BIAS + L1_OFFSET], %l1
…
 	ldx [%sp + STACK_BIAS + I6_OFFSET], %i6
 	ldx [%sp + STACK_BIAS + I7_OFFSET], %i7
+	restored
+	retry
+.endm
+
+/*
+ * Macro used by the userspace during normal fills.
+ */
+.macro FILL_NORMAL_HANDLER_USERSPACE
+	wr ASI_AIUP, %asi
+	ldxa [%sp + STACK_BIAS + L0_OFFSET] %asi, %l0
+	ldxa [%sp + STACK_BIAS + L1_OFFSET] %asi, %l1
+	ldxa [%sp + STACK_BIAS + L2_OFFSET] %asi, %l2
+	ldxa [%sp + STACK_BIAS + L3_OFFSET] %asi, %l3
+	ldxa [%sp + STACK_BIAS + L4_OFFSET] %asi, %l4
+	ldxa [%sp + STACK_BIAS + L5_OFFSET] %asi, %l5
+	ldxa [%sp + STACK_BIAS + L6_OFFSET] %asi, %l6
+	ldxa [%sp + STACK_BIAS + L7_OFFSET] %asi, %l7
+	ldxa [%sp + STACK_BIAS + I0_OFFSET] %asi, %i0
+	ldxa [%sp + STACK_BIAS + I1_OFFSET] %asi, %i1
+	ldxa [%sp + STACK_BIAS + I2_OFFSET] %asi, %i2
+	ldxa [%sp + STACK_BIAS + I3_OFFSET] %asi, %i3
+	ldxa [%sp + STACK_BIAS + I4_OFFSET] %asi, %i4
+	ldxa [%sp + STACK_BIAS + I5_OFFSET] %asi, %i5
+	ldxa [%sp + STACK_BIAS + I6_OFFSET] %asi, %i6
+	ldxa [%sp + STACK_BIAS + I7_OFFSET] %asi, %i7
+	restored
+	retry
+.endm
+
+/*
+ * Macro used by the userspace during other fills.
+ */
+.macro FILL_OTHER_HANDLER_USERSPACE
+	wr ASI_AIUS, %asi
+	ldxa [%sp + STACK_BIAS + L0_OFFSET] %asi, %l0
+	ldxa [%sp + STACK_BIAS + L1_OFFSET] %asi, %l1
+	ldxa [%sp + STACK_BIAS + L2_OFFSET] %asi, %l2
+	ldxa [%sp + STACK_BIAS + L3_OFFSET] %asi, %l3
+	ldxa [%sp + STACK_BIAS + L4_OFFSET] %asi, %l4
+	ldxa [%sp + STACK_BIAS + L5_OFFSET] %asi, %l5
+	ldxa [%sp + STACK_BIAS + L6_OFFSET] %asi, %l6
+	ldxa [%sp + STACK_BIAS + L7_OFFSET] %asi, %l7
+	ldxa [%sp + STACK_BIAS + I0_OFFSET] %asi, %i0
+	ldxa [%sp + STACK_BIAS + I1_OFFSET] %asi, %i1
+	ldxa [%sp + STACK_BIAS + I2_OFFSET] %asi, %i2
+	ldxa [%sp + STACK_BIAS + I3_OFFSET] %asi, %i3
+	ldxa [%sp + STACK_BIAS + I4_OFFSET] %asi, %i4
+	ldxa [%sp + STACK_BIAS + I5_OFFSET] %asi, %i5
+	ldxa [%sp + STACK_BIAS + I6_OFFSET] %asi, %i6
+	ldxa [%sp + STACK_BIAS + I7_OFFSET] %asi, %i7
 	restored
 	retry
kernel/arch/sparc64/include/trap/trap_table.h
rf47fd19 → ra796127
 #define SAVED_TPC	-(2*8)
 #define SAVED_TNPC	-(3*8)
-#define SAVED_PSTATE	-(4*8)
 
 .macro PREEMPTIBLE_HANDLER f
-	set \f, %l0
+	sethi %hi(\f), %g1
 	b preemptible_handler
-	nop
+	or %g1, %lo(\f), %g1
 .endm
 
kernel/arch/sparc64/src/mm/tlb.c
rf47fd19 → ra796127
 #include <symtab.h>
 
-static void dtlb_pte_copy(pte_t *t);
+static void dtlb_pte_copy(pte_t *t, bool ro);
+static void itlb_pte_copy(pte_t *t);
 static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str);
+static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
 
 char *context_encoding[] = {
…
 }
 
-void dtlb_pte_copy(pte_t *t)
-{
+/** Copy PTE to TLB.
+ *
+ * @param t Page Table Entry to be copied.
+ * @param ro If true, the entry will be created read-only, regardless of its w field.
+ */
+void dtlb_pte_copy(pte_t *t, bool ro)
+{
+	tlb_tag_access_reg_t tag;
+	tlb_data_t data;
+	page_address_t pg;
+	frame_address_t fr;
+
+	pg.address = t->page;
+	fr.address = t->frame;
+
+	tag.value = 0;
+	tag.context = t->as->asid;
+	tag.vpn = pg.vpn;
+
+	dtlb_tag_access_write(tag.value);
+
+	data.value = 0;
+	data.v = true;
+	data.size = PAGESIZE_8K;
+	data.pfn = fr.pfn;
+	data.l = false;
+	data.cp = t->c;
+	data.cv = t->c;
+	data.p = t->p;
+	data.w = ro ? false : t->w;
+	data.g = t->g;
+
+	dtlb_data_in_write(data.value);
+}
+
+void itlb_pte_copy(pte_t *t)
+{
+	tlb_tag_access_reg_t tag;
+	tlb_data_t data;
+	page_address_t pg;
+	frame_address_t fr;
+
+	pg.address = t->page;
+	fr.address = t->frame;
+
+	tag.value = 0;
+	tag.context = t->as->asid;
+	tag.vpn = pg.vpn;
+
+	itlb_tag_access_write(tag.value);
+
+	data.value = 0;
+	data.v = true;
+	data.size = PAGESIZE_8K;
+	data.pfn = fr.pfn;
+	data.l = false;
+	data.cp = t->c;
+	data.cv = t->c;
+	data.p = t->p;
+	data.w = false;
+	data.g = t->g;
+
+	itlb_data_in_write(data.value);
 }
 
…
 void fast_instruction_access_mmu_miss(int n, istate_t *istate)
 {
-	panic("%s\n", __FUNCTION__);
+	uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
+	pte_t *t;
+
+	page_table_lock(AS, true);
+	t = page_mapping_find(AS, va);
+	if (t && PTE_EXECUTABLE(t)) {
+		/*
+		 * The mapping was found in the software page hash table.
+		 * Insert it into ITLB.
+		 */
+		t->a = true;
+		itlb_pte_copy(t);
+		page_table_unlock(AS, true);
+	} else {
+		/*
+		 * Forward the page fault to the address space page fault handler.
+		 */
+		page_table_unlock(AS, true);
+		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
+			do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
+		}
+	}
 }
 
…
 	 * Insert it into DTLB.
 	 */
-	dtlb_pte_copy(t);
+	t->a = true;
+	dtlb_pte_copy(t, true);
 	page_table_unlock(AS, true);
 } else {
…
 }
 
+void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
+{
+	char *tpc_str = get_symtab_entry(istate->tpc);
+
+	printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
+	panic("%s\n", str);
+}
+
 void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str)
 {
kernel/arch/sparc64/src/start.S
rf47fd19 → ra796127
 .register %g2, #scratch
 .register %g3, #scratch
-.register %g6, #scratch
-.register %g7, #scratch
 
 .section K_TEXT_START, "ax"
…
 	 */
 
-	set kernel_image_start, %g7
+	set kernel_image_start, %g5
 
 	! write ITLB tag of context 1
…
 	set VA_DMMU_TAG_ACCESS, %g2
 	stxa %g1, [%g2] ASI_IMMU
-	flush %g7
+	flush %g5
 
 	! write ITLB data and install the temporary mapping in context 1
 	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
 	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
-	flush %g7
+	flush %g5
 
 	! switch to context 1
 	set MEM_CONTEXT_TEMP, %g1
 	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
-	flush %g7
+	flush %g5
 
 	! demap context 0
 	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
 	stxa %g0, [%g1] ASI_IMMU_DEMAP
-	flush %g7
+	flush %g5
 
 	! write ITLB tag of context 0
…
 	set VA_DMMU_TAG_ACCESS, %g2
 	stxa %g1, [%g2] ASI_IMMU
-	flush %g7
+	flush %g5
 
 	! write ITLB data and install the permanent kernel mapping in context 0
 	SET_TLB_DATA(g1, g2, 0)			! use non-global mapping
 	stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
-	flush %g7
+	flush %g5
 
 	! switch to context 0
 	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
-	flush %g7
+	flush %g5
 
 	! ensure nucleus mapping
…
 	set MEM_CONTEXT_TEMP, %g1
 	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
-	flush %g7
+	flush %g5
 
 	! demap context 1
 	SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
 	stxa %g0, [%g1] ASI_IMMU_DEMAP
-	flush %g7
+	flush %g5
 
 	! set context 0 in the primary context register
 	stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi	! ASI_DMMU is correct here !!!
-	flush %g7
+	flush %g5
 
 	! set TL back to 0
kernel/arch/sparc64/src/trap/trap_table.S
rf47fd19 → ra796127
 .register %g2, #scratch
 .register %g3, #scratch
-.register %g6, #scratch
-.register %g7, #scratch
 
 .text
…
 .global spill_0_normal
 spill_0_normal:
-	SPILL_NORMAL_HANDLER
+	SPILL_NORMAL_HANDLER_KERNEL
 
 /* TT = 0xc0, TL = 0, fill_0_normal handler */
…
 .global fill_0_normal
 fill_0_normal:
-	FILL_NORMAL_HANDLER
+	FILL_NORMAL_HANDLER_KERNEL
 
 /*
…
 .global spill_0_normal_high
 spill_0_normal_high:
-	SPILL_NORMAL_HANDLER
+	SPILL_NORMAL_HANDLER_KERNEL
 
 /* TT = 0xc0, TL > 0, fill_0_normal handler */
…
 .global fill_0_normal_high
 fill_0_normal_high:
-	FILL_NORMAL_HANDLER
+	FILL_NORMAL_HANDLER_KERNEL
 
 
…
  *
  * This trap handler makes arrangements to make calling of scheduler() from
- * within a trap context possible. It is guaranteed to function only when traps
- * are not nested (i.e. for TL=1).
+ * within a trap context possible. It is called from several other trap
+ * handlers.
  *
- * Every trap handler on TL=1 that makes a call to the scheduler needs to
- * be based on this function. The reason behind it is that the nested
- * trap levels and the automatic saving of the interrupted context by hardware
- * does not work well together with scheduling (i.e. a thread cannot be rescheduled
- * with TL>0). Therefore it is necessary to eliminate the effect of trap levels
- * by software and save the necessary state on the kernel stack.
- *
- * Note that for traps with TL>1, more state needs to be saved. This function
- * is therefore not going to work when TL>1.
- *
- * The caller is responsible for doing SAVE and allocating
- * PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack.
+ * This function can be entered either with interrupt globals or alternate globals.
+ * Memory management trap handlers are obliged to switch to one of those global sets
+ * prior to calling this function. Register window management functions are not
+ * allowed to modify the alternate global registers.
  *
  * Input registers:
- * 	%l0		Address of function to call.
- * Output registers:
- * 	%l1 - %l7	Copy of %g1 - %g7
+ * 	%g1		Address of function to call.
+ * 	%g2		Argument for the function.
+ * 	%g6		Pre-set as kernel stack base if trap from userspace.
+ * 	%g7		Reserved.
  */
 .global preemptible_handler
 preemptible_handler:
-	/*
-	 * Save TSTATE, TPC, TNPC and PSTATE aside.
+	rdpr %tstate, %g3
+	andcc %g3, TSTATE_PRIV_BIT, %g0		! if this trap came from the privileged mode...
+	bnz 0f					! ...skip setting of kernel stack and primary context
+	nop
+
+	/*
+	 * Switch to kernel stack. The old stack is
+	 * automatically saved in the old window's %sp
+	 * and the new window's %fp.
+	 */
+	save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+	/*
+	 * Mark the CANSAVE windows as OTHER windows.
+	 * Set CLEANWIN to NWINDOW-1 so that clean_window traps do not occur.
+	 */
+	rdpr %cansave, %l0
+	wrpr %l0, %otherwin
+	wrpr %g0, %cansave
+	wrpr %g0, NWINDOW-1, %cleanwin
+
+	/*
+	 * Switch to primary context 0.
+	 */
+	mov VA_PRIMARY_CONTEXT_REG, %l0
+	stxa %g0, [%l0] ASI_DMMU
+	set kernel_image_start, %l0
+	flush %l0
+
+	ba 1f
+	nop
+
+0:
+	save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
+
+	/*
+	 * At this moment, we are using the kernel stack
+	 * and have successfully allocated a register window.
+	 */
+1:
+
+	/*
+	 * Copy arguments.
+	 */
+	mov %g1, %l0
+	mov %g2, %o0
+
+	/*
+	 * Save TSTATE, TPC and TNPC aside.
 	 */
 	rdpr %tstate, %g1
 	rdpr %tpc, %g2
 	rdpr %tnpc, %g3
-	rdpr %pstate, %g4
 
 	/*
…
 	 * the kernel stack of THREAD locked in DTLB.
 	 */
-	stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE]
-	stx %g2, [%fp + STACK_BIAS + SAVED_TPC]
-	stx %g3, [%fp + STACK_BIAS + SAVED_TNPC]
-	stx %g4, [%fp + STACK_BIAS + SAVED_PSTATE]
+	stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]
+	stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]
+	stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]
 
-	/*
-	 * Write 0 to TL.
-	 */
 	wrpr %g0, 0, %tl
-
-	/*
-	 * Alter PSTATE.
-	 * - switch to normal globals.
-	 */
-	and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4
-	wrpr %g4, 0, %pstate
-
-	/*
-	 * Save the normal globals.
-	 */
+	wrpr %g0, PSTATE_PRIV_BIT, %pstate
 	SAVE_GLOBALS
 
 	/*
-	 * Call the higher-level handler.
-	 */
-	mov %fp, %o1				! calculate istate address
+	 * Call the higher-level handler and pass istate as second parameter.
+	 */
 	call %l0
-	add %o1, STACK_BIAS + SAVED_PSTATE, %o1	! calculate istate address
-
-	/*
-	 * Restore the normal global register set.
-	 */
+	add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1
+
 	RESTORE_GLOBALS
-
-	/*
-	 * Restore PSTATE from saved copy.
-	 * Alternate/Interrupt/MM globals become active.
-	 */
-	ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4
-	wrpr %l4, 0, %pstate
-
-	/*
-	 * Write 1 to TL.
-	 */
+	wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
 	wrpr %g0, 1, %tl
 
…
 	 * Read TSTATE, TPC and TNPC from saved copy.
 	 */
-	ldx [%fp + STACK_BIAS + SAVED_TSTATE], %g1
-	ldx [%fp + STACK_BIAS + SAVED_TPC], %g2
-	ldx [%fp + STACK_BIAS + SAVED_TNPC], %g3
-
-	/*
-	 * Do restore to match the save instruction from the top-level handler.
-	 */
-	restore
-
-	/*
-	 * On execution of the RETRY instruction, CWP will be restored from the TSTATE
-	 * register. However, because of scheduling, it is possible that CWP in the saved
-	 * TSTATE is different from the current CWP. The following chunk of code fixes
-	 * CWP in the saved copy of TSTATE.
-	 */
-	rdpr %cwp, %g4				! read current CWP
-	and %g1, ~0x1f, %g1			! clear CWP field in saved TSTATE
-	or %g1, %g4, %g1			! write current CWP to TSTATE
-
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2
+	ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3
+
 	/*
 	 * Restore TSTATE, TPC and TNPC from saved copies.
…
 	wrpr %g2, 0, %tpc
 	wrpr %g3, 0, %tnpc
-
-	/*
-	 * Return from interrupt.
-	 */
+
+	/*
+	 * If OTHERWIN is zero, then all the userspace windows have been
+	 * spilled to kernel memory (i.e. register window buffer). If
+	 * OTHERWIN is non-zero, then some userspace windows are still
+	 * valid. Others might have been spilled. However, the CWP pointer
+	 * needs no fixing because the scheduler had not been called.
+	 */
+	rdpr %otherwin, %l0
+	brnz %l0, 0f
+	nop
+
+	/*
+	 * OTHERWIN == 0
+	 */
+
+	/*
+	 * If TSTATE.CWP + 1 == CWP, then we still do not have to fix CWP.
+	 */
+	and %g1, TSTATE_CWP_MASK, %l0
+	inc %l0
+	and %l0, TSTATE_CWP_MASK, %l0		! %l0 mod NWINDOW
+	rdpr %cwp, %l1
+	cmp %l0, %l1
+	bz 0f					! CWP is ok
+	nop
+
+	/*
+	 * Fix CWP.
+	 */
+	mov %fp, %g1
+	flushw
+	wrpr %l0, 0, %cwp
+	mov %g1, %fp
+
+	/*
+	 * OTHERWIN != 0 or fall-through from the OTHERWIN == 0 case.
+	 */
+0:
+	! TODO: restore register windows from register window memory buffer
+
+	restore
 	retry
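As a side note, the new CWP check at the end of preemptible_handler reduces to a modular comparison. The stand-alone C sketch below is not kernel code (cwp_needs_fix() is a made-up helper); it only restates the test described by the comments above: RETRY restores CWP from the saved TSTATE, so unless TSTATE.CWP + 1 (masked, as in the assembly) already equals the current CWP, the window pointer must be rewritten first.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSTATE_CWP_MASK 0x1f

/* Illustrative only: true when the CWP fix-up branch must be taken. */
static bool cwp_needs_fix(uint64_t saved_tstate, uint64_t current_cwp)
{
    uint64_t expected = ((saved_tstate & TSTATE_CWP_MASK) + 1) & TSTATE_CWP_MASK;
    return expected != current_cwp;
}

int main(void)
{
    /* TSTATE with CWP == 3; handler currently in window 4: nothing to do. */
    printf("%d\n", cwp_needs_fix(3, 4));  /* prints 0 */
    /* Same saved window, but CWP has drifted to 6: fix CWP before RETRY. */
    printf("%d\n", cwp_needs_fix(3, 6));  /* prints 1 */
    return 0;
}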
kernel/test/synch/rwlock4/test.c
rf47fd19 → ra796127
 	rwlock_initialize(&rwlock);
 
-
-
-	for (; ;) {
+	for (;;) {
 		thread_t *thrd;
 
kernel/test/thread/thread1/test.c
rf47fd19 → ra796127
 	thread_detach(THREAD);
 
-	while(1)
-	{
-		while (1)
-			;
+	while (1)
 		printf("%d\n",(int)(THREAD->tid));
-		scheduler();
-	}
 }
 