Changeset 7e7c8747 in mainline
- Timestamp:
- 2006-12-17T12:11:00Z (18 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 95155b0c
- Parents:
- 771cd22
- Location:
- kernel/arch/sparc64/src
- Files:
- 4 edited
Legend:
- Unmodified (shown with a leading space)
- Added (shown with a leading +)
- Removed (shown with a leading -)
kernel/arch/sparc64/src/cpu/cpu.c
r771cd22 → r7e7c8747

 #include <macros.h>

-/** Perform sparc64 specific initialization of the processor structure for the current processor. */
+/** Perform sparc64 specific initialization of the processor structure for the
+ * current processor.
+ */
 void cpu_arch_init(void)
 {
…
             mid = *((uint32_t *) prop->value);
             if (mid == CPU->arch.mid) {
-                prop = ofw_tree_getprop(node, "clock-frequency");
+                prop = ofw_tree_getprop(node,
+                    "clock-frequency");
                 if (prop && prop->value)
-                    clock_frequency = *((uint32_t *) prop->value);
+                    clock_frequency = *((uint32_t *)
+                        prop->value);
             }
         }
…
      * Lock CPU stack in DTLB.
      */
-    uintptr_t base = ALIGN_DOWN(config.base, 1 <<KERNEL_PAGE_WIDTH);
+    uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

-    if (!overlaps((uintptr_t) CPU->stack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
+    if (!overlaps((uintptr_t) CPU->stack, PAGE_SIZE, base, (1 <<
+        KERNEL_PAGE_WIDTH))) {
         /*
          * Kernel stack of this processor is not locked in DTLB.
…
          * Second, create a locked mapping for it.
          */
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) CPU->stack);
-        dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack), PAGESIZE_8K, true, true);
+        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
+            CPU->stack);
+        dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack),
+            PAGESIZE_8K, true, true);
     }
 }
…
  * This function is called by the bootstrap processor.
  *
- * @param m Processor structure of the CPU for which version information is to be printed.
+ * @param m Processor structure of the CPU for which version information is to
+ * be printed.
  */
 void cpu_print_report(cpu_t *m)
…
     }

-    printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n",
-        m->id, manuf, impl, m->arch.ver.mask, m->arch.clock_frequency/1000000);
+    printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n", m->id, manuf,
+        impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
 }
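The test being rewrapped above is worth spelling out: a CPU's kernel stack only needs its own locked 8K DTLB entry when it lies outside the 4M locked entry that covers the kernel starting at config.base. Below is a minimal standalone C sketch of that decision; ALIGN_DOWN and overlaps() are reimplemented with the interval semantics the kernel code relies on, and the addresses are hypothetical.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define KERNEL_PAGE_WIDTH  22          /* log2 of the 4M locked entry */
#define PAGE_SIZE          (1 << 13)   /* 8K pages, as on sparc64 */

#define ALIGN_DOWN(a, align)  ((a) & ~((uintptr_t) (align) - 1))

/* True iff [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
    return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

int main(void)
{
    uintptr_t config_base = 0x40000000;  /* hypothetical kernel load base */
    uintptr_t stack = 0x40a00000;        /* hypothetical per-CPU stack */

    uintptr_t base = ALIGN_DOWN(config_base, 1 << KERNEL_PAGE_WIDTH);

    if (!overlaps(stack, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH))
        printf("stack outside the 4M entry: demap and lock it separately\n");
    else
        printf("stack already covered by the 4M locked entry\n");
    return 0;
}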
kernel/arch/sparc64/src/proc/scheduler.c
r771cd22 → r7e7c8747

 /** Perform sparc64 specific steps before scheduling a thread.
  *
- * Ensure that thread's kernel stack, as well as userspace window
- * buffer for userspace threads, are locked in DTLB.
- * For userspace threads, initialize reserved global registers
- * in the alternate and interrupt sets.
+ * Ensure that thread's kernel stack, as well as userspace window buffer for
+ * userspace threads, are locked in DTLB. For userspace threads, initialize
+ * reserved global registers in the alternate and interrupt sets.
  */
 void before_thread_runs_arch(void)
…
     base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);

-    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
+    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
+        KERNEL_PAGE_WIDTH))) {
         /*
          * Kernel stack of this thread is not locked in DTLB.
…
          * If not, create a locked mapping for it.
          */
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
-        dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
+        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
+            THREAD->kstack);
+        dtlb_insert_mapping((uintptr_t) THREAD->kstack,
+            KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
     }

…
         */
         ASSERT(THREAD->arch.uspace_window_buffer);
-        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
-        if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
+        uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
+            THREAD->arch.uspace_window_buffer, PAGE_SIZE);
+        if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH))
+        {
             /*
-             * The buffer is not covered by the 4M locked kernel DTLB entry.
+             * The buffer is not covered by the 4M locked kernel
+             * DTLB entry.
              */
             dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
-            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
+            dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K,
+                true, true);
         }

         /*
-         * Write kernel stack address to %g6 and a pointer to the last item
-         * in the userspace window buffer to %g7 in the alternate and interrupt sets.
+         * Write kernel stack address to %g6 and a pointer to the last
+         * item in the userspace window buffer to %g7 in the alternate
+         * and interrupt sets.
          */
         uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
-            - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
+            - (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
+            STACK_ALIGNMENT));
         write_to_ig_g6(sp);
         write_to_ag_g6(sp);
…
     uintptr_t base;

-    base = ALIGN_DOWN(config.base, 1 <<KERNEL_PAGE_WIDTH);
+    base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);

-    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
+    if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
+        KERNEL_PAGE_WIDTH))) {
         /*
          * Kernel stack of this thread is locked in DTLB.
          * Destroy the mapping.
          */
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
+        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
+            THREAD->kstack);
     }

…
     ASSERT(THREAD->arch.uspace_window_buffer);

-    uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
-    if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
+    uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
+        THREAD->arch.uspace_window_buffer, PAGE_SIZE);
+    if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
         /*
          * The buffer is not covered by the 4M locked kernel DTLB entry
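One detail in before_thread_runs_arch() above merits a worked example: the stack pointer written to %g6 accounts for the SPARC V9 stack bias of 2047 bytes and reserves one aligned stack item below the stack top. A sketch of that computation, where STACK_BIAS = 2047 comes from the V9 ABI but STACK_SIZE, STACK_ITEM_SIZE and STACK_ALIGNMENT are assumed placeholder values:

#include <stdint.h>

#define STACK_SIZE       (1 << 13)   /* assumed: one 8K page per stack */
#define STACK_BIAS       2047        /* SPARC V9 ABI stack bias */
#define STACK_ITEM_SIZE  8           /* assumed */
#define STACK_ALIGNMENT  16          /* assumed */

#define ALIGN_UP(a, align)  (((a) + ((align) - 1)) & ~((uintptr_t) (align) - 1))

/* Initial kernel stack pointer for a thread whose stack starts at kstack:
 * just below the stack top, compensating for the V9 bias and leaving room
 * for one aligned stack item. */
static uint64_t initial_sp(uintptr_t kstack)
{
    return (uint64_t) kstack + STACK_SIZE -
        (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
}

With the assumed 8K stack this yields kstack + 8192 - (2047 + 16) = kstack + 6129.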
kernel/arch/sparc64/src/proc/thread.c
r771cd22 → r7e7c8747

          * belonging to a killed thread.
          */
-        frame_free(KA2PA(ALIGN_DOWN((uintptr_t) t->arch.uspace_window_buffer, PAGE_SIZE)));
+        frame_free(KA2PA(ALIGN_DOWN((uintptr_t)
+            t->arch.uspace_window_buffer, PAGE_SIZE)));
     }
 }
…
 void thread_create_arch(thread_t *t)
 {
-    if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer)) {
+    if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer))
+    {
         /*
          * The thread needs userspace window buffer and the object
…
          * belonging to a killed thread.
          */
-        t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf, PAGE_SIZE);
+        t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
+            PAGE_SIZE);
     }
 }
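The thread_create_arch() hunk above illustrates a lazy allocation pattern: the userspace window buffer is allocated only for userspace threads, and only if the thread object, which may be recycled from a killed thread, does not already own one. Here is a user-space C sketch of the same pattern; the kernel allocates a physical frame, for which aligned_alloc() stands in, and the flag and field names mimic the kernel's but are simplified:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE           (1 << 13)   /* 8K, as on sparc64 */
#define THREAD_FLAG_USPACE  (1 << 0)    /* simplified flag encoding */

typedef struct {
    unsigned flags;
    uint8_t *uspace_window_buffer;
} thread_t;

void thread_create_arch(thread_t *t)
{
    if ((t->flags & THREAD_FLAG_USPACE) && !t->uspace_window_buffer) {
        /* Allocate once; a thread object recycled from a killed thread
         * keeps its buffer, so the pointer survives for reuse. */
        t->uspace_window_buffer = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
    }
}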
kernel/arch/sparc64/src/start.S
r771cd22 → r7e7c8747

     wrpr %g0, NWINDOWS - 2, %cansave    ! set maximum saveable windows
-    wrpr %g0, 0, %canrestore            ! get rid of windows we will never need again
-    wrpr %g0, 0, %otherwin              ! make sure the window state is consistent
-    wrpr %g0, NWINDOWS - 1, %cleanwin   ! prevent needless clean_window traps for kernel
-
-    wrpr %g0, 0, %tl                    ! TL = 0, primary context register is used
-
-    wrpr %g0, PSTATE_PRIV_BIT, %pstate  ! Disable interrupts and disable 32-bit address masking.
+    wrpr %g0, 0, %canrestore            ! get rid of windows we will
+                                        ! never need again
+    wrpr %g0, 0, %otherwin              ! make sure the window state is
+                                        ! consistent
+    wrpr %g0, NWINDOWS - 1, %cleanwin   ! prevent needless clean_window
+                                        ! traps for kernel
+
+    wrpr %g0, 0, %tl                    ! TL = 0, primary context
+                                        ! register is used
+
+    wrpr %g0, PSTATE_PRIV_BIT, %pstate  ! disable interrupts and disable
+                                        ! 32-bit address masking

     wrpr %g0, 0, %pil                   ! intialize %pil
…
 /*
- * Take over the DMMU by installing global locked
- * TTE entry identically mapping the first 4M
- * of memory.
+ * Take over the DMMU by installing global locked TTE entry identically
+ * mapping the first 4M of memory.
  *
- * In case of DMMU, no FLUSH instructions need to be
- * issued. Because of that, the old DTLB contents can
- * be demapped pretty straightforwardly and without
- * causing any traps.
+ * In case of DMMU, no FLUSH instructions need to be issued. Because of
+ * that, the old DTLB contents can be demapped pretty straightforwardly
+ * and without causing any traps.
  */
…
 #define SET_TLB_DEMAP_CMD(r1, context_id) \
-    set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
+    set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
+        TLB_DEMAP_CONTEXT_SHIFT), %r1

 ! demap context 0
…
 #define SET_TLB_TAG(r1, context) \
-    set VMA | (context <<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
+    set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1

 ! write DTLB tag
…
 /*
- * Because we cannot use global mappings (because we want to
- * have separate 64-bit address spaces for both the kernel
- * and the userspace), we prepare the identity mapping also in
- * context 1. This step is required by the
- * code installing the ITLB mapping.
+ * Because we cannot use global mappings (because we want to have
+ * separate 64-bit address spaces for both the kernel and the
+ * userspace), we prepare the identity mapping also in context 1. This
+ * step is required by the code installing the ITLB mapping.
  */
 ! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
…
 /*
- * Now is time to take over the IMMU.
- * Unfortunatelly, it cannot be done as easily as the DMMU,
- * because the IMMU is mapping the code it executes.
+ * Now is time to take over the IMMU. Unfortunatelly, it cannot be done
+ * as easily as the DMMU, because the IMMU is mapping the code it
+ * executes.
  *
- * [ Note that brave experiments with disabling the IMMU
- * and using the DMMU approach failed after a dozen
- * of desparate days with only little success. ]
+ * [ Note that brave experiments with disabling the IMMU and using the
+ * DMMU approach failed after a dozen of desparate days with only little
+ * success. ]
  *
- * The approach used here is inspired from OpenBSD.
- * First, the kernel creates IMMU mapping for itself
- * in context 1 (MEM_CONTEXT_TEMP) and switches to
- * it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
- * afterwards and replaced with the kernel permanent
- * mapping. Finally, the kernel switches back to
- * context 0 and demaps context 1.
+ * The approach used here is inspired from OpenBSD. First, the kernel
+ * creates IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP) and
+ * switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
+ * afterwards and replaced with the kernel permanent mapping. Finally,
+ * the kernel switches back to context 0 and demaps context 1.
  *
- * Moreover, the IMMU requires use of the FLUSH instructions.
- * But that is OK because we always use operands with
- * addresses already mapped by the taken over DTLB.
+ * Moreover, the IMMU requires use of the FLUSH instructions. But that
+ * is OK because we always use operands with addresses already mapped by
+ * the taken over DTLB.
  */
…
 #ifdef CONFIG_SMP
 /*
- * Active loop for APs until the BSP picks them up.
- * A processor cannot leave the loop until the
- * global variable 'waking_up_mid' equals its
+ * Active loop for APs until the BSP picks them up. A processor cannot
+ * leave the loop until the global variable 'waking_up_mid' equals its
  * MID.
  */
…

 /*
- * Create small stack to be used by the bootstrap processor.
- * It is going to be used only for a very limited period of
- * time, but we switch to it anyway, just to be sure we are
- * properly initialized.
+ * Create small stack to be used by the bootstrap processor. It is going to be
+ * used only for a very limited period of time, but we switch to it anyway,
+ * just to be sure we are properly initialized.
  *
- * What is important is that this piece of memory is covered
- * by the 4M DTLB locked entry and therefore there will be
- * no surprises like deadly combinations of spill trap and
- * and TLB miss on the stack address.
+ * What is important is that this piece of memory is covered by the 4M DTLB
+ * locked entry and therefore there will be no surprises like deadly
+ * combinations of spill trap and and TLB miss on the stack address.
  */
…
 /*
- * This variable is used by the fast_data_MMU_miss trap handler.
- * In runtime, it is further modified to reflect the starting address of
- * physical memory.
+ * This variable is used by the fast_data_MMU_miss trap handler. In runtime, it
+ * is further modified to reflect the starting address of physical memory.
  */
 .global kernel_8k_tlb_data_template
 kernel_8k_tlb_data_template:
 #ifdef CONFIG_VIRT_IDX_DCACHE
-    .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_CV | TTE_P | TTE_W)
+    .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
+        TTE_CV | TTE_P | TTE_W)
 #else /* CONFIG_VIRT_IDX_DCACHE */
-    .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_P | TTE_W)
+    .quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
+        TTE_P | TTE_W)
 #endif /* CONFIG_VIRT_IDX_DCACHE */
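For readers following the assembly, the bit layouts composed by SET_TLB_DEMAP_CMD and kernel_8k_tlb_data_template are easier to check in C. The sketch below mirrors both constructions; the shift and bit positions follow the UltraSPARC demap-address and TTE-data formats as assumed here, the kernel's sparc64 MMU headers being authoritative:

#include <stdint.h>

/* Demap address fields (assumed encodings): operation type in bits 7:6,
 * context selector in bits 5:4. */
#define TLB_DEMAP_CONTEXT        1
#define TLB_DEMAP_TYPE_SHIFT     6
#define TLB_DEMAP_CONTEXT_SHIFT  4

/* TTE data bits (assumed positions per the UltraSPARC TTE format). */
#define TTE_V_SHIFT     63           /* valid */
#define TTE_SIZE_SHIFT  61           /* page size field */
#define PAGESIZE_8K     0
#define TTE_CP          (1 << 5)     /* cacheable in physical cache */
#define TTE_CV          (1 << 4)     /* cacheable in virtual cache */
#define TTE_P           (1 << 2)     /* privileged */
#define TTE_W           (1 << 1)     /* writable */

/* Equivalent of SET_TLB_DEMAP_CMD(r1, context_id). */
static inline uint64_t tlb_demap_cmd(uint64_t context_id)
{
    return ((uint64_t) TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) |
        (context_id << TLB_DEMAP_CONTEXT_SHIFT);
}

/* Equivalent of kernel_8k_tlb_data_template with CONFIG_VIRT_IDX_DCACHE:
 * a valid, cacheable, privileged, writable 8K entry. */
static const uint64_t kernel_8k_tlb_data_template =
    ((uint64_t) 1 << TTE_V_SHIFT) |
    ((uint64_t) PAGESIZE_8K << TTE_SIZE_SHIFT) |
    TTE_CP | TTE_CV | TTE_P | TTE_W;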