Changeset d92bf462 in mainline
- Timestamp:
- 2010-05-22T22:31:17Z (15 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- ba7371f9
- Parents:
- d354d57
- Location:
- kernel/arch
- Files:
- 6 edited
Legend:
- Unmodified
- Added
- Removed
kernel/arch/mips32/include/cp0.h
rd354d57 rd92bf462 38 38 #include <typedefs.h> 39 39 40 #define cp0_status_ie_enabled_bit 41 #define cp0_status_exl_exception_bit 42 #define cp0_status_erl_error_bit 43 #define cp0_status_um_bit 44 #define cp0_status_bev_bootstrap_bit 45 #define cp0_status_fpu_bit 40 #define cp0_status_ie_enabled_bit (1 << 0) 41 #define cp0_status_exl_exception_bit (1 << 1) 42 #define cp0_status_erl_error_bit (1 << 2) 43 #define cp0_status_um_bit (1 << 4) 44 #define cp0_status_bev_bootstrap_bit (1 << 22) 45 #define cp0_status_fpu_bit (1 << 29) 46 46 47 47 #define cp0_status_im_shift 8 -
kernel/arch/mips32/src/start.S
rd354d57 rd92bf462 47 47 # Which status bits should are thread-local 48 48 #define REG_SAVE_MASK 0x1f # KSU(UM), EXL, ERL, IE 49 49 50 50 # Save registers to space defined by \r 51 # We will change status: Disable ERL, EXL,UM,IE51 # We will change status: Disable ERL, EXL, UM, IE 52 52 # These changes will be automatically reversed in REGISTER_LOAD 53 # SPis NOT saved as part of these registers53 # %sp is NOT saved as part of these registers 54 54 .macro REGISTERS_STORE_AND_EXC_RESET r 55 55 sw $at, EOFFSET_AT(\r) … … 70 70 sw $t8, EOFFSET_T8(\r) 71 71 sw $t9, EOFFSET_T9(\r) 72 72 73 73 mflo $at 74 74 sw $at, EOFFSET_LO(\r) … … 79 79 sw $ra, EOFFSET_RA(\r) 80 80 sw $k1, EOFFSET_K1(\r) 81 81 82 82 mfc0 $t0, $status 83 83 mfc0 $t1, $epc 84 84 85 and $t2, $t0, REG_SAVE_MASK # Save only KSU,EXL,ERL,IE 86 li $t3, ~(0x1f) 87 and $t0, $t0, $t3 # Clear KSU,EXL,ERL,IE 88 89 sw $t2,EOFFSET_STATUS(\r) 90 sw $t1,EOFFSET_EPC(\r) 85 # save only KSU, EXL, ERL, IE 86 and $t2, $t0, REG_SAVE_MASK 87 88 # clear KSU, EXL, ERL, IE 89 li $t3, ~(REG_SAVE_MASK) 90 and $t0, $t0, $t3 91 92 sw $t2, EOFFSET_STATUS(\r) 93 sw $t1, EOFFSET_EPC(\r) 91 94 mtc0 $t0, $status 92 95 .endm 93 96 94 97 .macro REGISTERS_LOAD r 95 # Update only UM, EXR,IE from status, the rest98 # Update only UM, EXR, IE from status, the rest 96 99 # is controlled by OS and not bound to task 97 100 mfc0 $t0, $status 98 101 lw $t1,EOFFSET_STATUS(\r) 99 100 li $t2, ~REG_SAVE_MASK # Mask UM,EXL,ERL,IE 102 103 # Mask UM, EXL, ERL, IE 104 li $t2, ~REG_SAVE_MASK 101 105 and $t0, $t0, $t2 102 106 103 or $t0, $t0, $t1 # Copy UM,EXL, ERL, IE from saved status 107 # Copy UM, EXL, ERL, IE from saved status 108 or $t0, $t0, $t1 104 109 mtc0 $t0, $status 105 110 … … 129 134 lw $at, EOFFSET_HI(\r) 130 135 mthi $at 131 136 132 137 lw $at, EOFFSET_EPC(\r) 133 138 mtc0 $at, $epc … … 138 143 139 144 # Move kernel stack pointer address to register K0 140 # - if we are in user mode, load the appropriate stack 141 # address 145 # - if we are 
in user mode, load the appropriate stack address 142 146 .macro KERNEL_STACK_TO_K0 143 # If we are in user mode147 # if we are in user mode 144 148 mfc0 $k0, $status 145 149 andi $k0, 0x10 … … 148 152 add $k0, $sp, 0 149 153 150 # Move $k0 pointer to kernel stack154 # move $k0 pointer to kernel stack 151 155 lui $k0, %hi(supervisor_sp) 152 156 ori $k0, $k0, %lo(supervisor_sp) 153 # Move $k0 (superveisor_sp) 157 158 # move $k0 (supervisor_sp) 154 159 lw $k0, 0($k0) 155 1: 160 161 1: 156 162 .endm 157 163 158 164 .org 0x0 159 165 kernel_image_start: 160 /* Load temporary stack */166 # load temporary stack 161 167 lui $sp, %hi(end_stack) 162 168 ori $sp, $sp, %lo(end_stack) 163 169 164 /*Not sure about this, but might165 be needed for PIC code */170 # Not sure about this, but might 171 # be needed for PIC code 166 172 lui $gp, 0x8000 167 173 168 /* $a1 contains physical address of bootinfo_t */ 169 174 # $a1 contains physical address of bootinfo_t 170 175 jal arch_pre_main 171 176 nop … … 174 179 nop 175 180 176 181 .space TEMP_STACK_SIZE 177 182 end_stack: 178 183 … … 191 196 exception_handler: 192 197 KERNEL_STACK_TO_K0 198 193 199 sub $k0, REGISTER_SPACE 194 200 sw $sp, EOFFSET_SP($k0) … … 209 215 jal exc_dispatch # exc_dispatch(excno, register_space) 210 216 move $a0, $k0 211 217 212 218 REGISTERS_LOAD $sp 213 219 # The $sp is automatically restored to former value … … 276 282 277 283 eret 278 284 279 285 tlb_refill_handler: 280 286 KERNEL_STACK_TO_K0 -
kernel/arch/ppc32/include/atomic.h
rd354d57 rd92bf462 42 42 asm volatile ( 43 43 "1:\n" 44 " lwarx %0, 0, %2\n"45 " addic %0, %0, 1\n"46 " stwcx. %0, 0, %2\n"47 " bne- 1b"48 : "=&r" (tmp),44 " lwarx %[tmp], 0, %[count_ptr]\n" 45 " addic %[tmp], %[tmp], 1\n" 46 " stwcx. %[tmp], 0, %[count_ptr]\n" 47 " bne- 1b" 48 : [tmp] "=&r" (tmp), 49 49 "=m" (val->count) 50 : "r" (&val->count),50 : [count_ptr] "r" (&val->count), 51 51 "m" (val->count) 52 52 : "cc" … … 60 60 asm volatile ( 61 61 "1:\n" 62 " lwarx %0, 0, %2\n"63 " addic %0, %0, -1\n"64 " stwcx. %0, 0, %2\n"65 " bne- 1b"66 : "=&r" (tmp),62 " lwarx %[tmp], 0, %[count_ptr]\n" 63 " addic %[tmp], %[tmp], -1\n" 64 " stwcx. %[tmp], 0, %[count_ptr]\n" 65 " bne- 1b" 66 : [tmp] "=&r" (tmp), 67 67 "=m" (val->count) 68 : "r" (&val->count),68 : [count_ptr] "r" (&val->count), 69 69 "m" (val->count) 70 70 : "cc" -
kernel/arch/ppc32/include/barrier.h
rd354d57 rd92bf462 27 27 */ 28 28 29 /** @addtogroup ppc32 29 /** @addtogroup ppc32 30 30 * @{ 31 31 */ … … 36 36 #define KERN_ppc32_BARRIER_H_ 37 37 38 #define CS_ENTER_BARRIER() 39 #define CS_LEAVE_BARRIER() 38 #define CS_ENTER_BARRIER() asm volatile ("" ::: "memory") 39 #define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory") 40 40 41 #define memory_barrier() asm volatile ("sync" ::: "memory") 42 #define read_barrier() asm volatile ("sync" ::: "memory") 43 #define write_barrier() asm volatile ("eieio" ::: "memory") 41 #define memory_barrier() asm volatile ("sync" ::: "memory") 42 #define read_barrier() asm volatile ("sync" ::: "memory") 43 #define write_barrier() asm volatile ("eieio" ::: "memory") 44 45 #define instruction_barrier() \ 46 asm volatile ( \ 47 "sync\n" \ 48 "isync\n" \ 49 ) 50 51 #define COHERENCE_INVAL_MIN 4 44 52 45 53 /* … … 53 61 { 54 62 asm volatile ( 55 "dcbst 0, % 0\n"63 "dcbst 0, %[addr]\n" 56 64 "sync\n" 57 "icbi 0, % 0\n"65 "icbi 0, %[addr]\n" 58 66 "sync\n" 59 67 "isync\n" 60 :: "r" (addr)68 :: [addr] "r" (addr) 61 69 ); 62 70 } 63 71 64 #define COHERENCE_INVAL_MIN 4 65 66 static inline void smc_coherence_block(void *addr, unsigned long len) 72 static inline void smc_coherence_block(void *addr, unsigned int len) 67 73 { 68 unsigned long i; 69 70 for (i = 0; i < len; i += COHERENCE_INVAL_MIN) { 71 asm volatile ("dcbst 0, %0\n" :: "r" (addr + i)); 72 } 73 74 asm volatile ("sync"); 75 76 for (i = 0; i < len; i += COHERENCE_INVAL_MIN) { 77 asm volatile ("icbi 0, %0\n" :: "r" (addr + i)); 78 } 79 80 asm volatile ( 81 "sync\n" 82 "isync\n" 83 ); 74 unsigned int i; 75 76 for (i = 0; i < len; i += COHERENCE_INVAL_MIN) 77 asm volatile ( 78 "dcbst 0, %[addr]\n" 79 :: [addr] "r" (addr + i) 80 ); 81 82 memory_barrier(); 83 84 for (i = 0; i < len; i += COHERENCE_INVAL_MIN) 85 asm volatile ( 86 "icbi 0, %[addr]\n" 87 :: [addr] "r" (addr + i) 88 ); 89 90 instruction_barrier(); 84 91 } 85 92 -
kernel/arch/ppc32/include/cycle.h
rd354d57 rd92bf462 40 40 uint32_t lower; 41 41 uint32_t upper; 42 uint32_t upper2;42 uint32_t tmp; 43 43 44 asm volatile ( 45 "1: mftbu %0\n" 46 "mftb %1\n" 47 "mftbu %2\n" 48 "cmpw %0, %2\n" 49 "bne- 1b\n" 50 : "=r" (upper), 51 "=r" (lower), 52 "=r" (upper2) 53 :: "cr0" 54 ); 44 do { 45 asm volatile ( 46 "mftbu %[upper]\n" 47 "mftb %[lower]\n" 48 "mftbu %[tmp]\n" 49 : [upper] "=r" (upper), 50 [lower] "=r" (lower), 51 [tmp] "=r" (tmp) 52 ); 53 } while (upper != tmp); 55 54 56 55 return ((uint64_t) upper << 32) + (uint64_t) lower; -
kernel/arch/ppc32/src/proc/scheduler.c
rd354d57 rd92bf462 39 39 #include <arch.h> 40 40 41 /** Perform ppc32 specific tasks needed before the new task is run. */ 41 /** Perform ppc32 specific tasks needed before the new task is run. 42 * 43 */ 42 44 void before_task_runs_arch(void) 43 45 { 44 46 } 45 47 46 /** Perform ppc32 specific tasks needed before the new thread is scheduled. */ 48 /** Perform ppc32 specific tasks needed before the new thread is scheduled. 49 * 50 */ 47 51 void before_thread_runs_arch(void) 48 52 { 49 53 tlb_invalidate_all(); 54 50 55 asm volatile ( 51 "mtsprg0 %0\n" 52 : 53 : "r" (KA2PA(&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA])) 56 "mtsprg0 %[ksp]\n" 57 :: [ksp] "r" (KA2PA(&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA])) 54 58 ); 55 59 }
Note:
See TracChangeset
for help on using the changeset viewer.