Changeset cd373bb in mainline
- Timestamp:
- 2006-03-07T11:04:40Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- b994a60
- Parents:
- e1c68e0c
- Location:
- arch/ia64/src
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
arch/ia64/src/ivt.S
re1c68e0c rcd373bb 46 46 #define R_HANDLER r17 47 47 #define R_RET r18 48 #define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */ 48 49 #define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */ 49 50 … … 57 58 * 58 59 * Some steps are skipped (enabling and disabling interrupts). 59 * Some steps are not fully supported yet (e.g. interruptions60 * from userspace and floating-pointcontext).60 * Some steps are not fully supported yet (e.g. dealing with floating-point 61 * context). 61 62 * 62 63 * @param offs Offset from the beginning of IVT. … … 88 89 /* 3. switch to kernel memory stack */ 89 90 mov r30 = cr.ipsr 90 shr.u r31 = r12, VRN_SHIFT ;; 91 92 /* 93 * Set p6 to true if the stack register references kernel address space. 94 * Set p7 to false if the stack register doesn't reference kernel address space. 95 */ 96 cmp.eq p6, p7 = VRN_KERNEL, r31 ;; 97 98 (p6) shr.u r30 = r30, PSR_CPL_SHIFT ;; 99 (p6) and r30 = PSR_CPL_MASK_SHIFTED, r30 ;; 100 101 /* 102 * Set p6 to true if the interrupted context executed in kernel mode. 103 * Set p7 to false if the interrupted context didn't execute in kernel mode. 104 */ 105 (p6) cmp.eq p6, p7 = r30, r0 ;; 106 107 /* 108 * Now, p7 is true iff the stack needs to be switched to kernel stack. 91 shr.u r31 = r12, VRN_SHIFT ;; 92 93 shr.u r30 = r30, PSR_CPL_SHIFT ;; 94 and r30 = PSR_CPL_MASK_SHIFTED, r30 ;; 95 96 /* 97 * Set p3 to true if the interrupted context executed in kernel mode. 98 * Set p4 to false if the interrupted context didn't execute in kernel mode. 99 */ 100 cmp.eq p3, p4 = r30, r0 ;; 101 cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */ 102 103 /* 104 * Set p3 to true if the stack register references kernel address space. 105 * Set p4 to false if the stack register doesn't reference kernel address space. 106 */ 107 (p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;; 108 109 /* 110 * Now, p4 is true iff the stack needs to be switched to kernel stack. 
109 111 */ 110 112 mov r30 = r12 111 (p 7) mov r12 = R_KSTACK ;;113 (p4) mov r12 = R_KSTACK ;; 112 114 113 115 add r31 = -STACK_FRAME_BIAS, r12 ;; … … 131 133 mov r26 = cr.ifs 132 134 133 st8 [r31] = r24, -8 ;; /* save ar.rsc */134 st8 [r31] = r25, -8 ;; /* save ar.pfs */135 st8 [r31] = r26, -8 /* save ar.ifs */135 st8 [r31] = r24, -8 ;; /* save ar.rsc */ 136 st8 [r31] = r25, -8 ;; /* save ar.pfs */ 137 st8 [r31] = r26, -8 /* save ar.ifs */ 136 138 137 139 and r30 = ~3, r24 ;; 138 mov ar.rsc = r30 ;; /* place RSE in enforced lazy mode */140 mov ar.rsc = r30 ;; /* place RSE in enforced lazy mode */ 139 141 140 142 mov r27 = ar.rnat 141 143 mov r28 = ar.bspstore ;; 142 144 143 /* assume kernel backing store */ 144 mov ar.bspstore = r28 ;; 145 /* 146 * Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE. 147 */ 148 (p1) shr.u r30 = r28, VRN_SHIFT ;; 149 (p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;; 150 151 /* 152 * If BSPSTORE needs to be switched, p1 is false and p2 is true. 
153 */ 154 (p1) mov r30 = r28 155 (p2) mov r30 = R_KSTACK_BSP ;; 156 (p2) mov ar.bspstore = r30 ;; 145 157 146 158 mov r29 = ar.bsp 147 159 148 st8 [r31] = r27, -8 ;; /* save ar.rnat */149 st8 [r31] = r 28, -8 ;;/* save new value written to ar.bspstore */150 st8 [r31] = r28, -8 ;; /* save ar.bspstore */151 st8 [r31] = r29, -8 /* save ar.bsp */152 153 mov ar.rsc = r24 /* restore RSE's setting */160 st8 [r31] = r27, -8 ;; /* save ar.rnat */ 161 st8 [r31] = r30, -8 ;; /* save new value written to ar.bspstore */ 162 st8 [r31] = r28, -8 ;; /* save ar.bspstore */ 163 st8 [r31] = r29, -8 /* save ar.bsp */ 164 165 mov ar.rsc = r24 /* restore RSE's setting */ 154 166 155 167 /* steps 6 - 15 are done by heavyweight_handler_inner() */ 156 mov R_RET = b0 /* save b0 belonging to interrupted context */168 mov R_RET = b0 /* save b0 belonging to interrupted context */ 157 169 br.call.sptk.many b0 = heavyweight_handler_inner 158 0: mov b0 = R_RET /* restore b0 belonging to the interrupted context */170 0: mov b0 = R_RET /* restore b0 belonging to the interrupted context */ 159 171 160 172 /* 16. RSE switch to interrupted context */ 161 cover /* allocate zerro size frame (step 1 (from Intel Docs)) */173 cover /* allocate zerro size frame (step 1 (from Intel Docs)) */ 162 174 163 175 add r31 = STACK_SCRATCH_AREA_SIZE, r12 ;; … … 190 202 191 203 /* 17. restore interruption state from memory stack */ 192 ld8 r28 = [r31], +8 ;; /* load cr.ifa */193 ld8 r27 = [r31], +8 ;; /* load cr.isr */194 ld8 r26 = [r31], +8 ;; /* load cr.iipa */195 ld8 r25 = [r31], +8 ;; /* load cr.ipsr */196 ld8 r24 = [r31], +8 ;; /* load cr.iip */204 ld8 r28 = [r31], +8 ;; /* load cr.ifa */ 205 ld8 r27 = [r31], +8 ;; /* load cr.isr */ 206 ld8 r26 = [r31], +8 ;; /* load cr.iipa */ 207 ld8 r25 = [r31], +8 ;; /* load cr.ipsr */ 208 ld8 r24 = [r31], +8 ;; /* load cr.iip */ 197 209 198 210 mov cr.iip = r24 … … 203 215 204 216 /* 18. 
restore predicate registers from memory stack */ 205 ld8 r29 = [r31], +8 ;; /* load predicate registers */217 ld8 r29 = [r31], +8 ;; /* load predicate registers */ 206 218 mov pr = r29 207 219 208 220 /* 19. return from interruption */ 209 ld8 r12 = [r31] /* load stack pointer */221 ld8 r12 = [r31] /* load stack pointer */ 210 222 rfi ;; 211 223 -
arch/ia64/src/proc/scheduler.c
re1c68e0c rcd373bb 32 32 #include <arch/register.h> 33 33 #include <arch/context.h> 34 #include <arch/stack.h> 34 35 #include <arch/mm/tlb.h> 35 36 #include <config.h> 36 37 #include <align.h> 37 38 38 /** Record kernel stack address in bank 0 r23 and make sure itis mapped in DTR. */39 /** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */ 39 40 void before_thread_runs_arch(void) 40 41 { … … 52 53 53 54 /* 54 * Record address of kernel stack to bank 0 r23 55 * where it will be found after switch from userspace. 55 * Record address of kernel backing store to bank 0 r22. 56 * Record address of kernel stack to bank 0 r23. 57 * These values will be found there after switch from userspace. 56 58 */ 57 59 __asm__ volatile ( 58 60 "bsw.0\n" 59 "mov r23 = %0\n" 61 "mov r22 = %0\n" 62 "mov r23 = %1\n" 60 63 "bsw.1\n" 61 : : "r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA])); 64 : 65 : "r" (((__address) THREAD->kstack) + ALIGN_UP(sizeof(the_t), REGISTER_STACK_ALIGNMENT)), 66 "r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA])); 62 67 } 63 68
Note: See TracChangeset for help on using the changeset viewer.