Changeset cd373bb in mainline


Ignore:
Timestamp:
2006-03-07T11:04:40Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
b994a60
Parents:
e1c68e0c
Message:

ia64 work.
Support switching from the userspace register stack in the heavyweight handler.

Location:
arch/ia64/src
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • arch/ia64/src/ivt.S

    re1c68e0c rcd373bb  
    4646#define R_HANDLER       r17
    4747#define R_RET           r18
     48#define R_KSTACK_BSP    r22     /* keep in sync with before_thread_runs_arch() */
    4849#define R_KSTACK        r23     /* keep in sync with before_thread_runs_arch() */
    4950
     
    5758 *
    5859 * Some steps are skipped (enabling and disabling interrupts).
    59  * Some steps are not fully supported yet (e.g. interruptions
    60  * from userspace and floating-point context).
     60 * Some steps are not fully supported yet (e.g. dealing with floating-point
     61 * context).
    6162 *
    6263 * @param offs Offset from the beginning of IVT.
     
    8889    /* 3. switch to kernel memory stack */
    8990        mov r30 = cr.ipsr
    90         shr.u r31 = r12, VRN_SHIFT ;;
    91 
    92         /*
    93          * Set p6 to true if the stack register references kernel address space.
    94          * Set p7 to true if the stack register doesn't reference kernel address space.
    95          */
    96         cmp.eq p6, p7 = VRN_KERNEL, r31 ;;
    97        
    98         (p6) shr.u r30 = r30, PSR_CPL_SHIFT ;;
    99         (p6) and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
    100 
    101         /*
    102          * Set p6 to true if the interrupted context executed in kernel mode.
    103          * Set p7 to true if the interrupted context didn't execute in kernel mode.
    104          */
    105         (p6) cmp.eq p6, p7 = r30, r0 ;;
    106        
    107         /*
    108          * Now, p7 is true iff the stack needs to be switched to kernel stack.
     91        shr.u r31 = r12, VRN_SHIFT ;;
     92
     93        shr.u r30 = r30, PSR_CPL_SHIFT ;;
     94        and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
     95
     96        /*
     97         * Set p3 to true if the interrupted context executed in kernel mode.
     98         * Set p4 to true if the interrupted context didn't execute in kernel mode.
     99         */
     100        cmp.eq p3, p4 = r30, r0 ;;
     101        cmp.eq p1, p2 = r30, r0 ;;      /* remember IPSR setting in p1 and p2 */
     102
     103        /*
     104         * Set p3 to true if the stack register references kernel address space.
     105         * Set p4 to true if the stack register doesn't reference kernel address space.
     106         */
     107        (p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;;
     108       
     109        /*
     110         * Now, p4 is true iff the stack needs to be switched to kernel stack.
    109111         */
    110112        mov r30 = r12
    111         (p7) mov r12 = R_KSTACK ;;
     113        (p4) mov r12 = R_KSTACK ;;
    112114       
    113115        add r31 = -STACK_FRAME_BIAS, r12 ;;
     
    131133        mov r26 = cr.ifs
    132134       
    133         st8 [r31] = r24, -8 ;;  /* save ar.rsc */
    134         st8 [r31] = r25, -8 ;;  /* save ar.pfs */
    135         st8 [r31] = r26, -8     /* save ar.ifs */
     135        st8 [r31] = r24, -8 ;;          /* save ar.rsc */
     136        st8 [r31] = r25, -8 ;;          /* save ar.pfs */
     137        st8 [r31] = r26, -8             /* save ar.ifs */
    136138       
    137139        and r30 = ~3, r24 ;;
    138         mov ar.rsc = r30 ;;     /* place RSE in enforced lazy mode */
     140        mov ar.rsc = r30 ;;             /* place RSE in enforced lazy mode */
    139141       
    140142        mov r27 = ar.rnat
    141143        mov r28 = ar.bspstore ;;
    142144       
    143         /* assume kernel backing store */
    144         mov ar.bspstore = r28 ;;
     145        /*
     146         * Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE.
     147         */
     148        (p1) shr.u r30 = r28, VRN_SHIFT ;;
     149        (p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;;
     150       
     151        /*
     152         * If BSPSTORE needs to be switched, p1 is false and p2 is true.
     153         */
     154        (p1) mov r30 = r28
     155        (p2) mov r30 = R_KSTACK_BSP ;;
     156        (p2) mov ar.bspstore = r30 ;;
    145157       
    146158        mov r29 = ar.bsp
    147159       
    148         st8 [r31] = r27, -8 ;;  /* save ar.rnat */
    149         st8 [r31] = r28, -8 ;;  /* save new value written to ar.bspstore */
    150         st8 [r31] = r28, -8 ;;  /* save ar.bspstore */
    151         st8 [r31] = r29, -8     /* save ar.bsp */
    152        
    153         mov ar.rsc = r24        /* restore RSE's setting */
     160        st8 [r31] = r27, -8 ;;          /* save ar.rnat */
     161        st8 [r31] = r30, -8 ;;          /* save new value written to ar.bspstore */
     162        st8 [r31] = r28, -8 ;;          /* save ar.bspstore */
     163        st8 [r31] = r29, -8             /* save ar.bsp */
     164       
     165        mov ar.rsc = r24                /* restore RSE's setting */
    154166       
    155167    /* steps 6 - 15 are done by heavyweight_handler_inner() */
    156         mov R_RET = b0          /* save b0 belonging to interrupted context */
     168        mov R_RET = b0                  /* save b0 belonging to interrupted context */
    157169        br.call.sptk.many b0 = heavyweight_handler_inner
    158 0:      mov b0 = R_RET          /* restore b0 belonging to the interrupted context */
     1700:      mov b0 = R_RET                  /* restore b0 belonging to the interrupted context */
    159171
    160172    /* 16. RSE switch to interrupted context */
    161         cover                   /* allocate zero size frame (step 1 (from Intel Docs)) */
     173        cover                           /* allocate zero size frame (step 1 (from Intel Docs)) */
    162174
    163175        add r31 = STACK_SCRATCH_AREA_SIZE, r12 ;;
     
    190202
    191203    /* 17. restore interruption state from memory stack */
    192         ld8 r28 = [r31], +8 ;;  /* load cr.ifa */               
    193         ld8 r27 = [r31], +8 ;;  /* load cr.isr */
    194         ld8 r26 = [r31], +8 ;;  /* load cr.iipa */
    195         ld8 r25 = [r31], +8 ;;  /* load cr.ipsr */
    196         ld8 r24 = [r31], +8 ;;  /* load cr.iip */
     204        ld8 r28 = [r31], +8 ;;          /* load cr.ifa */               
     205        ld8 r27 = [r31], +8 ;;          /* load cr.isr */
     206        ld8 r26 = [r31], +8 ;;          /* load cr.iipa */
     207        ld8 r25 = [r31], +8 ;;          /* load cr.ipsr */
     208        ld8 r24 = [r31], +8 ;;          /* load cr.iip */
    197209
    198210        mov cr.iip = r24
     
    203215
    204216    /* 18. restore predicate registers from memory stack */
    205         ld8 r29 = [r31], +8 ;;  /* load predicate registers */
     217        ld8 r29 = [r31], +8 ;;          /* load predicate registers */
    206218        mov pr = r29
    207219       
    208220    /* 19. return from interruption */
    209         ld8 r12 = [r31]         /* load stack pointer */
     221        ld8 r12 = [r31]                 /* load stack pointer */
    210222        rfi ;;
    211223
  • arch/ia64/src/proc/scheduler.c

    re1c68e0c rcd373bb  
    3232#include <arch/register.h>
    3333#include <arch/context.h>
     34#include <arch/stack.h>
    3435#include <arch/mm/tlb.h>
    3536#include <config.h>
    3637#include <align.h>
    3738
    38 /** Record kernel stack address in bank 0 r23 and make sure it is mapped in DTR. */
     39/** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */
    3940void before_thread_runs_arch(void)
    4041{
     
    5253       
    5354        /*
    54          * Record address of kernel stack to bank 0 r23
    55          * where it will be found after switch from userspace.
     55         * Record address of kernel backing store to bank 0 r22.
     56         * Record address of kernel stack to bank 0 r23.
     57         * These values will be found there after switch from userspace.
    5658         */
    5759        __asm__ volatile (
    5860                "bsw.0\n"
    59                 "mov r23 = %0\n"
     61                "mov r22 = %0\n"
     62                "mov r23 = %1\n"
    6063                "bsw.1\n"
    61                  : : "r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]));
     64                :
     65                : "r" (((__address) THREAD->kstack) + ALIGN_UP(sizeof(the_t), REGISTER_STACK_ALIGNMENT)),
     66                  "r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]));
    6267}
    6368
Note: See TracChangeset for help on using the changeset viewer.