Changeset 53f9821 in mainline


Timestamp: 2006-03-20T20:32:17Z (19 years ago)
Author: Ondrej Palkovsky <ondrap@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 018d957e
Parents: 9d3e185
Message:

Cleanup of spinlocks; ia32 and amd64 now compile both with and without CONFIG_DEBUG_SPINLOCK. Made spinlocks inline.
Moved syscall_handler to generic code (it was identical for ia32, amd64 and mips32).
Made the ia32 syscall path slightly faster.
Improved the interrupt routines on ia32.
Allow skipping the save of non-scratch registers during interrupts on ia32, amd64 and mips32.
Aligned interrupt handlers on ia32 and amd64; this should prevent problems with different instruction lengths.
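
For orientation before the per-file hunks: the new inline fast path (atomic_lock_arch() in the ia32/amd64 atomic.h headers below) is a test-and-test-and-set loop around an atomic exchange. A minimal portable C sketch of the same idea, using GCC __sync builtins instead of the hand-written inline assembly and omitting the preemption_disable()/CS_ENTER_BARRIER() calls of the real code (the demo_* names are illustrative, not HelenOS identifiers):

    #include <stdint.h>

    typedef struct { volatile uint32_t count; } demo_lock_t;   /* mirrors the shape of atomic_t */

    /* Spin until the lock word goes from 0 (free) to 1 (held). */
    static inline void demo_lock(demo_lock_t *l)
    {
        for (;;) {
            /* Read-only spin first so the cache line is not bounced while the
             * lock is held; the real code issues "pause" here under CONFIG_HT. */
            while (l->count != 0)
                ;
            /* Atomic exchange with acquire semantics; old value 0 means we got the lock. */
            if (__sync_lock_test_and_set(&l->count, 1) == 0)
                return;
        }
    }

    static inline void demo_unlock(demo_lock_t *l)
    {
        __sync_lock_release(&l->count);   /* store 0 with release semantics */
    }

The assembly in the diff achieves the same effect with mov/test for the read-only spin and incq/xchgq (incl/xchgl on ia32) for the atomic step.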

Files: 17 edited

  • arch/amd64/include/atomic.h

    r9d3e185 r53f9821  
    3131
    3232#include <arch/types.h>
     33#include <arch/barrier.h>
     34#include <preemption.h>
    3335
    3436typedef struct { volatile __u64 count; } atomic_t;
     
    102104
    103105
    104 extern void spinlock_arch(volatile int *val);
     106/** AMD64 specific fast spinlock */
     107static inline void atomic_lock_arch(atomic_t *val)
     108{
     109        __u64 tmp;
     110
     111        preemption_disable();
     112        __asm__ volatile (
     113                "0:;"
     114#ifdef CONFIG_HT
     115                "pause;" /* Pentium 4's HT love this instruction */
     116#endif
     117                "mov %0, %1;"
     118                "testq %1, %1;"
     119                "jnz 0b;"       /* Leightweight looping on locked spinlock */
     120               
     121                "incq %1;"      /* now use the atomic operation */
     122                "xchgq %0, %1;"
     123                "testq %1, %1;"
     124                "jnz 0b;"
     125                : "=m"(val->count),"=r"(tmp)
     126                );
     127        /*
     128         * Prevent critical section code from bleeding out this way up.
     129         */
     130        CS_ENTER_BARRIER();
     131}
    105132
    106133#endif
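
CS_ENTER_BARRIER() comes from the newly included <arch/barrier.h>, which this changeset does not touch, so its definition is not visible here. On ia32/amd64 the implicitly locked xchg already orders memory at the hardware level, so such a barrier typically only has to stop the compiler from hoisting critical-section accesses above the lock; a minimal sketch under that assumption (the _SKETCH suffix marks these as illustrative, not the HelenOS macros):

    /* Compiler-only barrier: emits no instruction, but GCC must not move
     * memory accesses across it. Hardware ordering is already provided by
     * the implicitly locked xchg in atomic_lock_arch(). */
    #define CS_ENTER_BARRIER_SKETCH()  __asm__ volatile ("" ::: "memory")
    #define CS_LEAVE_BARRIER_SKETCH()  __asm__ volatile ("" ::: "memory")
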
  • arch/amd64/include/syscall.h

    r9d3e185 r53f9821  
    3232#include <arch/types.h>
    3333
    34 extern __native syscall_handler(__native a1,__native a2, __native a3,
    35                                 __native a4, __native id);
    3634extern void syscall_setup_cpu(void);
    3735
  • arch/amd64/src/interrupt.c

    r9d3e185 r53f9821  
    5555        printf("ERROR_WORD=%Q\n", istate->error_word);
    5656        printf("%%rcs=%Q, flags=%Q, %%cr0=%Q\n", istate->cs, istate->rflags,read_cr0());
    57         printf("%%rax=%Q, %%rbx=%Q, %%rcx=%Q\n",istate->rax,istate->rbx,istate->rcx);
    58         printf("%%rdx=%Q, %%rsi=%Q, %%rdi=%Q\n",istate->rdx,istate->rsi,istate->rdi);
    59         printf("%%r8 =%Q, %%r9 =%Q, %%r10=%Q\n",istate->r8,istate->r9,istate->r10);
    60         printf("%%r11=%Q, %%r12=%Q, %%r13=%Q\n",istate->r11,istate->r12,istate->r13);
    61         printf("%%r14=%Q, %%r15=%Q, %%rsp=%Q\n",istate->r14,istate->r15,&istate->stack[0]);
    62         printf("%%rbp=%Q\n",istate->rbp);
    63 /*     
    64         printf("stack: %Q, %Q, %Q\n", x[5], x[6], x[7]);
    65         printf("       %Q, %Q, %Q\n", x[8], x[9], x[10]);
    66         printf("       %Q, %Q, %Q\n", x[11], x[12], x[13]);
    67         printf("       %Q, %Q, %Q\n", x[14], x[15], x[16]);
    68 */
     57        printf("%%rax=%Q, %%rcx=%Q, %%rdx=%Q\n",istate->rax,istate->rcx,istate->rdx);
     58        printf("%%rsi=%Q, %%rdi=%Q, %%r8 =%Q\n",istate->rsi,istate->rdi,istate->r8);
     59        printf("%%r9 =%Q, %%r10 =%Q, %%r11=%Q\n",istate->r9,istate->r10,istate->r11);
     60#ifdef CONFIG_DEBUG_ALLREGS     
     61        printf("%%r12=%Q, %%r13=%Q, %%r14=%Q\n",istate->r12,istate->r13,istate->r14);
     62        printf("%%r15=%Q, %%rbx=%Q, %%rbp=%Q\n",istate->r15,istate->rbx,&istate->rbp);
     63#endif
     64        printf("%%rsp=%Q\n",&istate->stack[0]);
    6965}
    7066
  • arch/amd64/src/syscall.c

    r9d3e185 r53f9821  
    6161        write_msr(AMD_MSR_SFMASK, 0x200);
    6262}
    63 
    64 /** Dispatch system call */
    65 __native syscall_handler(__native a1, __native a2, __native a3,
    66                          __native a4, __native id)
    67 {
    68         if (id < SYSCALL_END)
    69                 return syscall_table[id](a1,a2,a3,a4);
    70         else
    71                 panic("Undefined syscall %d", id);
    72 }
  • arch/ia32/include/atomic.h

    r9d3e185 r53f9821  
    3131
    3232#include <arch/types.h>
     33#include <arch/barrier.h>
     34#include <preemption.h>
    3335
    3436typedef struct { volatile __u32 count; } atomic_t;
     
    101103}
    102104
     105/** Ia32 specific fast spinlock */
     106static inline void atomic_lock_arch(atomic_t *val)
     107{
     108        __u32 tmp;
    103109
    104 extern void spinlock_arch(volatile int *val);
     110        preemption_disable();
     111        __asm__ volatile (
     112                "0:;"
     113#ifdef CONFIG_HT
     114                "pause;" /* Pentium 4's HT love this instruction */
     115#endif
     116                "mov %0, %1;"
     117                "testl %1, %1;"
     118                "jnz 0b;"       /* Leightweight looping on locked spinlock */
     119               
     120                "incl %1;"      /* now use the atomic operation */
     121                "xchgl %0, %1;"
     122                "testl %1, %1;"
     123                "jnz 0b;"
     124                : "=m"(val->count),"=r"(tmp)
     125                );
     126        /*
     127         * Prevent critical section code from bleeding out this way up.
     128         */
     129        CS_ENTER_BARRIER();
     130}
    105131
    106132#endif
  • arch/ia32/include/interrupt.h

    r9d3e185 r53f9821  
    6464
    6565struct istate {
     66        __u32 eax;
     67        __u32 ecx;
     68        __u32 edx;
     69        __u32 esi;
    6670        __u32 edi;
    67         __u32 esi;
    6871        __u32 ebp;
    69         __u32 esp;
    7072        __u32 ebx;
    71         __u32 edx;
    72         __u32 ecx;
    73         __u32 eax;
     73
     74        __u32 gs;
     75        __u32 fs;
     76        __u32 es;
     77        __u32 ds;
     78
    7479        __u32 error_word;
    7580        __u32 eip;
  • arch/ia32/src/asm.S

    r9d3e185 r53f9821  
    6969        ret
    7070
     71# Clear nested flag
     72# overwrites %ecx
     73.macro CLEAR_NT_FLAG
     74        pushfl
     75        pop %ecx
     76        and $0xffffbfff,%ecx
     77        push %ecx
     78        popfl
     79.endm   
    7180
    7281## Declare interrupt handlers
     
    7887# and call exc_dispatch().
    7988#
     89#define INTERRUPT_ALIGN 64
    8090.macro handler i n
    8191
     92.ifeq \i-0x30     # Syscall handler
     93        push %ds
     94        push %es
     95        push %fs
     96        push %gs
     97
     98        # Push arguments on stack
     99        push %edi
     100        push %esi
     101        push %edx
     102        push %ecx
     103        push %eax
     104       
     105        # we must fill the data segment registers
     106        movw $16,%ax
     107        movw %ax,%ds
     108        movw %ax,%es
     109       
     110        sti
     111        call syscall_handler   # syscall_handler(ax,cx,dx,si,di)
     112        cli
     113        addl $20, %esp         # clean-up of parameters
     114       
     115        pop %gs
     116        pop %fs
     117        pop %es
     118        pop %ds
     119       
     120        CLEAR_NT_FLAG
     121        iret
     122.else   
    82123        /*
    83124         * This macro distinguishes between two versions of ia32 exceptions.
     
    86127         * handlers and istate_t can be the same for both types.
    87128         */
    88 
    89129        .iflt \i-32
    90130                .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
    91                         /*
    92                          * Version with error word.
    93                          * Just take space equal to subl $4, %esp.
     131                        /*
     132                         * With error word, do nothing
    94133                         */
    95                         nop
    96                         nop
    97                         nop
    98134                .else
    99135                        /*
     
    107143                 */
    108144                subl $4, %esp
    109         .endif
    110 
    111         pusha
    112         movl %esp, %ebp
     145        .endif
     146       
    113147        push %ds
    114148        push %es
     
    116150        push %gs
    117151
     152#ifdef CONFIG_DEBUG_ALLREGS
     153        push %ebx
     154        push %ebp
     155        push %edi
     156        push %esi
     157#else
     158        sub $16, %esp
     159#endif
     160        push %edx
     161        push %ecx
     162        push %eax
     163       
    118164        # we must fill the data segment registers
    119165        movw $16,%ax
     
    121167        movw %ax,%es
    122168
    123         pushl %ebp
    124         pushl $(\i)
    125         call exc_dispatch
    126         addl $8,%esp
    127 
     169        pushl %esp          # *istate
     170        pushl $(\i)         # intnum
     171        call exc_dispatch   # excdispatch(intnum, *istate)
     172        addl $8,%esp        # Clear arguments from stack
     173
     174        CLEAR_NT_FLAG # Modifies %ecx
     175       
     176        pop %eax
     177        pop %ecx
     178        pop %edx
     179#ifdef CONFIG_DEBUG_ALLREGS
     180        pop %esi
     181        pop %edi
     182        pop %ebp
     183        pop %ebx
     184#else
     185        add $16, %esp
     186#endif 
     187       
    128188        pop %gs
    129189        pop %fs
     
    131191        pop %ds
    132192
    133 # Clear Nested Task flag.
    134         pushfl
    135         pop %eax
    136         and $0xffffbfff,%eax
    137         push %eax
    138         popfl
    139        
    140         popa
    141193        addl $4,%esp    # Skip error word, no matter whether real or fake.
    142194        iret
    143 
     195.endif
     196
     197        .align INTERRUPT_ALIGN
    144198        .if (\n-\i)-1
    145199        handler "(\i+1)",\n
     
    149203# keep in sync with pm.h !!!
    150204IDT_ITEMS=64
     205.align INTERRUPT_ALIGN
    151206interrupt_handlers:
    152207h_start:
    153         handler 0 64
    154 #       handler 64 128 
    155 #       handler 128 192
    156 #       handler 192 256
     208        handler 0 IDT_ITEMS
    157209h_end:
    158210
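
The .align INTERRUPT_ALIGN added before each generated stub (and before the interrupt_handlers label) pads every handler to the same size, so the code that fills the IDT can locate handler i by simple address arithmetic instead of depending on per-stub instruction lengths. The IDT-setup code itself is not part of this changeset; the sketch below only illustrates that arithmetic, using the h_start/h_end labels visible in the diff:

    #include <stddef.h>
    #include <stdint.h>

    #define IDT_ITEMS 64                 /* keep in sync with pm.h, as the asm notes */

    extern uint8_t h_start[];            /* labels emitted around the generated stubs */
    extern uint8_t h_end[];

    /* Illustrative only: with uniformly padded stubs, handler i sits at a fixed
     * stride from h_start. */
    static uintptr_t demo_handler_address(unsigned int i)
    {
        size_t stride = (size_t) (h_end - h_start) / IDT_ITEMS;
        return (uintptr_t) h_start + i * stride;
    }
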
  • arch/ia32/src/interrupt.c

    r9d3e185 r53f9821  
    6565        printf("ERROR_WORD=%X\n", istate->error_word);
    6666        printf("%%cs=%X,flags=%X\n", istate->cs, istate->eflags);
    67         printf("%%eax=%X, %%ebx=%X, %%ecx=%X, %%edx=%X\n",  istate->eax,istate->ebx,istate->ecx,istate->edx);
    68         printf("%%esi=%X, %%edi=%X, %%ebp=%X, %%esp=%X\n",  istate->esi,istate->edi,istate->ebp,istate->esp);
     67        printf("%%eax=%X, %%ecx=%X, %%edx=%X, %%esp=%X\n",  istate->eax,istate->ecx,istate->edx,&istate->stack[0]);
     68#ifdef CONFIG_DEBUG_ALLREGS
     69        printf("%%esi=%X, %%edi=%X, %%ebp=%X, %%ebx=%X\n",  istate->esi,istate->edi,istate->ebp,istate->ebx);
     70#endif
    6971        printf("stack: %X, %X, %X, %X\n", istate->stack[0], istate->stack[1], istate->stack[2], istate->stack[3]);
    7072        printf("       %X, %X, %X, %X\n", istate->stack[4], istate->stack[5], istate->stack[6], istate->stack[7]);
     
    126128void syscall(int n, istate_t *istate)
    127129{
    128         interrupts_enable();
    129         if (istate->esi < SYSCALL_END)
    130                 istate->eax = syscall_table[istate->esi](istate->eax, istate->ebx, istate->ecx, istate->edx);
    131         else
    132                 panic("Undefined syscall %d", istate->esi);
    133         interrupts_disable();
     130        panic("Obsolete syscall handler.");
    134131}
    135132
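
With the C-level syscall() above reduced to a panic, int $0x30 on ia32 is now handled entirely by the assembly path in asm.S, which pushes %eax, %ecx, %edx, %esi, %edi and calls the generic syscall_handler(a1, a2, a3, a4, id). A hedged sketch of a matching userspace stub (not part of this changeset; the register assignment is inferred from the push order above, and demo_syscall is an illustrative name):

    #include <stdint.h>

    /* Arguments in %eax, %ecx, %edx, %esi; syscall id in %edi; result back in %eax.
     * %ecx and %edx are listed as outputs because the kernel may clobber them. */
    static inline uint32_t demo_syscall(uint32_t a1, uint32_t a2, uint32_t a3,
                                        uint32_t a4, uint32_t id)
    {
        uint32_t ret;

        __asm__ volatile (
            "int $0x30"
            : "=a" (ret), "+c" (a2), "+d" (a3)
            : "0" (a1), "S" (a4), "D" (id)
            : "memory"
        );
        return ret;
    }
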
  • arch/mips32/include/exception.h

    r9d3e185 r53f9821  
    9999extern void cache_error_entry(void);
    100100extern void exception_init(void);
    101 extern __native syscall_handler(__native a0, __native a1, __native a2,
    102                                 __native a3, __native sysnum);
     101
    103102#endif
  • arch/mips32/src/exception.c

    r9d3e185 r53f9821  
    4141#include <console/kconsole.h>
    4242#include <arch/debugger.h>
    43 #include <syscall/syscall.h>
    4443
    4544static char * exctable[] = {
     
    130129}
    131130
    132 __native syscall_handler(__native a0, __native a1, __native a2,
    133                          __native a3, __native sysnum)
    134 {
    135         if (sysnum < SYSCALL_END)
    136                 return syscall_table[sysnum](a0,a1,a2,a3);
    137         panic("Undefined syscall %d", sysnum);
    138 }
    139 
    140131/** Handle syscall userspace call */
    141132static void syscall_exception(int n, istate_t *istate)
  • arch/mips32/src/start.S

    r9d3e185 r53f9821  
    216216        sub $k1, 8            # 8=SYSCALL
    217217       
    218         beqz $k1, uspace_shortcut
     218        beqz $k1, syscall_shortcut
    219219        add $k1, 8            # Revert $k1 back to correct exc number
    220220       
     
    229229        # The $sp is automatically restored to former value
    230230        eret
    231         nop
    232231
    233232# it seems that mips reserves some space on stack for varfuncs???
     
    236235#define SS_STATUS 24
    237236#define SS_EPC    28
    238 uspace_shortcut:
     237syscall_shortcut:
    239238        # We have a lot of space on the stack, with free use
    240239        sw $sp, SS_SP($k0)
  • generic/include/synch/spinlock.h

    r9d3e185 r53f9821  
    3434#include <preemption.h>
    3535#include <arch/atomic.h>
     36#include <debug.h>
    3637
    3738#ifdef CONFIG_SMP
     
    6768
    6869extern void spinlock_initialize(spinlock_t *sl, char *name);
    69 extern void spinlock_lock(spinlock_t *sl);
    7070extern int spinlock_trylock(spinlock_t *sl);
    71 extern void spinlock_unlock(spinlock_t *sl);
     71extern void spinlock_lock_debug(spinlock_t *sl);
     72
     73#ifdef CONFIG_DEBUG_SPINLOCK
     74#  define spinlock_lock(x) spinlock_lock_debug(x)
     75#else
     76#  define spinlock_lock(x) atomic_lock_arch(&(x)->val)
     77#endif
     78
     79/** Unlock spinlock
     80 *
     81 * Unlock spinlock.
     82 *
     83 * @param sl Pointer to spinlock_t structure.
     84 */
     85static inline void spinlock_unlock(spinlock_t *sl)
     86{
     87        ASSERT(atomic_get(&sl->val) != 0);
     88
     89        /*
     90         * Prevent critical section code from bleeding out this way down.
     91         */
     92        CS_LEAVE_BARRIER();
     93       
     94        atomic_set(&sl->val,0);
     95        preemption_enable();
     96}
    7297
    7398#else
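
After this header change, spinlock_lock is a macro that expands either to spinlock_lock_debug() (with CONFIG_DEBUG_SPINLOCK) or to the architecture's inline atomic_lock_arch(), and spinlock_unlock becomes a static inline. A short usage sketch on an SMP build (the lock name and function are illustrative; initialization still goes through spinlock_initialize() as before):

    #include <synch/spinlock.h>

    static spinlock_t demo_lock;      /* set up once with spinlock_initialize(&demo_lock, "demo_lock") */

    void demo_critical_section(void)
    {
        spinlock_lock(&demo_lock);    /* fast path also disables preemption */
        /* ... critical section ... */
        spinlock_unlock(&demo_lock);  /* barrier, atomic_set(&val, 0), preemption_enable() */
    }
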
  • generic/include/syscall/syscall.h

    r9d3e185 r53f9821  
    5656
    5757extern syshandler_t syscall_table[SYSCALL_END];
     58extern __native syscall_handler(__native a1, __native a2, __native a3,
     59                                __native a4, __native id);
    5860
    5961#endif
  • generic/src/lib/func.c

    r9d3e185 r53f9821  
    5555        }
    5656#else
    57         atomic_set(haltstate, 1);
     57        atomic_set(&haltstate, 1);
    5858#endif
    5959
  • generic/src/synch/spinlock.c

    r9d3e185 r53f9821  
    5252}
    5353
    54 #ifdef CONFIG_DEBUG_SPINLOCK
    5554/** Lock spinlock
    5655 *
     
    6160 * @param sl Pointer to spinlock_t structure.
    6261 */
    63 void spinlock_lock(spinlock_t *sl)
     62#ifdef CONFIG_DEBUG_SPINLOCK
     63void spinlock_lock_debug(spinlock_t *sl)
    6464{
    6565        count_t i = 0;
     
    8383        if (deadlock_reported)
    8484                printf("cpu%d: not deadlocked\n", CPU->id);
    85 
    86         /*
    87          * Prevent critical section code from bleeding out this way up.
    88          */
    89         CS_ENTER_BARRIER();
    90 
    91 }
    92 
    93 #else
    94 
    95 /** Lock spinlock
    96  *
    97  * Lock spinlock.
    98  *
    99  * @param sl Pointer to spinlock_t structure.
    100  */
    101 void spinlock_lock(spinlock_t *sl)
    102 {
    103         preemption_disable();
    104 
    105         /*
    106          * Each architecture has its own efficient/recommended
    107          * implementation of spinlock.
    108          */
    109         spinlock_arch(&sl->val);
    11085
    11186        /*
     
    144119}
    145120
    146 /** Unlock spinlock
    147  *
    148  * Unlock spinlock.
    149  *
    150  * @param sl Pointer to spinlock_t structure.
    151  */
    152 void spinlock_unlock(spinlock_t *sl)
    153 {
    154         ASSERT(atomic_get(&sl->val) != 0);
    155 
    156         /*
    157          * Prevent critical section code from bleeding out this way down.
    158          */
    159         CS_LEAVE_BARRIER();
    160        
    161         atomic_set(&sl->val,0);
    162         preemption_enable();
    163 }
    164 
    165121#endif
  • generic/src/syscall/syscall.c

    r9d3e185 r53f9821  
    6363}
    6464
     65/** Dispatch system call */
     66__native syscall_handler(__native a1, __native a2, __native a3,
     67                         __native a4, __native id)
     68{
     69        if (id < SYSCALL_END)
     70                return syscall_table[id](a1,a2,a3,a4);
     71        else
     72                panic("Undefined syscall %d", id);
     73}
     74
    6575syshandler_t syscall_table[SYSCALL_END] = {
    6676        sys_io,
  • kernel.config

    r9d3e185 r53f9821  
    7575
    7676# Save all interrupt registers
    77 ! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=mips32)] CONFIG_DEBUG_ALLREGS (y/n)
     77! [CONFIG_DEBUG=y&(ARCH=amd64|ARCH=mips32|ARCH=ia32)] CONFIG_DEBUG_ALLREGS (y/n)
    7878
    7979## Run-time configuration directives