Changeset 53f9821 in mainline for generic/src/synch/spinlock.c


Timestamp:
2006-03-20T20:32:17Z (19 years ago)
Author:
Ondrej Palkovsky <ondrap@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
018d957e
Parents:
9d3e185
Message:

Cleanup of spinlocks; now compiles on both ia32 and amd64, with
and without DEBUG_SPINLOCKS. Made spinlocks inline.
Moved syscall_handler to generic (it was identical for ia32, amd64 and mips32).
Made a slightly faster syscall path for ia32.
Made better interrupt routines for ia32.
Allow not saving non-scratch registers during interrupts on ia32, amd64 and mips32.
Aligned interrupt handlers on ia32 and amd64; this should prevent problems
with different instruction lengths.
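
For the spinlock part of this changeset, the non-debug lock path leaves
spinlock.c so it can be inlined, and only the deadlock-detecting variant
(renamed spinlock_lock_debug()) stays in the .c file under
CONFIG_DEBUG_SPINLOCK. Below is a minimal sketch of what the header-side
dispatch can look like; it reuses the primitives visible in the diff
(spinlock_t, preemption_disable(), spinlock_arch(), CS_ENTER_BARRIER()),
but the macro-based dispatch and exact header layout are assumptions, not
the verbatim HelenOS header.

    #ifdef CONFIG_DEBUG_SPINLOCK

    /* Debug build: keep the out-of-line routine with deadlock reporting. */
    extern void spinlock_lock_debug(spinlock_t *sl);
    #define spinlock_lock(x)        spinlock_lock_debug(x)

    #else

    /* Non-debug build: the fast path is inlined at every call site. */
    static inline void spinlock_lock(spinlock_t *sl)
    {
            /* A CPU holding a spinlock must not be preempted. */
            preemption_disable();

            /* Architecture-specific busy-wait acquire of sl->val. */
            spinlock_arch(&sl->val);

            /* Keep critical-section code from being reordered above the lock. */
            CS_ENTER_BARRIER();
    }

    #endif
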

File:
1 edited

Legend:

    Unmodified (no marker)
    Added (+)
    Removed (-)
  • generic/src/synch/spinlock.c

    r9d3e185 → r53f9821

    Old  New
     52   52   }
     53   53
     54       - #ifdef CONFIG_DEBUG_SPINLOCK
     55   54   /** Lock spinlock
     56   55    *
      …
     61   60    * @param sl Pointer to spinlock_t structure.
     62   61    */
     63       - void spinlock_lock(spinlock_t *sl)
          62  + #ifdef CONFIG_DEBUG_SPINLOCK
          63  + void spinlock_lock_debug(spinlock_t *sl)
     64   64   {
     65   65           count_t i = 0;
      …
     83   83           if (deadlock_reported)
     84   84                   printf("cpu%d: not deadlocked\n", CPU->id);
     85       -
     86       -         /*
     87       -          * Prevent critical section code from bleeding out this way up.
     88       -          */
     89       -         CS_ENTER_BARRIER();
     90       -
     91       - }
     92       -
     93       - #else
     94       -
     95       - /** Lock spinlock
     96       -  *
     97       -  * Lock spinlock.
     98       -  *
     99       -  * @param sl Pointer to spinlock_t structure.
    100       -  */
    101       - void spinlock_lock(spinlock_t *sl)
    102       - {
    103       -         preemption_disable();
    104       -
    105       -         /*
    106       -          * Each architecture has its own efficient/recommended
    107       -          * implementation of spinlock.
    108       -          */
    109       -         spinlock_arch(&sl->val);
    110   85
    111   86           /*
      …
    144  119   }
    145  120
    146       - /** Unlock spinlock
    147       -  *
    148       -  * Unlock spinlock.
    149       -  *
    150       -  * @param sl Pointer to spinlock_t structure.
    151       -  */
    152       - void spinlock_unlock(spinlock_t *sl)
    153       - {
    154       -         ASSERT(atomic_get(&sl->val) != 0);
    155       -
    156       -         /*
    157       -          * Prevent critical section code from bleeding out this way down.
    158       -          */
    159       -         CS_LEAVE_BARRIER();
    160       -
    161       -         atomic_set(&sl->val,0);
    162       -         preemption_enable();
    163       - }
    164       -
    165  121   #endif
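
The spinlock_unlock() body removed above presumably becomes an inline in the
spinlock header as well (the header location is an assumption; the statements
and their ordering are taken from the removed lines):

    static inline void spinlock_unlock(spinlock_t *sl)
    {
            /* The lock must actually be held when it is released. */
            ASSERT(atomic_get(&sl->val) != 0);

            /*
             * Prevent critical section code from bleeding out this way down:
             * all critical-section accesses complete before the lock is freed.
             */
            CS_LEAVE_BARRIER();

            atomic_set(&sl->val, 0);
            preemption_enable();
    }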