/*
 * NOTE(review): the file's copyright/license header (original lines 1-34)
 * was lost during extraction — only bare line numbers remained here.
 * TODO: restore the project's standard BSD-style license boilerplate.
 */
00035 #ifndef __SPINLOCK_H__
00036 #define __SPINLOCK_H__
00037
00038 #include <arch/types.h>
00039 #include <typedefs.h>
00040 #include <preemption.h>
00041 #include <atomic.h>
00042 #include <debug.h>
00043
#ifdef CONFIG_SMP

/** SMP spinlock.
 *
 * The lock is free when val is zero and held when val is non-zero
 * (see SPINLOCK_INITIALIZE and spinlock_unlock below).
 */
struct spinlock {
#ifdef CONFIG_DEBUG_SPINLOCK
	char *name;	/**< Lock name, used by the debug lock path for reporting. */
#endif
	atomic_t val;	/**< 0 = unlocked, non-zero = locked. */
};
00051
00052
00053
00054
00055
/*
 * Declare (but do not initialize) a spinlock; pair with a run-time call
 * to spinlock_initialize(). Use SPINLOCK_INITIALIZE for static init.
 */
#define SPINLOCK_DECLARE(slname) spinlock_t slname
00057
00058
00059
00060
00061
/*
 * Define and statically initialize a spinlock in the unlocked state
 * (val == 0). The debug variant additionally records the lock's own
 * identifier via the # stringization operator for diagnostic output.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_INITIALIZE(slname) \
	spinlock_t slname = { \
		.name = #slname, \
		.val = { 0 } \
	}
#else
#define SPINLOCK_INITIALIZE(slname) \
	spinlock_t slname = { \
		.val = { 0 } \
	}
#endif
00074
/* Run-time initialization for locks declared with SPINLOCK_DECLARE. */
extern void spinlock_initialize(spinlock_t *sl, char *name);
/* Non-blocking acquire; returns non-zero on success — presumably, confirm in the .c implementation. */
extern int spinlock_trylock(spinlock_t *sl);
/* Acquire path with deadlock diagnostics, used when CONFIG_DEBUG_SPINLOCK is on. */
extern void spinlock_lock_debug(spinlock_t *sl);

/*
 * In debug builds, spinlock_lock() routes through spinlock_lock_debug();
 * otherwise it maps directly onto the architecture's atomic lock
 * primitive operating on the lock's val field.
 */
#ifdef CONFIG_DEBUG_SPINLOCK
# define spinlock_lock(x) spinlock_lock_debug(x)
#else
# define spinlock_lock(x) atomic_lock_arch(&(x)->val)
#endif
00084
/** Release a held spinlock and re-enable preemption.
 *
 * The caller must hold the lock: the ASSERT checks val is non-zero.
 * Statement order here is deliberate — the barrier must precede the
 * releasing store, and preemption is re-enabled only after the lock
 * is visibly free.
 *
 * @param sl Pointer to the spinlock_t to release.
 */
static inline void spinlock_unlock(spinlock_t *sl)
{
	ASSERT(atomic_get(&sl->val) != 0);

	/*
	 * Prevent critical-section memory operations from leaking out past
	 * the releasing store below.
	 */
	CS_LEAVE_BARRIER();

	atomic_set(&sl->val,0);
	preemption_enable();
}
00103
#else

/*
 * Uniprocessor build: there is no other CPU to spin against, so
 * spinlocks reduce to preemption control. Declarations and
 * initializers expand to nothing; lock/unlock merely toggle
 * preemption, and trylock always "succeeds" (comma expression
 * yields 1 after disabling preemption).
 */
#define SPINLOCK_DECLARE(name)
#define SPINLOCK_INITIALIZE(name)

#define spinlock_initialize(x,name)
#define spinlock_lock(x) preemption_disable()
#define spinlock_trylock(x) (preemption_disable(), 1)
#define spinlock_unlock(x) preemption_enable()

#endif
00116
00117 #endif
00118