Changes in kernel/generic/include/synch/spinlock.h [8aa9265:13108f24] in mainline
File: 1 edited
Legend: unprefixed lines are unmodified, lines prefixed with "-" appear only in r8aa9265, lines prefixed with "+" appear only in r13108f24, and "…" marks stretches of unmodified lines omitted from the view.
kernel/generic/include/synch/spinlock.h
--- r8aa9265
+++ r13108f24

 #include <atomic.h>
 #include <debug.h>
-#include <arch/asm.h>

 #ifdef CONFIG_SMP
…
 #ifdef CONFIG_DEBUG_SPINLOCK
     const char *name;
-#endif /* CONFIG_DEBUG_SPINLOCK */
+#endif
 } spinlock_t;

…

 /*
- * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used
- * for statically allocated spinlocks. They declare (either as global
- * or static) symbol and initialize the lock.
+ * SPINLOCK_INITIALIZE is to be used for statically allocated spinlocks.
+ * It declares and initializes the lock.
  */
 #ifdef CONFIG_DEBUG_SPINLOCK
…
 }

-#define ASSERT_SPINLOCK(expr, lock) \
-    ASSERT_VERBOSE(expr, (lock)->name)
+#define spinlock_lock(lock) spinlock_lock_debug((lock))
+#define spinlock_unlock(lock) spinlock_unlock_debug((lock))

-#define spinlock_lock(lock) spinlock_lock_debug((lock))
-#define spinlock_unlock(lock) spinlock_unlock_debug((lock))
-
-#else /* CONFIG_DEBUG_SPINLOCK */
+#else

 #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
…
 }

-#define ASSERT_SPINLOCK(expr, lock) \
-    ASSERT(expr)
+#define spinlock_lock(lock) atomic_lock_arch(&(lock)->val)
+#define spinlock_unlock(lock) spinlock_unlock_nondebug((lock))

-#define spinlock_lock(lock) atomic_lock_arch(&(lock)->val)
-#define spinlock_unlock(lock) spinlock_unlock_nondebug((lock))
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
+#endif

 #define SPINLOCK_INITIALIZE(lock_name) \
…
     SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)

-extern void spinlock_initialize(spinlock_t *, const char *);
-extern int spinlock_trylock(spinlock_t *);
-extern void spinlock_lock_debug(spinlock_t *);
-extern void spinlock_unlock_debug(spinlock_t *);
+extern void spinlock_initialize(spinlock_t *lock, const char *name);
+extern int spinlock_trylock(spinlock_t *lock);
+extern void spinlock_lock_debug(spinlock_t *lock);
+extern void spinlock_unlock_debug(spinlock_t *lock);

 /** Unlock spinlock
…
  *
  * @param sl Pointer to spinlock_t structure.
- *
  */
 static inline void spinlock_unlock_nondebug(spinlock_t *lock)
…
 }

-#else /* CONFIG_DEBUG_SPINLOCK */
+#else

 #define DEADLOCK_PROBE_INIT(pname)
 #define DEADLOCK_PROBE(pname, value)

-#endif /* CONFIG_DEBUG_SPINLOCK */
+#endif

 #else /* CONFIG_SMP */
…
 #define SPINLOCK_STATIC_INITIALIZE_NAME(name, desc_name)

-#define ASSERT_SPINLOCK(expr, lock)
-
 #define spinlock_initialize(lock, name)

…
 #define DEADLOCK_PROBE(pname, value)

-#endif /* CONFIG_SMP */
-
-typedef struct {
-    SPINLOCK_DECLARE(lock);  /**< Spinlock */
-    bool guard;              /**< Flag whether ipl is valid */
-    ipl_t ipl;               /**< Original interrupt level */
-} irq_spinlock_t;
-
-#define IRQ_SPINLOCK_DECLARE(lock_name) irq_spinlock_t lock_name
-#define IRQ_SPINLOCK_EXTERN(lock_name) extern irq_spinlock_t lock_name
-
-#ifdef CONFIG_SMP
-
-#define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
-    ASSERT_SPINLOCK(expr, &((irq_lock)->lock))
-
-/*
- * IRQ_SPINLOCK_INITIALIZE and IRQ_SPINLOCK_STATIC_INITIALIZE are to be used
- * for statically allocated interrupts-disabled spinlocks. They declare (either
- * as global or static symbol) and initialize the lock.
- */
-#ifdef CONFIG_DEBUG_SPINLOCK
-
-#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
-    irq_spinlock_t lock_name = { \
-        .lock = { \
-            .name = desc_name, \
-            .val = { 0 } \
-        }, \
-        .guard = false, \
-        .ipl = 0 \
-    }
-
-#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
-    static irq_spinlock_t lock_name = { \
-        .lock = { \
-            .name = desc_name, \
-            .val = { 0 } \
-        }, \
-        .guard = false, \
-        .ipl = 0 \
-    }
-
-#else /* CONFIG_DEBUG_SPINLOCK */
-
-#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
-    irq_spinlock_t lock_name = { \
-        .lock = { \
-            .val = { 0 } \
-        }, \
-        .guard = false, \
-        .ipl = 0 \
-    }
-
-#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
-    static irq_spinlock_t lock_name = { \
-        .lock = { \
-            .val = { 0 } \
-        }, \
-        .guard = false, \
-        .ipl = 0 \
-    }
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#else /* CONFIG_SMP */
-
-/*
- * Since the spinlocks are void on UP systems, we also need
- * to have a special variant of interrupts-disabled spinlock
- * macros which take this into account.
- */
-
-#define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
-    ASSERT_SPINLOCK(expr, NULL)
-
-#define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
-    irq_spinlock_t lock_name = { \
-        .guard = false, \
-        .ipl = 0 \
-    }
-
-#define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
-    static irq_spinlock_t lock_name = { \
-        .guard = false, \
-        .ipl = 0 \
-    }
-
-#endif /* CONFIG_SMP */
-
-#define IRQ_SPINLOCK_INITIALIZE(lock_name) \
-    IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name)
-
-#define IRQ_SPINLOCK_STATIC_INITIALIZE(lock_name) \
-    IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)
-
-/** Initialize interrupts-disabled spinlock
- *
- * @param lock IRQ spinlock to be initialized.
- * @param name IRQ spinlock name.
- *
- */
-static inline void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
-{
-    spinlock_initialize(&(lock->lock), name);
-    lock->guard = false;
-    lock->ipl = 0;
-}
-
-/** Lock interrupts-disabled spinlock
- *
- * Lock a spinlock which requires disabled interrupts.
- *
- * @param lock IRQ spinlock to be locked.
- * @param irq_dis If true, interrupts are actually disabled
- *     prior locking the spinlock. If false, interrupts
- *     are expected to be already disabled.
- *
- */
-static inline void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
-{
-    if (irq_dis) {
-        ipl_t ipl = interrupts_disable();
-        spinlock_lock(&(lock->lock));
-
-        lock->guard = true;
-        lock->ipl = ipl;
-    } else {
-        ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
-
-        spinlock_lock(&(lock->lock));
-        ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-    }
-}
-
-/** Unlock interrupts-disabled spinlock
- *
- * Unlock a spinlock which requires disabled interrupts.
- *
- * @param lock IRQ spinlock to be unlocked.
- * @param irq_res If true, interrupts are restored to previously
- *     saved interrupt level.
- *
- */
-static inline void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
-{
-    ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
-
-    if (irq_res) {
-        ASSERT_IRQ_SPINLOCK(lock->guard, lock);
-
-        lock->guard = false;
-        ipl_t ipl = lock->ipl;
-
-        spinlock_unlock(&(lock->lock));
-        interrupts_restore(ipl);
-    } else {
-        ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-        spinlock_unlock(&(lock->lock));
-    }
-}
-
-/** Lock interrupts-disabled spinlock
- *
- * Lock an interrupts-disabled spinlock conditionally. If the
- * spinlock is not available at the moment, signal failure.
- * Interrupts are expected to be already disabled.
- *
- * @param lock IRQ spinlock to be locked conditionally.
- *
- * @return Zero on failure, non-zero otherwise.
- *
- */
-static inline int irq_spinlock_trylock(irq_spinlock_t *lock)
-{
-    ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
-    int rc = spinlock_trylock(&(lock->lock));
-
-    ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-    return rc;
-}
-
-/** Pass lock from one interrupts-disabled spinlock to another
- *
- * Pass lock from one IRQ spinlock to another IRQ spinlock
- * without enabling interrupts during the process.
- *
- * The first IRQ spinlock is supposed to be locked.
- *
- * @param unlock IRQ spinlock to be unlocked.
- * @param lock IRQ spinlock to be locked.
- *
- */
-static inline void irq_spinlock_pass(irq_spinlock_t *unlock,
-    irq_spinlock_t *lock)
-{
-    ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
-
-    /* Pass guard from unlock to lock */
-    bool guard = unlock->guard;
-    ipl_t ipl = unlock->ipl;
-    unlock->guard = false;
-
-    spinlock_unlock(&(unlock->lock));
-    spinlock_lock(&(lock->lock));
-
-    ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-
-    if (guard) {
-        lock->guard = true;
-        lock->ipl = ipl;
-    }
-}
-
-/** Hand-over-hand locking of interrupts-disabled spinlocks
- *
- * Implement hand-over-hand locking between two interrupts-disabled
- * spinlocks without enabling interrupts during the process.
- *
- * The first IRQ spinlock is supposed to be locked.
- *
- * @param unlock IRQ spinlock to be unlocked.
- * @param lock IRQ spinlock to be locked.
- *
- */
-static inline void irq_spinlock_exchange(irq_spinlock_t *unlock,
-    irq_spinlock_t *lock)
-{
-    ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
-
-    spinlock_lock(&(lock->lock));
-    ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
-
-    /* Pass guard from unlock to lock */
-    if (unlock->guard) {
-        lock->guard = true;
-        lock->ipl = unlock->ipl;
-        unlock->guard = false;
-    }
-
-    spinlock_unlock(&(unlock->lock));
-}
+#endif

 #endif
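The diff is easier to follow with the calling convention of the interrupts-disabled spinlock API in mind. Below is a minimal usage sketch, assuming a HelenOS kernel compilation unit in which the r8aa9265 side of this header is available; the lock name and the critical-section contents are hypothetical and are not part of the changeset.

#include <synch/spinlock.h>

/* Hypothetical statically allocated IRQ spinlock (name made up for the example). */
IRQ_SPINLOCK_STATIC_INITIALIZE(example_lock);

static void example_update(void)
{
    /* true: disable interrupts here and remember the previous interrupt level. */
    irq_spinlock_lock(&example_lock, true);

    /* ... update data shared with interrupt handlers ... */

    /* true: restore the interrupt level saved by the matching lock call. */
    irq_spinlock_unlock(&example_lock, true);
}

Passing false instead leaves the interrupt state untouched, which is what a caller that already runs with interrupts disabled (for example because it already holds another IRQ spinlock) is expected to do.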
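For the guard hand-over performed by irq_spinlock_pass() and irq_spinlock_exchange(), a self-contained sketch may help. The following user-space C program only mirrors the bookkeeping: toy_irq_spinlock_t, fake_disable() and fake_restore() are stand-ins invented here (the kernel versions use spinlock_t, interrupts_disable() and interrupts_restore()), and the ASSERT_IRQ_SPINLOCK checks are omitted.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int ipl_t;          /* stand-in for the kernel's ipl_t */

typedef struct {
    atomic_flag locked;              /* stand-in for SPINLOCK_DECLARE(lock) */
    bool guard;                      /* true if this lock holds a saved ipl */
    ipl_t ipl;                       /* interrupt level saved at lock time */
} toy_irq_spinlock_t;

static ipl_t fake_disable(void) { return 1; }        /* pretend previous ipl was 1 */
static void fake_restore(ipl_t ipl) { (void) ipl; }  /* pretend to restore it */

static void toy_lock(toy_irq_spinlock_t *l)
{
    while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire))
        ;  /* spin */
}

static void toy_unlock(toy_irq_spinlock_t *l)
{
    atomic_flag_clear_explicit(&l->locked, memory_order_release);
}

/* Hand-over-hand, like irq_spinlock_exchange(): take the new lock first,
   move the guard flag and saved ipl across, then release the old lock. */
static void toy_exchange(toy_irq_spinlock_t *unlock, toy_irq_spinlock_t *lock)
{
    toy_lock(lock);

    if (unlock->guard) {
        lock->guard = true;
        lock->ipl = unlock->ipl;
        unlock->guard = false;
    }

    toy_unlock(unlock);
}

int main(void)
{
    toy_irq_spinlock_t a = { ATOMIC_FLAG_INIT, false, 0 };
    toy_irq_spinlock_t b = { ATOMIC_FLAG_INIT, false, 0 };

    /* Like irq_spinlock_lock(&a, true): "disable interrupts", lock, remember ipl. */
    ipl_t ipl = fake_disable();
    toy_lock(&a);
    a.guard = true;
    a.ipl = ipl;

    /* Move from a to b without ever "enabling interrupts" in between. */
    toy_exchange(&a, &b);
    assert(!a.guard && b.guard);

    /* Like irq_spinlock_unlock(&b, true): unlock, then restore the saved ipl. */
    b.guard = false;
    ipl = b.ipl;
    toy_unlock(&b);
    fake_restore(ipl);

    printf("guard and saved ipl travelled from a to b\n");
    return 0;
}

The point of the guard flag is that exactly one lock in the chain is responsible for restoring the original interrupt level, no matter how many times the lock is handed over.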