Changeset 64e9cf4 in mainline
- Timestamp:
- 2023-02-02T22:23:23Z (2 years ago)
- Branches:
- master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8addb24a
- Parents:
- f114d40 (diff), b076dfb (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- kernel
- Files:
-
- 1 added
- 13 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/include/arch/asm.h
rf114d40 r64e9cf4 58 58 } 59 59 } 60 61 #define ARCH_SPIN_HINT() asm volatile ("pause\n") 60 62 61 63 /** Byte from port -
kernel/arch/arm32/include/arch/asm.h
rf114d40 r64e9cf4 65 65 } 66 66 67 #ifdef PROCESSOR_ARCH_armv7_a 68 #define ARCH_SPIN_HINT() asm volatile ("yield") 69 #endif 70 67 71 _NO_TRACE static inline void pio_write_8(ioport8_t *port, uint8_t v) 68 72 { -
kernel/arch/arm64/include/arch/asm.h
rf114d40 r64e9cf4 60 60 ; 61 61 } 62 63 #define ARCH_SPIN_HINT() asm volatile ("yield") 62 64 63 65 /** Output byte to port. -
kernel/arch/ia32/include/arch/asm.h
rf114d40 r64e9cf4 63 63 ); 64 64 } 65 66 #define ARCH_SPIN_HINT() asm volatile ("pause\n") 65 67 66 68 #define GEN_READ_REG(reg) _NO_TRACE static inline sysarg_t read_ ##reg (void) \ -
kernel/arch/mips32/src/mips32.c
rf114d40 r64e9cf4 41 41 #include <str.h> 42 42 #include <mem.h> 43 #include <preemption.h> 43 44 #include <userspace.h> 44 45 #include <stdbool.h> -
kernel/arch/ppc32/src/mm/frame.c
rf114d40 r64e9cf4 33 33 */ 34 34 35 #include <arch/asm.h> 35 36 #include <arch/boot/boot.h> 36 37 #include <arch/mm/frame.h> -
kernel/generic/include/arch.h
rf114d40 r64e9cf4 75 75 typedef struct { 76 76 size_t preemption; /**< Preemption disabled counter and flag. */ 77 size_t mutex_locks; 77 78 struct thread *thread; /**< Current thread. */ 78 79 struct task *task; /**< Current task. */ -
kernel/generic/include/mm/tlb.h
rf114d40 r64e9cf4 36 36 #define KERN_TLB_H_ 37 37 38 #include <arch/asm.h> 38 39 #include <arch/mm/asid.h> 39 40 #include <typedefs.h> -
kernel/generic/include/synch/spinlock.h
rf114d40 r64e9cf4 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2023 Jiří Zárevúcky 3 4 * All rights reserved. 4 5 * … … 36 37 #define KERN_SPINLOCK_H_ 37 38 38 #include <assert.h>39 39 #include <stdatomic.h> 40 40 #include <stdbool.h> 41 #include <preemption.h>42 #include <arch/asm.h>43 41 44 #ifdef CONFIG_SMP 42 #include <arch/types.h> 43 #include <assert.h> 45 44 46 typedef struct spinlock { 47 atomic_flag flag; 45 #define DEADLOCK_THRESHOLD 100000000 48 46 49 #ifdef CONFIG_DEBUG_SPINLOCK 50 const char *name; 51 #endif /* CONFIG_DEBUG_SPINLOCK */ 52 } spinlock_t; 53 54 /* 55 * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks, 56 * where the lock gets initialized in run time. 57 */ 58 #define SPINLOCK_DECLARE(lock_name) spinlock_t lock_name 59 #define SPINLOCK_EXTERN(lock_name) extern spinlock_t lock_name 60 61 /* 62 * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used 63 * for statically allocated spinlocks. They declare (either as global 64 * or static) symbol and initialize the lock. 
65 */ 66 #ifdef CONFIG_DEBUG_SPINLOCK 67 68 #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \ 69 spinlock_t lock_name = { \ 70 .name = desc_name, \ 71 .flag = ATOMIC_FLAG_INIT \ 72 } 73 74 #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \ 75 static spinlock_t lock_name = { \ 76 .name = desc_name, \ 77 .flag = ATOMIC_FLAG_INIT \ 78 } 79 80 #define ASSERT_SPINLOCK(expr, lock) \ 81 assert_verbose(expr, (lock)->name) 82 83 #define spinlock_lock(lock) spinlock_lock_debug((lock)) 84 #define spinlock_unlock(lock) spinlock_unlock_debug((lock)) 85 86 #else /* CONFIG_DEBUG_SPINLOCK */ 87 88 #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \ 89 spinlock_t lock_name = { \ 90 .flag = ATOMIC_FLAG_INIT \ 91 } 92 93 #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \ 94 static spinlock_t lock_name = { \ 95 .flag = ATOMIC_FLAG_INIT \ 96 } 97 98 #define ASSERT_SPINLOCK(expr, lock) \ 99 assert(expr) 100 101 /** Acquire spinlock 102 * 103 * @param lock Pointer to spinlock_t structure. 104 */ 105 _NO_TRACE static inline void spinlock_lock(spinlock_t *lock) 106 { 107 preemption_disable(); 108 while (atomic_flag_test_and_set_explicit(&lock->flag, 109 memory_order_acquire)) 110 ; 111 } 112 113 /** Release spinlock 114 * 115 * @param lock Pointer to spinlock_t structure. 
116 */ 117 _NO_TRACE static inline void spinlock_unlock(spinlock_t *lock) 118 { 119 atomic_flag_clear_explicit(&lock->flag, memory_order_release); 120 preemption_enable(); 121 } 122 123 #endif /* CONFIG_DEBUG_SPINLOCK */ 124 125 #define SPINLOCK_INITIALIZE(lock_name) \ 126 SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name) 127 128 #define SPINLOCK_STATIC_INITIALIZE(lock_name) \ 129 SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name) 130 131 extern void spinlock_initialize(spinlock_t *, const char *); 132 extern bool spinlock_trylock(spinlock_t *); 133 extern void spinlock_lock_debug(spinlock_t *); 134 extern void spinlock_unlock_debug(spinlock_t *); 135 extern bool spinlock_locked(spinlock_t *); 136 137 #ifdef CONFIG_DEBUG_SPINLOCK 47 #if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK) 138 48 139 49 #include <log.h> 140 141 #define DEADLOCK_THRESHOLD 100000000142 50 143 51 #define DEADLOCK_PROBE_INIT(pname) size_t pname = 0 … … 159 67 #endif /* CONFIG_DEBUG_SPINLOCK */ 160 68 161 #else /* CONFIG_SMP */ 69 typedef struct spinlock { 70 #ifdef CONFIG_SMP 71 atomic_flag flag; 162 72 163 /* On UP systems, spinlocks are effectively left out. */ 73 #ifdef CONFIG_DEBUG_SPINLOCK 74 const char *name; 75 #endif /* CONFIG_DEBUG_SPINLOCK */ 76 #endif 77 } spinlock_t; 164 78 165 /* Allow the use of spinlock_t as an incomplete type. */ 166 typedef struct spinlock spinlock_t; 79 /* 80 * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks, 81 * where the lock gets initialized in run time. 
82 */ 83 #define SPINLOCK_DECLARE(lock_name) spinlock_t lock_name 84 #define SPINLOCK_EXTERN(lock_name) extern spinlock_t lock_name 167 85 168 #define SPINLOCK_DECLARE(name) 169 #define SPINLOCK_EXTERN(name) 86 #ifdef CONFIG_SMP 87 #ifdef CONFIG_DEBUG_SPINLOCK 88 #define SPINLOCK_INITIALIZER(desc_name) { .name = (desc_name), .flag = ATOMIC_FLAG_INIT } 89 #else 90 #define SPINLOCK_INITIALIZER(desc_name) { .flag = ATOMIC_FLAG_INIT } 91 #endif 92 #else 93 #define SPINLOCK_INITIALIZER(desc_name) {} 94 #endif 170 95 171 #define SPINLOCK_INITIALIZE(name) 172 #define SPINLOCK_STATIC_INITIALIZE(name) 96 /* 97 * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used 98 * for statically allocated spinlocks. They declare (either as global 99 * or static) symbol and initialize the lock. 100 */ 101 #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \ 102 spinlock_t lock_name = SPINLOCK_INITIALIZER(desc_name) 173 103 174 #define SPINLOCK_ INITIALIZE_NAME(name, desc_name)175 #define SPINLOCK_STATIC_INITIALIZE_NAME(name,desc_name)104 #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \ 105 static spinlock_t lock_name = SPINLOCK_INITIALIZER(desc_name) 176 106 177 #define ASSERT_SPINLOCK(expr, lock) assert(expr) 107 #if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK) 108 #define ASSERT_SPINLOCK(expr, lock) assert_verbose(expr, (lock)->name) 109 #else /* CONFIG_DEBUG_SPINLOCK */ 110 #define ASSERT_SPINLOCK(expr, lock) assert(expr) 111 #endif /* CONFIG_DEBUG_SPINLOCK */ 178 112 179 #define spinlock_initialize(lock, name) 113 #define SPINLOCK_INITIALIZE(lock_name) \ 114 SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name) 180 115 181 #define spinlock_lock(lock) preemption_disable() 182 #define spinlock_trylock(lock) ({ preemption_disable(); 1; }) 183 #define spinlock_unlock(lock) preemption_enable() 184 #define spinlock_locked(lock) 1 185 #define spinlock_unlocked(lock) 1 116 #define SPINLOCK_STATIC_INITIALIZE(lock_name) \ 117 
SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name) 186 118 187 #define DEADLOCK_PROBE_INIT(pname) 188 #define DEADLOCK_PROBE(pname, value) 189 190 #endif /* CONFIG_SMP */ 119 extern void spinlock_initialize(spinlock_t *, const char *); 120 extern bool spinlock_trylock(spinlock_t *); 121 extern void spinlock_lock(spinlock_t *); 122 extern void spinlock_unlock(spinlock_t *); 123 extern bool spinlock_locked(spinlock_t *); 191 124 192 125 typedef struct { 193 SPINLOCK_DECLARE(lock); /**< Spinlock */ 194 bool guard; /**< Flag whether ipl is valid */ 195 ipl_t ipl; /**< Original interrupt level */ 126 spinlock_t lock; /**< Spinlock */ 127 bool guard; /**< Flag whether ipl is valid */ 128 ipl_t ipl; /**< Original interrupt level */ 129 #ifdef CONFIG_DEBUG_SPINLOCK 130 _Atomic(struct cpu *) owner; /**< Which cpu currently owns this lock */ 131 #endif 196 132 } irq_spinlock_t; 197 133 … … 199 135 #define IRQ_SPINLOCK_EXTERN(lock_name) extern irq_spinlock_t lock_name 200 136 201 #ifdef CONFIG_SMP202 203 137 #define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \ 204 138 ASSERT_SPINLOCK(expr, &((irq_lock)->lock)) 139 140 #define IRQ_SPINLOCK_INITIALIZER(desc_name) \ 141 { \ 142 .lock = SPINLOCK_INITIALIZER(desc_name), \ 143 .guard = false, \ 144 .ipl = 0, \ 145 } 205 146 206 147 /* … … 209 150 * as global or static symbol) and initialize the lock. 
210 151 */ 211 #ifdef CONFIG_DEBUG_SPINLOCK212 213 152 #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \ 214 irq_spinlock_t lock_name = { \ 215 .lock = { \ 216 .name = desc_name, \ 217 .flag = ATOMIC_FLAG_INIT \ 218 }, \ 219 .guard = false, \ 220 .ipl = 0 \ 221 } 153 irq_spinlock_t lock_name = IRQ_SPINLOCK_INITIALIZER(desc_name) 222 154 223 155 #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \ 224 static irq_spinlock_t lock_name = { \ 225 .lock = { \ 226 .name = desc_name, \ 227 .flag = ATOMIC_FLAG_INIT \ 228 }, \ 229 .guard = false, \ 230 .ipl = 0 \ 231 } 232 233 #else /* CONFIG_DEBUG_SPINLOCK */ 234 235 #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \ 236 irq_spinlock_t lock_name = { \ 237 .lock = { \ 238 .flag = ATOMIC_FLAG_INIT \ 239 }, \ 240 .guard = false, \ 241 .ipl = 0 \ 242 } 243 244 #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \ 245 static irq_spinlock_t lock_name = { \ 246 .lock = { \ 247 .flag = ATOMIC_FLAG_INIT \ 248 }, \ 249 .guard = false, \ 250 .ipl = 0 \ 251 } 252 253 #endif /* CONFIG_DEBUG_SPINLOCK */ 254 255 #else /* CONFIG_SMP */ 256 257 /* 258 * Since the spinlocks are void on UP systems, we also need 259 * to have a special variant of interrupts-disabled spinlock 260 * macros which take this into account. 261 */ 262 263 #define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \ 264 ASSERT_SPINLOCK(expr, NULL) 265 266 #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \ 267 irq_spinlock_t lock_name = { \ 268 .guard = false, \ 269 .ipl = 0 \ 270 } 271 272 #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \ 273 static irq_spinlock_t lock_name = { \ 274 .guard = false, \ 275 .ipl = 0 \ 276 } 277 278 #endif /* CONFIG_SMP */ 156 static irq_spinlock_t lock_name = IRQ_SPINLOCK_INITIALIZER(desc_name) 279 157 280 158 #define IRQ_SPINLOCK_INITIALIZE(lock_name) \ -
kernel/generic/meson.build
rf114d40 r64e9cf4 101 101 'src/smp/smp.c', 102 102 'src/synch/condvar.c', 103 'src/synch/irq_spinlock.c', 103 104 'src/synch/mutex.c', 104 105 'src/synch/semaphore.c', -
kernel/generic/src/synch/spinlock.c
rf114d40 r64e9cf4 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2023 Jiří Zárevúcky 3 4 * All rights reserved. 4 5 * … … 36 37 */ 37 38 39 #include <arch/asm.h> 38 40 #include <synch/spinlock.h> 39 41 #include <atomic.h> … … 47 49 #include <cpu.h> 48 50 49 #ifdef CONFIG_SMP 51 #ifndef ARCH_SPIN_HINT 52 #define ARCH_SPIN_HINT() ((void)0) 53 #endif 50 54 51 55 /** Initialize spinlock … … 56 60 void spinlock_initialize(spinlock_t *lock, const char *name) 57 61 { 62 #ifdef CONFIG_SMP 58 63 atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed); 59 64 #ifdef CONFIG_DEBUG_SPINLOCK 60 65 lock->name = name; 61 66 #endif 67 #endif 62 68 } 63 69 64 #ifdef CONFIG_DEBUG_SPINLOCK65 66 70 /** Lock spinlock 67 *68 * Lock spinlock.69 * This version has limitted ability to report70 * possible occurence of deadlock.71 71 * 72 72 * @param lock Pointer to spinlock_t structure. 73 73 * 74 74 */ 75 void spinlock_lock _debug(spinlock_t *lock)75 void spinlock_lock(spinlock_t *lock) 76 76 { 77 preemption_disable(); 78 79 #ifdef CONFIG_SMP 80 bool deadlock_reported = false; 77 81 size_t i = 0; 78 bool deadlock_reported = false;79 82 80 preemption_disable();81 83 while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) { 84 ARCH_SPIN_HINT(); 85 86 #ifdef CONFIG_DEBUG_SPINLOCK 82 87 /* 83 88 * We need to be careful about particular locks … … 111 116 deadlock_reported = true; 112 117 } 118 #endif 113 119 } 120 121 /* Avoid compiler warning with debug disabled. */ 122 (void) i; 114 123 115 124 if (deadlock_reported) 116 125 printf("cpu%u: not deadlocked\n", CPU->id); 126 127 #endif 117 128 } 118 129 119 130 /** Unlock spinlock 120 131 * 121 * Unlock spinlock.122 *123 132 * @param sl Pointer to spinlock_t structure. 
124 133 */ 125 void spinlock_unlock _debug(spinlock_t *lock)134 void spinlock_unlock(spinlock_t *lock) 126 135 { 136 #ifdef CONFIG_SMP 137 #ifdef CONFIG_DEBUG_SPINLOCK 127 138 ASSERT_SPINLOCK(spinlock_locked(lock), lock); 139 #endif 128 140 129 141 atomic_flag_clear_explicit(&lock->flag, memory_order_release); 142 #endif 143 130 144 preemption_enable(); 131 145 } 132 146 133 #endif 134 135 /** Lock spinlock conditionally 136 * 147 /** 137 148 * Lock spinlock conditionally. If the spinlock is not available 138 149 * at the moment, signal failure. … … 140 151 * @param lock Pointer to spinlock_t structure. 141 152 * 142 * @return Zero on failure, non-zero otherwise.153 * @return true on success. 143 154 * 144 155 */ … … 146 157 { 147 158 preemption_disable(); 159 160 #ifdef CONFIG_SMP 148 161 bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire); 149 162 … … 152 165 153 166 return ret; 167 #else 168 return true; 169 #endif 154 170 } 155 171 … … 161 177 bool spinlock_locked(spinlock_t *lock) 162 178 { 179 #ifdef CONFIG_SMP 163 180 // NOTE: Atomic flag doesn't support simple atomic read (by design), 164 181 // so instead we test_and_set and then clear if necessary. 
… … 170 187 atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed); 171 188 return ret; 172 } 173 189 #else 190 return true; 174 191 #endif 175 176 /** Initialize interrupts-disabled spinlock177 *178 * @param lock IRQ spinlock to be initialized.179 * @param name IRQ spinlock name.180 *181 */182 void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)183 {184 spinlock_initialize(&(lock->lock), name);185 lock->guard = false;186 lock->ipl = 0;187 }188 189 /** Lock interrupts-disabled spinlock190 *191 * Lock a spinlock which requires disabled interrupts.192 *193 * @param lock IRQ spinlock to be locked.194 * @param irq_dis If true, disables interrupts before locking the spinlock.195 * If false, interrupts are expected to be already disabled.196 *197 */198 void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)199 {200 if (irq_dis) {201 ipl_t ipl = interrupts_disable();202 spinlock_lock(&(lock->lock));203 204 lock->guard = true;205 lock->ipl = ipl;206 } else {207 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);208 209 spinlock_lock(&(lock->lock));210 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);211 }212 }213 214 /** Unlock interrupts-disabled spinlock215 *216 * Unlock a spinlock which requires disabled interrupts.217 *218 * @param lock IRQ spinlock to be unlocked.219 * @param irq_res If true, interrupts are restored to previously220 * saved interrupt level.221 *222 */223 void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)224 {225 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);226 227 if (irq_res) {228 ASSERT_IRQ_SPINLOCK(lock->guard, lock);229 230 lock->guard = false;231 ipl_t ipl = lock->ipl;232 233 spinlock_unlock(&(lock->lock));234 interrupts_restore(ipl);235 } else {236 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);237 spinlock_unlock(&(lock->lock));238 }239 }240 241 /** Lock interrupts-disabled spinlock242 *243 * Lock an interrupts-disabled spinlock conditionally. 
If the244 * spinlock is not available at the moment, signal failure.245 * Interrupts are expected to be already disabled.246 *247 * @param lock IRQ spinlock to be locked conditionally.248 *249 * @return Zero on failure, non-zero otherwise.250 *251 */252 bool irq_spinlock_trylock(irq_spinlock_t *lock)253 {254 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);255 bool ret = spinlock_trylock(&(lock->lock));256 257 ASSERT_IRQ_SPINLOCK((!ret) || (!lock->guard), lock);258 return ret;259 }260 261 /** Pass lock from one interrupts-disabled spinlock to another262 *263 * Pass lock from one IRQ spinlock to another IRQ spinlock264 * without enabling interrupts during the process.265 *266 * The first IRQ spinlock is supposed to be locked.267 *268 * @param unlock IRQ spinlock to be unlocked.269 * @param lock IRQ spinlock to be locked.270 *271 */272 void irq_spinlock_pass(irq_spinlock_t *unlock, irq_spinlock_t *lock)273 {274 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);275 276 /* Pass guard from unlock to lock */277 bool guard = unlock->guard;278 ipl_t ipl = unlock->ipl;279 unlock->guard = false;280 281 spinlock_unlock(&(unlock->lock));282 spinlock_lock(&(lock->lock));283 284 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);285 286 if (guard) {287 lock->guard = true;288 lock->ipl = ipl;289 }290 }291 292 /** Hand-over-hand locking of interrupts-disabled spinlocks293 *294 * Implement hand-over-hand locking between two interrupts-disabled295 * spinlocks without enabling interrupts during the process.296 *297 * The first IRQ spinlock is supposed to be locked.298 *299 * @param unlock IRQ spinlock to be unlocked.300 * @param lock IRQ spinlock to be locked.301 *302 */303 void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)304 {305 ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);306 307 spinlock_lock(&(lock->lock));308 ASSERT_IRQ_SPINLOCK(!lock->guard, lock);309 310 /* Pass guard from unlock to lock */311 if (unlock->guard) {312 lock->guard = true;313 lock->ipl = 
unlock->ipl;314 unlock->guard = false;315 }316 317 spinlock_unlock(&(unlock->lock));318 }319 320 /** Find out whether the IRQ spinlock is currently locked.321 *322 * @param lock IRQ spinlock.323 * @return True if the IRQ spinlock is locked, false otherwise.324 */325 bool irq_spinlock_locked(irq_spinlock_t *ilock)326 {327 return spinlock_locked(&ilock->lock);328 192 } 329 193 -
kernel/generic/src/synch/waitq.c
rf114d40 r64e9cf4 48 48 #include <synch/waitq.h> 49 49 #include <synch/spinlock.h> 50 #include <preemption.h> 50 51 #include <proc/thread.h> 51 52 #include <proc/scheduler.h> -
kernel/generic/src/time/clock.c
rf114d40 r64e9cf4 59 59 #include <ddi/ddi.h> 60 60 #include <arch/cycle.h> 61 #include <preemption.h> 61 62 62 63 /* Pointer to variable with uptime */
Note:
See TracChangeset
for help on using the changeset viewer.