Changeset 05e2a7ad in mainline
- Timestamp:
- 2005-12-07T13:32:31Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 839470f
- Parents:
- 253f8590
- Files:
- 8 edited
arch/ia32/include/atomic.h
--- arch/ia32/include/atomic.h (r253f8590)
+++ arch/ia32/include/atomic.h (r05e2a7ad)
@@ -54,6 +54,6 @@
 	atomic_t r;
 	__asm__ volatile (
-		"movl $1, %0;"
-		"lock xaddl %0, %1;"
+		"movl $1, %0\n"
+		"lock xaddl %0, %1\n"
 		: "=r"(r), "=m" (*val)
 	);
@@ -67,6 +67,6 @@
 	atomic_t r;
 	__asm__ volatile (
-		"movl $-1, %0;"
-		"lock xaddl %0, %1;"
+		"movl $-1, %0\n"
+		"lock xaddl %0, %1\n"
 		: "=r"(r), "=m" (*val)
 	);
@@ -76,6 +76,4 @@
 #define atomic_inc_post(val) (atomic_inc_pre(val)+1)
 #define atomic_dec_post(val) (atomic_dec_pre(val)-1)
-
-
 
 static inline int test_and_set(volatile int *val) {
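The hunks above only swap the ";" statement separators for "\n" so that each instruction in the asm template is emitted on its own line. The underlying idiom is x86 fetch-and-add: "lock xaddl" atomically adds the register to the memory operand and leaves the operand's previous value in the register, which is what lets atomic_inc_pre() return the pre-increment value. A standalone sketch of the same idiom, assuming GCC-style inline assembly on a 32-bit x86 target (the function and variable names here are illustrative, not part of the changeset):

#include <stdio.h>

/* Atomically add 'add' to *val and return the value *val held
 * before the addition (fetch-and-add via lock xaddl). */
static inline int fetch_and_add(volatile int *val, int add)
{
	int r = add;
	__asm__ volatile (
		"lock xaddl %0, %1\n"	/* *val += r; r = old *val */
		: "+r" (r), "+m" (*val)
	);
	return r;
}

int main(void)
{
	volatile int counter = 41;
	int pre = fetch_and_add(&counter, 1);
	printf("pre = %d, post = %d\n", pre, counter);	/* pre = 41, post = 42 */
	return 0;
}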
generic/include/proc/thread.h
--- generic/include/proc/thread.h (r253f8590)
+++ generic/include/proc/thread.h (r05e2a7ad)
@@ -65,5 +65,10 @@
 	link_t threads_link;	/**< Link to the list of all threads. */
 
-	/* items below are protected by lock */
+	/** Lock protecting thread structure.
+	 *
+	 * Protects the whole thread structure except list links above.
+	 * Must be acquired before T.lock for each T of type task_t.
+	 *
+	 */
 	spinlock_t lock;
 
@@ -110,5 +115,12 @@
 };
 
-extern spinlock_t threads_lock;	/**< Lock protecting threads_head list. */
+/** Thread list lock.
+ *
+ * This lock protects all link_t structures chained in threads_head.
+ * Must be acquired before T.lock for each T of type thread_t.
+ *
+ */
+extern spinlock_t threads_lock;
+
 extern link_t threads_head;	/**< List of all threads in the system. */
 
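The new doc comments pin down a lock ordering: the list-level threads_lock is always taken before any per-thread lock. A sketch of a thread-list walk that obeys that order, assuming list_get_instance() is the kernel's link-to-enclosing-structure macro (the loop body and the function itself are illustrative):

/* Walk the global thread list under threads_lock, taking each
 * thread's own lock second, per the ordering documented above. */
void thread_walk(void)
{
	link_t *cur;

	spinlock_lock(&threads_lock);		/* list lock first */
	for (cur = threads_head.next; cur != &threads_head; cur = cur->next) {
		thread_t *t = list_get_instance(cur, thread_t, threads_link);

		spinlock_lock(&t->lock);	/* per-thread lock second */
		/* ... examine or update *t while both locks are held ... */
		spinlock_unlock(&t->lock);
	}
	spinlock_unlock(&threads_lock);
}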
generic/include/synch/waitq.h
--- generic/include/synch/waitq.h (r253f8590)
+++ generic/include/synch/waitq.h (r05e2a7ad)
@@ -39,6 +39,13 @@
 #define WAKEUP_ALL	1
 
+/** Wait queue structure. */
 struct waitq {
+
+	/** Lock protecting wait queue structure.
+	 *
+	 * Must be acquired before T.lock for each T of type thread_t.
+	 */
 	spinlock_t lock;
+
 	int missed_wakeups;	/**< Number of waitq_wakeup() calls that didn't find a thread to wake up. */
 	link_t head;		/**< List of sleeping threads for wich there was no missed_wakeup. */
@@ -52,6 +59,6 @@
 extern void waitq_initialize(waitq_t *wq);
 extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
-extern void waitq_wakeup(waitq_t *wq, int all);
-extern void _waitq_wakeup_unsafe(waitq_t *wq, int all);
+extern void waitq_wakeup(waitq_t *wq, bool all);
+extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
 
 #endif
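Changing all from int to bool is a readability fix; the prototypes above already show how the API pairs a sleeping side with a waking side. A minimal usage sketch, assuming the ESYNCH_TIMEOUT return code that appears elsewhere in this changeset (both routines and the timeout value are illustrative):

waitq_t wq;	/* set up once with waitq_initialize(&wq) */

/* Sleeping side: block for at most 1000 microseconds. */
void wait_for_event(void)
{
	int rc = waitq_sleep_timeout(&wq, 1000, 0);	/* 0: blocking allowed */
	if (rc == ESYNCH_TIMEOUT) {
		/* no wakeup arrived in time */
	}
}

/* Waking side: wake a single sleeper; pass true to wake them all. */
void signal_event(void)
{
	waitq_wakeup(&wq, false);
}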
generic/src/proc/scheduler.c
--- generic/src/proc/scheduler.c (r253f8590)
+++ generic/src/proc/scheduler.c (r05e2a7ad)
@@ -434,5 +434,5 @@
 
 	/*
-	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
+	 * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
 	 * and preemption counter. At this point THE could be coming either
 	 * from THREAD's or CPU's stack.
generic/src/proc/thread.c
--- generic/src/proc/thread.c (r253f8590)
+++ generic/src/proc/thread.c (r05e2a7ad)
@@ -55,6 +55,6 @@
 char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};	/**< Thread states */
 
-spinlock_t threads_lock;
-link_t threads_head;
+spinlock_t threads_lock;	/**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
+link_t threads_head;		/**< List of all threads. */
 
 static spinlock_t tidlock;
generic/src/synch/rwlock.c
--- generic/src/synch/rwlock.c (r253f8590)
+++ generic/src/synch/rwlock.c (r05e2a7ad)
@@ -27,7 +27,8 @@
  */
 
-
-/*
- * Reader/Writer locks
+/** Reader/Writer locks
+ *
+ * A reader/writer lock can be held by multiple readers at a time.
+ * Or it can be exclusively held by a sole writer at a time.
  */
 
@@ -76,5 +77,5 @@
  */
 void rwlock_initialize(rwlock_t *rwl) {
-	spinlock_initialize(&rwl->lock, "rwlock");
+	spinlock_initialize(&rwl->lock, "rwlock_t");
 	mutex_initialize(&rwl->exclusive);
 	rwl->readers_in = 0;
@@ -219,8 +220,8 @@
 		break;
 	case ESYNCH_OK_ATOMIC:
-		panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
+		panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
 		break;
 	dafault:
-		panic("invalid ESYNCH");
+		panic("invalid ESYNCH\n");
 		break;
 
@@ -284,5 +285,5 @@
 
 
-/** Direct handoff
+/** Direct handoff of reader/writer lock ownership.
  *
  * Direct handoff of reader/writer lock ownership
@@ -307,5 +308,5 @@
 	rwlock_type_t type = RWLOCK_NONE;
 	thread_t *t = NULL;
-	int one_more = 1;
+	bool one_more = true;
 
 	spinlock_lock(&rwl->exclusive.sem.wq.lock);
@@ -353,5 +354,5 @@
 		spinlock_lock(&t->lock);
 		if (t->rwlock_holder_type != RWLOCK_READER)
-			one_more = 0;
+			one_more = false;
 		spinlock_unlock(&t->lock);
 
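The rewritten block comment states the rwlock invariant: any number of concurrent readers, or exactly one writer. A usage sketch of that invariant, assuming the conventional rwlock_read_lock()/rwlock_write_lock() entry points, which are not shown in these hunks:

rwlock_t data_lock;	/* set up with rwlock_initialize(&data_lock) */
int shared_value;

void reader(void)
{
	rwlock_read_lock(&data_lock);	/* may run alongside other readers */
	int copy = shared_value;
	rwlock_read_unlock(&data_lock);
	(void) copy;
}

void writer(void)
{
	rwlock_write_lock(&data_lock);	/* excludes readers and other writers */
	shared_value++;
	rwlock_write_unlock(&data_lock);
}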
generic/src/synch/spinlock.c
--- generic/src/synch/spinlock.c (r253f8590)
+++ generic/src/synch/spinlock.c (r05e2a7ad)
@@ -63,7 +63,8 @@
 void spinlock_lock(spinlock_t *sl)
 {
-	int i = 0;
+	count_t i = 0;
 	__address caller = ((__address *) &sl)[-1];
 	char *symbol;
+	bool deadlock_reported = false;
 
 	preemption_disable();
@@ -77,6 +78,10 @@
 			printf("\n");
 			i = 0;
+			deadlock_reported = true;
 		}
 	}
+
+	if (deadlock_reported)
+		printf("cpu%d: not deadlocked\n", CPU->id);
 
 	/*
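The new deadlock_reported flag completes a watchdog pattern: the spin loop prints a possible-deadlock warning after spinning suspiciously long, and once the lock is finally acquired the warning is retracted with the new "not deadlocked" message. In outline, with the spin threshold and the lock's field name as illustrative assumptions:

void spinlock_lock_sketch(spinlock_t *sl)
{
	count_t i = 0;			/* wide counter, hence count_t */
	bool deadlock_reported = false;

	preemption_disable();
	while (test_and_set(&sl->val)) {	/* spin until acquired */
		if (i++ > DEADLOCK_THRESHOLD) {
			printf("cpu%d: looping on spinlock %p\n", CPU->id, sl);
			i = 0;
			deadlock_reported = true;
		}
	}

	/* The lock was obtained after all, so retract the warning. */
	if (deadlock_reported)
		printf("cpu%d: not deadlocked\n", CPU->id);
}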
generic/src/synch/waitq.c
--- generic/src/synch/waitq.c (r253f8590)
+++ generic/src/synch/waitq.c (r05e2a7ad)
@@ -34,4 +34,5 @@
 #include <arch/asm.h>
 #include <arch/types.h>
+#include <typedefs.h>
 #include <time/timeout.h>
 #include <arch.h>
@@ -68,5 +69,5 @@
 	thread_t *t = (thread_t *) data;
 	waitq_t *wq;
-	int do_wakeup = 0;
+	bool do_wakeup = false;
 
 	spinlock_lock(&threads_lock);
@@ -76,13 +77,13 @@
 grab_locks:
 	spinlock_lock(&t->lock);
-	if (wq = t->sleep_queue) {
+	if (wq = t->sleep_queue) {		/* assignment */
 		if (!spinlock_trylock(&wq->lock)) {
 			spinlock_unlock(&t->lock);
-			goto grab_locks;
+			goto grab_locks;	/* avoid deadlock */
 		}
 
 		list_remove(&t->wq_link);
 		t->saved_context = t->sleep_timeout_context;
-		do_wakeup = 1;
+		do_wakeup = true;
 
 		spinlock_unlock(&wq->lock);
@@ -90,8 +91,9 @@
 	}
 
-	t->timeout_pending = 0;
+	t->timeout_pending = false;
 	spinlock_unlock(&t->lock);
 
-	if (do_wakeup) thread_ready(t);
+	if (do_wakeup)
+		thread_ready(t);
 
 out:
@@ -194,5 +196,5 @@
 			return ESYNCH_TIMEOUT;
 		}
-		THREAD->timeout_pending = 1;
+		THREAD->timeout_pending = true;
 		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
 	}
@@ -228,5 +230,5 @@
  * will be woken up and missed count will be zeroed.
  */
-void waitq_wakeup(waitq_t *wq, int all)
+void waitq_wakeup(waitq_t *wq, bool all)
 {
 	ipl_t ipl;
@@ -251,5 +253,5 @@
  * will be woken up and missed count will be zeroed.
  */
-void _waitq_wakeup_unsafe(waitq_t *wq, int all)
+void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
 {
 	thread_t *t;
@@ -258,5 +260,6 @@
 	if (list_empty(&wq->head)) {
 		wq->missed_wakeups++;
-		if (all) wq->missed_wakeups = 0;
+		if (all)
+			wq->missed_wakeups = 0;
 		return;
 	}
@@ -267,5 +270,5 @@
 	spinlock_lock(&t->lock);
 	if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
-		t->timeout_pending = 0;
+		t->timeout_pending = false;
 	t->sleep_queue = NULL;
 	spinlock_unlock(&t->lock);
@@ -273,4 +276,5 @@
 	thread_ready(t);
 
-	if (all) goto loop;
-}
+	if (all)
+		goto loop;
+}
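The /* avoid deadlock */ comment added above marks the classic out-of-order acquisition escape: waitq_interrupted_sleep() holds t->lock but needs wq->lock, while sleepers acquire the same two locks in the opposite order, so the handler only tries the second lock and restarts from scratch on failure. The pattern reduced to its essentials (names mirror the hunk above; the function wrapper is illustrative):

/* Sketch: take a thread's lock and then its wait queue's lock without
 * deadlocking against code that acquires them in the opposite order. */
void lock_thread_and_queue(thread_t *t)
{
	waitq_t *wq;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue) != NULL) {	/* plain assignment-in-if in the original */
		if (!spinlock_trylock(&wq->lock)) {
			/* wq->lock is held by someone who may be waiting
			 * for t->lock; drop everything and start over. */
			spinlock_unlock(&t->lock);
			goto grab_locks;
		}
		/* ... both locks held; safe to unlink the thread ... */
		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);
}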