00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00045 #include <synch/waitq.h>
00046 #include <synch/synch.h>
00047 #include <synch/spinlock.h>
00048 #include <proc/thread.h>
00049 #include <proc/scheduler.h>
00050 #include <arch/asm.h>
00051 #include <arch/types.h>
00052 #include <typedefs.h>
00053 #include <time/timeout.h>
00054 #include <arch.h>
00055 #include <context.h>
00056 #include <adt/list.h>
00057
00058 static void waitq_timeouted_sleep(void *data);
00059
00066 void waitq_initialize(waitq_t *wq)
00067 {
00068 spinlock_initialize(&wq->lock, "waitq_lock");
00069 list_initialize(&wq->head);
00070 wq->missed_wakeups = 0;
00071 }
00072
/** Handle timeout during waitq_sleep_timeout() call.
 *
 * Timeout handler registered by waitq_sleep_timeout_unsafe(). It tries
 * to remove 'its' thread from the wait queue; if the thread has already
 * been woken up (or is being woken up concurrently), it behaves as if
 * no timeout had occurred. NOTE(review): presumably invoked by the
 * timeout subsystem with interrupts disabled (unlike
 * waitq_interrupt_sleep(), it does not disable them itself) — confirm.
 *
 * @param data Pointer to the thread that requested the timed sleep.
 */
void waitq_timeouted_sleep(void *data)
{
	thread_t *t = (thread_t *) data;
	waitq_t *wq;
	bool do_wakeup = false;

	/* Holding threads_lock prevents t from being destroyed under us. */
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment, not comparison */
		/*
		 * We hold t->lock but still need wq->lock; other code
		 * paths take wq->lock first. Trylock and restart to
		 * avoid deadlock.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;
		}

		list_remove(&t->wq_link);
		/* Make the sleeper resume in its timeout context. */
		t->saved_context = t->sleep_timeout_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}

	/* The timeout has now fired in either case. */
	t->timeout_pending = false;
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
}
00119
/** Interrupt sleeping thread.
 *
 * Prematurely wake a thread sleeping in a wait queue, making it resume
 * in its sleep_interruption_context. Has effect only if the thread
 * registered itself as interruptible (t->sleep_interruptible); otherwise
 * the thread is left sleeping.
 *
 * @param t Thread to be interrupted.
 */
void waitq_interrupt_sleep(thread_t *t)
{
	waitq_t *wq;
	bool do_wakeup = false;
	ipl_t ipl;

	ipl = interrupts_disable();
	/* Holding threads_lock prevents t from being destroyed under us. */
	spinlock_lock(&threads_lock);
	if (!thread_exists(t))
		goto out;

grab_locks:
	spinlock_lock(&t->lock);
	if ((wq = t->sleep_queue)) {		/* assignment, not comparison */
		if (!(t->sleep_interruptible)) {
			/*
			 * The sleep cannot be interrupted; leave the
			 * thread alone.
			 */
			spinlock_unlock(&t->lock);
			goto out;
		}

		/*
		 * We hold t->lock but still need wq->lock; other code
		 * paths take wq->lock first. Trylock and restart to
		 * avoid deadlock.
		 */
		if (!spinlock_trylock(&wq->lock)) {
			spinlock_unlock(&t->lock);
			goto grab_locks;
		}

		/* Cancel a still-pending sleep timeout, if any. */
		if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
			t->timeout_pending = false;

		list_remove(&t->wq_link);
		/* Make the sleeper resume in its interruption context. */
		t->saved_context = t->sleep_interruption_context;
		do_wakeup = true;
		t->sleep_queue = NULL;
		spinlock_unlock(&wq->lock);
	}
	spinlock_unlock(&t->lock);

	if (do_wakeup)
		thread_ready(t);

out:
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}
00172
00217 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags)
00218 {
00219 ipl_t ipl;
00220 int rc;
00221
00222 ipl = waitq_sleep_prepare(wq);
00223 rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
00224 waitq_sleep_finish(wq, rc, ipl);
00225 return rc;
00226 }
00227
/** Prepare to sleep in a wait queue.
 *
 * Disables interrupts and locks wq->lock so that the subsequent
 * waitq_sleep_timeout_unsafe() runs atomically with respect to wakeups.
 * The returned interrupt level must later be passed to
 * waitq_sleep_finish().
 *
 * @param wq Wait queue.
 *
 * @return Interrupt level as it existed on entry to this function.
 */
ipl_t waitq_sleep_prepare(waitq_t *wq)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();

	if (THREAD) {	/* THREAD can apparently be NULL here (e.g. early boot) — the guard implies so */
		/*
		 * If a timeout from a previous sleep is still pending,
		 * let it fire first: briefly restore interrupts and
		 * retry, so that its handler cannot interfere with the
		 * sleep we are about to begin.
		 */
		spinlock_lock(&THREAD->lock);
		if (THREAD->timeout_pending) {
			spinlock_unlock(&THREAD->lock);
			interrupts_restore(ipl);
			goto restart;
		}
		spinlock_unlock(&THREAD->lock);
	}

	spinlock_lock(&wq->lock);
	return ipl;
}
00264
00275 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
00276 {
00277 switch (rc) {
00278 case ESYNCH_WOULD_BLOCK:
00279 case ESYNCH_OK_ATOMIC:
00280 spinlock_unlock(&wq->lock);
00281 break;
00282 default:
00283 break;
00284 }
00285 interrupts_restore(ipl);
00286 }
00287
/** Internal implementation of waitq_sleep_timeout().
 *
 * Expects wq->lock to be held and interrupts disabled, as arranged by
 * waitq_sleep_prepare(). On the paths that return without blocking,
 * wq->lock is either left held (for waitq_sleep_finish() to release) or
 * released here explicitly; on the blocking path the lock is handed off
 * to the scheduler.
 *
 * @param wq    Wait queue to sleep in.
 * @param usec  Timeout in microseconds; 0 means no timeout.
 * @param flags SYNCH_FLAGS_* bits (non-blocking, interruptible).
 *
 * @return ESYNCH_OK_ATOMIC (missed wakeup consumed, no sleep),
 *         ESYNCH_WOULD_BLOCK (non-blocking request, nothing available),
 *         ESYNCH_INTERRUPTED, ESYNCH_TIMEOUT, or ESYNCH_OK_BLOCKED
 *         (normal wakeup after sleeping).
 */
int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags)
{
	/* Decide whether to go to sleep at all. */
	if (wq->missed_wakeups) {
		/* Consume a wakeup that arrived before we got here. */
		wq->missed_wakeups--;
		return ESYNCH_OK_ATOMIC;
	}
	else {
		if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) {
			/* Caller asked not to block. */
			return ESYNCH_WOULD_BLOCK;
		}
	}

	/*
	 * From this point on we are committed to sleeping.
	 */
	spinlock_lock(&THREAD->lock);

	if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
		/*
		 * If someone has already interrupted this thread,
		 * do not go to sleep at all.
		 */
		if (THREAD->interrupted) {
			spinlock_unlock(&THREAD->lock);
			spinlock_unlock(&wq->lock);
			return ESYNCH_INTERRUPTED;
		}

		/*
		 * Save the context to which waitq_interrupt_sleep()
		 * will redirect us. The save itself returns non-zero;
		 * re-entry through sleep_interruption_context returns
		 * zero, i.e. the branch below is the "woken by
		 * interruption" path.
		 */
		THREAD->sleep_interruptible = true;
		if (!context_save(&THREAD->sleep_interruption_context)) {
			/* Resumed here by waitq_interrupt_sleep(). */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_INTERRUPTED;
		}

	} else {
		THREAD->sleep_interruptible = false;
	}

	if (usec) {
		/* Timed sleep: same context_save() double-return trick. */
		if (!context_save(&THREAD->sleep_timeout_context)) {
			/* Resumed here by waitq_timeouted_sleep(). */
			spinlock_unlock(&THREAD->lock);
			return ESYNCH_TIMEOUT;
		}
		THREAD->timeout_pending = true;
		timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_timeouted_sleep, THREAD);
	}

	list_append(&THREAD->wq_link, &wq->head);

	/*
	 * Mark ourselves sleeping and call the scheduler. Note that
	 * wq->lock is still held here; NOTE(review): presumably the
	 * scheduler releases it after the context switch (it is not
	 * released in this function) — confirm against scheduler().
	 */
	THREAD->state = Sleeping;
	THREAD->sleep_queue = wq;

	spinlock_unlock(&THREAD->lock);

	scheduler();

	/* Normal wakeup via _waitq_wakeup_unsafe(). */
	return ESYNCH_OK_BLOCKED;
}
00371
00372
00386 void waitq_wakeup(waitq_t *wq, bool all)
00387 {
00388 ipl_t ipl;
00389
00390 ipl = interrupts_disable();
00391 spinlock_lock(&wq->lock);
00392
00393 _waitq_wakeup_unsafe(wq, all);
00394
00395 spinlock_unlock(&wq->lock);
00396 interrupts_restore(ipl);
00397 }
00398
00409 void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
00410 {
00411 thread_t *t;
00412
00413 loop:
00414 if (list_empty(&wq->head)) {
00415 wq->missed_wakeups++;
00416 if (all)
00417 wq->missed_wakeups = 0;
00418 return;
00419 }
00420
00421 t = list_get_instance(wq->head.next, thread_t, wq_link);
00422
00423
00424
00425
00426
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439 spinlock_lock(&t->lock);
00440 list_remove(&t->wq_link);
00441
00442 if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
00443 t->timeout_pending = false;
00444 t->sleep_queue = NULL;
00445 spinlock_unlock(&t->lock);
00446
00447 thread_ready(t);
00448
00449 if (all)
00450 goto loop;
00451 }
00452