Changeset 111b9b9 in mainline
- Timestamp: 2023-02-11T19:13:44Z (2 years ago)
- Branches: master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 4777e02
- Parents: 76e17d7c
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2022-08-15 17:46:39)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-11 19:13:44)
- Location: kernel
- Files: 20 edited
kernel/generic/include/proc/scheduler.h
 extern void scheduler_fpu_lazy_request(void);
 extern void scheduler(void);
+extern void scheduler_locked(ipl_t);
 extern void kcpulb(void *arg);
kernel/generic/include/proc/thread.h
     odlink_t lthreads;

+    /** Tracking variable for thread_wait/thread_wakeup */
+    atomic_int sleep_state;
+
     /**
      * If true, the thread is terminating.
…
      */
     volatile bool interrupted;
+
+    /** Wait queue in which this thread sleeps. Used for debug printouts. */
+    _Atomic(waitq_t *) sleep_queue;

     /** Waitq for thread_join_timeout(). */
…
     context_t saved_context;
     ipl_t saved_ipl;
-
-    /**
-     * From here, the stored timeout context
-     * is restored when sleep times out.
-     */
-    context_t sleep_timeout_context;
-
-    /**
-     * From here, the stored interruption context
-     * is restored when sleep is interrupted.
-     */
-    context_t sleep_interruption_context;
-
-    /** If true, the thread can be interrupted from sleep. */
-    bool sleep_interruptible;
-
-    /**
-     * If true, and this thread's sleep returns without a wakeup
-     * (timed out or interrupted), waitq ignores the next wakeup.
-     * This is necessary for futex to be able to handle those conditions.
-     */
-    bool sleep_composable;
-
-    /** Wait queue in which this thread sleeps. */
-    waitq_t *sleep_queue;

     /**
…
 extern void thread_ready(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
-extern void thread_interrupt(thread_t *, bool);
+extern void thread_interrupt(thread_t *);
+
+typedef enum {
+    THREAD_OK,
+    THREAD_TERMINATING,
+} thread_termination_state_t;
+
+typedef enum {
+    THREAD_WAIT_SUCCESS,
+    THREAD_WAIT_TIMEOUT,
+} thread_wait_result_t;
+
+extern thread_termination_state_t thread_wait_start(void);
+extern thread_wait_result_t thread_wait_finish(deadline_t);
+extern void thread_wakeup(thread_t *);

 static inline thread_t *thread_ref(thread_t *thread)
kernel/generic/include/synch/waitq.h
 #include <adt/list.h>

-typedef enum {
-    WAKEUP_FIRST = 0,
-    WAKEUP_ALL,
-    WAKEUP_CLOSE,
-} wakeup_mode_t;
-
 /** Wait queue structure.
  *
…

     /**
-     * Number of waitq_wakeup() calls that didn't find a thread to wake up.
-     *
+     * If negative, number of wakeups that are to be ignored (necessary for futex operation).
+     * If positive, number of wakeups that weren't able to wake a thread.
      */
-    int missed_wakeups;
-
-    /** Number of wakeups that need to be ignored due to futex timeout. */
-    int ignore_wakeups;
+    int wakeup_balance;

     /** List of sleeping threads for which there was no missed_wakeup. */
     list_t sleepers;
+
+    bool closed;
 } waitq_t;
+
+typedef struct wait_guard {
+    ipl_t ipl;
+} wait_guard_t;

 struct thread;
…
 extern void waitq_initialize_with_count(waitq_t *, int);
 extern errno_t waitq_sleep(waitq_t *);
-extern errno_t waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int, bool *);
-extern ipl_t waitq_sleep_prepare(waitq_t *);
-extern errno_t waitq_sleep_unsafe(waitq_t *, bool *);
-extern errno_t waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, bool *);
-extern void waitq_sleep_finish(waitq_t *, bool, ipl_t);
-extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
-extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
-extern void waitq_interrupt_sleep(struct thread *);
+extern errno_t _waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
+extern errno_t waitq_sleep_timeout(waitq_t *, uint32_t);
+extern wait_guard_t waitq_sleep_prepare(waitq_t *);
+extern errno_t waitq_sleep_unsafe(waitq_t *, wait_guard_t);
+extern errno_t waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, wait_guard_t);
+
+extern void waitq_wake_one(waitq_t *);
+extern void waitq_wake_all(waitq_t *);
+extern void waitq_signal(waitq_t *);
+extern void waitq_close(waitq_t *);

 #endif
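The declarations above replace the wakeup_mode_t argument with dedicated entry points (waitq_wake_one(), waitq_wake_all(), waitq_signal(), waitq_close()) and hand a wait_guard_t from waitq_sleep_prepare() to the unsafe sleep variants. A minimal sketch of the common blocking case, assuming only these declarations; the queue and both routines are illustrative and not part of the changeset:

    /* Illustrative waiter/waker pair using the new interface. */
    static waitq_t example_wq;

    static void waiter(void)
    {
        waitq_initialize(&example_wq);

        /* Sleeps for up to 1000 us; returns EOK on wakeup, ETIMEOUT otherwise. */
        errno_t rc = waitq_sleep_timeout(&example_wq, 1000);
        (void) rc;
    }

    static void waker(void)
    {
        /* Wakes one sleeper, or records the wakeup for the next sleep. */
        waitq_wake_one(&example_wq);
    }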
kernel/generic/include/time/timeout.h
 typedef void (*timeout_handler_t)(void *arg);

+typedef uint64_t deadline_t;
+#define DEADLINE_NEVER ((deadline_t) UINT64_MAX)
+
 typedef struct {
     /** Link to the list of active timeouts on timeout->cpu */
     link_t link;
     /** Timeout will be activated when current clock tick reaches this value. */
-    uint64_t deadline;
+    deadline_t deadline;
     /** Function that will be called on timeout activation. */
     timeout_handler_t handler;
…
 #define us2ticks(us) ((uint64_t) (((uint32_t) (us) / (1000000 / HZ))))

+extern deadline_t timeout_deadline_in_usec(uint32_t us);
+
 extern void timeout_init(void);
 extern void timeout_initialize(timeout_t *);
 extern void timeout_register(timeout_t *, uint64_t, timeout_handler_t, void *);
+extern void timeout_register_deadline(timeout_t *, deadline_t, timeout_handler_t, void *);
 extern bool timeout_unregister(timeout_t *);
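A deadline_t is an absolute clock-tick value rather than a relative delay, with DEADLINE_NEVER standing for "no timeout", so a deadline computed once can be reused. A hedged sketch of arming a timeout through the new calls; the handler and the 500 us figure are illustrative, and timeout_deadline_in_usec() is called with interrupts disabled as its definition in timeout.c below requires:

    static void example_handler(void *arg)
    {
        /* Runs when the deadline is reached. */
        (void) arg;
    }

    static void arm_example(timeout_t *t)
    {
        timeout_initialize(t);

        ipl_t ipl = interrupts_disable();
        deadline_t deadline = timeout_deadline_in_usec(500);  /* 500 us from now */
        interrupts_restore(ipl);

        timeout_register_deadline(t, deadline, example_handler, NULL);
    }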
kernel/generic/src/ipc/event.c
         true);

-    waitq_wakeup(&event->answerbox->wq,
-        WAKEUP_FIRST);
+    waitq_wake_one(&event->answerbox->wq);

     if (mask)
kernel/generic/src/ipc/ipc.c
     irq_spinlock_unlock(&callerbox->lock, true);

-    waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
+    waitq_wake_one(&callerbox->wq);

…
     irq_spinlock_unlock(&box->lock, true);

-    waitq_wakeup(&box->wq, WAKEUP_FIRST);
+    waitq_wake_one(&box->wq);


…
     errno_t rc;

-    rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
+    rc = _waitq_sleep_timeout(&box->wq, usec, flags);
     if (rc != EOK)
         return rc;
kernel/generic/src/ipc/irq.c
     irq_spinlock_unlock(&irq->notif_cfg.answerbox->irq_lock, false);

-    waitq_wakeup(&irq->notif_cfg.answerbox->wq, WAKEUP_FIRST);
+    waitq_wake_one(&irq->notif_cfg.answerbox->wq);
kernel/generic/src/ipc/sysipc.c
 sys_errno_t sys_ipc_poke(void)
 {
-    waitq_wakeup(&TASK->answerbox.wq, WAKEUP_FIRST);
+    waitq_wake_one(&TASK->answerbox.wq);
     return EOK;
 }
kernel/generic/src/proc/scheduler.c
 }

+void scheduler(void)
+{
+    ipl_t ipl = interrupts_disable();
+
+    if (atomic_load(&haltstate))
+        halt();
+
+    if (THREAD) {
+        irq_spinlock_lock(&THREAD->lock, false);
+    }
+
+    scheduler_locked(ipl);
+}
+
 /** The scheduler
  *
…
  *
  */
-void scheduler(void)
-{
-    volatile ipl_t ipl;
-
+void scheduler_locked(ipl_t ipl)
+{
     assert(CPU != NULL);

-    ipl = interrupts_disable();
-
-    if (atomic_load(&haltstate))
-        halt();
-
     if (THREAD) {
-        irq_spinlock_lock(&THREAD->lock, false);
-
         /* Update thread kernel accounting */
         THREAD->kcycles += get_cycle() - THREAD->last_cycle;
…
     case Exiting:
         irq_spinlock_unlock(&THREAD->lock, false);
-        waitq_wakeup(&THREAD->join_wq, WAKEUP_CLOSE);
+        waitq_close(&THREAD->join_wq);

         /*
…
          */
         THREAD->priority = -1;
-
-        /*
-         * We need to release wq->lock which we locked in
-         * waitq_sleep(). Address of wq->lock is kept in
-         * THREAD->sleep_queue.
-         */
-        irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
-
         irq_spinlock_unlock(&THREAD->lock, false);
         break;
kernel/generic/src/proc/task.c
     list_foreach(task->threads, th_link, thread_t, thread) {
-        thread_t *thr = thread_try_ref(thread);
-        if (thr)
-            thread_interrupt(thr, false);
-
-        // If NULL, the thread is already getting destroyed concurrently with this.
+        thread_interrupt(thread);
     }
kernel/generic/src/proc/thread.c
 #include <errno.h>
 #include <debug.h>
+#include <halt.h>

 /** Thread states */
…
     "Exiting",
     "Lingering"
+};
+
+enum sleep_state {
+    SLEEP_INITIAL,
+    SLEEP_ASLEEP,
+    SLEEP_WOKE,
 };

…
     thread->state = Entering;

-    thread->sleep_interruptible = false;
-    thread->sleep_composable = false;
-    thread->sleep_queue = NULL;
+    atomic_init(&thread->sleep_queue, NULL);

     thread->in_copy_from_uspace = false;
…

     thread->interrupted = false;
+    atomic_init(&thread->sleep_state, SLEEP_INITIAL);
+
     waitq_initialize(&thread->join_wq);

…
  * @param thread A valid thread object.
  */
-void thread_interrupt(thread_t *thread, bool irq_dis)
+void thread_interrupt(thread_t *thread)
 {
     assert(thread != NULL);
-
-    irq_spinlock_lock(&thread->lock, irq_dis);
-
     thread->interrupted = true;
-    bool sleeping = (thread->state == Sleeping);
-
-    irq_spinlock_unlock(&thread->lock, irq_dis);
-
-    if (sleeping)
-        waitq_interrupt_sleep(thread);
-
-    thread_put(thread);
+    thread_wakeup(thread);
+}
+
+/** Prepare for putting the thread to sleep.
+ *
+ * @returns whether the thread is currently terminating. If THREAD_OK
+ * is returned, the thread is guaranteed to be woken up instantly if the thread
+ * is terminated at any time between this function's return and
+ * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
+ * go to sleep, but doing so will delay termination.
+ */
+thread_termination_state_t thread_wait_start(void)
+{
+    assert(THREAD != NULL);
+
+    /*
+     * This is an exchange rather than a store so that we can use the acquire
+     * semantics, which is needed to ensure that code after this operation sees
+     * memory ops made before thread_wakeup() in other thread, if that wakeup
+     * was reset by this operation.
+     *
+     * In particular, we need this to ensure we can't miss the thread being
+     * terminated concurrently with a synchronization primitive preparing to
+     * sleep.
+     */
+    (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
+        memory_order_acquire);
+
+    return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
+}
+
+static void thread_wait_internal(void)
+{
+    assert(THREAD != NULL);
+
+    ipl_t ipl = interrupts_disable();
+
+    if (atomic_load(&haltstate))
+        halt();
+
+    /*
+     * Lock here to prevent a race between entering the scheduler and another
+     * thread rescheduling this thread.
+     */
+    irq_spinlock_lock(&THREAD->lock, false);
+
+    int expected = SLEEP_INITIAL;
+
+    /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
+    if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
+        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
+        THREAD->state = Sleeping;
+        scheduler_locked(ipl);
+    } else {
+        assert(expected == SLEEP_WOKE);
+        /* Return immediately. */
+        irq_spinlock_unlock(&THREAD->lock, false);
+        interrupts_restore(ipl);
+    }
+}
+
+static void thread_wait_timeout_callback(void *arg)
+{
+    thread_wakeup(arg);
+}
+
+/**
+ * Suspends this thread's execution until thread_wakeup() is called on it,
+ * or deadline is reached.
+ *
+ * The way this would normally be used is that the current thread call
+ * thread_wait_start(), and if interruption has not been signaled, stores
+ * a reference to itself in a synchronized structure (such as waitq).
+ * After that, it releases any spinlocks it might hold and calls this function.
+ *
+ * The thread doing the wakeup will acquire the thread's reference from said
+ * synchronized structure and calls thread_wakeup() on it.
+ *
+ * Notably, there can be more than one thread performing wakeup.
+ * The number of performed calls to thread_wakeup(), or their relative
+ * ordering with thread_wait_finish(), does not matter. However, calls to
+ * thread_wakeup() are expected to be synchronized with thread_wait_start()
+ * with which they are associated, otherwise wakeups may be missed.
+ * However, the operation of thread_wakeup() is defined at any time,
+ * synchronization notwithstanding (in the sense of C un/defined behavior),
+ * and is in fact used to interrupt waiting threads by external events.
+ * The waiting thread must operate correctly in face of spurious wakeups,
+ * and clean up its reference in the synchronization structure if necessary.
+ *
+ * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
+ * for it to have been waken up by the timeout, but the caller must assume
+ * that proper wakeups, timeouts and interrupts may occur concurrently, so
+ * the fact timeout has been registered does not necessarily mean the thread
+ * has not been woken up or interrupted.
+ */
+thread_wait_result_t thread_wait_finish(deadline_t deadline)
+{
+    assert(THREAD != NULL);
+
+    timeout_t timeout;
+
+    if (deadline != DEADLINE_NEVER) {
+        /* Extra check to avoid setting up a deadline if we don't need to. */
+        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+            SLEEP_INITIAL)
+            return THREAD_WAIT_SUCCESS;
+
+        timeout_initialize(&timeout);
+        timeout_register_deadline(&timeout, deadline,
+            thread_wait_timeout_callback, THREAD);
+    }
+
+    thread_wait_internal();
+
+    if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
+        return THREAD_WAIT_TIMEOUT;
+    } else {
+        return THREAD_WAIT_SUCCESS;
+    }
+}
+
+void thread_wakeup(thread_t *thread)
+{
+    assert(thread != NULL);
+
+    int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
+        memory_order_release);
+
+    if (state == SLEEP_ASLEEP) {
+        /*
+         * Only one thread gets to do this.
+         * The reference consumed here is the reference implicitly passed to
+         * the waking thread by the sleeper in thread_wait_finish().
+         */
+        thread_ready(thread);
+    }
 }

…
         return EOK;
     } else {
-        return waitq_sleep_timeout(&thread->join_wq, usec,
-            SYNCH_FLAGS_NON_BLOCKING, NULL);
+        return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
     }
 }
…
     waitq_initialize(&wq);

-    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
+    (void) waitq_sleep_timeout(&wq, usec);
 }

…

     if (sleeping)
-        waitq_interrupt_sleep(thread);
+        thread_wakeup(thread);

     thread_put(thread);
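The block comment on thread_wait_finish() above spells out the intended protocol: check for termination, publish a reference to the current thread, then sleep, tolerating spurious wakeups. A condensed, hedged sketch of that pattern; the shared slot and its synchronization are purely illustrative (real users such as waitq keep the reference inside their own locked structure):

    /* Illustrative only: a single waiter publishing itself for one waker. */
    static thread_t *waiter_slot;  /* assumed to be protected by the caller's lock */

    static void waiter(void)
    {
        if (thread_wait_start() == THREAD_TERMINATING)
            return;  /* termination already requested, do not sleep */

        waiter_slot = THREAD;  /* publish a reference for the waker */

        /* Sleep until thread_wakeup(); spurious wakeups must be tolerated. */
        (void) thread_wait_finish(DEADLINE_NEVER);

        waiter_slot = NULL;  /* clean up the reference */
    }

    static void waker(void)
    {
        thread_t *thread = waiter_slot;
        if (thread != NULL)
            thread_wakeup(thread);
    }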
kernel/generic/src/synch/condvar.c
 void condvar_signal(condvar_t *cv)
 {
-    waitq_wakeup(&cv->wq, WAKEUP_FIRST);
+    waitq_signal(&cv->wq);
 }

…
 void condvar_broadcast(condvar_t *cv)
 {
-    waitq_wakeup(&cv->wq, WAKEUP_ALL);
+    waitq_wake_all(&cv->wq);
 }

…
 errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
 {
-    errno_t rc;
-    ipl_t ipl;
-    bool blocked;
+    wait_guard_t guard = waitq_sleep_prepare(&cv->wq);

-    ipl = waitq_sleep_prepare(&cv->wq);
     /* Unlock only after the waitq is locked so we don't miss a wakeup. */
     mutex_unlock(mtx);

-    cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
-    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_FLAGS_NON_BLOCKING, &blocked);
-    assert(blocked || rc != EOK);
+    errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_FLAGS_NON_BLOCKING, guard);

-    waitq_sleep_finish(&cv->wq, blocked, ipl);
-    /* Lock only after releasing the waitq to avoid a possible deadlock. */
     mutex_lock(mtx);
-
     return rc;
 }
…
 errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
 {
-    errno_t rc;
-    ipl_t ipl;
-    bool blocked;
+    wait_guard_t guard = waitq_sleep_prepare(&cv->wq);

-    ipl = waitq_sleep_prepare(&cv->wq);
     /* Unlock only after the waitq is locked so we don't miss a wakeup. */
     mutex_unlock(mtx);

-    cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
-    rc = waitq_sleep_unsafe(&cv->wq, &blocked);
-    assert(blocked || rc != EOK);
+    errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);

-    waitq_sleep_finish(&cv->wq, blocked, ipl);
-    /* Lock only after releasing the waitq to avoid a possible deadlock. */
     mutex_lock(mtx);
-
     return rc;
 }
…
     uint32_t usec, int flags)
 {
-    errno_t rc;
-    ipl_t ipl;
-    bool blocked;
-
-    ipl = waitq_sleep_prepare(&cv->wq);
+    wait_guard_t guard = waitq_sleep_prepare(&cv->wq);

     /* Unlock only after the waitq is locked so we don't miss a wakeup. */
     spinlock_unlock(lock);

-    cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
-    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
-    assert(blocked || rc != EOK);
+    errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, guard);

-    waitq_sleep_finish(&cv->wq, blocked, ipl);
-    /* Lock only after releasing the waitq to avoid a possible deadlock. */
     spinlock_lock(lock);
-
     return rc;
 }
kernel/generic/src/synch/semaphore.c
 errno_t semaphore_down_timeout(semaphore_t *sem, uint32_t usec)
 {
-    errno_t rc = waitq_sleep_timeout(&sem->wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
+    errno_t rc = waitq_sleep_timeout(&sem->wq, usec);
     assert(rc == EOK || rc == ETIMEOUT || rc == EAGAIN);
     return rc;
…
 void semaphore_up(semaphore_t *sem)
 {
-    waitq_wakeup(&sem->wq, WAKEUP_FIRST);
+    waitq_wake_one(&sem->wq);
 }
kernel/generic/src/synch/syswaitq.c
 #endif

-    errno_t rc = waitq_sleep_timeout(kobj->waitq, timeout,
-        SYNCH_FLAGS_INTERRUPTIBLE | flags, NULL);
+    errno_t rc = _waitq_sleep_timeout(kobj->waitq, timeout,
+        SYNCH_FLAGS_INTERRUPTIBLE | flags);

 #ifdef CONFIG_UDEBUG
…
         return (sys_errno_t) ENOENT;

-    waitq_wakeup(kobj->waitq, WAKEUP_FIRST);
+    waitq_wake_one(kobj->waitq);

     kobject_put(kobj);
kernel/generic/src/synch/waitq.c
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2022 Jiří Zárevúcky
  * All rights reserved.
  *
…
 #include <mem.h>

-static void waitq_sleep_timed_out(void *);
-static void waitq_complete_wakeup(waitq_t *);
-
 /** Initialize wait queue
  *
…
 }

+/**
+ * Initialize wait queue with an initial number of queued wakeups
+ * (or a wakeup debt if negative).
+ */
 void waitq_initialize_with_count(waitq_t *wq, int count)
 {
-    memsetb(wq, sizeof(*wq), 0);
-    irq_spinlock_initialize(&wq->lock, "wq.lock");
-    list_initialize(&wq->sleepers);
-    wq->missed_wakeups = count;
+    waitq_initialize(wq);
+    wq->wakeup_balance = count;
-}
-
-/** Handle timeout during waitq_sleep_timeout() call
- *
- * This routine is called when waitq_sleep_timeout() times out.
- * Interrupts are disabled.
- *
- * It is supposed to try to remove 'its' thread from the wait queue;
- * it can eventually fail to achieve this goal when these two events
- * overlap. In that case it behaves just as though there was no
- * timeout at all.
- *
- * @param data Pointer to the thread that called waitq_sleep_timeout().
- *
- */
-void waitq_sleep_timed_out(void *data)
-{
-    thread_t *thread = (thread_t *) data;
-    bool do_wakeup = false;
-    DEADLOCK_PROBE_INIT(p_wqlock);
-
-    irq_spinlock_lock(&threads_lock, false);
-
-grab_locks:
-    irq_spinlock_lock(&thread->lock, false);
-
-    waitq_t *wq;
-    if ((wq = thread->sleep_queue)) {  /* Assignment */
-        if (!irq_spinlock_trylock(&wq->lock)) {
-            irq_spinlock_unlock(&thread->lock, false);
-            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-            /* Avoid deadlock */
-            goto grab_locks;
-        }
-
-        list_remove(&thread->wq_link);
-        thread->saved_context = thread->sleep_timeout_context;
-        do_wakeup = true;
-        if (thread->sleep_composable)
-            wq->ignore_wakeups++;
-        thread->sleep_queue = NULL;
-        irq_spinlock_unlock(&wq->lock, false);
-    }
-
-    irq_spinlock_unlock(&thread->lock, false);
-
-    if (do_wakeup)
-        thread_ready(thread);
-
-    irq_spinlock_unlock(&threads_lock, false);
-}
-
-/** Interrupt sleeping thread.
- *
- * This routine attempts to interrupt a thread from its sleep in
- * a waitqueue. If the thread is not found sleeping, no action
- * is taken.
- *
- * The threads_lock must be already held and interrupts must be
- * disabled upon calling this function.
- *
- * @param thread Thread to be interrupted.
- *
- */
-void waitq_interrupt_sleep(thread_t *thread)
-{
-    bool do_wakeup = false;
-    DEADLOCK_PROBE_INIT(p_wqlock);
-
-    /*
-     * The thread is quaranteed to exist because
-     * threads_lock is held.
-     */
-
-grab_locks:
-    irq_spinlock_lock(&thread->lock, false);
-
-    waitq_t *wq;
-    if ((wq = thread->sleep_queue)) {  /* Assignment */
-        if (!(thread->sleep_interruptible)) {
-            /*
-             * The sleep cannot be interrupted.
-             */
-            irq_spinlock_unlock(&thread->lock, false);
-            return;
-        }
-
-        if (!irq_spinlock_trylock(&wq->lock)) {
-            /* Avoid deadlock */
-            irq_spinlock_unlock(&thread->lock, false);
-            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
-            goto grab_locks;
-        }
-
-        list_remove(&thread->wq_link);
-        thread->saved_context = thread->sleep_interruption_context;
-        if (thread->sleep_composable)
-            wq->ignore_wakeups++;
-        do_wakeup = true;
-        thread->sleep_queue = NULL;
-        irq_spinlock_unlock(&wq->lock, false);
-    }
-
-    irq_spinlock_unlock(&thread->lock, false);
-
-    if (do_wakeup)
-        thread_ready(thread);
 }
…
 errno_t waitq_sleep(waitq_t *wq)
 {
-    return waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL);
+    return _waitq_sleep_timeout(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
+}
+
+errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec)
+{
+    return _waitq_sleep_timeout(wq, usec, SYNCH_FLAGS_NON_BLOCKING);
 }

 /** Sleep until either wakeup, timeout or interruption occurs
  *
- * This is a sleep implementation which allows itself to time out or to be
- * interrupted from the sleep, restoring a failover context.
- *
  * Sleepers are organised in a FIFO fashion in a structure called wait queue.
  *
- * This function is really basic in that other functions as waitq_sleep()
- * and all the *_timeout() functions use it.
+ * Other functions as waitq_sleep() and all the *_timeout() functions are
+ * implemented using this function.
  *
  * @param wq    Pointer to wait queue.
…
  * @param flags Specify mode of the sleep.
  *
- * @param[out] blocked  On return, regardless of the return code,
- *                      `*blocked` is set to `true` iff the thread went to
- *                      sleep.
- *
  * The sleep can be interrupted only if the
  * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
…
  * call will immediately return, reporting either success or failure.
  *
- * @return EAGAIN, meaning that the sleep failed because it was requested
- *         as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
- * @return ETIMEOUT, meaning that the sleep timed out.
- * @return EINTR, meaning that somebody interrupted the sleeping
- *         thread. Check the value of `*blocked` to see if the thread slept,
- *         or if a pending interrupt forced it to return immediately.
+ * @return ETIMEOUT, meaning that the sleep timed out, or a nonblocking call
+ *         returned unsuccessfully.
+ * @return EINTR, meaning that somebody interrupted the sleeping thread.
  * @return EOK, meaning that none of the above conditions occured, and the
- *         thread was woken up successfuly by `waitq_wakeup()`. Check
- *         the value of `*blocked` to see if the thread slept or if
- *         the wakeup was already pending.
- *
- */
-errno_t waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
+ *         thread was woken up successfuly by `waitq_wake_*()`.
+ *
+ */
+errno_t _waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
 {
     assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
-
-    ipl_t ipl = waitq_sleep_prepare(wq);
-    bool nblocked;
-    errno_t rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
-    waitq_sleep_finish(wq, nblocked, ipl);
-
-    if (blocked != NULL) {
-        *blocked = nblocked;
-    }
-    return rc;
+    return waitq_sleep_timeout_unsafe(wq, usec, flags, waitq_sleep_prepare(wq));
 }

…
  *
  */
-ipl_t waitq_sleep_prepare(waitq_t *wq)
+wait_guard_t waitq_sleep_prepare(waitq_t *wq)
 {
     ipl_t ipl = interrupts_disable();
     irq_spinlock_lock(&wq->lock, false);
-    return ipl;
-}
-
-/** Finish waiting in a wait queue.
- *
- * This function restores interrupts to the state that existed prior
- * to the call to waitq_sleep_prepare(). If necessary, the wait queue
- * lock is released.
- *
- * @param wq      Wait queue.
- * @param blocked Out parameter of waitq_sleep_timeout_unsafe().
- * @param ipl     Interrupt level returned by waitq_sleep_prepare().
- *
- */
-void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
-{
-    if (blocked) {
-        /*
-         * Wait for a waitq_wakeup() or waitq_unsleep() to complete
-         * before returning from waitq_sleep() to the caller. Otherwise
-         * the caller might expect that the wait queue is no longer used
-         * and deallocate it (although the wakeup on a another cpu has
-         * not yet completed and is using the wait queue).
-         *
-         * Note that we have to do this for EOK and EINTR, but not
-         * necessarily for ETIMEOUT where the timeout handler stops
-         * using the waitq before waking us up. To be on the safe side,
-         * ensure the waitq is not in use anymore in this case as well.
-         */
-        waitq_complete_wakeup(wq);
-    } else {
-        irq_spinlock_unlock(&wq->lock, false);
-    }
-
-    interrupts_restore(ipl);
-}
-
-errno_t waitq_sleep_unsafe(waitq_t *wq, bool *blocked)
-{
-    return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, blocked);
+    return (wait_guard_t) {
+        .ipl = ipl,
+    };
+}
+
+errno_t waitq_sleep_unsafe(waitq_t *wq, wait_guard_t guard)
+{
+    return waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, guard);
 }

…
  *
  * This function implements logic of sleeping in a wait queue.
- * This call must be preceded by a call to waitq_sleep_prepare()
- * and followed by a call to waitq_sleep_finish().
+ * This call must be preceded by a call to waitq_sleep_prepare().
  *
  * @param wq See waitq_sleep_timeout().
…
  *
  */
-errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
-{
-    *blocked = false;
+errno_t waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, wait_guard_t guard)
+{
+    errno_t rc;
+
+    /*
+     * If true, and this thread's sleep returns without a wakeup
+     * (timed out or interrupted), waitq ignores the next wakeup.
+     * This is necessary for futex to be able to handle those conditions.
+     */
+    bool sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
+    bool interruptible = (flags & SYNCH_FLAGS_INTERRUPTIBLE);
+
+    if (wq->closed) {
+        rc = EOK;
+        goto exit;
+    }

     /* Checks whether to go to sleep at all */
-    if (wq->missed_wakeups) {
-        wq->missed_wakeups--;
-        return EOK;
-    } else {
-        if (PARAM_NON_BLOCKING(flags, usec)) {
-            /* Return immediately instead of going to sleep */
-            return EAGAIN;
+    if (wq->wakeup_balance > 0) {
+        wq->wakeup_balance--;
+
+        rc = EOK;
+        goto exit;
+    }
+
+    if (PARAM_NON_BLOCKING(flags, usec)) {
+        /* Return immediately instead of going to sleep */
+        rc = ETIMEOUT;
+        goto exit;
+    }
+
+    /* Just for debugging output. */
+    atomic_store_explicit(&THREAD->sleep_queue, wq, memory_order_relaxed);
+
+    /*
+     * This thread_t field is synchronized exclusively via
+     * waitq lock of the waitq currently listing it.
+     */
+    list_append(&THREAD->wq_link, &wq->sleepers);
+
+    /* Needs to be run when interrupts are still disabled. */
+    deadline_t deadline = usec > 0 ?
+        timeout_deadline_in_usec(usec) : DEADLINE_NEVER;
+
+    while (true) {
+        bool terminating = (thread_wait_start() == THREAD_TERMINATING);
+        if (terminating && interruptible) {
+            rc = EINTR;
+            goto exit;
         }
-    }
-
-    /*
-     * Now we are firmly decided to go to sleep.
-     *
-     */
-    irq_spinlock_lock(&THREAD->lock, false);
-
-    timeout_t timeout;
-    timeout_initialize(&timeout);
-
-    THREAD->sleep_composable = (flags & SYNCH_FLAGS_FUTEX);
-
-    if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
+
+        irq_spinlock_unlock(&wq->lock, false);
+
+        bool timed_out = (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT);
+
         /*
-         * If the thread was already interrupted,
-         * don't go to sleep at all.
+         * We always need to re-lock the WQ, since concurrently running
+         * waitq_wakeup() may still not have exitted.
+         * If we didn't always do this, we'd risk waitq_wakeup() that woke us
+         * up still running on another CPU even after this function returns,
+         * and that would be an issue if the waitq is allocated locally to
+         * wait for a one-off asynchronous event. We'd need more external
+         * synchronization in that case, and that would be a pain.
+         *
+         * On the plus side, always regaining a lock simplifies cleanup.
          */
-        if (THREAD->interrupted) {
-            irq_spinlock_unlock(&THREAD->lock, false);
-            return EINTR;
+        irq_spinlock_lock(&wq->lock, false);
+
+        if (!link_in_use(&THREAD->wq_link)) {
+            /*
+             * We were woken up by the desired event. Return success,
+             * regardless of any concurrent timeout or interruption.
+             */
+            rc = EOK;
+            goto exit;
         }

-        /*
-         * Set context that will be restored if the sleep
-         * of this thread is ever interrupted.
-         */
-        THREAD->sleep_interruptible = true;
-        if (!context_save(&THREAD->sleep_interruption_context)) {
-            /* Short emulation of scheduler() return code. */
-            THREAD->last_cycle = get_cycle();
-            irq_spinlock_unlock(&THREAD->lock, false);
-            if (usec) {
-                timeout_unregister(&timeout);
-            }
-            return EINTR;
+        if (timed_out) {
+            rc = ETIMEOUT;
+            goto exit;
         }
-    } else
-        THREAD->sleep_interruptible = false;
-
-    if (usec) {
-        /* We use the timeout variant. */
-        if (!context_save(&THREAD->sleep_timeout_context)) {
-            /* Short emulation of scheduler() return code. */
-            THREAD->last_cycle = get_cycle();
-            irq_spinlock_unlock(&THREAD->lock, false);
-            return ETIMEOUT;
-        }
-
-        timeout_register(&timeout, (uint64_t) usec, waitq_sleep_timed_out, THREAD);
-    }
-
-    list_append(&THREAD->wq_link, &wq->sleepers);
-
-    /*
-     * Suspend execution.
-     *
-     */
-    THREAD->state = Sleeping;
-    THREAD->sleep_queue = wq;
-
-    /*
-     * Must be before entry to scheduler, because there are multiple
-     * return vectors.
-     */
-    *blocked = true;
-
-    irq_spinlock_unlock(&THREAD->lock, false);
-
-    /* wq->lock is released in scheduler_separated_stack() */
-    scheduler();
-
-    if (usec) {
-        timeout_unregister(&timeout);
-    }
-
-    return EOK;
-}
-
-/** Wake up first thread sleeping in a wait queue
- *
- * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
- * wrapper meant for general use.
- *
- * Besides its 'normal' wakeup operation, it attempts to unregister possible
- * timeout.
- *
- * @param wq   Pointer to wait queue.
- * @param mode Wakeup mode.
- *
- */
-void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
+
+        /* Interrupted for some other reason. */
+    }
+
+exit:
+    if (THREAD)
+        list_remove(&THREAD->wq_link);
+
+    if (rc != EOK && sleep_composable)
+        wq->wakeup_balance--;
+
+    if (THREAD)
+        atomic_store_explicit(&THREAD->sleep_queue, NULL, memory_order_relaxed);
+
+    irq_spinlock_unlock(&wq->lock, false);
+    interrupts_restore(guard.ipl);
+    return rc;
+}
+
+static void _wake_one(waitq_t *wq)
+{
+    /* Pop one thread from the queue and wake it up. */
+    thread_t *thread = list_get_instance(list_first(&wq->sleepers), thread_t, wq_link);
+    list_remove(&thread->wq_link);
+    thread_wakeup(thread);
+}
+
+/**
+ * Meant for implementing condvar signal.
+ * Always wakes one thread if there are any sleeping,
+ * has no effect if no threads are waiting for wakeup.
+ */
+void waitq_signal(waitq_t *wq)
 {
     irq_spinlock_lock(&wq->lock, true);
-    _waitq_wakeup_unsafe(wq, mode);
+
+    if (!list_empty(&wq->sleepers))
+        _wake_one(wq);
+
     irq_spinlock_unlock(&wq->lock, true);
 }

-/** If there is a wakeup in progress actively waits for it to complete.
- *
- * The function returns once the concurrently running waitq_wakeup()
- * exits. It returns immediately if there are no concurrent wakeups
- * at the time.
- *
- * Interrupts must be disabled.
- *
- * Example usage:
- * @code
- * void callback(waitq *wq)
- * {
- *     // Do something and notify wait_for_completion() that we're done.
- *     waitq_wakeup(wq);
- * }
- * void wait_for_completion(void)
- * {
- *     waitq wg;
- *     waitq_initialize(&wq);
- *     // Run callback() in the background, pass it wq.
- *     do_asynchronously(callback, &wq);
- *     // Wait for callback() to complete its work.
- *     waitq_sleep(&wq);
- *     // callback() completed its work, but it may still be accessing
- *     // wq in waitq_wakeup(). Therefore it is not yet safe to return
- *     // from waitq_sleep() or it would clobber up our stack (where wq
- *     // is stored). waitq_sleep() ensures the wait queue is no longer
- *     // in use by invoking waitq_complete_wakeup() internally.
- *
- *     // waitq_sleep() returned, it is safe to free wq.
- * }
- * @endcode
- *
- * @param wq Pointer to a wait queue.
- */
-static void waitq_complete_wakeup(waitq_t *wq)
-{
-    assert(interrupts_disabled());
-
-    irq_spinlock_lock(&wq->lock, false);
-    irq_spinlock_unlock(&wq->lock, false);
-}
-
-/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
- *
- * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
- * assumes wq->lock is already locked and interrupts are already disabled.
- *
- * @param wq   Pointer to wait queue.
- * @param mode If mode is WAKEUP_FIRST, then the longest waiting
- *             thread, if any, is woken up. If mode is WAKEUP_ALL, then
- *             all waiting threads, if any, are woken up. If there are
- *             no waiting threads to be woken up, the missed wakeup is
- *             recorded in the wait queue.
- *
- */
-void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
-{
-    size_t count = 0;
-
-    assert(interrupts_disabled());
-    assert(irq_spinlock_locked(&wq->lock));
-
-    if (wq->ignore_wakeups > 0) {
-        if (mode == WAKEUP_FIRST) {
-            wq->ignore_wakeups--;
-            return;
-        }
-        wq->ignore_wakeups = 0;
-    }
-
-loop:
-    if (list_empty(&wq->sleepers)) {
-        if (mode == WAKEUP_CLOSE) {
-            // FIXME: this can technically fail if we get two billion sleeps after the wakeup call.
-            wq->missed_wakeups = INT_MAX;
-        } else if (mode != WAKEUP_ALL) {
-            wq->missed_wakeups++;
-        }
-
-        return;
-    }
-
-    count++;
-    thread_t *thread = list_get_instance(list_first(&wq->sleepers),
-        thread_t, wq_link);
-
-    /*
-     * Lock the thread prior to removing it from the wq.
-     * This is not necessary because of mutual exclusion
-     * (the link belongs to the wait queue), but because
-     * of synchronization with waitq_sleep_timed_out()
-     * and thread_interrupt_sleep().
-     *
-     * In order for these two functions to work, the following
-     * invariant must hold:
-     *
-     * thread->sleep_queue != NULL <=> thread sleeps in a wait queue
-     *
-     * For an observer who locks the thread, the invariant
-     * holds only when the lock is held prior to removing
-     * it from the wait queue.
-     *
-     */
-    irq_spinlock_lock(&thread->lock, false);
-    list_remove(&thread->wq_link);
-
-    thread->sleep_queue = NULL;
-    irq_spinlock_unlock(&thread->lock, false);
-
-    thread_ready(thread);
-
-    if (mode == WAKEUP_ALL)
-        goto loop;
+/**
+ * Wakes up one thread sleeping on this waitq.
+ * If there are no threads waiting, saves the wakeup so that the next sleep
+ * returns immediately. If a previous failure in sleep created a wakeup debt
+ * (see SYNCH_FLAGS_FUTEX) this debt is annulled and no thread is woken up.
+ */
+void waitq_wake_one(waitq_t *wq)
+{
+    irq_spinlock_lock(&wq->lock, true);
+
+    if (!wq->closed) {
+        if (wq->wakeup_balance < 0 || list_empty(&wq->sleepers))
+            wq->wakeup_balance++;
+        else
+            _wake_one(wq);
+    }
+
+    irq_spinlock_unlock(&wq->lock, true);
+}
+
+static void _wake_all(waitq_t *wq)
+{
+    while (!list_empty(&wq->sleepers))
+        _wake_one(wq);
+}
+
+/**
+ * Wakes up all threads currently waiting on this waitq
+ * and makes all future sleeps return instantly.
+ */
+void waitq_close(waitq_t *wq)
+{
+    irq_spinlock_lock(&wq->lock, true);
+    wq->wakeup_balance = 0;
+    wq->closed = true;
+    _wake_all(wq);
+    irq_spinlock_unlock(&wq->lock, true);
+}
+
+/**
+ * Wakes up all threads currently waiting on this waitq
+ */
+void waitq_wake_all(waitq_t *wq)
+{
+    irq_spinlock_lock(&wq->lock, true);
+    wq->wakeup_balance = 0;
+    _wake_all(wq);
+    irq_spinlock_unlock(&wq->lock, true);
 }
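The comment in the new sleep loop explains why the queue lock is always re-acquired before returning: it keeps the stack-allocated, one-shot wait pattern from the removed waitq_complete_wakeup() example safe without extra synchronization. Roughly the same pattern under the new API, as a sketch; do_asynchronously() is a hypothetical stand-in for whatever runs the callback elsewhere:

    static void callback(void *arg)
    {
        waitq_t *wq = arg;
        /* Do the work, then notify the waiter. */
        waitq_wake_one(wq);
    }

    static void wait_for_completion(void)
    {
        waitq_t wq;
        waitq_initialize(&wq);

        do_asynchronously(callback, &wq);  /* hypothetical helper */

        /* Returns only after callback() has released wq's lock,
         * so wq may safely go out of scope afterwards. */
        waitq_sleep(&wq);
    }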
kernel/generic/src/time/timeout.c
 }

-/** Register timeout
- *
- * Insert timeout handler f (with argument arg)
- * to timeout list and make it execute in
- * time microseconds (or slightly more).
- *
- * @param timeout Timeout structure.
- * @param time    Number of usec in the future to execute the handler.
- * @param handler Timeout handler function.
- * @param arg     Timeout handler argument.
- *
- */
-void timeout_register(timeout_t *timeout, uint64_t time,
+/* Only call when interrupts are disabled. */
+deadline_t timeout_deadline_in_usec(uint32_t usec)
+{
+    if (usec == 0)
+        return 0;
+
+    return CPU->current_clock_tick + us2ticks(usec);
+}
+
+static void timeout_register_deadline_locked(timeout_t *timeout, deadline_t deadline,
     timeout_handler_t handler, void *arg)
 {
-    irq_spinlock_lock(&CPU->timeoutlock, true);
-
     assert(!link_in_use(&timeout->link));

     *timeout = (timeout_t) {
         .cpu = CPU,
-        .deadline = CPU->current_clock_tick + us2ticks(time),
+        .deadline = deadline,
         .handler = handler,
         .arg = arg,
…
         }
     }
+}

+/** Register timeout
+ *
+ * Insert timeout handler f (with argument arg)
+ * to timeout list and make it execute in
+ * time microseconds (or slightly more).
+ *
+ * @param timeout Timeout structure.
+ * @param time    Number of usec in the future to execute the handler.
+ * @param handler Timeout handler function.
+ * @param arg     Timeout handler argument.
+ *
+ */
+void timeout_register(timeout_t *timeout, uint64_t time,
+    timeout_handler_t handler, void *arg)
+{
+    irq_spinlock_lock(&CPU->timeoutlock, true);
+    timeout_register_deadline_locked(timeout, timeout_deadline_in_usec(time), handler, arg);
+    irq_spinlock_unlock(&CPU->timeoutlock, true);
+}
+
+void timeout_register_deadline(timeout_t *timeout, deadline_t deadline,
+    timeout_handler_t handler, void *arg)
+{
+    irq_spinlock_lock(&CPU->timeoutlock, true);
+    timeout_register_deadline_locked(timeout, deadline, handler, arg);
     irq_spinlock_unlock(&CPU->timeoutlock, true);
 }
kernel/generic/src/udebug/udebug.c
     /*
      * thread's lock must not be held when calling
-     * waitq_wakeup.
+     * waitq_close.
      *
      */
-    waitq_wakeup(&thread->udebug.go_wq, WAKEUP_ALL);
+    waitq_close(&thread->udebug.go_wq);
 }
kernel/generic/src/udebug/udebug_ops.c
      *
      */
-    waitq_wakeup(&thread->udebug.go_wq, WAKEUP_ALL);
+    waitq_wake_all(&thread->udebug.go_wq);

     _thread_op_end(thread);
kernel/test/synch/semaphore1.c
     thread_sleep(1);
-    waitq_wakeup(&can_start, WAKEUP_ALL);
+    waitq_wake_all(&can_start);

     while ((items_consumed != consumers) || (items_produced != producers)) {
kernel/test/synch/semaphore2.c
     thread_usleep(20000);
-    waitq_wakeup(&can_start, WAKEUP_ALL);
+    waitq_wake_all(&can_start);

     return NULL;