Changes in kernel/generic/src/synch/condvar.c [b7fd2a0:497bd656] in mainline
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/condvar.c
rb7fd2a0 r497bd656 80 80 * For exact description of meaning of possible combinations of usec and flags, 81 81 * see comment for waitq_sleep_timeout(). Note that when 82 * SYNCH_FLAGS_NON_BLOCKING is specified here, E AGAINis always82 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 83 83 * returned. 84 84 * 85 85 * @return See comment for waitq_sleep_timeout(). 86 86 */ 87 errno_t _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)87 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags) 88 88 { 89 errno_t rc;89 int rc; 90 90 ipl_t ipl; 91 bool blocked;92 91 93 92 ipl = waitq_sleep_prepare(&cv->wq); … … 96 95 97 96 cv->wq.missed_wakeups = 0; /* Enforce blocking. */ 98 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked); 99 assert(blocked || rc != EOK); 97 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 100 98 101 waitq_sleep_finish(&cv->wq, blocked, ipl);99 waitq_sleep_finish(&cv->wq, rc, ipl); 102 100 /* Lock only after releasing the waitq to avoid a possible deadlock. */ 103 101 mutex_lock(mtx); … … 119 117 * For exact description of meaning of possible combinations of usec and flags, 120 118 * see comment for waitq_sleep_timeout(). Note that when 121 * SYNCH_FLAGS_NON_BLOCKING is specified here, E AGAINis always119 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 122 120 * returned. 123 121 * 124 122 * @return See comment for waitq_sleep_timeout(). 125 123 */ 126 errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,124 int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock, 127 125 uint32_t usec, int flags) 128 126 { 129 errno_t rc;127 int rc; 130 128 ipl_t ipl; 131 bool blocked; 132 129 133 130 ipl = waitq_sleep_prepare(&cv->wq); 134 131 … … 137 134 138 135 cv->wq.missed_wakeups = 0; /* Enforce blocking. 
*/ 139 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked); 140 assert(blocked || rc != EOK); 136 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 141 137 142 waitq_sleep_finish(&cv->wq, blocked, ipl);138 waitq_sleep_finish(&cv->wq, rc, ipl); 143 139 /* Lock only after releasing the waitq to avoid a possible deadlock. */ 144 140 spinlock_lock(lock); … … 156 152 * For exact description of meaning of possible combinations of usec and flags, 157 153 * see comment for waitq_sleep_timeout(). Note that when 158 * SYNCH_FLAGS_NON_BLOCKING is specified here, E AGAINis always154 * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always 159 155 * returned. 160 156 * 161 157 * @return See comment for waitq_sleep_timeout(). 162 158 */ 163 errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,159 int _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock, 164 160 uint32_t usec, int flags) 165 161 { 166 errno_t rc;162 int rc; 167 163 /* Save spinlock's state so we can restore it correctly later on. */ 168 164 ipl_t ipl = irq_lock->ipl;
Note:
See TracChangeset
for help on using the changeset viewer.