Changes in kernel/generic/src/synch/waitq.c [63e27ef:897fd8f1] in mainline
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/waitq.c
r63e27ef r897fd8f1 45 45 46 46 #include <assert.h> 47 #include <errno.h> 47 48 #include <synch/waitq.h> 48 49 #include <synch/spinlock.h> … … 238 239 * @param flags Specify mode of the sleep. 239 240 * 241 * @param[out] blocked On return, regardless of the return code, 242 * `*blocked` is set to `true` iff the thread went to 243 * sleep. 244 * 240 245 * The sleep can be interrupted only if the 241 246 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. … … 251 256 * call will immediately return, reporting either success or failure. 252 257 * 253 * @return E SYNCH_WOULD_BLOCK, meaning that the sleep failed because at the254 * time of the call there was no pending wakeup255 * @return E SYNCH_TIMEOUT, meaning that the sleep timed out.256 * @return E SYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping257 * thread. 258 * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there259 * was a pending wakeup at the time of the call. The caller was not put260 * asleep at all.261 * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep262 * was attempted.263 * 264 */ 265 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags )258 * @return EAGAIN, meaning that the sleep failed because it was requested 259 * as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup. 260 * @return ETIMEOUT, meaning that the sleep timed out. 261 * @return EINTR, meaning that somebody interrupted the sleeping 262 * thread. Check the value of `*blocked` to see if the thread slept, 263 * or if a pending interrupt forced it to return immediately. 264 * @return EOK, meaning that none of the above conditions occurred, and the 265 * thread was woken up successfully by `waitq_wakeup()`. Check 266 * the value of `*blocked` to see if the thread slept or if 267 * the wakeup was already pending. 
268 * 269 */ 270 int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked) 266 271 { 267 272 assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec))); 268 273 269 274 ipl_t ipl = waitq_sleep_prepare(wq); 270 int rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 271 waitq_sleep_finish(wq, rc, ipl); 275 bool nblocked; 276 int rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked); 277 waitq_sleep_finish(wq, nblocked, ipl); 278 279 if (blocked != NULL) { 280 *blocked = nblocked; 281 } 272 282 return rc; 273 283 } … … 320 330 * lock is released. 321 331 * 322 * @param wq Wait queue. 323 * @param rc Return code of waitq_sleep_timeout_unsafe(). 324 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 325 * 326 */ 327 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) 328 { 329 switch (rc) { 330 case ESYNCH_WOULD_BLOCK: 331 case ESYNCH_OK_ATOMIC: 332 irq_spinlock_unlock(&wq->lock, false); 333 break; 334 default: 335 /* 332 * @param wq Wait queue. 333 * @param blocked Out parameter of waitq_sleep_timeout_unsafe(). 334 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 335 * 336 */ 337 void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl) 338 { 339 if (blocked) { 340 /* 336 341 * Wait for a waitq_wakeup() or waitq_unsleep() to complete 337 342 * before returning from waitq_sleep() to the caller. Otherwise 338 343 * the caller might expect that the wait queue is no longer used 339 344 * and deallocate it (although the wakeup on another cpu has 340 * not yet completed and is using the wait queue). 341 * 342 * Note that we have to do this for ESYNCH_OK_BLOCKED and 343 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT 344 * where the timeout handler stops using the waitq before waking 345 * us up. To be on the safe side, ensure the waitq is not in use 346 * anymore in this case as well. 345 * not yet completed and is using the wait queue). 
346 * 347 * Note that we have to do this for EOK and EINTR, but not 348 * necessarily for ETIMEOUT where the timeout handler stops 349 * using the waitq before waking us up. To be on the safe side, 350 * ensure the waitq is not in use anymore in this case as well. 347 351 */ 348 352 waitq_complete_wakeup(wq); 349 break; 353 } else { 354 irq_spinlock_unlock(&wq->lock, false); 350 355 } 351 356 … … 363 368 * @param flags See waitq_sleep_timeout(). 364 369 * 370 * @param[out] blocked See waitq_sleep_timeout(). 371 * 365 372 * @return See waitq_sleep_timeout(). 366 373 * 367 374 */ 368 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags) 369 { 375 int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked) 376 { 377 *blocked = false; 378 370 379 /* Checks whether to go to sleep at all */ 371 380 if (wq->missed_wakeups) { 372 381 wq->missed_wakeups--; 373 return E SYNCH_OK_ATOMIC;382 return EOK; 374 383 } else { 375 384 if (PARAM_NON_BLOCKING(flags, usec)) { 376 385 /* Return immediately instead of going to sleep */ 377 return E SYNCH_WOULD_BLOCK;386 return EAGAIN; 378 387 } 379 388 } … … 392 401 if (THREAD->interrupted) { 393 402 irq_spinlock_unlock(&THREAD->lock, false); 394 irq_spinlock_unlock(&wq->lock, false); 395 return ESYNCH_INTERRUPTED; 403 return EINTR; 396 404 } 397 405 … … 405 413 THREAD->last_cycle = get_cycle(); 406 414 irq_spinlock_unlock(&THREAD->lock, false); 407 return E SYNCH_INTERRUPTED;415 return EINTR; 408 416 } 409 417 } else … … 416 424 THREAD->last_cycle = get_cycle(); 417 425 irq_spinlock_unlock(&THREAD->lock, false); 418 return E SYNCH_TIMEOUT;426 return ETIMEOUT; 419 427 } 420 428 … … 433 441 THREAD->sleep_queue = wq; 434 442 443 /* Must be before entry to scheduler, because there are multiple 444 * return vectors. 
445 */ 446 *blocked = true; 447 435 448 irq_spinlock_unlock(&THREAD->lock, false); 436 449 … … 438 451 scheduler(); 439 452 440 return E SYNCH_OK_BLOCKED;453 return EOK; 441 454 } 442 455
Note:
See TracChangeset
for help on using the changeset viewer.