Changeset 58775d30 in mainline for kernel/generic/src/synch/waitq.c
- Timestamp:
- 2015-03-16T16:07:21Z (10 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 2003739
- Parents:
- 6069061 (diff), 795e2bf (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/waitq.c
r6069061 r58775d30 57 57 58 58 static void waitq_sleep_timed_out(void *); 59 static void waitq_complete_wakeup(waitq_t *); 60 59 61 60 62 /** Initialize wait queue … … 330 332 break; 331 333 default: 334 /* 335 * Wait for a waitq_wakeup() or waitq_unsleep() to complete 336 * before returning from waitq_sleep() to the caller. Otherwise 337 * the caller might expect that the wait queue is no longer used 338 * and deallocate it (although the wakeup on a another cpu has 339 * not yet completed and is using the wait queue). 340 * 341 * Note that we have to do this for ESYNCH_OK_BLOCKED and 342 * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT 343 * where the timeout handler stops using the waitq before waking 344 * us up. To be on the safe side, ensure the waitq is not in use 345 * anymore in this case as well. 346 */ 347 waitq_complete_wakeup(wq); 332 348 break; 333 349 } … … 357 373 } else { 358 374 if (PARAM_NON_BLOCKING(flags, usec)) { 359 /* Return immediatel ly instead of going to sleep */375 /* Return immediately instead of going to sleep */ 360 376 return ESYNCH_WOULD_BLOCK; 361 377 } … … 442 458 irq_spinlock_unlock(&wq->lock, true); 443 459 } 460 461 /** If there is a wakeup in progress actively waits for it to complete. 462 * 463 * The function returns once the concurrently running waitq_wakeup() 464 * exits. It returns immediately if there are no concurrent wakeups 465 * at the time. 466 * 467 * Interrupts must be disabled. 468 * 469 * Example usage: 470 * @code 471 * void callback(waitq *wq) 472 * { 473 * // Do something and notify wait_for_completion() that we're done. 474 * waitq_wakeup(wq); 475 * } 476 * void wait_for_completion(void) 477 * { 478 * waitq wg; 479 * waitq_initialize(&wq); 480 * // Run callback() in the background, pass it wq. 481 * do_asynchronously(callback, &wq); 482 * // Wait for callback() to complete its work. 
483 * waitq_sleep(&wq); 484 * // callback() completed its work, but it may still be accessing 485 * // wq in waitq_wakeup(). Therefore it is not yet safe to return 486 * // from waitq_sleep() or it would clobber up our stack (where wq 487 * // is stored). waitq_sleep() ensures the wait queue is no longer 488 * // in use by invoking waitq_complete_wakeup() internally. 489 * 490 * // waitq_sleep() returned, it is safe to free wq. 491 * } 492 * @endcode 493 * 494 * @param wq Pointer to a wait queue. 495 */ 496 static void waitq_complete_wakeup(waitq_t *wq) 497 { 498 ASSERT(interrupts_disabled()); 499 500 irq_spinlock_lock(&wq->lock, false); 501 irq_spinlock_unlock(&wq->lock, false); 502 } 503 444 504 445 505 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
Note:
See TracChangeset
for help on using the changeset viewer.