Changes in kernel/generic/src/synch/workqueue.c [63e27ef:04552324] in mainline
- File: 1 edited
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with "+")
- Removed (prefixed with "-")
kernel/generic/src/synch/workqueue.c
--- kernel/generic/src/synch/workqueue.c (r63e27ef)
+++ kernel/generic/src/synch/workqueue.c (r04552324)
@@ -37,5 +37,4 @@
  */

-#include <assert.h>
 #include <synch/workqueue.h>
 #include <synch/spinlock.h>
@@ -190,5 +189,5 @@
     if (workq) {
         if (workq_init(workq, name)) {
-            assert(!workq_corrupted(workq));
+            ASSERT(!workq_corrupted(workq));
             return workq;
         }
@@ -203,5 +202,5 @@
 void workq_destroy(struct work_queue *workq)
 {
-    assert(!workq_corrupted(workq));
+    ASSERT(!workq_corrupted(workq));

     irq_spinlock_lock(&workq->lock, true);
@@ -215,5 +214,5 @@
         workq_stop(workq);
     } else {
-        assert(0 == running_workers);
+        ASSERT(0 == running_workers);
     }

@@ -265,5 +264,5 @@
 static bool add_worker(struct work_queue *workq)
 {
-    assert(!workq_corrupted(workq));
+    ASSERT(!workq_corrupted(workq));

     thread_t *thread = thread_create(worker_thread, workq, TASK,
@@ -274,5 +273,5 @@

         /* cur_worker_cnt proactively increased in signal_worker_logic() .*/
-        assert(0 < workq->cur_worker_cnt);
+        ASSERT(0 < workq->cur_worker_cnt);
         --workq->cur_worker_cnt;

@@ -313,5 +312,5 @@

     /* cur_worker_cnt proactively increased in signal_worker() .*/
-    assert(0 < workq->cur_worker_cnt);
+    ASSERT(0 < workq->cur_worker_cnt);
     --workq->cur_worker_cnt;
 }
@@ -335,5 +334,5 @@
 void workq_stop(struct work_queue *workq)
 {
-    assert(!workq_corrupted(workq));
+    ASSERT(!workq_corrupted(workq));

     interrupt_workers(workq);
@@ -347,5 +346,5 @@

     /* workq_stop() may only be called once. */
-    assert(!workq->stopping);
+    ASSERT(!workq->stopping);
     workq->stopping = true;

@@ -359,5 +358,5 @@
 static void wait_for_workers(struct work_queue *workq)
 {
-    assert(!PREEMPTION_DISABLED);
+    ASSERT(!PREEMPTION_DISABLED);

     irq_spinlock_lock(&workq->lock, true);
@@ -376,5 +375,5 @@
     }

-    assert(list_empty(&workq->workers));
+    ASSERT(list_empty(&workq->workers));

     /* Wait for deferred add_worker_op(), signal_worker_op() to finish. */
@@ -474,5 +473,5 @@
     work_func_t func, bool can_block)
 {
-    assert(!workq_corrupted(workq));
+    ASSERT(!workq_corrupted(workq));

     bool success = true;
@@ -522,17 +521,17 @@
 static size_t active_workers_now(struct work_queue *workq)
 {
-    assert(irq_spinlock_locked(&workq->lock));
+    ASSERT(irq_spinlock_locked(&workq->lock));

     /* Workers blocked are sleeping in the work function (ie not idle). */
-    assert(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
+    ASSERT(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
     /* Idle workers are waiting for more work to arrive in condvar_wait. */
-    assert(workq->idle_worker_cnt <= workq->cur_worker_cnt);
+    ASSERT(workq->idle_worker_cnt <= workq->cur_worker_cnt);

     /* Idle + blocked workers == sleeping worker threads. */
     size_t sleeping_workers = workq->blocked_worker_cnt + workq->idle_worker_cnt;

-    assert(sleeping_workers <= workq->cur_worker_cnt);
+    ASSERT(sleeping_workers <= workq->cur_worker_cnt);
     /* Workers pending activation are idle workers not yet given a time slice. */
-    assert(workq->activate_pending <= workq->idle_worker_cnt);
+    ASSERT(workq->activate_pending <= workq->idle_worker_cnt);

     /*
@@ -551,5 +550,5 @@
 static size_t active_workers(struct work_queue *workq)
 {
-    assert(irq_spinlock_locked(&workq->lock));
+    ASSERT(irq_spinlock_locked(&workq->lock));

     /*
@@ -574,10 +573,10 @@
 static void signal_worker_op(struct work_queue *workq)
 {
-    assert(!workq_corrupted(workq));
+    ASSERT(!workq_corrupted(workq));

     condvar_signal(&workq->activate_worker);

     irq_spinlock_lock(&workq->lock, true);
-    assert(0 < workq->pending_op_cnt);
+    ASSERT(0 < workq->pending_op_cnt);
     --workq->pending_op_cnt;
     irq_spinlock_unlock(&workq->lock, true);
@@ -594,6 +593,6 @@
 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block)
 {
-    assert(!workq_corrupted(workq));
-    assert(irq_spinlock_locked(&workq->lock));
+    ASSERT(!workq_corrupted(workq));
+    ASSERT(irq_spinlock_locked(&workq->lock));

     /* Only signal workers if really necessary. */
@@ -646,5 +645,5 @@
      */
     if (need_worker && !can_block && 0 == active) {
-        assert(0 == workq->idle_worker_cnt);
+        ASSERT(0 == workq->idle_worker_cnt);

         irq_spinlock_lock(&nonblock_adder.lock, true);
@@ -682,5 +681,5 @@
     }

-    assert(arg != NULL);
+    ASSERT(arg != NULL);

     struct work_queue *workq = arg;
@@ -698,5 +697,5 @@
 static bool dequeue_work(struct work_queue *workq, work_t **pwork_item)
 {
-    assert(!workq_corrupted(workq));
+    ASSERT(!workq_corrupted(workq));

     irq_spinlock_lock(&workq->lock, true);
@@ -705,5 +704,5 @@
     if (!workq->stopping && worker_unnecessary(workq)) {
         /* There are too many workers for this load. Exit. */
-        assert(0 < workq->cur_worker_cnt);
+        ASSERT(0 < workq->cur_worker_cnt);
         --workq->cur_worker_cnt;
         list_remove(&THREAD->workq_link);
@@ -730,5 +729,5 @@

 #ifdef CONFIG_DEBUG
-        assert(!work_item_corrupted(*pwork_item));
+        ASSERT(!work_item_corrupted(*pwork_item));
         (*pwork_item)->cookie = 0;
 #endif
@@ -739,5 +738,5 @@
     } else {
         /* Requested to stop and no more work queued. */
-        assert(workq->stopping);
+        ASSERT(workq->stopping);
         --workq->cur_worker_cnt;
         stop = true;
@@ -752,5 +751,5 @@
 static bool worker_unnecessary(struct work_queue *workq)
 {
-    assert(irq_spinlock_locked(&workq->lock));
+    ASSERT(irq_spinlock_locked(&workq->lock));

     /* No work is pending. We don't need too many idle threads. */
@@ -776,11 +775,11 @@

     /* Ignore lock ordering just here. */
-    assert(irq_spinlock_locked(&workq->lock));
+    ASSERT(irq_spinlock_locked(&workq->lock));

     _condvar_wait_timeout_irq_spinlock(&workq->activate_worker,
         &workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

-    assert(!workq_corrupted(workq));
-    assert(irq_spinlock_locked(&workq->lock));
+    ASSERT(!workq_corrupted(workq));
+    ASSERT(irq_spinlock_locked(&workq->lock));

     THREAD->workq_idling = false;
@@ -792,14 +791,14 @@
 void workq_before_thread_is_ready(thread_t *thread)
 {
-    assert(thread);
-    assert(irq_spinlock_locked(&thread->lock));
+    ASSERT(thread);
+    ASSERT(irq_spinlock_locked(&thread->lock));

     /* Worker's work func() is about to wake up from sleeping. */
     if (thread->workq && thread->workq_blocked) {
         /* Must be blocked in user work func() and not be waiting for work. */
-        assert(!thread->workq_idling);
-        assert(thread->state == Sleeping);
-        assert(THREAD != thread);
-        assert(!workq_corrupted(thread->workq));
+        ASSERT(!thread->workq_idling);
+        ASSERT(thread->state == Sleeping);
+        ASSERT(THREAD != thread);
+        ASSERT(!workq_corrupted(thread->workq));

         /* Protected by thread->lock */
@@ -815,11 +814,11 @@
 void workq_after_thread_ran(void)
 {
-    assert(THREAD);
-    assert(irq_spinlock_locked(&THREAD->lock));
+    ASSERT(THREAD);
+    ASSERT(irq_spinlock_locked(&THREAD->lock));

     /* Worker's work func() is about to sleep/block. */
     if (THREAD->workq && THREAD->state == Sleeping && !THREAD->workq_idling) {
-        assert(!THREAD->workq_blocked);
-        assert(!workq_corrupted(THREAD->workq));
+        ASSERT(!THREAD->workq_blocked);
+        ASSERT(!workq_corrupted(THREAD->workq));

         THREAD->workq_blocked = true;
@@ -835,5 +834,5 @@

     if (op) {
-        assert(add_worker_noblock_op == op || signal_worker_op == op);
+        ASSERT(add_worker_noblock_op == op || signal_worker_op == op);
         op(THREAD->workq);
     }
@@ -904,5 +903,5 @@
         struct work_queue, nb_link);

-    assert(!workq_corrupted(*pworkq));
+    ASSERT(!workq_corrupted(*pworkq));

     list_remove(&(*pworkq)->nb_link);
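The diff above amounts to a single mechanical change: every call to assert() in workqueue.c is replaced by the kernel's ASSERT() macro, and the now-unused #include <assert.h> is dropped. As a rough sketch of what such a kernel assertion macro usually looks like (an illustration only, not the actual HelenOS header; the CONFIG_DEBUG guard and the panic() message format are assumptions here), consider:

/*
 * Hypothetical sketch of a kernel ASSERT() macro; not taken from the
 * HelenOS sources. With CONFIG_DEBUG off it expands to nothing, so the
 * check disappears from release builds.
 */
#ifdef CONFIG_DEBUG
#define ASSERT(expr) \
    do { \
        if (!(expr)) \
            panic("Assertion failed (%s) at %s:%d.", \
                #expr, __FILE__, __LINE__); \
    } while (0)
#else
#define ASSERT(expr)
#endif

Under a definition of this shape, the checks that dominate the changeset, such as ASSERT(!workq_corrupted(workq)) at the entry of nearly every work queue routine, cost nothing unless the kernel is built with debugging enabled.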