Changes in kernel/generic/src/synch/workqueue.c [04552324:63e27ef] in mainline
1 file edited: kernel/generic/src/synch/workqueue.c

The changeset replaces every use of the kernel ASSERT() macro in workqueue.c with the standard assert() and adds the required #include <assert.h>; no other logic changes.
kernel/generic/src/synch/workqueue.c
--- kernel/generic/src/synch/workqueue.c (r04552324)
+++ kernel/generic/src/synch/workqueue.c (r63e27ef)
@@ -37,4 +37,5 @@
  */
 
+#include <assert.h>
 #include <synch/workqueue.h>
 #include <synch/spinlock.h>
@@ -189,5 +190,5 @@
     if (workq) {
         if (workq_init(workq, name)) {
-            ASSERT(!workq_corrupted(workq));
+            assert(!workq_corrupted(workq));
             return workq;
         }
@@ -202,5 +203,5 @@
 void workq_destroy(struct work_queue *workq)
 {
-    ASSERT(!workq_corrupted(workq));
+    assert(!workq_corrupted(workq));
 
     irq_spinlock_lock(&workq->lock, true);
@@ -214,4 +215,4 @@
         workq_stop(workq);
     } else {
-        ASSERT(0 == running_workers);
+        assert(0 == running_workers);
     }
@@ -264,5 +265,5 @@
 static bool add_worker(struct work_queue *workq)
 {
-    ASSERT(!workq_corrupted(workq));
+    assert(!workq_corrupted(workq));
 
     thread_t *thread = thread_create(worker_thread, workq, TASK,
@@ -273,4 +274,4 @@
 
         /* cur_worker_cnt proactively increased in signal_worker_logic() .*/
-        ASSERT(0 < workq->cur_worker_cnt);
+        assert(0 < workq->cur_worker_cnt);
         --workq->cur_worker_cnt;
@@ -312,5 +313,5 @@
 
     /* cur_worker_cnt proactively increased in signal_worker() .*/
-    ASSERT(0 < workq->cur_worker_cnt);
+    assert(0 < workq->cur_worker_cnt);
     --workq->cur_worker_cnt;
 }
@@ -334,5 +335,5 @@
 void workq_stop(struct work_queue *workq)
 {
-    ASSERT(!workq_corrupted(workq));
+    assert(!workq_corrupted(workq));
 
     interrupt_workers(workq);
@@ -346,4 +347,4 @@
 
     /* workq_stop() may only be called once. */
-    ASSERT(!workq->stopping);
+    assert(!workq->stopping);
     workq->stopping = true;
@@ -358,5 +359,5 @@
 static void wait_for_workers(struct work_queue *workq)
 {
-    ASSERT(!PREEMPTION_DISABLED);
+    assert(!PREEMPTION_DISABLED);
 
     irq_spinlock_lock(&workq->lock, true);
@@ -375,5 +376,5 @@
     }
 
-    ASSERT(list_empty(&workq->workers));
+    assert(list_empty(&workq->workers));
 
     /* Wait for deferred add_worker_op(), signal_worker_op() to finish. */
@@ -473,5 +474,5 @@
     work_func_t func, bool can_block)
 {
-    ASSERT(!workq_corrupted(workq));
+    assert(!workq_corrupted(workq));
 
     bool success = true;
@@ -521,17 +522,17 @@
 static size_t active_workers_now(struct work_queue *workq)
 {
-    ASSERT(irq_spinlock_locked(&workq->lock));
+    assert(irq_spinlock_locked(&workq->lock));
 
     /* Workers blocked are sleeping in the work function (ie not idle). */
-    ASSERT(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
+    assert(workq->blocked_worker_cnt <= workq->cur_worker_cnt);
     /* Idle workers are waiting for more work to arrive in condvar_wait. */
-    ASSERT(workq->idle_worker_cnt <= workq->cur_worker_cnt);
+    assert(workq->idle_worker_cnt <= workq->cur_worker_cnt);
 
     /* Idle + blocked workers == sleeping worker threads. */
     size_t sleeping_workers = workq->blocked_worker_cnt + workq->idle_worker_cnt;
 
-    ASSERT(sleeping_workers <= workq->cur_worker_cnt);
+    assert(sleeping_workers <= workq->cur_worker_cnt);
     /* Workers pending activation are idle workers not yet given a time slice. */
-    ASSERT(workq->activate_pending <= workq->idle_worker_cnt);
+    assert(workq->activate_pending <= workq->idle_worker_cnt);
 
     /*
@@ -550,5 +551,5 @@
 static size_t active_workers(struct work_queue *workq)
 {
-    ASSERT(irq_spinlock_locked(&workq->lock));
+    assert(irq_spinlock_locked(&workq->lock));
 
     /*
@@ -573,10 +574,10 @@
 static void signal_worker_op(struct work_queue *workq)
 {
-    ASSERT(!workq_corrupted(workq));
+    assert(!workq_corrupted(workq));
 
     condvar_signal(&workq->activate_worker);
 
     irq_spinlock_lock(&workq->lock, true);
-    ASSERT(0 < workq->pending_op_cnt);
+    assert(0 < workq->pending_op_cnt);
     --workq->pending_op_cnt;
     irq_spinlock_unlock(&workq->lock, true);
@@ -593,6 +594,6 @@
 static signal_op_t signal_worker_logic(struct work_queue *workq, bool can_block)
 {
-    ASSERT(!workq_corrupted(workq));
-    ASSERT(irq_spinlock_locked(&workq->lock));
+    assert(!workq_corrupted(workq));
+    assert(irq_spinlock_locked(&workq->lock));
 
     /* Only signal workers if really necessary. */
@@ -645,5 +646,5 @@
      */
    if (need_worker && !can_block && 0 == active) {
-        ASSERT(0 == workq->idle_worker_cnt);
+        assert(0 == workq->idle_worker_cnt);
 
         irq_spinlock_lock(&nonblock_adder.lock, true);
@@ -681,5 +682,5 @@
     }
 
-    ASSERT(arg != NULL);
+    assert(arg != NULL);
 
     struct work_queue *workq = arg;
@@ -697,5 +698,5 @@
 static bool dequeue_work(struct work_queue *workq, work_t **pwork_item)
 {
-    ASSERT(!workq_corrupted(workq));
+    assert(!workq_corrupted(workq));
 
     irq_spinlock_lock(&workq->lock, true);
@@ -704,5 +705,5 @@
     if (!workq->stopping && worker_unnecessary(workq)) {
         /* There are too many workers for this load. Exit. */
-        ASSERT(0 < workq->cur_worker_cnt);
+        assert(0 < workq->cur_worker_cnt);
         --workq->cur_worker_cnt;
         list_remove(&THREAD->workq_link);
@@ -729,5 +730,5 @@
 
 #ifdef CONFIG_DEBUG
-        ASSERT(!work_item_corrupted(*pwork_item));
+        assert(!work_item_corrupted(*pwork_item));
         (*pwork_item)->cookie = 0;
 #endif
@@ -738,5 +739,5 @@
     } else {
         /* Requested to stop and no more work queued. */
-        ASSERT(workq->stopping);
+        assert(workq->stopping);
         --workq->cur_worker_cnt;
         stop = true;
@@ -751,5 +752,5 @@
 static bool worker_unnecessary(struct work_queue *workq)
 {
-    ASSERT(irq_spinlock_locked(&workq->lock));
+    assert(irq_spinlock_locked(&workq->lock));
 
     /* No work is pending. We don't need too many idle threads. */
@@ -775,11 +776,11 @@
 
     /* Ignore lock ordering just here. */
-    ASSERT(irq_spinlock_locked(&workq->lock));
+    assert(irq_spinlock_locked(&workq->lock));
 
     _condvar_wait_timeout_irq_spinlock(&workq->activate_worker,
         &workq->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
 
-    ASSERT(!workq_corrupted(workq));
-    ASSERT(irq_spinlock_locked(&workq->lock));
+    assert(!workq_corrupted(workq));
+    assert(irq_spinlock_locked(&workq->lock));
 
     THREAD->workq_idling = false;
@@ -791,14 +792,14 @@
 void workq_before_thread_is_ready(thread_t *thread)
 {
-    ASSERT(thread);
-    ASSERT(irq_spinlock_locked(&thread->lock));
+    assert(thread);
+    assert(irq_spinlock_locked(&thread->lock));
 
     /* Worker's work func() is about to wake up from sleeping. */
     if (thread->workq && thread->workq_blocked) {
         /* Must be blocked in user work func() and not be waiting for work. */
-        ASSERT(!thread->workq_idling);
-        ASSERT(thread->state == Sleeping);
-        ASSERT(THREAD != thread);
-        ASSERT(!workq_corrupted(thread->workq));
+        assert(!thread->workq_idling);
+        assert(thread->state == Sleeping);
+        assert(THREAD != thread);
+        assert(!workq_corrupted(thread->workq));
 
         /* Protected by thread->lock */
@@ -814,11 +815,11 @@
 void workq_after_thread_ran(void)
 {
-    ASSERT(THREAD);
-    ASSERT(irq_spinlock_locked(&THREAD->lock));
+    assert(THREAD);
+    assert(irq_spinlock_locked(&THREAD->lock));
 
     /* Worker's work func() is about to sleep/block. */
     if (THREAD->workq && THREAD->state == Sleeping && !THREAD->workq_idling) {
-        ASSERT(!THREAD->workq_blocked);
-        ASSERT(!workq_corrupted(THREAD->workq));
+        assert(!THREAD->workq_blocked);
+        assert(!workq_corrupted(THREAD->workq));
 
         THREAD->workq_blocked = true;
@@ -834,5 +835,5 @@
 
     if (op) {
-        ASSERT(add_worker_noblock_op == op || signal_worker_op == op);
+        assert(add_worker_noblock_op == op || signal_worker_op == op);
         op(THREAD->workq);
     }
@@ -903,5 +904,5 @@
             struct work_queue, nb_link);
 
-        ASSERT(!workq_corrupted(*pworkq));
+        assert(!workq_corrupted(*pworkq));
 
         list_remove(&(*pworkq)->nb_link);
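The change is purely mechanical: every ASSERT(expr) becomes assert(expr), with the new <assert.h> include providing the macro. For illustration, a minimal sketch of how such a kernel-side assert macro can be defined follows. This is an assumption, not HelenOS's actual definition: panic() is assumed to be the kernel's fatal-error routine, and the CONFIG_DEBUG guard mirrors the one already visible in the diff above.

/*
 * Hypothetical sketch of a kernel-side assert macro, for illustration
 * only; HelenOS's real <assert.h> may differ.
 */
#ifdef CONFIG_DEBUG

/* Evaluate the condition; on failure, halt with a diagnostic message. */
#define assert(expr) \
    ((expr) ? (void) 0 : \
        panic("Assertion failed (%s) in %s() at %s:%u.", \
        #expr, __func__, __FILE__, __LINE__))

#else /* CONFIG_DEBUG */

/* Assertions compile to nothing in non-debug builds. */
#define assert(expr) ((void) 0)

#endif /* CONFIG_DEBUG */

Because the macro expands to a single expression in both configurations, call sites such as assert(0 < workq->cur_worker_cnt); remain valid statements whether or not CONFIG_DEBUG is set.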