Changes in kernel/generic/src/synch/rcu.c [82719589:63e27ef] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/synch/rcu.c
r82719589 r63e27ef 123 123 * 124 124 */ 125 125 126 #include <assert.h> 126 127 #include <synch/rcu.h> 127 128 #include <synch/condvar.h> … … 404 405 /* Stop and wait for reclaimers. */ 405 406 for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) { 406 ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);407 assert(cpus[cpu_id].rcu.reclaimer_thr != NULL); 407 408 408 409 if (cpus[cpu_id].rcu.reclaimer_thr) { … … 487 488 static void read_unlock_impl(size_t *pnesting_cnt) 488 489 { 489 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());490 assert(PREEMPTION_DISABLED || interrupts_disabled()); 490 491 491 492 if (0 == --(*pnesting_cnt)) { … … 509 510 void _rcu_signal_read_unlock(void) 510 511 { 511 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());512 assert(PREEMPTION_DISABLED || interrupts_disabled()); 512 513 513 514 /* … … 531 532 */ 532 533 if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) { 533 ASSERT(link_used(&THREAD->rcu.preempt_link));534 assert(link_used(&THREAD->rcu.preempt_link)); 534 535 535 536 rm_preempted_reader(); … … 563 564 { 564 565 /* Calling from a reader section will deadlock. 
*/ 565 ASSERT(!rcu_read_locked());566 assert(!rcu_read_locked()); 566 567 567 568 synch_item_t completion; … … 576 577 { 577 578 synch_item_t *completion = member_to_inst(rcu_item, synch_item_t, rcu_item); 578 ASSERT(completion);579 assert(completion); 579 580 waitq_wakeup(&completion->wq, WAKEUP_FIRST); 580 581 } … … 615 616 static void add_barrier_cb(void *arg) 616 617 { 617 ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);618 assert(interrupts_disabled() || PREEMPTION_DISABLED); 618 619 atomic_inc(&rcu.barrier_wait_cnt); 619 620 rcu_call(&CPU->rcu.barrier_item, barrier_complete); … … 657 658 rcu_func_t func) 658 659 { 659 ASSERT(rcu_item);660 assert(rcu_item); 660 661 661 662 rcu_item->func = func; … … 689 690 static bool cur_cbs_empty(void) 690 691 { 691 ASSERT(THREAD && THREAD->wired);692 assert(THREAD && THREAD->wired); 692 693 return NULL == CPU->rcu.cur_cbs; 693 694 } … … 695 696 static bool next_cbs_empty(void) 696 697 { 697 ASSERT(THREAD && THREAD->wired);698 assert(THREAD && THREAD->wired); 698 699 return NULL == CPU->rcu.next_cbs; 699 700 } … … 702 703 static bool arriving_cbs_empty(void) 703 704 { 704 ASSERT(THREAD && THREAD->wired);705 assert(THREAD && THREAD->wired); 705 706 /* 706 707 * Accessing with interrupts enabled may at worst lead to … … 719 720 static void reclaimer(void *arg) 720 721 { 721 ASSERT(THREAD && THREAD->wired);722 ASSERT(THREAD == CPU->rcu.reclaimer_thr);722 assert(THREAD && THREAD->wired); 723 assert(THREAD == CPU->rcu.reclaimer_thr); 723 724 724 725 rcu_gp_t last_compl_gp = 0; … … 726 727 727 728 while (ok && wait_for_pending_cbs()) { 728 ASSERT(CPU->rcu.reclaimer_thr == THREAD);729 assert(CPU->rcu.reclaimer_thr == THREAD); 729 730 730 731 exec_completed_cbs(last_compl_gp); … … 765 766 /* Both next_cbs and cur_cbs GP elapsed. 
*/ 766 767 if (CPU->rcu.next_cbs_gp <= last_completed_gp) { 767 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);768 assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp); 768 769 769 770 size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt; … … 864 865 */ 865 866 if (CPU->rcu.next_cbs) { 866 ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);867 assert(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs); 867 868 868 869 CPU->rcu.arriving_cbs = NULL; … … 913 914 } 914 915 915 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);916 assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp); 916 917 917 918 return expedite; … … 933 934 spinlock_lock(&rcu.gp_lock); 934 935 935 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);936 ASSERT(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);936 assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp); 937 assert(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1); 937 938 938 939 while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) { … … 1029 1030 static void sample_local_cpu(void *arg) 1030 1031 { 1031 ASSERT(interrupts_disabled());1032 assert(interrupts_disabled()); 1032 1033 cpu_mask_t *reader_cpus = (cpu_mask_t *)arg; 1033 1034 … … 1054 1055 void rcu_after_thread_ran(void) 1055 1056 { 1056 ASSERT(interrupts_disabled());1057 assert(interrupts_disabled()); 1057 1058 1058 1059 /* … … 1116 1117 void rcu_before_thread_runs(void) 1117 1118 { 1118 ASSERT(!rcu_read_locked());1119 assert(!rcu_read_locked()); 1119 1120 1120 1121 /* Load the thread's saved nesting count from before it was preempted. 
*/ … … 1129 1130 void rcu_thread_exiting(void) 1130 1131 { 1131 ASSERT(THE->rcu_nesting == 0);1132 assert(THE->rcu_nesting == 0); 1132 1133 1133 1134 /* … … 1157 1158 void _rcu_preempted_unlock(void) 1158 1159 { 1159 ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);1160 assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting); 1160 1161 1161 1162 size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0); … … 1220 1221 } 1221 1222 1222 ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);1223 ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);1223 assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp); 1224 assert(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp); 1224 1225 1225 1226 /* … … 1262 1263 static bool cv_wait_for_gp(rcu_gp_t wait_on_gp) 1263 1264 { 1264 ASSERT(spinlock_locked(&rcu.gp_lock));1265 assert(spinlock_locked(&rcu.gp_lock)); 1265 1266 1266 1267 bool interrupted = false; … … 1284 1285 1285 1286 if (detector_idle) { 1286 ASSERT(_rcu_cur_gp == rcu.completed_gp);1287 assert(_rcu_cur_gp == rcu.completed_gp); 1287 1288 condvar_signal(&rcu.req_gp_changed); 1288 1289 } … … 1323 1324 static bool wait_for_detect_req(void) 1324 1325 { 1325 ASSERT(spinlock_locked(&rcu.gp_lock));1326 assert(spinlock_locked(&rcu.gp_lock)); 1326 1327 1327 1328 bool interrupted = false; … … 1340 1341 static void end_cur_gp(void) 1341 1342 { 1342 ASSERT(spinlock_locked(&rcu.gp_lock));1343 assert(spinlock_locked(&rcu.gp_lock)); 1343 1344 1344 1345 rcu.completed_gp = _rcu_cur_gp; … … 1423 1424 static void sample_local_cpu(void *arg) 1424 1425 { 1425 ASSERT(interrupts_disabled());1426 ASSERT(!CPU->rcu.is_delaying_gp);1426 assert(interrupts_disabled()); 1427 assert(!CPU->rcu.is_delaying_gp); 1427 1428 1428 1429 /* Cpu did not pass a quiescent state yet. */ … … 1430 1431 /* Interrupted a reader in a reader critical section. 
*/ 1431 1432 if (0 < CPU->rcu.nesting_cnt) { 1432 ASSERT(!CPU->idle);1433 assert(!CPU->idle); 1433 1434 /* 1434 1435 * Note to notify the detector from rcu_read_unlock(). … … 1492 1493 void rcu_after_thread_ran(void) 1493 1494 { 1494 ASSERT(interrupts_disabled());1495 assert(interrupts_disabled()); 1495 1496 1496 1497 /* … … 1559 1560 void rcu_before_thread_runs(void) 1560 1561 { 1561 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());1562 ASSERT(0 == CPU->rcu.nesting_cnt);1562 assert(PREEMPTION_DISABLED || interrupts_disabled()); 1563 assert(0 == CPU->rcu.nesting_cnt); 1563 1564 1564 1565 /* Load the thread's saved nesting count from before it was preempted. */ … … 1590 1591 void rcu_thread_exiting(void) 1591 1592 { 1592 ASSERT(THREAD != NULL);1593 ASSERT(THREAD->state == Exiting);1594 ASSERT(PREEMPTION_DISABLED || interrupts_disabled());1593 assert(THREAD != NULL); 1594 assert(THREAD->state == Exiting); 1595 assert(PREEMPTION_DISABLED || interrupts_disabled()); 1595 1596 1596 1597 /* … … 1615 1616 static void start_new_gp(void) 1616 1617 { 1617 ASSERT(spinlock_locked(&rcu.gp_lock));1618 assert(spinlock_locked(&rcu.gp_lock)); 1618 1619 1619 1620 irq_spinlock_lock(&rcu.preempt_lock, true); … … 1734 1735 static void upd_missed_gp_in_wait(rcu_gp_t completed_gp) 1735 1736 { 1736 ASSERT(CPU->rcu.cur_cbs_gp <= completed_gp);1737 assert(CPU->rcu.cur_cbs_gp <= completed_gp); 1737 1738 1738 1739 size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp); … … 1764 1765 irq_spinlock_lock(&rcu.preempt_lock, true); 1765 1766 1766 ASSERT(link_used(&THREAD->rcu.preempt_link));1767 assert(link_used(&THREAD->rcu.preempt_link)); 1767 1768 1768 1769 bool prev_empty = list_empty(&rcu.cur_preempted);
Note: See TracChangeset for help on using the changeset viewer.