Changeset 82719589 in mainline
- Timestamp: 2012-11-19T21:14:26Z (12 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0adfc9d
- Parents: 6831475
- Location: kernel/generic
- Files: 2 edited
kernel/generic/include/synch/rcu_types.h
(r6831475 → r82719589)

@@ -153 +153 @@
   /** True if the thread was preempted in a reader section.
    *
-  * The thread is place into rcu.cur_preempted or rcu.next_preempted
+  * The thread is placed into rcu.cur_preempted or rcu.next_preempted
    * and must remove itself in rcu_read_unlock().
    *
kernel/generic/src/synch/rcu.c
(r6831475 → r82719589)

@@ -67 +67 @@
   * the detector; or the cpu is still in a CS) the cpu is interrupted
   * via an IPI. If the IPI handler finds the cpu still in a CS, it instructs
-  * the cpu to notify the detector that it had exited the CS via a semaphore.
+  * the cpu to notify the detector that it had exited the CS via a semaphore
+  * (CPU->rcu.is_delaying_gp).
   * The detector then waits on the semaphore for any cpus to exit their
   * CSs. Lastly, it waits for the last reader preempted in a CS to
…
@@ -290 +291 @@
  static void note_preempted_reader(void);
  static void rm_preempted_reader(void);
- static void upd_max_cbs_in_slice(void);
+ static void upd_max_cbs_in_slice(size_t arriving_cbs_cnt);

@@ -510 +511 @@
      ASSERT(PREEMPTION_DISABLED || interrupts_disabled());

-     /* todo: make NMI safe with cpu-local atomic ops. */
-
      /*
-      * We have to disable interrupts in order to make checking
-      * and resetting was_preempted and is_delaying_gp atomic
-      * with respect to local interrupt handlers. Otherwise
-      * an interrupt could beat us to calling semaphore_up()
-      * before we reset the appropriate flag.
-      */
-     ipl_t ipl = interrupts_disable();
+      * If an interrupt occurs here (even a NMI) it may beat us to
+      * resetting .is_delaying_gp or .was_preempted and up the semaphore
+      * for us.
+      */

      /*
…
       * notify it that the reader did so.
       */
-     if (CPU->rcu.is_delaying_gp) {
-         CPU->rcu.is_delaying_gp = false;
+     if (local_atomic_exchange(&CPU->rcu.is_delaying_gp, false)) {
          semaphore_up(&rcu.remaining_readers);
      }
…
       * detector if so.
       */
-     if (THREAD && THREAD->rcu.was_preempted) {
+     if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
          ASSERT(link_used(&THREAD->rcu.preempt_link));
-         THREAD->rcu.was_preempted = false;
-
          rm_preempted_reader();
…
      /* If there was something to signal to the detector we have done so. */
      CPU->rcu.signal_unlock = false;
-
-     interrupts_restore(ipl);
  }

@@ -671 +663 @@
      preemption_disable();
-
-     ipl_t ipl = interrupts_disable();

      rcu_cpu_data_t *r = &CPU->rcu;
-     *r->parriving_cbs_tail = rcu_item;
-     r->parriving_cbs_tail = &rcu_item->next;
-
-     size_t cnt = ++r->arriving_cbs_cnt;
-     interrupts_restore(ipl);
+
+     rcu_item_t **prev_tail
+         = local_atomic_exchange(&r->parriving_cbs_tail, &rcu_item->next);
+     *prev_tail = rcu_item;
+
+     /* Approximate the number of callbacks present. */
+     ++r->arriving_cbs_cnt;

      if (expedite) {
…
      }

+     bool first_cb = (prev_tail == &CPU->rcu.arriving_cbs);
+
      /* Added first callback - notify the reclaimer. */
-     if (cnt == 1 && !semaphore_count_get(&r->arrived_flag)) {
+     if (first_cb && !semaphore_count_get(&r->arrived_flag)) {
          semaphore_up(&r->arrived_flag);
      }
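The rewritten enqueue path above shows the changeset's central technique: instead of disabling interrupts around the list update, the producer first claims the tail slot with a cpu-local atomic exchange and only then publishes the item through the slot it claimed, so an interrupt or NMI handler that enqueues in between cannot lose or overwrite an item. Below is a minimal, self-contained sketch of that tail-exchange append. The types and the exchange_tail() helper are illustrative stand-ins, not HelenOS API; a real implementation would make the exchange a single atomic swap instruction, which is presumably what local_atomic_exchange() provides in this changeset.

    #include <stddef.h>

    typedef struct item {
        struct item *next;
    } item_t;

    typedef struct {
        item_t *head;    /* First queued item, NULL when the list is empty. */
        item_t **ptail;  /* Points at head, or at the last item's next field. */
    } cb_list_t;

    /*
     * Illustrative stand-in for a cpu-local atomic exchange of the tail slot.
     * On real hardware this must be one atomic swap (e.g. xchg) so that
     * interrupt/NMI handlers on the same cpu never observe a torn update;
     * the plain assignments here only sketch the interface.
     */
    static inline item_t **exchange_tail(item_t ***ptail, item_t **new_tail)
    {
        item_t **old = *ptail;
        *ptail = new_tail;
        return old;
    }

    static void list_init(cb_list_t *list)
    {
        list->head = NULL;
        list->ptail = &list->head;
    }

    static void enqueue(cb_list_t *list, item_t *item)
    {
        item->next = NULL;

        /* Claim the tail slot first. A handler interrupting right after this
         * exchange appends behind item->next and never reuses our slot. */
        item_t **prev_tail = exchange_tail(&list->ptail, &item->next);

        /* Publish the item through the claimed slot. Until this store the
         * consumer simply does not see the item yet, which is harmless. */
        *prev_tail = item;
    }

In the diff, the test prev_tail == &CPU->rcu.arriving_cbs doubles as the "first callback" check, replacing the counter-based cnt == 1 test that was only safe while interrupts were disabled.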
@@ -849 +843 @@
      CPU->rcu.cur_cbs_gp = CPU->rcu.next_cbs_gp;

-     /* Move arriving_cbs to next_cbs. Empties arriving_cbs. */
-     ipl_t ipl = interrupts_disable();
-
+     /* Move arriving_cbs to next_cbs. */
+
+     CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
+     CPU->rcu.arriving_cbs_cnt = 0;
+
      /*
       * Too many callbacks queued. Better speed up the detection
       * or risk exhausting all system memory.
       */
-     bool expedite = (EXPEDITE_THRESHOLD < CPU->rcu.arriving_cbs_cnt)
+     bool expedite = (EXPEDITE_THRESHOLD < CPU->rcu.next_cbs_cnt)
          || CPU->rcu.expedite_arriving;
-
      CPU->rcu.expedite_arriving = false;

+     /* Start moving the arriving_cbs list to next_cbs. */
      CPU->rcu.next_cbs = CPU->rcu.arriving_cbs;
-     CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
-
-     CPU->rcu.arriving_cbs = NULL;
-     CPU->rcu.parriving_cbs_tail = &CPU->rcu.arriving_cbs;
-     CPU->rcu.arriving_cbs_cnt = 0;
-
-     interrupts_restore(ipl);
+
+     /*
+      * At least one callback arrived. The tail therefore does not point
+      * to the head of arriving_cbs and we can safely reset it to NULL.
+      */
+     if (CPU->rcu.next_cbs) {
+         ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
+
+         CPU->rcu.arriving_cbs = NULL;
+         /* Reset arriving_cbs before updating the tail pointer. */
+         compiler_barrier();
+         /* Updating the tail pointer completes the move of arriving_cbs. */
+         ACCESS_ONCE(CPU->rcu.parriving_cbs_tail) = &CPU->rcu.arriving_cbs;
+     } else {
+         /*
+          * arriving_cbs was null and parriving_cbs_tail pointed to it
+          * so leave it that way. Note that interrupt handlers may have
+          * added a callback in the meantime so it is not safe to reset
+          * arriving_cbs or parriving_cbs.
+          */
+     }

      /* Update statistics of arrived callbacks. */
…
@@ -1050 +1060 @@
       * with a local copy.
       */
-     size_t nesting_cnt = ACCESS_ONCE(THE->rcu_nesting);
+     size_t nesting_cnt = local_atomic_exchange(&THE->rcu_nesting, 0);
+
+     /*
+      * Ensures NMIs see .rcu_nesting without the WAS_PREEMPTED mark and
+      * do not accidentally call rm_preempted_reader() from unlock().
+      */
+     compiler_barrier();

      /* Preempted a reader critical section for the first time. */
…
      /* Save the thread's nesting count when it is not running. */
      THREAD->rcu.nesting_cnt = nesting_cnt;
-     ACCESS_ONCE(THE->rcu_nesting) = 0;

      if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
…
      }

-     upd_max_cbs_in_slice();
+     upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
  }

@@ -1101 +1116 @@
  void rcu_before_thread_runs(void)
  {
-     ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
      ASSERT(!rcu_read_locked());

@@ -1115 +1129 @@
  void rcu_thread_exiting(void)
  {
-     ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
+     ASSERT(THE->rcu_nesting == 0);
+
      /*
       * The thread forgot to exit its reader critical section.
…
      /* Emulate _rcu_preempted_unlock() with the proper nesting count. */
      if (THREAD->rcu.nesting_cnt & RCU_WAS_PREEMPTED) {
-         ipl_t ipl = interrupts_disable();
          rm_preempted_reader();
-         interrupts_restore(ipl);
      }
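The reclaimer-side move of arriving_cbs into next_cbs above is the consumer half of the same lock-free hand-over: the head is detached first, a compiler barrier keeps that store ahead of the tail reset, and the tail is reset only when at least one callback actually arrived, because an interrupted enqueue may otherwise already have claimed the tail slot. A hedged sketch of that drain step follows, mirroring the structure of the hunk; the types repeat the illustrative ones from the enqueue sketch, and a GCC-style memory clobber stands in for the kernel's compiler_barrier()/ACCESS_ONCE macros.

    #include <stddef.h>

    typedef struct item {
        struct item *next;
    } item_t;

    typedef struct {
        item_t *head;    /* First queued item, NULL when the list is empty. */
        item_t **ptail;  /* Points at head, or at the last item's next field. */
    } cb_list_t;

    /* Stops the compiler from reordering stores across this point. */
    #define barrier() __asm__ volatile ("" ::: "memory")

    /*
     * Detach everything queued so far and return it. Runs only on the cpu
     * that owns the list; concurrent enqueues come from local interrupt or
     * NMI handlers, never from other cpus.
     */
    static item_t *drain(cb_list_t *list)
    {
        item_t *taken = list->head;

        if (taken != NULL) {
            /*
             * At least one item arrived, so ptail no longer points at head
             * and both may be reset without racing an in-flight enqueue.
             */
            list->head = NULL;
            /* Clear the head before the tail reset re-arms the empty list. */
            barrier();
            list->ptail = &list->head;
        } else {
            /*
             * Leave ptail untouched: a handler may already have claimed the
             * tail slot for an item it has not yet published through head.
             */
        }

        return taken;
    }

The single store of the tail pointer at the end of the if branch plays the role of the diff's ACCESS_ONCE(CPU->rcu.parriving_cbs_tail) = &CPU->rcu.arriving_cbs: it hands the old list over and re-arms the empty one in one step.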
@@ -1144 +1157 @@
  void _rcu_preempted_unlock(void)
  {
-     ipl_t ipl = interrupts_disable();
-
-     /* todo: Replace with cpu-local atomics to be NMI-safe */
-     if (THE->rcu_nesting == RCU_WAS_PREEMPTED) {
-         THE->rcu_nesting = 0;
+     ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
+
+     size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
+     if (prev == RCU_WAS_PREEMPTED) {
          /*
           * NMI handlers are never preempted but may call rm_preempted_reader()
           * if a NMI occurred in _rcu_preempted_unlock() of a preempted thread.
+          * The only other rcu code that may have been interrupted by the NMI
+          * in _rcu_preempted_unlock() is: an IPI/sample_local_cpu() and
+          * the initial part of rcu_after_thread_ran().
+          *
           * rm_preempted_reader() will not deadlock because none of the locks
-          * it uses are locked in this case.
+          * it uses are locked in this case. Neither _rcu_preempted_unlock()
+          * nor sample_local_cpu() nor the initial part of rcu_after_thread_ran()
+          * acquire any locks.
           */
          rm_preempted_reader();
      }
-
-     interrupts_restore(ipl);
  }
…
@@ -1415 +1431 @@
      if (0 < CPU->rcu.nesting_cnt) {
          ASSERT(!CPU->idle);
-         /* Note to notify the detector from rcu_read_unlock(). */
-         CPU->rcu.is_delaying_gp = true;
          /*
-          * Set signal_unlock only after setting is_delaying_gp so
-          * that NMI handlers do not accidentally clear it in unlock()
-          * before seeing and acting upon is_delaying_gp.
+          * Note to notify the detector from rcu_read_unlock().
+          *
+          * ACCESS_ONCE ensures the compiler writes to is_delaying_gp
+          * only after it determines that we are in a reader CS.
           */
-         compiler_barrier();
+         ACCESS_ONCE(CPU->rcu.is_delaying_gp) = true;
          CPU->rcu.signal_unlock = true;
…
@@ -1478 +1493 @@
  {
      ASSERT(interrupts_disabled());
-     /* todo: make is_delaying_gp and was_preempted NMI safe via local atomics.*/

      /*
       * Prevent NMI handlers from interfering. The detector will be notified
-      * here if CPU->rcu.is_delaying_gp and the current thread is no longer
-      * running so there is nothing to signal to the detector.
+      * in this function if CPU->rcu.is_delaying_gp. The current thread is
+      * no longer running so there is nothing else to signal to the detector.
       */
      CPU->rcu.signal_unlock = false;
-     /* Separates clearing of .signal_unlock from CPU->rcu.nesting_cnt = 0. */
+     /*
+      * Separates clearing of .signal_unlock from accesses to
+      * THREAD->rcu.was_preempted and CPU->rcu.nesting_cnt.
+      */
      compiler_barrier();

      /* Save the thread's nesting count when it is not running. */
      THREAD->rcu.nesting_cnt = CPU->rcu.nesting_cnt;
-     /* Interrupt handlers might use RCU while idle in scheduler(). */
-     CPU->rcu.nesting_cnt = 0;

      /* Preempted a reader critical section for the first time. */
…
      _rcu_record_qs();

+     /*
+      * Interrupt handlers might use RCU while idle in scheduler().
+      * The preempted reader has been noted globally, so the handlers
+      * may now start announcing quiescent states.
+      */
+     CPU->rcu.nesting_cnt = 0;
+
      /*
       * This cpu is holding up the current GP. Let the detector know
…
      }

-     upd_max_cbs_in_slice();
+     upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
  }
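Several of the hunks above replace the old "check a flag, then clear it with interrupts disabled" pattern with a single cpu-local atomic exchange (is_delaying_gp, was_preempted, the RCU_WAS_PREEMPTED mark in rcu_nesting). The point is that the test and the reset become one indivisible step even with respect to NMIs, so at most one of the racing contexts observes the flag set and signals the detector. A minimal sketch of the idea follows; the static flag and the flag_test_and_clear() helper are placeholders, and a real kernel would implement the helper with one atomic swap instruction, as local_atomic_exchange() in this changeset presumably does.

    #include <stdbool.h>

    /* Per-cpu flag in the kernel; a single static suffices for the sketch. */
    static volatile bool is_delaying_gp;

    /*
     * Illustrative stand-in: swap the flag with false and return the old
     * value. Only atomic if compiled down to a single swap instruction;
     * the two plain accesses here merely sketch the interface.
     */
    static bool flag_test_and_clear(volatile bool *flag)
    {
        bool old = *flag;
        *flag = false;
        return old;
    }

    /*
     * Called by whichever context notices that the delayed reader section
     * ended: thread code, an interrupt handler, or an NMI handler. With a
     * truly atomic exchange, exactly one caller sees the flag set and
     * notifies the detector.
     */
    static void signal_reader_exit(void (*notify_detector)(void))
    {
        if (flag_test_and_clear(&is_delaying_gp))
            notify_detector();
    }

The old code achieved the same exclusion by bracketing the test and reset with interrupts_disable()/interrupts_restore(), which NMIs ignore; the exchange closes that gap.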
@@ -1542 +1564 @@
      /* Load the thread's saved nesting count from before it was preempted. */
      CPU->rcu.nesting_cnt = THREAD->rcu.nesting_cnt;
+
+     /*
+      * Ensures NMI see the proper nesting count before .signal_unlock.
+      * Otherwise the NMI may incorrectly signal that a preempted reader
+      * exited its reader section.
+      */
+     compiler_barrier();
+
      /*
       * In the unlikely event that a NMI occurs between the loading of the
…
       * rcu_read_unlock() and clear signal_unlock. In that case we will
       * incorrectly overwrite signal_unlock from false to true. This event
-      * situation benign and the next rcu_read_unlock() will at worst
+      * is benign and the next rcu_read_unlock() will at worst
       * needlessly invoke _rcu_signal_unlock().
       */
…
@@ -1732 +1762 @@
  static void rm_preempted_reader(void)
  {
-     irq_spinlock_lock(&rcu.preempt_lock, false);
+     irq_spinlock_lock(&rcu.preempt_lock, true);

      ASSERT(link_used(&THREAD->rcu.preempt_link));
…
      }

-     irq_spinlock_unlock(&rcu.preempt_lock, false);
+     irq_spinlock_unlock(&rcu.preempt_lock, true);
  }
…
@@ -1777 +1807 @@
- static void upd_max_cbs_in_slice(void)
+ static void upd_max_cbs_in_slice(size_t arriving_cbs_cnt)
  {
      rcu_cpu_data_t *cr = &CPU->rcu;

-     if (cr->arriving_cbs_cnt > cr->last_arriving_cnt) {
-         size_t arrived_cnt = cr->arriving_cbs_cnt - cr->last_arriving_cnt;
+     if (arriving_cbs_cnt > cr->last_arriving_cnt) {
+         size_t arrived_cnt = arriving_cbs_cnt - cr->last_arriving_cnt;
          cr->stat_max_slice_cbs = max(arrived_cnt, cr->stat_max_slice_cbs);
      }

-     cr->last_arriving_cnt = cr->arriving_cbs_cnt;
+     cr->last_arriving_cnt = arriving_cbs_cnt;
  }
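Finally, upd_max_cbs_in_slice() now receives a snapshot of arriving_cbs_cnt instead of re-reading the live counter, which interrupt handlers keep incrementing now that callbacks are queued without disabling interrupts; deriving both the comparison and the delta from one snapshot keeps the statistic internally consistent. A small illustrative rendering of that pattern, with placeholder names rather than the HelenOS symbols:

    #include <stddef.h>

    static size_t last_seen_cnt;     /* Counter value at the end of the last slice. */
    static size_t max_cbs_in_slice;  /* Largest number of arrivals seen in one slice. */

    /*
     * Update the per-slice maximum from a caller-provided snapshot of the
     * arrival counter. Using the snapshot for both the test and the
     * subtraction avoids mixing two different reads of a counter that
     * interrupt handlers may bump while this function runs.
     */
    static void update_max_in_slice(size_t snapshot_cnt)
    {
        if (snapshot_cnt > last_seen_cnt) {
            size_t arrived = snapshot_cnt - last_seen_cnt;
            if (arrived > max_cbs_in_slice)
                max_cbs_in_slice = arrived;
        }

        last_seen_cnt = snapshot_cnt;
    }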