Changeset 853d613 in mainline
- Timestamp:
- 2012-11-16T23:59:54Z (12 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 25969ac
- Parents:
- 9fe9d296
- Location:
- kernel/generic
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/synch/rcu_types.h
r9fe9d296 → r853d613

@@ -40,6 +40,5 @@
 
 #if !defined(RCU_PREEMPT_PODZIMEK) && !defined(RCU_PREEMPT_A)
-#define RCU_PREEMPT_A
-//#error You must select an RCU algorithm.
+#error You must select an RCU algorithm.
 #endif
 
kernel/generic/src/synch/rcu.c

r9fe9d296 → r853d613

@@ -937,4 +937,5 @@
 
 	bool locked = RCU_CNT_INC <= THE->rcu_nesting;
+	/* smp_call machinery makes the most current _rcu_cur_gp visible. */
 	bool passed_qs = (CPU->rcu.last_seen_gp == _rcu_cur_gp);
 
@@ -959,32 +960,42 @@
 	ASSERT(interrupts_disabled());
 
+	/*
+	 * In order not to worry about NMI seeing rcu_nesting change work
+	 * with a local copy.
+	 */
+	size_t nesting_cnt = ACCESS_ONCE(THE->rcu_nesting);
+
 	/* Preempted a reader critical section for the first time. */
-	if (rcu_read_locked() && !(THE->rcu_nesting & RCU_WAS_PREEMPTED)) {
-		THE->rcu_nesting |= RCU_WAS_PREEMPTED;
+	if (RCU_CNT_INC <= nesting_cnt && !(nesting_cnt & RCU_WAS_PREEMPTED)) {
+		nesting_cnt |= RCU_WAS_PREEMPTED;
 		note_preempted_reader();
 	}
 
 	/* Save the thread's nesting count when it is not running. */
-	THREAD->rcu.nesting_cnt = THE->rcu_nesting;
-
-	/* Clear rcu_nesting only after noting that a thread was preempted. */
-	compiler_barrier();
-	THE->rcu_nesting = 0;
+	THREAD->rcu.nesting_cnt = nesting_cnt;
+	ACCESS_ONCE(THE->rcu_nesting) = 0;
 
 	if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
 		/*
 		 * Contain any memory accesses of old readers before announcing a QS.
 		 * Also make changes from the previous GP visible to this cpu.
+		 * Moreover it separates writing to last_seen_gp from
+		 * note_preempted_reader().
 		 */
 		memory_barrier();
 		/*
 		 * The preempted reader has been noted globally. There are therefore
 		 * no readers running on this cpu so this is a quiescent state.
+		 *
+		 * Reading the multiword _rcu_cur_gp non-atomically is benign.
+		 * At worst, the read value will be different from the actual value.
+		 * As a result, both the detector and this cpu will believe
+		 * this cpu has not yet passed a QS although it really did.
 		 */
 		CPU->rcu.last_seen_gp = _rcu_cur_gp;
 	}
 
 	/*
-	 * Forcefully associate the reclaime with the highest priority
+	 * Forcefully associate the reclaimer with the highest priority
 	 * even if preempted due to its time slice running out.
 	 */
@@ -1562,4 +1573,7 @@
 	 * _rcu_cur_gp is modified by local detector thread only.
 	 * Therefore, it is up-to-date even without a lock.
+	 *
+	 * cpu.last_seen_gp may not be up-to-date. At worst, we will
+	 * unnecessarily sample its last_seen_gp with a smp_call.
 	 */
 	bool cpu_acked_gp = (cpus[cpu_id].rcu.last_seen_gp == _rcu_cur_gp);
@@ -1578,30 +1592,13 @@
 	}
 
-/** Invokes sample_local_cpu(arg) on each cpu of reader_cpus. */
+/** Serially invokes sample_local_cpu(arg) on each cpu of reader_cpus. */
 static void sample_cpus(cpu_mask_t *reader_cpus, void *arg)
 {
-	const size_t max_conconcurrent_calls = 16;
-	smp_call_t call[max_conconcurrent_calls];
-	size_t outstanding_calls = 0;
-
 	cpu_mask_for_each(*reader_cpus, cpu_id) {
-		smp_call_async(cpu_id, sample_local_cpu, arg, &call[outstanding_calls]);
-		++outstanding_calls;
+		smp_call(cpu_id, sample_local_cpu, arg);
 
 		/* Update statistic. */
 		if (CPU->id != cpu_id)
			++rcu.stat_smp_call_cnt;
-
-		if (outstanding_calls == max_conconcurrent_calls) {
-			for (size_t k = 0; k < outstanding_calls; ++k) {
-				smp_call_wait(&call[k]);
-			}
-
-			outstanding_calls = 0;
-		}
-	}
-
-	for (size_t k = 0; k < outstanding_calls; ++k) {
-		smp_call_wait(&call[k]);
 	}
 }
Note:
See TracChangeset
for help on using the changeset viewer.