Changeset 82719589 in mainline


Timestamp:
2012-11-19T21:14:26Z
Author:
Adam Hraska <adam.hraska+hos@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
0adfc9d
Parents:
6831475
Message:

rcu: Made both A-RCU and Podzimek-Preempt-RCU exception safe.

Location:
kernel/generic
Files:
2 edited

  • kernel/generic/include/synch/rcu_types.h

    r6831475 r82719589  
    153153        /** True if the thread was preempted in a reader section.
    154154         *
    155          * The thread is place into rcu.cur_preempted or rcu.next_preempted
     155         * The thread is placed into rcu.cur_preempted or rcu.next_preempted
    156156         * and must remove itself in rcu_read_unlock().
    157157         *
  • kernel/generic/src/synch/rcu.c

    r6831475 r82719589  
    6767 * the detector; or the cpu is still in a CS) the cpu is interrupted
    6868 * via an IPI. If the IPI handler finds the cpu still in a CS, it instructs
    69  * the cpu to notify the detector that it had exited the CS via a semaphore.
     69 * the cpu to notify the detector that it had exited the CS via a semaphore
     70 * (CPU->rcu.is_delaying_gp).
    7071 * The detector then waits on the semaphore for any cpus to exit their
    7172 * CSs. Lastly, it waits for the last reader preempted in a CS to
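
    To make the handshake described above concrete, here is a minimal sketch
    (not from the changeset; the function names are hypothetical, while
    semaphore_up()/semaphore_down(), CPU->rcu.is_delaying_gp,
    CPU->rcu.nesting_cnt and rcu.remaining_readers are the fields and
    primitives named in this file):

        /* Hypothetical IPI handler: a cpu caught inside a reader critical
         * section is asked to notify the detector when it exits. */
        static void ipi_sample_cpu_sketch(void)
        {
                if (0 < CPU->rcu.nesting_cnt) {
                        /* Still in a CS: signal the exit from rcu_read_unlock(). */
                        CPU->rcu.is_delaying_gp = true;
                }
        }

        /* Hypothetical detector side: wait for each delayed cpu, which ups
         * rcu.remaining_readers from its rcu_read_unlock(). */
        static void detector_wait_sketch(size_t delaying_cpu_cnt)
        {
                for (size_t i = 0; i < delaying_cpu_cnt; ++i)
                        semaphore_down(&rcu.remaining_readers);
        }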
     
    290291static void note_preempted_reader(void);
    291292static void rm_preempted_reader(void);
    292 static void upd_max_cbs_in_slice(void);
     293static void upd_max_cbs_in_slice(size_t arriving_cbs_cnt);
    293294
    294295
     
    510511        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
    511512       
    512         /* todo: make NMI safe with cpu-local atomic ops. */
    513        
    514513        /*
    515          * We have to disable interrupts in order to make checking
    516          * and resetting was_preempted and is_delaying_gp atomic
    517          * with respect to local interrupt handlers. Otherwise
    518          * an interrupt could beat us to calling semaphore_up()
    519          * before we reset the appropriate flag.
    520          */
    521         ipl_t ipl = interrupts_disable();
     514         * If an interrupt occurs here (even a NMI) it may beat us to
     515         * resetting .is_delaying_gp or .was_preempted and up the semaphore
     516         * for us.
     517         */
    522518       
    523519        /*
     
    525521         * notify it that the reader did so.
    526522         */
    527         if (CPU->rcu.is_delaying_gp) {
    528                 CPU->rcu.is_delaying_gp = false;
     523        if (local_atomic_exchange(&CPU->rcu.is_delaying_gp, false)) {
    529524                semaphore_up(&rcu.remaining_readers);
    530525        }
     
    535530         * detector if so.
    536531         */
    537         if (THREAD && THREAD->rcu.was_preempted) {
     532        if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
    538533                ASSERT(link_used(&THREAD->rcu.preempt_link));
    539                 THREAD->rcu.was_preempted = false;
    540534
    541535                rm_preempted_reader();
     
    544538        /* If there was something to signal to the detector we have done so. */
    545539        CPU->rcu.signal_unlock = false;
    546        
    547         interrupts_restore(ipl);
    548540}
    549541
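
    What the new local_atomic_exchange() calls buy is that testing and clearing
    a flag becomes a single operation with respect to interrupt and NMI handlers
    on this cpu, so the interrupt-disable window of the old code is unnecessary.
    A minimal sketch of the difference, assuming local_atomic_exchange() behaves
    as a cpu-local atomic swap (e.g. something like the GCC __atomic_exchange_n()
    builtin; the actual HelenOS definition may differ):

        /*
         * Old pattern: needs interrupts disabled. A handler running between
         * the test and the clear also sees the flag set, so semaphore_up()
         * could be called twice for a single delayed reader.
         */
        if (CPU->rcu.is_delaying_gp) {
                CPU->rcu.is_delaying_gp = false;
                semaphore_up(&rcu.remaining_readers);
        }

        /*
         * Exception-safe pattern: exactly one activation (thread, interrupt
         * or NMI) observes true and ups the semaphore.
         */
        if (local_atomic_exchange(&CPU->rcu.is_delaying_gp, false))
                semaphore_up(&rcu.remaining_readers);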
     
    671663       
    672664        preemption_disable();
    673        
    674         ipl_t ipl = interrupts_disable();
    675665
    676666        rcu_cpu_data_t *r = &CPU->rcu;
    677         *r->parriving_cbs_tail = rcu_item;
    678         r->parriving_cbs_tail = &rcu_item->next;
    679        
    680         size_t cnt = ++r->arriving_cbs_cnt;
    681         interrupts_restore(ipl);
     667
     668        rcu_item_t **prev_tail
     669                = local_atomic_exchange(&r->parriving_cbs_tail, &rcu_item->next);
     670        *prev_tail = rcu_item;
     671       
     672        /* Approximate the number of callbacks present. */
     673        ++r->arriving_cbs_cnt;
    682674       
    683675        if (expedite) {
     
    685677        }
    686678       
     679        bool first_cb = (prev_tail == &CPU->rcu.arriving_cbs);
     680       
    687681        /* Added first callback - notify the reclaimer. */
    688         if (cnt == 1 && !semaphore_count_get(&r->arrived_flag)) {
     682        if (first_cb && !semaphore_count_get(&r->arrived_flag)) {
    689683                semaphore_up(&r->arrived_flag);
    690684        }
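
    The callback is now linked in with a single atomic swing of the cpu-local
    tail pointer rather than under disabled interrupts. A condensed sketch of
    the append (names as in the hunk above; this is an illustration, not the
    full rcu_call()):

        static void append_cb_sketch(rcu_cpu_data_t *r, rcu_item_t *rcu_item)
        {
                rcu_item->next = NULL;
                /*
                 * Atomically claim the current tail slot. An interrupt handler
                 * appending right after this exchange claims a different slot
                 * (&rcu_item->next), so the plain store below can never be
                 * lost; at worst the chain is momentarily incomplete between
                 * the exchange and the store, which only this cpu can observe.
                 */
                rcu_item_t **prev_tail =
                    local_atomic_exchange(&r->parriving_cbs_tail, &rcu_item->next);
                *prev_tail = rcu_item;
        }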
     
    849843        CPU->rcu.cur_cbs_gp = CPU->rcu.next_cbs_gp;
    850844       
    851         /* Move arriving_cbs to next_cbs. Empties arriving_cbs. */
    852         ipl_t ipl = interrupts_disable();
    853 
     845        /* Move arriving_cbs to next_cbs. */
     846       
     847        CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
     848        CPU->rcu.arriving_cbs_cnt = 0;
     849       
    854850        /*
    855851         * Too many callbacks queued. Better speed up the detection
    856852         * or risk exhausting all system memory.
    857853         */
    858         bool expedite = (EXPEDITE_THRESHOLD < CPU->rcu.arriving_cbs_cnt)
     854        bool expedite = (EXPEDITE_THRESHOLD < CPU->rcu.next_cbs_cnt)
    859855                || CPU->rcu.expedite_arriving; 
    860 
    861856        CPU->rcu.expedite_arriving = false;
    862        
     857
     858        /* Start moving the arriving_cbs list to next_cbs. */
    863859        CPU->rcu.next_cbs = CPU->rcu.arriving_cbs;
    864         CPU->rcu.next_cbs_cnt = CPU->rcu.arriving_cbs_cnt;
    865        
    866         CPU->rcu.arriving_cbs = NULL;
    867         CPU->rcu.parriving_cbs_tail = &CPU->rcu.arriving_cbs;
    868         CPU->rcu.arriving_cbs_cnt = 0;
    869        
    870         interrupts_restore(ipl);
     860       
     861        /*
     862         * At least one callback arrived. The tail therefore does not point
     863         * to the head of arriving_cbs and we can safely reset it to NULL.
     864         */
     865        if (CPU->rcu.next_cbs) {
     866                ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
     867               
     868                CPU->rcu.arriving_cbs = NULL;
     869                /* Reset arriving_cbs before updating the tail pointer. */
     870                compiler_barrier();
     871                /* Updating the tail pointer completes the move of arriving_cbs. */
     872                ACCESS_ONCE(CPU->rcu.parriving_cbs_tail) = &CPU->rcu.arriving_cbs;
     873        } else {
     874                /*
     875                 * arriving_cbs was null and parriving_cbs_tail pointed to it
     876                 * so leave it that way. Note that interrupt handlers may have
     877                 * added a callback in the meantime so it is not safe to reset
     878                 * arriving_cbs or parriving_cbs.
     879                 */
     880        }
    871881
    872882        /* Update statistics of arrived callbacks. */
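
    The ordering inside the new batch move is what keeps it safe without
    disabling interrupts: arriving_cbs must read as NULL before the tail
    pointer is pointed back at it. A condensed sketch of the interleaving the
    compiler_barrier() rules out (everything involved is cpu-local, so a
    compiler barrier suffices; names as in the hunk above):

        CPU->rcu.next_cbs = CPU->rcu.arriving_cbs;
        if (CPU->rcu.next_cbs) {
                CPU->rcu.arriving_cbs = NULL;
                /*
                 * Without this barrier the compiler could publish the tail
                 * first. An interrupt handler appending in that window would
                 * store its item directly into arriving_cbs, and the delayed
                 * NULL store above would then overwrite, i.e. lose, that
                 * callback.
                 */
                compiler_barrier();
                ACCESS_ONCE(CPU->rcu.parriving_cbs_tail) = &CPU->rcu.arriving_cbs;
        }
        /* Otherwise the tail already points at the empty arriving_cbs list. */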
     
    10501060         * with a local copy.
    10511061         */
    1052         size_t nesting_cnt = ACCESS_ONCE(THE->rcu_nesting);
     1062        size_t nesting_cnt = local_atomic_exchange(&THE->rcu_nesting, 0);
     1063       
     1064        /*
     1065         * Ensures NMIs see .rcu_nesting without the WAS_PREEMPTED mark and
     1066         * do not accidentally call rm_preempted_reader() from unlock().
     1067         */
     1068        compiler_barrier();
    10531069       
    10541070        /* Preempted a reader critical section for the first time. */
     
    10601076        /* Save the thread's nesting count when it is not running. */
    10611077        THREAD->rcu.nesting_cnt = nesting_cnt;
    1062         ACCESS_ONCE(THE->rcu_nesting) = 0;
    10631078
    10641079        if (CPU->rcu.last_seen_gp != _rcu_cur_gp) {
     
    10951110        }
    10961111       
    1097         upd_max_cbs_in_slice();
     1112        upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
    10981113}
    10991114
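
    As the comment in the hunk above notes, reading THE->rcu_nesting and zeroing
    it in two separate steps left a window in which an NMI's paired
    rcu_read_unlock() could still see the RCU_WAS_PREEMPTED mark and call
    rm_preempted_reader() for a reader that is already being removed. A sketch
    of the window the exchange closes:

        /* Old: a window exists between the read and the clearing store. */
        size_t nesting_cnt = ACCESS_ONCE(THE->rcu_nesting);
        /*
         * <-- an NMI's rcu_read_unlock() here still observes the
         *     RCU_WAS_PREEMPTED mark and may act on it a second time.
         */
        ACCESS_ONCE(THE->rcu_nesting) = 0;

        /* New: the mark is consumed and cleared in one cpu-local step, so any
         * later NMI sees rcu_nesting == 0. */
        nesting_cnt = local_atomic_exchange(&THE->rcu_nesting, 0);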
     
    11011116void rcu_before_thread_runs(void)
    11021117{
    1103         ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
    11041118        ASSERT(!rcu_read_locked());
    11051119       
     
    11151129void rcu_thread_exiting(void)
    11161130{
    1117         ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
     1131        ASSERT(THE->rcu_nesting == 0);
     1132       
    11181133        /*
    11191134         * The thread forgot to exit its reader critical section.
     
    11251140                /* Emulate _rcu_preempted_unlock() with the proper nesting count. */
    11261141                if (THREAD->rcu.nesting_cnt & RCU_WAS_PREEMPTED) {
    1127                         ipl_t ipl = interrupts_disable();
    11281142                        rm_preempted_reader();
    1129                         interrupts_restore(ipl);
    11301143                }
    11311144
     
    11441157void _rcu_preempted_unlock(void)
    11451158{
    1146         ipl_t ipl = interrupts_disable();
    1147        
    1148         /* todo: Replace with cpu-local atomics to be NMI-safe */
    1149         if (THE->rcu_nesting == RCU_WAS_PREEMPTED) {
    1150                 THE->rcu_nesting = 0;
     1159        ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
     1160       
     1161        size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
     1162        if (prev == RCU_WAS_PREEMPTED) {
    11511163                /*
    11521164                 * NMI handlers are never preempted but may call rm_preempted_reader()
    11531165                 * if a NMI occurred in _rcu_preempted_unlock() of a preempted thread.
     1166                 * The only other rcu code that may have been interrupted by the NMI
     1167                 * in _rcu_preempted_unlock() is: an IPI/sample_local_cpu() and
     1168                 * the initial part of rcu_after_thread_ran().
     1169                 *
    11541170                 * rm_preempted_reader() will not deadlock because none of the locks
    1155                  * it uses are locked in this case.
     1171                 * it uses are locked in this case. Neither _rcu_preempted_unlock()
     1172                 * nor sample_local_cpu() nor the initial part of rcu_after_thread_ran()
     1173                 * acquire any locks.
    11561174                 */
    11571175                rm_preempted_reader();
    11581176        }
    1159        
    1160         interrupts_restore(ipl);
    11611177}
    11621178
     
    14151431                if (0 < CPU->rcu.nesting_cnt) {
    14161432                        ASSERT(!CPU->idle);
    1417                         /* Note to notify the detector from rcu_read_unlock(). */
    1418                         CPU->rcu.is_delaying_gp = true;
    14191433                        /*
    1420                          * Set signal_unlock only after setting is_delaying_gp so
    1421                          * that NMI handlers do not accidentally clear it in unlock()
    1422                          * before seeing and acting upon is_delaying_gp.
     1434                         * Note to notify the detector from rcu_read_unlock().
     1435                         *
     1436                         * ACCESS_ONCE ensures the compiler writes to is_delaying_gp
     1437                         * only after it determines that we are in a reader CS.
    14231438                         */
    1424                         compiler_barrier();
     1439                        ACCESS_ONCE(CPU->rcu.is_delaying_gp) = true;
    14251440                        CPU->rcu.signal_unlock = true;
    14261441                       
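
    ACCESS_ONCE() is what prevents the compiler from performing the store to
    is_delaying_gp speculatively, before the nesting-count test has decided
    that this cpu really is inside a reader section. For reference, a common
    definition of the macro looks like the following (the definition in the
    HelenOS headers may differ in detail); the volatile cast forces exactly one
    real access at this program point, so the store can be neither hoisted out
    of the branch nor duplicated:

        #define ACCESS_ONCE(x)  (*(volatile typeof(x) *) &(x))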
     
    14781493{
    14791494        ASSERT(interrupts_disabled());
    1480         /* todo: make is_delaying_gp and was_preempted NMI safe via local atomics.*/
    14811495
    14821496        /*
    14831497         * Prevent NMI handlers from interfering. The detector will be notified
    1484          * here if CPU->rcu.is_delaying_gp and the current thread is no longer
    1485          * running so there is nothing to signal to the detector.
     1498         * in this function if CPU->rcu.is_delaying_gp. The current thread is
     1499         * no longer running so there is nothing else to signal to the detector.
    14861500         */
    14871501        CPU->rcu.signal_unlock = false;
    1488         /* Separates clearing of .signal_unlock from CPU->rcu.nesting_cnt = 0. */
     1502        /*
     1503         * Separates clearing of .signal_unlock from accesses to
     1504         * THREAD->rcu.was_preempted and CPU->rcu.nesting_cnt.
     1505         */
    14891506        compiler_barrier();
    14901507       
    14911508        /* Save the thread's nesting count when it is not running. */
    14921509        THREAD->rcu.nesting_cnt = CPU->rcu.nesting_cnt;
    1493         /* Interrupt handlers might use RCU while idle in scheduler(). */
    1494         CPU->rcu.nesting_cnt = 0;
    14951510       
    14961511        /* Preempted a reader critical section for the first time. */
     
    15061521        _rcu_record_qs();
    15071522
     1523        /*
     1524         * Interrupt handlers might use RCU while idle in scheduler().
     1525         * The preempted reader has been noted globally, so the handlers
     1526         * may now start announcing quiescent states.
     1527         */
     1528        CPU->rcu.nesting_cnt = 0;
     1529       
    15081530        /*
    15091531         * This cpu is holding up the current GP. Let the detector know
     
    15311553        }
    15321554       
    1533         upd_max_cbs_in_slice();
     1555        upd_max_cbs_in_slice(CPU->rcu.arriving_cbs_cnt);
    15341556}
    15351557
     
    15421564        /* Load the thread's saved nesting count from before it was preempted. */
    15431565        CPU->rcu.nesting_cnt = THREAD->rcu.nesting_cnt;
     1566       
     1567        /*
     1568         * Ensures NMI see the proper nesting count before .signal_unlock.
     1569         * Otherwise the NMI may incorrectly signal that a preempted reader
     1570         * exited its reader section.
     1571         */
     1572        compiler_barrier();
     1573       
    15441574        /*
    15451575         * In the unlikely event that a NMI occurs between the loading of the
     
    15471577         * rcu_read_unlock() and clear signal_unlock. In that case we will
    15481578         * incorrectly overwrite signal_unlock from false to true. This event
    1549          * situation benign and the next rcu_read_unlock() will at worst
     1579         * is benign and the next rcu_read_unlock() will at worst
    15501580         * needlessly invoke _rcu_signal_unlock().
    15511581         */
     
    17321762static void rm_preempted_reader(void)
    17331763{
    1734         irq_spinlock_lock(&rcu.preempt_lock, false);
     1764        irq_spinlock_lock(&rcu.preempt_lock, true);
    17351765       
    17361766        ASSERT(link_used(&THREAD->rcu.preempt_link));
     
    17521782        }
    17531783
    1754         irq_spinlock_unlock(&rcu.preempt_lock, false);
     1784        irq_spinlock_unlock(&rcu.preempt_lock, true);
    17551785}
    17561786
     
    17751805}
    17761806
    1777 static void upd_max_cbs_in_slice(void)
     1807static void upd_max_cbs_in_slice(size_t arriving_cbs_cnt)
    17781808{
    17791809        rcu_cpu_data_t *cr = &CPU->rcu;
    17801810       
    1781         if (cr->arriving_cbs_cnt > cr->last_arriving_cnt) {
    1782                 size_t arrived_cnt = cr->arriving_cbs_cnt - cr->last_arriving_cnt;
     1811        if (arriving_cbs_cnt > cr->last_arriving_cnt) {
     1812                size_t arrived_cnt = arriving_cbs_cnt - cr->last_arriving_cnt;
    17831813                cr->stat_max_slice_cbs = max(arrived_cnt, cr->stat_max_slice_cbs);
    17841814        }
    17851815       
    1786         cr->last_arriving_cnt = cr->arriving_cbs_cnt;
     1816        cr->last_arriving_cnt = arriving_cbs_cnt;
    17871817}
    17881818