  • kernel/generic/src/synch/rcu.c

    --- kernel/generic/src/synch/rcu.c (r63e27ef)
    +++ kernel/generic/src/synch/rcu.c (r82719589)
    @@ -123,6 +123,5 @@
      *
      */
    -
    -#include <assert.h>
    +
     #include <synch/rcu.h>
     #include <synch/condvar.h>
    @@ -405,5 +404,5 @@
             /* Stop and wait for reclaimers. */
             for (unsigned int cpu_id = 0; cpu_id < config.cpu_active; ++cpu_id) {
    -                assert(cpus[cpu_id].rcu.reclaimer_thr != NULL);
    +                ASSERT(cpus[cpu_id].rcu.reclaimer_thr != NULL);
     
                     if (cpus[cpu_id].rcu.reclaimer_thr) {
    @@ -488,5 +487,5 @@
     static void read_unlock_impl(size_t *pnesting_cnt)
     {
    -        assert(PREEMPTION_DISABLED || interrupts_disabled());
    +        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
     
             if (0 == --(*pnesting_cnt)) {
    @@ -510,5 +509,5 @@
     void _rcu_signal_read_unlock(void)
     {
    -        assert(PREEMPTION_DISABLED || interrupts_disabled());
    +        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
     
             /*
    @@ -532,5 +531,5 @@
              */
             if (THREAD && local_atomic_exchange(&THREAD->rcu.was_preempted, false)) {
    -                assert(link_used(&THREAD->rcu.preempt_link));
    +                ASSERT(link_used(&THREAD->rcu.preempt_link));
     
                     rm_preempted_reader();
    @@ -564,5 +563,5 @@
     {
             /* Calling from a reader section will deadlock. */
    -        assert(!rcu_read_locked());
    +        ASSERT(!rcu_read_locked());
     
             synch_item_t completion;
    @@ -577,5 +576,5 @@
     {
             synch_item_t *completion = member_to_inst(rcu_item, synch_item_t, rcu_item);
    -        assert(completion);
    +        ASSERT(completion);
             waitq_wakeup(&completion->wq, WAKEUP_FIRST);
     }
    @@ -616,5 +615,5 @@
     static void add_barrier_cb(void *arg)
     {
    -        assert(interrupts_disabled() || PREEMPTION_DISABLED);
    +        ASSERT(interrupts_disabled() || PREEMPTION_DISABLED);
             atomic_inc(&rcu.barrier_wait_cnt);
             rcu_call(&CPU->rcu.barrier_item, barrier_complete);
    @@ -658,5 +657,5 @@
             rcu_func_t func)
     {
    -        assert(rcu_item);
    +        ASSERT(rcu_item);
     
             rcu_item->func = func;
    @@ -690,5 +689,5 @@
     static bool cur_cbs_empty(void)
     {
    -        assert(THREAD && THREAD->wired);
    +        ASSERT(THREAD && THREAD->wired);
             return NULL == CPU->rcu.cur_cbs;
     }
    @@ -696,5 +695,5 @@
     static bool next_cbs_empty(void)
     {
    -        assert(THREAD && THREAD->wired);
    +        ASSERT(THREAD && THREAD->wired);
             return NULL == CPU->rcu.next_cbs;
     }
    @@ -703,5 +702,5 @@
     static bool arriving_cbs_empty(void)
     {
    -        assert(THREAD && THREAD->wired);
    +        ASSERT(THREAD && THREAD->wired);
             /*
              * Accessing with interrupts enabled may at worst lead to
    @@ -720,6 +719,6 @@
     static void reclaimer(void *arg)
     {
    -        assert(THREAD && THREAD->wired);
    -        assert(THREAD == CPU->rcu.reclaimer_thr);
    +        ASSERT(THREAD && THREAD->wired);
    +        ASSERT(THREAD == CPU->rcu.reclaimer_thr);
     
             rcu_gp_t last_compl_gp = 0;
    @@ -727,5 +726,5 @@
     
             while (ok && wait_for_pending_cbs()) {
    -                assert(CPU->rcu.reclaimer_thr == THREAD);
    +                ASSERT(CPU->rcu.reclaimer_thr == THREAD);
     
                     exec_completed_cbs(last_compl_gp);
    @@ -766,5 +765,5 @@
             /* Both next_cbs and cur_cbs GP elapsed. */
             if (CPU->rcu.next_cbs_gp <= last_completed_gp) {
    -                assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
    +                ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
     
                     size_t exec_cnt = CPU->rcu.cur_cbs_cnt + CPU->rcu.next_cbs_cnt;
    @@ -865,5 +864,5 @@
              */
             if (CPU->rcu.next_cbs) {
    -                assert(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
    +                ASSERT(CPU->rcu.parriving_cbs_tail != &CPU->rcu.arriving_cbs);
     
                     CPU->rcu.arriving_cbs = NULL;
    @@ -914,5 +913,5 @@
             }
     
    -        assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
    +        ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
     
             return expedite;
    @@ -934,6 +933,6 @@
             spinlock_lock(&rcu.gp_lock);
     
    -        assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
    -        assert(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
    +        ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
    +        ASSERT(CPU->rcu.cur_cbs_gp <= _rcu_cur_gp + 1);
     
             while (rcu.completed_gp < CPU->rcu.cur_cbs_gp) {
    @@ -1030,5 +1029,5 @@
     static void sample_local_cpu(void *arg)
     {
    -        assert(interrupts_disabled());
    +        ASSERT(interrupts_disabled());
             cpu_mask_t *reader_cpus = (cpu_mask_t *)arg;
     
    @@ -1055,5 +1054,5 @@
     void rcu_after_thread_ran(void)
     {
    -        assert(interrupts_disabled());
    +        ASSERT(interrupts_disabled());
     
             /*
    @@ -1117,5 +1116,5 @@
     void rcu_before_thread_runs(void)
     {
    -        assert(!rcu_read_locked());
    +        ASSERT(!rcu_read_locked());
     
             /* Load the thread's saved nesting count from before it was preempted. */
    @@ -1130,5 +1129,5 @@
     void rcu_thread_exiting(void)
     {
    -        assert(THE->rcu_nesting == 0);
    +        ASSERT(THE->rcu_nesting == 0);
     
             /*
    @@ -1158,5 +1157,5 @@
     void _rcu_preempted_unlock(void)
     {
    -        assert(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
    +        ASSERT(0 == THE->rcu_nesting || RCU_WAS_PREEMPTED == THE->rcu_nesting);
     
             size_t prev = local_atomic_exchange(&THE->rcu_nesting, 0);
    @@ -1221,6 +1220,6 @@
             }
     
    -        assert(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
    -        assert(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
    +        ASSERT(CPU->rcu.cur_cbs_gp <= CPU->rcu.next_cbs_gp);
    +        ASSERT(_rcu_cur_gp <= CPU->rcu.cur_cbs_gp);
     
             /*
    @@ -1263,5 +1262,5 @@
     static bool cv_wait_for_gp(rcu_gp_t wait_on_gp)
     {
    -        assert(spinlock_locked(&rcu.gp_lock));
    +        ASSERT(spinlock_locked(&rcu.gp_lock));
     
             bool interrupted = false;
    @@ -1285,5 +1284,5 @@
     
                     if (detector_idle) {
    -                        assert(_rcu_cur_gp == rcu.completed_gp);
    +                        ASSERT(_rcu_cur_gp == rcu.completed_gp);
                             condvar_signal(&rcu.req_gp_changed);
                     }
    @@ -1324,5 +1323,5 @@
     static bool wait_for_detect_req(void)
     {
    -        assert(spinlock_locked(&rcu.gp_lock));
    +        ASSERT(spinlock_locked(&rcu.gp_lock));
     
             bool interrupted = false;
    @@ -1341,5 +1340,5 @@
     static void end_cur_gp(void)
     {
    -        assert(spinlock_locked(&rcu.gp_lock));
    +        ASSERT(spinlock_locked(&rcu.gp_lock));
     
             rcu.completed_gp = _rcu_cur_gp;
    @@ -1424,6 +1423,6 @@
     static void sample_local_cpu(void *arg)
     {
    -        assert(interrupts_disabled());
    -        assert(!CPU->rcu.is_delaying_gp);
    +        ASSERT(interrupts_disabled());
    +        ASSERT(!CPU->rcu.is_delaying_gp);
     
             /* Cpu did not pass a quiescent state yet. */
    @@ -1431,5 +1430,5 @@
                     /* Interrupted a reader in a reader critical section. */
                     if (0 < CPU->rcu.nesting_cnt) {
    -                        assert(!CPU->idle);
    +                        ASSERT(!CPU->idle);
                             /*
                              * Note to notify the detector from rcu_read_unlock().
    @@ -1493,5 +1492,5 @@
     void rcu_after_thread_ran(void)
     {
    -        assert(interrupts_disabled());
    +        ASSERT(interrupts_disabled());
     
             /*
    @@ -1560,6 +1559,6 @@
     void rcu_before_thread_runs(void)
     {
    -        assert(PREEMPTION_DISABLED || interrupts_disabled());
    -        assert(0 == CPU->rcu.nesting_cnt);
    +        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
    +        ASSERT(0 == CPU->rcu.nesting_cnt);
     
             /* Load the thread's saved nesting count from before it was preempted. */
    @@ -1591,7 +1590,7 @@
     void rcu_thread_exiting(void)
     {
    -        assert(THREAD != NULL);
    -        assert(THREAD->state == Exiting);
    -        assert(PREEMPTION_DISABLED || interrupts_disabled());
    +        ASSERT(THREAD != NULL);
    +        ASSERT(THREAD->state == Exiting);
    +        ASSERT(PREEMPTION_DISABLED || interrupts_disabled());
     
             /*
    @@ -1616,5 +1615,5 @@
     static void start_new_gp(void)
     {
    -        assert(spinlock_locked(&rcu.gp_lock));
    +        ASSERT(spinlock_locked(&rcu.gp_lock));
     
             irq_spinlock_lock(&rcu.preempt_lock, true);
    @@ -1735,5 +1734,5 @@
     static void upd_missed_gp_in_wait(rcu_gp_t completed_gp)
     {
    -        assert(CPU->rcu.cur_cbs_gp <= completed_gp);
    +        ASSERT(CPU->rcu.cur_cbs_gp <= completed_gp);
     
             size_t delta = (size_t)(completed_gp - CPU->rcu.cur_cbs_gp);
    @@ -1765,5 +1764,5 @@
             irq_spinlock_lock(&rcu.preempt_lock, true);
     
    -        assert(link_used(&THREAD->rcu.preempt_link));
    +        ASSERT(link_used(&THREAD->rcu.preempt_link));
     
             bool prev_empty = list_empty(&rcu.cur_preempted);
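
Every hunk in this changeset swaps a standard C assert() call (whose <assert.h> include is dropped at the top of the file) for the kernel's ASSERT() macro. As a rough, self-contained illustration of the pattern involved, here is a sketch of an ASSERT-style macro that reports the failed expression and its location before stopping; the macro name MY_ASSERT, the MY_DEBUG switch, and the reporting format are hypothetical and not taken from HelenOS, where an in-kernel macro would typically panic rather than call abort().

    /*
     * Illustrative sketch only (hypothetical names): an ASSERT-style macro
     * enabled only in debug builds, in contrast to <assert.h>'s assert(),
     * which is compiled out when NDEBUG is defined.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #ifdef MY_DEBUG
    #define MY_ASSERT(expr) \
            do { \
                    if (!(expr)) { \
                            fprintf(stderr, "Assertion failed (%s) in %s() at %s:%d.\n", \
                                #expr, __func__, __FILE__, __LINE__); \
                            abort(); \
                    } \
            } while (0)
    #else
    #define MY_ASSERT(expr) ((void) 0)
    #endif

    int main(void)
    {
            int nesting_cnt = 0;
            MY_ASSERT(nesting_cnt == 0);    /* holds, execution continues */
            return 0;
    }

Compiling with -DMY_DEBUG enables the check; without it the macro expands to nothing, mirroring how assert() disappears under NDEBUG.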