Changeset c0bc189 in mainline


Ignore:
Timestamp:
2006-05-19T11:55:55Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
7633b109
Parents:
35f3b8c
Message:

Fix race in condition variables.

Location:
generic
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • generic/include/synch/waitq.h

    r35f3b8c rc0bc189  
    5757extern void waitq_initialize(waitq_t *wq);
    5858extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
     59extern ipl_t waitq_sleep_prepare(waitq_t *wq);
     60extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking);
     61extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl);
    5962extern void waitq_wakeup(waitq_t *wq, bool all);
    6063extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
  • generic/src/synch/condvar.c

    r35f3b8c rc0bc189  
    3636#include <synch/waitq.h>
    3737#include <synch/synch.h>
     38#include <arch.h>
     39#include <typedefs.h>
    3840
    39 /** Initialize condition variable
    40  *
    41  * Initialize condition variable.
     41/** Initialize condition variable.
    4242 *
    4343 * @param cv Condition variable.
     
    4848}
    4949
    50 /** Signal the condition has become true
    51  *
     50/**
    5251 * Signal the condition has become true
    5352 * to the first waiting thread by waking it up.
     
    6059}
    6160
    62 /** Signal the condition has become true
    63  *
     61/**
    6462 * Signal the condition has become true
    6563 * to all waiting threads by waking them up.
     
    7270}
    7371
    74 /** Wait for the condition becoming true
    75  *
    76  * Wait for the condition becoming true.
     72/** Wait for the condition becoming true.
    7773 *
    7874 * @param cv Condition variable.
     
    8985{
    9086        int rc;
     87        ipl_t ipl;
    9188
     89        ipl = waitq_sleep_prepare(&cv->wq);
    9290        mutex_unlock(mtx);
    93         rc = waitq_sleep_timeout(&cv->wq, usec, trywait);
     91       
     92        rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, trywait);
     93
    9494        mutex_lock(mtx);
     95        waitq_sleep_finish(&cv->wq, rc, ipl);
     96
    9597        return rc;
    9698}
  • generic/src/synch/waitq.c

    r35f3b8c rc0bc189  
    158158}
    159159
    160 
    161160/** Sleep until either wakeup, timeout or interruption occurs
    162161 *
     
    164163 * interrupted from the sleep, restoring a failover context.
    165164 *
    166  * Sleepers are organised in FIFO fashion in a structure called wait queue.
     165 * Sleepers are organised in a FIFO fashion in a structure called wait queue.
    167166 *
    168167 * This function is really basic in that other functions as waitq_sleep()
     
    201200int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
    202201{
    203         volatile ipl_t ipl; /* must be live after context_restore() */
    204        
     202        ipl_t ipl;
     203        int rc;
     204       
     205        ipl = waitq_sleep_prepare(wq);
     206        rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
     207        waitq_sleep_finish(wq, rc, ipl);
     208        return rc;
     209}
     210
     211/** Prepare to sleep in a waitq.
     212 *
     213 * This function will return holding the lock of the wait queue
     214 * and interrupts disabled.
     215 *
     216 * @param wq Wait queue.
     217 *
     218 * @return Interrupt level as it existed on entry to this function.
     219 */
     220ipl_t waitq_sleep_prepare(waitq_t *wq)
     221{
     222        ipl_t ipl;
    205223       
    206224restart:
    207225        ipl = interrupts_disable();
    208        
     226
    209227        /*
    210228         * Busy waiting for a delayed timeout.
     
    217235        if (THREAD->timeout_pending) {
    218236                spinlock_unlock(&THREAD->lock);
    219                 interrupts_restore(ipl);               
     237                interrupts_restore(ipl);
    220238                goto restart;
    221239        }
    222240        spinlock_unlock(&THREAD->lock);
    223        
     241                                                                                                       
    224242        spinlock_lock(&wq->lock);
    225        
     243        return ipl;
     244}
     245
     246/** Finish waiting in a wait queue.
     247 *
     248 * This function restores interrupts to the state that existed prior
     249 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
     250 * lock is released.
     251 *
     252 * @param wq Wait queue.
     253 * @param rc Return code of waitq_sleep_timeout_unsafe().
     254 * @param ipl Interrupt level returned by waitq_sleep_prepare().
     255 */
     256void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
     257{
     258        switch (rc) {
     259        case ESYNCH_WOULD_BLOCK:
     260        case ESYNCH_OK_ATOMIC:
     261                spinlock_unlock(&wq->lock);
     262                break;
     263        default:
     264                break;
     265        }
     266        interrupts_restore(ipl);
     267}
     268
     269/** Internal implementation of waitq_sleep_timeout().
     270 *
     271 * This function implements logic of sleeping in a wait queue.
      272 * This call must be preceded by a call to waitq_sleep_prepare()
      273 * and followed by a call to waitq_sleep_finish().
     274 *
     275 * @param wq See waitq_sleep_timeout().
     276 * @param usec See waitq_sleep_timeout().
     277 * @param nonblocking See waitq_sleep_timeout().
     278 *
     279 * @return See waitq_sleep_timeout().
     280 */
     281int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
     282{
    226283        /* checks whether to go to sleep at all */
    227284        if (wq->missed_wakeups) {
    228285                wq->missed_wakeups--;
    229                 spinlock_unlock(&wq->lock);
    230                 interrupts_restore(ipl);
    231286                return ESYNCH_OK_ATOMIC;
    232287        }
     
    234289                if (nonblocking && (usec == 0)) {
    235290                        /* return immediately instead of going to sleep */
    236                         spinlock_unlock(&wq->lock);
    237                         interrupts_restore(ipl);
    238291                        return ESYNCH_WOULD_BLOCK;
    239292                }
     
    252305                /* Short emulation of scheduler() return code. */
    253306                spinlock_unlock(&THREAD->lock);
    254                 interrupts_restore(ipl);
    255307                return ESYNCH_INTERRUPTED;
    256308        }
     
    261313                        /* Short emulation of scheduler() return code. */
    262314                        spinlock_unlock(&THREAD->lock);
    263                         interrupts_restore(ipl);
    264315                        return ESYNCH_TIMEOUT;
    265316                }
     
    279330
    280331        scheduler();    /* wq->lock is released in scheduler_separated_stack() */
    281         interrupts_restore(ipl);
    282332       
    283333        return ESYNCH_OK_BLOCKED;
Note: See TracChangeset for help on using the changeset viewer.