  • kernel/generic/src/synch/rwlock.c

    rda1bafb rf651e80  
    3333/**
    3434 * @file
    35  * @brief Reader/Writer locks.
     35 * @brief       Reader/Writer locks.
    3636 *
    3737 * A reader/writer lock can be held by multiple readers at a time.
     
    5757 * each thread can block on only one rwlock at a time.
    5858 */
    59 
     59 
    6060#include <synch/rwlock.h>
    6161#include <synch/spinlock.h>
     
    6969#include <panic.h>
    7070
    71 #define ALLOW_ALL           0
    72 #define ALLOW_READERS_ONLY  1
     71#define ALLOW_ALL               0
     72#define ALLOW_READERS_ONLY      1
     73
     74static void let_others_in(rwlock_t *rwl, int readers_only);
     75static void release_spinlock(void *arg);
    7376
    7477/** Initialize reader/writer lock
     
    7780 *
    7881 * @param rwl Reader/Writer lock.
    79  *
    8082 */
    8183void rwlock_initialize(rwlock_t *rwl) {
    82         irq_spinlock_initialize(&rwl->lock, "rwl.lock");
     84        spinlock_initialize(&rwl->lock, "rwlock_t");
    8385        mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
    8486        rwl->readers_in = 0;
    8587}
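
For orientation, a minimal usage sketch of this API follows. Only rwlock_initialize(), _rwlock_read_lock_timeout(), _rwlock_write_lock_timeout(), rwlock_read_unlock() and rwlock_write_unlock() appear in this changeset; the SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE constants are assumed to come from <synch/synch.h>, and data_lock/shared_counter are hypothetical names used only for illustration.

    #include <synch/rwlock.h>
    #include <synch/synch.h>

    static rwlock_t data_lock;          /* hypothetical shared lock */
    static int shared_counter;          /* hypothetical shared data */

    void example_init(void)
    {
            rwlock_initialize(&data_lock);
    }

    int example_reader(void)
    {
            int value;

            /* Block without timeout until the lock is granted for reading. */
            _rwlock_read_lock_timeout(&data_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
            value = shared_counter;
            rwlock_read_unlock(&data_lock);

            return value;
    }

    void example_writer(void)
    {
            /* Writers simply contend for the 'exclusive' mutex (see below). */
            _rwlock_write_lock_timeout(&data_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
            shared_counter++;
            rwlock_write_unlock(&data_lock);
    }
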
    8688
    87 /** Direct handoff of reader/writer lock ownership.
    88  *
    89  * Direct handoff of reader/writer lock ownership
    90  * to waiting readers or a writer.
    91  *
    92  * Must be called with rwl->lock locked.
    93  * Must be called with interrupts_disable()'d.
    94  *
    95  * @param rwl          Reader/Writer lock.
    96  * @param readers_only See the description below.
    97  *
    98  * If readers_only is false: (unlock scenario)
    99  * Let the first sleeper on 'exclusive' mutex in, no matter
    100  * whether it is a reader or a writer. If there are more leading
    101  * readers in line, let each of them in.
    102  *
    103  * Otherwise: (timeout scenario)
    104  * Let all leading readers in.
    105  *
    106  */
    107 static void let_others_in(rwlock_t *rwl, int readers_only)
    108 {
    109         rwlock_type_t type = RWLOCK_NONE;
    110         thread_t *thread = NULL;
    111         bool one_more = true;
    112        
    113         irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
    114        
    115         if (!list_empty(&rwl->exclusive.sem.wq.head))
    116                 thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
    117                     thread_t, wq_link);
    118        
    119         do {
    120                 if (thread) {
    121                         irq_spinlock_lock(&thread->lock, false);
    122                         type = thread->rwlock_holder_type;
    123                         irq_spinlock_unlock(&thread->lock, false);
    124                 }
    125                
    126                 /*
    127                  * If readers_only is true, we wake all leading readers
    128                  * if and only if rwl is locked by another reader.
    129                  * Assumption: readers_only ==> rwl->readers_in
    130                  *
    131                  */
    132                 if ((readers_only) && (type != RWLOCK_READER))
    133                         break;
    134                
    135                 if (type == RWLOCK_READER) {
    136                         /*
    137                          * Waking up a reader.
    138                          * We are responsible for incrementing rwl->readers_in
    139                          * for it.
    140                          *
    141                          */
    142                          rwl->readers_in++;
    143                 }
    144                
    145                 /*
    146                  * Only the last iteration through this loop can increment
     147                  * rwl->exclusive.sem.wq.missed_wakeup's. All preceding
    148                  * iterations will wake up a thread.
    149                  *
    150                  */
    151                
    152                 /*
    153                  * We call the internal version of waitq_wakeup, which
    154                  * relies on the fact that the waitq is already locked.
    155                  *
    156                  */
    157                 _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
    158                
    159                 thread = NULL;
    160                 if (!list_empty(&rwl->exclusive.sem.wq.head)) {
    161                         thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
    162                             thread_t, wq_link);
    163                        
    164                         if (thread) {
    165                                 irq_spinlock_lock(&thread->lock, false);
    166                                 if (thread->rwlock_holder_type != RWLOCK_READER)
    167                                         one_more = false;
    168                                 irq_spinlock_unlock(&thread->lock, false);
    169                         }
    170                 }
    171         } while ((type == RWLOCK_READER) && (thread) && (one_more));
    172        
    173         irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
    174 }
    175 
    17689/** Acquire reader/writer lock for writing
    17790 *
     
    17992 * Timeout and willingness to block may be specified.
    18093 *
    181  * @param rwl   Reader/Writer lock.
    182  * @param usec  Timeout in microseconds.
     94 * @param rwl Reader/Writer lock.
     95 * @param usec Timeout in microseconds.
    18396 * @param flags Specify mode of operation.
    18497 *
     
    187100 *
    188101 * @return See comment for waitq_sleep_timeout().
    189  *
    190  */
    191 int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
    192 {
    193         irq_spinlock_lock(&THREAD->lock, true);
     102 */
     103int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
     104{
     105        ipl_t ipl;
     106        int rc;
     107       
     108        ipl = interrupts_disable();
     109        spinlock_lock(&THREAD->lock);
    194110        THREAD->rwlock_holder_type = RWLOCK_WRITER;
    195         irq_spinlock_unlock(&THREAD->lock, true);
    196        
     111        spinlock_unlock(&THREAD->lock);
     112        interrupts_restore(ipl);
     113
    197114        /*
    198115         * Writers take the easy part.
    199116         * They just need to acquire the exclusive mutex.
    200          *
    201117         */
    202         int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
     118        rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    203119        if (SYNCH_FAILED(rc)) {
     120
    204121                /*
    205122                 * Lock operation timed out or was interrupted.
    206123                 * The state of rwl is UNKNOWN at this point.
    207124                 * No claims about its holder can be made.
    208                  *
    209                  */
    210                 irq_spinlock_lock(&rwl->lock, true);
    211                
     125                 */
     126                 
     127                ipl = interrupts_disable();
     128                spinlock_lock(&rwl->lock);
    212129                /*
    213130                 * Now when rwl is locked, we can inspect it again.
    214131                 * If it is held by some readers already, we can let
    215132                 * readers from the head of the wait queue in.
    216                  *
    217133                 */
    218134                if (rwl->readers_in)
    219135                        let_others_in(rwl, ALLOW_READERS_ONLY);
    220                
    221                 irq_spinlock_unlock(&rwl->lock, true);
     136                spinlock_unlock(&rwl->lock);
     137                interrupts_restore(ipl);
    222138        }
    223139       
    224140        return rc;
    225 }
    226 
    227 /** Release spinlock callback
    228  *
    229  * This is a callback function invoked from the scheduler.
    230  * The callback is registered in _rwlock_read_lock_timeout().
    231  *
    232  * @param arg Spinlock.
    233  *
    234  */
    235 static void release_spinlock(void *arg)
    236 {
    237         if (arg != NULL)
    238                 irq_spinlock_unlock((irq_spinlock_t *) arg, false);
    239141}
    240142
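
The timeout branch above is the writer-specific subtlety: when _mutex_lock_timeout() fails, the failing writer re-locks rwl->lock and, if readers already hold the lock, admits the leading readers itself (ALLOW_READERS_ONLY). From the caller's side only the return code is visible. A hedged sketch of handling a timed write-lock attempt; try_update(), do_update() and the 100000-microsecond timeout are illustrative, and SYNCH_FLAGS_NONE is assumed to come from <synch/synch.h>.

    extern void do_update(void);        /* hypothetical */

    int try_update(rwlock_t *cfg_lock)
    {
            int rc = _rwlock_write_lock_timeout(cfg_lock, 100000, SYNCH_FLAGS_NONE);

            if (SYNCH_FAILED(rc)) {
                    /* Timed out or was interrupted: the lock was never
                     * acquired, so nothing must be unlocked here. */
                    return rc;
            }

            do_update();
            rwlock_write_unlock(cfg_lock);

            return rc;
    }
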
     
    244146 * Timeout and willingness to block may be specified.
    245147 *
    246  * @param rwl   Reader/Writer lock.
    247  * @param usec  Timeout in microseconds.
     148 * @param rwl Reader/Writer lock.
     149 * @param usec Timeout in microseconds.
    248150 * @param flags Select mode of operation.
    249151 *
     
    252154 *
    253155 * @return See comment for waitq_sleep_timeout().
    254  *
    255  */
    256 int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
    257 {
    258         /*
    259          * Since the locking scenarios get a little bit too
    260          * complicated, we do not rely on internal irq_spinlock_t
    261          * interrupt disabling logic here and control interrupts
    262          * manually.
    263          *
    264          */
    265         ipl_t ipl = interrupts_disable();
    266        
    267         irq_spinlock_lock(&THREAD->lock, false);
     156 */
     157int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
     158{
     159        int rc;
     160        ipl_t ipl;
     161       
     162        ipl = interrupts_disable();
     163        spinlock_lock(&THREAD->lock);
    268164        THREAD->rwlock_holder_type = RWLOCK_READER;
    269         irq_spinlock_pass(&THREAD->lock, &rwl->lock);
    270        
     165        spinlock_unlock(&THREAD->lock);
     166
     167        spinlock_lock(&rwl->lock);
     168
    271169        /*
    272170         * Find out whether we can get what we want without blocking.
    273          *
    274171         */
    275         int rc = mutex_trylock(&rwl->exclusive);
     172        rc = mutex_trylock(&rwl->exclusive);
    276173        if (SYNCH_FAILED(rc)) {
     174
    277175                /*
    278176                 * 'exclusive' mutex is being held by someone else.
     
    280178                 * else waiting for it, we can enter the critical
    281179                 * section.
    282                  *
    283                  */
    284                
     180                 */
     181
    285182                if (rwl->readers_in) {
    286                         irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
     183                        spinlock_lock(&rwl->exclusive.sem.wq.lock);
    287184                        if (list_empty(&rwl->exclusive.sem.wq.head)) {
    288185                                /*
    289186                                 * We can enter.
    290187                                 */
    291                                 irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
     188                                spinlock_unlock(&rwl->exclusive.sem.wq.lock);
    292189                                goto shortcut;
    293190                        }
    294                         irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
    295                 }
    296                
     191                        spinlock_unlock(&rwl->exclusive.sem.wq.lock);
     192                }
     193
    297194                /*
    298195                 * In order to prevent a race condition when a reader
     
    300197                 * we register a function to unlock rwl->lock
    301198                 * after this thread is put asleep.
    302                  *
    303                  */
    304 #ifdef CONFIG_SMP
     199                 */
     200                #ifdef CONFIG_SMP
    305201                thread_register_call_me(release_spinlock, &rwl->lock);
    306 #else
     202                #else
    307203                thread_register_call_me(release_spinlock, NULL);
    308 #endif
    309                
     204                #endif
     205                                 
    310206                rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
    311207                switch (rc) {
     
    313209                        /*
    314210                         * release_spinlock() wasn't called
    315                          *
    316211                         */
    317212                        thread_register_call_me(NULL, NULL);
    318                         irq_spinlock_unlock(&rwl->lock, false);
     213                        spinlock_unlock(&rwl->lock);
    319214                case ESYNCH_TIMEOUT:
    320215                case ESYNCH_INTERRUPTED:
     
    322217                         * The sleep timed out.
    323218                         * We just restore interrupt priority level.
    324                          *
    325219                         */
    326                 case ESYNCH_OK_BLOCKED:
     220                case ESYNCH_OK_BLOCKED:         
    327221                        /*
    328222                         * We were woken with rwl->readers_in already
     
    334228                         * 'readers_in' is incremented. Same time means both
    335229                         * events happen atomically when rwl->lock is held.)
    336                          *
    337230                         */
    338231                        interrupts_restore(ipl);
     
    347240                return rc;
    348241        }
    349        
     242
    350243shortcut:
     244
    351245        /*
    352246         * We can increment readers_in only if we didn't go to sleep.
    353247         * For sleepers, rwlock_let_others_in() will do the job.
    354          *
    355248         */
    356249        rwl->readers_in++;
    357         irq_spinlock_unlock(&rwl->lock, false);
     250       
     251        spinlock_unlock(&rwl->lock);
    358252        interrupts_restore(ipl);
    359        
     253
    360254        return ESYNCH_OK_ATOMIC;
    361255}
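
As the shortcut label shows, a reader that gets in without sleeping returns ESYNCH_OK_ATOMIC and increments readers_in itself, while a reader that slept was admitted by let_others_in(), which incremented readers_in on its behalf, and returns whatever _mutex_lock_timeout() reported (e.g. ESYNCH_OK_BLOCKED). A hedged sketch of a caller distinguishing the two paths; stats_lock and the counters are hypothetical (and not themselves synchronized).

    static rwlock_t stats_lock;
    static unsigned long fast_entries;
    static unsigned long slow_entries;

    void read_with_stats(void)
    {
            int rc = _rwlock_read_lock_timeout(&stats_lock, SYNCH_NO_TIMEOUT,
                SYNCH_FLAGS_NONE);

            if (rc == ESYNCH_OK_ATOMIC)
                    fast_entries++;     /* entered via the shortcut, no sleep */
            else
                    slow_entries++;     /* slept and was woken by let_others_in() */

            /* ... read the shared data ... */

            rwlock_read_unlock(&stats_lock);
    }
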
     
    368262 *
    369263 * @param rwl Reader/Writer lock.
    370  *
    371264 */
    372265void rwlock_write_unlock(rwlock_t *rwl)
    373266{
    374         irq_spinlock_lock(&rwl->lock, true);
     267        ipl_t ipl;
     268       
     269        ipl = interrupts_disable();
     270        spinlock_lock(&rwl->lock);
    375271        let_others_in(rwl, ALLOW_ALL);
    376         irq_spinlock_unlock(&rwl->lock, true);
     272        spinlock_unlock(&rwl->lock);
     273        interrupts_restore(ipl);
     274       
    377275}
    378276
     
    385283 *
    386284 * @param rwl Reader/Writer lock.
    387  *
    388285 */
    389286void rwlock_read_unlock(rwlock_t *rwl)
    390287{
    391         irq_spinlock_lock(&rwl->lock, true);
    392        
     288        ipl_t ipl;
     289
     290        ipl = interrupts_disable();
     291        spinlock_lock(&rwl->lock);
    393292        if (!--rwl->readers_in)
    394293                let_others_in(rwl, ALLOW_ALL);
    395        
    396         irq_spinlock_unlock(&rwl->lock, true);
     294        spinlock_unlock(&rwl->lock);
     295        interrupts_restore(ipl);
     296}
     297
     298
     299/** Direct handoff of reader/writer lock ownership.
     300 *
     301 * Direct handoff of reader/writer lock ownership
     302 * to waiting readers or a writer.
     303 *
     304 * Must be called with rwl->lock locked.
     305 * Must be called with interrupts_disable()'d.
     306 *
     307 * @param rwl Reader/Writer lock.
     308 * @param readers_only See the description below.
     309 *
     310 * If readers_only is false: (unlock scenario)
     311 * Let the first sleeper on 'exclusive' mutex in, no matter
     312 * whether it is a reader or a writer. If there are more leading
     313 * readers in line, let each of them in.
     314 *
     315 * Otherwise: (timeout scenario)
     316 * Let all leading readers in.
     317 */
     318void let_others_in(rwlock_t *rwl, int readers_only)
     319{
     320        rwlock_type_t type = RWLOCK_NONE;
     321        thread_t *t = NULL;
     322        bool one_more = true;
     323       
     324        spinlock_lock(&rwl->exclusive.sem.wq.lock);
     325
     326        if (!list_empty(&rwl->exclusive.sem.wq.head))
     327                t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
     328                    wq_link);
     329        do {
     330                if (t) {
     331                        spinlock_lock(&t->lock);
     332                        type = t->rwlock_holder_type;
     333                        spinlock_unlock(&t->lock);                     
     334                }
     335       
     336                /*
     337                 * If readers_only is true, we wake all leading readers
     338                 * if and only if rwl is locked by another reader.
     339                 * Assumption: readers_only ==> rwl->readers_in
     340                 */
     341                if (readers_only && (type != RWLOCK_READER))
     342                        break;
     343
     344
     345                if (type == RWLOCK_READER) {
     346                        /*
     347                         * Waking up a reader.
     348                         * We are responsible for incrementing rwl->readers_in
     349                         * for it.
     350                         */
     351                         rwl->readers_in++;
     352                }
     353
     354                /*
     355                 * Only the last iteration through this loop can increment
      356                 * rwl->exclusive.sem.wq.missed_wakeup's. All preceding
     357                 * iterations will wake up a thread.
     358                 */
     359                /* We call the internal version of waitq_wakeup, which
     360                 * relies on the fact that the waitq is already locked.
     361                 */
     362                _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
     363               
     364                t = NULL;
     365                if (!list_empty(&rwl->exclusive.sem.wq.head)) {
     366                        t = list_get_instance(rwl->exclusive.sem.wq.head.next,
     367                            thread_t, wq_link);
     368                        if (t) {
     369                                spinlock_lock(&t->lock);
     370                                if (t->rwlock_holder_type != RWLOCK_READER)
     371                                        one_more = false;
     372                                spinlock_unlock(&t->lock);     
     373                        }
     374                }
     375        } while ((type == RWLOCK_READER) && t && one_more);
     376
     377        spinlock_unlock(&rwl->exclusive.sem.wq.lock);
     378}
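
To make the admission rule concrete, here is a tiny standalone model (ordinary user-space C, not kernel code) of the decision logic in the loop above: it walks a hypothetical wait queue of reader/writer entries and counts how many sleepers let_others_in() would wake in the unlock scenario (ALLOW_ALL) versus the timeout scenario (ALLOW_READERS_ONLY).

    #include <stdio.h>

    enum holder { READER, WRITER };

    /* Counts how many queued sleepers the handoff loop would wake. */
    static int admitted(const enum holder *queue, int len, int readers_only)
    {
            int woken = 0;
            int i;

            for (i = 0; i < len; i++) {
                    if (queue[i] == WRITER) {
                            /* A writer is admitted only when it heads the
                             * queue and we are in the unlock scenario; it
                             * always ends the scan. */
                            if ((!readers_only) && (woken == 0))
                                    woken++;
                            break;
                    }
                    /* A leading reader is always admitted. */
                    woken++;
            }

            return woken;
    }

    int main(void)
    {
            enum holder q1[] = { READER, READER, WRITER, READER };
            enum holder q2[] = { WRITER, READER };

            printf("q1 unlock:  %d woken\n", admitted(q1, 4, 0));  /* 2 */
            printf("q1 timeout: %d woken\n", admitted(q1, 4, 1));  /* 2 */
            printf("q2 unlock:  %d woken\n", admitted(q2, 2, 0));  /* 1 */
            printf("q2 timeout: %d woken\n", admitted(q2, 2, 1));  /* 0 */

            return 0;
    }
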
     379
     380/** Release spinlock callback
     381 *
     382 * This is a callback function invoked from the scheduler.
     383 * The callback is registered in _rwlock_read_lock_timeout().
     384 *
     385 * @param arg Spinlock.
     386 */
     387void release_spinlock(void *arg)
     388{
     389        spinlock_unlock((spinlock_t *) arg);
    397390}
    398391