Changes in kernel/generic/src/synch/rwlock.c [da1bafb:f651e80] in mainline
File: kernel/generic/src/synch/rwlock.c (1 edited)

Legend:
  ' ' Unmodified
  '+' Added (in f651e80)
  '-' Removed (in da1bafb)
kernel/generic/src/synch/rwlock.c
--- kernel/generic/src/synch/rwlock.c	(revision da1bafb)
+++ kernel/generic/src/synch/rwlock.c	(revision f651e80)

 /**
  * @file
- * @brief
+ * @brief Reader/Writer locks.
  *
  * A reader/writer lock can be held by multiple readers at a time.
…
  * each thread can block on only one rwlock at a time.
  */

 #include <synch/rwlock.h>
 #include <synch/spinlock.h>
…
 #include <panic.h>

-#define ALLOW_ALL 0
-#define ALLOW_READERS_ONLY 1
+#define ALLOW_ALL 0
+#define ALLOW_READERS_ONLY 1
+
+static void let_others_in(rwlock_t *rwl, int readers_only);
+static void release_spinlock(void *arg);

 /** Initialize reader/writer lock
…
  *
  * @param rwl Reader/Writer lock.
- *
  */
 void rwlock_initialize(rwlock_t *rwl) {
-    irq_spinlock_initialize(&rwl->lock, "rwl.lock");
+    spinlock_initialize(&rwl->lock, "rwlock_t");
     mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
     rwl->readers_in = 0;
 }

-/** Direct handoff of reader/writer lock ownership.
- *
- * Direct handoff of reader/writer lock ownership
- * to waiting readers or a writer.
- *
- * Must be called with rwl->lock locked.
- * Must be called with interrupts_disable()'d.
- *
- * @param rwl Reader/Writer lock.
- * @param readers_only See the description below.
- *
- * If readers_only is false: (unlock scenario)
- *     Let the first sleeper on 'exclusive' mutex in, no matter
- *     whether it is a reader or a writer. If there are more leading
- *     readers in line, let each of them in.
- *
- * Otherwise: (timeout scenario)
- *     Let all leading readers in.
- *
- */
-static void let_others_in(rwlock_t *rwl, int readers_only)
-{
-    rwlock_type_t type = RWLOCK_NONE;
-    thread_t *thread = NULL;
-    bool one_more = true;
-
-    irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
-
-    if (!list_empty(&rwl->exclusive.sem.wq.head))
-        thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
-            thread_t, wq_link);
-
-    do {
-        if (thread) {
-            irq_spinlock_lock(&thread->lock, false);
-            type = thread->rwlock_holder_type;
-            irq_spinlock_unlock(&thread->lock, false);
-        }
-
-        /*
-         * If readers_only is true, we wake all leading readers
-         * if and only if rwl is locked by another reader.
-         * Assumption: readers_only ==> rwl->readers_in
-         *
-         */
-        if ((readers_only) && (type != RWLOCK_READER))
-            break;
-
-        if (type == RWLOCK_READER) {
-            /*
-             * Waking up a reader.
-             * We are responsible for incrementing rwl->readers_in
-             * for it.
-             *
-             */
-            rwl->readers_in++;
-        }
-
-        /*
-         * Only the last iteration through this loop can increment
-         * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
-         * iterations will wake up a thread.
-         *
-         */
-
-        /*
-         * We call the internal version of waitq_wakeup, which
-         * relies on the fact that the waitq is already locked.
-         *
-         */
-        _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
-
-        thread = NULL;
-        if (!list_empty(&rwl->exclusive.sem.wq.head)) {
-            thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
-                thread_t, wq_link);
-
-            if (thread) {
-                irq_spinlock_lock(&thread->lock, false);
-                if (thread->rwlock_holder_type != RWLOCK_READER)
-                    one_more = false;
-                irq_spinlock_unlock(&thread->lock, false);
-            }
-        }
-    } while ((type == RWLOCK_READER) && (thread) && (one_more));
-
-    irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
-}
-
 /** Acquire reader/writer lock for reading
  *
…
  * Timeout and willingness to block may be specified.
  *
- * @param rwl
- * @param usec
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Specify mode of operation.
…
  *
  * @return See comment for waitq_sleep_timeout().
- *
- */
-int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
-{
-    irq_spinlock_lock(&THREAD->lock, true);
+ */
+int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
+{
+    ipl_t ipl;
+    int rc;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&THREAD->lock);
     THREAD->rwlock_holder_type = RWLOCK_WRITER;
-    irq_spinlock_unlock(&THREAD->lock, true);
-
+    spinlock_unlock(&THREAD->lock);
+    interrupts_restore(ipl);
+
     /*
      * Writers take the easy part.
      * They just need to acquire the exclusive mutex.
-     *
      */
-    int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
+    rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
     if (SYNCH_FAILED(rc)) {
+
         /*
          * Lock operation timed out or was interrupted.
          * The state of rwl is UNKNOWN at this point.
          * No claims about its holder can be made.
-         *
-         */
-        irq_spinlock_lock(&rwl->lock, true);
-
+         */
+
+        ipl = interrupts_disable();
+        spinlock_lock(&rwl->lock);
         /*
          * Now when rwl is locked, we can inspect it again.
          * If it is held by some readers already, we can let
          * readers from the head of the wait queue in.
-         *
          */
         if (rwl->readers_in)
             let_others_in(rwl, ALLOW_READERS_ONLY);
-
-        irq_spinlock_unlock(&rwl->lock, true);
+        spinlock_unlock(&rwl->lock);
+        interrupts_restore(ipl);
     }

     return rc;
-}
-
-/** Release spinlock callback
- *
- * This is a callback function invoked from the scheduler.
- * The callback is registered in _rwlock_read_lock_timeout().
- *
- * @param arg Spinlock.
- *
- */
-static void release_spinlock(void *arg)
-{
-    if (arg != NULL)
-        irq_spinlock_unlock((irq_spinlock_t *) arg, false);
-
 }

…
  * Timeout and willingness to block may be specified.
  *
- * @param rwl
- * @param usec
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Select mode of operation.
…
  *
  * @return See comment for waitq_sleep_timeout().
- *
- */
-int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
-{
-    /*
-     * Since the locking scenarios get a little bit too
-     * complicated, we do not rely on internal irq_spinlock_t
-     * interrupt disabling logic here and control interrupts
-     * manually.
-     *
-     */
-    ipl_t ipl = interrupts_disable();
-
-    irq_spinlock_lock(&THREAD->lock, false);
+ */
+int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
+{
+    int rc;
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&THREAD->lock);
     THREAD->rwlock_holder_type = RWLOCK_READER;
-    irq_spinlock_pass(&THREAD->lock, &rwl->lock);
-
+    spinlock_unlock(&THREAD->lock);
+
+    spinlock_lock(&rwl->lock);
+
     /*
      * Find out whether we can get what we want without blocking.
-     *
      */
-    int rc = mutex_trylock(&rwl->exclusive);
+    rc = mutex_trylock(&rwl->exclusive);
     if (SYNCH_FAILED(rc)) {
+
         /*
          * 'exclusive' mutex is being held by someone else.
…
          * else waiting for it, we can enter the critical
          * section.
-         *
-         */
-
+         */
+
         if (rwl->readers_in) {
-            irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
+            spinlock_lock(&rwl->exclusive.sem.wq.lock);
             if (list_empty(&rwl->exclusive.sem.wq.head)) {
                 /*
                  * We can enter.
                  */
-                irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
+                spinlock_unlock(&rwl->exclusive.sem.wq.lock);
                 goto shortcut;
             }
-            irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
-        }
-
+            spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+        }
+
         /*
          * In order to prevent a race condition when a reader
…
          * we register a function to unlock rwl->lock
          * after this thread is put asleep.
-         *
-         */
-#ifdef CONFIG_SMP
+         */
+#ifdef CONFIG_SMP
         thread_register_call_me(release_spinlock, &rwl->lock);
-#else
+#else
         thread_register_call_me(release_spinlock, NULL);
-#endif
-
+#endif
+
         rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
         switch (rc) {
…
             /*
              * release_spinlock() wasn't called
-             *
              */
             thread_register_call_me(NULL, NULL);
-            irq_spinlock_unlock(&rwl->lock, false);
+            spinlock_unlock(&rwl->lock);
         case ESYNCH_TIMEOUT:
         case ESYNCH_INTERRUPTED:
…
              * The sleep timed out.
              * We just restore interrupt priority level.
-             *
              */
         case ESYNCH_OK_BLOCKED:
             /*
              * We were woken with rwl->readers_in already
…
              * 'readers_in' is incremented. Same time means both
              * events happen atomically when rwl->lock is held.)
-             *
              */
             interrupts_restore(ipl);
…
         return rc;
     }

 shortcut:
+
     /*
      * We can increment readers_in only if we didn't go to sleep.
      * For sleepers, rwlock_let_others_in() will do the job.
-     *
      */
     rwl->readers_in++;
-    irq_spinlock_unlock(&rwl->lock, false);
+
+    spinlock_unlock(&rwl->lock);
     interrupts_restore(ipl);

     return ESYNCH_OK_ATOMIC;
 }
…
  *
  * @param rwl Reader/Writer lock.
- *
  */
 void rwlock_write_unlock(rwlock_t *rwl)
 {
-    irq_spinlock_lock(&rwl->lock, true);
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&rwl->lock);
     let_others_in(rwl, ALLOW_ALL);
-    irq_spinlock_unlock(&rwl->lock, true);
+    spinlock_unlock(&rwl->lock);
+    interrupts_restore(ipl);
+
 }

…
  *
  * @param rwl Reader/Writer lock.
- *
  */
 void rwlock_read_unlock(rwlock_t *rwl)
 {
-    irq_spinlock_lock(&rwl->lock, true);
-
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&rwl->lock);
     if (!--rwl->readers_in)
         let_others_in(rwl, ALLOW_ALL);
-
-    irq_spinlock_unlock(&rwl->lock, true);
+    spinlock_unlock(&rwl->lock);
+    interrupts_restore(ipl);
+}
+
+
+/** Direct handoff of reader/writer lock ownership.
+ *
+ * Direct handoff of reader/writer lock ownership
+ * to waiting readers or a writer.
+ *
+ * Must be called with rwl->lock locked.
+ * Must be called with interrupts_disable()'d.
+ *
+ * @param rwl Reader/Writer lock.
+ * @param readers_only See the description below.
+ *
+ * If readers_only is false: (unlock scenario)
+ *     Let the first sleeper on 'exclusive' mutex in, no matter
+ *     whether it is a reader or a writer. If there are more leading
+ *     readers in line, let each of them in.
+ *
+ * Otherwise: (timeout scenario)
+ *     Let all leading readers in.
+ */
+void let_others_in(rwlock_t *rwl, int readers_only)
+{
+    rwlock_type_t type = RWLOCK_NONE;
+    thread_t *t = NULL;
+    bool one_more = true;
+
+    spinlock_lock(&rwl->exclusive.sem.wq.lock);
+
+    if (!list_empty(&rwl->exclusive.sem.wq.head))
+        t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
+            wq_link);
+    do {
+        if (t) {
+            spinlock_lock(&t->lock);
+            type = t->rwlock_holder_type;
+            spinlock_unlock(&t->lock);
+        }
+
+        /*
+         * If readers_only is true, we wake all leading readers
+         * if and only if rwl is locked by another reader.
+         * Assumption: readers_only ==> rwl->readers_in
+         */
+        if (readers_only && (type != RWLOCK_READER))
+            break;
+
+        if (type == RWLOCK_READER) {
+            /*
+             * Waking up a reader.
+             * We are responsible for incrementing rwl->readers_in
+             * for it.
+             */
+            rwl->readers_in++;
+        }
+
+        /*
+         * Only the last iteration through this loop can increment
+         * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
+         * iterations will wake up a thread.
+         */
+        /* We call the internal version of waitq_wakeup, which
+         * relies on the fact that the waitq is already locked.
+         */
+        _waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
+
+        t = NULL;
+        if (!list_empty(&rwl->exclusive.sem.wq.head)) {
+            t = list_get_instance(rwl->exclusive.sem.wq.head.next,
+                thread_t, wq_link);
+            if (t) {
+                spinlock_lock(&t->lock);
+                if (t->rwlock_holder_type != RWLOCK_READER)
+                    one_more = false;
+                spinlock_unlock(&t->lock);
+            }
+        }
+    } while ((type == RWLOCK_READER) && t && one_more);
+
+    spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+}
+
+/** Release spinlock callback
+ *
+ * This is a callback function invoked from the scheduler.
+ * The callback is registered in _rwlock_read_lock_timeout().
+ *
+ * @param arg Spinlock.
+ */
+void release_spinlock(void *arg)
+{
+    spinlock_unlock((spinlock_t *) arg);
 }
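For orientation, the sketch below shows how the interface diffed above is typically driven. It is not part of the changeset: the lock and counter names are hypothetical, and SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE and the SYNCH_SUCCEEDED() predicate are assumed to be the usual constants and macro from <synch/synch.h> that pair with the SYNCH_FAILED() used in the file itself.

    /* A minimal usage sketch; assumes a kernel context where THREAD is valid. */
    #include <synch/rwlock.h>
    #include <synch/synch.h>

    static rwlock_t demo_lock;   /* hypothetical lock protecting demo_value */
    static int demo_value;

    static void demo_init(void)
    {
        /* Must run once before any lock/unlock call. */
        rwlock_initialize(&demo_lock);
    }

    static void demo_reader(void)
    {
        /* Block without a timeout; readers are admitted concurrently. */
        int rc = _rwlock_read_lock_timeout(&demo_lock, SYNCH_NO_TIMEOUT,
            SYNCH_FLAGS_NONE);
        if (SYNCH_SUCCEEDED(rc)) {
            int snapshot = demo_value;   /* read under the lock */
            (void) snapshot;
            rwlock_read_unlock(&demo_lock);
        }
    }

    static void demo_writer(void)
    {
        /* Writers simply contend for the 'exclusive' mutex. */
        int rc = _rwlock_write_lock_timeout(&demo_lock, SYNCH_NO_TIMEOUT,
            SYNCH_FLAGS_NONE);
        if (SYNCH_SUCCEEDED(rc)) {
            demo_value++;                /* modify under the lock */
            rwlock_write_unlock(&demo_lock);
        }
    }

A caller that passes a nonzero usec must also be prepared for ESYNCH_TIMEOUT and ESYNCH_INTERRUPTED; as the comments in _rwlock_write_lock_timeout() note, the state of the lock is unknown to the failed caller at that point, so the protected data must not be touched on those paths.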