Changes in kernel/generic/src/synch/rwlock.c [f651e80:da1bafb] in mainline
File: kernel/generic/src/synch/rwlock.c (1 edited)
Legend:
  ' ' Unmodified
  '+' Added
  '-' Removed
kernel/generic/src/synch/rwlock.c
--- rf651e80
+++ rda1bafb

 /**
  * @file
- * @brief
+ * @brief Reader/Writer locks.
  *
  * A reader/writer lock can be held by multiple readers at a time.
…
  * each thread can block on only one rwlock at a time.
  */

 #include <synch/rwlock.h>
 #include <synch/spinlock.h>
…
 #include <panic.h>

-#define ALLOW_ALL 0
-#define ALLOW_READERS_ONLY 1
-
-static void let_others_in(rwlock_t *rwl, int readers_only);
-static void release_spinlock(void *arg);
+#define ALLOW_ALL 0
+#define ALLOW_READERS_ONLY 1

 /** Initialize reader/writer lock
…
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_initialize(rwlock_t *rwl) {
-	spinlock_initialize(&rwl->lock, "rwlock_t");
+	irq_spinlock_initialize(&rwl->lock, "rwl.lock");
 	mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
 	rwl->readers_in = 0;
 }

+/** Direct handoff of reader/writer lock ownership.
+ *
+ * Direct handoff of reader/writer lock ownership
+ * to waiting readers or a writer.
+ *
+ * Must be called with rwl->lock locked.
+ * Must be called with interrupts_disable()'d.
+ *
+ * @param rwl          Reader/Writer lock.
+ * @param readers_only See the description below.
+ *
+ * If readers_only is false: (unlock scenario)
+ * Let the first sleeper on 'exclusive' mutex in, no matter
+ * whether it is a reader or a writer. If there are more leading
+ * readers in line, let each of them in.
+ *
+ * Otherwise: (timeout scenario)
+ * Let all leading readers in.
+ *
+ */
+static void let_others_in(rwlock_t *rwl, int readers_only)
+{
+	rwlock_type_t type = RWLOCK_NONE;
+	thread_t *thread = NULL;
+	bool one_more = true;
+
+	irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
+
+	if (!list_empty(&rwl->exclusive.sem.wq.head))
+		thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
+		    thread_t, wq_link);
+
+	do {
+		if (thread) {
+			irq_spinlock_lock(&thread->lock, false);
+			type = thread->rwlock_holder_type;
+			irq_spinlock_unlock(&thread->lock, false);
+		}
+
+		/*
+		 * If readers_only is true, we wake all leading readers
+		 * if and only if rwl is locked by another reader.
+		 * Assumption: readers_only ==> rwl->readers_in
+		 *
+		 */
+		if ((readers_only) && (type != RWLOCK_READER))
+			break;
+
+		if (type == RWLOCK_READER) {
+			/*
+			 * Waking up a reader.
+			 * We are responsible for incrementing rwl->readers_in
+			 * for it.
+			 *
+			 */
+			rwl->readers_in++;
+		}
+
+		/*
+		 * Only the last iteration through this loop can increment
+		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
+		 * iterations will wake up a thread.
+		 *
+		 */
+
+		/*
+		 * We call the internal version of waitq_wakeup, which
+		 * relies on the fact that the waitq is already locked.
+		 *
+		 */
+		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
+
+		thread = NULL;
+		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
+			thread = list_get_instance(rwl->exclusive.sem.wq.head.next,
+			    thread_t, wq_link);
+
+			if (thread) {
+				irq_spinlock_lock(&thread->lock, false);
+				if (thread->rwlock_holder_type != RWLOCK_READER)
+					one_more = false;
+				irq_spinlock_unlock(&thread->lock, false);
+			}
+		}
+	} while ((type == RWLOCK_READER) && (thread) && (one_more));
+
+	irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
+}
+
 /** Acquire reader/writer lock for reading
  *
…
  * Timeout and willingness to block may be specified.
  *
- * @param rwl Reader/Writer lock.
- * @param usec Timeout in microseconds.
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Specify mode of operation.
  *
…
  *
  * @return See comment for waitq_sleep_timeout().
- */
-int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
-{
-	ipl_t ipl;
-	int rc;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+ *
+ */
+int _rwlock_write_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
+{
+	irq_spinlock_lock(&THREAD->lock, true);
 	THREAD->rwlock_holder_type = RWLOCK_WRITER;
-	spinlock_unlock(&THREAD->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&THREAD->lock, true);
+
 	/*
 	 * Writers take the easy part.
 	 * They just need to acquire the exclusive mutex.
+	 *
 	 */
-	rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
+	int rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
 	if (SYNCH_FAILED(rc)) {
-
 		/*
 		 * Lock operation timed out or was interrupted.
 		 * The state of rwl is UNKNOWN at this point.
 		 * No claims about its holder can be made.
-		 */
-
-		ipl = interrupts_disable();
-		spinlock_lock(&rwl->lock);
+		 *
+		 */
+		irq_spinlock_lock(&rwl->lock, true);
+
 		/*
 		 * Now when rwl is locked, we can inspect it again.
 		 * If it is held by some readers already, we can let
 		 * readers from the head of the wait queue in.
+		 *
 		 */
 		if (rwl->readers_in)
 			let_others_in(rwl, ALLOW_READERS_ONLY);
-		spinlock_unlock(&rwl->lock);
-		interrupts_restore(ipl);
+
+		irq_spinlock_unlock(&rwl->lock, true);
 	}

 	return rc;
+}
+
+/** Release spinlock callback
+ *
+ * This is a callback function invoked from the scheduler.
+ * The callback is registered in _rwlock_read_lock_timeout().
+ *
+ * @param arg Spinlock.
+ *
+ */
+static void release_spinlock(void *arg)
+{
+	if (arg != NULL)
+		irq_spinlock_unlock((irq_spinlock_t *) arg, false);
 }

…
  * Timeout and willingness to block may be specified.
  *
- * @param rwl Reader/Writer lock.
- * @param usec Timeout in microseconds.
+ * @param rwl   Reader/Writer lock.
+ * @param usec  Timeout in microseconds.
  * @param flags Select mode of operation.
  *
…
  *
  * @return See comment for waitq_sleep_timeout().
- */
-int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, int flags)
-{
-	int rc;
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&THREAD->lock);
+ *
+ */
+int _rwlock_read_lock_timeout(rwlock_t *rwl, uint32_t usec, unsigned int flags)
+{
+	/*
+	 * Since the locking scenarios get a little bit too
+	 * complicated, we do not rely on internal irq_spinlock_t
+	 * interrupt disabling logic here and control interrupts
+	 * manually.
+	 *
+	 */
+	ipl_t ipl = interrupts_disable();
+
+	irq_spinlock_lock(&THREAD->lock, false);
 	THREAD->rwlock_holder_type = RWLOCK_READER;
-	spinlock_unlock(&THREAD->lock);
-
-	spinlock_lock(&rwl->lock);
-
+	irq_spinlock_pass(&THREAD->lock, &rwl->lock);
+
 	/*
 	 * Find out whether we can get what we want without blocking.
+	 *
 	 */
-	rc = mutex_trylock(&rwl->exclusive);
+	int rc = mutex_trylock(&rwl->exclusive);
 	if (SYNCH_FAILED(rc)) {
-
 		/*
 		 * 'exclusive' mutex is being held by someone else.
…
 		 * else waiting for it, we can enter the critical
 		 * section.
-		 */
-
+		 *
+		 */
+
 		if (rwl->readers_in) {
-			spinlock_lock(&rwl->exclusive.sem.wq.lock);
+			irq_spinlock_lock(&rwl->exclusive.sem.wq.lock, false);
 			if (list_empty(&rwl->exclusive.sem.wq.head)) {
 				/*
 				 * We can enter.
 				 */
-				spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+				irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
 				goto shortcut;
 			}
-			spinlock_unlock(&rwl->exclusive.sem.wq.lock);
+			irq_spinlock_unlock(&rwl->exclusive.sem.wq.lock, false);
 		}

 		/*
 		 * In order to prevent a race condition when a reader
…
 		 * we register a function to unlock rwl->lock
 		 * after this thread is put asleep.
-		 */
-#ifdef CONFIG_SMP
+		 *
+		 */
+#ifdef CONFIG_SMP
 		thread_register_call_me(release_spinlock, &rwl->lock);
 #else
 		thread_register_call_me(release_spinlock, NULL);
-
 #endif
+
 		rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags);
 		switch (rc) {
…
 			/*
 			 * release_spinlock() wasn't called
+			 *
 			 */
 			thread_register_call_me(NULL, NULL);
-			spinlock_unlock(&rwl->lock);
+			irq_spinlock_unlock(&rwl->lock, false);
 		case ESYNCH_TIMEOUT:
 		case ESYNCH_INTERRUPTED:
…
 			 * The sleep timed out.
 			 * We just restore interrupt priority level.
+			 *
 			 */
-		case ESYNCH_OK_BLOCKED:
+		case ESYNCH_OK_BLOCKED:
 			/*
 			 * We were woken with rwl->readers_in already
…
 			 * 'readers_in' is incremented. Same time means both
 			 * events happen atomically when rwl->lock is held.)
+			 *
 			 */
 			interrupts_restore(ipl);
…
 		return rc;
 	}

 shortcut:
-
 	/*
 	 * We can increment readers_in only if we didn't go to sleep.
 	 * For sleepers, rwlock_let_others_in() will do the job.
+	 *
 	 */
 	rwl->readers_in++;
-
-	spinlock_unlock(&rwl->lock);
+	irq_spinlock_unlock(&rwl->lock, false);
 	interrupts_restore(ipl);

 	return ESYNCH_OK_ATOMIC;
 }
…
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_write_unlock(rwlock_t *rwl)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&rwl->lock);
+	irq_spinlock_lock(&rwl->lock, true);
 	let_others_in(rwl, ALLOW_ALL);
-	spinlock_unlock(&rwl->lock);
-	interrupts_restore(ipl);
-
+	irq_spinlock_unlock(&rwl->lock, true);
 }

…
  *
  * @param rwl Reader/Writer lock.
+ *
  */
 void rwlock_read_unlock(rwlock_t *rwl)
 {
-	ipl_t ipl;
-
-	ipl = interrupts_disable();
-	spinlock_lock(&rwl->lock);
+	irq_spinlock_lock(&rwl->lock, true);
+
 	if (!--rwl->readers_in)
 		let_others_in(rwl, ALLOW_ALL);
-	spinlock_unlock(&rwl->lock);
-	interrupts_restore(ipl);
-}
-
-
-/** Direct handoff of reader/writer lock ownership.
- *
- * Direct handoff of reader/writer lock ownership
- * to waiting readers or a writer.
- *
- * Must be called with rwl->lock locked.
- * Must be called with interrupts_disable()'d.
- *
- * @param rwl Reader/Writer lock.
- * @param readers_only See the description below.
- *
- * If readers_only is false: (unlock scenario)
- * Let the first sleeper on 'exclusive' mutex in, no matter
- * whether it is a reader or a writer. If there are more leading
- * readers in line, let each of them in.
- *
- * Otherwise: (timeout scenario)
- * Let all leading readers in.
- */
-void let_others_in(rwlock_t *rwl, int readers_only)
-{
-	rwlock_type_t type = RWLOCK_NONE;
-	thread_t *t = NULL;
-	bool one_more = true;
-
-	spinlock_lock(&rwl->exclusive.sem.wq.lock);
-
-	if (!list_empty(&rwl->exclusive.sem.wq.head))
-		t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
-		    wq_link);
-	do {
-		if (t) {
-			spinlock_lock(&t->lock);
-			type = t->rwlock_holder_type;
-			spinlock_unlock(&t->lock);
-		}
-
-		/*
-		 * If readers_only is true, we wake all leading readers
-		 * if and only if rwl is locked by another reader.
-		 * Assumption: readers_only ==> rwl->readers_in
-		 */
-		if (readers_only && (type != RWLOCK_READER))
-			break;
-
-
-		if (type == RWLOCK_READER) {
-			/*
-			 * Waking up a reader.
-			 * We are responsible for incrementing rwl->readers_in
-			 * for it.
-			 */
-			rwl->readers_in++;
-		}
-
-		/*
-		 * Only the last iteration through this loop can increment
-		 * rwl->exclusive.sem.wq.missed_wakeup's. All preceeding
-		 * iterations will wake up a thread.
-		 */
-		/* We call the internal version of waitq_wakeup, which
-		 * relies on the fact that the waitq is already locked.
-		 */
-		_waitq_wakeup_unsafe(&rwl->exclusive.sem.wq, WAKEUP_FIRST);
-
-		t = NULL;
-		if (!list_empty(&rwl->exclusive.sem.wq.head)) {
-			t = list_get_instance(rwl->exclusive.sem.wq.head.next,
-			    thread_t, wq_link);
-			if (t) {
-				spinlock_lock(&t->lock);
-				if (t->rwlock_holder_type != RWLOCK_READER)
-					one_more = false;
-				spinlock_unlock(&t->lock);
-			}
-		}
-	} while ((type == RWLOCK_READER) && t && one_more);
-
-	spinlock_unlock(&rwl->exclusive.sem.wq.lock);
-}
-
-/** Release spinlock callback
- *
- * This is a callback function invoked from the scheduler.
- * The callback is registered in _rwlock_read_lock_timeout().
- *
- * @param arg Spinlock.
- */
-void release_spinlock(void *arg)
-{
-	spinlock_unlock((spinlock_t *) arg);
+
+	irq_spinlock_unlock(&rwl->lock, true);
 }
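The pattern running through the whole changeset is the switch from explicit interrupt control around plain spinlock_t locks to the irq_spinlock_t API: irq_spinlock_initialize(), irq_spinlock_lock()/irq_spinlock_unlock() with a boolean argument that, as the in-code comment about "internal irq_spinlock_t interrupt disabling logic" indicates, selects whether the operation also disables and restores interrupts, and irq_spinlock_pass() for handing one held lock over to another. The sketch below contrasts the two idioms; it is an illustration only, not part of the changeset, the demo_old_t/demo_new_t types and both functions are invented for the example, and it assumes the HelenOS kernel environment (the same headers and primitives used by rwlock.c above).

/* Hypothetical structures, used only for this illustration. */
typedef struct {
	spinlock_t lock;      /* old-style lock, as in revision f651e80 */
	int value;
} demo_old_t;

typedef struct {
	irq_spinlock_t lock;  /* new-style lock, as in revision da1bafb */
	int value;
} demo_new_t;

/* Old idiom: the caller saves the interrupt priority level, disables
 * interrupts, takes the plain spinlock and restores everything by hand,
 * as the removed code in rwlock_write_unlock() did. */
static void demo_update_old(demo_old_t *demo)
{
	ipl_t ipl = interrupts_disable();
	spinlock_lock(&demo->lock);

	demo->value++;

	spinlock_unlock(&demo->lock);
	interrupts_restore(ipl);
}

/* New idiom: passing true makes irq_spinlock_lock()/unlock() disable and
 * restore interrupts themselves; passing false means the caller manages
 * interrupts separately, as _rwlock_read_lock_timeout() above does when it
 * combines manual interrupts_disable() with irq_spinlock_pass(). */
static void demo_update_new(demo_new_t *demo)
{
	irq_spinlock_lock(&demo->lock, true);
	demo->value++;
	irq_spinlock_unlock(&demo->lock, true);
}

In the diff itself the true variant appears where the old code paired interrupts_disable()/interrupts_restore() with the lock (rwlock_write_unlock(), rwlock_read_unlock(), _rwlock_write_lock_timeout()), and the false variant wherever _rwlock_read_lock_timeout() or let_others_in() already controls the interrupt priority level.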