Changeset 514d561 in mainline for uspace/lib/c/generic/fibril_synch.c
- Timestamp: 2018-07-20T16:27:20Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 05208d9
- Parents: 7137f74c
- git-author: Jiří Zárevúcky <jiri.zarevucky@…> (2018-07-19 21:52:47)
- git-committer: Jiří Zárevúcky <jiri.zarevucky@…> (2018-07-20 16:27:20)
- File: 1 edited (uspace/lib/c/generic/fibril_synch.c)
uspace/lib/c/generic/fibril_synch.c
--- uspace/lib/c/generic/fibril_synch.c (r7137f74c)
+++ uspace/lib/c/generic/fibril_synch.c (r514d561)
 #include <stdio.h>
 #include <io/kio.h>
+#include <mem.h>
+#include <context.h>
 
 #include "private/async.h"
…
 static fibril_local bool deadlocked = false;
 
-static void optimize_execution_power(void)
-{
-    /*
-     * When waking up a worker fibril previously blocked in fibril
-     * synchronization, chances are that there is an idle manager fibril
-     * waiting for IPC, that could start executing the awakened worker
-     * fibril right away. We try to detect this and bring the manager
-     * fibril back to fruitful work.
-     */
-    async_poke();
-}
+typedef struct {
+    link_t link;
+    fibril_event_t event;
+    fibril_mutex_t *mutex;
+    fid_t fid;
+} awaiter_t;
+
+#define AWAITER_INIT { .fid = fibril_get_id() }
 
 static void print_deadlock(fibril_owner_info_t *oi)
 {
+    // FIXME: Print to stderr.
+
     fibril_t *f = (fibril_t *) fibril_get_id();
 
…
 
 
-static void check_for_deadlock(fibril_owner_info_t *oi)
-{
+static void check_fibril_for_deadlock(fibril_owner_info_t *oi, fibril_t *fib)
+{
+    futex_assert_is_locked(&async_futex);
+
     while (oi && oi->owned_by) {
-        if (oi->owned_by == (fibril_t *) fibril_get_id()) {
+        if (oi->owned_by == fib) {
+            futex_unlock(&async_futex);
             print_deadlock(oi);
             abort();
…
 }
 
+static void check_for_deadlock(fibril_owner_info_t *oi)
+{
+    check_fibril_for_deadlock(oi, fibril_self());
+}
 
 void fibril_mutex_initialize(fibril_mutex_t *fm)
…
 
     futex_lock(&async_futex);
-    if (fm->counter-- <= 0) {
-        awaiter_t wdata;
-
-        awaiter_initialize(&wdata);
-        wdata.fid = fibril_get_id();
-        wdata.wu_event.inlist = true;
-        list_append(&wdata.wu_event.link, &fm->waiters);
-        check_for_deadlock(&fm->oi);
-        f->waits_for = &fm->oi;
-        fibril_switch(FIBRIL_FROM_BLOCKED);
-    } else {
+
+    if (fm->counter-- > 0) {
         fm->oi.owned_by = f;
-    }
-    futex_unlock(&async_futex);
+        futex_unlock(&async_futex);
+        return;
+    }
+
+    awaiter_t wdata = AWAITER_INIT;
+    list_append(&wdata.link, &fm->waiters);
+    check_for_deadlock(&fm->oi);
+    f->waits_for = &fm->oi;
+
+    futex_unlock(&async_futex);
+
+    fibril_wait_for(&wdata.event);
 }
 
…
 static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
 {
+    assert(fm->oi.owned_by == (fibril_t *) fibril_get_id());
+
     if (fm->counter++ < 0) {
-        link_t *tmp;
-        awaiter_t *wdp;
-        fibril_t *f;
-
-        tmp = list_first(&fm->waiters);
-        assert(tmp != NULL);
-        wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
-        wdp->active = true;
-        wdp->wu_event.inlist = false;
-
-        f = (fibril_t *) wdp->fid;
+        awaiter_t *wdp = list_pop(&fm->waiters, awaiter_t, link);
+        assert(wdp);
+
+        fibril_t *f = (fibril_t *) wdp->fid;
         fm->oi.owned_by = f;
         f->waits_for = NULL;
 
-        list_remove(&wdp->wu_event.link);
-        fibril_add_ready(wdp->fid);
-        optimize_execution_power();
+        fibril_notify(&wdp->event);
     } else {
         fm->oi.owned_by = NULL;
…
 void fibril_mutex_unlock(fibril_mutex_t *fm)
 {
-    assert(fibril_mutex_is_locked(fm));
     futex_lock(&async_futex);
     _fibril_mutex_unlock_unsafe(fm);
…
 bool fibril_mutex_is_locked(fibril_mutex_t *fm)
 {
-    bool locked = false;
-
-    futex_lock(&async_futex);
-    if (fm->counter <= 0)
-        locked = true;
-    futex_unlock(&async_futex);
-
+    futex_lock(&async_futex);
+    bool locked = (fm->oi.owned_by == (fibril_t *) fibril_get_id());
+    futex_unlock(&async_futex);
     return locked;
 }
…
 
     futex_lock(&async_futex);
-    if (frw->writers) {
-        awaiter_t wdata;
-
-        awaiter_initialize(&wdata);
-        wdata.fid = (fid_t) f;
-        wdata.wu_event.inlist = true;
-        f->is_writer = false;
-        list_append(&wdata.wu_event.link, &frw->waiters);
-        check_for_deadlock(&frw->oi);
-        f->waits_for = &frw->oi;
-        fibril_switch(FIBRIL_FROM_BLOCKED);
-    } else {
+
+    if (!frw->writers) {
         /* Consider the first reader the owner. */
         if (frw->readers++ == 0)
             frw->oi.owned_by = f;
-    }
-    futex_unlock(&async_futex);
+        futex_unlock(&async_futex);
+        return;
+    }
+
+    f->is_writer = false;
+
+    awaiter_t wdata = AWAITER_INIT;
+    list_append(&wdata.link, &frw->waiters);
+    check_for_deadlock(&frw->oi);
+    f->waits_for = &frw->oi;
+
+    futex_unlock(&async_futex);
+
+    fibril_wait_for(&wdata.event);
 }
 
…
 
     futex_lock(&async_futex);
-    if (frw->writers || frw->readers) {
-        awaiter_t wdata;
-
-        awaiter_initialize(&wdata);
-        wdata.fid = (fid_t) f;
-        wdata.wu_event.inlist = true;
-        f->is_writer = true;
-        list_append(&wdata.wu_event.link, &frw->waiters);
-        check_for_deadlock(&frw->oi);
-        f->waits_for = &frw->oi;
-        fibril_switch(FIBRIL_FROM_BLOCKED);
-    } else {
+
+    if (!frw->writers && !frw->readers) {
         frw->oi.owned_by = f;
         frw->writers++;
-    }
-    futex_unlock(&async_futex);
+        futex_unlock(&async_futex);
+        return;
+    }
+
+    f->is_writer = true;
+
+    awaiter_t wdata = AWAITER_INIT;
+    list_append(&wdata.link, &frw->waiters);
+    check_for_deadlock(&frw->oi);
+    f->waits_for = &frw->oi;
+
+    futex_unlock(&async_futex);
+
+    fibril_wait_for(&wdata.event);
 }
 
 static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
 {
-    futex_lock(&async_futex);
     if (frw->readers) {
         if (--frw->readers) {
             if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
                 /*
-                 * If this reader firbril was considered the
+                 * If this reader fibril was considered the
                  * owner of this rwlock, clear the ownership
                  * information even if there are still more
…
                 frw->oi.owned_by = NULL;
             }
-            goto out;
+
+            return;
         }
     } else {
…
         fibril_t *f;
 
-        wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
+        wdp = list_get_instance(tmp, awaiter_t, link);
         f = (fibril_t *) wdp->fid;
 
-        f->waits_for = NULL;
-
         if (f->is_writer) {
             if (frw->readers)
                 break;
-            wdp->active = true;
-            wdp->wu_event.inlist = false;
-            list_remove(&wdp->wu_event.link);
-            fibril_add_ready(wdp->fid);
             frw->writers++;
-            frw->oi.owned_by = f;
-            optimize_execution_power();
+        } else {
+            frw->readers++;
+        }
+
+        f->waits_for = NULL;
+        list_remove(&wdp->link);
+        frw->oi.owned_by = f;
+        fibril_notify(&wdp->event);
+
+        if (frw->writers)
             break;
-        } else {
-            wdp->active = true;
-            wdp->wu_event.inlist = false;
-            list_remove(&wdp->wu_event.link);
-            fibril_add_ready(wdp->fid);
-            if (frw->readers++ == 0) {
-                /* Consider the first reader the owner. */
-                frw->oi.owned_by = f;
-            }
-            optimize_execution_power();
-        }
-    }
-out:
-    futex_unlock(&async_futex);
+    }
 }
 
 void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
 {
-    assert(fibril_rwlock_is_read_locked(frw));
+    futex_lock(&async_futex);
+    assert(frw->readers > 0);
     _fibril_rwlock_common_unlock(frw);
+    futex_unlock(&async_futex);
 }
 
 void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
 {
-    assert(fibril_rwlock_is_write_locked(frw));
+    futex_lock(&async_futex);
+    assert(frw->writers == 1);
+    assert(frw->oi.owned_by == fibril_self());
     _fibril_rwlock_common_unlock(frw);
+    futex_unlock(&async_futex);
 }
 
 bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
 {
-    bool locked = false;
-
-    futex_lock(&async_futex);
-    if (frw->readers)
-        locked = true;
-    futex_unlock(&async_futex);
-
+    futex_lock(&async_futex);
+    bool locked = (frw->readers > 0);
+    futex_unlock(&async_futex);
     return locked;
 }
…
 bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
 {
-    bool locked = false;
-
-    futex_lock(&async_futex);
-    if (frw->writers) {
-        assert(frw->writers == 1);
-        locked = true;
-    }
-    futex_unlock(&async_futex);
-
+    futex_lock(&async_futex);
+    assert(frw->writers <= 1);
+    bool locked = (frw->writers > 0) && (frw->oi.owned_by == fibril_self());
+    futex_unlock(&async_futex);
     return locked;
 }
…
 }
 
+/**
+ * FIXME: If `timeout` is negative, the function returns ETIMEOUT immediately,
+ * and if `timeout` is 0, the wait never times out.
+ * This is not consistent with other similar APIs.
+ */
 errno_t
 fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
     suseconds_t timeout)
 {
-    awaiter_t wdata;
-
     assert(fibril_mutex_is_locked(fm));
 
…
         return ETIMEOUT;
 
-    awaiter_initialize(&wdata);
-    wdata.fid = fibril_get_id();
-    wdata.to_event.inlist = timeout > 0;
-    wdata.wu_event.inlist = true;
-
-    futex_lock(&async_futex);
+    awaiter_t wdata = AWAITER_INIT;
+    wdata.mutex = fm;
+
+    struct timeval tv;
+    struct timeval *expires = NULL;
     if (timeout) {
-        getuptime(&wdata.to_event.expires);
-        tv_add_diff(&wdata.to_event.expires, timeout);
-        async_insert_timeout(&wdata);
-    }
-    list_append(&wdata.wu_event.link, &fcv->waiters);
+        getuptime(&tv);
+        tv_add_diff(&tv, timeout);
+        expires = &tv;
+    }
+
+    futex_lock(&async_futex);
     _fibril_mutex_unlock_unsafe(fm);
-    fibril_switch(FIBRIL_FROM_BLOCKED);
-    futex_unlock(&async_futex);
-
-    // XXX: This could be replaced with an unlocked version to get rid
-    // of the unlock-lock pair. I deliberately don't do that because
-    // further changes would most likely need to revert that optimization.
+    list_append(&wdata.link, &fcv->waiters);
+    futex_unlock(&async_futex);
+
+    (void) fibril_wait_timeout(&wdata.event, expires);
+
+    futex_lock(&async_futex);
+    bool timed_out = link_in_use(&wdata.link);
+    list_remove(&wdata.link);
+    futex_unlock(&async_futex);
+
     fibril_mutex_lock(fm);
 
-    futex_lock(&async_futex);
-    if (wdata.to_event.inlist)
-        list_remove(&wdata.to_event.link);
-    if (wdata.wu_event.inlist)
-        list_remove(&wdata.wu_event.link);
-    futex_unlock(&async_futex);
-
-    return wdata.to_event.occurred ? ETIMEOUT : EOK;
+    return timed_out ? ETIMEOUT : EOK;
 }
 
 void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
 {
-    errno_t rc;
-
-    rc = fibril_condvar_wait_timeout(fcv, fm, 0);
-    assert(rc == EOK);
-}
-
-static void _fibril_condvar_wakeup_common(fibril_condvar_t *fcv, bool once)
-{
-    link_t *tmp;
-    awaiter_t *wdp;
-
-    futex_lock(&async_futex);
-    while (!list_empty(&fcv->waiters)) {
-        tmp = list_first(&fcv->waiters);
-        wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
-        list_remove(&wdp->wu_event.link);
-        wdp->wu_event.inlist = false;
-        if (!wdp->active) {
-            wdp->active = true;
-            fibril_add_ready(wdp->fid);
-            optimize_execution_power();
-            if (once)
-                break;
-        }
-    }
-    futex_unlock(&async_futex);
+    (void) fibril_condvar_wait_timeout(fcv, fm, 0);
 }
 
 void fibril_condvar_signal(fibril_condvar_t *fcv)
 {
-    _fibril_condvar_wakeup_common(fcv, true);
+    futex_lock(&async_futex);
+
+    awaiter_t *w = list_pop(&fcv->waiters, awaiter_t, link);
+    if (w != NULL)
+        fibril_notify(&w->event);
+
+    futex_unlock(&async_futex);
 }
 
 void fibril_condvar_broadcast(fibril_condvar_t *fcv)
 {
-    _fibril_condvar_wakeup_common(fcv, false);
+    futex_lock(&async_futex);
+
+    awaiter_t *w;
+    while ((w = list_pop(&fcv->waiters, awaiter_t, link)))
+        fibril_notify(&w->event);
+
+    futex_unlock(&async_futex);
 }
 
…
         printf("Deadlock detected.\n");
         stacktrace_print();
-        printf("Fibril %zx is trying to clear timer %p from "
+        printf("Fibril %p is trying to clear timer %p from "
             "inside its handler %p.\n",
             fibril_get_id(), timer, timer->fun);
…
     sem->count++;
 
-    if (sem->count > 0) {
-        futex_unlock(&async_futex);
-        return;
-    }
-
-    link_t *tmp = list_first(&sem->waiters);
-    assert(tmp);
-    list_remove(tmp);
-
-    futex_unlock(&async_futex);
-
-    awaiter_t *wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
-    fibril_add_ready(wdp->fid);
-    optimize_execution_power();
+    if (sem->count <= 0) {
+        awaiter_t *w = list_pop(&sem->waiters, awaiter_t, link);
+        assert(w);
+        fibril_notify(&w->event);
+    }
+
+    futex_unlock(&async_futex);
 }
…
     }
 
-    awaiter_t wdata;
-    awaiter_initialize(&wdata);
-
-    wdata.fid = fibril_get_id();
-    list_append(&wdata.wu_event.link, &sem->waiters);
-
-    fibril_switch(FIBRIL_FROM_BLOCKED);
-    futex_unlock(&async_futex);
+    awaiter_t wdata = AWAITER_INIT;
+    list_append(&wdata.link, &sem->waiters);
+
+    futex_unlock(&async_futex);
+
+    fibril_wait_for(&wdata.event);
 }
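Every primitive touched by this changeset is rewritten into the same shape: the blocking side appends a stack-allocated `awaiter_t` to the primitive's waiter list while holding `async_futex`, drops the futex, and then blocks in `fibril_wait_for()` on the awaiter's `fibril_event_t`; the waking side pops an awaiter off the list under the futex and calls `fibril_notify()` on its event. The sketch below is not part of the changeset; it is a hypothetical single-shot "gate" primitive written against the same private API visible in the diff (`awaiter_t`, `AWAITER_INIT`, `async_futex`, `fibril_wait_for()`, `fibril_notify()`, `list_pop()`) plus the standard `<adt/list.h>` helpers, purely to illustrate how the pattern composes.

```c
/*
 * Hypothetical illustration only -- not part of changeset 514d561.
 * Assumes the file-local awaiter_t/AWAITER_INIT defined in the diff above
 * and the private async_futex / fibril_wait_for() / fibril_notify() API
 * that this changeset builds on.
 */
#include <adt/list.h>
#include <stdbool.h>

typedef struct {
    bool open;
    list_t waiters;
} fibril_gate_t;

static void fibril_gate_initialize(fibril_gate_t *gate)
{
    gate->open = false;
    list_initialize(&gate->waiters);
}

/* Block the calling fibril until the gate is opened. */
static void fibril_gate_wait(fibril_gate_t *gate)
{
    futex_lock(&async_futex);

    if (gate->open) {
        futex_unlock(&async_futex);
        return;
    }

    /* Same shape as the new fibril_mutex_lock(): enqueue, unlock, wait. */
    awaiter_t wdata = AWAITER_INIT;
    list_append(&wdata.link, &gate->waiters);

    futex_unlock(&async_futex);

    fibril_wait_for(&wdata.event);
}

/* Open the gate and wake every waiter, like fibril_condvar_broadcast(). */
static void fibril_gate_open(fibril_gate_t *gate)
{
    futex_lock(&async_futex);

    gate->open = true;

    awaiter_t *w;
    while ((w = list_pop(&gate->waiters, awaiter_t, link)))
        fibril_notify(&w->event);

    futex_unlock(&async_futex);
}
```

As in the changeset itself, the waiter list is only ever manipulated while `async_futex` is held, which appears to be what lets the new code drop the old `wu_event.inlist`/`active` bookkeeping of the previous awaiter machinery.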