Changeset 514d561 in mainline
- Timestamp:
- 2018-07-20T16:27:20Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 05208d9
- Parents:
- 7137f74c
- git-author:
- Jiří Zárevúcky <jiri.zarevucky@…> (2018-07-19 21:52:47)
- git-committer:
- Jiří Zárevúcky <jiri.zarevucky@…> (2018-07-20 16:27:20)
- Location:
- uspace/lib/c
- Files:
- 11 edited
uspace/lib/c/generic/async/client.c
r7137f74c r514d561 104 104 #include <ipc/irq.h> 105 105 #include <ipc/event.h> 106 #include <futex.h>107 106 #include <fibril.h> 108 107 #include <adt/hash_table.h> … … 128 127 /** Message data */ 129 128 typedef struct { 130 awaiter_t wdata;129 fibril_event_t received; 131 130 132 131 /** If reply was received. */ … … 136 135 bool forget; 137 136 138 /** If already destroyed. */139 bool destroyed;140 141 137 /** Pointer to where the answer data is stored. */ 142 138 ipc_call_t *dataptr; … … 145 141 } amsg_t; 146 142 147 static void to_event_initialize(to_event_t *to)148 {149 struct timeval tv = { 0, 0 };150 151 to->inlist = false;152 to->occurred = false;153 link_initialize(&to->link);154 to->expires = tv;155 }156 157 static void wu_event_initialize(wu_event_t *wu)158 {159 wu->inlist = false;160 link_initialize(&wu->link);161 }162 163 void awaiter_initialize(awaiter_t *aw)164 {165 aw->fid = 0;166 aw->active = false;167 to_event_initialize(&aw->to_event);168 wu_event_initialize(&aw->wu_event);169 }170 171 143 static amsg_t *amsg_create(void) 172 144 { 173 amsg_t *msg = malloc(sizeof(amsg_t)); 174 if (msg) { 175 msg->done = false; 176 msg->forget = false; 177 msg->destroyed = false; 178 msg->dataptr = NULL; 179 msg->retval = EINVAL; 180 awaiter_initialize(&msg->wdata); 181 } 182 183 return msg; 145 return calloc(1, sizeof(amsg_t)); 184 146 } 185 147 186 148 static void amsg_destroy(amsg_t *msg) 187 149 { 188 if (!msg)189 return;190 191 assert(!msg->destroyed);192 msg->destroyed = true;193 150 free(msg); 194 151 } … … 251 208 msg->retval = IPC_GET_RETVAL(*data); 252 209 253 /* Copy data after futex_down, just in case the call was detached */210 /* Copy data inside lock, just in case the call was detached */ 254 211 if ((msg->dataptr) && (data)) 255 212 *msg->dataptr = *data; 256 213 257 write_barrier();258 259 /* Remove message from timeout list */260 if (msg->wdata.to_event.inlist)261 list_remove(&msg->wdata.to_event.link);262 263 214 msg->done = true; 264 215 265 216 if (msg->forget) { 266 assert(msg->wdata.active);267 217 amsg_destroy(msg); 268 } else if (!msg->wdata.active) { 269 msg->wdata.active = true; 270 fibril_add_ready(msg->wdata.fid); 218 } else { 219 fibril_notify(&msg->received); 271 220 } 272 221 … … 301 250 302 251 msg->dataptr = dataptr; 303 msg->wdata.active = true;304 252 305 253 errno_t rc = ipc_call_async_4(exch->phone, imethod, arg1, arg2, arg3, … … 343 291 344 292 msg->dataptr = dataptr; 345 msg->wdata.active = true;346 293 347 294 errno_t rc = ipc_call_async_5(exch->phone, imethod, arg1, arg2, arg3, … … 371 318 372 319 amsg_t *msg = (amsg_t *) amsgid; 373 374 futex_lock(&async_futex); 375 376 assert(!msg->forget); 377 assert(!msg->destroyed); 378 379 if (msg->done) { 380 futex_unlock(&async_futex); 381 goto done; 382 } 383 384 msg->wdata.fid = fibril_get_id(); 385 msg->wdata.active = false; 386 msg->wdata.to_event.inlist = false; 387 388 /* Leave the async_futex locked when entering this function */ 389 fibril_switch(FIBRIL_FROM_BLOCKED); 390 futex_unlock(&async_futex); 391 392 done: 320 fibril_wait_for(&msg->received); 321 393 322 if (retval) 394 323 *retval = msg->retval; … … 420 349 421 350 amsg_t *msg = (amsg_t *) amsgid; 422 423 futex_lock(&async_futex);424 425 assert(!msg->forget);426 assert(!msg->destroyed);427 428 if (msg->done) {429 futex_unlock(&async_futex);430 goto done;431 }432 351 433 352 /* … … 438 357 timeout = 0; 439 358 440 getuptime(&msg->wdata.to_event.expires); 441 tv_add_diff(&msg->wdata.to_event.expires, timeout); 442 443 /* 444 * Current fibril 
is inserted as waiting regardless of the 445 * "size" of the timeout. 446 * 447 * Checking for msg->done and immediately bailing out when 448 * timeout == 0 would mean that the manager fibril would never 449 * run (consider single threaded program). 450 * Thus the IPC answer would be never retrieved from the kernel. 451 * 452 * Notice that the actual delay would be very small because we 453 * - switch to manager fibril 454 * - the manager sees expired timeout 455 * - and thus adds us back to ready queue 456 * - manager switches back to some ready fibril 457 * (prior it, it checks for incoming IPC). 458 * 459 */ 460 msg->wdata.fid = fibril_get_id(); 461 msg->wdata.active = false; 462 async_insert_timeout(&msg->wdata); 463 464 /* Leave the async_futex locked when entering this function */ 465 fibril_switch(FIBRIL_FROM_BLOCKED); 466 futex_unlock(&async_futex); 467 468 if (!msg->done) 469 return ETIMEOUT; 470 471 done: 359 struct timeval expires; 360 getuptime(&expires); 361 tv_add_diff(&expires, timeout); 362 363 errno_t rc = fibril_wait_timeout(&msg->received, &expires); 364 if (rc != EOK) 365 return rc; 366 472 367 if (retval) 473 368 *retval = msg->retval; … … 475 370 amsg_destroy(msg); 476 371 477 return 0;372 return EOK; 478 373 } 479 374 … … 494 389 495 390 assert(!msg->forget); 496 assert(!msg->destroyed);497 391 498 392 futex_lock(&async_futex); … … 506 400 507 401 futex_unlock(&async_futex); 508 }509 510 /** Wait for specified time.511 *512 * The current fibril is suspended but the thread continues to execute.513 *514 * @param timeout Duration of the wait in microseconds.515 *516 */517 void fibril_usleep(suseconds_t timeout)518 {519 awaiter_t awaiter;520 awaiter_initialize(&awaiter);521 522 awaiter.fid = fibril_get_id();523 524 getuptime(&awaiter.to_event.expires);525 tv_add_diff(&awaiter.to_event.expires, timeout);526 527 futex_lock(&async_futex);528 529 async_insert_timeout(&awaiter);530 531 /* Leave the async_futex locked when entering this function */532 fibril_switch(FIBRIL_FROM_BLOCKED);533 futex_unlock(&async_futex);534 }535 536 /** Delay execution for the specified number of seconds537 *538 * @param sec Number of seconds to sleep539 */540 void fibril_sleep(unsigned int sec)541 {542 /*543 * Sleep in 1000 second steps to support544 * full argument range545 */546 547 while (sec > 0) {548 unsigned int period = (sec > 1000) ? 1000 : sec;549 550 fibril_usleep(period * 1000000);551 sec -= period;552 }553 402 } 554 403 … … 716 565 717 566 msg->dataptr = &result; 718 msg->wdata.active = true;719 567 720 568 errno_t rc = ipc_call_async_4(phone, IPC_M_CONNECT_ME_TO, -
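Client code is unaffected by this rewrite: async_wait_for() and async_wait_timeout() keep their signatures, and now simply park the calling fibril on the message's fibril_event_t until reply_received() calls fibril_notify(). A minimal caller sketch, assuming a hypothetical method number MY_METHOD and an existing exchange (neither is part of this changeset):

#include <async.h>
#include <errno.h>

#define MY_METHOD 1024  /* hypothetical method number, for illustration only */

static errno_t query(async_exch_t *exch)
{
	ipc_call_t answer;
	aid_t req = async_send_0(exch, MY_METHOD, &answer);

	/* Internally blocks on amsg_t.received via fibril_wait_timeout(). */
	errno_t retval;
	errno_t rc = async_wait_timeout(req, &retval, 1000000);
	if (rc == ETIMEOUT) {
		/* Detach; reply_received() destroys the message when it arrives. */
		async_forget(req);
		return rc;
	}
	return retval;
}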
uspace/lib/c/generic/async/server.c
r7137f74c r514d561 104 104 #include <ipc/irq.h> 105 105 #include <ipc/event.h> 106 #include <futex.h>107 106 #include <fibril.h> 108 107 #include <adt/hash_table.h> … … 118 117 #include <stdlib.h> 119 118 #include <macros.h> 119 #include <str_error.h> 120 120 #include <as.h> 121 121 #include <abi/mm/as.h> … … 127 127 /** Async framework global futex */ 128 128 futex_t async_futex = FUTEX_INITIALIZER; 129 130 /** Number of threads waiting for IPC in the kernel. */131 static atomic_t threads_in_ipc_wait = { 0 };132 129 133 130 /** Call data */ … … 148 145 /* Server connection data */ 149 146 typedef struct { 150 awaiter_t wdata; 147 /** Fibril handling the connection. */ 148 fid_t fid; 151 149 152 150 /** Hash table link. */ … … 161 159 /** Link to the client tracking structure. */ 162 160 client_t *client; 161 162 /** Message event. */ 163 fibril_event_t msg_arrived; 163 164 164 165 /** Messages that should be delivered to this fibril. */ … … 251 252 /* The remaining structures are guarded by async_futex. */ 252 253 static hash_table_t conn_hash_table; 253 static LIST_INITIALIZE(timeout_list);254 254 255 255 static size_t client_key_hash(void *key) … … 487 487 ipc_answer_0(call->cap_handle, ENOMEM); 488 488 489 return ( uintptr_t) NULL;489 return (fid_t) NULL; 490 490 } 491 491 492 492 conn->in_task_id = in_task_id; 493 493 conn->in_phone_hash = in_phone_hash; 494 conn->msg_arrived = FIBRIL_EVENT_INIT; 494 495 list_initialize(&conn->msg_queue); 495 496 conn->close_chandle = CAP_NIL; … … 503 504 504 505 /* We will activate the fibril ASAP */ 505 conn->wdata.active = true; 506 conn->wdata.fid = fibril_create(connection_fibril, conn); 507 508 if (conn->wdata.fid == 0) { 506 conn->fid = fibril_create(connection_fibril, conn); 507 508 if (conn->fid == 0) { 509 509 free(conn); 510 510 … … 512 512 ipc_answer_0(call->cap_handle, ENOMEM); 513 513 514 return ( uintptr_t) NULL;514 return (fid_t) NULL; 515 515 } 516 516 … … 521 521 futex_unlock(&async_futex); 522 522 523 fibril_add_ready(conn-> wdata.fid);524 525 return conn-> wdata.fid;523 fibril_add_ready(conn->fid); 524 525 return conn->fid; 526 526 } 527 527 … … 566 566 fid_t fid = async_new_connection(answer.in_task_id, phone_hash, 567 567 NULL, handler, data); 568 if (fid == ( uintptr_t) NULL)568 if (fid == (fid_t) NULL) 569 569 return ENOMEM; 570 570 … … 602 602 }; 603 603 604 /** Sort in current fibril's timeout request.605 *606 * @param wd Wait data of the current fibril.607 *608 */609 void async_insert_timeout(awaiter_t *wd)610 {611 assert(wd);612 613 wd->to_event.occurred = false;614 wd->to_event.inlist = true;615 616 link_t *tmp = timeout_list.head.next;617 while (tmp != &timeout_list.head) {618 awaiter_t *cur =619 list_get_instance(tmp, awaiter_t, to_event.link);620 621 if (tv_gteq(&cur->to_event.expires, &wd->to_event.expires))622 break;623 624 tmp = tmp->next;625 }626 627 list_insert_before(&wd->to_event.link, tmp);628 }629 630 604 /** Try to route a call to an appropriate connection fibril. 
631 605 * … … 657 631 connection_t *conn = hash_table_get_inst(link, connection_t, link); 658 632 633 // FIXME: malloc in critical section 659 634 msg_t *msg = malloc(sizeof(*msg)); 660 635 if (!msg) { … … 670 645 671 646 /* If the connection fibril is waiting for an event, activate it */ 672 if (!conn->wdata.active) { 673 674 /* If in timeout list, remove it */ 675 if (conn->wdata.to_event.inlist) { 676 conn->wdata.to_event.inlist = false; 677 list_remove(&conn->wdata.to_event.link); 678 } 679 680 conn->wdata.active = true; 681 fibril_add_ready(conn->wdata.fid); 682 } 647 fibril_notify(&conn->msg_arrived); 683 648 684 649 futex_unlock(&async_futex); … … 987 952 connection_t *conn = fibril_connection; 988 953 954 struct timeval tv; 955 struct timeval *expires = NULL; 956 if (usecs) { 957 getuptime(&tv); 958 tv_add_diff(&tv, usecs); 959 expires = &tv; 960 } 961 989 962 futex_lock(&async_futex); 990 991 if (usecs) {992 getuptime(&conn->wdata.to_event.expires);993 tv_add_diff(&conn->wdata.to_event.expires, usecs);994 } else995 conn->wdata.to_event.inlist = false;996 963 997 964 /* If nothing in queue, wait until something arrives */ … … 1011 978 } 1012 979 1013 if (usecs) 1014 async_insert_timeout(&conn->wdata); 1015 1016 conn->wdata.active = false; 1017 1018 /* 1019 * Note: the current fibril will be rescheduled either due to a 1020 * timeout or due to an arriving message destined to it. In the 1021 * former case, handle_expired_timeouts() and, in the latter 1022 * case, route_call() will perform the wakeup. 1023 */ 1024 fibril_switch(FIBRIL_FROM_BLOCKED); 1025 1026 if ((usecs) && (conn->wdata.to_event.occurred) && 1027 (list_empty(&conn->msg_queue))) { 1028 /* If we timed out -> exit */ 1029 futex_unlock(&async_futex); 980 // TODO: replace with cvar 981 futex_unlock(&async_futex); 982 983 errno_t rc = fibril_wait_timeout(&conn->msg_arrived, expires); 984 if (rc == ETIMEOUT) 1030 985 return false; 1031 } 986 987 futex_lock(&async_futex); 1032 988 } 1033 989 … … 1126 1082 } 1127 1083 1128 /** Fire all timeouts that expired. */1129 static suseconds_t handle_expired_timeouts(unsigned int *flags)1130 {1131 /* Make sure the async_futex is held. */1132 futex_assert_is_locked(&async_futex);1133 1134 struct timeval tv;1135 getuptime(&tv);1136 1137 bool fired = false;1138 1139 link_t *cur = list_first(&timeout_list);1140 while (cur != NULL) {1141 awaiter_t *waiter =1142 list_get_instance(cur, awaiter_t, to_event.link);1143 1144 if (tv_gt(&waiter->to_event.expires, &tv)) {1145 if (fired) {1146 *flags = SYNCH_FLAGS_NON_BLOCKING;1147 return 0;1148 }1149 *flags = 0;1150 return tv_sub_diff(&waiter->to_event.expires, &tv);1151 }1152 1153 list_remove(&waiter->to_event.link);1154 waiter->to_event.inlist = false;1155 waiter->to_event.occurred = true;1156 1157 /*1158 * Redundant condition?1159 * The fibril should not be active when it gets here.1160 */1161 if (!waiter->active) {1162 waiter->active = true;1163 fibril_add_ready(waiter->fid);1164 fired = true;1165 }1166 1167 cur = list_first(&timeout_list);1168 }1169 1170 if (fired) {1171 *flags = SYNCH_FLAGS_NON_BLOCKING;1172 return 0;1173 }1174 1175 return SYNCH_NO_TIMEOUT;1176 }1177 1178 1084 /** Endless loop dispatching incoming calls and answers. 
1179 1085 * … … 1183 1089 static errno_t async_manager_worker(void) 1184 1090 { 1091 ipc_call_t call; 1092 errno_t rc; 1093 1185 1094 while (true) { 1186 futex_lock(&async_futex); 1187 fibril_switch(FIBRIL_FROM_MANAGER); 1188 1189 /* 1190 * The switch only returns when there is no non-manager fibril 1191 * it can run. 1192 */ 1193 1194 unsigned int flags = SYNCH_FLAGS_NONE; 1195 suseconds_t next_timeout = handle_expired_timeouts(&flags); 1196 futex_unlock(&async_futex); 1197 1198 atomic_inc(&threads_in_ipc_wait); 1199 1200 ipc_call_t call; 1201 errno_t rc = ipc_wait(&call, next_timeout, flags); 1202 1203 atomic_dec(&threads_in_ipc_wait); 1204 1095 rc = fibril_ipc_wait(&call, NULL); 1205 1096 if (rc == EOK) 1206 1097 handle_call(&call); … … 1225 1116 1226 1117 /** Add one manager to manager list. */ 1227 voidasync_create_manager(void)1118 fid_t async_create_manager(void) 1228 1119 { 1229 1120 fid_t fid = fibril_create_generic(async_manager_fibril, NULL, PAGE_SIZE); 1230 if (fid != 0) 1231 fibril_add_manager(fid); 1232 } 1233 1234 /** Remove one manager from manager list */ 1235 void async_destroy_manager(void) 1236 { 1237 fibril_remove_manager(); 1121 fibril_start(fid); 1122 return fid; 1238 1123 } 1239 1124 … … 1252 1137 ¬ification_hash_table_ops)) 1253 1138 abort(); 1139 1140 async_create_manager(); 1254 1141 } 1255 1142 … … 1342 1229 1343 1230 return EOK; 1344 }1345 1346 /** Interrupt one thread of this task from waiting for IPC. */1347 void async_poke(void)1348 {1349 if (atomic_get(&threads_in_ipc_wait) > 0)1350 ipc_poke();1351 1231 } 1352 1232 … … 1834 1714 __noreturn void async_manager(void) 1835 1715 { 1836 f utex_lock(&async_futex);1837 fibril_ switch(FIBRIL_FROM_DEAD);1716 fibril_event_t ever = FIBRIL_EVENT_INIT; 1717 fibril_wait_for(&ever); 1838 1718 __builtin_unreachable(); 1839 1719 } -
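Server mainlines keep the same shape after this change: dispatch now runs in the manager fibril spawned from __async_server_init() via async_create_manager(), while async_manager() itself just blocks the calling fibril forever on a fresh fibril_event_t that nobody triggers. A typical skeleton (port registration elided):

#include <async.h>
#include <stdio.h>

int main(void)
{
	/* ... register interfaces / port handlers here ... */

	printf("server: accepting connections\n");
	async_manager();  /* fibril_wait_for() on an event that is never notified */

	/* Not reached. */
}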
uspace/lib/c/generic/fibril.c
r7137f74c r514d561 2 2 * Copyright (c) 2006 Ondrej Palkovsky 3 3 * Copyright (c) 2007 Jakub Jermar 4 * Copyright (c) 2018 CZ.NIC, z.s.p.o. 4 5 * All rights reserved. 5 6 * … … 39 40 #include <tls.h> 40 41 #include <stdlib.h> 41 #include <abi/mm/as.h>42 42 #include <as.h> 43 #include <stdio.h>44 #include <libarch/barrier.h>45 43 #include <context.h> 46 44 #include <futex.h> 47 45 #include <assert.h> 48 #include <async.h> 49 46 47 #include <mem.h> 48 #include <str.h> 49 #include <ipc/ipc.h> 50 #include <libarch/faddr.h> 50 51 #include "private/thread.h" 51 52 #include "private/fibril.h" 52 53 #include "private/libc.h" 53 54 54 /** 55 * This futex serializes access to ready_list, 56 * manager_list and fibril_list. 57 */ 55 #define DPRINTF(...) ((void)0) 56 57 /** Member of timeout_list. */ 58 typedef struct { 59 link_t link; 60 struct timeval expires; 61 fibril_event_t *event; 62 } _timeout_t; 63 64 typedef struct { 65 errno_t rc; 66 link_t link; 67 ipc_call_t *call; 68 fibril_event_t event; 69 } _ipc_waiter_t; 70 71 typedef struct { 72 errno_t rc; 73 link_t link; 74 ipc_call_t call; 75 } _ipc_buffer_t; 76 77 typedef enum { 78 SWITCH_FROM_DEAD, 79 SWITCH_FROM_HELPER, 80 SWITCH_FROM_YIELD, 81 SWITCH_FROM_BLOCKED, 82 } _switch_type_t; 83 84 static bool multithreaded = false; 85 86 /* This futex serializes access to global data. */ 58 87 static futex_t fibril_futex = FUTEX_INITIALIZER; 88 static futex_t ready_semaphore = FUTEX_INITIALIZE(0); 59 89 60 90 static LIST_INITIALIZE(ready_list); 61 static LIST_INITIALIZE(manager_list);62 91 static LIST_INITIALIZE(fibril_list); 92 static LIST_INITIALIZE(timeout_list); 93 94 static futex_t ipc_lists_futex = FUTEX_INITIALIZER; 95 static LIST_INITIALIZE(ipc_waiter_list); 96 static LIST_INITIALIZE(ipc_buffer_list); 97 static LIST_INITIALIZE(ipc_buffer_free_list); 98 99 /* Only used as unique markers for triggered events. */ 100 static fibril_t _fibril_event_triggered; 101 static fibril_t _fibril_event_timed_out; 102 #define _EVENT_INITIAL (NULL) 103 #define _EVENT_TRIGGERED (&_fibril_event_triggered) 104 #define _EVENT_TIMED_OUT (&_fibril_event_timed_out) 105 106 static atomic_t threads_in_ipc_wait = { 0 }; 63 107 64 108 /** Function that spans the whole life-cycle of a fibril. … … 69 113 * 70 114 */ 71 static void fibril_main(void)72 { 73 /* fibril_futex and async_futex arelocked when a fibril is started. */115 static void _fibril_main(void) 116 { 117 /* fibril_futex is locked when a fibril is started. */ 74 118 futex_unlock(&fibril_futex); 75 futex_unlock(&async_futex);76 119 77 120 fibril_t *fibril = fibril_self(); 78 121 79 122 /* Call the implementing function. */ 80 fibril->retval = fibril->func(fibril->arg); 81 82 futex_lock(&async_futex); 83 fibril_switch(FIBRIL_FROM_DEAD); 123 fibril_exit(fibril->func(fibril->arg)); 124 84 125 /* Not reached */ 85 126 } … … 116 157 } 117 158 118 void fibril_teardown(fibril_t *fibril, bool locked) 119 { 120 if (!locked) 121 futex_lock(&fibril_futex); 159 void fibril_teardown(fibril_t *fibril) 160 { 161 futex_lock(&fibril_futex); 122 162 list_remove(&fibril->all_link); 123 if (!locked) 124 futex_unlock(&fibril_futex); 163 futex_unlock(&fibril_futex); 125 164 126 165 if (fibril->is_freeable) { … … 130 169 } 131 170 132 /** Switch from the current fibril. 133 * 134 * The async_futex must be held when entering this function, 135 * and is still held on return. 136 * 137 * @param stype Switch type. One of FIBRIL_PREEMPT, FIBRIL_TO_MANAGER, 138 * FIBRIL_FROM_MANAGER, FIBRIL_FROM_DEAD. 
The parameter 139 * describes the circumstances of the switch. 140 * 141 * @return 0 if there is no ready fibril, 142 * @return 1 otherwise. 143 * 144 */ 145 int fibril_switch(fibril_switch_type_t stype) 146 { 147 /* Make sure the async_futex is held. */ 148 futex_assert_is_locked(&async_futex); 171 /** 172 * Event notification with a given reason. 173 * 174 * @param reason Reason of the notification. 175 * Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT. 176 */ 177 static fibril_t *_fibril_trigger_internal(fibril_event_t *event, fibril_t *reason) 178 { 179 assert(reason != _EVENT_INITIAL); 180 assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED); 181 182 futex_assert_is_locked(&fibril_futex); 183 184 if (event->fibril == _EVENT_INITIAL) { 185 event->fibril = reason; 186 return NULL; 187 } 188 189 if (event->fibril == _EVENT_TIMED_OUT) { 190 assert(reason == _EVENT_TRIGGERED); 191 event->fibril = reason; 192 return NULL; 193 } 194 195 if (event->fibril == _EVENT_TRIGGERED) { 196 /* Already triggered. Nothing to do. */ 197 return NULL; 198 } 199 200 fibril_t *f = event->fibril; 201 event->fibril = reason; 202 203 assert(f->sleep_event == event); 204 return f; 205 } 206 207 static errno_t _ipc_wait(ipc_call_t *call, const struct timeval *expires) 208 { 209 if (!expires) 210 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 211 212 if (expires->tv_sec == 0) 213 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING); 214 215 struct timeval now; 216 getuptime(&now); 217 218 if (tv_gteq(&now, expires)) 219 return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING); 220 221 return ipc_wait(call, tv_sub_diff(expires, &now), SYNCH_FLAGS_NONE); 222 } 223 224 /* 225 * Waits until a ready fibril is added to the list, or an IPC message arrives. 226 * Returns NULL on timeout and may also return NULL if returning from IPC 227 * wait after new ready fibrils are added. 228 */ 229 static fibril_t *_ready_list_pop(const struct timeval *expires, bool locked) 230 { 231 if (locked) { 232 futex_assert_is_locked(&fibril_futex); 233 assert(expires); 234 /* Must be nonblocking. */ 235 assert(expires->tv_sec == 0); 236 } else { 237 futex_assert_is_not_locked(&fibril_futex); 238 } 239 240 if (!multithreaded) { 241 /* 242 * The number of available tokens is always equal to the number 243 * of fibrils in the ready list + the number of free IPC buffer 244 * buckets. 245 */ 246 247 assert(atomic_get(&ready_semaphore.val) == 248 list_count(&ready_list) + list_count(&ipc_buffer_free_list)); 249 } 250 251 errno_t rc = futex_down_timeout(&ready_semaphore, expires); 252 253 if (rc != EOK) 254 return NULL; 255 256 /* 257 * Once we acquire a token from ready_semaphore, there are two options. 258 * Either there is a ready fibril in the list, or it's our turn to 259 * call `ipc_wait_cycle()`. There is one extra token on the semaphore 260 * for each entry of the call buffer. 261 */ 262 263 264 if (!locked) 265 futex_lock(&fibril_futex); 266 fibril_t *f = list_pop(&ready_list, fibril_t, link); 267 if (!f) 268 atomic_inc(&threads_in_ipc_wait); 269 if (!locked) 270 futex_unlock(&fibril_futex); 271 272 if (f) 273 return f; 274 275 if (!multithreaded) 276 assert(list_empty(&ipc_buffer_list)); 277 278 /* No fibril is ready, IPC wait it is. */ 279 ipc_call_t call = { 0 }; 280 rc = _ipc_wait(&call, expires); 281 282 atomic_dec(&threads_in_ipc_wait); 283 284 if (rc != EOK && rc != ENOENT) { 285 /* Return token. 
*/ 286 futex_up(&ready_semaphore); 287 return NULL; 288 } 289 290 /* 291 * We might get ENOENT due to a poke. 292 * In that case, we propagate the null call out of fibril_ipc_wait(), 293 * because poke must result in that call returning. 294 */ 295 296 /* 297 * If a fibril is already waiting for IPC, we wake up the fibril, 298 * and return the token to ready_semaphore. 299 * If there is no fibril waiting, we pop a buffer bucket and 300 * put our call there. The token then returns when the bucket is 301 * returned. 302 */ 303 304 if (!locked) 305 futex_lock(&fibril_futex); 306 307 futex_lock(&ipc_lists_futex); 308 309 310 _ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link); 311 if (w) { 312 *w->call = call; 313 w->rc = rc; 314 /* We switch to the woken up fibril immediately if possible. */ 315 f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED); 316 317 /* Return token. */ 318 futex_up(&ready_semaphore); 319 } else { 320 _ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list, _ipc_buffer_t, link); 321 assert(buf); 322 *buf = (_ipc_buffer_t) { .call = call, .rc = rc }; 323 list_append(&buf->link, &ipc_buffer_list); 324 } 325 326 futex_unlock(&ipc_lists_futex); 327 328 if (!locked) 329 futex_unlock(&fibril_futex); 330 331 return f; 332 } 333 334 static fibril_t *_ready_list_pop_nonblocking(bool locked) 335 { 336 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; 337 return _ready_list_pop(&tv, locked); 338 } 339 340 static void _ready_list_push(fibril_t *f) 341 { 342 if (!f) 343 return; 344 345 futex_assert_is_locked(&fibril_futex); 346 347 /* Enqueue in ready_list. */ 348 list_append(&f->link, &ready_list); 349 futex_up(&ready_semaphore); 350 351 if (atomic_get(&threads_in_ipc_wait)) { 352 DPRINTF("Poking.\n"); 353 /* Wakeup one thread sleeping in SYS_IPC_WAIT. */ 354 ipc_poke(); 355 } 356 } 357 358 /* Blocks the current fibril until an IPC call arrives. */ 359 static errno_t _wait_ipc(ipc_call_t *call, const struct timeval *expires) 360 { 361 futex_assert_is_not_locked(&fibril_futex); 362 363 futex_lock(&ipc_lists_futex); 364 _ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link); 365 if (buf) { 366 *call = buf->call; 367 errno_t rc = buf->rc; 368 369 /* Return to freelist. */ 370 list_append(&buf->link, &ipc_buffer_free_list); 371 /* Return IPC wait token. */ 372 futex_up(&ready_semaphore); 373 374 futex_unlock(&ipc_lists_futex); 375 return rc; 376 } 377 378 _ipc_waiter_t w = { .call = call }; 379 list_append(&w.link, &ipc_waiter_list); 380 futex_unlock(&ipc_lists_futex); 381 382 errno_t rc = fibril_wait_timeout(&w.event, expires); 383 if (rc == EOK) 384 return w.rc; 385 386 futex_lock(&ipc_lists_futex); 387 if (link_in_use(&w.link)) 388 list_remove(&w.link); 389 else 390 rc = w.rc; 391 futex_unlock(&ipc_lists_futex); 392 return rc; 393 } 394 395 /** Fire all timeouts that expired. 
*/ 396 static struct timeval *_handle_expired_timeouts(struct timeval *next_timeout) 397 { 398 struct timeval tv; 399 getuptime(&tv); 149 400 150 401 futex_lock(&fibril_futex); 151 402 403 while (!list_empty(&timeout_list)) { 404 link_t *cur = list_first(&timeout_list); 405 _timeout_t *to = list_get_instance(cur, _timeout_t, link); 406 407 if (tv_gt(&to->expires, &tv)) { 408 *next_timeout = to->expires; 409 futex_unlock(&fibril_futex); 410 return next_timeout; 411 } 412 413 list_remove(&to->link); 414 415 _ready_list_push(_fibril_trigger_internal( 416 to->event, _EVENT_TIMED_OUT)); 417 } 418 419 futex_unlock(&fibril_futex); 420 return NULL; 421 } 422 423 /** 424 * Clean up after a dead fibril from which we restored context, if any. 425 * Called after a switch is made and fibril_futex is unlocked. 426 */ 427 static void _fibril_cleanup_dead(void) 428 { 152 429 fibril_t *srcf = fibril_self(); 153 fibril_t *dstf = NULL;154 155 /* Choose a new fibril to run */ 156 if (list_empty(&ready_list)) {157 if (stype == FIBRIL_PREEMPT || stype == FIBRIL_FROM_MANAGER) {158 // FIXME: This means that as long as there is a fibril159 // that only yields, IPC messages are never retrieved.160 futex_unlock(&fibril_futex);161 return 0; 162 } 163 164 /* If we are going to manager and none exists, create it */ 165 while (list_empty(&manager_list)){166 futex_unlock(&fibril_futex);167 async_create_manager();168 futex_lock(&fibril_futex);169 }170 171 dstf = list_get_instance(list_first(&manager_list),172 fibril_t, link);173 } else {174 dstf = list_get_instance(list_first(&ready_list), fibril_t, 175 link);176 }177 178 list_remove(&dstf->link);179 if (stype == FIBRIL_FROM_DEAD)430 if (!srcf->clean_after_me) 431 return; 432 433 void *stack = srcf->clean_after_me->stack; 434 assert(stack); 435 as_area_destroy(stack); 436 fibril_teardown(srcf->clean_after_me); 437 srcf->clean_after_me = NULL; 438 } 439 440 /** Switch to a fibril. */ 441 static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked) 442 { 443 if (!locked) 444 futex_lock(&fibril_futex); 445 else 446 futex_assert_is_locked(&fibril_futex); 447 448 fibril_t *srcf = fibril_self(); 449 assert(srcf); 450 assert(dstf); 451 452 switch (type) { 453 case SWITCH_FROM_YIELD: 454 _ready_list_push(srcf); 455 break; 456 case SWITCH_FROM_DEAD: 180 457 dstf->clean_after_me = srcf; 181 182 /* Put the current fibril into the correct run list */183 switch (stype) {184 case FIBRIL_PREEMPT:185 list_append(&srcf->link, &ready_list);186 458 break; 187 case FIBRIL_FROM_MANAGER:188 list_append(&srcf->link, &manager_list);459 case SWITCH_FROM_HELPER: 460 case SWITCH_FROM_BLOCKED: 189 461 break; 190 case FIBRIL_FROM_DEAD: 191 case FIBRIL_FROM_BLOCKED: 192 // Nothing. 193 break; 194 } 195 196 /* Bookkeeping. */ 462 } 463 464 dstf->thread_ctx = srcf->thread_ctx; 465 srcf->thread_ctx = NULL; 466 467 /* Just some bookkeeping to allow better debugging of futex locks. */ 197 468 futex_give_to(&fibril_futex, dstf); 198 futex_give_to(&async_futex, dstf);199 469 200 470 /* Swap to the next fibril. */ 201 471 context_swap(&srcf->ctx, &dstf->ctx); 202 472 203 /* Restored by another fibril! */ 204 205 /* Must be after context_swap()! */ 206 futex_unlock(&fibril_futex); 207 208 if (srcf->clean_after_me) { 209 /* 210 * Cleanup after the dead fibril from which we 211 * restored context here. 
212 */ 213 void *stack = srcf->clean_after_me->stack; 214 if (stack) { 215 /* 216 * This check is necessary because a 217 * thread could have exited like a 218 * normal fibril using the 219 * FIBRIL_FROM_DEAD switch type. In that 220 * case, its fibril will not have the 221 * stack member filled. 222 */ 223 as_area_destroy(stack); 473 assert(srcf == fibril_self()); 474 assert(srcf->thread_ctx); 475 476 if (!locked) { 477 /* Must be after context_swap()! */ 478 futex_unlock(&fibril_futex); 479 _fibril_cleanup_dead(); 480 } 481 } 482 483 /** 484 * Main function for a helper fibril. 485 * The helper fibril executes on threads in the lightweight fibril pool when 486 * there is no fibril ready to run. Its only purpose is to block until 487 * another fibril is ready, or a timeout expires, or an IPC message arrives. 488 * 489 * There is at most one helper fibril per thread. 490 * 491 */ 492 static errno_t _helper_fibril_fn(void *arg) 493 { 494 /* Set itself as the thread's own context. */ 495 fibril_self()->thread_ctx = fibril_self(); 496 497 (void) arg; 498 499 struct timeval next_timeout; 500 while (true) { 501 struct timeval *to = _handle_expired_timeouts(&next_timeout); 502 fibril_t *f = _ready_list_pop(to, false); 503 if (f) { 504 _fibril_switch_to(SWITCH_FROM_HELPER, f, false); 224 505 } 225 fibril_teardown(srcf->clean_after_me, true); 226 srcf->clean_after_me = NULL; 227 } 228 229 return 1; 506 } 507 508 return EOK; 230 509 } 231 510 … … 247 526 return 0; 248 527 249 size_tstack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?528 fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ? 250 529 stack_size_get() : stksz; 251 fibril->stack = as_area_create(AS_AREA_ANY, stack_size,530 fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size, 252 531 AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD | 253 532 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED); 254 533 if (fibril->stack == AS_MAP_FAILED) { 255 fibril_teardown(fibril , false);534 fibril_teardown(fibril); 256 535 return 0; 257 536 } … … 261 540 262 541 context_create_t sctx = { 263 .fn = fibril_main,542 .fn = _fibril_main, 264 543 .stack_base = fibril->stack, 265 .stack_size = stack_size,544 .stack_size = fibril->stack_size, 266 545 .tls = fibril->tcb, 267 546 }; … … 274 553 * 275 554 * Free resources of a fibril that has been created with fibril_create() 276 * but never readied using fibril_add_ready().555 * but never started using fibril_start(). 277 556 * 278 557 * @param fid Pointer to the fibril structure of the fibril to be … … 283 562 fibril_t *fibril = (fibril_t *) fid; 284 563 564 assert(!fibril->is_running); 565 assert(fibril->stack); 285 566 as_area_destroy(fibril->stack); 286 fibril_teardown(fibril, false); 287 } 288 289 /** Add a fibril to the ready list. 290 * 291 * @param fid Pointer to the fibril structure of the fibril to be 292 * added. 293 * 294 */ 295 void fibril_add_ready(fid_t fid) 296 { 297 fibril_t *fibril = (fibril_t *) fid; 567 fibril_teardown(fibril); 568 } 569 570 static void _insert_timeout(_timeout_t *timeout) 571 { 572 futex_assert_is_locked(&fibril_futex); 573 assert(timeout); 574 575 link_t *tmp = timeout_list.head.next; 576 while (tmp != &timeout_list.head) { 577 _timeout_t *cur = list_get_instance(tmp, _timeout_t, link); 578 579 if (tv_gteq(&cur->expires, &timeout->expires)) 580 break; 581 582 tmp = tmp->next; 583 } 584 585 list_insert_before(&timeout->link, tmp); 586 } 587 588 /** 589 * Same as `fibril_wait_for()`, except with a timeout. 
590 * 591 * It is guaranteed that timing out cannot cause another thread's 592 * `fibril_notify()` to be lost. I.e. the function returns success if and 593 * only if `fibril_notify()` was called after the last call to 594 * wait/wait_timeout returned, and before the call timed out. 595 * 596 * @return ETIMEOUT if timed out. EOK otherwise. 597 */ 598 errno_t fibril_wait_timeout(fibril_event_t *event, const struct timeval *expires) 599 { 600 DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event); 601 602 if (!fibril_self()->thread_ctx) { 603 fibril_self()->thread_ctx = 604 fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE); 605 if (!fibril_self()->thread_ctx) 606 return ENOMEM; 607 } 298 608 299 609 futex_lock(&fibril_futex); 300 list_append(&fibril->link, &ready_list); 610 611 if (event->fibril == _EVENT_TRIGGERED) { 612 DPRINTF("### Already triggered. Returning. \n"); 613 event->fibril = _EVENT_INITIAL; 614 futex_unlock(&fibril_futex); 615 return EOK; 616 } 617 618 assert(event->fibril == _EVENT_INITIAL); 619 620 fibril_t *srcf = fibril_self(); 621 fibril_t *dstf = NULL; 622 623 /* 624 * We cannot block here waiting for another fibril becoming 625 * ready, since that would require unlocking the fibril_futex, 626 * and that in turn would allow another thread to restore 627 * the source fibril before this thread finished switching. 628 * 629 * Instead, we switch to an internal "helper" fibril whose only 630 * job is to wait for an event, freeing the source fibril for 631 * wakeups. There is always one for each running thread. 632 */ 633 634 dstf = _ready_list_pop_nonblocking(true); 635 if (!dstf) { 636 // XXX: It is possible for the _ready_list_pop_nonblocking() to 637 // check for IPC, find a pending message, and trigger the 638 // event on which we are currently trying to sleep. 639 if (event->fibril == _EVENT_TRIGGERED) { 640 event->fibril = _EVENT_INITIAL; 641 futex_unlock(&fibril_futex); 642 return EOK; 643 } 644 645 dstf = srcf->thread_ctx; 646 assert(dstf); 647 } 648 649 _timeout_t timeout = { 0 }; 650 if (expires) { 651 timeout.expires = *expires; 652 timeout.event = event; 653 _insert_timeout(&timeout); 654 } 655 656 assert(srcf); 657 658 event->fibril = srcf; 659 srcf->sleep_event = event; 660 661 assert(event->fibril != _EVENT_INITIAL); 662 663 _fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true); 664 665 assert(event->fibril != srcf); 666 assert(event->fibril != _EVENT_INITIAL); 667 assert(event->fibril == _EVENT_TIMED_OUT || event->fibril == _EVENT_TRIGGERED); 668 669 list_remove(&timeout.link); 670 errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK; 671 event->fibril = _EVENT_INITIAL; 672 301 673 futex_unlock(&fibril_futex); 302 } 303 304 /** Add a fibril to the manager list. 305 * 306 * @param fid Pointer to the fibril structure of the fibril to be 307 * added. 308 * 309 */ 310 void fibril_add_manager(fid_t fid) 311 { 312 fibril_t *fibril = (fibril_t *) fid; 313 674 _fibril_cleanup_dead(); 675 return rc; 676 } 677 678 void fibril_wait_for(fibril_event_t *event) 679 { 680 (void) fibril_wait_timeout(event, NULL); 681 } 682 683 void fibril_notify(fibril_event_t *event) 684 { 314 685 futex_lock(&fibril_futex); 315 list_append(&fibril->link, &manager_list);686 _ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED)); 316 687 futex_unlock(&fibril_futex); 317 688 } 318 689 319 /** Remove one manager from the manager list. */320 void fibril_ remove_manager(void)690 /** Start a fibril that has not been running yet. 
*/ 691 void fibril_start(fibril_t *fibril) 321 692 { 322 693 futex_lock(&fibril_futex); 323 if (!list_empty(&manager_list)) 324 list_remove(list_first(&manager_list)); 694 assert(!fibril->is_running); 695 fibril->is_running = true; 696 697 if (!link_in_use(&fibril->all_link)) 698 list_append(&fibril->all_link, &fibril_list); 699 700 _ready_list_push(fibril); 701 325 702 futex_unlock(&fibril_futex); 326 703 } 327 704 705 /** Start a fibril that has not been running yet. (obsolete) */ 706 void fibril_add_ready(fibril_t *fibril) 707 { 708 fibril_start(fibril); 709 } 710 711 /** @return the currently running fibril. */ 328 712 fibril_t *fibril_self(void) 329 713 { … … 334 718 } 335 719 336 /** Return fibril id of the currently running fibril.337 * 338 * @return fibril ID of the currently running fibril.339 * 720 /** 721 * Obsolete, use fibril_self(). 722 * 723 * @return ID of the currently running fibril. 340 724 */ 341 725 fid_t fibril_get_id(void) … … 344 728 } 345 729 730 /** 731 * Switch to another fibril, if one is ready to run. 732 * Has no effect on a heavy fibril. 733 */ 346 734 void fibril_yield(void) 347 735 { 348 f utex_lock(&async_futex);349 (void) fibril_switch(FIBRIL_PREEMPT);350 futex_unlock(&async_futex);736 fibril_t *f = _ready_list_pop_nonblocking(false); 737 if (f) 738 _fibril_switch_to(SWITCH_FROM_YIELD, f, false); 351 739 } 352 740 353 741 static void _runner_fn(void *arg) 354 742 { 355 futex_lock(&async_futex); 356 (void) fibril_switch(FIBRIL_FROM_BLOCKED); 357 __builtin_unreachable(); 743 _helper_fibril_fn(arg); 358 744 } 359 745 … … 368 754 int fibril_test_spawn_runners(int n) 369 755 { 756 if (!multithreaded) 757 multithreaded = true; 758 370 759 errno_t rc; 371 760 … … 394 783 // TODO: Implement better. 395 784 // For now, 4 total runners is a sensible default. 396 fibril_test_spawn_runners(3); 785 if (!multithreaded) { 786 fibril_test_spawn_runners(3); 787 } 397 788 } 398 789 … … 407 798 } 408 799 800 /** 801 * Exit a fibril. Never returns. 802 * 803 * @param retval Value to return from fibril_join() called on this fibril. 804 */ 805 _Noreturn void fibril_exit(long retval) 806 { 807 // TODO: implement fibril_join() and remember retval 808 (void) retval; 809 810 fibril_t *f = _ready_list_pop_nonblocking(false); 811 if (!f) 812 f = fibril_self()->thread_ctx; 813 814 _fibril_switch_to(SWITCH_FROM_DEAD, f, false); 815 __builtin_unreachable(); 816 } 817 818 void __fibrils_init(void) 819 { 820 /* 821 * We allow a fixed, small amount of parallelism for IPC reads, but 822 * since IPC is currently serialized in kernel, there's not much 823 * we can get from more threads reading messages. 824 */ 825 826 #define IPC_BUFFER_COUNT 1024 827 static _ipc_buffer_t buffers[IPC_BUFFER_COUNT]; 828 829 for (int i = 0; i < IPC_BUFFER_COUNT; i++) { 830 list_append(&buffers[i].link, &ipc_buffer_free_list); 831 futex_up(&ready_semaphore); 832 } 833 } 834 835 void fibril_usleep(suseconds_t timeout) 836 { 837 struct timeval expires; 838 getuptime(&expires); 839 tv_add_diff(&expires, timeout); 840 841 fibril_event_t event = FIBRIL_EVENT_INIT; 842 fibril_wait_timeout(&event, &expires); 843 } 844 845 void fibril_sleep(unsigned int sec) 846 { 847 struct timeval expires; 848 getuptime(&expires); 849 expires.tv_sec += sec; 850 851 fibril_event_t event = FIBRIL_EVENT_INIT; 852 fibril_wait_timeout(&event, &expires); 853 } 854 855 void fibril_ipc_poke(void) 856 { 857 DPRINTF("Poking.\n"); 858 /* Wakeup one thread sleeping in SYS_IPC_WAIT. 
*/ 859 ipc_poke(); 860 } 861 862 errno_t fibril_ipc_wait(ipc_call_t *call, const struct timeval *expires) 863 { 864 return _wait_ipc(call, expires); 865 } 866 409 867 /** @} 410 868 */ -
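The new core primitive is the one-word fibril_event_t: fibril_wait_for() parks the current fibril in the event, fibril_notify() either readies the parked fibril or marks the event triggered, and a pending trigger is consumed by the next wait, so a notification cannot be lost. A minimal handshake sketch using only the functions introduced or kept by this file:

#include <fibril.h>
#include <errno.h>
#include <stdio.h>

static fibril_event_t done = FIBRIL_EVENT_INIT;

static errno_t worker(void *arg)
{
	(void) arg;
	printf("worker: finished\n");
	fibril_notify(&done);  /* readies the waiter, or pre-triggers the event */
	return EOK;
}

int main(void)
{
	fid_t f = fibril_create(worker, NULL);
	if (f == 0)
		return 1;

	fibril_start(f);
	fibril_wait_for(&done);  /* returns once worker has notified */
	return 0;
}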
uspace/lib/c/generic/fibril_synch.c
r7137f74c r514d561 45 45 #include <stdio.h> 46 46 #include <io/kio.h> 47 #include <mem.h> 48 #include <context.h> 47 49 48 50 #include "private/async.h" … … 51 53 static fibril_local bool deadlocked = false; 52 54 53 static void optimize_execution_power(void) 54 { 55 /* 56 * When waking up a worker fibril previously blocked in fibril 57 * synchronization, chances are that there is an idle manager fibril 58 * waiting for IPC, that could start executing the awakened worker 59 * fibril right away. We try to detect this and bring the manager 60 * fibril back to fruitful work. 61 */ 62 async_poke(); 63 } 55 typedef struct { 56 link_t link; 57 fibril_event_t event; 58 fibril_mutex_t *mutex; 59 fid_t fid; 60 } awaiter_t; 61 62 #define AWAITER_INIT { .fid = fibril_get_id() } 64 63 65 64 static void print_deadlock(fibril_owner_info_t *oi) 66 65 { 66 // FIXME: Print to stderr. 67 67 68 fibril_t *f = (fibril_t *) fibril_get_id(); 68 69 … … 93 94 94 95 95 static void check_for_deadlock(fibril_owner_info_t *oi) 96 { 96 static void check_fibril_for_deadlock(fibril_owner_info_t *oi, fibril_t *fib) 97 { 98 futex_assert_is_locked(&async_futex); 99 97 100 while (oi && oi->owned_by) { 98 if (oi->owned_by == (fibril_t *) fibril_get_id()) { 101 if (oi->owned_by == fib) { 102 futex_unlock(&async_futex); 99 103 print_deadlock(oi); 100 104 abort(); … … 104 108 } 105 109 110 static void check_for_deadlock(fibril_owner_info_t *oi) 111 { 112 check_fibril_for_deadlock(oi, fibril_self()); 113 } 106 114 107 115 void fibril_mutex_initialize(fibril_mutex_t *fm) … … 117 125 118 126 futex_lock(&async_futex); 119 if (fm->counter-- <= 0) { 120 awaiter_t wdata; 121 122 awaiter_initialize(&wdata); 123 wdata.fid = fibril_get_id(); 124 wdata.wu_event.inlist = true; 125 list_append(&wdata.wu_event.link, &fm->waiters); 126 check_for_deadlock(&fm->oi); 127 f->waits_for = &fm->oi; 128 fibril_switch(FIBRIL_FROM_BLOCKED); 129 } else { 127 128 if (fm->counter-- > 0) { 130 129 fm->oi.owned_by = f; 131 } 132 futex_unlock(&async_futex); 130 futex_unlock(&async_futex); 131 return; 132 } 133 134 awaiter_t wdata = AWAITER_INIT; 135 list_append(&wdata.link, &fm->waiters); 136 check_for_deadlock(&fm->oi); 137 f->waits_for = &fm->oi; 138 139 futex_unlock(&async_futex); 140 141 fibril_wait_for(&wdata.event); 133 142 } 134 143 … … 150 159 static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm) 151 160 { 161 assert(fm->oi.owned_by == (fibril_t *) fibril_get_id()); 162 152 163 if (fm->counter++ < 0) { 153 link_t *tmp; 154 awaiter_t *wdp; 155 fibril_t *f; 156 157 tmp = list_first(&fm->waiters); 158 assert(tmp != NULL); 159 wdp = list_get_instance(tmp, awaiter_t, wu_event.link); 160 wdp->active = true; 161 wdp->wu_event.inlist = false; 162 163 f = (fibril_t *) wdp->fid; 164 awaiter_t *wdp = list_pop(&fm->waiters, awaiter_t, link); 165 assert(wdp); 166 167 fibril_t *f = (fibril_t *) wdp->fid; 164 168 fm->oi.owned_by = f; 165 169 f->waits_for = NULL; 166 170 167 list_remove(&wdp->wu_event.link); 168 fibril_add_ready(wdp->fid); 169 optimize_execution_power(); 171 fibril_notify(&wdp->event); 170 172 } else { 171 173 fm->oi.owned_by = NULL; … … 175 177 void fibril_mutex_unlock(fibril_mutex_t *fm) 176 178 { 177 assert(fibril_mutex_is_locked(fm));178 179 futex_lock(&async_futex); 179 180 _fibril_mutex_unlock_unsafe(fm); … … 183 184 bool fibril_mutex_is_locked(fibril_mutex_t *fm) 184 185 { 185 bool locked = false; 186 187 futex_lock(&async_futex); 188 if (fm->counter <= 0) 189 locked = true; 190 futex_unlock(&async_futex); 191 186 
futex_lock(&async_futex); 187 bool locked = (fm->oi.owned_by == (fibril_t *) fibril_get_id()); 188 futex_unlock(&async_futex); 192 189 return locked; 193 190 } … … 206 203 207 204 futex_lock(&async_futex); 208 if (frw->writers) { 209 awaiter_t wdata; 210 211 awaiter_initialize(&wdata); 212 wdata.fid = (fid_t) f; 213 wdata.wu_event.inlist = true; 214 f->is_writer = false; 215 list_append(&wdata.wu_event.link, &frw->waiters); 216 check_for_deadlock(&frw->oi); 217 f->waits_for = &frw->oi; 218 fibril_switch(FIBRIL_FROM_BLOCKED); 219 } else { 205 206 if (!frw->writers) { 220 207 /* Consider the first reader the owner. */ 221 208 if (frw->readers++ == 0) 222 209 frw->oi.owned_by = f; 223 } 224 futex_unlock(&async_futex); 210 futex_unlock(&async_futex); 211 return; 212 } 213 214 f->is_writer = false; 215 216 awaiter_t wdata = AWAITER_INIT; 217 list_append(&wdata.link, &frw->waiters); 218 check_for_deadlock(&frw->oi); 219 f->waits_for = &frw->oi; 220 221 futex_unlock(&async_futex); 222 223 fibril_wait_for(&wdata.event); 225 224 } 226 225 … … 230 229 231 230 futex_lock(&async_futex); 232 if (frw->writers || frw->readers) { 233 awaiter_t wdata; 234 235 awaiter_initialize(&wdata); 236 wdata.fid = (fid_t) f; 237 wdata.wu_event.inlist = true; 238 f->is_writer = true; 239 list_append(&wdata.wu_event.link, &frw->waiters); 240 check_for_deadlock(&frw->oi); 241 f->waits_for = &frw->oi; 242 fibril_switch(FIBRIL_FROM_BLOCKED); 243 } else { 231 232 if (!frw->writers && !frw->readers) { 244 233 frw->oi.owned_by = f; 245 234 frw->writers++; 246 } 247 futex_unlock(&async_futex); 235 futex_unlock(&async_futex); 236 return; 237 } 238 239 f->is_writer = true; 240 241 awaiter_t wdata = AWAITER_INIT; 242 list_append(&wdata.link, &frw->waiters); 243 check_for_deadlock(&frw->oi); 244 f->waits_for = &frw->oi; 245 246 futex_unlock(&async_futex); 247 248 fibril_wait_for(&wdata.event); 248 249 } 249 250 250 251 static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw) 251 252 { 252 futex_lock(&async_futex);253 253 if (frw->readers) { 254 254 if (--frw->readers) { 255 255 if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) { 256 256 /* 257 * If this reader fi rbril was considered the257 * If this reader fibril was considered the 258 258 * owner of this rwlock, clear the ownership 259 259 * information even if there are still more … … 267 267 frw->oi.owned_by = NULL; 268 268 } 269 goto out; 269 270 return; 270 271 } 271 272 } else { … … 282 283 fibril_t *f; 283 284 284 wdp = list_get_instance(tmp, awaiter_t, wu_event.link);285 wdp = list_get_instance(tmp, awaiter_t, link); 285 286 f = (fibril_t *) wdp->fid; 286 287 f->waits_for = NULL;288 287 289 288 if (f->is_writer) { 290 289 if (frw->readers) 291 290 break; 292 wdp->active = true;293 wdp->wu_event.inlist = false;294 list_remove(&wdp->wu_event.link);295 fibril_add_ready(wdp->fid);296 291 frw->writers++; 297 frw->oi.owned_by = f; 298 optimize_execution_power(); 292 } else { 293 frw->readers++; 294 } 295 296 f->waits_for = NULL; 297 list_remove(&wdp->link); 298 frw->oi.owned_by = f; 299 fibril_notify(&wdp->event); 300 301 if (frw->writers) 299 302 break; 300 } else { 301 wdp->active = true; 302 wdp->wu_event.inlist = false; 303 list_remove(&wdp->wu_event.link); 304 fibril_add_ready(wdp->fid); 305 if (frw->readers++ == 0) { 306 /* Consider the first reader the owner. 
*/ 307 frw->oi.owned_by = f; 308 } 309 optimize_execution_power(); 310 } 311 } 312 out: 313 futex_unlock(&async_futex); 303 } 314 304 } 315 305 316 306 void fibril_rwlock_read_unlock(fibril_rwlock_t *frw) 317 307 { 318 assert(fibril_rwlock_is_read_locked(frw)); 308 futex_lock(&async_futex); 309 assert(frw->readers > 0); 319 310 _fibril_rwlock_common_unlock(frw); 311 futex_unlock(&async_futex); 320 312 } 321 313 322 314 void fibril_rwlock_write_unlock(fibril_rwlock_t *frw) 323 315 { 324 assert(fibril_rwlock_is_write_locked(frw)); 316 futex_lock(&async_futex); 317 assert(frw->writers == 1); 318 assert(frw->oi.owned_by == fibril_self()); 325 319 _fibril_rwlock_common_unlock(frw); 320 futex_unlock(&async_futex); 326 321 } 327 322 328 323 bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw) 329 324 { 330 bool locked = false; 331 332 futex_lock(&async_futex); 333 if (frw->readers) 334 locked = true; 335 futex_unlock(&async_futex); 336 325 futex_lock(&async_futex); 326 bool locked = (frw->readers > 0); 327 futex_unlock(&async_futex); 337 328 return locked; 338 329 } … … 340 331 bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw) 341 332 { 342 bool locked = false; 343 344 futex_lock(&async_futex); 345 if (frw->writers) { 346 assert(frw->writers == 1); 347 locked = true; 348 } 349 futex_unlock(&async_futex); 350 333 futex_lock(&async_futex); 334 assert(frw->writers <= 1); 335 bool locked = (frw->writers > 0) && (frw->oi.owned_by == fibril_self()); 336 futex_unlock(&async_futex); 351 337 return locked; 352 338 } … … 363 349 } 364 350 351 /** 352 * FIXME: If `timeout` is negative, the function returns ETIMEOUT immediately, 353 * and if `timeout` is 0, the wait never times out. 354 * This is not consistent with other similar APIs. 355 */ 365 356 errno_t 366 357 fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm, 367 358 suseconds_t timeout) 368 359 { 369 awaiter_t wdata;370 371 360 assert(fibril_mutex_is_locked(fm)); 372 361 … … 374 363 return ETIMEOUT; 375 364 376 awaiter_initialize(&wdata); 377 wdata.fid = fibril_get_id(); 378 wdata.to_event.inlist = timeout > 0; 379 wdata.wu_event.inlist = true; 380 381 futex_lock(&async_futex); 365 awaiter_t wdata = AWAITER_INIT; 366 wdata.mutex = fm; 367 368 struct timeval tv; 369 struct timeval *expires = NULL; 382 370 if (timeout) { 383 getuptime(&wdata.to_event.expires); 384 tv_add_diff(&wdata.to_event.expires, timeout); 385 async_insert_timeout(&wdata); 386 } 387 list_append(&wdata.wu_event.link, &fcv->waiters); 371 getuptime(&tv); 372 tv_add_diff(&tv, timeout); 373 expires = &tv; 374 } 375 376 futex_lock(&async_futex); 388 377 _fibril_mutex_unlock_unsafe(fm); 389 fibril_switch(FIBRIL_FROM_BLOCKED); 390 futex_unlock(&async_futex); 391 392 // XXX: This could be replaced with an unlocked version to get rid 393 // of the unlock-lock pair. I deliberately don't do that because 394 // further changes would most likely need to revert that optimization. 378 list_append(&wdata.link, &fcv->waiters); 379 futex_unlock(&async_futex); 380 381 (void) fibril_wait_timeout(&wdata.event, expires); 382 383 futex_lock(&async_futex); 384 bool timed_out = link_in_use(&wdata.link); 385 list_remove(&wdata.link); 386 futex_unlock(&async_futex); 387 395 388 fibril_mutex_lock(fm); 396 389 397 futex_lock(&async_futex); 398 if (wdata.to_event.inlist) 399 list_remove(&wdata.to_event.link); 400 if (wdata.wu_event.inlist) 401 list_remove(&wdata.wu_event.link); 402 futex_unlock(&async_futex); 403 404 return wdata.to_event.occurred ? 
ETIMEOUT : EOK; 390 return timed_out ? ETIMEOUT : EOK; 405 391 } 406 392 407 393 void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm) 408 394 { 409 errno_t rc; 410 411 rc = fibril_condvar_wait_timeout(fcv, fm, 0); 412 assert(rc == EOK); 413 } 414 415 static void _fibril_condvar_wakeup_common(fibril_condvar_t *fcv, bool once) 416 { 417 link_t *tmp; 418 awaiter_t *wdp; 419 420 futex_lock(&async_futex); 421 while (!list_empty(&fcv->waiters)) { 422 tmp = list_first(&fcv->waiters); 423 wdp = list_get_instance(tmp, awaiter_t, wu_event.link); 424 list_remove(&wdp->wu_event.link); 425 wdp->wu_event.inlist = false; 426 if (!wdp->active) { 427 wdp->active = true; 428 fibril_add_ready(wdp->fid); 429 optimize_execution_power(); 430 if (once) 431 break; 432 } 433 } 434 futex_unlock(&async_futex); 395 (void) fibril_condvar_wait_timeout(fcv, fm, 0); 435 396 } 436 397 437 398 void fibril_condvar_signal(fibril_condvar_t *fcv) 438 399 { 439 _fibril_condvar_wakeup_common(fcv, true); 400 futex_lock(&async_futex); 401 402 awaiter_t *w = list_pop(&fcv->waiters, awaiter_t, link); 403 if (w != NULL) 404 fibril_notify(&w->event); 405 406 futex_unlock(&async_futex); 440 407 } 441 408 442 409 void fibril_condvar_broadcast(fibril_condvar_t *fcv) 443 410 { 444 _fibril_condvar_wakeup_common(fcv, false); 411 futex_lock(&async_futex); 412 413 awaiter_t *w; 414 while ((w = list_pop(&fcv->waiters, awaiter_t, link))) 415 fibril_notify(&w->event); 416 417 futex_unlock(&async_futex); 445 418 } 446 419 … … 624 597 printf("Deadlock detected.\n"); 625 598 stacktrace_print(); 626 printf("Fibril % zxis trying to clear timer %p from "599 printf("Fibril %p is trying to clear timer %p from " 627 600 "inside its handler %p.\n", 628 601 fibril_get_id(), timer, timer->fun); … … 674 647 sem->count++; 675 648 676 if (sem->count > 0) { 677 futex_unlock(&async_futex); 678 return; 679 } 680 681 link_t *tmp = list_first(&sem->waiters); 682 assert(tmp); 683 list_remove(tmp); 684 685 futex_unlock(&async_futex); 686 687 awaiter_t *wdp = list_get_instance(tmp, awaiter_t, wu_event.link); 688 fibril_add_ready(wdp->fid); 689 optimize_execution_power(); 649 if (sem->count <= 0) { 650 awaiter_t *w = list_pop(&sem->waiters, awaiter_t, link); 651 assert(w); 652 fibril_notify(&w->event); 653 } 654 655 futex_unlock(&async_futex); 690 656 } 691 657 … … 707 673 } 708 674 709 awaiter_t wdata; 710 awaiter_initialize(&wdata); 711 712 wdata.fid = fibril_get_id(); 713 list_append(&wdata.wu_event.link, &sem->waiters); 714 715 fibril_switch(FIBRIL_FROM_BLOCKED); 716 futex_unlock(&async_futex); 675 awaiter_t wdata = AWAITER_INIT; 676 list_append(&wdata.link, &sem->waiters); 677 678 futex_unlock(&async_futex); 679 680 fibril_wait_for(&wdata.event); 717 681 } 718 682 -
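The synchronization objects keep their external contract; an awaiter is now just a list link plus a fibril_event_t that the wakeup side hands to fibril_notify(). Standard condition-variable code therefore works unchanged. A small sketch, assuming the stock initializer macros from <fibril_synch.h>:

#include <fibril_synch.h>
#include <stdbool.h>

static FIBRIL_MUTEX_INITIALIZE(lock);
static FIBRIL_CONDVAR_INITIALIZE(cv);
static bool ready = false;

static void wait_until_ready(void)
{
	fibril_mutex_lock(&lock);
	while (!ready)  /* re-check: broadcast may wake several waiters */
		fibril_condvar_wait(&cv, &lock);
	fibril_mutex_unlock(&lock);
}

static void announce_ready(void)
{
	fibril_mutex_lock(&lock);
	ready = true;
	fibril_condvar_signal(&cv);  /* fibril_notify() on the first awaiter */
	fibril_mutex_unlock(&lock);
}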
uspace/lib/c/generic/libc.c
r7137f74c r514d561 86 86 87 87 assert(main_fibril.tcb); 88 89 __fibrils_init(); 88 90 89 91 /* Initialize the fibril. */ -
uspace/lib/c/generic/private/async.h
r7137f74c r514d561 42 42 #include <sys/time.h> 43 43 #include <stdbool.h> 44 45 /** Structures of this type are used to track the timeout events. */46 typedef struct {47 /** If true, this struct is in the timeout list. */48 bool inlist;49 50 /** Timeout list link. */51 link_t link;52 53 /** If true, we have timed out. */54 bool occurred;55 56 /** Expiration time. */57 struct timeval expires;58 } to_event_t;59 60 /** Structures of this type are used to track the wakeup events. */61 typedef struct {62 /** If true, this struct is in a synchronization object wait queue. */63 bool inlist;64 65 /** Wait queue linkage. */66 link_t link;67 } wu_event_t;68 69 /** Structures of this type represent a waiting fibril. */70 typedef struct {71 /** Identification of and link to the waiting fibril. */72 fid_t fid;73 74 /** If true, this fibril is currently active. */75 bool active;76 77 /** Timeout wait data. */78 to_event_t to_event;79 /** Wakeup wait data. */80 wu_event_t wu_event;81 } awaiter_t;82 44 83 45 /** Session data */ … … 132 94 }; 133 95 134 extern void awaiter_initialize(awaiter_t *);135 136 96 extern void __async_server_init(void); 137 97 extern void __async_client_init(void); 138 98 extern void __async_ports_init(void); 139 extern void async_insert_timeout(awaiter_t *);140 99 141 100 extern errno_t async_create_port_internal(iface_t, async_port_handler_t, -
uspace/lib/c/generic/private/fibril.h
r7137f74c r514d561 42 42 context_t ctx; 43 43 44 uspace_arg_t uarg; 44 45 link_t link; 45 46 void *stack; 47 size_t stack_size; 46 48 void *arg; 47 49 errno_t (*func)(void *); … … 51 53 errno_t retval; 52 54 53 fibril_ owner_info_t *waits_for;55 fibril_t *thread_ctx; 54 56 55 atomic_t futex_locks;57 bool is_running : 1; 56 58 bool is_writer : 1; 57 59 /* In some places, we use fibril structs that can't be freed. */ 58 60 bool is_freeable : 1; 61 62 /* Debugging stuff. */ 63 atomic_t futex_locks; 64 fibril_owner_info_t *waits_for; 65 fibril_event_t *sleep_event; 59 66 }; 60 61 typedef enum {62 FIBRIL_PREEMPT,63 FIBRIL_FROM_BLOCKED,64 FIBRIL_FROM_MANAGER,65 FIBRIL_FROM_DEAD66 } fibril_switch_type_t;67 67 68 68 extern fibril_t *fibril_alloc(void); 69 69 extern void fibril_setup(fibril_t *); 70 extern void fibril_teardown(fibril_t *f, bool locked); 71 extern int fibril_switch(fibril_switch_type_t stype); 72 extern void fibril_add_manager(fid_t fid); 73 extern void fibril_remove_manager(void); 70 extern void fibril_teardown(fibril_t *f); 74 71 extern fibril_t *fibril_self(void); 75 72 73 extern void __fibrils_init(void); 74 76 75 #endif -
uspace/lib/c/generic/rcu.c
r7137f74c r514d561 115 115 116 116 typedef struct blocked_fibril { 117 fi d_t id;117 fibril_event_t unblock; 118 118 link_t link; 119 119 bool is_ready; … … 218 218 } 219 219 220 /** Delimits the startof an RCU reader critical section. */220 /** Delimits the end of an RCU reader critical section. */ 221 221 void rcu_read_unlock(void) 222 222 { … … 361 361 if (rcu.sync_lock.locked) { 362 362 blocked_fibril_t blocked_fib; 363 blocked_fib. id = fibril_get_id();363 blocked_fib.unblock = FIBRIL_EVENT_INIT; 364 364 365 365 list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils); … … 368 368 blocked_fib.is_ready = false; 369 369 fibril_rmutex_unlock(&rcu.sync_lock.mutex); 370 futex_lock(&async_futex); 371 fibril_switch(FIBRIL_FROM_BLOCKED); 372 futex_unlock(&async_futex); 370 fibril_wait_for(&blocked_fib.unblock); 373 371 fibril_rmutex_lock(&rcu.sync_lock.mutex); 374 372 } while (rcu.sync_lock.locked); … … 393 391 if (!blocked_fib->is_ready) { 394 392 blocked_fib->is_ready = true; 395 fibril_ add_ready(blocked_fib->id);393 fibril_notify(&blocked_fib->unblock); 396 394 } 397 395 } -
uspace/lib/c/generic/thread.c
r7137f74c r514d561 73 73 */ 74 74 75 /* If there is a manager, destroy it */ 76 async_destroy_manager(); 77 78 fibril_teardown(fibril, false); 79 75 fibril_teardown(fibril); 80 76 thread_exit(0); 81 77 } … … 111 107 AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED); 112 108 if (stack == AS_MAP_FAILED) { 113 fibril_teardown(fibril , false);109 fibril_teardown(fibril); 114 110 free(uarg); 115 111 return ENOMEM; -
uspace/lib/c/include/async.h
r7137f74c r514d561 143 143 extern errno_t async_wait_timeout(aid_t, errno_t *, suseconds_t); 144 144 extern void async_forget(aid_t); 145 146 extern void async_create_manager(void);147 extern void async_destroy_manager(void);148 145 149 146 extern void async_set_client_data_constructor(async_client_data_ctor_t); … … 345 342 346 343 extern errno_t async_hangup(async_sess_t *); 347 extern void async_poke(void);348 344 349 345 extern async_exch_t *async_exchange_begin(async_sess_t *); … … 475 471 476 472 errno_t async_spawn_notification_handler(void); 473 fid_t async_create_manager(void); 477 474 478 475 #endif -
uspace/lib/c/include/fibril.h
r7137f74c r514d561 38 38 #include <types/common.h> 39 39 #include <time.h> 40 #include <_bits/__noreturn.h> 41 #include <ipc/common.h> 40 42 41 43 typedef struct fibril fibril_t; … … 45 47 } fibril_owner_info_t; 46 48 47 typedef sysarg_t fid_t; 49 typedef fibril_t *fid_t; 50 51 typedef struct { 52 fibril_t *fibril; 53 } fibril_event_t; 54 55 #define FIBRIL_EVENT_INIT ((fibril_event_t) {0}) 48 56 49 57 /** Fibril-local variable specifier */ … … 52 60 #define FIBRIL_DFLT_STK_SIZE 0 53 61 54 extern fid_t fibril_create_generic(errno_t (* func)(void *), void *arg, size_t);55 extern void fibril_destroy(fid_t fid);56 extern void fibril_add_ready(fid_t fid);62 extern fid_t fibril_create_generic(errno_t (*)(void *), void *, size_t); 63 extern void fibril_destroy(fid_t); 64 extern void fibril_add_ready(fid_t); 57 65 extern fid_t fibril_get_id(void); 58 66 extern void fibril_yield(void); … … 71 79 } 72 80 81 extern void fibril_start(fid_t); 82 extern __noreturn void fibril_exit(long); 83 84 extern void fibril_wait_for(fibril_event_t *); 85 extern errno_t fibril_wait_timeout(fibril_event_t *, const struct timeval *); 86 extern void fibril_notify(fibril_event_t *); 87 88 extern errno_t fibril_ipc_wait(ipc_call_t *, const struct timeval *); 89 extern void fibril_ipc_poke(void); 90 73 91 #endif 74 92
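Note that the new public wait API takes an absolute deadline (a struct timeval measured against getuptime()) rather than a relative interval; relative waits are built on top of it, as fibril_usleep() in fibril.c does. A small helper sketch under that assumption:

#include <fibril.h>
#include <sys/time.h>
#include <errno.h>

/* Wait for `event` for at most `usec` microseconds.
 * Returns EOK if notified in time, ETIMEOUT otherwise. */
static errno_t wait_relative(fibril_event_t *event, suseconds_t usec)
{
	struct timeval expires;
	getuptime(&expires);
	tv_add_diff(&expires, usec);

	return fibril_wait_timeout(event, &expires);
}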