Changes in / [fbfe59d:8119363] in mainline

Files:
- 1 added
- 30 edited

Legend:
- unchanged context lines are unprefixed
- added lines are prefixed with "+"
- removed lines are prefixed with "-"
- "…" marks an elided span between diff hunks
HelenOS.config
  ! CONFIG_UBSAN_KERNEL (n/y)

+ % Track owner for futexes in userspace.
+ ! CONFIG_DEBUG_FUTEX (y/n)
+
  % Deadlock detection support for spinlocks
  ! [CONFIG_DEBUG=y&CONFIG_SMP=y] CONFIG_DEBUG_SPINLOCK (y/n)
abi/include/abi/synch.h
  /** Interruptible operation. */
  #define SYNCH_FLAGS_INTERRUPTIBLE (1 << 1)
+ /** Futex operation (makes sleep with timeout composable). */
+ #define SYNCH_FLAGS_FUTEX (1 << 2)

  #endif
kernel/generic/include/proc/thread.h
      /** If true, the thread can be interrupted from sleep. */
      bool sleep_interruptible;
+
+     /**
+      * If true, and this thread's sleep returns without a wakeup
+      * (timed out or interrupted), waitq ignores the next wakeup.
+      * This is necessary for futex to be able to handle those conditions.
+      */
+     bool sleep_composable;
+
      /** Wait queue in which this thread sleeps. */
      waitq_t *sleep_queue;
kernel/generic/include/synch/futex.h
  extern void futex_init(void);
- extern sys_errno_t sys_futex_sleep(uintptr_t);
+ extern sys_errno_t sys_futex_sleep(uintptr_t, uintptr_t);
  extern sys_errno_t sys_futex_wakeup(uintptr_t);
kernel/generic/include/synch/waitq.h
      int missed_wakeups;

+     /** Number of wakeups that need to be ignored due to futex timeout. */
+     int ignore_wakeups;
+
      /** List of sleeping threads for which there was no missed_wakeup. */
      list_t sleepers;
kernel/generic/src/proc/thread.c
      timeout_initialize(&thread->sleep_timeout);
      thread->sleep_interruptible = false;
+     thread->sleep_composable = false;
      thread->sleep_queue = NULL;
      thread->timeout_pending = false;
kernel/generic/src/synch/futex.c
  }

- /** Sleep in futex wait queue.
+ /** Sleep in futex wait queue with a timeout.
+  * If the sleep times out or is interrupted, the next wakeup is ignored.
+  * The userspace portion of the call must handle this condition.
   *
-  * @param uaddr Userspace address of the futex counter.
+  * @param uaddr   Userspace address of the futex counter.
+  * @param timeout Maximum number of useconds to sleep. 0 means no limit.
   *
   * @return If there is no physical mapping for uaddr ENOENT is
…
   *         waitq_sleep_timeout().
   */
- sys_errno_t sys_futex_sleep(uintptr_t uaddr)
+ sys_errno_t sys_futex_sleep(uintptr_t uaddr, uintptr_t timeout)
  {
      futex_t *futex = get_futex(uaddr);
…
  #endif

-     errno_t rc = waitq_sleep_timeout(
-         &futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE, NULL);
+     errno_t rc = waitq_sleep_timeout(&futex->wq, timeout,
+         SYNCH_FLAGS_INTERRUPTIBLE | SYNCH_FLAGS_FUTEX, NULL);

  #ifdef CONFIG_UDEBUG
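Note: the reason the next wakeup must be swallowed is visible on the userspace side of the counter protocol. The sketch below mirrors the futex_down_timeout() wrapper added to uspace/lib/c/include/futex.h later in this changeset; the name my_down_timeout() is hypothetical, but the call sequence is the one the library itself uses.

    /* A composable sleep that fails (timeout or interrupt) leaves the
     * userspace counter decremented, so it is repaired with futex_up().
     * Because the kernel was told to ignore the next wakeup
     * (SYNCH_FLAGS_FUTEX), this futex_up() cannot deliver a spurious
     * wakeup to a waiter that is no longer asleep. */
    static errno_t my_down_timeout(futex_t *futex, struct timeval *expires)
    {
        errno_t rc = futex_down_composable(futex, expires);
        if (rc != EOK)
            futex_up(futex);
        return rc;
    }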
kernel/generic/src/synch/waitq.c
  #include <adt/list.h>
  #include <arch/cycle.h>
+ #include <mem.h>

  static void waitq_sleep_timed_out(void *);
…
  void waitq_initialize(waitq_t *wq)
  {
+     memsetb(wq, sizeof(*wq), 0);
      irq_spinlock_initialize(&wq->lock, "wq.lock");
      list_initialize(&wq->sleepers);
-     wq->missed_wakeups = 0;
  }
…
          thread->saved_context = thread->sleep_timeout_context;
          do_wakeup = true;
+         if (thread->sleep_composable)
+             wq->ignore_wakeups++;
          thread->sleep_queue = NULL;
          irq_spinlock_unlock(&wq->lock, false);
…
          list_remove(&thread->wq_link);
          thread->saved_context = thread->sleep_interruption_context;
+         if (thread->sleep_composable)
+             wq->ignore_wakeups++;
          do_wakeup = true;
          thread->sleep_queue = NULL;
…
       */
      irq_spinlock_lock(&THREAD->lock, false);
+
+     THREAD->sleep_composable = (flags & SYNCH_FLAGS_FUTEX);

      if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
…
      assert(irq_spinlock_locked(&wq->lock));

+     if (wq->ignore_wakeups > 0) {
+         if (mode == WAKEUP_FIRST) {
+             wq->ignore_wakeups--;
+             return;
+         }
+         wq->ignore_wakeups = 0;
+     }
+
  loop:
      if (list_empty(&wq->sleepers)) {
uspace/app/nic/nic.c
   */

+ #include <assert.h>
  #include <errno.h>
  #include <loc.h>
uspace/app/taskdump/fibrildump.c
   */

+ #include <adt/list.h>
+ #include <context.h>
  #include <errno.h>
  #include <fibril.h>
…
  #include <taskdump.h>
  #include <udebug.h>
+
+ struct fibril {
+     link_t all_link;
+     context_t ctx;
+     uint8_t __opaque[];
+ };

  static errno_t fibrildump_read_uintptr(void *, uintptr_t, uintptr_t *);
uspace/lib/c/generic/assert.c
  #include <stacktrace.h>
  #include <stdint.h>
+ #include <task.h>

  static atomic_t failed_asserts = { 0 };
…
       * Send the message safely to kio. Nested asserts should not occur.
       */
-     kio_printf("Assertion failed (%s) in file \"%s\", line %u.\n",
-         cond, file, line);
+     kio_printf("Assertion failed (%s) in task %ld, file \"%s\", line %u.\n",
+         cond, (long) task_get_id(), file, line);
+
+     stacktrace_kio_print();

      /* Sometimes we know in advance that regular printf() would likely fail. */
…
       * Send the message safely to kio. Nested asserts should not occur.
       */
-     kio_printf("Assertion failed (%s) in file \"%s\", line %u.\n",
-         cond, file, line);
+     kio_printf("Assertion failed (%s) in task %ld, file \"%s\", line %u.\n",
+         cond, (long) task_get_id(), file, line);
+
+     stacktrace_kio_print();

      /*
…
       * assertions.
       */
-     printf("Assertion failed (%s) in file \"%s\", line %u.\n",
-         cond, file, line);
+     kio_printf("Assertion failed (%s) in task %ld, file \"%s\", line %u.\n",
+         cond, (long) task_get_id(), file, line);
      stacktrace_print();
uspace/lib/c/generic/async/client.c
  #include <abi/mm/as.h>
  #include "../private/libc.h"
+ #include "../private/fibril.h"

  /** Naming service session */
…
      assert(arg);

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      amsg_t *msg = (amsg_t *) arg;
…
      }

-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
…
      amsg_t *msg = (amsg_t *) amsgid;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      assert(!msg->forget);
…
      if (msg->done) {
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          goto done;
      }
…
      /* Leave the async_futex locked when entering this function */
-     fibril_switch(FIBRIL_TO_MANAGER);
-
-     /* Futex is up automatically after fibril_switch */
+     fibril_switch(FIBRIL_FROM_BLOCKED);
+     futex_unlock(&async_futex);

  done:
…
      amsg_t *msg = (amsg_t *) amsgid;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      assert(!msg->forget);
…
      if (msg->done) {
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          goto done;
      }
…
      /* Leave the async_futex locked when entering this function */
-     fibril_switch(FIBRIL_TO_MANAGER);
-
-     /* Futex is up automatically after fibril_switch */
+     fibril_switch(FIBRIL_FROM_BLOCKED);
+     futex_unlock(&async_futex);

      if (!msg->done)
…
      assert(!msg->destroyed);

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      if (msg->done) {
…
      }

-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
  }
…
      tv_add_diff(&awaiter.to_event.expires, timeout);

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      async_insert_timeout(&awaiter);

      /* Leave the async_futex locked when entering this function */
-     fibril_switch(FIBRIL_TO_MANAGER);
-
-     /* Futex is up automatically after fibril_switch() */
+     fibril_switch(FIBRIL_FROM_BLOCKED);
+     futex_unlock(&async_futex);
  }
uspace/lib/c/generic/async/server.c
  #include <abi/mm/as.h>
  #include "../private/libc.h"
+ #include "../private/fibril.h"

  /** Async framework global futex */
…
       * Remove myself from the connection hash table.
       */
-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      hash_table_remove(&conn_hash_table, &(conn_key_t){
          .task_id = fibril_connection->in_task_id,
          .phone_hash = fibril_connection->in_phone_hash
      });
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      /*
…
      /* Add connection to the connection hash table */

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      hash_table_insert(&conn_hash_table, &conn->link);
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      fibril_add_ready(conn->wdata.fid);
…
      assert(call);

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      ht_link_t *link = hash_table_find(&conn_hash_table, &(conn_key_t){
…
      });
      if (!link) {
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          return false;
      }
…
      msg_t *msg = malloc(sizeof(*msg));
      if (!msg) {
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          return false;
      }
…
      }

-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
      return true;
  }
…
      connection_t *conn = fibril_connection;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);

      if (usecs) {
…
          memset(call, 0, sizeof(ipc_call_t));
          IPC_SET_IMETHOD(*call, IPC_M_PHONE_HUNGUP);
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          return conn->close_chandle;
      }
…
       * case, route_call() will perform the wakeup.
       */
-     fibril_switch(FIBRIL_TO_MANAGER);
-
-     /*
-      * Futex is up after getting back from async_manager.
-      * Get it again.
-      */
-     futex_down(&async_futex);
+     fibril_switch(FIBRIL_FROM_BLOCKED);

      if ((usecs) && (conn->wdata.to_event.occurred) &&
          (list_empty(&conn->msg_queue))) {
          /* If we timed out -> exit */
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          return CAP_NIL;
      }
…
      free(msg);

-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
      return chandle;
  }
…
      assert(call);

-     /* Kernel notification */
-     if ((chandle == CAP_NIL) && (call->flags & IPC_CALL_NOTIF)) {
-         queue_notification(call);
+     if (call->flags & IPC_CALL_ANSWERED)
+         return;
+
+     if (chandle == CAP_NIL) {
+         if (call->flags & IPC_CALL_NOTIF) {
+             /* Kernel notification */
+             queue_notification(call);
+         }
          return;
      }
…
  /** Fire all timeouts that expired. */
- static void handle_expired_timeouts(void)
- {
+ static suseconds_t handle_expired_timeouts(unsigned int *flags)
+ {
+     /* Make sure the async_futex is held. */
+     futex_assert_is_locked(&async_futex);
+
      struct timeval tv;
      getuptime(&tv);

-     futex_down(&async_futex);
+     bool fired = false;

      link_t *cur = list_first(&timeout_list);
…
          list_get_instance(cur, awaiter_t, to_event.link);

-         if (tv_gt(&waiter->to_event.expires, &tv))
-             break;
+         if (tv_gt(&waiter->to_event.expires, &tv)) {
+             if (fired) {
+                 *flags = SYNCH_FLAGS_NON_BLOCKING;
+                 return 0;
+             }
+             *flags = 0;
+             return tv_sub_diff(&waiter->to_event.expires, &tv);
+         }

          list_remove(&waiter->to_event.link);
…
              waiter->active = true;
              fibril_add_ready(waiter->fid);
+             fired = true;
          }
…
      }

-     futex_up(&async_futex);
+     if (fired) {
+         *flags = SYNCH_FLAGS_NON_BLOCKING;
+         return 0;
+     }
+
+     return SYNCH_NO_TIMEOUT;
  }
…
  {
      while (true) {
-         if (fibril_switch(FIBRIL_FROM_MANAGER)) {
-             futex_up(&async_futex);
-             /*
-              * async_futex is always held when entering a manager
-              * fibril.
-              */
-             continue;
-         }
-
-         futex_down(&async_futex);
-
-         suseconds_t timeout;
+         futex_lock(&async_futex);
+         fibril_switch(FIBRIL_FROM_MANAGER);
+
+         /*
+          * The switch only returns when there is no non-manager fibril
+          * it can run.
+          */
+
          unsigned int flags = SYNCH_FLAGS_NONE;
-         if (!list_empty(&timeout_list)) {
-             awaiter_t *waiter = list_get_instance(
-                 list_first(&timeout_list), awaiter_t, to_event.link);
-
-             struct timeval tv;
-             getuptime(&tv);
-
-             if (tv_gteq(&tv, &waiter->to_event.expires)) {
-                 futex_up(&async_futex);
-                 handle_expired_timeouts();
-                 /*
-                  * Notice that even if the event(s) already
-                  * expired (and thus the other fibril was
-                  * supposed to be running already),
-                  * we check for incoming IPC.
-                  *
-                  * Otherwise, a fibril that continuously
-                  * creates (almost) expired events could
-                  * prevent IPC retrieval from the kernel.
-                  */
-                 timeout = 0;
-                 flags = SYNCH_FLAGS_NON_BLOCKING;
-
-             } else {
-                 timeout = tv_sub_diff(&waiter->to_event.expires,
-                     &tv);
-                 futex_up(&async_futex);
-             }
-         } else {
-             futex_up(&async_futex);
-             timeout = SYNCH_NO_TIMEOUT;
-         }
+         suseconds_t next_timeout = handle_expired_timeouts(&flags);
+         futex_unlock(&async_futex);

          atomic_inc(&threads_in_ipc_wait);

          ipc_call_t call;
-         errno_t rc = ipc_wait_cycle(&call, timeout, flags);
+         errno_t rc = ipc_wait_cycle(&call, next_timeout, flags);

          atomic_dec(&threads_in_ipc_wait);

          assert(rc == EOK);
-
-         if (call.cap_handle == CAP_NIL) {
-             if ((call.flags &
-                 (IPC_CALL_NOTIF | IPC_CALL_ANSWERED)) == 0) {
-                 /* Neither a notification nor an answer. */
-                 handle_expired_timeouts();
-                 continue;
-             }
-         }
-
-         if (call.flags & IPC_CALL_ANSWERED)
-             continue;
-
          handle_call(call.cap_handle, &call);
      }
…
  static errno_t async_manager_fibril(void *arg)
  {
-     futex_up(&async_futex);
-
-     /*
-      * async_futex is always locked when entering manager
-      */
      async_manager_worker();
-
      return 0;
  }
…
  }

+ _Noreturn void async_manager(void)
+ {
+     futex_lock(&async_futex);
+     fibril_switch(FIBRIL_FROM_DEAD);
+     __builtin_unreachable();
+ }
+
  /** @}
   */
uspace/lib/c/generic/fibril.c
  #include <async.h>

- #ifdef FUTEX_UPGRADABLE
- #include <rcu.h>
- #endif
+ #include "private/fibril.h"

  /**
…
  static void fibril_main(void)
  {
-     /* fibril_futex is locked when a fibril is first started. */
-     futex_unlock(&fibril_futex);
-
-     fibril_t *fibril = __tcb_get()->fibril_data;
-
- #ifdef FUTEX_UPGRADABLE
-     rcu_register_fibril();
- #endif
+     /* fibril_futex and async_futex are locked when a fibril is started. */
+     futex_unlock(&fibril_futex);
+     futex_unlock(&async_futex);
+
+     fibril_t *fibril = fibril_self();

      /* Call the implementing function. */
      fibril->retval = fibril->func(fibril->arg);

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      fibril_switch(FIBRIL_FROM_DEAD);
      /* Not reached */
…
          return NULL;

-     fibril_t *fibril = malloc(sizeof(fibril_t));
+     fibril_t *fibril = calloc(1, sizeof(fibril_t));
      if (!fibril) {
          tls_free(tcb);
…
      tcb->fibril_data = fibril;
      fibril->tcb = tcb;
-
-     fibril->func = NULL;
-     fibril->arg = NULL;
-     fibril->stack = NULL;
-     fibril->clean_after_me = NULL;
-     fibril->retval = 0;
-     fibril->flags = 0;
-
-     fibril->waits_for = NULL;

      /*
…
  /** Switch from the current fibril.
   *
-  * If stype is FIBRIL_TO_MANAGER or FIBRIL_FROM_DEAD, the async_futex must
-  * be held.
+  * The async_futex must be held when entering this function,
+  * and is still held on return.
   *
   * @param stype Switch type. One of FIBRIL_PREEMPT, FIBRIL_TO_MANAGER,
…
  int fibril_switch(fibril_switch_type_t stype)
  {
+     /* Make sure the async_futex is held. */
+     futex_assert_is_locked(&async_futex);
+
      futex_lock(&fibril_futex);

-     fibril_t *srcf = __tcb_get()->fibril_data;
+     fibril_t *srcf = fibril_self();
      fibril_t *dstf = NULL;

      /* Choose a new fibril to run */
-     switch (stype) {
-     case FIBRIL_TO_MANAGER:
-     case FIBRIL_FROM_DEAD:
-         /* Make sure the async_futex is held. */
-         assert((atomic_signed_t) async_futex.val.count <= 0);
+     if (list_empty(&ready_list)) {
+         if (stype == FIBRIL_PREEMPT || stype == FIBRIL_FROM_MANAGER) {
+             // FIXME: This means that as long as there is a fibril
+             // that only yields, IPC messages are never retrieved.
+             futex_unlock(&fibril_futex);
+             return 0;
+         }

          /* If we are going to manager and none exists, create it */
…
          dstf = list_get_instance(list_first(&manager_list),
              fibril_t, link);
-
-         if (stype == FIBRIL_FROM_DEAD)
-             dstf->clean_after_me = srcf;
-         break;
-     case FIBRIL_PREEMPT:
-     case FIBRIL_FROM_MANAGER:
-         if (list_empty(&ready_list)) {
-             futex_unlock(&fibril_futex);
-             return 0;
-         }
-
+     } else {
          dstf = list_get_instance(list_first(&ready_list), fibril_t,
              link);
-         break;
      }
+
      list_remove(&dstf->link);
+     if (stype == FIBRIL_FROM_DEAD)
+         dstf->clean_after_me = srcf;

      /* Put the current fibril into the correct run list */
…
          break;
      case FIBRIL_FROM_DEAD:
+     case FIBRIL_FROM_BLOCKED:
          // Nothing.
          break;
-     case FIBRIL_TO_MANAGER:
-         /*
-          * Don't put the current fibril into any list, it should
-          * already be somewhere, or it will be lost.
-          */
-         break;
-     }
-
- #ifdef FUTEX_UPGRADABLE
-     if (stype == FIBRIL_FROM_DEAD) {
-         rcu_deregister_fibril();
-     }
- #endif
+     }
+
+     /* Bookkeeping. */
+     futex_give_to(&fibril_futex, dstf);
+     futex_give_to(&async_futex, dstf);

      /* Swap to the next fibril. */
…
  }

+ fibril_t *fibril_self(void)
+ {
+     return __tcb_get()->fibril_data;
+ }
+
  /** Return fibril id of the currently running fibril.
   *
…
  fid_t fibril_get_id(void)
  {
-     return (fid_t) __tcb_get()->fibril_data;
+     return (fid_t) fibril_self();
+ }
+
+ void fibril_yield(void)
+ {
+     futex_lock(&async_futex);
+     (void) fibril_switch(FIBRIL_PREEMPT);
+     futex_unlock(&async_futex);
  }
uspace/lib/c/generic/fibril_synch.c
  #include <stdio.h>
  #include "private/async.h"
+ #include "private/fibril.h"

  static void optimize_execution_power(void)
…
      fibril_t *f = (fibril_t *) fibril_get_id();

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (fm->counter-- <= 0) {
          awaiter_t wdata;
…
          check_for_deadlock(&fm->oi);
          f->waits_for = &fm->oi;
-         fibril_switch(FIBRIL_TO_MANAGER);
+         fibril_switch(FIBRIL_FROM_BLOCKED);
      } else {
          fm->oi.owned_by = f;
-         futex_up(&async_futex);
      }
+     futex_unlock(&async_futex);
  }
…
      bool locked = false;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (fm->counter > 0) {
          fm->counter--;
…
          locked = true;
      }
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      return locked;
…
  {
      assert(fibril_mutex_is_locked(fm));
-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      _fibril_mutex_unlock_unsafe(fm);
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
  }
…
      bool locked = false;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (fm->counter <= 0)
          locked = true;
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      return locked;
…
      fibril_t *f = (fibril_t *) fibril_get_id();

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (frw->writers) {
          awaiter_t wdata;
…
          wdata.fid = (fid_t) f;
          wdata.wu_event.inlist = true;
-         f->flags &= ~FIBRIL_WRITER;
+         f->is_writer = false;
          list_append(&wdata.wu_event.link, &frw->waiters);
          check_for_deadlock(&frw->oi);
          f->waits_for = &frw->oi;
-         fibril_switch(FIBRIL_TO_MANAGER);
+         fibril_switch(FIBRIL_FROM_BLOCKED);
      } else {
          /* Consider the first reader the owner. */
          if (frw->readers++ == 0)
              frw->oi.owned_by = f;
-         futex_up(&async_futex);
      }
+     futex_unlock(&async_futex);
  }
…
      fibril_t *f = (fibril_t *) fibril_get_id();

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (frw->writers || frw->readers) {
          awaiter_t wdata;
…
          wdata.fid = (fid_t) f;
          wdata.wu_event.inlist = true;
-         f->flags |= FIBRIL_WRITER;
+         f->is_writer = true;
          list_append(&wdata.wu_event.link, &frw->waiters);
          check_for_deadlock(&frw->oi);
          f->waits_for = &frw->oi;
-         fibril_switch(FIBRIL_TO_MANAGER);
+         fibril_switch(FIBRIL_FROM_BLOCKED);
      } else {
          frw->oi.owned_by = f;
          frw->writers++;
-         futex_up(&async_futex);
      }
+     futex_unlock(&async_futex);
  }

  static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
  {
-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (frw->readers) {
          if (--frw->readers) {
…
              f->waits_for = NULL;

-             if (f->flags & FIBRIL_WRITER) {
+             if (f->is_writer) {
                  if (frw->readers)
                      break;
…
      }
  out:
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
  }
…
      bool locked = false;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (frw->readers)
          locked = true;
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      return locked;
…
      bool locked = false;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (frw->writers) {
          assert(frw->writers == 1);
          locked = true;
      }
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      return locked;
…
      wdata.wu_event.inlist = true;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (timeout) {
          getuptime(&wdata.to_event.expires);
…
      list_append(&wdata.wu_event.link, &fcv->waiters);
      _fibril_mutex_unlock_unsafe(fm);
-     fibril_switch(FIBRIL_TO_MANAGER);
+     fibril_switch(FIBRIL_FROM_BLOCKED);
+     futex_unlock(&async_futex);
+
+     // XXX: This could be replaced with an unlocked version to get rid
+     // of the unlock-lock pair. I deliberately don't do that because
+     // further changes would most likely need to revert that optimization.
      fibril_mutex_lock(fm);

-     /* async_futex not held after fibril_switch() */
-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      if (wdata.to_event.inlist)
          list_remove(&wdata.to_event.link);
      if (wdata.wu_event.inlist)
          list_remove(&wdata.wu_event.link);
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      return wdata.to_event.occurred ? ETIMEOUT : EOK;
…
      awaiter_t *wdp;

-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      while (!list_empty(&fcv->waiters)) {
          tmp = list_first(&fcv->waiters);
…
          }
      }
-     futex_up(&async_futex);
+     futex_unlock(&async_futex);
  }
…
  void fibril_semaphore_up(fibril_semaphore_t *sem)
  {
-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      sem->count++;

      if (sem->count > 0) {
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          return;
      }
…
      list_remove(tmp);

-     futex_up(&async_futex);
+     futex_unlock(&async_futex);

      awaiter_t *wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
…
  void fibril_semaphore_down(fibril_semaphore_t *sem)
  {
-     futex_down(&async_futex);
+     futex_lock(&async_futex);
      sem->count--;

      if (sem->count >= 0) {
-         futex_up(&async_futex);
+         futex_unlock(&async_futex);
          return;
      }
…
      wdata.fid = fibril_get_id();
      list_append(&wdata.wu_event.link, &sem->waiters);
-     fibril_switch(FIBRIL_TO_MANAGER);
-
-     /* async_futex not held after fibril_switch() */
+
+     fibril_switch(FIBRIL_FROM_BLOCKED);
+     futex_unlock(&async_futex);
  }
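Note: all blocking paths above now share a single discipline: async_futex is held across fibril_switch(FIBRIL_FROM_BLOCKED) and released only after the fibril resumes, with ownership handed to the destination fibril inside the switch via futex_give_to(). A schematic sketch of the pattern, using internal names from private/fibril.h (awaiter setup elided):

    futex_lock(&async_futex);
    /* ... enqueue the current fibril's awaiter_t on a wait list ... */
    fibril_switch(FIBRIL_FROM_BLOCKED);  /* blocks; async_futex stays formally held */
    futex_unlock(&async_futex);          /* released by the fibril once it resumes */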
uspace/lib/c/generic/futex.c
  #include <futex.h>
+
+ #include <assert.h>
  #include <atomic.h>
+ #include <fibril.h>
+ #include <io/kio.h>
+
+ #include "private/fibril.h"
+
+ //#define DPRINTF(...) kio_printf(__VA_ARGS__)
+ #define DPRINTF(...) ((void)0)

  /** Initialize futex counter.
…
  }

- #ifdef FUTEX_UPGRADABLE
-
- int _upgrade_futexes = 0;
- static futex_t upg_and_wait_futex = FUTEX_INITIALIZER;
-
- void futex_upgrade_all_and_wait(void)
- {
-     futex_down(&upg_and_wait_futex);
-
-     if (!_upgrade_futexes) {
-         rcu_assign(_upgrade_futexes, 1);
-         _rcu_synchronize(BM_BLOCK_THREAD);
-     }
-
-     futex_up(&upg_and_wait_futex);
+ #ifdef CONFIG_DEBUG_FUTEX
+
+ void __futex_assert_is_locked(futex_t *futex, const char *name)
+ {
+     void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+     fibril_t *self = (fibril_t *) fibril_get_id();
+     if (owner != self) {
+         DPRINTF("Assertion failed: %s (%p) is not locked by fibril %p (instead locked by fibril %p).\n", name, futex, self, owner);
+     }
+     assert(owner == self);
+ }
+
+ void __futex_assert_is_not_locked(futex_t *futex, const char *name)
+ {
+     void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+     fibril_t *self = (fibril_t *) fibril_get_id();
+     if (owner == self) {
+         DPRINTF("Assertion failed: %s (%p) is already locked by fibril %p.\n", name, futex, self);
+     }
+     assert(owner != self);
+ }
+
+ void __futex_lock(futex_t *futex, const char *name)
+ {
+     /* We use relaxed atomics to avoid violating C11 memory model.
+      * They should compile to regular load/stores, but simple assignments
+      * would be UB by definition.
+      */
+
+     fibril_t *self = (fibril_t *) fibril_get_id();
+     DPRINTF("Locking futex %s (%p) by fibril %p.\n", name, futex, self);
+     __futex_assert_is_not_locked(futex, name);
+     futex_down(futex);
+
+     void *prev_owner = __atomic_exchange_n(&futex->owner, self, __ATOMIC_RELAXED);
+     assert(prev_owner == NULL);
+
+     atomic_inc(&self->futex_locks);
+ }
+
+ void __futex_unlock(futex_t *futex, const char *name)
+ {
+     fibril_t *self = (fibril_t *) fibril_get_id();
+     DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
+     __futex_assert_is_locked(futex, name);
+     __atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
+     atomic_dec(&self->futex_locks);
+     futex_up(futex);
+ }
+
+ bool __futex_trylock(futex_t *futex, const char *name)
+ {
+     fibril_t *self = (fibril_t *) fibril_get_id();
+     bool success = futex_trydown(futex);
+     if (success) {
+         void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
+         assert(owner == NULL);
+
+         __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
+
+         atomic_inc(&self->futex_locks);
+
+         DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
+     } else {
+         DPRINTF("Trylock on futex %s (%p) by fibril %p failed.\n", name, futex, self);
+     }
+
+     return success;
+ }
+
+ void __futex_give_to(futex_t *futex, void *new_owner, const char *name)
+ {
+     fibril_t *self = fibril_self();
+     fibril_t *no = new_owner;
+     DPRINTF("Passing futex %s (%p) from fibril %p to fibril %p.\n", name, futex, self, no);
+
+     __futex_assert_is_locked(futex, name);
+     atomic_dec(&self->futex_locks);
+     atomic_inc(&no->futex_locks);
+     __atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
  }
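Note: with CONFIG_DEBUG_FUTEX enabled, futex_lock()/futex_unlock() expand to the checked __futex_*() variants above, which record the owning fibril and the futex's variable name. A small hedged sketch of what the ownership checks catch (example_futex and example() are hypothetical):

    static futex_t example_futex = FUTEX_INITIALIZER;

    static void example(void)
    {
        futex_lock(&example_futex);   /* __futex_lock(&example_futex, "example_futex") */
        futex_lock(&example_futex);   /* trips __futex_assert_is_not_locked(): recursive lock */
        futex_unlock(&example_futex); /* verified against the recorded owner fibril */
    }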
uspace/lib/c/generic/inet/host.c
   */

+ #include <assert.h>
  #include <errno.h>
  #include <inet/addr.h>
uspace/lib/c/generic/inet/hostport.c
   */

+ #include <assert.h>
  #include <errno.h>
  #include <inet/addr.h>
uspace/lib/c/generic/libc.c
  #include "private/malloc.h"
  #include "private/io.h"
-
- #ifdef FUTEX_UPGRADABLE
- #include <rcu.h>
- #endif
+ #include "private/fibril.h"

  #ifdef CONFIG_RTLD
…
      __tcb_set(fibril->tcb);
-
-
- #ifdef FUTEX_UPGRADABLE
-     rcu_register_fibril();
- #endif

      __async_server_init();
uspace/lib/c/generic/rcu.c
  #include <thread.h>

+ #include "private/fibril.h"
+
  /** RCU sleeps for RCU_SLEEP_MS before polling an active RCU reader again. */
…
- static void wait_for_readers(size_t reader_group, blocking_mode_t blocking_mode);
+ static void wait_for_readers(size_t reader_group);
  static void force_mb_in_all_threads(void);
  static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group);

- static void lock_sync(blocking_mode_t blocking_mode);
+ static void lock_sync(void);
  static void unlock_sync(void);
- static void sync_sleep(blocking_mode_t blocking_mode);
+ static void sync_sleep(void);

  static bool is_in_group(size_t nesting_cnt, size_t group);
…
      assert(!fibril_rcu.registered);

-     futex_down(&rcu.list_futex);
+     futex_lock(&rcu.list_futex);
      list_append(&fibril_rcu.link, &rcu.fibrils_list);
-     futex_up(&rcu.list_futex);
+     futex_unlock(&rcu.list_futex);

      fibril_rcu.registered = true;
…
      fibril_rcu.nesting_cnt = 0;

-     futex_down(&rcu.list_futex);
+     futex_lock(&rcu.list_futex);
      list_remove(&fibril_rcu.link);
-     futex_up(&rcu.list_futex);
+     futex_unlock(&rcu.list_futex);

      fibril_rcu.registered = false;
…
  /** Blocks until all preexisting readers exit their critical sections. */
- void _rcu_synchronize(blocking_mode_t blocking_mode)
+ void rcu_synchronize(void)
  {
      assert(!rcu_read_locked());
…
      size_t gp_in_progress = ACCESS_ONCE(rcu.cur_gp);

-     lock_sync(blocking_mode);
+     lock_sync();

      /*
…
      size_t new_reader_group = get_other_group(rcu.reader_group);
-     wait_for_readers(new_reader_group, blocking_mode);
+     wait_for_readers(new_reader_group);

      /* Separates waiting for readers in new_reader_group from group flip. */
…
      memory_barrier();

-     wait_for_readers(old_reader_group, blocking_mode);
+     wait_for_readers(old_reader_group);

      /* MB_FORCE_U */
…
  /** Waits for readers of reader_group to exit their readers sections. */
- static void wait_for_readers(size_t reader_group, blocking_mode_t blocking_mode)
+ static void wait_for_readers(size_t reader_group)
  {
-     futex_down(&rcu.list_futex);
+     futex_lock(&rcu.list_futex);

      list_t quiescent_fibrils;
…
          if (is_preexisting_reader(fib, reader_group)) {
-             futex_up(&rcu.list_futex);
-             sync_sleep(blocking_mode);
-             futex_down(&rcu.list_futex);
+             futex_unlock(&rcu.list_futex);
+             sync_sleep();
+             futex_lock(&rcu.list_futex);
              /* Break to while loop. */
              break;
…
      list_concat(&rcu.fibrils_list, &quiescent_fibrils);
-     futex_up(&rcu.list_futex);
+     futex_unlock(&rcu.list_futex);
  }

- static void lock_sync(blocking_mode_t blocking_mode)
+ static void lock_sync(void)
  {
-     futex_down(&rcu.sync_lock.futex);
+     futex_lock(&rcu.sync_lock.futex);
      if (rcu.sync_lock.locked) {
-         if (blocking_mode == BM_BLOCK_FIBRIL) {
-             blocked_fibril_t blocked_fib;
-             blocked_fib.id = fibril_get_id();
-
-             list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);
-
-             do {
-                 blocked_fib.is_ready = false;
-                 futex_up(&rcu.sync_lock.futex);
-                 fibril_switch(FIBRIL_TO_MANAGER);
-                 futex_down(&rcu.sync_lock.futex);
-             } while (rcu.sync_lock.locked);
-
-             list_remove(&blocked_fib.link);
-             rcu.sync_lock.locked = true;
-         } else {
-             assert(blocking_mode == BM_BLOCK_THREAD);
-             rcu.sync_lock.blocked_thread_cnt++;
-             futex_up(&rcu.sync_lock.futex);
-             futex_down(&rcu.sync_lock.futex_blocking_threads);
-         }
+         blocked_fibril_t blocked_fib;
+         blocked_fib.id = fibril_get_id();
+
+         list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);
+
+         do {
+             blocked_fib.is_ready = false;
+             futex_unlock(&rcu.sync_lock.futex);
+             futex_lock(&async_futex);
+             fibril_switch(FIBRIL_FROM_BLOCKED);
+             futex_unlock(&async_futex);
+             futex_lock(&rcu.sync_lock.futex);
+         } while (rcu.sync_lock.locked);
+
+         list_remove(&blocked_fib.link);
+         rcu.sync_lock.locked = true;
      } else {
          rcu.sync_lock.locked = true;
…
      if (0 < rcu.sync_lock.blocked_thread_cnt) {
          --rcu.sync_lock.blocked_thread_cnt;
-         futex_up(&rcu.sync_lock.futex_blocking_threads);
+         futex_unlock(&rcu.sync_lock.futex_blocking_threads);
      } else {
          /* Unlock but wake up any fibrils waiting for the lock. */
…
          rcu.sync_lock.locked = false;
-         futex_up(&rcu.sync_lock.futex);
+         futex_unlock(&rcu.sync_lock.futex);
      }
  }

- static void sync_sleep(blocking_mode_t blocking_mode)
+ static void sync_sleep(void)
  {
      assert(rcu.sync_lock.locked);
…
       * but keep sync locked.
       */
-     futex_up(&rcu.sync_lock.futex);
-
-     if (blocking_mode == BM_BLOCK_FIBRIL) {
-         async_usleep(RCU_SLEEP_MS * 1000);
-     } else {
-         thread_usleep(RCU_SLEEP_MS * 1000);
-     }
-
-     futex_down(&rcu.sync_lock.futex);
+     futex_unlock(&rcu.sync_lock.futex);
+     async_usleep(RCU_SLEEP_MS * 1000);
+     futex_lock(&rcu.sync_lock.futex);
  }
uspace/lib/c/generic/stacktrace.c
  #include <stdint.h>
  #include <errno.h>
+ #include <io/kio.h>

  static errno_t stacktrace_read_uintptr(void *arg, uintptr_t addr, uintptr_t *data);

  static stacktrace_ops_t basic_ops = {
-     .read_uintptr = stacktrace_read_uintptr
+     .read_uintptr = stacktrace_read_uintptr,
+     .printf = printf,
+ };
+
+ static stacktrace_ops_t kio_ops = {
+     .read_uintptr = stacktrace_read_uintptr,
+     .printf = kio_printf,
  };
…
      while (stacktrace_fp_valid(&st, fp)) {
-         printf("%p: %p()\n", (void *) fp, (void *) pc);
+         ops->printf("%p: %p()\n", (void *) fp, (void *) pc);
          rc = stacktrace_ra_get(&st, fp, &pc);
          if (rc != EOK)
…
  {
      stacktrace_print_generic(&basic_ops, NULL, fp, pc);
+ }
+
+ void stacktrace_kio_print(void)
+ {
+     stacktrace_prepare();
+     stacktrace_print_generic(&kio_ops, NULL, stacktrace_fp_get(), stacktrace_pc_get());
+
+     /*
+      * Prevent the tail call optimization of the previous call by
+      * making it a non-tail call.
+      */
+
+     kio_printf("-- end of stack trace --\n");
  }
uspace/lib/c/generic/thread.c
  #include <as.h>
  #include "private/thread.h"
-
- #ifdef FUTEX_UPGRADABLE
- #include <rcu.h>
- #endif
-
+ #include "private/fibril.h"

  /** Main thread function.
…
      __tcb_set(fibril->tcb);
-
- #ifdef FUTEX_UPGRADABLE
-     rcu_register_fibril();
-     futex_upgrade_all_and_wait();
- #endif

      uarg->uspace_thread_function(uarg->uspace_thread_arg);
…
      /* If there is a manager, destroy it */
      async_destroy_manager();
-
- #ifdef FUTEX_UPGRADABLE
-     rcu_deregister_fibril();
- #endif

      fibril_teardown(fibril, false);
uspace/lib/c/generic/time.c
  #include <loc.h>
  #include <device/clock_dev.h>
- #include <thread.h>

  #define ASCTIME_BUF_LEN 26
…
   * @param tv2 Second timeval.
   */
- void tv_add(struct timeval *tv1, struct timeval *tv2)
+ void tv_add(struct timeval *tv1, const struct timeval *tv2)
  {
      tv1->tv_sec += tv2->tv_sec;
…
   */
- suseconds_t tv_sub_diff(struct timeval *tv1, struct timeval *tv2)
+ suseconds_t tv_sub_diff(const struct timeval *tv1, const struct timeval *tv2)
  {
      return (tv1->tv_usec - tv2->tv_usec) +
…
   */
- void tv_sub(struct timeval *tv1, struct timeval *tv2)
+ void tv_sub(struct timeval *tv1, const struct timeval *tv2)
  {
      tv1->tv_sec -= tv2->tv_sec;
…
   */
- int tv_gt(struct timeval *tv1, struct timeval *tv2)
+ int tv_gt(const struct timeval *tv1, const struct timeval *tv2)
  {
      if (tv1->tv_sec > tv2->tv_sec)
…
   */
- int tv_gteq(struct timeval *tv1, struct timeval *tv2)
+ int tv_gteq(const struct timeval *tv1, const struct timeval *tv2)
  {
      if (tv1->tv_sec > tv2->tv_sec)
uspace/lib/c/include/async.h
  typedef struct async_exch async_exch_t;

- #define async_manager() \
-     do { \
-         futex_down(&async_futex); \
-         fibril_switch(FIBRIL_FROM_DEAD); \
-     } while (0)
+ extern _Noreturn void async_manager(void);

  #define async_get_call(data) \
uspace/lib/c/include/fibril.h
  #define LIBC_FIBRIL_H_

- #include <context.h>
  #include <types/common.h>
- #include <adt/list.h>
- #include <libarch/tls.h>

- #define FIBRIL_WRITER 1
-
- struct fibril;
+ typedef struct fibril fibril_t;

  typedef struct {
-     struct fibril *owned_by;
+     fibril_t *owned_by;
  } fibril_owner_info_t;

- typedef enum {
-     FIBRIL_PREEMPT,
-     FIBRIL_TO_MANAGER,
-     FIBRIL_FROM_MANAGER,
-     FIBRIL_FROM_DEAD
- } fibril_switch_type_t;
-
  typedef sysarg_t fid_t;
-
- typedef struct fibril {
-     link_t link;
-     link_t all_link;
-     context_t ctx;
-     void *stack;
-     void *arg;
-     errno_t (*func)(void *);
-     tcb_t *tcb;
-
-     struct fibril *clean_after_me;
-     errno_t retval;
-     int flags;
-
-     fibril_owner_info_t *waits_for;
- } fibril_t;

  /** Fibril-local variable specifier */
…
  extern fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t);
  extern void fibril_destroy(fid_t fid);
- extern fibril_t *fibril_setup(void);
- extern void fibril_teardown(fibril_t *f, bool locked);
- extern int fibril_switch(fibril_switch_type_t stype);
  extern void fibril_add_ready(fid_t fid);
- extern void fibril_add_manager(fid_t fid);
- extern void fibril_remove_manager(void);
  extern fid_t fibril_get_id(void);
+ extern void fibril_yield(void);

  static inline fid_t fibril_create(errno_t (*func)(void *), void *arg)
…
  }

- static inline int fibril_yield(void)
- {
-     return fibril_switch(FIBRIL_PREEMPT);
- }
-
  #endif
uspace/lib/c/include/futex.h
  #define LIBC_FUTEX_H_

+ #include <assert.h>
  #include <atomic.h>
  #include <errno.h>
  #include <libc.h>
+ #include <time.h>

  typedef struct futex {
      atomic_t val;
- #ifdef FUTEX_UPGRADABLE
-     int upgraded;
+ #ifdef CONFIG_DEBUG_FUTEX
+     _Atomic void *owner;
  #endif
  } futex_t;

  extern void futex_initialize(futex_t *futex, int value);

- #ifdef FUTEX_UPGRADABLE
- #include <rcu.h>
+ #ifdef CONFIG_DEBUG_FUTEX

- #define FUTEX_INITIALIZE(val) {{ (val) }, 0}
+ #define FUTEX_INITIALIZE(val) {{ (val) }, NULL }
+ #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)

- #define futex_lock(fut) \
- ({ \
-     rcu_read_lock(); \
-     (fut)->upgraded = rcu_access(_upgrade_futexes); \
-     if ((fut)->upgraded) \
-         (void) futex_down((fut)); \
- })
-
- #define futex_trylock(fut) \
- ({ \
-     rcu_read_lock(); \
-     int _upgraded = rcu_access(_upgrade_futexes); \
-     if (_upgraded) { \
-         int _acquired = futex_trydown((fut)); \
-         if (!_acquired) { \
-             rcu_read_unlock(); \
-         } else { \
-             (fut)->upgraded = true; \
-         } \
-         _acquired; \
-     } else { \
-         (fut)->upgraded = false; \
-         1; \
-     } \
- })
-
- #define futex_unlock(fut) \
- ({ \
-     if ((fut)->upgraded) \
-         (void) futex_up((fut)); \
-     rcu_read_unlock(); \
- })
-
- extern int _upgrade_futexes;
-
- extern void futex_upgrade_all_and_wait(void);
+ void __futex_assert_is_locked(futex_t *, const char *);
+ void __futex_assert_is_not_locked(futex_t *, const char *);
+ void __futex_lock(futex_t *, const char *);
+ void __futex_unlock(futex_t *, const char *);
+ bool __futex_trylock(futex_t *, const char *);
+ void __futex_give_to(futex_t *, void *, const char *);
+
+ #define futex_lock(futex) __futex_lock((futex), #futex)
+ #define futex_unlock(futex) __futex_unlock((futex), #futex)
+ #define futex_trylock(futex) __futex_trylock((futex), #futex)
+
+ #define futex_give_to(futex, new_owner) __futex_give_to((futex), (new_owner), #futex)
+ #define futex_assert_is_locked(futex) __futex_assert_is_locked((futex), #futex)
+ #define futex_assert_is_not_locked(futex) __futex_assert_is_not_locked((futex), #futex)

  #else

  #define FUTEX_INITIALIZE(val) {{ (val) }}
+ #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)

  #define futex_lock(fut) (void) futex_down((fut))
…
  #define futex_unlock(fut) (void) futex_up((fut))

+ #define futex_give_to(fut, owner) ((void)0)
+ #define futex_assert_is_locked(fut) assert((atomic_signed_t) (fut)->val.count <= 0)
+ #define futex_assert_is_not_locked(fut) ((void)0)
+
  #endif
-
- #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)

  /** Try to down the futex.
…
  }

- /** Down the futex.
+ /** Down the futex with timeout, composably.
+  *
+  * This means that when the operation fails due to a timeout or being
+  * interrupted, the next futex_up() is ignored, which allows certain kinds of
+  * composition of synchronization primitives.
+  *
+  * In most other circumstances, regular futex_down_timeout() is a better choice.
   *
   * @param futex Futex.
   *
   * @return ENOENT if there is no such virtual address.
+  * @return ETIMEOUT if timeout expires.
   * @return EOK on success.
   * @return Error code from <errno.h> otherwise.
   *
   */
- static inline errno_t futex_down(futex_t *futex)
+ static inline errno_t futex_down_composable(futex_t *futex, struct timeval *expires)
  {
+     // TODO: Add tests for this.
+
+     /* No timeout by default. */
+     suseconds_t timeout = 0;
+
+     if (expires) {
+         struct timeval tv;
+         getuptime(&tv);
+         if (tv_gteq(&tv, expires)) {
+             /* We can't just return ETIMEOUT. That wouldn't be composable. */
+             timeout = 1;
+         } else {
+             timeout = tv_sub_diff(expires, &tv);
+         }
+
+         assert(timeout > 0);
+     }
+
      if ((atomic_signed_t) atomic_predec(&futex->val) < 0)
-         return (errno_t) __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count);
+         return (errno_t) __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);

      return EOK;
…
  }

+ static inline errno_t futex_down_timeout(futex_t *futex, struct timeval *expires)
+ {
+     /*
+      * This combination of a "composable" sleep followed by futex_up() on
+      * failure is necessary to prevent breakage due to certain race
+      * conditions.
+      */
+     errno_t rc = futex_down_composable(futex, expires);
+     if (rc != EOK)
+         futex_up(futex);
+     return rc;
+ }
+
+ /** Down the futex.
+  *
+  * @param futex Futex.
+  *
+  * @return ENOENT if there is no such virtual address.
+  * @return EOK on success.
+  * @return Error code from <errno.h> otherwise.
+  *
+  */
+ static inline errno_t futex_down(futex_t *futex)
+ {
+     return futex_down_timeout(futex, NULL);
+ }
+
  #endif
uspace/lib/c/include/rcu.h
  #define rcu_access(ptr) ACCESS_ONCE(ptr)

- typedef enum blocking_mode {
-     BM_BLOCK_FIBRIL,
-     BM_BLOCK_THREAD
- } blocking_mode_t;
-
  extern void rcu_register_fibril(void);
  extern void rcu_deregister_fibril(void);
…
  extern bool rcu_read_locked(void);

- #define rcu_synchronize() _rcu_synchronize(BM_BLOCK_FIBRIL)
-
- extern void _rcu_synchronize(blocking_mode_t);
+ extern void rcu_synchronize(void);

  #endif
uspace/lib/c/include/stacktrace.h
  typedef struct {
      errno_t (*read_uintptr)(void *, uintptr_t, uintptr_t *);
+     int (*printf)(const char *, ...);
  } stacktrace_ops_t;
…
  extern void stacktrace_print(void);
+ extern void stacktrace_kio_print(void);
  extern void stacktrace_print_fp_pc(uintptr_t, uintptr_t);
  extern void stacktrace_print_generic(stacktrace_ops_t *, void *, uintptr_t,
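Note: with the printf member added to stacktrace_ops_t, a caller can direct a trace to any printf-style sink instead of standard output. A hedged sketch of a caller-supplied ops table (read_self() and dump_current_stack() are hypothetical; taskdump supplies a udebug-based reader instead):

    #include <errno.h>
    #include <io/kio.h>
    #include <stacktrace.h>
    #include <stdint.h>

    /* Reads a word by plain dereference; valid only within the current task. */
    static errno_t read_self(void *arg, uintptr_t addr, uintptr_t *data)
    {
        (void) arg;
        *data = *(uintptr_t *) addr;
        return EOK;
    }

    static stacktrace_ops_t my_ops = {
        .read_uintptr = read_self,
        .printf = kio_printf,
    };

    void dump_current_stack(void)
    {
        stacktrace_prepare();
        stacktrace_print_generic(&my_ops, NULL,
            stacktrace_fp_get(), stacktrace_pc_get());
    }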
uspace/lib/c/include/sys/time.h
  };

+ #define TIMEVAL_MAX ((struct timeval) { .tv_sec = LONG_MAX, .tv_usec = 999999 })
+
  struct timezone {
      int tz_minuteswest;  /* minutes W of Greenwich */
…
  extern void tv_add_diff(struct timeval *, suseconds_t);
- extern void tv_add(struct timeval *, struct timeval *);
- extern suseconds_t tv_sub_diff(struct timeval *, struct timeval *);
- extern void tv_sub(struct timeval *, struct timeval *);
- extern int tv_gt(struct timeval *, struct timeval *);
- extern int tv_gteq(struct timeval *, struct timeval *);
+ extern void tv_add(struct timeval *, const struct timeval *);
+ extern suseconds_t tv_sub_diff(const struct timeval *, const struct timeval *);
+ extern void tv_sub(struct timeval *, const struct timeval *);
+ extern int tv_gt(const struct timeval *, const struct timeval *);
+ extern int tv_gteq(const struct timeval *, const struct timeval *);
  extern void gettimeofday(struct timeval *, struct timezone *);
  extern void getuptime(struct timeval *);
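Note: the const-qualified helpers support the deadline idiom used throughout this changeset: an absolute uptime stored in a struct timeval, as consumed by futex_down_timeout() in the futex.h hunk above. A hedged sketch (usecs_until() is hypothetical):

    /* Microseconds remaining until an absolute deadline, clamped at zero.
     * Mirrors the computation inside futex_down_composable(). */
    static suseconds_t usecs_until(const struct timeval *expires)
    {
        struct timeval now;
        getuptime(&now);
        if (tv_gteq(&now, expires))
            return 0;
        return tv_sub_diff(expires, &now);
    }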
uspace/srv/volsrv/types/part.h
  #define TYPES_PART_H_

+ #include <adt/list.h>
  #include <types/label.h>