Changes in / [8119363:fbfe59d] in mainline
Files:
- 1 deleted
- 30 edited
Legend (markers used in the diffs below):
- "  "  Unmodified context
- "- "  Removed (present only in r8119363)
- "+ "  Added (present only in rfbfe59d)
- "…"   Unchanged lines omitted between hunks
HelenOS.config
  ! CONFIG_UBSAN_KERNEL (n/y)

- % Track owner for futexes in userspace.
- ! CONFIG_DEBUG_FUTEX (y/n)
-
  % Deadlock detection support for spinlocks
  ! [CONFIG_DEBUG=y&CONFIG_SMP=y] CONFIG_DEBUG_SPINLOCK (y/n)
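The removed option gated the owner-tracking debug machinery in the userspace futex implementation (see uspace/lib/c/include/futex.h and uspace/lib/c/generic/futex.c below). Condensed from those diffs, this is the field it controlled:

    #include <atomic.h>

    typedef struct futex {
        atomic_t val;
    #ifdef CONFIG_DEBUG_FUTEX
        /* Owner fibril, maintained by the checked __futex_lock()/__futex_unlock() wrappers. */
        _Atomic void *owner;
    #endif
    } futex_t;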
abi/include/abi/synch.h
  /** Interruptible operation. */
  #define SYNCH_FLAGS_INTERRUPTIBLE  (1 << 1)
- /** Futex operation (makes sleep with timeout composable). */
- #define SYNCH_FLAGS_FUTEX  (1 << 2)

  #endif
kernel/generic/include/proc/thread.h
  /** If true, the thread can be interrupted from sleep. */
  bool sleep_interruptible;

- /**
-  * If true, and this thread's sleep returns without a wakeup
-  * (timed out or interrupted), waitq ignores the next wakeup.
-  * This is necessary for futex to be able to handle those conditions.
-  */
- bool sleep_composable;
-
  /** Wait queue in which this thread sleeps. */
  waitq_t *sleep_queue;
kernel/generic/include/synch/futex.h
  extern void futex_init(void);
- extern sys_errno_t sys_futex_sleep(uintptr_t, uintptr_t);
+ extern sys_errno_t sys_futex_sleep(uintptr_t);
  extern sys_errno_t sys_futex_wakeup(uintptr_t);
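The caller-side counterpart of this signature change is in uspace/lib/c/include/futex.h (below); reduced to the syscall invocation, the difference is:

    /* r8119363: pass a relative timeout in microseconds (0 = no limit). */
    rc = (errno_t) __SYSCALL2(SYS_FUTEX_SLEEP,
        (sysarg_t) &futex->val.count, (sysarg_t) timeout);

    /* rfbfe59d: sleep unconditionally until a wakeup arrives. */
    rc = (errno_t) __SYSCALL1(SYS_FUTEX_SLEEP,
        (sysarg_t) &futex->val.count);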
kernel/generic/include/synch/waitq.h
  int missed_wakeups;

- /** Number of wakeups that need to be ignored due to futex timeout. */
- int ignore_wakeups;
-
  /** List of sleeping threads for which there was no missed_wakeup. */
  list_t sleepers;
kernel/generic/src/proc/thread.c
  timeout_initialize(&thread->sleep_timeout);
  thread->sleep_interruptible = false;
- thread->sleep_composable = false;
  thread->sleep_queue = NULL;
  thread->timeout_pending = false;
kernel/generic/src/synch/futex.c
  }

- /** Sleep in futex wait queue with a timeout.
-  * If the sleep times out or is interrupted, the next wakeup is ignored.
-  * The userspace portion of the call must handle this condition.
-  *
-  * @param uaddr   Userspace address of the futex counter.
-  * @param timeout Maximum number of useconds to sleep. 0 means no limit.
+ /** Sleep in futex wait queue.
+  *
+  * @param uaddr Userspace address of the futex counter.
   *
   * @return If there is no physical mapping for uaddr ENOENT is
…
   *         waitq_sleep_timeout().
   */
- sys_errno_t sys_futex_sleep(uintptr_t uaddr, uintptr_t timeout)
+ sys_errno_t sys_futex_sleep(uintptr_t uaddr)
  {
      futex_t *futex = get_futex(uaddr);
…
  #endif

-     errno_t rc = waitq_sleep_timeout(&futex->wq, timeout,
-         SYNCH_FLAGS_INTERRUPTIBLE | SYNCH_FLAGS_FUTEX, NULL);
+     errno_t rc = waitq_sleep_timeout(
+         &futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE, NULL);

  #ifdef CONFIG_UDEBUG
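On the r8119363 side, the timed sleep was consumed through futex_down_composable()/futex_down_timeout() (removed from uspace/lib/c/include/futex.h below). A minimal sketch of that pairing under the old API; the helper name wait_with_deadline is illustrative:

    #include <futex.h>
    #include <sys/time.h>

    static futex_t sem = FUTEX_INITIALIZE(0);

    /* Sleep on sem for at most usec microseconds (r8119363 API). */
    static errno_t wait_with_deadline(suseconds_t usec)
    {
        struct timeval expires;
        getuptime(&expires);
        tv_add_diff(&expires, usec);

        /*
         * If the composable sleep fails (timeout or interrupt), the kernel
         * has already arranged to swallow the next wakeup; the caller must
         * re-up the counter to keep it consistent. This mirrors the removed
         * futex_down_timeout().
         */
        errno_t rc = futex_down_composable(&sem, &expires);
        if (rc != EOK)
            futex_up(&sem);
        return rc;
    }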
kernel/generic/src/synch/waitq.c
  #include <adt/list.h>
  #include <arch/cycle.h>
- #include <mem.h>

  static void waitq_sleep_timed_out(void *);
…
  void waitq_initialize(waitq_t *wq)
  {
-     memsetb(wq, sizeof(*wq), 0);
      irq_spinlock_initialize(&wq->lock, "wq.lock");
      list_initialize(&wq->sleepers);
+     wq->missed_wakeups = 0;
  }

…
      thread->saved_context = thread->sleep_timeout_context;
      do_wakeup = true;
-     if (thread->sleep_composable)
-         wq->ignore_wakeups++;
      thread->sleep_queue = NULL;
      irq_spinlock_unlock(&wq->lock, false);
…
      list_remove(&thread->wq_link);
      thread->saved_context = thread->sleep_interruption_context;
-     if (thread->sleep_composable)
-         wq->ignore_wakeups++;
      do_wakeup = true;
      thread->sleep_queue = NULL;
…
       */
      irq_spinlock_lock(&THREAD->lock, false);
-
-     THREAD->sleep_composable = (flags & SYNCH_FLAGS_FUTEX);

      if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
…
      assert(irq_spinlock_locked(&wq->lock));

-     if (wq->ignore_wakeups > 0) {
-         if (mode == WAKEUP_FIRST) {
-             wq->ignore_wakeups--;
-             return;
-         }
-         wq->ignore_wakeups = 0;
-     }
-
  loop:
      if (list_empty(&wq->sleepers)) {
uspace/app/nic/nic.c
   */

- #include <assert.h>
  #include <errno.h>
  #include <loc.h>
uspace/app/taskdump/fibrildump.c
   */

- #include <adt/list.h>
- #include <context.h>
  #include <errno.h>
  #include <fibril.h>
…
  #include <taskdump.h>
  #include <udebug.h>
-
- struct fibril {
-     link_t all_link;
-     context_t ctx;
-     uint8_t __opaque[];
- };

  static errno_t fibrildump_read_uintptr(void *, uintptr_t, uintptr_t *);
uspace/lib/c/generic/assert.c
  #include <stacktrace.h>
  #include <stdint.h>
- #include <task.h>

  static atomic_t failed_asserts = { 0 };
…
       * Send the message safely to kio. Nested asserts should not occur.
       */
-     kio_printf("Assertion failed (%s) in task %ld, file \"%s\", line %u.\n",
-         cond, (long) task_get_id(), file, line);
-
-     stacktrace_kio_print();
+     kio_printf("Assertion failed (%s) in file \"%s\", line %u.\n",
+         cond, file, line);

      /* Sometimes we know in advance that regular printf() would likely fail. */
…
       * Send the message safely to kio. Nested asserts should not occur.
       */
-     kio_printf("Assertion failed (%s) in task %ld, file \"%s\", line %u.\n",
-         cond, (long) task_get_id(), file, line);
-
-     stacktrace_kio_print();
+     kio_printf("Assertion failed (%s) in file \"%s\", line %u.\n",
+         cond, file, line);

      /*
…
       * assertions.
       */
-     kio_printf("Assertion failed (%s) in task %ld, file \"%s\", line %u.\n",
-         cond, (long) task_get_id(), file, line);
+     printf("Assertion failed (%s) in file \"%s\", line %u.\n",
+         cond, file, line);
      stacktrace_print();
uspace/lib/c/generic/async/client.c
  #include <abi/mm/as.h>
  #include "../private/libc.h"
- #include "../private/fibril.h"

  /** Naming service session */
…
      assert(arg);

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      amsg_t *msg = (amsg_t *) arg;
…
      }

-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
  }
…
      amsg_t *msg = (amsg_t *) amsgid;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      assert(!msg->forget);
…
      if (msg->done) {
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          goto done;
      }
…
      /* Leave the async_futex locked when entering this function */
-     fibril_switch(FIBRIL_FROM_BLOCKED);
-     futex_unlock(&async_futex);
+     fibril_switch(FIBRIL_TO_MANAGER);
+
+     /* Futex is up automatically after fibril_switch */

  done:
…
      amsg_t *msg = (amsg_t *) amsgid;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      assert(!msg->forget);
…
      if (msg->done) {
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          goto done;
      }
…
      /* Leave the async_futex locked when entering this function */
-     fibril_switch(FIBRIL_FROM_BLOCKED);
-     futex_unlock(&async_futex);
+     fibril_switch(FIBRIL_TO_MANAGER);
+
+     /* Futex is up automatically after fibril_switch */

      if (!msg->done)
…
      assert(!msg->destroyed);

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      if (msg->done) {
…
      }

-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
  }
…
      tv_add_diff(&awaiter.to_event.expires, timeout);

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      async_insert_timeout(&awaiter);

      /* Leave the async_futex locked when entering this function */
-     fibril_switch(FIBRIL_FROM_BLOCKED);
-     futex_unlock(&async_futex);
+     fibril_switch(FIBRIL_TO_MANAGER);
+
+     /* Futex is up automatically after fibril_switch() */
  }
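Both sides of the changeset share one blocking convention: a fibril takes async_futex, registers itself as an awaiter, and switches away; they differ only in which switch type is used and who releases the futex. A hedged sketch of the rfbfe59d pattern (the msg/wdata field names follow the awaiter usage visible in this file, but this is an illustration, not a verbatim function body):

    futex_down(&async_futex);

    if (!msg->done) {
        /* Register as an awaiter, then let the manager fibril run. */
        msg->wdata.fid = fibril_get_id();
        msg->wdata.active = false;
        fibril_switch(FIBRIL_TO_MANAGER);
        /* async_futex was released on our behalf during the switch. */
    } else {
        futex_up(&async_futex);
    }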
uspace/lib/c/generic/async/server.c
  #include <abi/mm/as.h>
  #include "../private/libc.h"
- #include "../private/fibril.h"

  /** Async framework global futex */
…
       * Remove myself from the connection hash table.
       */
-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      hash_table_remove(&conn_hash_table, &(conn_key_t){
          .task_id = fibril_connection->in_task_id,
          .phone_hash = fibril_connection->in_phone_hash
      });
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      /*
…
      /* Add connection to the connection hash table */

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      hash_table_insert(&conn_hash_table, &conn->link);
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      fibril_add_ready(conn->wdata.fid);
…
      assert(call);

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      ht_link_t *link = hash_table_find(&conn_hash_table, &(conn_key_t){
…
      });
      if (!link) {
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          return false;
      }
…
      msg_t *msg = malloc(sizeof(*msg));
      if (!msg) {
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          return false;
      }
…
      }

-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
      return true;
  }
…
      connection_t *conn = fibril_connection;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);

      if (usecs) {
…
          memset(call, 0, sizeof(ipc_call_t));
          IPC_SET_IMETHOD(*call, IPC_M_PHONE_HUNGUP);
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          return conn->close_chandle;
      }
…
       * case, route_call() will perform the wakeup.
       */
-     fibril_switch(FIBRIL_FROM_BLOCKED);
-
+     fibril_switch(FIBRIL_TO_MANAGER);
+
+     /*
+      * Futex is up after getting back from async_manager.
+      * Get it again.
+      */
+     futex_down(&async_futex);
      if ((usecs) && (conn->wdata.to_event.occurred) &&
          (list_empty(&conn->msg_queue))) {
          /* If we timed out -> exit */
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          return CAP_NIL;
      }
…
      free(msg);

-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
      return chandle;
  }
…
      assert(call);

-     if (call->flags & IPC_CALL_ANSWERED)
-         return;
-
-     if (chandle == CAP_NIL) {
-         if (call->flags & IPC_CALL_NOTIF) {
-             /* Kernel notification */
-             queue_notification(call);
-         }
+     /* Kernel notification */
+     if ((chandle == CAP_NIL) && (call->flags & IPC_CALL_NOTIF)) {
+         queue_notification(call);
          return;
      }
…

  /** Fire all timeouts that expired. */
- static suseconds_t handle_expired_timeouts(unsigned int *flags)
- {
-     /* Make sure the async_futex is held. */
-     futex_assert_is_locked(&async_futex);
-
+ static void handle_expired_timeouts(void)
+ {
      struct timeval tv;
      getuptime(&tv);

-     bool fired = false;
+     futex_down(&async_futex);

      link_t *cur = list_first(&timeout_list);
…
          list_get_instance(cur, awaiter_t, to_event.link);

-         if (tv_gt(&waiter->to_event.expires, &tv)) {
-             if (fired) {
-                 *flags = SYNCH_FLAGS_NON_BLOCKING;
-                 return 0;
-             }
-             *flags = 0;
-             return tv_sub_diff(&waiter->to_event.expires, &tv);
-         }
+         if (tv_gt(&waiter->to_event.expires, &tv))
+             break;

          list_remove(&waiter->to_event.link);
…
          waiter->active = true;
          fibril_add_ready(waiter->fid);
-         fired = true;

      }
…
      }

-     if (fired) {
-         *flags = SYNCH_FLAGS_NON_BLOCKING;
-         return 0;
-     }
-
-     return SYNCH_NO_TIMEOUT;
+     futex_up(&async_futex);
  }
…
  {
      while (true) {
-         futex_lock(&async_futex);
-         fibril_switch(FIBRIL_FROM_MANAGER);
-
-         /*
-          * The switch only returns when there is no non-manager fibril
-          * it can run.
-          */
-
+         if (fibril_switch(FIBRIL_FROM_MANAGER)) {
+             futex_up(&async_futex);
+             /*
+              * async_futex is always held when entering a manager
+              * fibril.
+              */
+             continue;
+         }
+
+         futex_down(&async_futex);
+
+         suseconds_t timeout;
          unsigned int flags = SYNCH_FLAGS_NONE;
-         suseconds_t next_timeout = handle_expired_timeouts(&flags);
-         futex_unlock(&async_futex);
+         if (!list_empty(&timeout_list)) {
+             awaiter_t *waiter = list_get_instance(
+                 list_first(&timeout_list), awaiter_t, to_event.link);
+
+             struct timeval tv;
+             getuptime(&tv);
+
+             if (tv_gteq(&tv, &waiter->to_event.expires)) {
+                 futex_up(&async_futex);
+                 handle_expired_timeouts();
+                 /*
+                  * Notice that even if the event(s) already
+                  * expired (and thus the other fibril was
+                  * supposed to be running already),
+                  * we check for incoming IPC.
+                  *
+                  * Otherwise, a fibril that continuously
+                  * creates (almost) expired events could
+                  * prevent IPC retrieval from the kernel.
+                  */
+                 timeout = 0;
+                 flags = SYNCH_FLAGS_NON_BLOCKING;
+
+             } else {
+                 timeout = tv_sub_diff(&waiter->to_event.expires,
+                     &tv);
+                 futex_up(&async_futex);
+             }
+         } else {
+             futex_up(&async_futex);
+             timeout = SYNCH_NO_TIMEOUT;
+         }

          atomic_inc(&threads_in_ipc_wait);

          ipc_call_t call;
-         errno_t rc = ipc_wait_cycle(&call, next_timeout, flags);
+         errno_t rc = ipc_wait_cycle(&call, timeout, flags);

          atomic_dec(&threads_in_ipc_wait);

          assert(rc == EOK);
+
+         if (call.cap_handle == CAP_NIL) {
+             if ((call.flags &
+                 (IPC_CALL_NOTIF | IPC_CALL_ANSWERED)) == 0) {
+                 /* Neither a notification nor an answer. */
+                 handle_expired_timeouts();
+                 continue;
+             }
+         }
+
+         if (call.flags & IPC_CALL_ANSWERED)
+             continue;
+
          handle_call(call.cap_handle, &call);
      }
…
  static errno_t async_manager_fibril(void *arg)
  {
+     futex_up(&async_futex);
+
+     /*
+      * async_futex is always locked when entering manager
+      */
      async_manager_worker();
+
      return 0;
  }
…
  }

- _Noreturn void async_manager(void)
- {
-     futex_lock(&async_futex);
-     fibril_switch(FIBRIL_FROM_DEAD);
-     __builtin_unreachable();
- }
-
  /** @}
   */
uspace/lib/c/generic/fibril.c
  #include <async.h>

- #include "private/fibril.h"
+ #ifdef FUTEX_UPGRADABLE
+ #include <rcu.h>
+ #endif

  /**
…
  static void fibril_main(void)
  {
-     /* fibril_futex and async_futex are locked when a fibril is started. */
-     futex_unlock(&fibril_futex);
-     futex_unlock(&async_futex);
-
-     fibril_t *fibril = fibril_self();
+     /* fibril_futex is locked when a fibril is first started. */
+     futex_unlock(&fibril_futex);
+
+     fibril_t *fibril = __tcb_get()->fibril_data;
+
+ #ifdef FUTEX_UPGRADABLE
+     rcu_register_fibril();
+ #endif

      /* Call the implementing function. */
      fibril->retval = fibril->func(fibril->arg);

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      fibril_switch(FIBRIL_FROM_DEAD);
      /* Not reached */
…
          return NULL;

-     fibril_t *fibril = calloc(1, sizeof(fibril_t));
+     fibril_t *fibril = malloc(sizeof(fibril_t));
      if (!fibril) {
          tls_free(tcb);
…
      tcb->fibril_data = fibril;
      fibril->tcb = tcb;
+
+     fibril->func = NULL;
+     fibril->arg = NULL;
+     fibril->stack = NULL;
+     fibril->clean_after_me = NULL;
+     fibril->retval = 0;
+     fibril->flags = 0;
+
+     fibril->waits_for = NULL;

      /*
…
  /** Switch from the current fibril.
   *
-  * The async_futex must be held when entering this function,
-  * and is still held on return.
+  * If stype is FIBRIL_TO_MANAGER or FIBRIL_FROM_DEAD, the async_futex must
+  * be held.
   *
   * @param stype Switch type. One of FIBRIL_PREEMPT, FIBRIL_TO_MANAGER,
…
  int fibril_switch(fibril_switch_type_t stype)
  {
-     /* Make sure the async_futex is held. */
-     futex_assert_is_locked(&async_futex);
-
      futex_lock(&fibril_futex);

-     fibril_t *srcf = fibril_self();
+     fibril_t *srcf = __tcb_get()->fibril_data;
      fibril_t *dstf = NULL;

      /* Choose a new fibril to run */
-     if (list_empty(&ready_list)) {
-         if (stype == FIBRIL_PREEMPT || stype == FIBRIL_FROM_MANAGER) {
-             // FIXME: This means that as long as there is a fibril
-             // that only yields, IPC messages are never retrieved.
-             futex_unlock(&fibril_futex);
-             return 0;
-         }
+     switch (stype) {
+     case FIBRIL_TO_MANAGER:
+     case FIBRIL_FROM_DEAD:
+         /* Make sure the async_futex is held. */
+         assert((atomic_signed_t) async_futex.val.count <= 0);

          /* If we are going to manager and none exists, create it */
…
          dstf = list_get_instance(list_first(&manager_list),
              fibril_t, link);
+
+         if (stype == FIBRIL_FROM_DEAD)
+             dstf->clean_after_me = srcf;
+         break;
+     case FIBRIL_PREEMPT:
+     case FIBRIL_FROM_MANAGER:
+         if (list_empty(&ready_list)) {
+             futex_unlock(&fibril_futex);
+             return 0;
+         }
+
          dstf = list_get_instance(list_first(&ready_list), fibril_t,
              link);
-     }
+         break;
+     }
      list_remove(&dstf->link);
-     if (stype == FIBRIL_FROM_DEAD)
-         dstf->clean_after_me = srcf;

      /* Put the current fibril into the correct run list */
…
          break;
      case FIBRIL_FROM_DEAD:
-     case FIBRIL_FROM_BLOCKED:
          // Nothing.
          break;
-     }
-
-     /* Bookkeeping. */
-     futex_give_to(&fibril_futex, dstf);
-     futex_give_to(&async_futex, dstf);
+     case FIBRIL_TO_MANAGER:
+         /*
+          * Don't put the current fibril into any list, it should
+          * already be somewhere, or it will be lost.
+          */
+         break;
+     }
+
+ #ifdef FUTEX_UPGRADABLE
+     if (stype == FIBRIL_FROM_DEAD) {
+         rcu_deregister_fibril();
+     }
+ #endif

      /* Swap to the next fibril. */
…
  }

- fibril_t *fibril_self(void)
- {
-     return __tcb_get()->fibril_data;
- }
-
  /** Return fibril id of the currently running fibril.
   *
…
  fid_t fibril_get_id(void)
  {
-     return (fid_t) fibril_self();
- }
-
- void fibril_yield(void)
- {
-     futex_lock(&async_futex);
-     (void) fibril_switch(FIBRIL_PREEMPT);
-     futex_unlock(&async_futex);
+     return (fid_t) __tcb_get()->fibril_data;
  }
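With rfbfe59d, fibril_yield() becomes a thin inline over fibril_switch(FIBRIL_PREEMPT) (see uspace/lib/c/include/fibril.h below) and returns nonzero only if another ready fibril actually ran. The typical consumer is a cooperative polling loop (sketch; spin_until is illustrative):

    #include <fibril.h>
    #include <stdbool.h>

    /* Busy-wait politely: give other ready fibrils a chance on each spin. */
    static void spin_until(volatile bool *flag)
    {
        while (!*flag)
            (void) fibril_yield();
    }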
uspace/lib/c/generic/fibril_synch.c
  #include <stdio.h>
  #include "private/async.h"
- #include "private/fibril.h"

  static void optimize_execution_power(void)
…
      fibril_t *f = (fibril_t *) fibril_get_id();

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (fm->counter-- <= 0) {
          awaiter_t wdata;
…
          check_for_deadlock(&fm->oi);
          f->waits_for = &fm->oi;
-         fibril_switch(FIBRIL_FROM_BLOCKED);
+         fibril_switch(FIBRIL_TO_MANAGER);
      } else {
          fm->oi.owned_by = f;
-     }
-     futex_unlock(&async_futex);
+         futex_up(&async_futex);
+     }
  }
…
      bool locked = false;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (fm->counter > 0) {
          fm->counter--;
…
          locked = true;
      }
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      return locked;
…
      assert(fibril_mutex_is_locked(fm));
-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      _fibril_mutex_unlock_unsafe(fm);
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
  }
…
      bool locked = false;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (fm->counter <= 0)
          locked = true;
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      return locked;
…
      fibril_t *f = (fibril_t *) fibril_get_id();

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (frw->writers) {
          awaiter_t wdata;
…
          wdata.fid = (fid_t) f;
          wdata.wu_event.inlist = true;
-         f->is_writer = false;
+         f->flags &= ~FIBRIL_WRITER;
          list_append(&wdata.wu_event.link, &frw->waiters);
          check_for_deadlock(&frw->oi);
          f->waits_for = &frw->oi;
-         fibril_switch(FIBRIL_FROM_BLOCKED);
+         fibril_switch(FIBRIL_TO_MANAGER);
      } else {
          /* Consider the first reader the owner. */
          if (frw->readers++ == 0)
              frw->oi.owned_by = f;
-     }
-     futex_unlock(&async_futex);
+         futex_up(&async_futex);
+     }
  }
…
      fibril_t *f = (fibril_t *) fibril_get_id();

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (frw->writers || frw->readers) {
          awaiter_t wdata;
…
          wdata.fid = (fid_t) f;
          wdata.wu_event.inlist = true;
-         f->is_writer = true;
+         f->flags |= FIBRIL_WRITER;
          list_append(&wdata.wu_event.link, &frw->waiters);
          check_for_deadlock(&frw->oi);
          f->waits_for = &frw->oi;
-         fibril_switch(FIBRIL_FROM_BLOCKED);
+         fibril_switch(FIBRIL_TO_MANAGER);
      } else {
          frw->oi.owned_by = f;
          frw->writers++;
-     }
-     futex_unlock(&async_futex);
+         futex_up(&async_futex);
+     }
  }

  static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
  {
-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (frw->readers) {
          if (--frw->readers) {
…
          f->waits_for = NULL;

-         if (f->is_writer) {
+         if (f->flags & FIBRIL_WRITER) {
              if (frw->readers)
                  break;
…
      }
  out:
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
  }
…
      bool locked = false;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (frw->readers)
          locked = true;
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      return locked;
…
      bool locked = false;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (frw->writers) {
          assert(frw->writers == 1);
          locked = true;
      }
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      return locked;
…
      wdata.wu_event.inlist = true;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      if (timeout) {
          getuptime(&wdata.to_event.expires);
…
      list_append(&wdata.wu_event.link, &fcv->waiters);
      _fibril_mutex_unlock_unsafe(fm);
-     fibril_switch(FIBRIL_FROM_BLOCKED);
-     futex_unlock(&async_futex);
-
-     // XXX: This could be replaced with an unlocked version to get rid
-     // of the unlock-lock pair. I deliberately don't do that because
-     // further changes would most likely need to revert that optimization.
+     fibril_switch(FIBRIL_TO_MANAGER);
      fibril_mutex_lock(fm);

-     futex_lock(&async_futex);
+     /* async_futex not held after fibril_switch() */
+     futex_down(&async_futex);
      if (wdata.to_event.inlist)
          list_remove(&wdata.to_event.link);
      if (wdata.wu_event.inlist)
          list_remove(&wdata.wu_event.link);
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      return wdata.to_event.occurred ? ETIMEOUT : EOK;
…
      awaiter_t *wdp;

-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      while (!list_empty(&fcv->waiters)) {
          tmp = list_first(&fcv->waiters);
…
          }
      }
-     futex_unlock(&async_futex);
+     futex_up(&async_futex);
  }
…
  void fibril_semaphore_up(fibril_semaphore_t *sem)
  {
-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      sem->count++;

      if (sem->count > 0) {
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          return;
      }
…
      list_remove(tmp);

-     futex_unlock(&async_futex);
+     futex_up(&async_futex);

      awaiter_t *wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
…
  void fibril_semaphore_down(fibril_semaphore_t *sem)
  {
-     futex_lock(&async_futex);
+     futex_down(&async_futex);
      sem->count--;

      if (sem->count >= 0) {
-         futex_unlock(&async_futex);
+         futex_up(&async_futex);
          return;
      }
…
      wdata.fid = fibril_get_id();
      list_append(&wdata.wu_event.link, &sem->waiters);
-
-     fibril_switch(FIBRIL_FROM_BLOCKED);
-     futex_unlock(&async_futex);
+     fibril_switch(FIBRIL_TO_MANAGER);
+
+     /* async_futex not held after fibril_switch() */
  }
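The public fibril synchronization API is unchanged by this diff; only the internals (futex_lock vs. futex_down, switch types) differ. For reference, a usage sketch assuming the declaration macros from <fibril_synch.h>:

    #include <fibril_synch.h>
    #include <stdbool.h>

    static FIBRIL_MUTEX_INITIALIZE(lock);
    static FIBRIL_CONDVAR_INITIALIZE(cv);
    static bool ready;

    static void producer(void)
    {
        fibril_mutex_lock(&lock);
        ready = true;
        fibril_condvar_signal(&cv);     /* wake one waiter */
        fibril_mutex_unlock(&lock);
    }

    static void consumer(void)
    {
        fibril_mutex_lock(&lock);
        while (!ready)
            fibril_condvar_wait(&cv, &lock);   /* drops and re-takes lock */
        fibril_mutex_unlock(&lock);
    }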
uspace/lib/c/generic/futex.c
  #include <futex.h>
-
- #include <assert.h>
  #include <atomic.h>
- #include <fibril.h>
- #include <io/kio.h>
-
- #include "private/fibril.h"
-
- //#define DPRINTF(...) kio_printf(__VA_ARGS__)
- #define DPRINTF(...) ((void)0)

  /** Initialize futex counter.
…
  }

- #ifdef CONFIG_DEBUG_FUTEX

- void __futex_assert_is_locked(futex_t *futex, const char *name)
+ #ifdef FUTEX_UPGRADABLE
+
+ int _upgrade_futexes = 0;
+ static futex_t upg_and_wait_futex = FUTEX_INITIALIZER;
+
+ void futex_upgrade_all_and_wait(void)
  {
-     void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-     fibril_t *self = (fibril_t *) fibril_get_id();
-     if (owner != self) {
-         DPRINTF("Assertion failed: %s (%p) is not locked by fibril %p (instead locked by fibril %p).\n", name, futex, self, owner);
-     }
-     assert(owner == self);
- }
+     futex_down(&upg_and_wait_futex);

- void __futex_assert_is_not_locked(futex_t *futex, const char *name)
- {
-     void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-     fibril_t *self = (fibril_t *) fibril_get_id();
-     if (owner == self) {
-         DPRINTF("Assertion failed: %s (%p) is already locked by fibril %p.\n", name, futex, self);
-     }
-     assert(owner != self);
- }
-
- void __futex_lock(futex_t *futex, const char *name)
- {
-     /* We use relaxed atomics to avoid violating C11 memory model.
-      * They should compile to regular load/stores, but simple assignments
-      * would be UB by definition.
-      */
-
-     fibril_t *self = (fibril_t *) fibril_get_id();
-     DPRINTF("Locking futex %s (%p) by fibril %p.\n", name, futex, self);
-     __futex_assert_is_not_locked(futex, name);
-     futex_down(futex);
-
-     void *prev_owner = __atomic_exchange_n(&futex->owner, self, __ATOMIC_RELAXED);
-     assert(prev_owner == NULL);
-
-     atomic_inc(&self->futex_locks);
- }
-
- void __futex_unlock(futex_t *futex, const char *name)
- {
-     fibril_t *self = (fibril_t *) fibril_get_id();
-     DPRINTF("Unlocking futex %s (%p) by fibril %p.\n", name, futex, self);
-     __futex_assert_is_locked(futex, name);
-     __atomic_store_n(&futex->owner, NULL, __ATOMIC_RELAXED);
-     atomic_dec(&self->futex_locks);
-     futex_up(futex);
- }
-
- bool __futex_trylock(futex_t *futex, const char *name)
- {
-     fibril_t *self = (fibril_t *) fibril_get_id();
-     bool success = futex_trydown(futex);
-     if (success) {
-         void *owner = __atomic_load_n(&futex->owner, __ATOMIC_RELAXED);
-         assert(owner == NULL);
-
-         __atomic_store_n(&futex->owner, self, __ATOMIC_RELAXED);
-
-         atomic_inc(&self->futex_locks);
-
-         DPRINTF("Trylock on futex %s (%p) by fibril %p succeeded.\n", name, futex, self);
-     } else {
-         DPRINTF("Trylock on futex %s (%p) by fibril %p failed.\n", name, futex, self);
+     if (!_upgrade_futexes) {
+         rcu_assign(_upgrade_futexes, 1);
+         _rcu_synchronize(BM_BLOCK_THREAD);
      }

-     return success;
- }
-
- void __futex_give_to(futex_t *futex, void *new_owner, const char *name)
- {
-     fibril_t *self = fibril_self();
-     fibril_t *no = new_owner;
-     DPRINTF("Passing futex %s (%p) from fibril %p to fibril %p.\n", name, futex, self, no);
-
-     __futex_assert_is_locked(futex, name);
-     atomic_dec(&self->futex_locks);
-     atomic_inc(&no->futex_locks);
-     __atomic_store_n(&futex->owner, new_owner, __ATOMIC_RELAXED);
+     futex_up(&upg_and_wait_futex);
  }
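The restored upgrade path pairs with the futex_lock() macro in uspace/lib/c/include/futex.h (below): readers sample _upgrade_futexes inside an RCU read section, so the writer can flip the flag once and then wait out every concurrent reader with _rcu_synchronize(BM_BLOCK_THREAD). Condensed from this changeset:

    /* Reader side: futex_lock() (rfbfe59d). */
    rcu_read_lock();
    (fut)->upgraded = rcu_access(_upgrade_futexes);
    if ((fut)->upgraded)
        (void) futex_down((fut));

    /* Writer side: futex_upgrade_all_and_wait() (rfbfe59d). */
    if (!_upgrade_futexes) {
        rcu_assign(_upgrade_futexes, 1);
        _rcu_synchronize(BM_BLOCK_THREAD);
    }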
uspace/lib/c/generic/inet/host.c
   */

- #include <assert.h>
  #include <errno.h>
  #include <inet/addr.h>
uspace/lib/c/generic/inet/hostport.c
   */

- #include <assert.h>
  #include <errno.h>
  #include <inet/addr.h>
uspace/lib/c/generic/libc.c
  #include "private/malloc.h"
  #include "private/io.h"
- #include "private/fibril.h"
+
+ #ifdef FUTEX_UPGRADABLE
+ #include <rcu.h>
+ #endif

  #ifdef CONFIG_RTLD
…
      __tcb_set(fibril->tcb);
+
+
+ #ifdef FUTEX_UPGRADABLE
+     rcu_register_fibril();
+ #endif

      __async_server_init();
uspace/lib/c/generic/rcu.c
  #include <thread.h>

- #include "private/fibril.h"
-

  /** RCU sleeps for RCU_SLEEP_MS before polling an active RCU reader again. */
…
- static void wait_for_readers(size_t reader_group);
+ static void wait_for_readers(size_t reader_group, blocking_mode_t blocking_mode);
  static void force_mb_in_all_threads(void);
  static bool is_preexisting_reader(const fibril_rcu_data_t *fib, size_t group);

- static void lock_sync(void);
+ static void lock_sync(blocking_mode_t blocking_mode);
  static void unlock_sync(void);
- static void sync_sleep(void);
+ static void sync_sleep(blocking_mode_t blocking_mode);

  static bool is_in_group(size_t nesting_cnt, size_t group);
…
      assert(!fibril_rcu.registered);

-     futex_lock(&rcu.list_futex);
+     futex_down(&rcu.list_futex);
      list_append(&fibril_rcu.link, &rcu.fibrils_list);
-     futex_unlock(&rcu.list_futex);
+     futex_up(&rcu.list_futex);

      fibril_rcu.registered = true;
…
      fibril_rcu.nesting_cnt = 0;

-     futex_lock(&rcu.list_futex);
+     futex_down(&rcu.list_futex);
      list_remove(&fibril_rcu.link);
-     futex_unlock(&rcu.list_futex);
+     futex_up(&rcu.list_futex);

      fibril_rcu.registered = false;
…

  /** Blocks until all preexisting readers exit their critical sections. */
- void rcu_synchronize(void)
+ void _rcu_synchronize(blocking_mode_t blocking_mode)
  {
      assert(!rcu_read_locked());
…
      size_t gp_in_progress = ACCESS_ONCE(rcu.cur_gp);

-     lock_sync();
+     lock_sync(blocking_mode);

      /*
…
      size_t new_reader_group = get_other_group(rcu.reader_group);
-     wait_for_readers(new_reader_group);
+     wait_for_readers(new_reader_group, blocking_mode);

      /* Separates waiting for readers in new_reader_group from group flip. */
…
      memory_barrier();

-     wait_for_readers(old_reader_group);
+     wait_for_readers(old_reader_group, blocking_mode);

      /* MB_FORCE_U */
…

  /** Waits for readers of reader_group to exit their readers sections. */
- static void wait_for_readers(size_t reader_group)
- {
-     futex_lock(&rcu.list_futex);
+ static void wait_for_readers(size_t reader_group, blocking_mode_t blocking_mode)
+ {
+     futex_down(&rcu.list_futex);

      list_t quiescent_fibrils;
…
          if (is_preexisting_reader(fib, reader_group)) {
-             futex_unlock(&rcu.list_futex);
-             sync_sleep();
-             futex_lock(&rcu.list_futex);
+             futex_up(&rcu.list_futex);
+             sync_sleep(blocking_mode);
+             futex_down(&rcu.list_futex);
              /* Break to while loop. */
              break;
…
      list_concat(&rcu.fibrils_list, &quiescent_fibrils);
-     futex_unlock(&rcu.list_futex);
- }
-
- static void lock_sync(void)
- {
-     futex_lock(&rcu.sync_lock.futex);
+     futex_up(&rcu.list_futex);
+ }
+
+ static void lock_sync(blocking_mode_t blocking_mode)
+ {
+     futex_down(&rcu.sync_lock.futex);
      if (rcu.sync_lock.locked) {
-         blocked_fibril_t blocked_fib;
-         blocked_fib.id = fibril_get_id();
-
-         list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);
-
-         do {
-             blocked_fib.is_ready = false;
-             futex_unlock(&rcu.sync_lock.futex);
-             futex_lock(&async_futex);
-             fibril_switch(FIBRIL_FROM_BLOCKED);
-             futex_unlock(&async_futex);
-             futex_lock(&rcu.sync_lock.futex);
-         } while (rcu.sync_lock.locked);
-
-         list_remove(&blocked_fib.link);
-         rcu.sync_lock.locked = true;
+         if (blocking_mode == BM_BLOCK_FIBRIL) {
+             blocked_fibril_t blocked_fib;
+             blocked_fib.id = fibril_get_id();
+
+             list_append(&blocked_fib.link, &rcu.sync_lock.blocked_fibrils);
+
+             do {
+                 blocked_fib.is_ready = false;
+                 futex_up(&rcu.sync_lock.futex);
+                 fibril_switch(FIBRIL_TO_MANAGER);
+                 futex_down(&rcu.sync_lock.futex);
+             } while (rcu.sync_lock.locked);
+
+             list_remove(&blocked_fib.link);
+             rcu.sync_lock.locked = true;
+         } else {
+             assert(blocking_mode == BM_BLOCK_THREAD);
+             rcu.sync_lock.blocked_thread_cnt++;
+             futex_up(&rcu.sync_lock.futex);
+             futex_down(&rcu.sync_lock.futex_blocking_threads);
+         }
      } else {
          rcu.sync_lock.locked = true;
…
      if (0 < rcu.sync_lock.blocked_thread_cnt) {
          --rcu.sync_lock.blocked_thread_cnt;
-         futex_unlock(&rcu.sync_lock.futex_blocking_threads);
+         futex_up(&rcu.sync_lock.futex_blocking_threads);
      } else {
          /* Unlock but wake up any fibrils waiting for the lock. */
…
          rcu.sync_lock.locked = false;
-         futex_unlock(&rcu.sync_lock.futex);
-     }
- }
-
- static void sync_sleep(void)
+         futex_up(&rcu.sync_lock.futex);
+     }
+ }
+
+ static void sync_sleep(blocking_mode_t blocking_mode)
  {
      assert(rcu.sync_lock.locked);
…
       * but keep sync locked.
       */
-     futex_unlock(&rcu.sync_lock.futex);
-     async_usleep(RCU_SLEEP_MS * 1000);
-     futex_lock(&rcu.sync_lock.futex);
+     futex_up(&rcu.sync_lock.futex);
+
+     if (blocking_mode == BM_BLOCK_FIBRIL) {
+         async_usleep(RCU_SLEEP_MS * 1000);
+     } else {
+         thread_usleep(RCU_SLEEP_MS * 1000);
+     }
+
+     futex_down(&rcu.sync_lock.futex);
  }
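Update-side usage of this RCU API, for orientation (sketch; item_t and make_item() are illustrative):

    #include <rcu.h>
    #include <stdlib.h>

    typedef struct item {
        int value;
    } item_t;

    static item_t *shared_head;
    static item_t *make_item(void);   /* illustrative allocator */

    static void replace_head(void)
    {
        item_t *old = rcu_access(shared_head);
        rcu_assign(shared_head, make_item());

        /* Expands to _rcu_synchronize(BM_BLOCK_FIBRIL) after this change. */
        rcu_synchronize();

        free(old);   /* no preexisting reader can still hold a reference */
    }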
uspace/lib/c/generic/stacktrace.c
  #include <stdint.h>
  #include <errno.h>
- #include <io/kio.h>

  static errno_t stacktrace_read_uintptr(void *arg, uintptr_t addr, uintptr_t *data);

  static stacktrace_ops_t basic_ops = {
-     .read_uintptr = stacktrace_read_uintptr,
-     .printf = printf,
- };
-
- static stacktrace_ops_t kio_ops = {
-     .read_uintptr = stacktrace_read_uintptr,
-     .printf = kio_printf,
+     .read_uintptr = stacktrace_read_uintptr
  };
…
      while (stacktrace_fp_valid(&st, fp)) {
-         ops->printf("%p: %p()\n", (void *) fp, (void *) pc);
+         printf("%p: %p()\n", (void *) fp, (void *) pc);
          rc = stacktrace_ra_get(&st, fp, &pc);
          if (rc != EOK)
…
  {
      stacktrace_print_generic(&basic_ops, NULL, fp, pc);
- }
-
- void stacktrace_kio_print(void)
- {
-     stacktrace_prepare();
-     stacktrace_print_generic(&kio_ops, NULL, stacktrace_fp_get(), stacktrace_pc_get());
-
-     /*
-      * Prevent the tail call optimization of the previous call by
-      * making it a non-tail call.
-      */
-
-     kio_printf("-- end of stack trace --\n");
  }
uspace/lib/c/generic/thread.c
  #include <as.h>
  #include "private/thread.h"
- #include "private/fibril.h"
+
+ #ifdef FUTEX_UPGRADABLE
+ #include <rcu.h>
+ #endif
+

  /** Main thread function.
…
      __tcb_set(fibril->tcb);
+
+ #ifdef FUTEX_UPGRADABLE
+     rcu_register_fibril();
+     futex_upgrade_all_and_wait();
+ #endif

      uarg->uspace_thread_function(uarg->uspace_thread_arg);
…
      /* If there is a manager, destroy it */
      async_destroy_manager();
+
+ #ifdef FUTEX_UPGRADABLE
+     rcu_deregister_fibril();
+ #endif

      fibril_teardown(fibril, false);
uspace/lib/c/generic/time.c
  #include <loc.h>
  #include <device/clock_dev.h>
+ #include <thread.h>

  #define ASCTIME_BUF_LEN  26
…
   * @param tv2 Second timeval.
   */
- void tv_add(struct timeval *tv1, const struct timeval *tv2)
+ void tv_add(struct timeval *tv1, struct timeval *tv2)
  {
      tv1->tv_sec += tv2->tv_sec;
…
   *
   */
- suseconds_t tv_sub_diff(const struct timeval *tv1, const struct timeval *tv2)
+ suseconds_t tv_sub_diff(struct timeval *tv1, struct timeval *tv2)
  {
      return (tv1->tv_usec - tv2->tv_usec) +
…
   *
   */
- void tv_sub(struct timeval *tv1, const struct timeval *tv2)
+ void tv_sub(struct timeval *tv1, struct timeval *tv2)
  {
      tv1->tv_sec -= tv2->tv_sec;
…
   *
   */
- int tv_gt(const struct timeval *tv1, const struct timeval *tv2)
+ int tv_gt(struct timeval *tv1, struct timeval *tv2)
  {
      if (tv1->tv_sec > tv2->tv_sec)
…
   *
   */
- int tv_gteq(const struct timeval *tv1, const struct timeval *tv2)
+ int tv_gteq(struct timeval *tv1, struct timeval *tv2)
  {
      if (tv1->tv_sec > tv2->tv_sec)
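The const qualifiers come off these helpers; their semantics are unchanged. A small worked example of the deadline arithmetic they support (sketch; usecs_left is illustrative):

    #include <sys/time.h>

    /* Time remaining to a deadline 500 ms from now, in microseconds. */
    static suseconds_t usecs_left(void)
    {
        struct timeval deadline;
        getuptime(&deadline);
        tv_add_diff(&deadline, 500000);   /* deadline = now + 500 ms */

        struct timeval now;
        getuptime(&now);

        if (tv_gteq(&now, &deadline))
            return 0;                     /* already expired */

        return tv_sub_diff(&deadline, &now);
    }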
uspace/lib/c/include/async.h
  typedef struct async_exch async_exch_t;

- extern _Noreturn void async_manager(void);
+ #define async_manager() \
+     do { \
+         futex_down(&async_futex); \
+         fibril_switch(FIBRIL_FROM_DEAD); \
+     } while (0)

  #define async_get_call(data) \
uspace/lib/c/include/fibril.h
  #define LIBC_FIBRIL_H_

+ #include <context.h>
  #include <types/common.h>
+ #include <adt/list.h>
+ #include <libarch/tls.h>

- typedef struct fibril fibril_t;
+ #define FIBRIL_WRITER  1
+
+ struct fibril;

  typedef struct {
-     fibril_t *owned_by;
+     struct fibril *owned_by;
  } fibril_owner_info_t;

+ typedef enum {
+     FIBRIL_PREEMPT,
+     FIBRIL_TO_MANAGER,
+     FIBRIL_FROM_MANAGER,
+     FIBRIL_FROM_DEAD
+ } fibril_switch_type_t;
+
  typedef sysarg_t fid_t;
+
+ typedef struct fibril {
+     link_t link;
+     link_t all_link;
+     context_t ctx;
+     void *stack;
+     void *arg;
+     errno_t (*func)(void *);
+     tcb_t *tcb;
+
+     struct fibril *clean_after_me;
+     errno_t retval;
+     int flags;
+
+     fibril_owner_info_t *waits_for;
+ } fibril_t;

  /** Fibril-local variable specifier */
…
  extern fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t);
  extern void fibril_destroy(fid_t fid);
+ extern fibril_t *fibril_setup(void);
+ extern void fibril_teardown(fibril_t *f, bool locked);
+ extern int fibril_switch(fibril_switch_type_t stype);
  extern void fibril_add_ready(fid_t fid);
+ extern void fibril_add_manager(fid_t fid);
+ extern void fibril_remove_manager(void);
  extern fid_t fibril_get_id(void);
- extern void fibril_yield(void);

  static inline fid_t fibril_create(errno_t (*func)(void *), void *arg)
…
  }

+ static inline int fibril_yield(void)
+ {
+     return fibril_switch(FIBRIL_PREEMPT);
+ }
+
  #endif
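The creation entry points are untouched by this diff; what changes is how much of fibril_t the header exposes. Basic usage for reference (sketch):

    #include <errno.h>
    #include <fibril.h>

    static errno_t worker(void *arg)
    {
        (void) arg;
        /* ... do work, calling fibril_yield() inside long loops ... */
        return EOK;
    }

    static void start_worker(void)
    {
        fid_t fid = fibril_create(worker, NULL);
        if (fid != 0)
            fibril_add_ready(fid);   /* schedule it; fibrils run cooperatively */
    }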
uspace/lib/c/include/futex.h
  #define LIBC_FUTEX_H_

- #include <assert.h>
  #include <atomic.h>
  #include <errno.h>
  #include <libc.h>
- #include <time.h>

  typedef struct futex {
      atomic_t val;
- #ifdef CONFIG_DEBUG_FUTEX
-     _Atomic void *owner;
+ #ifdef FUTEX_UPGRADABLE
+     int upgraded;
  #endif
  } futex_t;

+
  extern void futex_initialize(futex_t *futex, int value);

- #ifdef CONFIG_DEBUG_FUTEX
+ #ifdef FUTEX_UPGRADABLE
+ #include <rcu.h>

- #define FUTEX_INITIALIZE(val) {{ (val) }, NULL }
- #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)
+ #define FUTEX_INITIALIZE(val) {{ (val) }, 0}

- void __futex_assert_is_locked(futex_t *, const char *);
- void __futex_assert_is_not_locked(futex_t *, const char *);
- void __futex_lock(futex_t *, const char *);
- void __futex_unlock(futex_t *, const char *);
- bool __futex_trylock(futex_t *, const char *);
- void __futex_give_to(futex_t *, void *, const char *);
+ #define futex_lock(fut) \
+ ({ \
+     rcu_read_lock(); \
+     (fut)->upgraded = rcu_access(_upgrade_futexes); \
+     if ((fut)->upgraded) \
+         (void) futex_down((fut)); \
+ })

- #define futex_lock(futex) __futex_lock((futex), #futex)
- #define futex_unlock(futex) __futex_unlock((futex), #futex)
- #define futex_trylock(futex) __futex_trylock((futex), #futex)
+ #define futex_trylock(fut) \
+ ({ \
+     rcu_read_lock(); \
+     int _upgraded = rcu_access(_upgrade_futexes); \
+     if (_upgraded) { \
+         int _acquired = futex_trydown((fut)); \
+         if (!_acquired) { \
+             rcu_read_unlock(); \
+         } else { \
+             (fut)->upgraded = true; \
+         } \
+         _acquired; \
+     } else { \
+         (fut)->upgraded = false; \
+         1; \
+     } \
+ })

- #define futex_give_to(futex, new_owner) __futex_give_to((futex), (new_owner), #futex)
- #define futex_assert_is_locked(futex) __futex_assert_is_locked((futex), #futex)
- #define futex_assert_is_not_locked(futex) __futex_assert_is_not_locked((futex), #futex)
+ #define futex_unlock(fut) \
+ ({ \
+     if ((fut)->upgraded) \
+         (void) futex_up((fut)); \
+     rcu_read_unlock(); \
+ })
+
+ extern int _upgrade_futexes;
+
+ extern void futex_upgrade_all_and_wait(void);

  #else

  #define FUTEX_INITIALIZE(val) {{ (val) }}
- #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)

  #define futex_lock(fut) (void) futex_down((fut))
…
  #define futex_unlock(fut) (void) futex_up((fut))

- #define futex_give_to(fut, owner) ((void)0)
- #define futex_assert_is_locked(fut) assert((atomic_signed_t) (fut)->val.count <= 0)
- #define futex_assert_is_not_locked(fut) ((void)0)
+ #endif

- #endif
+ #define FUTEX_INITIALIZER FUTEX_INITIALIZE(1)

  /** Try to down the futex.
…
  }

- /** Down the futex with timeout, composably.
-  *
-  * This means that when the operation fails due to a timeout or being
-  * interrupted, the next futex_up() is ignored, which allows certain kinds of
-  * composition of synchronization primitives.
-  *
-  * In most other circumstances, regular futex_down_timeout() is a better choice.
+ /** Down the futex.
   *
   * @param futex Futex.
   *
   * @return ENOENT if there is no such virtual address.
-  * @return ETIMEOUT if timeout expires.
   * @return EOK on success.
   * @return Error code from <errno.h> otherwise.
   *
   */
- static inline errno_t futex_down_composable(futex_t *futex, struct timeval *expires)
+ static inline errno_t futex_down(futex_t *futex)
  {
-     // TODO: Add tests for this.
-
-     /* No timeout by default. */
-     suseconds_t timeout = 0;
-
-     if (expires) {
-         struct timeval tv;
-         getuptime(&tv);
-         if (tv_gteq(&tv, expires)) {
-             /* We can't just return ETIMEOUT. That wouldn't be composable. */
-             timeout = 1;
-         } else {
-             timeout = tv_sub_diff(expires, &tv);
-         }
-
-         assert(timeout > 0);
-     }
-
      if ((atomic_signed_t) atomic_predec(&futex->val) < 0)
-         return (errno_t) __SYSCALL2(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count, (sysarg_t) timeout);
+         return (errno_t) __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count);

      return EOK;
…
  }

- static inline errno_t futex_down_timeout(futex_t *futex, struct timeval *expires)
- {
-     /*
-      * This combination of a "composable" sleep followed by futex_up() on
-      * failure is necessary to prevent breakage due to certain race
-      * conditions.
-      */
-     errno_t rc = futex_down_composable(futex, expires);
-     if (rc != EOK)
-         futex_up(futex);
-     return rc;
- }
-
- /** Down the futex.
-  *
-  * @param futex Futex.
-  *
-  * @return ENOENT if there is no such virtual address.
-  * @return EOK on success.
-  * @return Error code from <errno.h> otherwise.
-  *
-  */
- static inline errno_t futex_down(futex_t *futex)
- {
-     return futex_down_timeout(futex, NULL);
- }
-
  #endif
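In both variants the fast path is a plain atomic counter and the kernel is entered only on contention, which is what the inline futex_down()/futex_up() above implement. A mutex-style usage sketch:

    #include <futex.h>

    static futex_t mtx = FUTEX_INITIALIZER;   /* counter starts at 1 */

    static void critical(void)
    {
        futex_down(&mtx);   /* 1 -> 0: acquired without a syscall */
        /* ... critical section ... */
        futex_up(&mtx);     /* 0 -> 1: no sleeper, so no SYS_FUTEX_WAKEUP */
    }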
uspace/lib/c/include/rcu.h
  #define rcu_access(ptr) ACCESS_ONCE(ptr)

+ typedef enum blocking_mode {
+     BM_BLOCK_FIBRIL,
+     BM_BLOCK_THREAD
+ } blocking_mode_t;
+
  extern void rcu_register_fibril(void);
  extern void rcu_deregister_fibril(void);
…
  extern bool rcu_read_locked(void);

- extern void rcu_synchronize(void);
+ #define rcu_synchronize() _rcu_synchronize(BM_BLOCK_FIBRIL)
+
+ extern void _rcu_synchronize(blocking_mode_t);

  #endif
uspace/lib/c/include/stacktrace.h
  typedef struct {
      errno_t (*read_uintptr)(void *, uintptr_t, uintptr_t *);
-     int (*printf)(const char *, ...);
  } stacktrace_ops_t;
…
  extern void stacktrace_print(void);
- extern void stacktrace_kio_print(void);
  extern void stacktrace_print_fp_pc(uintptr_t, uintptr_t);
  extern void stacktrace_print_generic(stacktrace_ops_t *, void *, uintptr_t,
uspace/lib/c/include/sys/time.h
  };

- #define TIMEVAL_MAX ((struct timeval) { .tv_sec = LONG_MAX, .tv_usec = 999999 })
-
  struct timezone {
      int tz_minuteswest;   /* minutes W of Greenwich */
…
  extern void tv_add_diff(struct timeval *, suseconds_t);
- extern void tv_add(struct timeval *, const struct timeval *);
- extern suseconds_t tv_sub_diff(const struct timeval *, const struct timeval *);
- extern void tv_sub(struct timeval *, const struct timeval *);
- extern int tv_gt(const struct timeval *, const struct timeval *);
- extern int tv_gteq(const struct timeval *, const struct timeval *);
+ extern void tv_add(struct timeval *, struct timeval *);
+ extern suseconds_t tv_sub_diff(struct timeval *, struct timeval *);
+ extern void tv_sub(struct timeval *, struct timeval *);
+ extern int tv_gt(struct timeval *, struct timeval *);
+ extern int tv_gteq(struct timeval *, struct timeval *);
  extern void gettimeofday(struct timeval *, struct timezone *);
  extern void getuptime(struct timeval *);
uspace/srv/volsrv/types/part.h
  #define TYPES_PART_H_

- #include <adt/list.h>
  #include <types/label.h>