Changeset 897fd8f1 in mainline
- Timestamp: 2017-12-19T18:18:15Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 55b56f4
- Parents: 7f11dc6
- Files: 20 edited
abi/include/abi/synch.h
r7f11dc6 → r897fd8f1
      #define SYNCH_FLAGS_INTERRUPTIBLE  (1 << 1)

    - /** Could not satisfy the request without going to sleep. */
    - #define ESYNCH_WOULD_BLOCK  1
    - /** Timeout occurred. */
    - #define ESYNCH_TIMEOUT  2
    - /** Sleep was interrupted. */
    - #define ESYNCH_INTERRUPTED  4
    - /** Operation succeeded without sleeping. */
    - #define ESYNCH_OK_ATOMIC  8
    - /** Operation succeeded and did sleep. */
    - #define ESYNCH_OK_BLOCKED  16
    -
    - #define SYNCH_FAILED(rc) \
    -     ((rc) & (ESYNCH_WOULD_BLOCK | ESYNCH_TIMEOUT | ESYNCH_INTERRUPTED))
    -
    - #define SYNCH_OK(rc) \
    -     ((rc) & (ESYNCH_OK_ATOMIC | ESYNCH_OK_BLOCKED))
    -
      #endif
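The five ESYNCH_* codes and the SYNCH_FAILED()/SYNCH_OK() helpers are retired in favour of standard <errno.h> codes. A sketch of the mapping, as inferred from the call-site changes in the rest of this changeset (the blocked flag refers to the new out-parameter of waitq_sleep_timeout()):

    /* Old code                    New code (this changeset)
     * ESYNCH_WOULD_BLOCK    ->    EAGAIN
     * ESYNCH_TIMEOUT        ->    ETIMEOUT
     * ESYNCH_INTERRUPTED    ->    EINTR
     * ESYNCH_OK_ATOMIC      ->    EOK, with *blocked == false
     * ESYNCH_OK_BLOCKED     ->    EOK, with *blocked == true
     */

    /* Callers that tested the bit masks now compare against EOK: */
    if (SYNCH_FAILED(rc)) { /* ... */ }   /* before */
    if (rc != EOK) { /* ... */ }          /* after  */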
kernel/arch/ia32/src/smp/smp.c
r7f11dc6 → r897fd8f1
      #include <arch/boot/boot.h>
      #include <assert.h>
    + #include <errno.h>
      #include <genarch/acpi/acpi.h>
      #include <genarch/acpi/madt.h>
    …
       */
      if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
    -     SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {
    +     SYNCH_FLAGS_NONE, NULL) == ETIMEOUT) {
          log(LF_ARCH, LVL_NOTE, "%s: waiting for cpu%u "
              "(APIC ID = %d) timed out", __FUNCTION__,
kernel/arch/sparc64/src/smp/sun4u/smp.c
r7f11dc6 → r897fd8f1
      waking_up_mid = mid;

    - if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) ==
    -     ESYNCH_TIMEOUT)
    + if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
    +     SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
          log(LF_ARCH, LVL_NOTE, "%s: waiting for processor (mid = %" PRIu32
              ") timed out", __func__, mid);
kernel/arch/sparc64/src/smp/sun4v/smp.c
r7f11dc6 → r897fd8f1
      #endif

    - if (waitq_sleep_timeout(&ap_completion_wq, 10000000, SYNCH_FLAGS_NONE) ==
    -     ESYNCH_TIMEOUT)
    + if (waitq_sleep_timeout(&ap_completion_wq, 10000000,
    +     SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
          printf("%s: waiting for processor (cpuid = %" PRIu64 ") timed out\n",
              __func__, cpuid);
kernel/generic/include/synch/semaphore.h
r7f11dc6 → r897fd8f1
      #define KERN_SEMAPHORE_H_

    + #include <errno.h>
      #include <stdint.h>
      #include <synch/waitq.h>
    …
      #define semaphore_down_interruptable(s) \
    -     (ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
    -         SYNCH_FLAGS_INTERRUPTIBLE))
    +     (_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
    +         SYNCH_FLAGS_INTERRUPTIBLE) != EINTR)

      extern void semaphore_initialize(semaphore_t *, int);
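With errno-style codes the macro reads as "true unless interrupted". A minimal usage sketch, assuming a semaphore `sem` initialized elsewhere (the caller and its cleanup are hypothetical):

    if (!semaphore_down_interruptable(&sem)) {
        /* _semaphore_down_timeout() returned EINTR: the sleep was
         * interrupted (e.g. the thread is being killed); back out. */
        return;
    }
    /* Semaphore acquired; proceed. */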
kernel/generic/include/synch/waitq.h
r7f11dc6 → r897fd8f1
      #define waitq_sleep(wq) \
    -     waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
    +     waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL)

      struct thread;

      extern void waitq_initialize(waitq_t *);
    - extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
    + extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int, bool *);
      extern ipl_t waitq_sleep_prepare(waitq_t *);
    - extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int);
    - extern void waitq_sleep_finish(waitq_t *, int, ipl_t);
    + extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, bool *);
    + extern void waitq_sleep_finish(waitq_t *, bool, ipl_t);
      extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
      extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
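Most call sites in this changeset pass NULL for the new `bool *` argument, which keeps the old behaviour when the caller does not care whether it actually slept. A minimal sketch, assuming a waitq_t `wq` initialized elsewhere:

    /* Sleep for at most one second; the blocked flag is not needed here. */
    int rc = waitq_sleep_timeout(&wq, 1000000, SYNCH_FLAGS_INTERRUPTIBLE, NULL);
    switch (rc) {
    case EOK:      /* woken up by waitq_wakeup() */
        break;
    case ETIMEOUT: /* the timeout expired first */
        break;
    case EINTR:    /* the interruptible sleep was interrupted */
        break;
    }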
kernel/generic/src/ipc/ipc.c
r7f11dc6 → r897fd8f1
      restart:
    - rc = waitq_sleep_timeout(&box->wq, usec, flags);
    - if (SYNCH_FAILED(rc))
    + rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
    + if (rc != EOK)
          return NULL;
    …
      phone = list_get_instance(list_first(&box->connected_phones),
          phone_t, link);
    - if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
    + if (mutex_trylock(&phone->lock) != EOK) {
          irq_spinlock_unlock(&box->lock, true);
          DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
kernel/generic/src/proc/thread.c
r7f11dc6 → r897fd8f1
       *
       * Threads that are blocked waiting for a synchronization primitive
    -  * are woken up with a return code of ESYNCH_INTERRUPTED if the
    +  * are woken up with a return code of EINTR if the
       * blocking call was interruptable. See waitq_sleep_timeout().
       *
    …
      irq_spinlock_unlock(&thread->lock, true);

    - return waitq_sleep_timeout(&thread->join_wq, usec, flags);
    + return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
      }
    …
      waitq_initialize(&wq);

    - (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
    + (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
      }
kernel/generic/src/synch/condvar.c
r7f11dc6 → r897fd8f1
       * For exact description of meaning of possible combinations of usec and flags,
       * see comment for waitq_sleep_timeout(). Note that when
    -  * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    +  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
       * returned.
       *
    …
      int rc;
      ipl_t ipl;
    + bool blocked;

      ipl = waitq_sleep_prepare(&cv->wq);
    …
      cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
    - rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    + rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
    + assert(blocked || rc != EOK);

    - waitq_sleep_finish(&cv->wq, rc, ipl);
    + waitq_sleep_finish(&cv->wq, blocked, ipl);
      /* Lock only after releasing the waitq to avoid a possible deadlock. */
      mutex_lock(mtx);
    …
       * For exact description of meaning of possible combinations of usec and flags,
       * see comment for waitq_sleep_timeout(). Note that when
    -  * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    +  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
       * returned.
       *
    …
      int rc;
      ipl_t ipl;
    + bool blocked;

      ipl = waitq_sleep_prepare(&cv->wq);
    …
      cv->wq.missed_wakeups = 0;  /* Enforce blocking. */
    - rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
    + rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
    + assert(blocked || rc != EOK);

    - waitq_sleep_finish(&cv->wq, rc, ipl);
    + waitq_sleep_finish(&cv->wq, blocked, ipl);
      /* Lock only after releasing the waitq to avoid a possible deadlock. */
      spinlock_lock(lock);
    …
       * For exact description of meaning of possible combinations of usec and flags,
       * see comment for waitq_sleep_timeout(). Note that when
    -  * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
    +  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
       * returned.
       *
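The new assert() encodes the invariant that, with missed_wakeups forced to zero, a successful wait must actually have slept. A minimal caller sketch, assuming the _condvar_wait_timeout(cv, mtx, usec, flags) signature implied by the diff; the mutex `m`, condvar `cv`, and predicate `data_ready` are hypothetical:

    mutex_lock(&m);
    while (!data_ready) {
        int rc = _condvar_wait_timeout(&cv, &m, SYNCH_NO_TIMEOUT,
            SYNCH_FLAGS_INTERRUPTIBLE);
        if (rc == EINTR)
            break;  /* interrupted; give up waiting for the predicate */
    }
    mutex_unlock(&m);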
kernel/generic/src/synch/futex.c
r7f11dc6 → r897fd8f1
       *
       * @return If there is no physical mapping for uaddr ENOENT is
    -  *         returned. Otherwise returns a wait result as defined in
    -  *         synch.h.
    +  *         returned. Otherwise returns the return value of
    +  *         waitq_sleep_timeout().
       */
      sysarg_t sys_futex_sleep(uintptr_t uaddr)
    …
      #endif

    - int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
    + int rc = waitq_sleep_timeout(
    +     &futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE, NULL);

      #ifdef CONFIG_UDEBUG
    …
      if (futex) {
          waitq_wakeup(&futex->wq, WAKEUP_FIRST);
    -     return 0;
    +     return EOK;
      } else {
          return (sysarg_t) ENOENT;
kernel/generic/src/synch/mutex.c
r7f11dc6 → r897fd8f1
      #include <assert.h>
    + #include <errno.h>
      #include <synch/mutex.h>
      #include <synch/semaphore.h>
    …
      if (mtx->owner == THREAD) {
          mtx->nesting++;
    -     return ESYNCH_OK_ATOMIC;
    +     return EOK;
      } else {
          rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
    -     if (SYNCH_OK(rc)) {
    +     if (rc == EOK) {
              mtx->owner = THREAD;
              mtx->nesting = 1;
    …
          }
          rc = semaphore_trydown(&mtx->sem);
    - } while (SYNCH_FAILED(rc) &&
    -     !(flags & SYNCH_FLAGS_NON_BLOCKING));
    + } while (rc != EOK && !(flags & SYNCH_FLAGS_NON_BLOCKING));
      if (deadlock_reported)
          printf("cpu%u: not deadlocked\n", CPU->id);
kernel/generic/src/synch/rcu.c
r7f11dc6 → r897fd8f1
          SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);

    - if (ret == ESYNCH_INTERRUPTED) {
    + if (ret == EINTR) {
          spinlock_unlock(&rcu.gp_lock);
          return false;
    …
      /* rcu.expedite_now was signaled. */
    - if (ret == ESYNCH_OK_BLOCKED) {
    + if (ret == EOK) {
          *expedite = true;
      }

      spinlock_unlock(&rcu.gp_lock);

    - return (ret != ESYNCH_INTERRUPTED);
    + return (ret != EINTR);
      }
      }
    …
      int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock,
          SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
    - interrupted = (ret == ESYNCH_INTERRUPTED);
    + interrupted = (ret == EINTR);
      }
    …
          &rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);

    - interrupted = (ret == ESYNCH_INTERRUPTED);
    + interrupted = (ret == EINTR);
      }
    …
      spinlock_unlock(&rcu.gp_lock);

    - return (ret != ESYNCH_INTERRUPTED);
    + return (ret != EINTR);
      }
kernel/generic/src/synch/semaphore.c
r7f11dc6 → r897fd8f1
      int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
      {
    -     return waitq_sleep_timeout(&sem->wq, usec, flags);
    +     return waitq_sleep_timeout(&sem->wq, usec, flags, NULL);
      }
kernel/generic/src/synch/waitq.c
r7f11dc6 → r897fd8f1
      #include <assert.h>
    + #include <errno.h>
      #include <synch/waitq.h>
      #include <synch/spinlock.h>
    …
       * @param flags Specify mode of the sleep.
       *
    +  * @param[out] blocked On return, regardless of the return code,
    +  *                     `*blocked` is set to `true` iff the thread went to
    +  *                     sleep.
    +  *
       * The sleep can be interrupted only if the
       * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
    …
       * call will immediately return, reporting either success or failure.
       *
    -  * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
    -  *         time of the call there was no pending wakeup
    -  * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
    -  * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
    -  *         thread.
    -  * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
    -  *         was a pending wakeup at the time of the call. The caller was not put
    -  *         asleep at all.
    -  * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
    -  *         was attempted.
    -  */
    - int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
    +  * @return EAGAIN, meaning that the sleep failed because it was requested
    +  *         as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
    +  * @return ETIMEOUT, meaning that the sleep timed out.
    +  * @return EINTR, meaning that somebody interrupted the sleeping
    +  *         thread. Check the value of `*blocked` to see if the thread slept,
    +  *         or if a pending interrupt forced it to return immediately.
    +  * @return EOK, meaning that none of the above conditions occured, and the
    +  *         thread was woken up successfuly by `waitq_wakeup()`. Check
    +  *         the value of `*blocked` to see if the thread slept or if
    +  *         the wakeup was already pending.
    +  */
    + int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
      {
          assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));

          ipl_t ipl = waitq_sleep_prepare(wq);
    -     int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
    -     waitq_sleep_finish(wq, rc, ipl);
    +     bool nblocked;
    +     int rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
    +     waitq_sleep_finish(wq, nblocked, ipl);
    +
    +     if (blocked != NULL) {
    +         *blocked = nblocked;
    +     }
          return rc;
      }
    …
       * lock is released.
       *
    -  * @param wq  Wait queue.
    -  * @param rc  Return code of waitq_sleep_timeout_unsafe().
    -  * @param ipl Interrupt level returned by waitq_sleep_prepare().
    -  */
    - void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
    - {
    -     switch (rc) {
    -     case ESYNCH_WOULD_BLOCK:
    -     case ESYNCH_OK_ATOMIC:
    -         irq_spinlock_unlock(&wq->lock, false);
    -         break;
    -     default:
    +  * @param wq      Wait queue.
    +  * @param blocked Out parameter of waitq_sleep_timeout_unsafe().
    +  * @param ipl     Interrupt level returned by waitq_sleep_prepare().
    +  */
    + void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
    + {
    +     if (blocked) {
              /*
               * Wait for a waitq_wakeup() or waitq_unsleep() to complete
               * before returning from waitq_sleep() to the caller. Otherwise
               * the caller might expect that the wait queue is no longer used
               * and deallocate it (although the wakeup on a another cpu has
               * not yet completed and is using the wait queue).
               *
    -          * Note that we have to do this for ESYNCH_OK_BLOCKED and
    -          * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
    -          * where the timeout handler stops using the waitq before waking
    -          * us up. To be on the safe side, ensure the waitq is not in use
    -          * anymore in this case as well.
    +          * Note that we have to do this for EOK and EINTR, but not
    +          * necessarily for ETIMEOUT where the timeout handler stops
    +          * using the waitq before waking us up. To be on the safe side,
    +          * ensure the waitq is not in use anymore in this case as well.
               */
              waitq_complete_wakeup(wq);
    -         break;
    +     } else {
    +         irq_spinlock_unlock(&wq->lock, false);
          }
    …
       * @param flags See waitq_sleep_timeout().
       *
    +  * @param[out] blocked See waitq_sleep_timeout().
    +  *
       * @return See waitq_sleep_timeout().
       */
    - int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
    - {
    + int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
    + {
    +     *blocked = false;
    +
          /* Checks whether to go to sleep at all */
          if (wq->missed_wakeups) {
              wq->missed_wakeups--;
    -         return ESYNCH_OK_ATOMIC;
    +         return EOK;
          } else {
              if (PARAM_NON_BLOCKING(flags, usec)) {
                  /* Return immediately instead of going to sleep */
    -             return ESYNCH_WOULD_BLOCK;
    +             return EAGAIN;
              }
          }
    …
          if (THREAD->interrupted) {
              irq_spinlock_unlock(&THREAD->lock, false);
    -         irq_spinlock_unlock(&wq->lock, false);
    -         return ESYNCH_INTERRUPTED;
    +         return EINTR;
          }
    …
              THREAD->last_cycle = get_cycle();
              irq_spinlock_unlock(&THREAD->lock, false);
    -         return ESYNCH_INTERRUPTED;
    +         return EINTR;
          }
      } else
    …
          THREAD->last_cycle = get_cycle();
          irq_spinlock_unlock(&THREAD->lock, false);
    -     return ESYNCH_TIMEOUT;
    +     return ETIMEOUT;
      }
    …
      THREAD->sleep_queue = wq;

    + /* Must be before entry to scheduler, because there are multiple
    +  * return vectors.
    +  */
    + *blocked = true;
    +
      irq_spinlock_unlock(&THREAD->lock, false);

      scheduler();

    - return ESYNCH_OK_BLOCKED;
    + return EOK;
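The blocked out-parameter decouples "did the call succeed" from "did the thread actually sleep", which the old ESYNCH_OK_ATOMIC/ESYNCH_OK_BLOCKED pair conflated. A sketch of a caller that still needs the distinction (the accounting comments are hypothetical):

    bool blocked;
    int rc = waitq_sleep_timeout(&wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, &blocked);
    if (rc == EOK && !blocked) {
        /* Formerly ESYNCH_OK_ATOMIC: a wakeup was already pending,
         * so the thread never went to sleep at all. */
    } else if (rc == EOK) {
        /* Formerly ESYNCH_OK_BLOCKED: the thread slept and was
         * woken by waitq_wakeup(). */
    }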
kernel/generic/src/synch/workqueue.c
r7f11dc6 → r897fd8f1
      #include <assert.h>
    + #include <errno.h>
      #include <synch/workqueue.h>
      #include <synch/spinlock.h>
    …
          &info->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);

    - stop = (ret == ESYNCH_INTERRUPTED);
    + stop = (ret == EINTR);
      }
kernel/generic/src/sysinfo/stats.c
r7f11dc6 → r897fd8f1
       */

    - if (SYNCH_FAILED(mutex_trylock(&as->lock)))
    + if (mutex_trylock(&as->lock) != EOK)
          return 0;
    …
      as_area_t *area = node->value[i];

    - if (SYNCH_FAILED(mutex_trylock(&area->lock)))
    + if (mutex_trylock(&area->lock) != EOK)
          continue;
    …
       */

    - if (SYNCH_FAILED(mutex_trylock(&as->lock)))
    + if (mutex_trylock(&as->lock) != EOK)
          return 0;
    …
      as_area_t *area = node->value[i];

    - if (SYNCH_FAILED(mutex_trylock(&area->lock)))
    + if (mutex_trylock(&area->lock) != EOK)
          continue;
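The trylock pattern here relies on mutex_trylock() now returning EOK on acquisition and a plain errno code when the lock is busy (presumably EAGAIN, via semaphore_trydown(); the exact busy code is an assumption, which is why the call sites only test against EOK):

    if (mutex_trylock(&as->lock) != EOK)
        return 0;  /* address space busy; report zero rather than block */
    /* ... inspect the address space ... */
    mutex_unlock(&as->lock);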
kernel/generic/src/udebug/udebug.c
r7f11dc6 → r897fd8f1
      wq->missed_wakeups = 0;  /* Enforce blocking. */
    - int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    -
    - waitq_sleep_finish(wq, rc, ipl);
    + bool blocked;
    + (void) waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, &blocked);
    + waitq_sleep_finish(wq, blocked, ipl);
      }
kernel/test/synch/rcu1.c
r7f11dc6 → r897fd8f1
      do {
          int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
    -     joined = (ret != ESYNCH_TIMEOUT);
    +     joined = (ret != ETIMEOUT);

    -     if (ret == ESYNCH_OK_BLOCKED) {
    +     if (ret == EOK) {
              TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
          }
kernel/test/synch/semaphore2.c
r7f11dc6 → r897fd8f1
      TPRINTF("cpu%u, tid %" PRIu64 " down+ (%d)\n", CPU->id, THREAD->tid, to);
      rc = semaphore_down_timeout(&sem, to);
    - if (SYNCH_FAILED(rc)) {
    + if (rc != EOK) {
          TPRINTF("cpu%u, tid %" PRIu64 " down!\n", CPU->id, THREAD->tid);
          return;
uspace/lib/c/include/futex.h
r7f11dc6 → r897fd8f1
      #include <atomic.h>
    + #include <errno.h>
      #include <libc.h>
    …
       *
       * @return ENOENT if there is no such virtual address.
    -  * @return Zero in the uncontended case.
    -  * @return Otherwise one of ESYNCH_OK_ATOMIC or ESYNCH_OK_BLOCKED.
    +  * @return EOK on success.
    +  * @return Error code from <errno.h> otherwise.
       *
       */
    …
          return __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count);

    - return 0;
    + return EOK;
      }
    …
       *
       * @return ENOENT if there is no such virtual address.
    -  * @return Zero in the uncontended case.
    +  * @return EOK on success.
    +  * @return Error code from <errno.h> otherwise.
       *
       */
    …
          return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);

    - return 0;
    + return EOK;
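A minimal usage sketch of the updated libc wrappers; the FUTEX_INITIALIZE initializer and the critical section are assumptions, not part of this diff:

    /* Assumed: libc futex initialized to 1 (unlocked). */
    futex_t ftx = FUTEX_INITIALIZE(1);

    if (futex_down(&ftx) != EOK) {
        /* e.g. ENOENT when the kernel has no mapping for the variable */
    }
    /* ... critical section ... */
    if (futex_up(&ftx) != EOK) {
        /* handle or log the error */
    }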