Changeset 116d1ef4 in mainline
- Timestamp:
- 2006-06-02T12:26:50Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- d0c5901
- Parents:
- 01ebbdf
- Files:
-
- 19 edited
Legend:
- Unmodified
- Added
- Removed
-
arch/ia32/src/smp/smp.c
r01ebbdf r116d1ef4 168 168 * supposed to wake us up. 169 169 */ 170 if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_ BLOCKING) == ESYNCH_TIMEOUT)170 if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) 171 171 printf("%s: waiting for cpu%d (APIC ID = %d) timed out\n", __FUNCTION__, config.cpu_active > i ? config.cpu_active : i, ops->cpu_apic_id(i)); 172 172 } else -
generic/include/ipc/ipc.h
r01ebbdf r116d1ef4 210 210 211 211 extern void ipc_init(void); 212 extern call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking);212 extern call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags); 213 213 extern void ipc_answer(answerbox_t *box, call_t *request); 214 214 extern int ipc_call(phone_t *phone, call_t *call); -
generic/include/proc/thread.h
r01ebbdf r116d1ef4 88 88 context_t sleep_interruption_context; 89 89 90 bool sleep_interruptible; /**< If true, the thread can be interrupted from sleep. */ 90 91 waitq_t *sleep_queue; /**< Wait queue in which this thread sleeps. */ 91 92 timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */ -
generic/include/synch/condvar.h
r01ebbdf r116d1ef4 40 40 41 41 #define condvar_wait(cv,mtx) \ 42 _condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT )42 _condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) 43 43 #define condvar_wait_timeout(cv,mtx,usec) \ 44 _condvar_wait_timeout((cv),(mtx),(usec) )44 _condvar_wait_timeout((cv),(mtx),(usec),SYNCH_FLAGS_NONE) 45 45 46 46 extern void condvar_initialize(condvar_t *cv); 47 47 extern void condvar_signal(condvar_t *cv); 48 48 extern void condvar_broadcast(condvar_t *cv); 49 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec );49 extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags); 50 50 51 51 #endif -
generic/include/synch/futex.h
r01ebbdf r116d1ef4 45 45 46 46 extern void futex_init(void); 47 extern __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown);47 extern __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags); 48 48 extern __native sys_futex_wakeup(__address uaddr); 49 49 -
generic/include/synch/mutex.h
r01ebbdf r116d1ef4 40 40 41 41 #define mutex_lock(mtx) \ 42 _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_ BLOCKING)42 _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) 43 43 #define mutex_trylock(mtx) \ 44 _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_ NON_BLOCKING)44 _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) 45 45 #define mutex_lock_timeout(mtx,usec) \ 46 _mutex_lock_timeout((mtx),(usec),SYNCH_ NON_BLOCKING)46 _mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING) 47 47 #define mutex_lock_active(mtx) \ 48 48 while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC) 49 49 50 50 extern void mutex_initialize(mutex_t *mtx); 51 extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock);51 extern int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags); 52 52 extern void mutex_unlock(mutex_t *mtx); 53 53 -
generic/include/synch/rwlock.h
r01ebbdf r116d1ef4 49 49 50 50 #define rwlock_write_lock(rwl) \ 51 _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_ BLOCKING)51 _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) 52 52 #define rwlock_read_lock(rwl) \ 53 _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_ BLOCKING)53 _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) 54 54 #define rwlock_write_trylock(rwl) \ 55 _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_ NON_BLOCKING)55 _rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) 56 56 #define rwlock_read_trylock(rwl) \ 57 _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_ NON_BLOCKING)57 _rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) 58 58 #define rwlock_write_lock_timeout(rwl,usec) \ 59 _rwlock_write_lock_timeout((rwl),(usec),SYNCH_ NON_BLOCKING)59 _rwlock_write_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE) 60 60 #define rwlock_read_lock_timeout(rwl,usec) \ 61 _rwlock_read_lock_timeout((rwl),(usec),SYNCH_ NON_BLOCKING)61 _rwlock_read_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE) 62 62 63 63 extern void rwlock_initialize(rwlock_t *rwl); 64 64 extern void rwlock_read_unlock(rwlock_t *rwl); 65 65 extern void rwlock_write_unlock(rwlock_t *rwl); 66 extern int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock);67 extern int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock);66 extern int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags); 67 extern int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags); 68 68 69 69 #endif 70 -
generic/include/synch/semaphore.h
r01ebbdf r116d1ef4 41 41 42 42 #define semaphore_down(s) \ 43 _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_ BLOCKING)43 _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) 44 44 #define semaphore_trydown(s) \ 45 _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_ NON_BLOCKING)45 _semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING) 46 46 #define semaphore_down_timeout(s,usec) \ 47 _semaphore_down_timeout((s),(usec),SYNCH_ NON_BLOCKING)47 _semaphore_down_timeout((s),(usec),SYNCH_FLAGS_NONE) 48 48 49 49 extern void semaphore_initialize(semaphore_t *s, int val); 50 extern int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown);50 extern int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags); 51 51 extern void semaphore_up(semaphore_t *s); 52 52 53 53 #endif 54 -
generic/include/synch/synch.h
r01ebbdf r116d1ef4 31 31 32 32 #define SYNCH_NO_TIMEOUT 0 /**< Request with no timeout. */ 33 #define SYNCH_BLOCKING 0 /**< Blocking operation request. */ 34 #define SYNCH_NON_BLOCKING 1 /**< Non-blocking operation request. */ 33 34 #define SYNCH_FLAGS_NONE 0 /**< No flags specified. */ 35 #define SYNCH_FLAGS_NON_BLOCKING (1<<0) /**< Non-blocking operation request. */ 36 #define SYNCH_FLAGS_INTERRUPTIBLE (1<<1) /**< Interruptible operation. */ 35 37 36 38 #define ESYNCH_WOULD_BLOCK 1 /**< Could not satisfy the request without going to sleep. */ -
generic/include/synch/waitq.h
r01ebbdf r116d1ef4 53 53 54 54 #define waitq_sleep(wq) \ 55 waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_ BLOCKING)55 waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE) 56 56 57 57 extern void waitq_initialize(waitq_t *wq); 58 extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);58 extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags); 59 59 extern ipl_t waitq_sleep_prepare(waitq_t *wq); 60 extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking);60 extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags); 61 61 extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl); 62 62 extern void waitq_wakeup(waitq_t *wq, bool all); -
generic/src/ipc/ipc.c
r01ebbdf r116d1ef4 143 143 144 144 ipc_call(phone, request); 145 ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_ BLOCKING);145 ipc_wait_for_call(&sync_box, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 146 146 } 147 147 … … 306 306 * @param usec Timeout in microseconds. See documentation for waitq_sleep_timeout() for 307 307 * description of its special meaning. 308 * @param nonblocking Blocking vs. non-blocking operation mode switch. See documentation309 * for waitq_sleep_timeout()for description of its special meaning.308 * @param flags Select mode of sleep operation. See documentation for waitq_sleep_timeout() 309 * for description of its special meaning. 310 310 * @return Received message address 311 311 * - to distinguish between call and answer, look at call->flags 312 312 */ 313 call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int nonblocking)313 call_t * ipc_wait_for_call(answerbox_t *box, __u32 usec, int flags) 314 314 { 315 315 call_t *request; … … 318 318 319 319 restart: 320 rc = waitq_sleep_timeout(&box->wq, usec, nonblocking);320 rc = waitq_sleep_timeout(&box->wq, usec, flags); 321 321 if (SYNCH_FAILED(rc)) 322 322 return NULL; … … 413 413 /* Wait for all async answers to arrive */ 414 414 while (atomic_get(&task->active_calls)) { 415 call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_ BLOCKING);415 call = ipc_wait_for_call(&task->answerbox, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE); 416 416 ASSERT((call->flags & IPC_CALL_ANSWERED) || (call->flags & IPC_CALL_NOTIF)); 417 417 ASSERT(! (call->flags & IPC_CALL_STATIC_ALLOC)); -
generic/src/ipc/sysipc.c
r01ebbdf r116d1ef4 503 503 * @param calldata Pointer to buffer where the call/answer data is stored 504 504 * @param usec Timeout. See waitq_sleep_timeout() for explanation. 505 * @param nonblockingSee waitq_sleep_timeout() for explanation.505 * @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation. 506 506 * 507 507 * @return Callid, if callid & 1, then the call is answer 508 508 */ 509 __native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int nonblocking)509 __native sys_ipc_wait_for_call(ipc_data_t *calldata, __u32 usec, int flags) 510 510 { 511 511 call_t *call; 512 512 513 513 restart: 514 call = ipc_wait_for_call(&TASK->answerbox, usec, nonblocking);514 call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); 515 515 if (!call) 516 516 return 0; -
generic/src/proc/thread.c
r01ebbdf r116d1ef4 304 304 305 305 timeout_initialize(&t->sleep_timeout); 306 t->sleep_interruptible = false; 306 307 t->sleep_queue = NULL; 307 308 t->timeout_pending = 0; … … 386 387 waitq_initialize(&wq); 387 388 388 (void) waitq_sleep_timeout(&wq, usec, SYNCH_ NON_BLOCKING);389 (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING); 389 390 } 390 391 -
generic/src/synch/condvar.c
r01ebbdf r116d1ef4 75 75 * @param mtx Mutex. 76 76 * @param usec Timeout value in microseconds. 77 * @param flags Select mode of operation. 77 78 * 78 * For exact description of meaning of possible values of usec, 79 * see comment for waitq_sleep_timeout(). 79 * For exact description of meaning of possible combinations 80 * of usec and flags, see comment for waitq_sleep_timeout(). 81 * Note that when SYNCH_FLAGS_NON_BLOCKING is specified here, 82 * ESYNCH_WOULD_BLOCK is always returned. 80 83 * 81 84 * @return See comment for waitq_sleep_timeout(). 82 85 */ 83 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec )86 int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, __u32 usec, int flags) 84 87 { 85 88 int rc; … … 90 93 91 94 cv->wq.missed_wakeups = 0; /* Enforce blocking. */ 92 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, SYNCH_BLOCKING);95 rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags); 93 96 94 97 mutex_lock(mtx); -
generic/src/synch/futex.c
r01ebbdf r116d1ef4 100 100 * @param uaddr Userspace address of the futex counter. 101 101 * @param usec If non-zero, number of microseconds this thread is willing to sleep. 102 * @param trydown If usec is zero and trydown is non-zero, conditional operation will be attempted.102 * @param flags Select mode of operation. 103 103 * 104 104 * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h. 105 105 * If there is no physical mapping for uaddr ENOENT is returned. 106 106 */ 107 __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int trydown)107 __native sys_futex_sleep_timeout(__address uaddr, __u32 usec, int flags) 108 108 { 109 109 futex_t *futex; … … 131 131 futex = futex_find(paddr); 132 132 133 return (__native) waitq_sleep_timeout(&futex->wq, usec, trydown);133 return (__native) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE); 134 134 } 135 135 -
generic/src/synch/mutex.c
r01ebbdf r116d1ef4 54 54 * @param mtx Mutex. 55 55 * @param usec Timeout in microseconds. 56 * @param trylock Switches between blocking and non-blocking mode.56 * @param flags Specify mode of operation. 57 57 * 58 58 * For exact description of possible combinations of 59 * usec and trylock, see comment for waitq_sleep_timeout().59 * usec and flags, see comment for waitq_sleep_timeout(). 60 60 * 61 61 * @return See comment for waitq_sleep_timeout(). 62 62 */ 63 int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int trylock)63 int _mutex_lock_timeout(mutex_t *mtx, __u32 usec, int flags) 64 64 { 65 return _semaphore_down_timeout(&mtx->sem, usec, trylock);65 return _semaphore_down_timeout(&mtx->sem, usec, flags); 66 66 } 67 67 … … 76 76 semaphore_up(&mtx->sem); 77 77 } 78 -
generic/src/synch/rwlock.c
r01ebbdf r116d1ef4 90 90 * @param rwl Reader/Writer lock. 91 91 * @param usec Timeout in microseconds. 92 * @param trylock Switches between blocking and non-blocking mode.92 * @param flags Specify mode of operation. 93 93 * 94 94 * For exact description of possible combinations of 95 * @usec and @trylock, see comment for waitq_sleep_timeout().95 * usec and flags, see comment for waitq_sleep_timeout(). 96 96 * 97 97 * @return See comment for waitq_sleep_timeout(). 98 98 */ 99 int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)99 int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int flags) 100 100 { 101 101 ipl_t ipl; … … 112 112 * They just need to acquire the exclusive mutex. 113 113 */ 114 rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);114 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 115 115 if (SYNCH_FAILED(rc)) { 116 116 117 117 /* 118 * Lock operation timed out .118 * Lock operation timed out or was interrupted. 119 119 * The state of rwl is UNKNOWN at this point. 120 120 * No claims about its holder can be made. … … 144 144 * @param rwl Reader/Writer lock. 145 145 * @param usec Timeout in microseconds. 146 * @param trylock Switches between blocking and non-blocking mode.146 * @param flags Select mode of operation. 147 147 * 148 148 * For exact description of possible combinations of 149 * usec and trylock, see comment for waitq_sleep_timeout().149 * usec and flags, see comment for waitq_sleep_timeout().
152 152 */ 153 int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)153 int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int flags) 154 154 { 155 155 int rc; … … 200 200 #endif 201 201 202 rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);202 rc = _mutex_lock_timeout(&rwl->exclusive, usec, flags); 203 203 switch (rc) { 204 204 case ESYNCH_WOULD_BLOCK: … … 209 209 spinlock_unlock(&rwl->lock); 210 210 case ESYNCH_TIMEOUT: 211 case ESYNCH_INTERRUPTED: 211 212 /* 212 * The sleep time outed.213 * The sleep timed out. 213 214 * We just restore interrupt priority level. 214 215 */ -
generic/src/synch/semaphore.c
r01ebbdf r116d1ef4 68 68 * @param s Semaphore. 69 69 * @param usec Timeout in microseconds. 70 * @param trydown Switches between blocking and non-blocking mode.70 * @param flags Select mode of operation. 71 71 * 72 72 * For exact description of possible combinations of 73 * usec and trydown, see comment for waitq_sleep_timeout().73 * usec and flags, see comment for waitq_sleep_timeout(). 74 74 * 75 75 * @return See comment for waitq_sleep_timeout(). 76 76 */ 77 int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int trydown)77 int _semaphore_down_timeout(semaphore_t *s, __u32 usec, int flags) 78 78 { 79 return waitq_sleep_timeout(&s->wq, usec, trydown);79 return waitq_sleep_timeout(&s->wq, usec, flags); 80 80 } 81 81 -
generic/src/synch/waitq.c
r01ebbdf r116d1ef4 136 136 spinlock_lock(&t->lock); 137 137 if ((wq = t->sleep_queue)) { /* assignment */ 138 if (!(t->sleep_interruptible)) { 139 /* 140 * The sleep cannot be interrupted. 141 */ 142 spinlock_unlock(&t->lock); 143 goto out; 144 } 145 138 146 if (!spinlock_trylock(&wq->lock)) { 139 147 spinlock_unlock(&t->lock); … … 160 168 /** Sleep until either wakeup, timeout or interruption occurs 161 169 * 162 * This is a sleep implementation which allows itself to be170 * This is a sleep implementation which allows itself to time out or to be 163 171 * interrupted from the sleep, restoring a failover context. 164 172 * … … 170 178 * @param wq Pointer to wait queue. 171 179 * @param usec Timeout in microseconds. 172 * @param nonblocking Blocking vs. non-blocking operation mode switch. 173 * 174 * If usec is greater than zero, regardless of the value of nonblocking, 175 * the call will not return until either timeout or wakeup comes. 176 * 177 * If usec is zero and @nonblocking is zero (false), the call 178 * will not return until wakeup comes. 179 * 180 * If usec is zero and nonblocking is non-zero (true), the call will 180 * @param flags Specify mode of the sleep. 181 * 182 * The sleep can be interrupted only if the 183 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags. 184 185 * If usec is greater than zero, regardless of the value of the 186 * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout, 187 * interruption or wakeup comes. 188 * 189 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call 190 * will not return until wakeup or interruption comes. 191 * 192 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will 181 193 * immediately return, reporting either success or failure. 
182 194 * 183 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, 195 * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED, 184 196 * ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED. 185 197 * … … 198 210 * attempted. 199 211 */ 200 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)212 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int flags) 201 213 { 202 214 ipl_t ipl; … … 204 216 205 217 ipl = waitq_sleep_prepare(wq); 206 rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);218 rc = waitq_sleep_timeout_unsafe(wq, usec, flags); 207 219 waitq_sleep_finish(wq, rc, ipl); 208 220 return rc; … … 277 289 * @param wq See waitq_sleep_timeout(). 278 290 * @param usec See waitq_sleep_timeout(). 279 * @param nonblockingSee waitq_sleep_timeout().291 * @param flags See waitq_sleep_timeout(). 280 292 * 281 293 * @return See waitq_sleep_timeout(). 282 294 */ 283 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)295 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int flags) 284 296 { 285 297 /* checks whether to go to sleep at all */ … … 289 301 } 290 302 else { 291 if ( nonblocking&& (usec == 0)) {303 if ((flags & SYNCH_FLAGS_NON_BLOCKING) && (usec == 0)) { 292 304 /* return immediately instead of going to sleep */ 293 305 return ESYNCH_WOULD_BLOCK; … … 300 312 spinlock_lock(&THREAD->lock); 301 313 302 /* 303 * Set context that will be restored if the sleep 304 * of this thread is ever interrupted. 305 */ 306 if (!context_save(&THREAD->sleep_interruption_context)) { 307 /* Short emulation of scheduler() return code. */ 308 spinlock_unlock(&THREAD->lock); 309 return ESYNCH_INTERRUPTED; 314 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 315 /* 316 * Set context that will be restored if the sleep 317 * of this thread is ever interrupted. 318 */ 319 THREAD->sleep_interruptible = true; 320 if (!context_save(&THREAD->sleep_interruption_context)) { 321 /* Short emulation of scheduler() return code.
*/ 322 spinlock_unlock(&THREAD->lock); 323 return ESYNCH_INTERRUPTED; 324 } 325 } else { 326 THREAD->sleep_interruptible = false; 310 327 } 311 328
Note:
See TracChangeset
for help on using the changeset viewer.