Changeset c0bc189 in mainline
- Timestamp: 2006-05-19T11:55:55Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 7633b109
- Parents: 35f3b8c
- Location: generic
- Files: 3 edited
Legend:
- Unmodified (context lines, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
generic/include/synch/waitq.h
(r35f3b8c → rc0bc189)

  extern void waitq_initialize(waitq_t *wq);
  extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
+ extern ipl_t waitq_sleep_prepare(waitq_t *wq);
+ extern int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking);
+ extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl);
  extern void waitq_wakeup(waitq_t *wq, bool all);
  extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
generic/src/synch/condvar.c
(r35f3b8c → rc0bc189)

  #include <synch/waitq.h>
  #include <synch/synch.h>
+ #include <arch.h>
+ #include <typedefs.h>
  
- /** Initialize condition variable
-  *
-  * Initialize condition variable.
+ /** Initialize condition variable.
   *
   * @param cv Condition variable.
…
  }
  
- /** Signal the condition has become true
-  *
+ /**
   * Signal the condition has become true
   * to the first waiting thread by waking it up.
…
  }
  
- /** Signal the condition has become true
-  *
+ /**
   * Signal the condition has become true
   * to all waiting threads by waking them up.
…
  }
  
- /** Wait for the condition becoming true
-  *
-  * Wait for the condition becoming true.
+ /** Wait for the condition becoming true.
   *
   * @param cv Condition variable.
…
  {
  	int rc;
+ 	ipl_t ipl;
  
+ 	ipl = waitq_sleep_prepare(&cv->wq);
  	mutex_unlock(mtx);
- 	rc = waitq_sleep_timeout(&cv->wq, usec, trywait);
+ 
+ 	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, trywait);
+ 
  	mutex_lock(mtx);
+ 	waitq_sleep_finish(&cv->wq, rc, ipl);
+ 
  	return rc;
  }
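In the condvar wait path, the wait queue is now entered via waitq_sleep_prepare() before the mutex protecting the condition is released, so dropping the mutex and starting to wait behave as one step with respect to the wait queue lock. In outline, the sequence from the hunk above is as follows (a sketch only; cv, mtx, usec and trywait are as in the surrounding function, whose header falls into the elided part of the hunk):

	int rc;
	ipl_t ipl;

	/* Take cv->wq.lock and disable interrupts before dropping the mutex,
	 * so a concurrent signal on the wait queue cannot run in between. */
	ipl = waitq_sleep_prepare(&cv->wq);
	mutex_unlock(mtx);

	rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, trywait);

	mutex_lock(mtx);
	/* Drop cv->wq.lock if it is still held and restore the interrupt level. */
	waitq_sleep_finish(&cv->wq, rc, ipl);

	return rc;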
generic/src/synch/waitq.c
(r35f3b8c → rc0bc189)

  }
  
  /** Sleep until either wakeup, timeout or interruption occurs
   *
…
   * interrupted from the sleep, restoring a failover context.
   *
-  * Sleepers are organised in FIFO fashion in a structure called wait queue.
+  * Sleepers are organised in a FIFO fashion in a structure called wait queue.
   *
   * This function is really basic in that other functions as waitq_sleep()
…
  int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
  {
- 	volatile ipl_t ipl; /* must be live after context_restore() */
+ 	ipl_t ipl;
+ 	int rc;
+ 
+ 	ipl = waitq_sleep_prepare(wq);
+ 	rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
+ 	waitq_sleep_finish(wq, rc, ipl);
+ 	return rc;
+ }
+ 
+ /** Prepare to sleep in a waitq.
+  *
+  * This function will return holding the lock of the wait queue
+  * and interrupts disabled.
+  *
+  * @param wq Wait queue.
+  *
+  * @return Interrupt level as it existed on entry to this function.
+  */
+ ipl_t waitq_sleep_prepare(waitq_t *wq)
+ {
+ 	ipl_t ipl;
  
  restart:
  	ipl = interrupts_disable();
  
  	/*
  	 * Busy waiting for a delayed timeout.
…
  	if (THREAD->timeout_pending) {
  		spinlock_unlock(&THREAD->lock);
  		interrupts_restore(ipl);
  		goto restart;
  	}
  	spinlock_unlock(&THREAD->lock);
  
  	spinlock_lock(&wq->lock);
+ 	return ipl;
+ }
+ 
+ /** Finish waiting in a wait queue.
+  *
+  * This function restores interrupts to the state that existed prior
+  * to the call to waitq_sleep_prepare(). If necessary, the wait queue
+  * lock is released.
+  *
+  * @param wq Wait queue.
+  * @param rc Return code of waitq_sleep_timeout_unsafe().
+  * @param ipl Interrupt level returned by waitq_sleep_prepare().
+  */
+ void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
+ {
+ 	switch (rc) {
+ 	case ESYNCH_WOULD_BLOCK:
+ 	case ESYNCH_OK_ATOMIC:
+ 		spinlock_unlock(&wq->lock);
+ 		break;
+ 	default:
+ 		break;
+ 	}
+ 	interrupts_restore(ipl);
+ }
+ 
+ /** Internal implementation of waitq_sleep_timeout().
+  *
+  * This function implements logic of sleeping in a wait queue.
+  * This call must be preceeded by a call to waitq_sleep_prepare()
+  * and followed by a call to waitq_slee_finish().
+  *
+  * @param wq See waitq_sleep_timeout().
+  * @param usec See waitq_sleep_timeout().
+  * @param nonblocking See waitq_sleep_timeout().
+  *
+  * @return See waitq_sleep_timeout().
+  */
+ int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking)
+ {
  	/* checks whether to go to sleep at all */
  	if (wq->missed_wakeups) {
  		wq->missed_wakeups--;
- 		spinlock_unlock(&wq->lock);
- 		interrupts_restore(ipl);
  		return ESYNCH_OK_ATOMIC;
  	}
…
  	if (nonblocking && (usec == 0)) {
  		/* return immediatelly instead of going to sleep */
- 		spinlock_unlock(&wq->lock);
- 		interrupts_restore(ipl);
  		return ESYNCH_WOULD_BLOCK;
  	}
…
  		/* Short emulation of scheduler() return code. */
  		spinlock_unlock(&THREAD->lock);
- 		interrupts_restore(ipl);
  		return ESYNCH_INTERRUPTED;
  	}
…
  		/* Short emulation of scheduler() return code. */
  		spinlock_unlock(&THREAD->lock);
- 		interrupts_restore(ipl);
  		return ESYNCH_TIMEOUT;
  	}
…
  
  	scheduler(); /* wq->lock is released in scheduler_separated_stack() */
- 	interrupts_restore(ipl);
  
  	return ESYNCH_OK_BLOCKED;
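Taken together, the waitq.c hunks pin down the locking and interrupt-state contract of the split API. A caller-side summary sketch (annotations follow the waitq_sleep_finish() switch and the code comments above; wq, usec and nonblocking as in waitq_sleep_timeout()):

	ipl_t ipl;
	int rc;

	ipl = waitq_sleep_prepare(wq);
	/* From here on: wq->lock is held and interrupts are disabled. */

	rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking);
	/* ESYNCH_OK_ATOMIC, ESYNCH_WOULD_BLOCK: returned without sleeping, wq->lock still held.
	 * ESYNCH_OK_BLOCKED, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED: the sleep path has already
	 * released wq->lock (see the scheduler_separated_stack() comment above). */

	waitq_sleep_finish(wq, rc, ipl);
	/* Unlocks wq->lock for the first two return codes and restores the original
	 * interrupt level in every case. */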