Changeset c0bc189 in mainline for generic/src/synch/waitq.c
- Timestamp:
- 2006-05-19T11:55:55Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 7633b109
- Parents:
- 35f3b8c
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
generic/src/synch/waitq.c
r35f3b8c rc0bc189 158 158 } 159 159 160 161 160 /** Sleep until either wakeup, timeout or interruption occurs 162 161 * … … 164 163 * interrupted from the sleep, restoring a failover context. 165 164 * 166 * Sleepers are organised in FIFO fashion in a structure called wait queue.165 * Sleepers are organised in a FIFO fashion in a structure called wait queue. 167 166 * 168 167 * This function is really basic in that other functions as waitq_sleep() … … 201 200 int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking) 202 201 { 203 volatile ipl_t ipl; /* must be live after context_restore() */ 204 202 ipl_t ipl; 203 int rc; 204 205 ipl = waitq_sleep_prepare(wq); 206 rc = waitq_sleep_timeout_unsafe(wq, usec, nonblocking); 207 waitq_sleep_finish(wq, rc, ipl); 208 return rc; 209 } 210 211 /** Prepare to sleep in a waitq. 212 * 213 * This function will return holding the lock of the wait queue 214 * and interrupts disabled. 215 * 216 * @param wq Wait queue. 217 * 218 * @return Interrupt level as it existed on entry to this function. 219 */ 220 ipl_t waitq_sleep_prepare(waitq_t *wq) 221 { 222 ipl_t ipl; 205 223 206 224 restart: 207 225 ipl = interrupts_disable(); 208 226 209 227 /* 210 228 * Busy waiting for a delayed timeout. … … 217 235 if (THREAD->timeout_pending) { 218 236 spinlock_unlock(&THREAD->lock); 219 interrupts_restore(ipl); 237 interrupts_restore(ipl); 220 238 goto restart; 221 239 } 222 240 spinlock_unlock(&THREAD->lock); 223 241 224 242 spinlock_lock(&wq->lock); 225 243 return ipl; 244 } 245 246 /** Finish waiting in a wait queue. 247 * 248 * This function restores interrupts to the state that existed prior 249 * to the call to waitq_sleep_prepare(). If necessary, the wait queue 250 * lock is released. 251 * 252 * @param wq Wait queue. 253 * @param rc Return code of waitq_sleep_timeout_unsafe(). 254 * @param ipl Interrupt level returned by waitq_sleep_prepare(). 
255 */ 256 void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl) 257 { 258 switch (rc) { 259 case ESYNCH_WOULD_BLOCK: 260 case ESYNCH_OK_ATOMIC: 261 spinlock_unlock(&wq->lock); 262 break; 263 default: 264 break; 265 } 266 interrupts_restore(ipl); 267 } 268 269 /** Internal implementation of waitq_sleep_timeout(). 270 * 271 * This function implements logic of sleeping in a wait queue. 272 * This call must be preceded by a call to waitq_sleep_prepare() 273 * and followed by a call to waitq_sleep_finish(). 274 * 275 * @param wq See waitq_sleep_timeout(). 276 * @param usec See waitq_sleep_timeout(). 277 * @param nonblocking See waitq_sleep_timeout(). 278 * 279 * @return See waitq_sleep_timeout(). 280 */ 281 int waitq_sleep_timeout_unsafe(waitq_t *wq, __u32 usec, int nonblocking) 282 { 226 283 /* checks whether to go to sleep at all */ 227 284 if (wq->missed_wakeups) { 228 285 wq->missed_wakeups--; 229 spinlock_unlock(&wq->lock);230 interrupts_restore(ipl);231 286 return ESYNCH_OK_ATOMIC; 232 287 } … … 234 289 if (nonblocking && (usec == 0)) { 235 290 /* return immediately instead of going to sleep */ 236 spinlock_unlock(&wq->lock);237 interrupts_restore(ipl);238 291 return ESYNCH_WOULD_BLOCK; 239 292 } … … 252 305 /* Short emulation of scheduler() return code. */ 253 306 spinlock_unlock(&THREAD->lock); 254 interrupts_restore(ipl);255 307 return ESYNCH_INTERRUPTED; 256 308 } … … 261 313 /* Short emulation of scheduler() return code. */ 262 314 spinlock_unlock(&THREAD->lock); 263 interrupts_restore(ipl);264 315 return ESYNCH_TIMEOUT; 265 316 } … … 279 330 280 331 scheduler(); /* wq->lock is released in scheduler_separated_stack() */ 281 interrupts_restore(ipl);282 332 283 333 return ESYNCH_OK_BLOCKED
Note:
See TracChangeset
for help on using the changeset viewer.