Changeset 76cec1e in mainline for src/proc/scheduler.c
- Timestamp:
- 2005-07-15T21:57:30Z (20 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- b4a4c5e3
- Parents:
- e41c47e
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/proc/scheduler.c
re41c47e r76cec1e 103 103 */ 104 104 if (test_and_set(&CPU->kcpulbstarted) == 0) { 105 105 waitq_wakeup(&CPU->kcpulb_wq, 0); 106 106 goto loop; 107 107 } … … 239 239 */ 240 240 before_thread_runs(); 241 241 spinlock_unlock(&THREAD->lock); 242 242 cpu_priority_restore(THREAD->saved_context.pri); 243 243 return; … … 279 279 switch (THREAD->state) { 280 280 case Running: 281 282 283 284 281 THREAD->state = Ready; 282 spinlock_unlock(&THREAD->lock); 283 thread_ready(THREAD); 284 break; 285 285 286 286 case Exiting: 287 frame_free((__address) THREAD->kstack); 288 if (THREAD->ustack) { 289 frame_free((__address) THREAD->ustack); 290 } 291 292 /* 293 * Detach from the containing task. 294 */ 295 spinlock_lock(&TASK->lock); 296 list_remove(&THREAD->th_link); 297 spinlock_unlock(&TASK->lock); 298 299 spinlock_unlock(&THREAD->lock); 300 301 spinlock_lock(&threads_lock); 302 list_remove(&THREAD->threads_link); 303 spinlock_unlock(&threads_lock); 304 305 spinlock_lock(&CPU->lock); 306 if(CPU->fpu_owner==THREAD) CPU->fpu_owner=NULL; 307 spinlock_unlock(&CPU->lock); 308 309 310 free(THREAD); 311 312 break; 313 287 frame_free((__address) THREAD->kstack); 288 if (THREAD->ustack) { 289 frame_free((__address) THREAD->ustack); 290 } 291 292 /* 293 * Detach from the containing task. 294 */ 295 spinlock_lock(&TASK->lock); 296 list_remove(&THREAD->th_link); 297 spinlock_unlock(&TASK->lock); 298 299 spinlock_unlock(&THREAD->lock); 300 301 spinlock_lock(&threads_lock); 302 list_remove(&THREAD->threads_link); 303 spinlock_unlock(&threads_lock); 304 305 spinlock_lock(&CPU->lock); 306 if(CPU->fpu_owner==THREAD) CPU->fpu_owner=NULL; 307 spinlock_unlock(&CPU->lock); 308 309 free(THREAD); 310 311 break; 312 314 313 case Sleeping: 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 314 /* 315 * Prefer the thread after it's woken up. 316 */ 317 THREAD->pri = -1; 318 319 /* 320 * We need to release wq->lock which we locked in waitq_sleep(). 
321 * Address of wq->lock is kept in THREAD->sleep_queue. 322 */ 323 spinlock_unlock(&THREAD->sleep_queue->lock); 324 325 /* 326 * Check for possible requests for out-of-context invocation. 327 */ 328 if (THREAD->call_me) { 329 THREAD->call_me(THREAD->call_me_with); 330 THREAD->call_me = NULL; 331 THREAD->call_me_with = NULL; 332 } 333 334 spinlock_unlock(&THREAD->lock); 335 336 break; 338 337 339 338 default: 340 341 342 343 344 339 /* 340 * Entering state is unexpected. 341 */ 342 panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]); 343 break; 345 344 } 346 345 THREAD = NULL; 347 346 } 348 347 349 348 THREAD = find_best_thread(); 350 349 … … 470 469 t = list_get_instance(l, thread_t, rq_link); 471 470 /* 472 471 * We don't want to steal CPU-wired threads neither threads already stolen. 473 472 * The latter prevents threads from migrating between CPU's without ever being run. 474 473 * We don't want to steal threads whose FPU context is still in CPU. 475 474 */ 476 475 spinlock_lock(&t->lock); … … 498 497 atomic_dec(&nrdy); 499 498 500 499 r->n--; 501 500 list_remove(&t->rq_link); 502 501 … … 528 527 529 528 /* 530 529 * We are not satisfied yet, focus on another CPU next time. 531 530 */ 532 531 k++; … … 553 552 554 553 goto not_satisfied; 555 554 556 555 satisfied: 557 556 /*
Note:
See TracChangeset
for help on using the changeset viewer.