Changeset 43114c5 in mainline for src/proc/scheduler.c
- Timestamp:
- 2005-04-09T18:22:53Z (20 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8262010
- Parents:
- e6ba9a3f
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/proc/scheduler.c
re6ba9a3f r43114c5 70 70 cpu_priority_high(); 71 71 72 spinlock_lock(& the->cpu->lock);73 n = the->cpu->nrdy;74 spinlock_unlock(& the->cpu->lock);72 spinlock_lock(&CPU->lock); 73 n = CPU->nrdy; 74 spinlock_unlock(&CPU->lock); 75 75 76 76 cpu_priority_low(); … … 82 82 * set CPU-private flag that the kcpulb has been started. 83 83 */ 84 if (test_and_set(& the->cpu->kcpulbstarted) == 0) {85 waitq_wakeup(& the->cpu->kcpulb_wq, 0);84 if (test_and_set(&CPU->kcpulbstarted) == 0) { 85 waitq_wakeup(&CPU->kcpulb_wq, 0); 86 86 goto loop; 87 87 } … … 101 101 102 102 for (i = 0; i<RQ_COUNT; i++) { 103 r = & the->cpu->rq[i];103 r = &CPU->rq[i]; 104 104 spinlock_lock(&r->lock); 105 105 if (r->n == 0) { … … 115 115 spinlock_unlock(&nrdylock); 116 116 117 spinlock_lock(& the->cpu->lock);118 the->cpu->nrdy--;119 spinlock_unlock(& the->cpu->lock);117 spinlock_lock(&CPU->lock); 118 CPU->nrdy--; 119 spinlock_unlock(&CPU->lock); 120 120 121 121 r->n--; … … 130 130 131 131 spinlock_lock(&t->lock); 132 t->cpu = the->cpu;132 t->cpu = CPU; 133 133 134 134 t->ticks = us2ticks((i+1)*10000); … … 160 160 161 161 list_initialize(&head); 162 spinlock_lock(& the->cpu->lock);163 if ( the->cpu->needs_relink > NEEDS_RELINK_MAX) {162 spinlock_lock(&CPU->lock); 163 if (CPU->needs_relink > NEEDS_RELINK_MAX) { 164 164 for (i = start; i<RQ_COUNT-1; i++) { 165 165 /* remember and empty rq[i + 1] */ 166 r = & the->cpu->rq[i + 1];166 r = &CPU->rq[i + 1]; 167 167 spinlock_lock(&r->lock); 168 168 list_concat(&head, &r->rq_head); … … 172 172 173 173 /* append rq[i + 1] to rq[i] */ 174 r = & the->cpu->rq[i];174 r = &CPU->rq[i]; 175 175 spinlock_lock(&r->lock); 176 176 list_concat(&r->rq_head, &head); … … 178 178 spinlock_unlock(&r->lock); 179 179 } 180 the->cpu->needs_relink = 0;181 } 182 spinlock_unlock(& the->cpu->lock);180 CPU->needs_relink = 0; 181 } 182 spinlock_unlock(&CPU->lock); 183 183 184 184 } … … 196 196 halt(); 197 197 198 if ( the->thread) {199 spinlock_lock(& the->thread->lock);200 if 
(!context_save(& the->thread->saved_context)) {198 if (THREAD) { 199 spinlock_lock(&THREAD->lock); 200 if (!context_save(&THREAD->saved_context)) { 201 201 /* 202 202 * This is the place where threads leave scheduler(); 203 203 */ 204 spinlock_unlock(& the->thread->lock);205 cpu_priority_restore( the->thread->saved_context.pri);204 spinlock_unlock(&THREAD->lock); 205 cpu_priority_restore(THREAD->saved_context.pri); 206 206 return; 207 207 } 208 the->thread->saved_context.pri = pri;208 THREAD->saved_context.pri = pri; 209 209 } 210 210 … … 221 221 * scheduler_separated_stack(). 222 222 */ 223 context_save(& the->cpu->saved_context);224 the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];225 the->cpu->saved_context.pc = (__address) scheduler_separated_stack;226 context_restore(& the->cpu->saved_context);223 context_save(&CPU->saved_context); 224 CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8]; 225 CPU->saved_context.pc = (__address) scheduler_separated_stack; 226 context_restore(&CPU->saved_context); 227 227 /* not reached */ 228 228 } … … 232 232 int priority; 233 233 234 if ( the->thread) {235 switch ( the->thread->state) {234 if (THREAD) { 235 switch (THREAD->state) { 236 236 case Running: 237 the->thread->state = Ready;238 spinlock_unlock(& the->thread->lock);239 thread_ready( the->thread);237 THREAD->state = Ready; 238 spinlock_unlock(&THREAD->lock); 239 thread_ready(THREAD); 240 240 break; 241 241 242 242 case Exiting: 243 frame_free((__address) the->thread->kstack);244 if ( the->thread->ustack) {245 frame_free((__address) the->thread->ustack);243 frame_free((__address) THREAD->kstack); 244 if (THREAD->ustack) { 245 frame_free((__address) THREAD->ustack); 246 246 } 247 247 … … 249 249 * Detach from the containing task. 
250 250 */ 251 spinlock_lock(& the->task->lock);252 list_remove(& the->thread->th_link);253 spinlock_unlock(& the->task->lock);254 255 spinlock_unlock(& the->thread->lock);251 spinlock_lock(&TASK->lock); 252 list_remove(&THREAD->th_link); 253 spinlock_unlock(&TASK->lock); 254 255 spinlock_unlock(&THREAD->lock); 256 256 257 257 spinlock_lock(&threads_lock); 258 list_remove(& the->thread->threads_link);258 list_remove(&THREAD->threads_link); 259 259 spinlock_unlock(&threads_lock); 260 260 261 free( the->thread);261 free(THREAD); 262 262 263 263 break; … … 267 267 * Prefer the thread after it's woken up. 268 268 */ 269 the->thread->pri = -1;269 THREAD->pri = -1; 270 270 271 271 /* 272 272 * We need to release wq->lock which we locked in waitq_sleep(). 273 * Address of wq->lock is kept in the->thread->sleep_queue.273 * Address of wq->lock is kept in THREAD->sleep_queue. 274 274 */ 275 spinlock_unlock(& the->thread->sleep_queue->lock);275 spinlock_unlock(&THREAD->sleep_queue->lock); 276 276 277 277 /* 278 278 * Check for possible requests for out-of-context invocation. 279 279 */ 280 if ( the->thread->call_me) {281 the->thread->call_me(the->thread->call_me_with);282 the->thread->call_me = NULL;283 the->thread->call_me_with = NULL;280 if (THREAD->call_me) { 281 THREAD->call_me(THREAD->call_me_with); 282 THREAD->call_me = NULL; 283 THREAD->call_me_with = NULL; 284 284 } 285 285 286 spinlock_unlock(& the->thread->lock);286 spinlock_unlock(&THREAD->lock); 287 287 288 288 break; … … 292 292 * Entering state is unexpected. 
293 293 */ 294 panic("tid%d: unexpected state %s\n", the->thread->tid, thread_states[the->thread->state]);294 panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]); 295 295 break; 296 296 } 297 the->thread= NULL;297 THREAD = NULL; 298 298 } 299 299 300 the->thread= find_best_thread();300 THREAD = find_best_thread(); 301 301 302 spinlock_lock(& the->thread->lock);303 priority = the->thread->pri;304 spinlock_unlock(& the->thread->lock);302 spinlock_lock(&THREAD->lock); 303 priority = THREAD->pri; 304 spinlock_unlock(&THREAD->lock); 305 305 306 306 relink_rq(priority); 307 307 308 spinlock_lock(& the->thread->lock);308 spinlock_lock(&THREAD->lock); 309 309 310 310 /* 311 311 * If both the old and the new task are the same, lots of work is avoided. 312 312 */ 313 if ( the->task != the->thread->task) {313 if (TASK != THREAD->task) { 314 314 vm_t *m1 = NULL; 315 315 vm_t *m2; 316 316 317 if ( the->task) {318 spinlock_lock(& the->task->lock);319 m1 = the->task->vm;320 spinlock_unlock(& the->task->lock);321 } 322 323 spinlock_lock(& the->thread->task->lock);324 m2 = the->thread->task->vm;325 spinlock_unlock(& the->thread->task->lock);317 if (TASK) { 318 spinlock_lock(&TASK->lock); 319 m1 = TASK->vm; 320 spinlock_unlock(&TASK->lock); 321 } 322 323 spinlock_lock(&THREAD->task->lock); 324 m2 = THREAD->task->vm; 325 spinlock_unlock(&THREAD->task->lock); 326 326 327 327 /* … … 338 338 vm_install(m2); 339 339 } 340 the->task = the->thread->task;341 } 342 343 the->thread->state = Running;340 TASK = THREAD->task; 341 } 342 343 THREAD->state = Running; 344 344 345 345 #ifdef SCHEDULER_VERBOSE 346 printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", the->cpu->id, the->thread->tid, the->thread->pri, the->thread->ticks, the->cpu->nrdy);346 printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy); 347 347 #endif 348 348 349 context_restore(& the->thread->saved_context);349 
context_restore(&THREAD->saved_context); 350 350 /* not reached */ 351 351 } … … 366 366 * Sleep until there's some work to do. 367 367 */ 368 waitq_sleep(& the->cpu->kcpulb_wq);368 waitq_sleep(&CPU->kcpulb_wq); 369 369 370 370 not_satisfied: … … 375 375 */ 376 376 pri = cpu_priority_high(); 377 spinlock_lock(& the->cpu->lock);377 spinlock_lock(&CPU->lock); 378 378 count = nrdy / config.cpu_active; 379 count -= the->cpu->nrdy;380 spinlock_unlock(& the->cpu->lock);379 count -= CPU->nrdy; 380 spinlock_unlock(&CPU->lock); 381 381 cpu_priority_restore(pri); 382 382 … … 400 400 * Doesn't require interrupt disabling for kcpulb is X_WIRED. 401 401 */ 402 if ( the->cpu== cpu)402 if (CPU == cpu) 403 403 continue; 404 404 … … 461 461 spinlock_lock(&t->lock); 462 462 #ifdef KCPULB_VERBOSE 463 printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", the->cpu->id, t->tid, the->cpu->id, the->cpu->nrdy, nrdy / config.cpu_active);463 printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active); 464 464 #endif 465 465 t->flags |= X_STOLEN; … … 484 484 } 485 485 486 if ( the->cpu->nrdy) {486 if (CPU->nrdy) { 487 487 /* 488 488 * Be a little bit light-weight and let migrated threads run. … … 504 504 * Tell find_best_thread() to wake us up later again. 505 505 */ 506 the->cpu->kcpulbstarted = 0;506 CPU->kcpulbstarted = 0; 507 507 goto loop; 508 508 }
Note: See TracChangeset for help on using the changeset viewer.