Changeset 7d6ec87 in mainline
- Timestamp: 2006-02-28T13:33:36Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 95042fd
- Parents: 97f1691
- Files: 2 edited
arch/sparc64/src/mm/tlb.c
r97f1691 → r7d6ec87

 	immu_disable();
 	dmmu_disable();
+
+	/*
+	 * Demap everything, especially OpenFirmware.
+	 */
+	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);
+	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);
 
 	/*
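For context, a minimal sketch of the MMU take-over sequence this hunk slots into is shown below. Only immu_disable(), dmmu_disable(), itlb_demap() and dtlb_demap() appear in the changeset itself; the re-enable and mapping-insertion steps are assumptions added purely for illustration.

/*
 * Hypothetical sketch, not the actual HelenOS code: why the demap is needed.
 * Names marked "assumed" are not part of this changeset.
 */
static void take_over_mmu_sketch(void)
{
	/* Stop translation while the TLBs are being reprogrammed. */
	immu_disable();
	dmmu_disable();

	/*
	 * Flush every translation left behind by the loader/OpenFirmware
	 * from the nucleus context, so stale firmware mappings cannot
	 * alias or shadow the kernel's own mappings once translation is
	 * turned back on.
	 */
	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);

	/* ... install the kernel's own locked ITLB/DTLB entries here ... */

	immu_enable();	/* assumed counterpart of immu_disable() */
	dmmu_enable();	/* assumed counterpart of dmmu_disable() */
}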
generic/src/proc/scheduler.c
r97f1691 → r7d6ec87 (removed/added pairs with identical visible text differ only in whitespace)

 #include <debug.h>
 
-atomic_t nrdy;
+static void scheduler_separated_stack(void);
+
+atomic_t nrdy;	/**< Number of ready threads in the system. */
 
 /** Take actions before new thread runs.
…
 {
 	before_thread_runs_arch();
-#ifdef CONFIG_FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 	if(THREAD==CPU->fpu_owner)
 		fpu_enable();
 	else
 		fpu_disable();
-#else
+#else
 	fpu_enable();
 	if (THREAD->fpu_context_exists)
…
 		THREAD->fpu_context_exists=1;
 	}
-#endif
-}
-
-/** Take actions after old thread ran.
+#endif
+}
+
+/** Take actions after THREAD had run.
  *
  * Perform actions that need to be
  * taken after the running thread
- * was preempted by the scheduler.
+ * had been preempted by the scheduler.
  *
  * THREAD->lock is locked on entry
…
 
 	spinlock_lock(&THREAD->lock);
-	if (THREAD->fpu_context_exists)
+	if (THREAD->fpu_context_exists) {
 		fpu_context_restore(&THREAD->saved_fpu_context);
-	else {
+	} else {
 		fpu_init(&(THREAD->saved_fpu_context));
 		THREAD->fpu_context_exists=1;
…
 	CPU->fpu_owner=THREAD;
 	THREAD->fpu_context_engaged = 1;
-
 	spinlock_unlock(&THREAD->lock);
+
 	spinlock_unlock(&CPU->lock);
 }
…
 {
 }
-
 
 /** Get thread to be scheduled
…
 	interrupts_disable();
 
-	i = 0;
-	for (; i<RQ_COUNT; i++) {
+	for (i = 0; i<RQ_COUNT; i++) {
 		r = &CPU->rq[i];
 		spinlock_lock(&r->lock);
…
 
 		t->ticks = us2ticks((i+1)*10000);
-		t->priority = i;	/* eventually correct rq index */
+		t->priority = i;	/* correct rq index */
 
 		/*
…
 
 }
-
 
 /** Prevent rq starvation
…
 }
 
-
-/** Scheduler stack switch wrapper
- *
- * Second part of the scheduler() function
- * using new stack. Handling the actual context
- * switch to a new thread.
- *
- * Assume THREAD->lock is held.
- */
-static void scheduler_separated_stack(void)
-{
-	… (body removed here; the same function is re-added, essentially unchanged, after scheduler() — see the added block below)
-}
-
-
 /** The scheduler
…
 	if (THREAD) {
 		spinlock_lock(&THREAD->lock);
-#ifndef CONFIG_FPU_LAZY
+#ifndef CONFIG_FPU_LAZY
 		fpu_context_save(&(THREAD->saved_fpu_context));
-#endif
+#endif
 		if (!context_save(&THREAD->saved_context)) {
 			/*
…
 }
 
-
-
-
+/** Scheduler stack switch wrapper
+ *
+ * Second part of the scheduler() function
+ * using new stack. Handling the actual context
+ * switch to a new thread.
+ *
+ * Assume THREAD->lock is held.
+ */
+void scheduler_separated_stack(void)
+{
+	int priority;
+
+	ASSERT(CPU != NULL);
+
+	if (THREAD) {
+		/* must be run after the switch to scheduler stack */
+		after_thread_ran();
+
+		switch (THREAD->state) {
+		    case Running:
+			THREAD->state = Ready;
+			spinlock_unlock(&THREAD->lock);
+			thread_ready(THREAD);
+			break;
+
+		    case Exiting:
+			thread_destroy(THREAD);
+			break;
+
+		    case Sleeping:
+			/*
+			 * Prefer the thread after it's woken up.
+			 */
+			THREAD->priority = -1;
+
+			/*
+			 * We need to release wq->lock which we locked in waitq_sleep().
+			 * Address of wq->lock is kept in THREAD->sleep_queue.
+			 */
+			spinlock_unlock(&THREAD->sleep_queue->lock);
+
+			/*
+			 * Check for possible requests for out-of-context invocation.
+			 */
+			if (THREAD->call_me) {
+				THREAD->call_me(THREAD->call_me_with);
+				THREAD->call_me = NULL;
+				THREAD->call_me_with = NULL;
+			}
+
+			spinlock_unlock(&THREAD->lock);
+
+			break;
+
+		    default:
+			/*
+			 * Entering state is unexpected.
+			 */
+			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
+			break;
+		}
+
+		THREAD = NULL;
+	}
+
+	THREAD = find_best_thread();
+
+	spinlock_lock(&THREAD->lock);
+	priority = THREAD->priority;
+	spinlock_unlock(&THREAD->lock);
+
+	relink_rq(priority);
+
+	spinlock_lock(&THREAD->lock);
+
+	/*
+	 * If both the old and the new task are the same, lots of work is avoided.
+	 */
+	if (TASK != THREAD->task) {
+		as_t *as1 = NULL;
+		as_t *as2;
+
+		if (TASK) {
+			spinlock_lock(&TASK->lock);
+			as1 = TASK->as;
+			spinlock_unlock(&TASK->lock);
+		}
+
+		spinlock_lock(&THREAD->task->lock);
+		as2 = THREAD->task->as;
+		spinlock_unlock(&THREAD->task->lock);
+
+		/*
+		 * Note that it is possible for two tasks to share one address space.
+		 */
+		if (as1 != as2) {
+			/*
+			 * Both tasks and address spaces are different.
+			 * Replace the old one with the new one.
+			 */
+			as_switch(as1, as2);
+		}
+		TASK = THREAD->task;
+	}
+
+	THREAD->state = Running;
+
+#ifdef SCHEDULER_VERBOSE
+	printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+#endif
+
+	/*
+	 * Some architectures provide late kernel PA2KA(identity)
+	 * mapping in a page fault handler. However, the page fault
+	 * handler uses the kernel stack of the running thread and
+	 * therefore cannot be used to map it. The kernel stack, if
+	 * necessary, is to be mapped in before_thread_runs(). This
+	 * function must be executed before the switch to the new stack.
+	 */
+	before_thread_runs();
+
+	/*
+	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
+	 */
+	the_copy(THE, (the_t *) THREAD->kstack);
+
+	context_restore(&THREAD->saved_context);
+	/* not reached */
+}
 
 #ifdef CONFIG_SMP
…
 	 * let's not be interrupted */
 	ipl = interrupts_disable();
-	printf(" *********** Scheduler dump ***********\n");
+	printf("Scheduler dump:\n");
 	for (cpu=0;cpu < config.cpu_count; cpu++) {
+
 		if (!cpus[cpu].active)
 			continue;
+
 		spinlock_lock(&cpus[cpu].lock);
-		printf("cpu%d: nrdy: %d needs_relink: %d\n",
+		printf("cpu%d: nrdy: %d, needs_relink: %d\n",
 			cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
 
…
 			continue;
 		}
-		printf("\t Rq %d: ", i);
+		printf("\trq[%d]: ", i);
 		for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
 			t = list_get_instance(cur, thread_t, rq_link);
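One small C point behind the new forward declaration: because the file-scope prototype added at the top carries `static`, scheduler_separated_stack() keeps internal linkage even though the definition that now follows scheduler() omits the keyword (C99 6.2.2). A minimal, self-contained illustration using hypothetical names (not HelenOS code):

#include <stdio.h>

static void helper(void);	/* internal linkage is established by this declaration */

int main(void)
{
	helper();		/* callable here, before its definition appears in the file */
	return 0;
}

void helper(void)		/* inherits internal linkage from the earlier declaration */
{
	puts("helper() called");
}

Placing the definition after its only caller, with a static prototype up front, lets the file read top-down: scheduler() first, then the stack-switch continuation it dispatches to.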