Changes in kernel/generic/src/proc/scheduler.c [55b77d9:b4dc35a] in mainline
File: 1 edited
Legend:
  ' '  Unmodified
  '+'  Added
  '-'  Removed
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r55b77d9)
+++ kernel/generic/src/proc/scheduler.c (rb4dc35a)
@@ -237,6 +237,6 @@
      * Take the first thread from the queue.
      */
-    thread_t *thread = list_get_instance(
-        list_first(&CPU->rq[i].rq), thread_t, rq_link);
+    thread_t *thread =
+        list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
     list_remove(&thread->rq_link);
 
@@ -273,7 +273,7 @@
 static void relink_rq(int start)
 {
-    list_t list;
+    link_t head;
 
-    list_initialize(&list);
+    list_initialize(&head);
     irq_spinlock_lock(&CPU->lock, false);
 
@@ -284,5 +284,5 @@
 
     irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-    list_concat(&list, &CPU->rq[i + 1].rq);
+    list_concat(&head, &CPU->rq[i + 1].rq_head);
     size_t n = CPU->rq[i + 1].n;
     CPU->rq[i + 1].n = 0;
@@ -292,5 +292,5 @@
 
     irq_spinlock_lock(&CPU->rq[i].lock, false);
-    list_concat(&CPU->rq[i].rq, &list);
+    list_concat(&CPU->rq[i].rq_head, &head);
     CPU->rq[i].n += n;
     irq_spinlock_unlock(&CPU->rq[i].lock, false);
@@ -376,5 +376,5 @@
     context_save(&CPU->saved_context);
     context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
-        (uintptr_t) CPU->stack, STACK_SIZE);
+        (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
 
@@ -586,4 +586,5 @@
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
+     *
      */
     size_t acpu;
@@ -616,27 +617,26 @@
 
     /* Search rq from the back */
-    link_t *link = cpu->rq[rq].rq.head.prev;
-
-    while (link != &(cpu->rq[rq].rq.head)) {
-        thread = (thread_t *) list_get_instance(link,
-            thread_t, rq_link);
+    link_t *link = cpu->rq[rq].rq_head.prev;
+
+    while (link != &(cpu->rq[rq].rq_head)) {
+        thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
 
         /*
-         * Do not steal CPU-wired threads, threads
-         * already stolen, threads for which migration
-         * was temporarily disabled or threads whose
-         * FPU context is still in the CPU.
+         * We don't want to steal CPU-wired threads
+         * neither threads already stolen. The latter
+         * prevents threads from migrating between CPU's
+         * without ever being run. We don't want to
+         * steal threads whose FPU context is still in
+         * CPU.
+         *
          */
         irq_spinlock_lock(&thread->lock, false);
 
-        if (!(thread->flags & THREAD_FLAG_WIRED) &&
-            !(thread->flags & THREAD_FLAG_STOLEN) &&
-            !thread->nomigrate &&
-            !thread->fpu_context_engaged) {
+        if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+            && (!(thread->fpu_context_engaged))) {
             /*
              * Remove thread from ready queue.
              */
-            irq_spinlock_unlock(&thread->lock,
-                false);
+            irq_spinlock_unlock(&thread->lock, false);
 
             atomic_dec(&cpu->nrdy);
@@ -660,6 +660,5 @@
      */
 
-    irq_spinlock_pass(&(cpu->rq[rq].lock),
-        &thread->lock);
+    irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
 
 #ifdef KCPULB_VERBOSE
@@ -740,7 +739,9 @@
 
     printf("\trq[%u]: ", i);
-    list_foreach(cpus[cpu].rq[i].rq, cur) {
-        thread_t *thread = list_get_instance(cur,
-            thread_t, rq_link);
+    link_t *cur;
+    for (cur = cpus[cpu].rq[i].rq_head.next;
+        cur != &(cpus[cpu].rq[i].rq_head);
+        cur = cur->next) {
+        thread_t *thread = list_get_instance(cur, thread_t, rq_link);
         printf("%" PRIu64 "(%s) ", thread->tid,
             thread_states[thread->state]);
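
The recurring change in these hunks is the shape of the run-queue list: one revision goes through a wrapping list_t and accessors such as list_first(), list_foreach() and list_concat(), while the other dereferences a bare sentinel link (rq_head.next, rq_head.prev) directly. The standalone sketch below is not HelenOS code; it assumes simplified definitions of link_t, list_t, list_first(), list_concat() and list_get_instance() purely to illustrate how both access styles land on the same circular doubly-linked structure:

/*
 * Minimal sketch of an intrusive circular list with a sentinel head.
 * All names here are illustrative stand-ins, not the kernel's own.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct link {
    struct link *prev;
    struct link *next;
} link_t;

/* A list is just a sentinel link that is its own neighbour when empty. */
typedef struct {
    link_t head;
} list_t;

static void list_initialize(list_t *list)
{
    list->head.prev = &list->head;
    list->head.next = &list->head;
}

static void list_append(link_t *link, list_t *list)
{
    link->prev = list->head.prev;
    link->next = &list->head;
    list->head.prev->next = link;
    list->head.prev = link;
}

/* Accessor style: hides the sentinel, returns NULL for an empty list. */
static link_t *list_first(list_t *list)
{
    return (list->head.next == &list->head) ? NULL : list->head.next;
}

/* Splice src onto the tail of dst in O(1), leaving src empty. */
static void list_concat(list_t *dst, list_t *src)
{
    if (src->head.next == &src->head)
        return;

    link_t *dst_tail = dst->head.prev;
    link_t *src_first = src->head.next;
    link_t *src_last = src->head.prev;

    dst_tail->next = src_first;
    src_first->prev = dst_tail;
    src_last->next = &dst->head;
    dst->head.prev = src_last;
    list_initialize(src);
}

/* Recover the enclosing structure from an embedded link. */
#define list_get_instance(link, type, member) \
    ((type *) (((char *) (link)) - offsetof(type, member)))

typedef struct {
    int tid;
    link_t rq_link;
} thread_t;

int main(void)
{
    list_t rq, donor;
    thread_t a = { .tid = 1 }, b = { .tid = 2 };

    list_initialize(&rq);
    list_initialize(&donor);
    list_append(&a.rq_link, &rq);
    list_append(&b.rq_link, &donor);

    /* Like relink_rq(): move every entry of one queue onto another. */
    list_concat(&rq, &donor);

    /* Bare-sentinel style: dereference head.next directly. */
    thread_t *first = list_get_instance(rq.head.next, thread_t, rq_link);

    /* Accessor style: the same element via list_first(). */
    thread_t *same = list_get_instance(list_first(&rq), thread_t, rq_link);

    printf("first tid: %d (== %d)\n", first->tid, same->tid);
    return 0;
}

In this sketch list_concat() only rewires four pointers around the two sentinels, so moving every thread from one priority queue to another costs O(1) regardless of queue length, which is presumably why relink_rq() can drain a whole queue while holding the run-queue spinlocks.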