Changes in kernel/generic/src/proc/scheduler.c [b4dc35a:55b77d9] in mainline
File: 1 edited
Legend: unchanged context lines are unmarked; lines present only in rb4dc35a are prefixed with -, lines present only in r55b77d9 with +.
kernel/generic/src/proc/scheduler.c
rb4dc35a → r55b77d9

@@ old 237 / new 237 @@
         * Take the first thread from the queue.
         */
-       thread_t *thread =
-           list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+       thread_t *thread = list_get_instance(
+           list_first(&CPU->rq[i].rq), thread_t, rq_link);
        list_remove(&thread->rq_link);

@@ old 273 / new 273 @@
 static void relink_rq(int start)
 {
-   link_t head;
-
-   list_initialize(&head);
+   list_t list;
+
+   list_initialize(&list);
    irq_spinlock_lock(&CPU->lock, false);

@@ old 284 / new 284 @@
        irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-       list_concat(&head, &CPU->rq[i + 1].rq_head);
+       list_concat(&list, &CPU->rq[i + 1].rq);
        size_t n = CPU->rq[i + 1].n;
        CPU->rq[i + 1].n = 0;

@@ old 292 / new 292 @@
        irq_spinlock_lock(&CPU->rq[i].lock, false);
-       list_concat(&CPU->rq[i].rq_head, &head);
+       list_concat(&CPU->rq[i].rq, &list);
        CPU->rq[i].n += n;
        irq_spinlock_unlock(&CPU->rq[i].lock, false);

@@ old 376 / new 376 @@
    context_save(&CPU->saved_context);
    context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
-       (uintptr_t) CPU->stack, CPU_STACK_SIZE);
+       (uintptr_t) CPU->stack, STACK_SIZE);
    context_restore(&CPU->saved_context);

@@ old 586 / new 586 @@
     * Searching least priority queues on all CPU's first and most priority
     * queues on all CPU's last.
-    *
     */
    size_t acpu;

@@ old 617 / new 616 @@
        /* Search rq from the back */
-       link_t *link = cpu->rq[rq].rq_head.prev;
-
-       while (link != &(cpu->rq[rq].rq_head)) {
-           thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+       link_t *link = cpu->rq[rq].rq.head.prev;
+
+       while (link != &(cpu->rq[rq].rq.head)) {
+           thread = (thread_t *) list_get_instance(link,
+               thread_t, rq_link);

            /*
-            * We don't want to steal CPU-wired threads
-            * neither threads already stolen. The latter
-            * prevents threads from migrating between CPU's
-            * without ever being run. We don't want to
-            * steal threads whose FPU context is still in
-            * CPU.
-            *
+            * Do not steal CPU-wired threads, threads
+            * already stolen, threads for which migration
+            * was temporarily disabled or threads whose
+            * FPU context is still in the CPU.
             */
            irq_spinlock_lock(&thread->lock, false);

-           if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-               && (!(thread->fpu_context_engaged))) {
+           if (!(thread->flags & THREAD_FLAG_WIRED) &&
+               !(thread->flags & THREAD_FLAG_STOLEN) &&
+               !thread->nomigrate &&
+               !thread->fpu_context_engaged) {
                /*
                 * Remove thread from ready queue.
                 */
-               irq_spinlock_unlock(&thread->lock, false);
+               irq_spinlock_unlock(&thread->lock,
+                   false);

                atomic_dec(&cpu->nrdy);

@@ old 660 / new 660 @@
                 */

-               irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+               irq_spinlock_pass(&(cpu->rq[rq].lock),
+                   &thread->lock);

 #ifdef KCPULB_VERBOSE

@@ old 739 / new 740 @@
            printf("\trq[%u]: ", i);
-           link_t *cur;
-           for (cur = cpus[cpu].rq[i].rq_head.next;
-               cur != &(cpus[cpu].rq[i].rq_head);
-               cur = cur->next) {
-               thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+           list_foreach(cpus[cpu].rq[i].rq, cur) {
+               thread_t *thread = list_get_instance(cur,
+                   thread_t, rq_link);
                printf("%" PRIu64 "(%s) ", thread->tid,
                    thread_states[thread->state]);
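The bulk of this changeset is mechanical: the ready queues switch from an exposed link_t rq_head to the list_t type, so traversal and concatenation go through the list API (list_first(), list_concat(), list_foreach()) instead of touching the head link directly. The sketch below restates that pattern outside the kernel. It is a minimal illustration, not kernel code: demo_rq_t, demo_item_t and the demo_* functions are made-up stand-ins, the header path <adt/list.h> is assumed from the HelenOS source tree, and only the list calls themselves are taken from the diff.

#include <adt/list.h>   /* assumed HelenOS header providing list_t, link_t and the list API */

/* Hypothetical miniature run queue; stands in for the kernel's per-priority rq. */
typedef struct {
    list_t rq;          /* was an exposed link_t rq_head before this changeset */
    size_t n;           /* item count, maintained by the caller as the scheduler does */
} demo_rq_t;

/* Hypothetical queued item; rq_link plays the role of thread_t->rq_link. */
typedef struct {
    link_t rq_link;
    int id;
} demo_item_t;

/* Set up an empty queue, as relink_rq() does with its temporary list_t. */
static void demo_init(demo_rq_t *q)
{
    list_initialize(&q->rq);
    q->n = 0;
}

/* Take the first item, mirroring the new code at lines 239-241: list_first()
 * instead of dereferencing rq_head.next by hand. The caller is expected to
 * know the queue is non-empty (q->n > 0), as the scheduler does. */
static demo_item_t *demo_take_first(demo_rq_t *q)
{
    demo_item_t *it = list_get_instance(
        list_first(&q->rq), demo_item_t, rq_link);
    list_remove(&it->rq_link);
    q->n--;
    return it;
}

/* Move everything from src to dst, mirroring relink_rq(): list_concat()
 * now operates on the list_t objects rather than on their head links. */
static void demo_merge(demo_rq_t *dst, demo_rq_t *src)
{
    list_concat(&dst->rq, &src->rq);
    dst->n += src->n;
    src->n = 0;
}

/* Walk the queue, mirroring the new sched_print_list() loop: list_foreach()
 * declares the link_t iterator (cur) and replaces the manual
 * for (cur = head.next; cur != &head; cur = cur->next) idiom. */
static size_t demo_count(demo_rq_t *q)
{
    size_t cnt = 0;
    list_foreach(q->rq, cur) {
        demo_item_t *it = list_get_instance(cur, demo_item_t, rq_link);
        (void) it;
        cnt++;
    }
    return cnt;
}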
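Beyond the list API conversion, the steal test in kcpulb() gains one more condition: threads that have temporarily disabled migration (thread->nomigrate) are now skipped alongside wired, already-stolen and FPU-engaged threads, and the comment above the test is rewritten to say so. A hedged restatement of the new predicate as a standalone helper follows; the helper name is hypothetical, while the fields and flags are those used in the diff.

#include <stdbool.h>    /* assumed; the kernel provides bool through its own headers */

/* Hypothetical helper; the condition is the one this changeset installs in
 * kcpulb() at lines 632-635. The caller is assumed to hold thread->lock,
 * as the surrounding code does. */
static bool thread_is_stealable(thread_t *thread)
{
    return !(thread->flags & THREAD_FLAG_WIRED) &&
        !(thread->flags & THREAD_FLAG_STOLEN) &&
        !thread->nomigrate &&
        !thread->fpu_context_engaged;
}

The remaining hunks are line re-wraps and comment cleanup, plus the switch from CPU_STACK_SIZE to STACK_SIZE in the context_set() call.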
Note: See TracChangeset for help on using the changeset viewer.