Changes in kernel/generic/src/proc/scheduler.c [df58e44:98000fb] in mainline
- File: kernel/generic/src/proc/scheduler.c (1 edited)
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
--- kernel/generic/src/proc/scheduler.c (df58e44)
+++ kernel/generic/src/proc/scheduler.c (98000fb)

 /*
- * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2001-2007 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which
…
 #include <print.h>
 #include <debug.h>
-#include <stacktrace.h>
 
+static void before_task_runs(void);
+static void before_thread_runs(void);
+static void after_thread_ran(void);
 static void scheduler_separated_stack(void);
 
-atomic_t nrdy;
+atomic_t nrdy; /**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */
-static void before_task_runs(void)
+void before_task_runs(void)
 {
     before_task_runs_arch();
…
  * Perform actions that need to be
  * taken before the newly selected
- * thread is passed control.
+ * tread is passed control.
  *
  * THREAD->lock is locked on entry
  *
  */
-static void before_thread_runs(void)
+void before_thread_runs(void)
 {
     before_thread_runs_arch();
-
 #ifdef CONFIG_FPU_LAZY
-    if (THREAD == CPU->fpu_owner)
+    if(THREAD == CPU->fpu_owner)
         fpu_enable();
     else
-        fpu_disable();
+        fpu_disable();
 #else
     fpu_enable();
…
     }
 #endif
-
-    if (THREAD->btrace) {
-        istate_t *istate = THREAD->udebug.uspace_state;
-        if (istate != NULL) {
-            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
-            stack_trace_istate(istate);
-        }
-
-        THREAD->btrace = false;
-    }
 }
 
…
  *
  */
-static void after_thread_ran(void)
+void after_thread_ran(void)
 {
     after_thread_ran_arch();
…
 restart:
     fpu_enable();
-    irq_spinlock_lock(&CPU->lock, false);
-
+    spinlock_lock(&CPU->lock);
+
     /* Save old context */
-    if (CPU->fpu_owner != NULL) {
-        irq_spinlock_lock(&CPU->fpu_owner->lock, false);
+    if (CPU->fpu_owner != NULL) {
+        spinlock_lock(&CPU->fpu_owner->lock);
         fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-
-        /* Don't prevent migration */
+        /* don't prevent migration */
         CPU->fpu_owner->fpu_context_engaged = 0;
-        irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
+        spinlock_unlock(&CPU->fpu_owner->lock);
         CPU->fpu_owner = NULL;
     }
-
-    irq_spinlock_lock(&THREAD->lock, false);
+
+    spinlock_lock(&THREAD->lock);
     if (THREAD->fpu_context_exists) {
         fpu_context_restore(THREAD->saved_fpu_context);
…
         if (!THREAD->saved_fpu_context) {
             /* Might sleep */
-            irq_spinlock_unlock(&THREAD->lock, false);
-            irq_spinlock_unlock(&CPU->lock, false);
+            spinlock_unlock(&THREAD->lock);
+            spinlock_unlock(&CPU->lock);
             THREAD->saved_fpu_context =
                 (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
-
             /* We may have switched CPUs during slab_alloc */
-            goto restart;
+            goto restart;
         }
         fpu_init();
         THREAD->fpu_context_exists = 1;
     }
-
     CPU->fpu_owner = THREAD;
     THREAD->fpu_context_engaged = 1;
-    irq_spinlock_unlock(&THREAD->lock, false);
-
-    irq_spinlock_unlock(&CPU->lock, false);
-}
-#endif /* CONFIG_FPU_LAZY */
+    spinlock_unlock(&THREAD->lock);
+
+    spinlock_unlock(&CPU->lock);
+}
+#endif
 
 /** Initialize scheduler
…
 static thread_t *find_best_thread(void)
 {
+    thread_t *t;
+    runq_t *r;
+    int i;
+
     ASSERT(CPU != NULL);
 
 loop:
+    interrupts_enable();
 
     if (atomic_get(&CPU->nrdy) == 0) {
…
          * This improves energy saving and hyperthreading.
          */
-        irq_spinlock_lock(&CPU->lock, false);
-        CPU->idle = true;
-        irq_spinlock_unlock(&CPU->lock, false);
-        interrupts_enable();
 
         /*
          * An interrupt might occur right now and wake up a thread.
…
          * even though there is a runnable thread.
          */
-        cpu_sleep();
-        interrupts_disable();
-        goto loop;
-    }
-
-    unsigned int i;
+
+        cpu_sleep();
+        goto loop;
+    }
+
+    interrupts_disable();
+
     for (i = 0; i < RQ_COUNT; i++) {
-        irq_spinlock_lock(&(CPU->rq[i].lock), false);
-        if (CPU->rq[i].n == 0) {
+        r = &CPU->rq[i];
+        spinlock_lock(&r->lock);
+        if (r->n == 0) {
             /*
              * If this queue is empty, try a lower-priority queue.
              */
-            irq_spinlock_unlock(&(CPU->rq[i].lock), false);
+            spinlock_unlock(&r->lock);
             continue;
         }
 
         atomic_dec(&CPU->nrdy);
         atomic_dec(&nrdy);
-        CPU->rq[i].n--;
-
+        r->n--;
+
         /*
          * Take the first thread from the queue.
          */
-        thread_t *thread =
-            list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
-        list_remove(&thread->rq_link);
-
-        irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
-
-        thread->cpu = CPU;
-        thread->ticks = us2ticks((i + 1) * 10000);
-        thread->priority = i; /* Correct rq index */
-
+        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
+        list_remove(&t->rq_link);
+
+        spinlock_unlock(&r->lock);
+
+        spinlock_lock(&t->lock);
+        t->cpu = CPU;
+
+        t->ticks = us2ticks((i + 1) * 10000);
+        t->priority = i; /* correct rq index */
+
         /*
          * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
          * when load balancing needs emerge.
          */
-        thread->flags &= ~THREAD_FLAG_STOLEN;
-        irq_spinlock_unlock(&thread->lock, false);
-
-        return thread;
-    }
-
+        t->flags &= ~THREAD_FLAG_STOLEN;
+        spinlock_unlock(&t->lock);
+
+        return t;
+    }
     goto loop;
+
 }
 
…
 {
     link_t head;
-
+    runq_t *r;
+    int i, n;
+
     list_initialize(&head);
-    irq_spinlock_lock(&CPU->lock, false);
-
+    spinlock_lock(&CPU->lock);
     if (CPU->needs_relink > NEEDS_RELINK_MAX) {
-        int i;
         for (i = start; i < RQ_COUNT - 1; i++) {
-            /* Remember and empty rq[i + 1] */
-
-            irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-            list_concat(&head, &CPU->rq[i + 1].rq_head);
-            size_t n = CPU->rq[i + 1].n;
-            CPU->rq[i + 1].n = 0;
-            irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
-
-            /* Append rq[i + 1] to rq[i] */
-
-            irq_spinlock_lock(&CPU->rq[i].lock, false);
-            list_concat(&CPU->rq[i].rq_head, &head);
-            CPU->rq[i].n += n;
-            irq_spinlock_unlock(&CPU->rq[i].lock, false);
-        }
+            /* remember and empty rq[i + 1] */
+            r = &CPU->rq[i + 1];
+            spinlock_lock(&r->lock);
+            list_concat(&head, &r->rq_head);
+            n = r->n;
+            r->n = 0;
+            spinlock_unlock(&r->lock);
 
+            /* append rq[i + 1] to rq[i] */
+            r = &CPU->rq[i];
+            spinlock_lock(&r->lock);
+            list_concat(&r->rq_head, &head);
+            r->n += n;
+            spinlock_unlock(&r->lock);
+        }
         CPU->needs_relink = 0;
     }
-
-    irq_spinlock_unlock(&CPU->lock, false);
+    spinlock_unlock(&CPU->lock);
+
 }
 
…
 {
     volatile ipl_t ipl;
 
     ASSERT(CPU != NULL);
 
     ipl = interrupts_disable();
 
     if (atomic_get(&haltstate))
         halt();
 
     if (THREAD) {
-        irq_spinlock_lock(&THREAD->lock, false);
+        spinlock_lock(&THREAD->lock);
 
-        /* Update thread kernel accounting */
-        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+        /* Update thread accounting */
+        THREAD->cycles += get_cycle() - THREAD->last_cycle;
 
 #ifndef CONFIG_FPU_LAZY
…
             THREAD->last_cycle = get_cycle();
 
-            irq_spinlock_unlock(&THREAD->lock, false);
+            spinlock_unlock(&THREAD->lock);
             interrupts_restore(THREAD->saved_context.ipl);
 
             return;
         }
 
         /*
          * Interrupt priority level of preempted thread is recorded
          * here to facilitate scheduler() invocations from
-         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
-         *
+         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
          */
         THREAD->saved_context.ipl = ipl;
     }
 
     /*
      * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
      * and preemption counter. At this point THE could be coming either
      * from THREAD's or CPU's stack.
-     *
      */
     the_copy(THE, (the_t *) CPU->stack);
 
     /*
      * We may not keep the old stack.
…
      * Therefore the scheduler() function continues in
      * scheduler_separated_stack().
-     *
      */
     context_save(&CPU->saved_context);
…
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
-
-    /* Not reached */
+    /* not reached */
 }
 
…
  * switch to a new thread.
  *
+ * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
+    int priority;
     DEADLOCK_PROBE_INIT(p_joinwq);
-    task_t *old_task = TASK;
-    as_t *old_as = AS;
-
-    ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
+
     ASSERT(CPU != NULL);
 
-    /*
-     * Hold the current task and the address space to prevent their
-     * possible destruction should thread_destroy() be called on this or any
-     * other processor while the scheduler is still using them.
-     */
-    if (old_task)
-        task_hold(old_task);
-
-    if (old_as)
-        as_hold(old_as);
-
     if (THREAD) {
-        /* Must be run after the switch to scheduler stack */
+        /* must be run after the switch to scheduler stack */
         after_thread_ran();
 
         switch (THREAD->state) {
         case Running:
-            irq_spinlock_unlock(&THREAD->lock, false);
+            spinlock_unlock(&THREAD->lock);
             thread_ready(THREAD);
             break;
 
         case Exiting:
 repeat:
             if (THREAD->detached) {
-                thread_destroy(THREAD, false);
+                thread_destroy(THREAD);
             } else {
…
                  * somebody calls thread_detach() on it.
                  */
-                if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
+                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                     /*
                      * Avoid deadlock.
                      */
-                    irq_spinlock_unlock(&THREAD->lock, false);
+                    spinlock_unlock(&THREAD->lock);
                     delay(HZ);
-                    irq_spinlock_lock(&THREAD->lock, false);
+                    spinlock_lock(&THREAD->lock);
                     DEADLOCK_PROBE(p_joinwq,
                         DEADLOCK_THRESHOLD);
…
                 _waitq_wakeup_unsafe(&THREAD->join_wq,
                     WAKEUP_FIRST);
-                irq_spinlock_unlock(&THREAD->join_wq.lock, false);
+                spinlock_unlock(&THREAD->join_wq.lock);
 
                 THREAD->state = Lingering;
-                irq_spinlock_unlock(&THREAD->lock, false);
+                spinlock_unlock(&THREAD->lock);
             }
             break;
…
              */
             THREAD->priority = -1;
 
             /*
              * We need to release wq->lock which we locked in
              * waitq_sleep(). Address of wq->lock is kept in
              * THREAD->sleep_queue.
              */
-            irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
-
-            irq_spinlock_unlock(&THREAD->lock, false);
+            spinlock_unlock(&THREAD->sleep_queue->lock);
+
+            /*
+             * Check for possible requests for out-of-context
+             * invocation.
+             */
+            if (THREAD->call_me) {
+                THREAD->call_me(THREAD->call_me_with);
+                THREAD->call_me = NULL;
+                THREAD->call_me_with = NULL;
+            }
+
+            spinlock_unlock(&THREAD->lock);
+
             break;
 
         default:
             /*
…
             break;
         }
 
         THREAD = NULL;
     }
 
     THREAD = find_best_thread();
 
-    irq_spinlock_lock(&THREAD->lock, false);
-    int priority = THREAD->priority;
-    irq_spinlock_unlock(&THREAD->lock, false);
-
-    relink_rq(priority);
-
-    /*
-     * If both the old and the new task are the same,
-     * lots of work is avoided.
+    spinlock_lock(&THREAD->lock);
+    priority = THREAD->priority;
+    spinlock_unlock(&THREAD->lock);
+
+    relink_rq(priority);
+
+    /*
+     * If both the old and the new task are the same, lots of work is
+     * avoided.
      */
     if (TASK != THREAD->task) {
-        as_t *new_as = THREAD->task->as;
+        as_t *as1 = NULL;
+        as_t *as2;
+
+        if (TASK) {
+            spinlock_lock(&TASK->lock);
+            as1 = TASK->as;
+            spinlock_unlock(&TASK->lock);
+        }
+
+        spinlock_lock(&THREAD->task->lock);
+        as2 = THREAD->task->as;
+        spinlock_unlock(&THREAD->task->lock);
 
         /*
-         * Note that it is possible for two tasks
-         * to share one address space.
+         * Note that it is possible for two tasks to share one address
+         * space.
          */
-        if (old_as != new_as) {
+        if (as1 != as2) {
             /*
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
              */
-            as_switch(old_as, new_as);
-        }
-
+            as_switch(as1, as2);
+        }
         TASK = THREAD->task;
         before_task_runs();
     }
-
-    if (old_task)
-        task_release(old_task);
-
-    if (old_as)
-        as_release(old_as);
-
-    irq_spinlock_lock(&THREAD->lock, false);
+
+    spinlock_lock(&THREAD->lock);
     THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
     printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
         ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
         THREAD->ticks, atomic_get(&CPU->nrdy));
-#endif
-
+#endif
+
     /*
      * Some architectures provide late kernel PA2KA(identity)
…
      */
     before_thread_runs();
 
     /*
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
…
 
     context_restore(&THREAD->saved_context);
-
-    /* Not reached */
+    /* not reached */
 }
 
…
 void kcpulb(void *arg)
 {
-    atomic_count_t average;
-    atomic_count_t rdy;
-
+    thread_t *t;
+    int count, average, j, k = 0;
+    unsigned int i;
+    ipl_t ipl;
+
     /*
      * Detach kcpulb as nobody will call thread_join_timeout() on it.
…
      */
     thread_sleep(1);
 
 not_satisfied:
     /*
      * Calculate the number of threads that will be migrated/stolen from
      * other CPU's. Note that situation can have changed between two
      * passes. Each time get the most up to date counts.
-     *
      */
     average = atomic_get(&nrdy) / config.cpu_active + 1;
-    rdy = atomic_get(&CPU->nrdy);
-
-    if (average <= rdy)
+    count = average - atomic_get(&CPU->nrdy);
+
+    if (count <= 0)
         goto satisfied;
-
-    atomic_count_t count = average - rdy;
-
+
     /*
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
-     *
-     */
-    size_t acpu;
-    size_t acpu_bias = 0;
-    int rq;
-
-    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
-        for (acpu = 0; acpu < config.cpu_active; acpu++) {
-            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
-
+     */
+    for (j = RQ_COUNT - 1; j >= 0; j--) {
+        for (i = 0; i < config.cpu_active; i++) {
+            link_t *l;
+            runq_t *r;
+            cpu_t *cpu;
+
+            cpu = &cpus[(i + k) % config.cpu_active];
+
             /*
              * Not interested in ourselves.
              * Doesn't require interrupt disabling for kcpulb has
              * THREAD_FLAG_WIRED.
-             *
              */
             if (CPU == cpu)
                 continue;
-
             if (atomic_get(&cpu->nrdy) <= average)
                 continue;
-
-            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
-            if (cpu->rq[rq].n == 0) {
-                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+
+            ipl = interrupts_disable();
+            r = &cpu->rq[j];
+            spinlock_lock(&r->lock);
+            if (r->n == 0) {
+                spinlock_unlock(&r->lock);
+                interrupts_restore(ipl);
                 continue;
             }
-
-            thread_t *thread = NULL;
-
-            /* Search rq from the back */
-            link_t *link = cpu->rq[rq].rq_head.prev;
-
-            while (link != &(cpu->rq[rq].rq_head)) {
-                thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
-
+
+            t = NULL;
+            l = r->rq_head.prev; /* search rq from the back */
+            while (l != &r->rq_head) {
+                t = list_get_instance(l, thread_t, rq_link);
                 /*
                  * We don't want to steal CPU-wired threads
…
                  * steal threads whose FPU context is still in
                  * CPU.
-                 *
                  */
-                irq_spinlock_lock(&thread->lock, false);
-
-                if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-                    && (!(thread->fpu_context_engaged))) {
+                spinlock_lock(&t->lock);
+                if ((!(t->flags & (THREAD_FLAG_WIRED |
+                    THREAD_FLAG_STOLEN))) &&
+                    (!(t->fpu_context_engaged))) {
                     /*
-                     * Remove thread from ready queue.
+                     * Remove t from r.
                      */
-                    irq_spinlock_unlock(&thread->lock, false);
+                    spinlock_unlock(&t->lock);
 
                     atomic_dec(&cpu->nrdy);
                     atomic_dec(&nrdy);
-
-                    cpu->rq[rq].n--;
-                    list_remove(&thread->rq_link);
-
+
+                    r->n--;
+                    list_remove(&t->rq_link);
+
                     break;
                 }
-
-                irq_spinlock_unlock(&thread->lock, false);
-
-                link = link->prev;
-                thread = NULL;
+                spinlock_unlock(&t->lock);
+                l = l->prev;
+                t = NULL;
             }
-
-            if (thread) {
+            spinlock_unlock(&r->lock);
+
+            if (t) {
                 /*
-                 * Ready thread on local CPU
+                 * Ready t on local CPU
                  */
-
-                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
-
+                spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
…
                     atomic_get(&nrdy) / config.cpu_active);
 #endif
-
-                thread->flags |= THREAD_FLAG_STOLEN;
-                thread->state = Entering;
-
-                irq_spinlock_unlock(&thread->lock, true);
-                thread_ready(thread);
-
+                t->flags |= THREAD_FLAG_STOLEN;
+                t->state = Entering;
+                spinlock_unlock(&t->lock);
+
+                thread_ready(t);
+
+                interrupts_restore(ipl);
+
                 if (--count == 0)
                     goto satisfied;
 
                 /*
                  * We are not satisfied yet, focus on another
                  * CPU next time.
-                 *
                  */
-                acpu_bias++;
+                k++;
 
                 continue;
-            } else
-                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
-
-        }
-    }
-
+            }
+            interrupts_restore(ipl);
+        }
+    }
+
     if (atomic_get(&CPU->nrdy)) {
         /*
          * Be a little bit light-weight and let migrated threads run.
-         *
         */
         scheduler();
…
          * We failed to migrate a single thread.
          * Give up this turn.
-         *
          */
         goto loop;
     }
 
     goto not_satisfied;
 
 satisfied:
     goto loop;
 }
+
 #endif /* CONFIG_SMP */
 
-/** Print information about threads & scheduler queues
- *
- */
+
+/** Print information about threads & scheduler queues */
 void sched_print_list(void)
 {
-    size_t cpu;
+    ipl_t ipl;
+    unsigned int cpu, i;
+    runq_t *r;
+    thread_t *t;
+    link_t *cur;
+
+    /* We are going to mess with scheduler structures,
+     * let's not be interrupted */
+    ipl = interrupts_disable();
     for (cpu = 0; cpu < config.cpu_count; cpu++) {
+
         if (!cpus[cpu].active)
             continue;
-
-        irq_spinlock_lock(&cpus[cpu].lock, true);
-
-        printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
+
+        spinlock_lock(&cpus[cpu].lock);
+        printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
             cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
             cpus[cpu].needs_relink);
 
-        unsigned int i;
         for (i = 0; i < RQ_COUNT; i++) {
-            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
-            if (cpus[cpu].rq[i].n == 0) {
-                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
+            r = &cpus[cpu].rq[i];
+            spinlock_lock(&r->lock);
+            if (!r->n) {
+                spinlock_unlock(&r->lock);
                 continue;
             }
-
             printf("\trq[%u]: ", i);
-            link_t *cur;
-            for (cur = cpus[cpu].rq[i].rq_head.next;
-                cur != &(cpus[cpu].rq[i].rq_head);
-                cur = cur->next) {
-                thread_t *thread = list_get_instance(cur, thread_t, rq_link);
-                printf("%" PRIu64 "(%s) ", thread->tid,
-                    thread_states[thread->state]);
+            for (cur = r->rq_head.next; cur != &r->rq_head;
+                cur = cur->next) {
+                t = list_get_instance(cur, thread_t, rq_link);
+                printf("%" PRIu64 "(%s) ", t->tid,
+                    thread_states[t->state]);
             }
             printf("\n");
-
-            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
-        }
-
-        irq_spinlock_unlock(&cpus[cpu].lock, true);
-    }
+            spinlock_unlock(&r->lock);
+        }
+        spinlock_unlock(&cpus[cpu].lock);
+    }
+
+    interrupts_restore(ipl);
 }
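The bulk of this changeset is a change of synchronization idiom: on the 98000fb side the scheduler pairs plain spinlocks with explicit interrupt-level handling via interrupts_disable()/interrupts_restore(), while on the df58e44 side the combined irq_spinlock_* calls take a boolean that, judging from how the calls are used in the diff, tells the call whether to manage the interrupt level itself (true) or to assume interrupts are already disabled (false). The sketch below is illustrative only: it assumes the HelenOS kernel headers that declare these primitives, and example_lock, example_irq_lock and critical_work() are hypothetical placeholders rather than names from this changeset.

    /* Illustrative sketch of the two idioms seen in the diff above.
     * Assumes the HelenOS kernel spinlock/interrupt API; example_lock,
     * example_irq_lock and critical_work() are hypothetical placeholders. */

    /* 98000fb-style: interrupt level managed by hand around a plain spinlock. */
    ipl_t ipl = interrupts_disable();   /* save and raise the interrupt level */
    spinlock_lock(&example_lock);
    critical_work();                    /* hypothetical critical section */
    spinlock_unlock(&example_lock);
    interrupts_restore(ipl);            /* restore the saved interrupt level */

    /* df58e44-style: the flag asks the irq spinlock to save/disable and
     * restore interrupts itself (true), or to assume they are already
     * disabled (false), as read from the calls in the diff. */
    irq_spinlock_lock(&example_irq_lock, true);
    critical_work();
    irq_spinlock_unlock(&example_irq_lock, true);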