Changes in kernel/generic/src/proc/scheduler.c [98000fb:df58e44] in mainline
File:
- 1 edited
Legend:
- Unmodified lines are prefixed with a single space
- Added lines are prefixed with +
- Removed lines are prefixed with -
- Hunk headers of the form @@ -N +M @@ give the first line of each hunk in the old (N) and new (M) revision
kernel/generic/src/proc/scheduler.c
r98000fb rdf58e44 1 1 /* 2 * Copyright (c) 20 01-2007Jakub Jermar2 * Copyright (c) 2010 Jakub Jermar 3 3 * All rights reserved. 4 4 * … … 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Scheduler and load balancing. 36 36 * 37 37 * This file contains the scheduler and kcpulb kernel thread which … … 62 62 #include <print.h> 63 63 #include <debug.h> 64 65 static void before_task_runs(void); 66 static void before_thread_runs(void); 67 static void after_thread_ran(void); 64 #include <stacktrace.h> 65 68 66 static void scheduler_separated_stack(void); 69 67 70 atomic_t nrdy; 68 atomic_t nrdy; /**< Number of ready threads in the system. */ 71 69 72 70 /** Carry out actions before new task runs. */ 73 void before_task_runs(void)71 static void before_task_runs(void) 74 72 { 75 73 before_task_runs_arch(); … … 80 78 * Perform actions that need to be 81 79 * taken before the newly selected 82 * t read is passed control.80 * thread is passed control. 83 81 * 84 82 * THREAD->lock is locked on entry 85 83 * 86 84 */ 87 void before_thread_runs(void)85 static void before_thread_runs(void) 88 86 { 89 87 before_thread_runs_arch(); 88 90 89 #ifdef CONFIG_FPU_LAZY 91 if (THREAD == CPU->fpu_owner)90 if (THREAD == CPU->fpu_owner) 92 91 fpu_enable(); 93 92 else 94 fpu_disable(); 93 fpu_disable(); 95 94 #else 96 95 fpu_enable(); … … 102 101 } 103 102 #endif 103 104 if (THREAD->btrace) { 105 istate_t *istate = THREAD->udebug.uspace_state; 106 if (istate != NULL) { 107 printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid); 108 stack_trace_istate(istate); 109 } 110 111 THREAD->btrace = false; 112 } 104 113 } 105 114 … … 113 122 * 114 123 */ 115 void after_thread_ran(void)124 static void after_thread_ran(void) 116 125 { 117 126 after_thread_ran_arch(); … … 123 132 restart: 124 133 fpu_enable(); 125 spinlock_lock(&CPU->lock);126 134 irq_spinlock_lock(&CPU->lock, false); 135 127 136 /* Save old context */ 128 if (CPU->fpu_owner != NULL) { 129 spinlock_lock(&CPU->fpu_owner->lock);137 if (CPU->fpu_owner != NULL) { 138 irq_spinlock_lock(&CPU->fpu_owner->lock, false); 130 139 fpu_context_save(CPU->fpu_owner->saved_fpu_context); 131 /* don't prevent migration */ 140 141 /* Don't prevent migration */ 132 142 CPU->fpu_owner->fpu_context_engaged = 0; 133 spinlock_unlock(&CPU->fpu_owner->lock);143 irq_spinlock_unlock(&CPU->fpu_owner->lock, false); 134 144 CPU->fpu_owner = NULL; 135 145 } 136 137 spinlock_lock(&THREAD->lock);146 147 irq_spinlock_lock(&THREAD->lock, false); 138 148 if (THREAD->fpu_context_exists) { 139 149 fpu_context_restore(THREAD->saved_fpu_context); … … 142 152 if (!THREAD->saved_fpu_context) { 143 153 /* Might sleep */ 144 spinlock_unlock(&THREAD->lock);145 spinlock_unlock(&CPU->lock);154 irq_spinlock_unlock(&THREAD->lock, false); 155 irq_spinlock_unlock(&CPU->lock, false); 146 156 THREAD->saved_fpu_context = 147 157 (fpu_context_t *) slab_alloc(fpu_context_slab, 0); 158 148 159 /* We may have switched CPUs during slab_alloc */ 149 goto restart; 160 goto restart; 150 161 } 151 162 fpu_init(); 152 163 THREAD->fpu_context_exists = 1; 153 164 } 165 154 166 CPU->fpu_owner = THREAD; 155 167 THREAD->fpu_context_engaged = 1; 156 spinlock_unlock(&THREAD->lock);157 158 spinlock_unlock(&CPU->lock);159 } 160 #endif 168 irq_spinlock_unlock(&THREAD->lock, false); 169 170 irq_spinlock_unlock(&CPU->lock, false); 171 } 172 #endif /* CONFIG_FPU_LAZY */ 161 173 162 174 /** Initialize scheduler … … 180 192 static thread_t *find_best_thread(void) 181 193 { 182 thread_t *t;183 runq_t *r;184 int i;185 186 194 ASSERT(CPU != 
NULL); 187 195 188 196 loop: 189 interrupts_enable();190 197 191 198 if (atomic_get(&CPU->nrdy) == 0) { … … 195 202 * This improves energy saving and hyperthreading. 196 203 */ 197 204 irq_spinlock_lock(&CPU->lock, false); 205 CPU->idle = true; 206 irq_spinlock_unlock(&CPU->lock, false); 207 interrupts_enable(); 208 198 209 /* 199 210 * An interrupt might occur right now and wake up a thread. … … 201 212 * even though there is a runnable thread. 202 213 */ 203 204 cpu_sleep(); 205 goto loop; 206 } 207 208 interrupts_disable(); 209 214 cpu_sleep(); 215 interrupts_disable(); 216 goto loop; 217 } 218 219 unsigned int i; 210 220 for (i = 0; i < RQ_COUNT; i++) { 211 r = &CPU->rq[i]; 212 spinlock_lock(&r->lock); 213 if (r->n == 0) { 221 irq_spinlock_lock(&(CPU->rq[i].lock), false); 222 if (CPU->rq[i].n == 0) { 214 223 /* 215 224 * If this queue is empty, try a lower-priority queue. 216 225 */ 217 spinlock_unlock(&r->lock);226 irq_spinlock_unlock(&(CPU->rq[i].lock), false); 218 227 continue; 219 228 } 220 229 221 230 atomic_dec(&CPU->nrdy); 222 231 atomic_dec(&nrdy); 223 r->n--;224 232 CPU->rq[i].n--; 233 225 234 /* 226 235 * Take the first thread from the queue. 227 236 */ 228 t = list_get_instance(r->rq_head.next, thread_t, rq_link); 229 list_remove(&t->rq_link); 230 231 spinlock_unlock(&r->lock); 232 233 spinlock_lock(&t->lock); 234 t->cpu = CPU; 235 236 t->ticks = us2ticks((i + 1) * 10000); 237 t->priority = i; /* correct rq index */ 238 237 thread_t *thread = 238 list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link); 239 list_remove(&thread->rq_link); 240 241 irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock); 242 243 thread->cpu = CPU; 244 thread->ticks = us2ticks((i + 1) * 10000); 245 thread->priority = i; /* Correct rq index */ 246 239 247 /* 240 248 * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated 241 249 * when load balancing needs emerge. 
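For orientation, the selection policy implemented by the rewritten find_best_thread() can be condensed as follows. This is an illustrative sketch only, built from names visible in the hunks above (CPU->rq[], RQ_COUNT, us2ticks()); it leaves out the locking, the nrdy bookkeeping and the idle/retry path, and pick_next() itself is a hypothetical helper, not a function from the changeset.

/* Illustrative sketch, not code from the changeset. */
static thread_t *pick_next(void)
{
    unsigned int i;

    /* Scan run queues from the highest priority (index 0) downwards. */
    for (i = 0; i < RQ_COUNT; i++) {
        if (CPU->rq[i].n == 0)
            continue;  /* this priority level is empty, try the next one */

        /* Take the thread at the head of the first non-empty queue. */
        thread_t *thread =
            list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
        list_remove(&thread->rq_link);
        CPU->rq[i].n--;

        /* Lower-priority threads receive a proportionally longer quantum. */
        thread->ticks = us2ticks((i + 1) * 10000);
        thread->priority = i;
        return thread;
    }

    return NULL;  /* no runnable thread on this CPU */
}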
@@ -264 +272 @@
 {
     link_t head;
-    runq_t *r;
-    int i, n;
 
     list_initialize(&head);
-    spinlock_lock(&CPU->lock);
+    irq_spinlock_lock(&CPU->lock, false);
+
     if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+        int i;
         for (i = start; i < RQ_COUNT - 1; i++) {
-            /* remember and empty rq[i + 1] */
-            r = &CPU->rq[i + 1];
-            spinlock_lock(&r->lock);
-            list_concat(&head, &r->rq_head);
-            n = r->n;
-            r->n = 0;
-            spinlock_unlock(&r->lock);
-
-            /* append rq[i + 1] to rq[i] */
-            r = &CPU->rq[i];
-            spinlock_lock(&r->lock);
-            list_concat(&r->rq_head, &head);
-            r->n += n;
-            spinlock_unlock(&r->lock);
-        }
+            /* Remember and empty rq[i + 1] */
+
+            irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+            list_concat(&head, &CPU->rq[i + 1].rq_head);
+            size_t n = CPU->rq[i + 1].n;
+            CPU->rq[i + 1].n = 0;
+            irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+
+            /* Append rq[i + 1] to rq[i] */
+
+            irq_spinlock_lock(&CPU->rq[i].lock, false);
+            list_concat(&CPU->rq[i].rq_head, &head);
+            CPU->rq[i].n += n;
+            irq_spinlock_unlock(&CPU->rq[i].lock, false);
+        }
+
         CPU->needs_relink = 0;
     }
-    spinlock_unlock(&CPU->lock);
 
+    irq_spinlock_unlock(&CPU->lock, false);
 }
@@ -302 +311 @@
 {
     volatile ipl_t ipl;
 
     ASSERT(CPU != NULL);
 
     ipl = interrupts_disable();
 
     if (atomic_get(&haltstate))
         halt();
 
     if (THREAD) {
-        spinlock_lock(&THREAD->lock);
-
-        /* Update thread accounting */
-        THREAD->cycles += get_cycle() - THREAD->last_cycle;
+        irq_spinlock_lock(&THREAD->lock, false);
+
+        /* Update thread kernel accounting */
+        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
 
 #ifndef CONFIG_FPU_LAZY
@@ -327 +336 @@
             THREAD->last_cycle = get_cycle();
 
-            spinlock_unlock(&THREAD->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
             interrupts_restore(THREAD->saved_context.ipl);
 
             return;
         }
 
         /*
          * Interrupt priority level of preempted thread is recorded
          * here to facilitate scheduler() invocations from
          * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+         *
          */
         THREAD->saved_context.ipl = ipl;
     }
 
     /*
      * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
      * and preemption counter. At this point THE could be coming either
      * from THREAD's or CPU's stack.
+     *
      */
     the_copy(THE, (the_t *) CPU->stack);
 
     /*
      * We may not keep the old stack.
@@ -359 +370 @@
      * Therefore the scheduler() function continues in
      * scheduler_separated_stack().
+     *
      */
     context_save(&CPU->saved_context);
     context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
-    /* not reached */
+
+    /* Not reached */
 }
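Most of the mechanical changes above follow a single pattern: a spinlock_lock()/spinlock_unlock() pair whose interrupt state is managed by the caller becomes an irq_spinlock_lock()/irq_spinlock_unlock() pair whose boolean argument says whether the lock should disable and restore interrupts itself. A hedged before/after sketch of that pattern, using a hypothetical cpu_t *cpu and inferring the meaning of the flag from its uses in this diff:

/* Old style: the caller disables interrupts around the spinlock. */
static void old_style(cpu_t *cpu)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&cpu->lock);
    /* ... critical section ... */
    spinlock_unlock(&cpu->lock);
    interrupts_restore(ipl);
}

/* New style: pass true when interrupts may still be enabled (the lock
 * disables them and later restores them), false when the caller is
 * already running with interrupts disabled, as in the scheduler paths
 * above. */
static void new_style(cpu_t *cpu)
{
    irq_spinlock_lock(&cpu->lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&cpu->lock, true);
}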
@@ -373 +386 @@
  * switch to a new thread.
  *
- * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
-    int priority;
     DEADLOCK_PROBE_INIT(p_joinwq);
     task_t *old_task = TASK;
+    as_t *old_as = AS;
+
+    ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
     ASSERT(CPU != NULL);
 
+    /*
+     * Hold the current task and the address space to prevent their
+     * possible destruction should thread_destroy() be called on this or any
+     * other processor while the scheduler is still using them.
+     */
+    if (old_task)
+        task_hold(old_task);
+
+    if (old_as)
+        as_hold(old_as);
+
     if (THREAD) {
-        /* must be run after the switch to scheduler stack */
+        /* Must be run after the switch to scheduler stack */
         after_thread_ran();
 
         switch (THREAD->state) {
         case Running:
-            spinlock_unlock(&THREAD->lock);
+            irq_spinlock_unlock(&THREAD->lock, false);
             thread_ready(THREAD);
             break;
 
         case Exiting:
 repeat:
             if (THREAD->detached) {
-                thread_destroy(THREAD);
+                thread_destroy(THREAD, false);
             } else {
                 /*
@@ -401 +426 @@
                  * somebody calls thread_detach() on it.
                  */
-                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+                if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
                     /*
                      * Avoid deadlock.
                      */
-                    spinlock_unlock(&THREAD->lock);
+                    irq_spinlock_unlock(&THREAD->lock, false);
                     delay(HZ);
-                    spinlock_lock(&THREAD->lock);
+                    irq_spinlock_lock(&THREAD->lock, false);
                     DEADLOCK_PROBE(p_joinwq,
                         DEADLOCK_THRESHOLD);
@@ -414 +439 @@
                 _waitq_wakeup_unsafe(&THREAD->join_wq,
                     WAKEUP_FIRST);
-                spinlock_unlock(&THREAD->join_wq.lock);
+                irq_spinlock_unlock(&THREAD->join_wq.lock, false);
 
                 THREAD->state = Lingering;
-                spinlock_unlock(&THREAD->lock);
+                irq_spinlock_unlock(&THREAD->lock, false);
             }
             break;
@@ -426 +451 @@
              */
             THREAD->priority = -1;
 
             /*
              * We need to release wq->lock which we locked in
@@ -432 +457 @@
              * THREAD->sleep_queue.
              */
-            spinlock_unlock(&THREAD->sleep_queue->lock);
-
-            /*
-             * Check for possible requests for out-of-context
-             * invocation.
-             */
-            if (THREAD->call_me) {
-                THREAD->call_me(THREAD->call_me_with);
-                THREAD->call_me = NULL;
-                THREAD->call_me_with = NULL;
-            }
-
-            spinlock_unlock(&THREAD->lock);
-
+            irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+
+            irq_spinlock_unlock(&THREAD->lock, false);
             break;
 
         default:
             /*
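The new task_hold()/as_hold() calls above, paired with task_release()/as_release() after the switch further below, keep the outgoing task and address space alive while the scheduler still dereferences them, even if thread_destroy() runs on another processor in the meantime. A minimal sketch of that hold/release window, assuming the hold/release calls are plain reference-count operations (an assumption; their implementation is not part of this changeset):

/* Sketch of the hold/release window around the switch; assumes
 * task_hold()/task_release() and as_hold()/as_release() are
 * reference-count increments and decrements. */
task_t *old_task = TASK;
as_t *old_as = AS;

if (old_task)
    task_hold(old_task);   /* pin the task structure */
if (old_as)
    as_hold(old_as);       /* pin the address space */

/* ... pick the new thread, possibly as_switch() to a new address space ... */

if (old_task)
    task_release(old_task);  /* may free the task if this was the last reference */
if (old_as)
    as_release(old_as);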
@@ -456 +470 @@
             break;
         }
 
         THREAD = NULL;
     }
 
     THREAD = find_best_thread();
 
-    spinlock_lock(&THREAD->lock);
-    priority = THREAD->priority;
-    spinlock_unlock(&THREAD->lock);
-
-    relink_rq(priority);
-
-    /*
-     * If both the old and the new task are the same, lots of work is
-     * avoided.
+    irq_spinlock_lock(&THREAD->lock, false);
+    int priority = THREAD->priority;
+    irq_spinlock_unlock(&THREAD->lock, false);
+
+    relink_rq(priority);
+
+    /*
+     * If both the old and the new task are the same,
+     * lots of work is avoided.
      */
     if (TASK != THREAD->task) {
-        as_t *as1 = NULL;
-        as_t *as2;
-
-        if (TASK) {
-            spinlock_lock(&TASK->lock);
-            as1 = TASK->as;
-            spinlock_unlock(&TASK->lock);
-        }
-
-        spinlock_lock(&THREAD->task->lock);
-        as2 = THREAD->task->as;
-        spinlock_unlock(&THREAD->task->lock);
+        as_t *new_as = THREAD->task->as;
 
         /*
-         * Note that it is possible for two tasks to share one address
-         * space.
+         * Note that it is possible for two tasks
+         * to share one address space.
          */
-        if (as1 != as2) {
+        if (old_as != new_as) {
             /*
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
              */
-            as_switch(as1, as2);
-        }
+            as_switch(old_as, new_as);
+        }
+
         TASK = THREAD->task;
         before_task_runs();
     }
 
-    spinlock_lock(&THREAD->lock);
+    if (old_task)
+        task_release(old_task);
+
+    if (old_as)
+        as_release(old_as);
+
+    irq_spinlock_lock(&THREAD->lock, false);
     THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
     printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
         ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
         THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
     /*
      * Some architectures provide late kernel PA2KA(identity)
@@ -519 +529 @@
      */
     before_thread_runs();
 
     /*
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
@@ -527 +537 @@
 
     context_restore(&THREAD->saved_context);
-    /* not reached */
+
+    /* Not reached */
 }
@@ -541 +552 @@
 void kcpulb(void *arg)
 {
-    thread_t *t;
-    int count, average, j, k = 0;
-    unsigned int i;
-    ipl_t ipl;
+    atomic_count_t average;
+    atomic_count_t rdy;
 
     /*
      * Detach kcpulb as nobody will call thread_join_timeout() on it.
@@ -556 +565 @@
      */
     thread_sleep(1);
 
 not_satisfied:
     /*
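The declarations above and the threshold test in the next hunk change the balancing arithmetic from signed int to atomic_count_t. A short illustrative sketch of the computation (the remark about avoiding a negative intermediate value is an inference from the types, not something stated in the changeset):

/* Target load per CPU: one more than the system-wide average. */
atomic_count_t average = atomic_get(&nrdy) / config.cpu_active + 1;
atomic_count_t rdy = atomic_get(&CPU->nrdy);

if (average <= rdy) {
    /* Already at or above the fair share -- nothing to steal.
     * Testing first also keeps average - rdy from underflowing the
     * unsigned atomic_count_t (the old code used a signed int). */
} else {
    /* Steal at most this many threads from busier CPUs; e.g. with
     * 8 ready threads on 4 active CPUs, average = 3, and a CPU
     * holding rdy = 1 tries to migrate count = 2 threads. */
    atomic_count_t count = average - rdy;
    (void) count;
}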
@@ -562 +571 @@
      * other CPU's. Note that situation can have changed between two
      * passes. Each time get the most up to date counts.
+     *
      */
     average = atomic_get(&nrdy) / config.cpu_active + 1;
-    count = average - atomic_get(&CPU->nrdy);
-
-    if (count <= 0)
+    rdy = atomic_get(&CPU->nrdy);
+
+    if (average <= rdy)
         goto satisfied;
 
+    atomic_count_t count = average - rdy;
+
     /*
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
-     */
-    for (j = RQ_COUNT - 1; j >= 0; j--) {
-        for (i = 0; i < config.cpu_active; i++) {
-            link_t *l;
-            runq_t *r;
-            cpu_t *cpu;
-
-            cpu = &cpus[(i + k) % config.cpu_active];
-
+     *
+     */
+    size_t acpu;
+    size_t acpu_bias = 0;
+    int rq;
+
+    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+        for (acpu = 0; acpu < config.cpu_active; acpu++) {
+            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+
             /*
              * Not interested in ourselves.
              * Doesn't require interrupt disabling for kcpulb has
              * THREAD_FLAG_WIRED.
+             *
              */
             if (CPU == cpu)
                 continue;
+
             if (atomic_get(&cpu->nrdy) <= average)
                 continue;
-
-            ipl = interrupts_disable();
-            r = &cpu->rq[j];
-            spinlock_lock(&r->lock);
-            if (r->n == 0) {
-                spinlock_unlock(&r->lock);
-                interrupts_restore(ipl);
+
+            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+            if (cpu->rq[rq].n == 0) {
+                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
                 continue;
             }
-
-            t = NULL;
-            l = r->rq_head.prev;  /* search rq from the back */
-            while (l != &r->rq_head) {
-                t = list_get_instance(l, thread_t, rq_link);
+
+            thread_t *thread = NULL;
+
+            /* Search rq from the back */
+            link_t *link = cpu->rq[rq].rq_head.prev;
+
+            while (link != &(cpu->rq[rq].rq_head)) {
+                thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+
                 /*
                  * We don't want to steal CPU-wired threads
@@ -611 +627 @@
                  * steal threads whose FPU context is still in
                  * CPU.
+                 *
                  */
-                spinlock_lock(&t->lock);
-                if ((!(t->flags & (THREAD_FLAG_WIRED |
-                    THREAD_FLAG_STOLEN))) &&
-                    (!(t->fpu_context_engaged))) {
+                irq_spinlock_lock(&thread->lock, false);
+
+                if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+                    && (!(thread->fpu_context_engaged))) {
                     /*
-                     * Remove t from r.
+                     * Remove thread from ready queue.
                      */
-                    spinlock_unlock(&t->lock);
+                    irq_spinlock_unlock(&thread->lock, false);
 
                     atomic_dec(&cpu->nrdy);
                     atomic_dec(&nrdy);
-
-                    r->n--;
-                    list_remove(&t->rq_link);
-
+
+                    cpu->rq[rq].n--;
+                    list_remove(&thread->rq_link);
+
                     break;
                 }
-                spinlock_unlock(&t->lock);
-                l = l->prev;
-                t = NULL;
+
+                irq_spinlock_unlock(&thread->lock, false);
+
+                link = link->prev;
+                thread = NULL;
             }
-            spinlock_unlock(&r->lock);
-
-            if (t) {
+
+            if (thread) {
                 /*
-                 * Ready t on local CPU
+                 * Ready thread on local CPU
                  */
-                spinlock_lock(&t->lock);
+
+                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+
 #ifdef KCPULB_VERBOSE
                 printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
@@ -646 +666 @@
                     atomic_get(&nrdy) / config.cpu_active);
 #endif
-                t->flags |= THREAD_FLAG_STOLEN;
-                t->state = Entering;
-                spinlock_unlock(&t->lock);
-
-                thread_ready(t);
-
-                interrupts_restore(ipl);
-
+
+                thread->flags |= THREAD_FLAG_STOLEN;
+                thread->state = Entering;
+
+                irq_spinlock_unlock(&thread->lock, true);
+                thread_ready(thread);
+
                 if (--count == 0)
                     goto satisfied;
 
                 /*
                  * We are not satisfied yet, focus on another
                  * CPU next time.
+                 *
                  */
-                k++;
+                acpu_bias++;
 
                 continue;
-            }
-            interrupts_restore(ipl);
-        }
-    }
-
+            } else
+                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+
+        }
+    }
+
     if (atomic_get(&CPU->nrdy)) {
         /*
          * Be a little bit light-weight and let migrated threads run.
+         *
          */
         scheduler();
@@ -678 +700 @@
          * We failed to migrate a single thread.
          * Give up this turn.
+         *
          */
         goto loop;
     }
 
     goto not_satisfied;
 
 satisfied:
     goto loop;
 }
-
 #endif /* CONFIG_SMP */
 
-
-/** Print information about threads & scheduler queues */
+/** Print information about threads & scheduler queues
+ *
+ */
 void sched_print_list(void)
 {
-    ipl_t ipl;
-    unsigned int cpu, i;
-    runq_t *r;
-    thread_t *t;
-    link_t *cur;
-
-    /* We are going to mess with scheduler structures,
-     * let's not be interrupted */
-    ipl = interrupts_disable();
+    size_t cpu;
     for (cpu = 0; cpu < config.cpu_count; cpu++) {
-
         if (!cpus[cpu].active)
             continue;
-
-        spinlock_lock(&cpus[cpu].lock);
-        printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
+
+        irq_spinlock_lock(&cpus[cpu].lock, true);
+
+        printf("cpu%u: address=%p, nrdy=%" PRIua ", needs_relink=%zu\n",
             cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
             cpus[cpu].needs_relink);
 
+        unsigned int i;
         for (i = 0; i < RQ_COUNT; i++) {
-            r = &cpus[cpu].rq[i];
-            spinlock_lock(&r->lock);
-            if (!r->n) {
-                spinlock_unlock(&r->lock);
+            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+            if (cpus[cpu].rq[i].n == 0) {
+                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
                 continue;
             }
+
             printf("\trq[%u]: ", i);
-            for (cur = r->rq_head.next; cur != &r->rq_head;
-                cur = cur->next) {
-                t = list_get_instance(cur, thread_t, rq_link);
-                printf("%" PRIu64 "(%s) ", t->tid,
-                    thread_states[t->state]);
+            link_t *cur;
+            for (cur = cpus[cpu].rq[i].rq_head.next;
+                cur != &(cpus[cpu].rq[i].rq_head);
+                cur = cur->next) {
+                thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+                printf("%" PRIu64 "(%s) ", thread->tid,
+                    thread_states[thread->state]);
             }
             printf("\n");
-            spinlock_unlock(&r->lock);
-        }
-        spinlock_unlock(&cpus[cpu].lock);
-    }
-
-    interrupts_restore(ipl);
+
+            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
+        }
+
+        irq_spinlock_unlock(&cpus[cpu].lock, true);
+    }
 }
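As a reading aid for the stealing condition in kcpulb() above, the test can be factored into a predicate like the following. This is a hypothetical helper for illustration, not a function introduced by the changeset:

/* Illustrative helper: would kcpulb() consider this thread for migration? */
static bool thread_is_stealable(thread_t *thread)
{
    /* Wired threads must stay on their CPU, threads marked as already
     * stolen are skipped, and threads whose FPU state is still held in
     * the owning CPU's registers are left alone. */
    return (!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
        (!thread->fpu_context_engaged);
}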