Changeset da1bafb in mainline for kernel/generic/src/proc/scheduler.c
- Timestamp: 2010-05-24T18:57:31Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- File: 1 edited
Legend:
- Unmodified: lines with no marker
- Removed (r666f492): lines prefixed with -
- Added (rda1bafb): lines prefixed with +
kernel/generic/src/proc/scheduler.c
--- r666f492
+++ rda1bafb

  /**
   * @file
-  * @brief
+  * @brief Scheduler and load balancing.
   *
   * This file contains the scheduler and kcpulb kernel thread which
…
  static void scheduler_separated_stack(void);

- atomic_t nrdy;
+ atomic_t nrdy;  /**< Number of ready threads in the system. */

  /** Carry out actions before new task runs. */
…
      before_thread_runs_arch();
  #ifdef CONFIG_FPU_LAZY
-     if(THREAD == CPU->fpu_owner) 
+     if(THREAD == CPU->fpu_owner)
          fpu_enable();
      else
-         fpu_disable(); 
+         fpu_disable();
  #else
      fpu_enable();
…
  restart:
      fpu_enable();
-     spinlock_lock(&CPU->lock);
+     irq_spinlock_lock(&CPU->lock, false);

      /* Save old context */
-     if (CPU->fpu_owner != NULL) { 
-         spinlock_lock(&CPU->fpu_owner->lock);
+     if (CPU->fpu_owner != NULL) {
+         irq_spinlock_lock(&CPU->fpu_owner->lock, false);
          fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-         /* don't prevent migration */
+ 
+         /* Don't prevent migration */
          CPU->fpu_owner->fpu_context_engaged = 0;
-         spinlock_unlock(&CPU->fpu_owner->lock);
+         irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
          CPU->fpu_owner = NULL;
      }

-     spinlock_lock(&THREAD->lock);
+     irq_spinlock_lock(&THREAD->lock, false);
      if (THREAD->fpu_context_exists) {
          fpu_context_restore(THREAD->saved_fpu_context);
…
          if (!THREAD->saved_fpu_context) {
              /* Might sleep */
-             spinlock_unlock(&THREAD->lock);
-             spinlock_unlock(&CPU->lock);
+             irq_spinlock_unlock(&THREAD->lock, false);
+             irq_spinlock_unlock(&CPU->lock, false);
              THREAD->saved_fpu_context =
                  (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+ 
              /* We may have switched CPUs during slab_alloc */
-             goto restart; 
+             goto restart;
          }
          fpu_init();
          THREAD->fpu_context_exists = 1;
      }
+ 
      CPU->fpu_owner = THREAD;
      THREAD->fpu_context_engaged = 1;
-     spinlock_unlock(&THREAD->lock);
- 
-     spinlock_unlock(&CPU->lock);
- }
- #endif
+     irq_spinlock_unlock(&THREAD->lock, false);
+ 
+     irq_spinlock_unlock(&CPU->lock, false);
+ }
+ #endif /* CONFIG_FPU_LAZY */

  /** Initialize scheduler
…
  static thread_t *find_best_thread(void)
  {
-     thread_t *t;
-     runq_t *r;
-     int i;
- 
      ASSERT(CPU != NULL);

  loop:

…
           * This improves energy saving and hyperthreading.
           */

          /* Mark CPU as it was idle this clock tick */
-         spinlock_lock(&CPU->lock);
-         CPU->idle = true;
-         spinlock_unlock(&CPU->lock);
- 
-         interrupts_enable();
-         /*
+         irq_spinlock_lock(&CPU->lock, false);
+         CPU->idle = true;
+         irq_spinlock_unlock(&CPU->lock, false);
+ 
+         interrupts_enable();
+ 
+         /*
           * An interrupt might occur right now and wake up a thread.
           * In such case, the CPU will continue to go to sleep
           * even though there is a runnable thread.
           */
-         cpu_sleep();
-         interrupts_disable();
-         goto loop;
-     }
- 
+         cpu_sleep();
+         interrupts_disable();
+         goto loop;
+     }
+ 
+     unsigned int i;
      for (i = 0; i < RQ_COUNT; i++) {
-         r = &CPU->rq[i];
-         spinlock_lock(&r->lock);
-         if (r->n == 0) {
+         irq_spinlock_lock(&(CPU->rq[i].lock), false);
+         if (CPU->rq[i].n == 0) {
              /*
               * If this queue is empty, try a lower-priority queue.
               */
-             spinlock_unlock(&r->lock);
+             irq_spinlock_unlock(&(CPU->rq[i].lock), false);
              continue;
          }

          atomic_dec(&CPU->nrdy);
          atomic_dec(&nrdy);
-         r->n--;
- 
+         CPU->rq[i].n--;
+ 
          /*
           * Take the first thread from the queue.
           */
-         t = list_get_instance(r->rq_head.next, thread_t, rq_link);
-         list_remove(&t->rq_link);
- 
-         spinlock_unlock(&r->lock);
- 
-         spinlock_lock(&t->lock);
-         t->cpu = CPU;
- 
-         t->ticks = us2ticks((i + 1) * 10000);
-         t->priority = i; /* correct rq index */
- 
+         thread_t *thread =
+             list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
+         list_remove(&thread->rq_link);
+ 
+         irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+ 
+         thread->cpu = CPU;
+         thread->ticks = us2ticks((i + 1) * 10000);
+         thread->priority = i; /* Correct rq index */
+ 
          /*
           * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
           * when load balancing needs emerge.
           */
-         t->flags &= ~THREAD_FLAG_STOLEN;
-         spinlock_unlock(&t->lock);
- 
-         return t;
-     }
+         thread->flags &= ~THREAD_FLAG_STOLEN;
+         irq_spinlock_unlock(&thread->lock, false);
+ 
+         return thread;
+     }
+ 
      goto loop;
- 
  }
…
  {
      link_t head;
-     runq_t *r;
-     int i, n;
- 
+ 
      list_initialize(&head);
-     spinlock_lock(&CPU->lock);
+     irq_spinlock_lock(&CPU->lock, false);
+ 
      if (CPU->needs_relink > NEEDS_RELINK_MAX) {
+         int i;
          for (i = start; i < RQ_COUNT - 1; i++) {
-             /* remember and empty rq[i + 1] */
-             r = &CPU->rq[i + 1];
-             spinlock_lock(&r->lock);
-             list_concat(&head, &r->rq_head);
-             n = r->n;
-             r->n = 0;
-             spinlock_unlock(&r->lock);
- 
-             /* append rq[i + 1] to rq[i] */
-             r = &CPU->rq[i];
-             spinlock_lock(&r->lock);
-             list_concat(&r->rq_head, &head);
-             r->n += n;
-             spinlock_unlock(&r->lock);
+             /* Remember and empty rq[i + 1] */
+ 
+             irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
+             list_concat(&head, &CPU->rq[i + 1].rq_head);
+             size_t n = CPU->rq[i + 1].n;
+             CPU->rq[i + 1].n = 0;
+             irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
+ 
+             /* Append rq[i + 1] to rq[i] */
+ 
+             irq_spinlock_lock(&CPU->rq[i].lock, false);
+             list_concat(&CPU->rq[i].rq_head, &head);
+             CPU->rq[i].n += n;
+             irq_spinlock_unlock(&CPU->rq[i].lock, false);
          }
+ 
          CPU->needs_relink = 0;
      }
-     spinlock_unlock(&CPU->lock);
- 
+ 
+     irq_spinlock_unlock(&CPU->lock, false);
  }

…
  {
      volatile ipl_t ipl;

      ASSERT(CPU != NULL);

      ipl = interrupts_disable();

      if (atomic_get(&haltstate))
          halt();

      if (THREAD) {
-         spinlock_lock(&THREAD->lock);
+         irq_spinlock_lock(&THREAD->lock, false);

          /* Update thread kernel accounting */
…
          THREAD->last_cycle = get_cycle();

-         spinlock_unlock(&THREAD->lock);
+         irq_spinlock_unlock(&THREAD->lock, false);
          interrupts_restore(THREAD->saved_context.ipl);

          return;
      }

      /*
       * Interrupt priority level of preempted thread is recorded
       * here to facilitate scheduler() invocations from
-      * interrupts_disable()'d code (e.g. waitq_sleep_timeout()). 
+      * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+      *
       */
      THREAD->saved_context.ipl = ipl;
  }

  /*
   * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
   * and preemption counter. At this point THE could be coming either
   * from THREAD's or CPU's stack.
+  *
   */
  the_copy(THE, (the_t *) CPU->stack);

  /*
   * We may not keep the old stack.
…
   * Therefore the scheduler() function continues in
   * scheduler_separated_stack().
+  *
   */
  context_save(&CPU->saved_context);
…
      (uintptr_t) CPU->stack, CPU_STACK_SIZE);
  context_restore(&CPU->saved_context);
- /* not reached */
+ 
+ /* Not reached */
  }

…
   *
   * Assume THREAD->lock is held.
+  *
   */
  void scheduler_separated_stack(void)
  {
-     int priority;
      DEADLOCK_PROBE_INIT(p_joinwq);
      task_t *old_task = TASK;
      as_t *old_as = AS;

      ASSERT(CPU != NULL);

…
       * possible destruction should thread_destroy() be called on this or any
       * other processor while the scheduler is still using them.
+      *
       */
      if (old_task)
          task_hold(old_task);
+ 
      if (old_as)
          as_hold(old_as);

      if (THREAD) {
-         /* must be run after the switch to scheduler stack */
+         /* Must be run after the switch to scheduler stack */
          after_thread_ran();

          switch (THREAD->state) {
          case Running:
-             spinlock_unlock(&THREAD->lock);
+             irq_spinlock_unlock(&THREAD->lock, false);
              thread_ready(THREAD);
              break;

          case Exiting:
      repeat:
              if (THREAD->detached) {
-                 thread_destroy(THREAD);
+                 thread_destroy(THREAD, false);
              } else {
                  /*
                   * The thread structure is kept allocated until
                   * somebody calls thread_detach() on it.
+                  *
                   */
-                 if (!spinlock_trylock(&THREAD->join_wq.lock)) {
+                 if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
                      /*
                       * Avoid deadlock.
+                      *
                       */
-                     spinlock_unlock(&THREAD->lock);
+                     irq_spinlock_unlock(&THREAD->lock, false);
                      delay(HZ);
-                     spinlock_lock(&THREAD->lock);
+                     irq_spinlock_lock(&THREAD->lock, false);
                      DEADLOCK_PROBE(p_joinwq,
                          DEADLOCK_THRESHOLD);
…
                  _waitq_wakeup_unsafe(&THREAD->join_wq,
                      WAKEUP_FIRST);
-                 spinlock_unlock(&THREAD->join_wq.lock);
+                 irq_spinlock_unlock(&THREAD->join_wq.lock, false);

                  THREAD->state = Lingering;
-                 spinlock_unlock(&THREAD->lock);
+                 irq_spinlock_unlock(&THREAD->lock, false);
              }
              break;
…
              /*
               * Prefer the thread after it's woken up.
+              *
               */
              THREAD->priority = -1;

              /*
               * We need to release wq->lock which we locked in
               * waitq_sleep(). Address of wq->lock is kept in
               * THREAD->sleep_queue.
+              *
               */
-             spinlock_unlock(&THREAD->sleep_queue->lock);
+             irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
+ 
              /*
               * Check for possible requests for out-of-context
               * invocation.
+              *
               */
              if (THREAD->call_me) {
…
                  THREAD->call_me_with = NULL;
              }
- 
-             spinlock_unlock(&THREAD->lock);
- 
+ 
+             irq_spinlock_unlock(&THREAD->lock, false);
+ 
              break;

          default:
              /*
               * Entering state is unexpected.
+              *
               */
              panic("tid%" PRIu64 ": unexpected state %s.",
…
              break;
          }

          THREAD = NULL;
      }

      THREAD = find_best_thread();

-     spinlock_lock(&THREAD->lock);
-     priority = THREAD->priority;
-     spinlock_unlock(&THREAD->lock);
- 
-     relink_rq(priority);
- 
+     irq_spinlock_lock(&THREAD->lock, false);
+     int priority = THREAD->priority;
+     irq_spinlock_unlock(&THREAD->lock, false);
+ 
+     relink_rq(priority);
+ 
      /*
       * If both the old and the new task are the same, lots of work is
       * avoided.
+      *
       */
      if (TASK != THREAD->task) {
…
           * Note that it is possible for two tasks to share one address
           * space.
+          *
           */
          if (old_as != new_as) {
…
               * Both tasks and address spaces are different.
               * Replace the old one with the new one.
+              *
               */
              as_switch(old_as, new_as);
          }

          TASK = THREAD->task;
          before_task_runs();
      }

      if (old_task)
          task_release(old_task);
+ 
      if (old_as)
          as_release(old_as);

-     spinlock_lock(&THREAD->lock);
+     irq_spinlock_lock(&THREAD->lock, false);
      THREAD->state = Running;

  #ifdef SCHEDULER_VERBOSE
      printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
          ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
          THREAD->ticks, atomic_get(&CPU->nrdy));
- #endif
- 
+ #endif
+ 
      /*
       * Some architectures provide late kernel PA2KA(identity)
…
       * necessary, is to be mapped in before_thread_runs(). This
       * function must be executed before the switch to the new stack.
+      *
       */
      before_thread_runs();

      /*
       * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
       * thread's stack.
+      *
       */
      the_copy(THE, (the_t *) THREAD->kstack);

      context_restore(&THREAD->saved_context);
-     /* not reached */
+ 
+     /* Not reached */
  }

…
  void kcpulb(void *arg)
  {
-     thread_t *t;
-     int count;
      atomic_count_t average;
-     unsigned int i;
-     int j;
-     int k = 0;
-     ipl_t ipl;
- 
+     atomic_count_t rdy;
+ 
      /*
       * Detach kcpulb as nobody will call thread_join_timeout() on it.
…
       */
      thread_sleep(1);

  not_satisfied:
      /*
…
       * other CPU's. Note that situation can have changed between two
       * passes. Each time get the most up to date counts.
+      *
       */
      average = atomic_get(&nrdy) / config.cpu_active + 1;
-     count = average - atomic_get(&CPU->nrdy);
- 
-     if (count <= 0)
+     rdy = atomic_get(&CPU->nrdy);
+ 
+     if (average <= rdy)
          goto satisfied;
- 
+ 
+     atomic_count_t count = average - rdy;
+ 
      /*
       * Searching least priority queues on all CPU's first and most priority
       * queues on all CPU's last.
-      */
-     for (j = RQ_COUNT - 1; j >= 0; j--) {
-         for (i = 0; i < config.cpu_active; i++) {
-             link_t *l;
-             runq_t *r;
-             cpu_t *cpu;
- 
-             cpu = &cpus[(i + k) % config.cpu_active];
- 
+      *
+      */
+     size_t acpu;
+     size_t acpu_bias = 0;
+     int rq;
+ 
+     for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
+         for (acpu = 0; acpu < config.cpu_active; acpu++) {
+             cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
+ 
              /*
               * Not interested in ourselves.
               * Doesn't require interrupt disabling for kcpulb has
               * THREAD_FLAG_WIRED.
+              *
               */
              if (CPU == cpu)
                  continue;
+ 
              if (atomic_get(&cpu->nrdy) <= average)
                  continue;
- 
-             ipl = interrupts_disable();
-             r = &cpu->rq[j];
-             spinlock_lock(&r->lock);
-             if (r->n == 0) {
-                 spinlock_unlock(&r->lock);
-                 interrupts_restore(ipl);
+ 
+             irq_spinlock_lock(&(cpu->rq[rq].lock), true);
+             if (cpu->rq[rq].n == 0) {
+                 irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
                  continue;
              }
- 
-             t = NULL;
-             l = r->rq_head.prev; /* search rq from the back */
-             while (l != &r->rq_head) {
-                 t = list_get_instance(l, thread_t, rq_link);
+ 
+             thread_t *thread = NULL;
+ 
+             /* Search rq from the back */
+             link_t *link = cpu->rq[rq].rq_head.prev;
+ 
+             while (link != &(cpu->rq[rq].rq_head)) {
+                 thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
+ 
                  /*
                   * We don't want to steal CPU-wired threads
…
                   * steal threads whose FPU context is still in
                   * CPU.
+                  *
                   */
-                 spinlock_lock(&t->lock);
-                 if ((!(t->flags & (THREAD_FLAG_WIRED |
-                     THREAD_FLAG_STOLEN))) &&
-                     (!(t->fpu_context_engaged))) {
+                 irq_spinlock_lock(&thread->lock, false);
+ 
+                 if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
+                     && (!(thread->fpu_context_engaged))) {
                      /*
-                      * Remove t from r.
+                      * Remove thread from ready queue.
                       */
-                     spinlock_unlock(&t->lock);
+                     irq_spinlock_unlock(&thread->lock, false);

                      atomic_dec(&cpu->nrdy);
                      atomic_dec(&nrdy);
- 
-                     r->n--;
-                     list_remove(&t->rq_link);
- 
+ 
+                     cpu->rq[rq].n--;
+                     list_remove(&thread->rq_link);
+ 
                      break;
                  }
-                 spinlock_unlock(&t->lock);
-                 l = l->prev;
-                 t = NULL;
+ 
+                 irq_spinlock_unlock(&thread->lock, false);
+ 
+                 link = link->prev;
+                 thread = NULL;
              }
-             spinlock_unlock(&r->lock);
- 
-             if (t) {
+ 
+             if (thread) {
                  /*
-                  * Ready t on local CPU
+                  * Ready thread on local CPU
+                  *
                   */
-                 spinlock_lock(&t->lock);
+ 
+                 irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
+ 
  #ifdef KCPULB_VERBOSE
                  printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
…
                      atomic_get(&nrdy) / config.cpu_active);
  #endif
-                 t->flags |= THREAD_FLAG_STOLEN;
-                 t->state = Entering;
-                 spinlock_unlock(&t->lock);
- 
-                 thread_ready(t);
- 
-                 interrupts_restore(ipl);
- 
+ 
+                 thread->flags |= THREAD_FLAG_STOLEN;
+                 thread->state = Entering;
+ 
+                 irq_spinlock_unlock(&thread->lock, true);
+                 thread_ready(thread);
+ 
                  if (--count == 0)
                      goto satisfied;
- 
+ 
                  /*
                   * We are not satisfied yet, focus on another
                   * CPU next time.
+                  *
                   */
-                 k++;
+                 acpu_bias++;

                  continue;
-             }
-             interrupts_restore(ipl);
+             } else
+                 irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+ 
          }
      }

      if (atomic_get(&CPU->nrdy)) {
          /*
           * Be a little bit light-weight and let migrated threads run.
+          *
           */
          scheduler();
…
           * We failed to migrate a single thread.
           * Give up this turn.
+          *
           */
          goto loop;
      }

      goto not_satisfied;

  satisfied:
      goto loop;
  }
- 
  #endif /* CONFIG_SMP */

- 
- /** Print information about threads & scheduler queues */
+ /** Print information about threads & scheduler queues
+  *
+  */
  void sched_print_list(void)
  {
-     ipl_t ipl;
-     unsigned int cpu, i;
-     runq_t *r;
-     thread_t *t;
-     link_t *cur;
- 
-     /* We are going to mess with scheduler structures,
-      * let's not be interrupted */
-     ipl = interrupts_disable();
+     size_t cpu;
      for (cpu = 0; cpu < config.cpu_count; cpu++) {
- 
          if (!cpus[cpu].active)
              continue;
- 
-         spinlock_lock(&cpus[cpu].lock);
+ 
+         irq_spinlock_lock(&cpus[cpu].lock, true);
+ 
          printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
              cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
              cpus[cpu].needs_relink);

+         unsigned int i;
          for (i = 0; i < RQ_COUNT; i++) {
-             r = &cpus[cpu].rq[i];
-             spinlock_lock(&r->lock);
-             if (!r->n) {
-                 spinlock_unlock(&r->lock);
+             irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
+             if (cpus[cpu].rq[i].n == 0) {
+                 irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
                  continue;
              }
+ 
              printf("\trq[%u]: ", i);
-             for (cur = r->rq_head.next; cur != &r->rq_head;
-                 cur = cur->next) {
-                 t = list_get_instance(cur, thread_t, rq_link);
-                 printf("%" PRIu64 "(%s) ", t->tid,
-                     thread_states[t->state]);
+             link_t *cur;
+             for (cur = cpus[cpu].rq[i].rq_head.next;
+                 cur != &(cpus[cpu].rq[i].rq_head);
+                 cur = cur->next) {
+                 thread_t *thread = list_get_instance(cur, thread_t, rq_link);
+                 printf("%" PRIu64 "(%s) ", thread->tid,
+                     thread_states[thread->state]);
              }
              printf("\n");
-             spinlock_unlock(&r->lock);
+ 
+             irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
          }
-         spinlock_unlock(&cpus[cpu].lock);
-     }
- 
-     interrupts_restore(ipl);
+ 
+         irq_spinlock_unlock(&cpus[cpu].lock, true);
+     }
  }
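Taken together, the change is largely mechanical: every plain spinlock in scheduler.c, along with the explicit interrupts_disable()/interrupts_restore() bracketing around it, becomes an irq_spinlock whose lock and unlock calls take a boolean saying whether the operation itself should disable and later restore interrupts. The sketch below restates that before/after pattern outside the diff. It assumes the HelenOS kernel environment shown above (cpu_t, thread_t, the run queue member rq[i].lock, and the irq_spinlock_lock()/irq_spinlock_unlock()/irq_spinlock_pass() calls that appear in the new revision); the two helper function names are illustrative only and are not part of the changeset.

    /* Illustrative sketch only -- not part of changeset da1bafb. */

    /* Old style (r666f492): a plain spinlock paired with explicit
     * interrupt disabling by the caller. */
    static void rq_access_old(cpu_t *cpu, unsigned int i)
    {
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&cpu->rq[i].lock);
        /* ... manipulate the run queue ... */
        spinlock_unlock(&cpu->rq[i].lock);
        interrupts_restore(ipl);
    }

    /* New style (rda1bafb): the boolean argument tells the lock operation
     * whether to disable/restore interrupts itself (true) or to assume the
     * caller has already done so (false); this mirrors the kcpulb() code
     * in the diff above. */
    static void rq_access_new(cpu_t *cpu, unsigned int i, thread_t *thread)
    {
        irq_spinlock_lock(&cpu->rq[i].lock, true);
        /* ... pick a thread from the run queue ... */

        /* Hand over from the run-queue lock to the thread lock, replacing
         * the old unlock-then-lock sequence, as in find_best_thread(). */
        irq_spinlock_pass(&cpu->rq[i].lock, &thread->lock);
        irq_spinlock_unlock(&thread->lock, true);
    }

In the diff itself, the false variant appears on paths that already run with interrupts disabled (the lazy FPU request code and find_best_thread()), while the true variant appears in kcpulb() and sched_print_list(), which previously had to disable interrupts explicitly.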