Changes in kernel/generic/src/proc/scheduler.c [ee42e43:481d4751] in mainline
Files: 1 edited (kernel/generic/src/proc/scheduler.c)
Legend:
- Lines with no prefix are common to both revisions (context).
- Lines prefixed with "-" appear only in revision ee42e43 (removed).
- Lines prefixed with "+" appear only in revision 481d4751 (added).
- Hunk headers of the form @@ -N +M @@ give the starting line number in each revision. Lines whose only difference is trailing whitespace are shown as context, and indentation is approximate.
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c  (revision ee42e43)
+++ kernel/generic/src/proc/scheduler.c  (revision 481d4751)

@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Scheduler and load balancing.
  *
  * This file contains the scheduler and kcpulb kernel thread which

@@ -68 +68 @@
 static void scheduler_separated_stack(void);
 
-atomic_t nrdy;
+atomic_t nrdy;  /**< Number of ready threads in the system. */
 
 /** Carry out actions before new task runs. */

@@ -89 +89 @@
     before_thread_runs_arch();
 #ifdef CONFIG_FPU_LAZY
     if(THREAD == CPU->fpu_owner)
         fpu_enable();
     else
         fpu_disable();
 #else
     fpu_enable();

@@ -123 +123 @@
 restart:
     fpu_enable();
-    irq_spinlock_lock(&CPU->lock, false);
+    spinlock_lock(&CPU->lock);
 
     /* Save old context */
     if (CPU->fpu_owner != NULL) {
-        irq_spinlock_lock(&CPU->fpu_owner->lock, false);
+        spinlock_lock(&CPU->fpu_owner->lock);
         fpu_context_save(CPU->fpu_owner->saved_fpu_context);
-
-        /* Don't prevent migration */
+        /* don't prevent migration */
         CPU->fpu_owner->fpu_context_engaged = 0;
-        irq_spinlock_unlock(&CPU->fpu_owner->lock, false);
+        spinlock_unlock(&CPU->fpu_owner->lock);
         CPU->fpu_owner = NULL;
     }
 
-    irq_spinlock_lock(&THREAD->lock, false);
+    spinlock_lock(&THREAD->lock);
     if (THREAD->fpu_context_exists) {
         fpu_context_restore(THREAD->saved_fpu_context);

@@ -143 +142 @@
         if (!THREAD->saved_fpu_context) {
             /* Might sleep */
-            irq_spinlock_unlock(&THREAD->lock, false);
-            irq_spinlock_unlock(&CPU->lock, false);
+            spinlock_unlock(&THREAD->lock);
+            spinlock_unlock(&CPU->lock);
             THREAD->saved_fpu_context =
                 (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
-
             /* We may have switched CPUs during slab_alloc */
             goto restart;
         }
         fpu_init();
         THREAD->fpu_context_exists = 1;
     }
-
     CPU->fpu_owner = THREAD;
     THREAD->fpu_context_engaged = 1;
-    irq_spinlock_unlock(&THREAD->lock, false);
-
-    irq_spinlock_unlock(&CPU->lock, false);
-}
-#endif /* CONFIG_FPU_LAZY */
+    spinlock_unlock(&THREAD->lock);
+
+    spinlock_unlock(&CPU->lock);
+}
+#endif
 
 /** Initialize scheduler

@@ -183 +180 @@
 static thread_t *find_best_thread(void)
 {
+    thread_t *t;
+    runq_t *r;
+    int i;
 
     ASSERT(CPU != NULL);
 
 loop:
 
@@ -193 +194 @@
          * This improves energy saving and hyperthreading.
          */
-        irq_spinlock_lock(&CPU->lock, false);
-        CPU->idle = true;
-        irq_spinlock_unlock(&CPU->lock, false);
-        interrupts_enable();
-
-        /*
+
+        /* Mark CPU as it was idle this clock tick */
+        spinlock_lock(&CPU->lock);
+        CPU->idle = true;
+        spinlock_unlock(&CPU->lock);
+
+        interrupts_enable();
+        /*
          * An interrupt might occur right now and wake up a thread.
          * In such case, the CPU will continue to go to sleep
          * even though there is a runnable thread.
          */
         cpu_sleep();
         interrupts_disable();
         goto loop;
     }
 
-    unsigned int i;
     for (i = 0; i < RQ_COUNT; i++) {
-        irq_spinlock_lock(&(CPU->rq[i].lock), false);
-        if (CPU->rq[i].n == 0) {
+        r = &CPU->rq[i];
+        spinlock_lock(&r->lock);
+        if (r->n == 0) {
             /*
              * If this queue is empty, try a lower-priority queue.
              */
-            irq_spinlock_unlock(&(CPU->rq[i].lock), false);
+            spinlock_unlock(&r->lock);
             continue;
         }
 
         atomic_dec(&CPU->nrdy);
         atomic_dec(&nrdy);
-        CPU->rq[i].n--;
+        r->n--;
 
         /*
          * Take the first thread from the queue.
          */
-        thread_t *thread =
-            list_get_instance(CPU->rq[i].rq_head.next, thread_t, rq_link);
-        list_remove(&thread->rq_link);
-
-        irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
-
-        thread->cpu = CPU;
-        thread->ticks = us2ticks((i + 1) * 10000);
-        thread->priority = i;  /* Correct rq index */
+        t = list_get_instance(r->rq_head.next, thread_t, rq_link);
+        list_remove(&t->rq_link);
+
+        spinlock_unlock(&r->lock);
+
+        spinlock_lock(&t->lock);
+        t->cpu = CPU;
+
+        t->ticks = us2ticks((i + 1) * 10000);
+        t->priority = i;  /* correct rq index */
 
         /*
          * Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
          * when load balancing needs emerge.
          */
-        thread->flags &= ~THREAD_FLAG_STOLEN;
-        irq_spinlock_unlock(&thread->lock, false);
-
-        return thread;
+        t->flags &= ~THREAD_FLAG_STOLEN;
+        spinlock_unlock(&t->lock);
+
+        return t;
     }
     goto loop;
+
 }

@@ -263 +267 @@
 {
     link_t head;
+    runq_t *r;
+    int i, n;
 
     list_initialize(&head);
-    irq_spinlock_lock(&CPU->lock, false);
-
+    spinlock_lock(&CPU->lock);
     if (CPU->needs_relink > NEEDS_RELINK_MAX) {
-        int i;
         for (i = start; i < RQ_COUNT - 1; i++) {
-            /* Remember and empty rq[i + 1] */
-
-            irq_spinlock_lock(&CPU->rq[i + 1].lock, false);
-            list_concat(&head, &CPU->rq[i + 1].rq_head);
-            size_t n = CPU->rq[i + 1].n;
-            CPU->rq[i + 1].n = 0;
-            irq_spinlock_unlock(&CPU->rq[i + 1].lock, false);
-
-            /* Append rq[i + 1] to rq[i] */
-
-            irq_spinlock_lock(&CPU->rq[i].lock, false);
-            list_concat(&CPU->rq[i].rq_head, &head);
-            CPU->rq[i].n += n;
-            irq_spinlock_unlock(&CPU->rq[i].lock, false);
+            /* remember and empty rq[i + 1] */
+            r = &CPU->rq[i + 1];
+            spinlock_lock(&r->lock);
+            list_concat(&head, &r->rq_head);
+            n = r->n;
+            r->n = 0;
+            spinlock_unlock(&r->lock);
+
+            /* append rq[i + 1] to rq[i] */
+            r = &CPU->rq[i];
+            spinlock_lock(&r->lock);
+            list_concat(&r->rq_head, &head);
+            r->n += n;
+            spinlock_unlock(&r->lock);
         }
-
         CPU->needs_relink = 0;
     }
-
-    irq_spinlock_unlock(&CPU->lock, false);
+    spinlock_unlock(&CPU->lock);
+
 }

@@ -302 +305 @@
 {
     volatile ipl_t ipl;
 
     ASSERT(CPU != NULL);
 
     ipl = interrupts_disable();
 
     if (atomic_get(&haltstate))
         halt();
 
     if (THREAD) {
-        irq_spinlock_lock(&THREAD->lock, false);
+        spinlock_lock(&THREAD->lock);
 
         /* Update thread kernel accounting */
@@ -327 +330 @@
         THREAD->last_cycle = get_cycle();
 
-        irq_spinlock_unlock(&THREAD->lock, false);
+        spinlock_unlock(&THREAD->lock);
         interrupts_restore(THREAD->saved_context.ipl);
 
         return;
     }
 
     /*
      * Interrupt priority level of preempted thread is recorded
      * here to facilitate scheduler() invocations from
      * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
-     *
      */
     THREAD->saved_context.ipl = ipl;
     }
 
     /*
      * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
      * and preemption counter. At this point THE could be coming either
      * from THREAD's or CPU's stack.
-     *
      */
     the_copy(THE, (the_t *) CPU->stack);
 
     /*
      * We may not keep the old stack.
@@ -361 +362 @@
      * Therefore the scheduler() function continues in
      * scheduler_separated_stack().
-     *
      */
     context_save(&CPU->saved_context);
@@ -367 +367 @@
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
-
-    /* Not reached */
+    /* not reached */
 }

@@ -377 +376 @@
  * switch to a new thread.
  *
+ * Assume THREAD->lock is held.
  */
 void scheduler_separated_stack(void)
 {
+    int priority;
     DEADLOCK_PROBE_INIT(p_joinwq);
     task_t *old_task = TASK;
     as_t *old_as = AS;
 
-    ASSERT((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
     ASSERT(CPU != NULL);
 
@@ -391 +391 @@
      * possible destruction should thread_destroy() be called on this or any
      * other processor while the scheduler is still using them.
-     *
      */
     if (old_task)
         task_hold(old_task);
-
     if (old_as)
         as_hold(old_as);
 
     if (THREAD) {
-        /* Must be run after the switch to scheduler stack */
+        /* must be run after the switch to scheduler stack */
         after_thread_ran();
 
         switch (THREAD->state) {
         case Running:
-            irq_spinlock_unlock(&THREAD->lock, false);
+            spinlock_unlock(&THREAD->lock);
             thread_ready(THREAD);
             break;
 
         case Exiting:
 repeat:
             if (THREAD->detached) {
-                thread_destroy(THREAD, false);
+                thread_destroy(THREAD);
             } else {
                 /*
                  * The thread structure is kept allocated until
                  * somebody calls thread_detach() on it.
-                 *
                  */
-                if (!irq_spinlock_trylock(&THREAD->join_wq.lock)) {
+                if (!spinlock_trylock(&THREAD->join_wq.lock)) {
                     /*
                      * Avoid deadlock.
-                     *
                      */
-                    irq_spinlock_unlock(&THREAD->lock, false);
+                    spinlock_unlock(&THREAD->lock);
                     delay(HZ);
-                    irq_spinlock_lock(&THREAD->lock, false);
+                    spinlock_lock(&THREAD->lock);
                     DEADLOCK_PROBE(p_joinwq,
                         DEADLOCK_THRESHOLD);
@@ -433 +429 @@
                 _waitq_wakeup_unsafe(&THREAD->join_wq,
                     WAKEUP_FIRST);
-                irq_spinlock_unlock(&THREAD->join_wq.lock, false);
+                spinlock_unlock(&THREAD->join_wq.lock);
 
                 THREAD->state = Lingering;
-                irq_spinlock_unlock(&THREAD->lock, false);
+                spinlock_unlock(&THREAD->lock);
             }
             break;
@@ -443 +439 @@
             /*
              * Prefer the thread after it's woken up.
-             *
              */
             THREAD->priority = -1;
 
             /*
              * We need to release wq->lock which we locked in
              * waitq_sleep(). Address of wq->lock is kept in
              * THREAD->sleep_queue.
-             *
              */
-            irq_spinlock_unlock(&THREAD->sleep_queue->lock, false);
-
-            irq_spinlock_unlock(&THREAD->lock, false);
+            spinlock_unlock(&THREAD->sleep_queue->lock);
+
+            /*
+             * Check for possible requests for out-of-context
+             * invocation.
+             */
+            if (THREAD->call_me) {
+                THREAD->call_me(THREAD->call_me_with);
+                THREAD->call_me = NULL;
+                THREAD->call_me_with = NULL;
+            }
+
+            spinlock_unlock(&THREAD->lock);
+
             break;
 
         default:
             /*
              * Entering state is unexpected.
-             *
              */
             panic("tid%" PRIu64 ": unexpected state %s.",
@@ -467 +471 @@
             break;
         }
 
         THREAD = NULL;
     }
 
     THREAD = find_best_thread();
 
-    irq_spinlock_lock(&THREAD->lock, false);
-    int priority = THREAD->priority;
-    irq_spinlock_unlock(&THREAD->lock, false);
+    spinlock_lock(&THREAD->lock);
+    priority = THREAD->priority;
+    spinlock_unlock(&THREAD->lock);
 
     relink_rq(priority);
 
     /*
      * If both the old and the new task are the same, lots of work is
      * avoided.
-     *
      */
     if (TASK != THREAD->task) {
@@ -490 +493 @@
          * Note that it is possible for two tasks to share one address
          * space.
-         (
          */
         if (old_as != new_as) {
@@ -496 +498 @@
              * Both tasks and address spaces are different.
              * Replace the old one with the new one.
-             *
              */
             as_switch(old_as, new_as);
         }
 
         TASK = THREAD->task;
         before_task_runs();
     }
 
     if (old_task)
         task_release(old_task);
-
     if (old_as)
         as_release(old_as);
 
-    irq_spinlock_lock(&THREAD->lock, false);
+    spinlock_lock(&THREAD->lock);
     THREAD->state = Running;
 
 #ifdef SCHEDULER_VERBOSE
     printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
         ", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
         THREAD->ticks, atomic_get(&CPU->nrdy));
 #endif
 
     /*
@@ -527 +527 @@
      * necessary, is to be mapped in before_thread_runs(). This
      * function must be executed before the switch to the new stack.
-     *
      */
     before_thread_runs();
 
     /*
      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
      * thread's stack.
-     *
      */
     the_copy(THE, (the_t *) THREAD->kstack);
 
     context_restore(&THREAD->saved_context);
-
-    /* Not reached */
+    /* not reached */
 }

@@ -554 +551 @@
 void kcpulb(void *arg)
 {
+    thread_t *t;
+    int count;
     atomic_count_t average;
-    atomic_count_t rdy;
-
+    unsigned int i;
+    int j;
+    int k = 0;
+    ipl_t ipl;
+
     /*
      * Detach kcpulb as nobody will call thread_join_timeout() on it.
@@ -567 +569 @@
      */
     thread_sleep(1);
 
 not_satisfied:
     /*
@@ -573 +575 @@
      * other CPU's. Note that situation can have changed between two
      * passes. Each time get the most up to date counts.
-     *
      */
     average = atomic_get(&nrdy) / config.cpu_active + 1;
-    rdy = atomic_get(&CPU->nrdy);
-
-    if (average <= rdy)
+    count = average - atomic_get(&CPU->nrdy);
+
+    if (count <= 0)
         goto satisfied;
 
-    atomic_count_t count = average - rdy;
-
     /*
      * Searching least priority queues on all CPU's first and most priority
      * queues on all CPU's last.
-     *
-     */
-    size_t acpu;
-    size_t acpu_bias = 0;
-    int rq;
-
-    for (rq = RQ_COUNT - 1; rq >= 0; rq--) {
-        for (acpu = 0; acpu < config.cpu_active; acpu++) {
-            cpu_t *cpu = &cpus[(acpu + acpu_bias) % config.cpu_active];
-
+     */
+    for (j = RQ_COUNT - 1; j >= 0; j--) {
+        for (i = 0; i < config.cpu_active; i++) {
+            link_t *l;
+            runq_t *r;
+            cpu_t *cpu;
+
+            cpu = &cpus[(i + k) % config.cpu_active];
+
             /*
              * Not interested in ourselves.
              * Doesn't require interrupt disabling for kcpulb has
              * THREAD_FLAG_WIRED.
-             *
              */
             if (CPU == cpu)
                 continue;
-
             if (atomic_get(&cpu->nrdy) <= average)
                 continue;
-
-            irq_spinlock_lock(&(cpu->rq[rq].lock), true);
-            if (cpu->rq[rq].n == 0) {
-                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
+
+            ipl = interrupts_disable();
+            r = &cpu->rq[j];
+            spinlock_lock(&r->lock);
+            if (r->n == 0) {
+                spinlock_unlock(&r->lock);
+                interrupts_restore(ipl);
                 continue;
             }
-
-            thread_t *thread = NULL;
-
-            /* Search rq from the back */
-            link_t *link = cpu->rq[rq].rq_head.prev;
-
-            while (link != &(cpu->rq[rq].rq_head)) {
-                thread = (thread_t *) list_get_instance(link, thread_t, rq_link);
-
+
+            t = NULL;
+            l = r->rq_head.prev;  /* search rq from the back */
+            while (l != &r->rq_head) {
+                t = list_get_instance(l, thread_t, rq_link);
                 /*
                  * We don't want to steal CPU-wired threads
@@ -629 +624 @@
                  * steal threads whose FPU context is still in
                  * CPU.
-                 *
                  */
-                irq_spinlock_lock(&thread->lock, false);
-
-                if ((!(thread->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)))
-                    && (!(thread->fpu_context_engaged))) {
+                spinlock_lock(&t->lock);
+                if ((!(t->flags & (THREAD_FLAG_WIRED |
+                    THREAD_FLAG_STOLEN))) &&
+                    (!(t->fpu_context_engaged))) {
                     /*
-                     * Remove thread from ready queue.
+                     * Remove t from r.
                      */
-                    irq_spinlock_unlock(&thread->lock, false);
+                    spinlock_unlock(&t->lock);
 
                     atomic_dec(&cpu->nrdy);
                     atomic_dec(&nrdy);
 
-                    cpu->rq[rq].n--;
-                    list_remove(&thread->rq_link);
+                    r->n--;
+                    list_remove(&t->rq_link);
 
                     break;
                 }
-
-                irq_spinlock_unlock(&thread->lock, false);
-
-                link = link->prev;
-                thread = NULL;
+                spinlock_unlock(&t->lock);
+                l = l->prev;
+                t = NULL;
             }
-
-            if (thread) {
+            spinlock_unlock(&r->lock);
+
+            if (t) {
                 /*
-                 * Ready thread on local CPU
-                 *
+                 * Ready t on local CPU
                  */
-
-                irq_spinlock_pass(&(cpu->rq[rq].lock), &thread->lock);
-
+                spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
@@ -669 +659 @@
                     atomic_get(&nrdy) / config.cpu_active);
 #endif
-
-                thread->flags |= THREAD_FLAG_STOLEN;
-                thread->state = Entering;
-
-                irq_spinlock_unlock(&thread->lock, true);
-                thread_ready(thread);
-
+                t->flags |= THREAD_FLAG_STOLEN;
+                t->state = Entering;
+                spinlock_unlock(&t->lock);
+
+                thread_ready(t);
+
+                interrupts_restore(ipl);
+
                 if (--count == 0)
                     goto satisfied;
 
                 /*
                  * We are not satisfied yet, focus on another
                  * CPU next time.
-                 *
                  */
-                acpu_bias++;
+                k++;
 
                 continue;
-            } else
-                irq_spinlock_unlock(&(cpu->rq[rq].lock), true);
-
+            }
+            interrupts_restore(ipl);
         }
     }
 
     if (atomic_get(&CPU->nrdy)) {
         /*
          * Be a little bit light-weight and let migrated threads run.
-         *
          */
         scheduler();
@@ -703 +691 @@
          * We failed to migrate a single thread.
          * Give up this turn.
-         *
          */
         goto loop;
     }
 
     goto not_satisfied;
 
 satisfied:
     goto loop;
 }
+
 #endif /* CONFIG_SMP */
 
-/** Print information about threads & scheduler queues
- *
- */
+
+/** Print information about threads & scheduler queues */
 void sched_print_list(void)
 {
-    size_t cpu;
+    ipl_t ipl;
+    unsigned int cpu, i;
+    runq_t *r;
+    thread_t *t;
+    link_t *cur;
+
+    /* We are going to mess with scheduler structures,
+     * let's not be interrupted */
+    ipl = interrupts_disable();
     for (cpu = 0; cpu < config.cpu_count; cpu++) {
+
         if (!cpus[cpu].active)
             continue;
 
-        irq_spinlock_lock(&cpus[cpu].lock, true);
-
+        spinlock_lock(&cpus[cpu].lock);
         printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIs "\n",
             cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
             cpus[cpu].needs_relink);
 
-        unsigned int i;
         for (i = 0; i < RQ_COUNT; i++) {
-            irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
-            if (cpus[cpu].rq[i].n == 0) {
-                irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
+            r = &cpus[cpu].rq[i];
+            spinlock_lock(&r->lock);
+            if (!r->n) {
+                spinlock_unlock(&r->lock);
                 continue;
             }
-
             printf("\trq[%u]: ", i);
-            link_t *cur;
-            for (cur = cpus[cpu].rq[i].rq_head.next;
-                cur != &(cpus[cpu].rq[i].rq_head);
-                cur = cur->next) {
-                thread_t *thread = list_get_instance(cur, thread_t, rq_link);
-                printf("%" PRIu64 "(%s) ", thread->tid,
-                    thread_states[thread->state]);
+            for (cur = r->rq_head.next; cur != &r->rq_head;
+                cur = cur->next) {
+                t = list_get_instance(cur, thread_t, rq_link);
+                printf("%" PRIu64 "(%s) ", t->tid,
+                    thread_states[t->state]);
             }
             printf("\n");
-
-            irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
+            spinlock_unlock(&r->lock);
         }
-
-        irq_spinlock_unlock(&cpus[cpu].lock, true);
+        spinlock_unlock(&cpus[cpu].lock);
     }
+
+    interrupts_restore(ipl);
 }