Changes in kernel/generic/src/proc/scheduler.c [ed7e057:d23712e] in mainline
File:
- kernel/generic/src/proc/scheduler.c (1 edited)
Legend:
- Unmodified (prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (revision ed7e057)
+++ kernel/generic/src/proc/scheduler.c (revision d23712e)

 /*
  * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
  *
…
 #include <time/delay.h>
 #include <arch/asm.h>
-#include <arch/faddr.h>
 #include <arch/cycle.h>
 #include <atomic.h>
…
 #include <stacktrace.h>
 
-static void scheduler_separated_stack(void);
-
 atomic_size_t nrdy;	/**< Number of ready threads in the system. */
 
…
 static void relink_rq(int start)
 {
+    assert(interrupts_disabled());
+
     if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
         return;
…
 }
 
-void scheduler_run(void)
-{
-    assert(interrupts_disabled());
-    assert(THREAD == NULL);
-    assert(CPU != NULL);
-
-    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-    context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
-    unreachable();
-}
-
 /** Things to do before we switch to THREAD context.
  */
…
     switch_task(THREAD->task);
 
-    irq_spinlock_lock(&THREAD->lock, false);
-    THREAD->state = Running;
-    THREAD->cpu = CPU;
-    THREAD->priority = rq_index;	/* Correct rq index */
+    assert(atomic_get_unordered(&THREAD->cpu) == CPU);
+
+    atomic_set_unordered(&THREAD->state, Running);
+    atomic_set_unordered(&THREAD->priority, rq_index);	/* Correct rq index */
 
     /*
…
     log(LF_OTHER, LVL_DEBUG,
         "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-        ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+        ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
         THREAD->ticks, atomic_load(&CPU->nrdy));
 #endif
…
 
 #ifdef CONFIG_UDEBUG
-    if (THREAD->btrace) {
+    if (atomic_get_unordered(&THREAD->btrace)) {
         istate_t *istate = THREAD->udebug.uspace_state;
         if (istate != NULL) {
             printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
             stack_trace_istate(istate);
+        } else {
+            printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
         }
 
-        THREAD->btrace = false;
+        atomic_set_unordered(&THREAD->btrace, false);
     }
 #endif
…
 }
 
-static void cleanup_after_thread(thread_t *thread, state_t out_state)
+static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
+{
+    /* Add to the appropriate runqueue. */
+    runq_t *rq = &cpu->rq[i];
+
+    irq_spinlock_lock(&rq->lock, false);
+    list_append(&thread->rq_link, &rq->rq);
+    rq->n++;
+    irq_spinlock_unlock(&rq->lock, false);
+
+    atomic_inc(&nrdy);
+    atomic_inc(&cpu->nrdy);
+}
+
+/** Requeue a thread that was just preempted on this CPU.
+ */
+static void thread_requeue_preempted(thread_t *thread)
+{
+    assert(interrupts_disabled());
+    assert(atomic_get_unordered(&thread->state) == Running);
+    assert(atomic_get_unordered(&thread->cpu) == CPU);
+
+    int prio = atomic_get_unordered(&thread->priority);
+
+    if (prio < RQ_COUNT - 1) {
+        prio++;
+        atomic_set_unordered(&thread->priority, prio);
+    }
+
+    atomic_set_unordered(&thread->state, Ready);
+
+    add_to_rq(thread, CPU, prio);
+}
+
+void thread_requeue_sleeping(thread_t *thread)
+{
+    ipl_t ipl = interrupts_disable();
+
+    assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
+
+    atomic_set_unordered(&thread->priority, 0);
+    atomic_set_unordered(&thread->state, Ready);
+
+    /* Prefer the CPU on which the thread ran last */
+    cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+
+    if (!cpu) {
+        cpu = CPU;
+        atomic_set_unordered(&thread->cpu, CPU);
+    }
+
+    add_to_rq(thread, cpu, 0);
+
+    interrupts_restore(ipl);
+}
+
+static void cleanup_after_thread(thread_t *thread)
 {
     assert(CURRENT->mutex_locks == 0);
…
     int expected;
 
-    switch (out_state) {
+    switch (atomic_get_unordered(&thread->state)) {
     case Running:
-        thread_ready(thread);
+        thread_requeue_preempted(thread);
         break;
 
…
             assert(expected == SLEEP_WOKE);
             /* The thread has already been woken up, requeue immediately. */
-            thread_ready(thread);
+            thread_requeue_sleeping(thread);
         }
         break;
…
         */
         panic("tid%" PRIu64 ": unexpected state %s.",
-            thread->tid, thread_states[thread->state]);
+            thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
         break;
     }
 }
 
-/** The scheduler
- *
- * The thread scheduling procedure.
- * Passes control directly to
- * scheduler_separated_stack().
- *
- */
+/** Switch to scheduler context to let other threads run. */
 void scheduler_enter(state_t new_state)
 {
…
     assert(THREAD != NULL);
 
-    fpu_cleanup();
-
-    irq_spinlock_lock(&THREAD->lock, false);
-    THREAD->state = new_state;
-
-    /* Update thread kernel accounting */
-    THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-    if (new_state == Sleeping) {
-        /* Prefer the thread after it's woken up. */
-        THREAD->priority = -1;
-    }
-
-    /*
-     * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
-     * and preemption counter. At this point CURRENT could be coming either
-     * from THREAD's or CPU's stack.
-     *
-     */
-    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
-    /*
-     * We may not keep the old stack.
-     * Reason: If we kept the old stack and got blocked, for instance, in
-     * find_best_thread(), the old thread could get rescheduled by another
-     * CPU and overwrite the part of its own stack that was also used by
-     * the scheduler on this CPU.
-     *
-     * Moreover, we have to bypass the compiler-generated POP sequence
-     * which is fooled by SP being set to the very top of the stack.
-     * Therefore the scheduler() function continues in
-     * scheduler_separated_stack().
-     *
-     */
-    context_t ctx;
-    context_create(&ctx, scheduler_separated_stack,
-        CPU_LOCAL->stack, STACK_SIZE);
-
-    /* Switch to scheduler context and store current thread's context. */
-    context_swap(&THREAD->saved_context, &ctx);
-
-    /* Returned from scheduler. */
-
-    irq_spinlock_unlock(&THREAD->lock, false);
-    interrupts_restore(ipl);
-}
-
-/** Scheduler stack switch wrapper
- *
- * Second part of the scheduler() function
- * using new stack. Handling the actual context
- * switch to a new thread.
- *
- */
-void scheduler_separated_stack(void)
-{
-    assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
-    assert(CPU != NULL);
-    assert(interrupts_disabled());
-
     if (atomic_load(&haltstate))
         halt();
 
-    if (THREAD) {
-        /*
-         * On Sparc, this saves some extra userspace state that's not
-         * covered by context_save()/context_restore().
-         */
-        after_thread_ran_arch();
-
-        state_t state = THREAD->state;
-        irq_spinlock_unlock(&THREAD->lock, false);
-
-        cleanup_after_thread(THREAD, state);
-
+    /* Check if we have a thread to switch to. */
+
+    int rq_index;
+    thread_t *new_thread = try_find_thread(&rq_index);
+
+    if (new_thread == NULL && new_state == Running) {
+        /* No other thread to run, but we still have work to do here. */
+        interrupts_restore(ipl);
+        return;
+    }
+
+    atomic_set_unordered(&THREAD->state, new_state);
+
+    /* Update thread kernel accounting */
+    atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
+
+    fpu_cleanup();
+
+    /*
+     * On Sparc, this saves some extra userspace state that's not
+     * covered by context_save()/context_restore().
+     */
+    after_thread_ran_arch();
+
+    if (new_thread) {
+        thread_t *old_thread = THREAD;
+        CPU_LOCAL->prev_thread = old_thread;
+        THREAD = new_thread;
+        /* No waiting necessary, we can switch to the new thread directly. */
+        prepare_to_run_thread(rq_index);
+
+        current_copy(CURRENT, (current_t *) new_thread->kstack);
+        context_swap(&old_thread->saved_context, &new_thread->saved_context);
+    } else {
+        /*
+         * A new thread isn't immediately available, switch to a separate
+         * stack to sleep or do other idle stuff.
+         */
+        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+        context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
+    }
+
+    assert(CURRENT->mutex_locks == 0);
+    assert(interrupts_disabled());
+
+    /* Check if we need to clean up after another thread. */
+    if (CPU_LOCAL->prev_thread) {
+        cleanup_after_thread(CPU_LOCAL->prev_thread);
+        CPU_LOCAL->prev_thread = NULL;
+    }
+
+    interrupts_restore(ipl);
+}
+
+/** Enter main scheduler loop. Never returns.
+ *
+ * This function switches to a runnable thread as soon as one is available,
+ * after which it is only switched back to if a thread is stopping and there is
+ * no other thread to run in its place. We need a separate context for that
+ * because we're going to block the CPU, which means we need another context
+ * to clean up after the previous thread.
+ */
+void scheduler_run(void)
+{
+    assert(interrupts_disabled());
+
+    assert(CPU != NULL);
+    assert(TASK == NULL);
+    assert(THREAD == NULL);
+    assert(interrupts_disabled());
+
+    while (!atomic_load(&haltstate)) {
+        assert(CURRENT->mutex_locks == 0);
+
+        int rq_index;
+        THREAD = find_best_thread(&rq_index);
+        prepare_to_run_thread(rq_index);
+
+        /*
+         * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+         * thread's stack.
+         */
+        current_copy(CURRENT, (current_t *) THREAD->kstack);
+
+        /* Switch to thread context. */
+        context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
+
+        /* Back from another thread. */
+        assert(CPU != NULL);
+        assert(THREAD != NULL);
+        assert(CURRENT->mutex_locks == 0);
+        assert(interrupts_disabled());
+
+        cleanup_after_thread(THREAD);
+
+        /*
+         * Necessary because we're allowing interrupts in find_best_thread(),
+         * so we need to avoid other code referencing the thread we left.
+         */
         THREAD = NULL;
     }
 
-    int rq_index;
-    THREAD = find_best_thread(&rq_index);
-
-    prepare_to_run_thread(rq_index);
-
-    /*
-     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
-     * thread's stack.
-     */
-    current_copy(CURRENT, (current_t *) THREAD->kstack);
-
-    context_restore(&THREAD->saved_context);
+    halt();
+}
+
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that a starting thread properly handles
+ * everything it needs to do when first scheduled, and when it exits.
+ */
+void thread_main_func(void)
+{
+    assert(interrupts_disabled());
+
+    void (*f)(void *) = THREAD->thread_code;
+    void *arg = THREAD->thread_arg;
+
+    /* This is where each thread wakes up after its creation */
+
+    /* Check if we need to clean up after another thread. */
+    if (CPU_LOCAL->prev_thread) {
+        cleanup_after_thread(CPU_LOCAL->prev_thread);
+        CPU_LOCAL->prev_thread = NULL;
+    }
+
+    interrupts_enable();
+
+    f(arg);
+
+    thread_exit();
 
     /* Not reached */
…
     list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
 
-        irq_spinlock_lock(&thread->lock, false);
-
         /*
          * Do not steal CPU-wired threads, threads
…
          * FPU context is still in the CPU.
          */
-        if (thread->stolen || thread->nomigrate ||
-            thread == fpu_owner) {
-            irq_spinlock_unlock(&thread->lock, false);
+        if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
             continue;
         }
 
         thread->stolen = true;
-        thread->cpu = CPU;
-
-        irq_spinlock_unlock(&thread->lock, false);
+        atomic_set_unordered(&thread->cpu, CPU);
 
         /*
…
             thread) {
             printf("%" PRIu64 "(%s) ", thread->tid,
-                thread_states[thread->state]);
+                thread_states[atomic_get_unordered(&thread->state)]);
         }
         printf("\n");
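The recurring change throughout this diff is the removal of the per-thread irq_spinlock in favour of atomic_get_unordered()/atomic_set_unordered() accesses to thread fields such as state, cpu, priority and btrace. As a rough mental model only, assuming these accessors behave like relaxed C11 atomics (the real HelenOS definitions may differ), a standalone sketch could look like this; the model_* names and the example enum are illustrative placeholders, not part of the changeset:

/*
 * Illustrative model only: "unordered" accessors that are atomic with
 * respect to tearing but impose no ordering on surrounding memory accesses.
 */
#include <stdatomic.h>
#include <stdio.h>

#define model_get_unordered(ptr) \
    atomic_load_explicit((ptr), memory_order_relaxed)
#define model_set_unordered(ptr, val) \
    atomic_store_explicit((ptr), (val), memory_order_relaxed)

/* Hypothetical stand-in for the kernel's thread state enum. */
typedef enum { MODEL_READY, MODEL_RUNNING, MODEL_SLEEPING } model_state_t;

int main(void)
{
    _Atomic model_state_t state = MODEL_READY;

    /* Mirrors prepare_to_run_thread(): mark the thread Running... */
    model_set_unordered(&state, MODEL_RUNNING);

    /* ...and a later reader (cf. cleanup_after_thread()) reads it back. */
    printf("state = %d\n", (int) model_get_unordered(&state));
    return 0;
}

Such accessors only rule out torn reads and writes; any ordering the scheduler needs still comes from other mechanisms visible above, for example the per-runqueue irq_spinlock taken in add_to_rq().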
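The new requeue helpers also make the priority policy easier to see: thread_requeue_preempted() drops a thread one runqueue lower each time it is preempted while Running, saturating at RQ_COUNT - 1, whereas thread_requeue_sleeping() always requeues a woken thread at priority 0, preferably on the CPU it last ran on. Below is a standalone sketch of that policy, assuming a lower runqueue index means higher scheduling priority and using an assumed MODEL_RQ_COUNT in place of the kernel's RQ_COUNT; the model_* names are illustrative only:

#include <stdio.h>

enum { MODEL_RQ_COUNT = 16 };   /* assumed value; the kernel defines RQ_COUNT */

/* Mirrors the priority update in thread_requeue_preempted(). */
static int model_requeue_preempted(int prio)
{
    if (prio < MODEL_RQ_COUNT - 1)
        prio++;
    return prio;
}

/* Mirrors thread_requeue_sleeping(): woken threads restart at priority 0. */
static int model_requeue_sleeping(void)
{
    return 0;
}

int main(void)
{
    int prio = 0;
    for (int i = 0; i < 20; i++)
        prio = model_requeue_preempted(prio);

    printf("after 20 preemptions: %d\n", prio);   /* prints 15 */
    printf("after waking from sleep: %d\n", model_requeue_sleeping());
    return 0;
}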