Changeset 25939997 in mainline


Timestamp: 2024-01-19T16:21:20Z (10 months ago)
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: c1eaec4
Parents: 1c1767f
git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-16 19:36:25)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-19 16:21:20)
Message:

Make separate-stack-scheduler a loop with persistent context

We can see the scheduler as a looping idle thread in which a thread
is repeatedly retrieved from a queue, prepared, switched to,
returned from, and cleaned up after.

IMO this is a more natural view of the process.
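
The loop described above can be sketched outside the kernel. Below is a minimal,
hypothetical userspace analogue using POSIX <ucontext.h> instead of the kernel's
own context primitives; the names (worker, halt_requested, rounds) are invented
for the illustration and none of this is HelenOS code. A persistent "scheduler"
context loops, swaps to a worker context, and regains control whenever the worker
yields back, which is the same structure scheduler_run() takes on in this changeset.

    /*
     * Userspace sketch of a scheduler loop with a persistent context.
     * Build with: cc -o sched_demo sched_demo.c
     */
    #include <stdio.h>
    #include <stdbool.h>
    #include <ucontext.h>

    static ucontext_t scheduler_ctx;        /* persistent scheduler context */
    static ucontext_t worker_ctx;           /* stands in for THREAD->saved_context */
    static char worker_stack[64 * 1024];
    static bool halt_requested = false;
    static int rounds = 0;

    static void worker(void)
    {
            for (;;) {
                    printf("worker: running\n");
                    /* Like scheduler_enter(): hand control back to the scheduler. */
                    swapcontext(&worker_ctx, &scheduler_ctx);
            }
    }

    int main(void)
    {
            /* Prepare the worker context with its own stack. */
            getcontext(&worker_ctx);
            worker_ctx.uc_stack.ss_sp = worker_stack;
            worker_ctx.uc_stack.ss_size = sizeof(worker_stack);
            worker_ctx.uc_link = NULL;
            makecontext(&worker_ctx, worker, 0);

            /* scheduler_run() analogue: loop until halt is requested. */
            while (!halt_requested) {
                    printf("scheduler: picking a thread\n");
                    /* Switch to the "thread"; we resume here when it yields. */
                    swapcontext(&scheduler_ctx, &worker_ctx);
                    printf("scheduler: back from thread, cleaning up\n");
                    if (++rounds == 3)
                            halt_requested = true;
            }
            return 0;
    }

In the kernel, the worker's side of the swap corresponds to scheduler_enter()
switching into CPU_LOCAL->scheduler_context, and the loop body corresponds to
find_best_thread(), prepare_to_run_thread() and cleanup_after_thread() in the
diff below.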

Location: kernel/generic
Files: 5 edited

  • kernel/generic/include/cpu.h

    r1c1767f → r25939997

              bool idle;
              uint64_t last_cycle;
    +
    +         context_t scheduler_context;
      } cpu_local_t;
  • kernel/generic/src/main/main.c

    r1c1767f → r25939997

               * starting the thread of kernel threads.
               */
    -         scheduler_run();
    +         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    +         context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
              /* not reached */
      }
    …
              ARCH_OP(post_cpu_init);

    -         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    -
              /*
               * If we woke kmp up before we left the kernel stack, we could
    …
               * switch to this cpu's private stack prior to waking kmp up.
               */
    +         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
              context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
              /* not reached */
  • kernel/generic/src/proc/scheduler.c

    r1c1767f → r25939997

      /*
       * Copyright (c) 2010 Jakub Jermar
    +  * Copyright (c) 2023 Jiří Zárevúcky
       * All rights reserved.
       *
    …
      #include <time/delay.h>
      #include <arch/asm.h>
    - #include <arch/faddr.h>
      #include <arch/cycle.h>
      #include <atomic.h>
    …
      #include <stacktrace.h>

    - static void scheduler_separated_stack(void);
    -
      atomic_size_t nrdy;  /**< Number of ready threads in the system. */
    …
      static void relink_rq(int start)
      {
    +         assert(interrupts_disabled());
    +
              if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
                      return;
    …
      }

    - void scheduler_run(void)
    - {
    -         assert(interrupts_disabled());
    -         assert(THREAD == NULL);
    -         assert(CPU != NULL);
    -
    -         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    -         context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    -         unreachable();
    - }
    -
      /** Things to do before we switch to THREAD context.
       */
    …
      }

    - /** The scheduler
    -  *
    -  * The thread scheduling procedure.
    -  * Passes control directly to
    -  * scheduler_separated_stack().
    -  *
    -  */
    + /** Switch to scheduler context to let other threads run. */
      void scheduler_enter(state_t new_state)
      {
    …

              fpu_cleanup();
    +
    +         if (atomic_load(&haltstate))
    +                 halt();

              irq_spinlock_lock(&THREAD->lock, false);
    …
               *
               */
    +
              current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    -
    -         /*
    -          * We may not keep the old stack.
    -          * Reason: If we kept the old stack and got blocked, for instance, in
    -          * find_best_thread(), the old thread could get rescheduled by another
    -          * CPU and overwrite the part of its own stack that was also used by
    -          * the scheduler on this CPU.
    -          *
    -          * Moreover, we have to bypass the compiler-generated POP sequence
    -          * which is fooled by SP being set to the very top of the stack.
    -          * Therefore the scheduler() function continues in
    -          * scheduler_separated_stack().
    -          *
    -          */
    -         context_t ctx;
    -         context_create(&ctx, scheduler_separated_stack,
    -             CPU_LOCAL->stack, STACK_SIZE);
    -
    -         /* Switch to scheduler context and store current thread's context. */
    -         context_swap(&THREAD->saved_context, &ctx);
    -
    -         /* Returned from scheduler. */
    +         context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);

              irq_spinlock_unlock(&THREAD->lock, false);
    …
      }

    - /** Scheduler stack switch wrapper
    -  *
    -  * Second part of the scheduler() function
    -  * using new stack. Handling the actual context
    -  * switch to a new thread.
    -  *
    -  */
    - void scheduler_separated_stack(void)
    - {
    -         assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
    + /** Enter main scheduler loop. Never returns.
    +  *
    +  * This function switches to a runnable thread as soon as one is available,
    +  * after which it is only switched back to if a thread is stopping and there is
    +  * no other thread to run in its place. We need a separate context for that
    +  * because we're going to block the CPU, which means we need another context
    +  * to clean up after the previous thread.
    +  */
    + void scheduler_run(void)
    + {
    +         assert(interrupts_disabled());
    +
              assert(CPU != NULL);
    +         assert(TASK == NULL);
    +         assert(THREAD == NULL);
              assert(interrupts_disabled());

    -         if (atomic_load(&haltstate))
    -                 halt();
    -
    -         if (THREAD) {
    +         while (!atomic_load(&haltstate)) {
    +                 assert(CURRENT->mutex_locks == 0);
    +
    +                 int rq_index;
    +                 THREAD = find_best_thread(&rq_index);
    +
    +                 prepare_to_run_thread(rq_index);
    +
    +                 /*
    +                  * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
    +                  * thread's stack.
    +                  */
    +                 current_copy(CURRENT, (current_t *) THREAD->kstack);
    +
    +                 /* Switch to thread context. */
    +                 context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
    +
    +                 /* Back from the thread. */
    +
    +                 assert(CPU != NULL);
    +                 assert(THREAD != NULL);
    +                 assert(irq_spinlock_locked(&THREAD->lock));
    +                 assert(interrupts_disabled());
    +
                      state_t state = THREAD->state;
                      irq_spinlock_unlock(&THREAD->lock, false);
    …
                      cleanup_after_thread(THREAD, state);

    +                 /*
    +                  * Necessary because we're allowing interrupts in find_best_thread(),
    +                  * so we need to avoid other code referencing the thread we left.
    +                  */
                      THREAD = NULL;
              }

    -         int rq_index;
    -         THREAD = find_best_thread(&rq_index);
    -
    -         prepare_to_run_thread(rq_index);
    -
    -         /*
    -          * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
    -          * thread's stack.
    -          */
    -         current_copy(CURRENT, (current_t *) THREAD->kstack);
    -
    -         context_restore(&THREAD->saved_context);
    -
    -         /* Not reached */
    +         halt();
      }
  • kernel/generic/src/proc/thread.c

    r1c1767f → r25939997

      void thread_ready(thread_t *thread)
      {
    +         // TODO: move this to scheduler.c
              irq_spinlock_lock(&thread->lock, true);
  • kernel/generic/src/time/clock.c

    r1c1767f → r25939997

      static void cpu_update_accounting(void)
      {
    +         // FIXME: get_cycle() is unimplemented on several platforms
              uint64_t now = get_cycle();
              atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
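
Read together, the two halves of the switch now pair up on the per-CPU
scheduler_context. The following condensed sketch only restates the control flow
from the scheduler.c diff above (locking, FPU handling and accounting are omitted,
and it is not compilable on its own):

    /* In a running thread, scheduler_enter() hands control to the scheduler: */
    context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);

    /*
     * In scheduler_run(), the loop hands control to the chosen thread and
     * regains it when that thread calls scheduler_enter() again:
     */
    while (!atomic_load(&haltstate)) {
            int rq_index;
            THREAD = find_best_thread(&rq_index);
            prepare_to_run_thread(rq_index);
            context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
            cleanup_after_thread(THREAD, THREAD->state);
            THREAD = NULL;
    }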