Changeset 6a0e568 in mainline


Timestamp:
2024-01-19T16:56:48Z (11 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
286da52
Parents:
c1eaec4
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-22 18:04:40)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-19 16:56:48)
Message:

Allow fast-switch to another runnable thread

When a thread is available that can be switched to immediately,
without having to block, switch to it directly rather than taking
a detour through the CPU's scheduler context. The new thread then
handles whatever cannot be done while the previous context is still
active on the CPU, such as requeuing or deallocating the previous
thread.
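
In outline, the change makes scheduler_enter() attempt a direct handoff before
falling back to the scheduler context. The following is a condensed sketch of
that control flow, paraphrased from the scheduler.c hunk below; locking,
CURRENT bookkeeping and interrupt handling are omitted, so it is illustrative
rather than verbatim:

    int rq_index;
    thread_t *new_thread = try_find_thread(&rq_index);

    if (new_thread == NULL && new_state == Running) {
            /* No other runnable thread; keep running the current one. */
            return;
    }

    if (new_thread) {
            /*
             * Fast path: record the outgoing thread so the incoming one
             * can requeue or deallocate it, then swap between the two
             * thread contexts directly.
             */
            CPU_LOCAL->prev_thread = THREAD;
            THREAD = new_thread;
            prepare_to_run_thread(rq_index);
            context_swap(&CPU_LOCAL->prev_thread->saved_context,
                &new_thread->saved_context);
    } else {
            /* Slow path: detour through the per-CPU scheduler context. */
            context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
    }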

Location:
kernel/generic
Files:
4 edited

Legend:

    +  Added
    -  Removed
       Unmodified (no prefix)
  • kernel/generic/include/cpu.h

    rc1eaec4 → r6a0e568

    @@ -77,4 +77,5 @@
             context_t scheduler_context;

    +        struct thread *prev_thread;
             state_t exiting_state;
     } cpu_local_t;
  • kernel/generic/include/proc/scheduler.h

    rc1eaec4 → r6a0e568

    @@ -64,4 +64,6 @@
     extern void scheduler_enter(state_t);

    +extern void thread_main_func(void);
    +
     /*
      * To be defined by architectures.
  • kernel/generic/src/proc/scheduler.c

    rc1eaec4 → r6a0e568

    @@ -425,4 +425,15 @@
                     halt();

    +        /* Check if we have a thread to switch to. */
    +
    +        int rq_index;
    +        thread_t *new_thread = try_find_thread(&rq_index);
    +
    +        if (new_thread == NULL && new_state == Running) {
    +                /* No other thread to run, but we still have work to do here. */
    +                interrupts_restore(ipl);
    +                return;
    +        }
    +
             irq_spinlock_lock(&THREAD->lock, false);
             THREAD->state = new_state;
    @@ -446,9 +457,30 @@
             CPU_LOCAL->exiting_state = new_state;

    -        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    -        context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
    +        if (new_thread) {
    +                thread_t *old_thread = THREAD;
    +                CPU_LOCAL->prev_thread = old_thread;
    +                THREAD = new_thread;
    +                /* No waiting necessary, we can switch to the new thread directly. */
    +                prepare_to_run_thread(rq_index);
    +
    +                current_copy(CURRENT, (current_t *) new_thread->kstack);
    +                context_swap(&old_thread->saved_context, &new_thread->saved_context);
    +        } else {
    +                /*
    +                 * A new thread isn't immediately available, switch to a separate
    +                 * stack to sleep or do other idle stuff.
    +                 */
    +                current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    +                context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
    +        }

             assert(CURRENT->mutex_locks == 0);
             assert(interrupts_disabled());
    +
    +        /* Check if we need to clean up after another thread. */
    +        if (CPU_LOCAL->prev_thread) {
    +                cleanup_after_thread(CPU_LOCAL->prev_thread, CPU_LOCAL->exiting_state);
    +                CPU_LOCAL->prev_thread = NULL;
    +        }

             interrupts_restore(ipl);
    @@ -504,4 +536,33 @@

             halt();
    +}
    +
    +/** Thread wrapper.
    + *
    + * This wrapper is provided to ensure that a starting thread properly handles
    + * everything it needs to do when first scheduled, and when it exits.
    + */
    +void thread_main_func(void)
    +{
    +        assert(interrupts_disabled());
    +
    +        void (*f)(void *) = THREAD->thread_code;
    +        void *arg = THREAD->thread_arg;
    +
    +        /* This is where each thread wakes up after its creation */
    +
    +        /* Check if we need to clean up after another thread. */
    +        if (CPU_LOCAL->prev_thread) {
    +                cleanup_after_thread(CPU_LOCAL->prev_thread, CPU_LOCAL->exiting_state);
    +                CPU_LOCAL->prev_thread = NULL;
    +        }
    +
    +        interrupts_enable();
    +
    +        f(arg);
    +
    +        thread_exit();
    +
    +        /* Not reached */
     }

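    The same cleanup block appears twice in this hunk: once after context_swap()
    returns in scheduler_enter(), and once at the start of thread_main_func().
    Both are points where a thread (re)gains the CPU after a possible
    fast-switch, so both must finish the predecessor's deferred teardown. A
    hypothetical helper capturing the shared pattern (not part of the changeset,
    shown only to make the invariant explicit):

        /*
         * Hypothetical helper; the changeset inlines this block at both
         * resume points. Runs with interrupts disabled, on the stack of
         * the thread that just gained the CPU.
         */
        static void finish_fast_switch(void)
        {
                if (CPU_LOCAL->prev_thread) {
                        cleanup_after_thread(CPU_LOCAL->prev_thread,
                            CPU_LOCAL->exiting_state);
                        CPU_LOCAL->prev_thread = NULL;
                }
        }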
  • kernel/generic/src/proc/thread.c

    rc1eaec4 → r6a0e568

    @@ -108,27 +108,4 @@
     static int threads_cmp(void *, void *);

    -/** Thread wrapper.
    - *
    - * This wrapper is provided to ensure that every thread makes a call to
    - * thread_exit() when its implementing function returns.
    - *
    - * interrupts_disable() is assumed.
    - *
    - */
    -static void cushion(void)
    -{
    -        void (*f)(void *) = THREAD->thread_code;
    -        void *arg = THREAD->thread_arg;
    -
    -        /* This is where each thread wakes up after its creation */
    -        interrupts_enable();
    -
    -        f(arg);
    -
    -        thread_exit();
    -
    -        /* Not reached */
    -}
    -
     /** Initialization and allocation for thread_t structure
      *
    @@ -309,5 +286,6 @@
             irq_spinlock_unlock(&tidlock, true);

    -        context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
    +        context_create(&thread->saved_context, thread_main_func,
    +            thread->kstack, STACK_SIZE);

             current_initialize((current_t *) thread->kstack);