Changeset 8996582 in mainline


Timestamp: 2024-01-14T18:24:05Z (11 months ago)
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: 151c050
Parents: 6e49dab
git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-03-12 13:48:13)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-14 18:24:05)
Message:

Move context switch preparation to a new separate function

This puts everything that's needed before activating a new
thread into a single place, and removes one lock/unlock pair.
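In outline, the change replaces an inline sequence in the scheduler (plus the old before_thread_runs() helper) with one call to the new prepare_to_run_thread(). The standalone sketch below shows the shape of the refactoring only; the stub bodies are hypothetical, and only the names relink_rq(), switch_task(), fpu_restore(), and prepare_to_run_thread() are taken from the diff that follows.

    #include <stdio.h>

    /* Hypothetical stubs standing in for the real scheduler steps. */
    static void relink_rq(int rq_index)     { printf("relink rq %d\n", rq_index); }
    static void switch_task(void)           { printf("switch address space\n"); }
    static void arch_prepare(void)          { printf("arch-specific preparation\n"); }
    static void fpu_restore(void)           { printf("restore FPU state\n"); }
    static void set_preempt_deadline(int i) { printf("slice: %d us\n", (i + 1) * 10000); }

    /* Everything needed before activating the new thread, in one place. */
    static void prepare_to_run_thread(int rq_index)
    {
            relink_rq(rq_index);
            switch_task();
            arch_prepare();
            fpu_restore();
            set_preempt_deadline(rq_index);
    }

    int main(void)
    {
            prepare_to_run_thread(2);  /* as if the scheduler picked rq 2 */
            return 0;
    }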

Location: kernel/generic/src/proc
Files: 2 edited

  • kernel/generic/src/proc/scheduler.c

--- kernel/generic/src/proc/scheduler.c (r6e49dab)
+++ kernel/generic/src/proc/scheduler.c (r8996582)
@@ -67,35 +67,6 @@
 
 static void scheduler_separated_stack(void);
-static void fpu_restore(void);
 
 atomic_size_t nrdy;  /**< Number of ready threads in the system. */
-
-/** Take actions before new thread runs.
- *
- * Perform actions that need to be
- * taken before the newly selected
- * thread is passed control.
- *
- * THREAD->lock is locked on entry
- *
- */
-static void before_thread_runs(void)
-{
-        before_thread_runs_arch();
-
-        fpu_restore();
-
-#ifdef CONFIG_UDEBUG
-        if (THREAD->btrace) {
-                istate_t *istate = THREAD->udebug.uspace_state;
-                if (istate != NULL) {
-                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
-                        stack_trace_istate(istate);
-                }
-
-                THREAD->btrace = false;
-        }
-#endif
-}
 
 #ifdef CONFIG_FPU_LAZY
     
@@ -174,22 +145,5 @@
                 list_remove(&thread->rq_link);
 
-                irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
-
-                thread->cpu = CPU;
-                thread->priority = i;  /* Correct rq index */
-
-                /* Time allocation in microseconds. */
-                uint64_t time_to_run = (i + 1) * 10000;
-
-                /* This is safe because interrupts are disabled. */
-                CPU_LOCAL->preempt_deadline =
-                    CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
-
-                /*
-                 * Clear the stolen flag so that it can be migrated
-                 * when load balancing needs emerge.
-                 */
-                thread->stolen = false;
-                irq_spinlock_unlock(&thread->lock, false);
+                irq_spinlock_unlock(&(CPU->rq[i].lock), false);
 
                 *rq_index = i;
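The removed lock/unlock pair mentioned in the commit message is visible here: the old code handed the run-queue lock off to thread->lock just to update a few per-thread fields, then unlocked it again, while the new code only drops the run-queue lock and leaves those fields to prepare_to_run_thread(), which takes THREAD->lock anyway. A simplified sketch of the two locking patterns, using POSIX mutexes in place of irq_spinlock_t (the field names mirror the diff; everything else, including the handoff order, is a hypothetical simplification):

    #include <pthread.h>

    typedef struct {
            pthread_mutex_t lock;
            int cpu, priority, stolen;
    } thread_s;

    static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Before: hand the run-queue lock off to the thread lock (roughly what
     * irq_spinlock_pass() does, simplified), set fields, unlock again. */
    static void dequeue_old(thread_s *t, int i)
    {
            pthread_mutex_lock(&t->lock);
            pthread_mutex_unlock(&rq_lock);
            t->cpu = 0;          /* stand-in for thread->cpu = CPU */
            t->priority = i;
            t->stolen = 0;
            pthread_mutex_unlock(&t->lock);
    }

    /* After: just drop the run-queue lock; the fields are updated later,
     * under the thread lock that prepare_to_run_thread() takes anyway. */
    static void dequeue_new(void)
    {
            pthread_mutex_unlock(&rq_lock);
    }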
     
@@ -362,4 +316,65 @@
 }
 
+/** Things to do before we switch to THREAD context.
+ */
+static void prepare_to_run_thread(int rq_index)
+{
+        relink_rq(rq_index);
+
+        switch_task(THREAD->task);
+
+        irq_spinlock_lock(&THREAD->lock, false);
+        THREAD->state = Running;
+        THREAD->cpu = CPU;
+        THREAD->priority = rq_index;  /* Correct rq index */
+
+        /*
+         * Clear the stolen flag so that it can be migrated
+         * when load balancing needs emerge.
+         */
+        THREAD->stolen = false;
+
+#ifdef SCHEDULER_VERBOSE
+        log(LF_OTHER, LVL_DEBUG,
+            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
+            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+            THREAD->ticks, atomic_load(&CPU->nrdy));
+#endif
+
+        /*
+         * Some architectures provide late kernel PA2KA(identity)
+         * mapping in a page fault handler. However, the page fault
+         * handler uses the kernel stack of the running thread and
+         * therefore cannot be used to map it. The kernel stack, if
+         * necessary, is to be mapped in before_thread_runs(). This
+         * function must be executed before the switch to the new stack.
+         */
+        before_thread_runs_arch();
+
+#ifdef CONFIG_UDEBUG
+        if (THREAD->btrace) {
+                istate_t *istate = THREAD->udebug.uspace_state;
+                if (istate != NULL) {
+                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+                        stack_trace_istate(istate);
+                }
+
+                THREAD->btrace = false;
+        }
+#endif
+
+        fpu_restore();
+
+        /* Time allocation in microseconds. */
+        uint64_t time_to_run = (rq_index + 1) * 10000;
+
+        /* Set the time of next preemption. */
+        CPU_LOCAL->preempt_deadline =
+            CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
+
+        /* Save current CPU cycle */
+        THREAD->last_cycle = get_cycle();
+}
+
 static void cleanup_after_thread(thread_t *thread, state_t out_state)
 {
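One detail worth noting in the new function: the time slice is computed directly from the run-queue index, so threads taken from lower-priority queues receive proportionally longer slices. A small worked example of the formula from the diff (the loop bound of 5 is illustrative only; the actual number of run queues is not shown in this hunk):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
            for (int rq_index = 0; rq_index <= 5; rq_index++) {
                    /* Time allocation in microseconds, as in the diff. */
                    uint64_t time_to_run = (rq_index + 1) * 10000;
                    printf("rq %d -> %" PRIu64 " us (%" PRIu64 " ms)\n",
                        rq_index, time_to_run, time_to_run / 1000);
            }
            return 0;
    }

This prints 10000 us (10 ms) for rq 0 up through 60000 us (60 ms) for rq 5.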
     
@@ -430,7 +445,4 @@
                          * This is the place where threads leave scheduler();
                          */
-
-                        /* Save current CPU cycle */
-                        THREAD->last_cycle = get_cycle();
 
                         irq_spinlock_unlock(&THREAD->lock, false);
     
@@ -512,27 +524,5 @@
         THREAD = find_best_thread(&rq_index);
 
-        relink_rq(rq_index);
-
-        switch_task(THREAD->task);
-
-        irq_spinlock_lock(&THREAD->lock, false);
-        THREAD->state = Running;
-
-#ifdef SCHEDULER_VERBOSE
-        log(LF_OTHER, LVL_DEBUG,
-            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
-            THREAD->ticks, atomic_load(&CPU->nrdy));
-#endif
-
-        /*
-         * Some architectures provide late kernel PA2KA(identity)
-         * mapping in a page fault handler. However, the page fault
-         * handler uses the kernel stack of the running thread and
-         * therefore cannot be used to map it. The kernel stack, if
-         * necessary, is to be mapped in before_thread_runs(). This
-         * function must be executed before the switch to the new stack.
-         */
-        before_thread_runs();
+        prepare_to_run_thread(rq_index);
 
         /*
  • kernel/generic/src/proc/thread.c

--- kernel/generic/src/proc/thread.c (r6e49dab)
+++ kernel/generic/src/proc/thread.c (r8996582)
@@ -121,5 +121,4 @@
         void (*f)(void *) = THREAD->thread_code;
         void *arg = THREAD->thread_arg;
-        THREAD->last_cycle = get_cycle();
 
         /* This is where each thread wakes up after its creation */
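With last_cycle now stamped in prepare_to_run_thread(), the special case at thread startup disappears: every activation, including the first, goes through the same accounting path. A minimal standalone sketch of that invariant (hypothetical code; only last_cycle and get_cycle() are names from the diff):

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    static uint64_t fake_clock;
    static uint64_t get_cycle(void) { return fake_clock; }  /* stub counter */

    typedef struct {
            uint64_t last_cycle;  /* cycle count at last activation */
            uint64_t cycles;      /* accumulated CPU time */
    } acct_t;

    /* Stamped once, just before the thread gains the CPU. */
    static void on_activate(acct_t *t)   { t->last_cycle = get_cycle(); }
    static void on_deactivate(acct_t *t) { t->cycles += get_cycle() - t->last_cycle; }

    int main(void)
    {
            acct_t t = { 0 };
            on_activate(&t);     /* first activation needs no special case */
            fake_clock += 1234;  /* pretend the thread ran for 1234 cycles */
            on_deactivate(&t);
            printf("%" PRIu64 " cycles\n", t.cycles);  /* prints: 1234 cycles */
            return 0;
    }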