Changeset c7ceacf in mainline


Timestamp: 2024-01-15T14:54:17Z
Author:    Jiří Zárevúcky <zarevucky.jiri@…>
Branches:  master
Children:  5861b60
Parents:   4760793, 151c050
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to either parent.
Message:

Merge part of scheduler refactoring changes

A series of changes meant to remove unnecessary spinlock uses and
to improve understandability of the code.
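
For orientation, the merge splits the old single entry point scheduler() into three purpose-specific calls, declared in the updated scheduler.h and thread.h below: scheduler_run() starts scheduling on a CPU that has no current thread and never returns, scheduler_enter(state_t) switches away from the current thread after recording the state it leaves in (Running, Sleeping or Exiting), and thread_yield() is a thin wrapper that re-enqueues the caller as Running. The stand-alone sketch below is not kernel code: the stub bodies and printouts are invented and only illustrate how the three calls relate; the real definitions are in the scheduler.c hunks further down.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdnoreturn.h>

    /* Stand-in for the kernel's state_t; only the states used by the new API. */
    typedef enum { Running, Sleeping, Exiting } state_t;

    /* Models "THREAD != NULL", i.e. whether this CPU currently runs a thread. */
    static bool have_current_thread = true;

    /* scheduler_enter(): record the departing thread's new state and switch away.
     * The real function saves context and picks the next thread; here we only print. */
    static void scheduler_enter(state_t new_state)
    {
            assert(have_current_thread);
            printf("current thread leaves the CPU in state %d\n", (int) new_state);
    }

    /* thread_yield(): the caller stays runnable and is simply re-enqueued. */
    static void thread_yield(void)
    {
            assert(have_current_thread);
            scheduler_enter(Running);
    }

    /* scheduler_run(): used once per CPU, before any thread runs; never returns.
     * The real function switches to the scheduler stack; the sketch just exits. */
    static noreturn void scheduler_run(void)
    {
            assert(!have_current_thread);
            puts("CPU enters its scheduler loop");
            exit(0);
    }

    int main(void)
    {
            thread_yield();              /* clock.c: preemption on clock tick */
            scheduler_enter(Sleeping);   /* thread.c: going to sleep */
            scheduler_enter(Exiting);    /* thread.c: exiting */

            have_current_thread = false;
            scheduler_run();             /* main.c: start scheduling on a fresh CPU */
    }

In the kernel itself, scheduler_enter() additionally saves the thread context and returns only once the thread is scheduled again, as the scheduler.c hunks below show.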

Location: kernel/generic
Files:    6 edited

Legend: lines prefixed with '+' were added in c7ceacf, lines prefixed with '-' were removed, and unprefixed lines are unchanged context.
  • kernel/generic/include/proc/scheduler.h

    --- r4760793
    +++ rc7ceacf
    @@ -41,4 +41,5 @@
     #include <atomic.h>
     #include <adt/list.h>
    +#include <abi/proc/thread.h>

     #define RQ_COUNT          16
    @@ -56,9 +57,10 @@

     extern void scheduler_fpu_lazy_request(void);
    -extern void scheduler(void);
    -extern void scheduler_locked(ipl_t);
     extern void kcpulb(void *arg);

     extern void sched_print_list(void);
    +
    +extern void scheduler_run(void) __attribute__((noreturn));
    +extern void scheduler_enter(state_t);

     /*
  • kernel/generic/include/proc/thread.h

    --- r4760793
    +++ rc7ceacf
    @@ -113,5 +113,4 @@
              */
             context_t saved_context;
    -        ipl_t saved_ipl;

             /**
    @@ -190,4 +189,10 @@
     extern void thread_interrupt(thread_t *);

    +enum sleep_state {
    +        SLEEP_INITIAL,
    +        SLEEP_ASLEEP,
    +        SLEEP_WOKE,
    +};
    +
     typedef enum {
             THREAD_OK,
    @@ -238,4 +243,6 @@
     extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);

    +extern void thread_yield(void);
    +
     extern void thread_print_list(bool);
     extern thread_t *thread_find_by_id(thread_id_t);
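
The sleep_state values moved into this header implement a small lock-free handshake between a thread going to sleep and a thread waking it, used by cleanup_after_thread() in scheduler.c and the wake-up path in thread.c below. The following stand-alone sketch uses plain C11 atomics; the helper names (sleeper_commit, waker) and the printouts are invented for illustration only.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Three-state handshake, mirroring enum sleep_state in thread.h. */
    enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

    /* One flag per thread; stands in for thread->sleep_state. */
    static atomic_int sleep_state = SLEEP_INITIAL;

    /* Sleeper side: runs in the scheduler after the thread announced Sleeping.
     * The thread is parked only if nobody woke it in the meantime. */
    static void sleeper_commit(void)
    {
            int expected = SLEEP_INITIAL;
            if (atomic_compare_exchange_strong_explicit(&sleep_state, &expected,
                SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
                    puts("sleeper: parked (SLEEP_ASLEEP)");
            } else {
                    /* A waker got there first; requeue immediately instead of parking. */
                    assert(expected == SLEEP_WOKE);
                    puts("sleeper: already woken, requeue");
            }
    }

    /* Waker side: unconditionally marks the thread woken; only the caller that
     * observed SLEEP_ASLEEP is responsible for putting it back on a run queue. */
    static void waker(void)
    {
            int prev = atomic_exchange_explicit(&sleep_state, SLEEP_WOKE,
                memory_order_acq_rel);
            if (prev == SLEEP_ASLEEP)
                    puts("waker: thread was parked, re-enqueue it");
            else
                    puts("waker: thread not parked yet, it will notice SLEEP_WOKE itself");
    }

    int main(void)
    {
            waker();            /* wake-up races ahead of the sleep */
            sleeper_commit();   /* sleeper sees SLEEP_WOKE and does not park */
    }

The point of the exchange/compare-exchange pair is that exactly one side observes SLEEP_ASLEEP, so exactly one side re-enqueues the thread, without holding THREAD->lock across the sleep.
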
  • kernel/generic/src/main/main.c

    --- r4760793
    +++ rc7ceacf
    @@ -285,8 +285,8 @@

             /*
    -         * This call to scheduler() will return to kinit,
    +         * This call to scheduler_run() will return to kinit,
              * starting the thread of kernel threads.
              */
    -        scheduler();
    +        scheduler_run();
             /* not reached */
     }
    @@ -356,5 +356,5 @@

             semaphore_up(&ap_completion_semaphore);
    -        scheduler();
    +        scheduler_run();
             /* not reached */
     }
  • kernel/generic/src/proc/scheduler.c

    --- r4760793
    +++ rc7ceacf
    @@ -70,66 +70,4 @@
     atomic_size_t nrdy;  /**< Number of ready threads in the system. */

    -/** Take actions before new thread runs.
    - *
    - * Perform actions that need to be
    - * taken before the newly selected
    - * thread is passed control.
    - *
    - * THREAD->lock is locked on entry
    - *
    - */
    -static void before_thread_runs(void)
    -{
    -        before_thread_runs_arch();
    -
    -#ifdef CONFIG_FPU_LAZY
    -        /*
    -         * The only concurrent modification possible for fpu_owner here is
    -         * another thread changing it from itself to NULL in its destructor.
    -         */
    -        thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
    -            memory_order_relaxed);
    -
    -        if (THREAD == owner)
    -                fpu_enable();
    -        else
    -                fpu_disable();
    -#elif defined CONFIG_FPU
    -        fpu_enable();
    -        if (THREAD->fpu_context_exists)
    -                fpu_context_restore(&THREAD->fpu_context);
    -        else {
    -                fpu_init();
    -                THREAD->fpu_context_exists = true;
    -        }
    -#endif
    -
    -#ifdef CONFIG_UDEBUG
    -        if (THREAD->btrace) {
    -                istate_t *istate = THREAD->udebug.uspace_state;
    -                if (istate != NULL) {
    -                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
    -                        stack_trace_istate(istate);
    -                }
    -
    -                THREAD->btrace = false;
    -        }
    -#endif
    -}
    -
    -/** Take actions after THREAD had run.
    - *
    - * Perform actions that need to be
    - * taken after the running thread
    - * had been preempted by the scheduler.
    - *
    - * THREAD->lock is locked on entry
    - *
    - */
    -static void after_thread_ran(void)
    -{
    -        after_thread_ran_arch();
    -}
    -
     #ifdef CONFIG_FPU_LAZY
     void scheduler_fpu_lazy_request(void)
    @@ -207,22 +145,5 @@
                     list_remove(&thread->rq_link);

    -                irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
    -
    -                thread->cpu = CPU;
    -                thread->priority = i;  /* Correct rq index */
    -
    -                /* Time allocation in microseconds. */
    -                uint64_t time_to_run = (i + 1) * 10000;
    -
    -                /* This is safe because interrupts are disabled. */
    -                CPU_LOCAL->preempt_deadline =
    -                    CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
    -
    -                /*
    -                 * Clear the stolen flag so that it can be migrated
    -                 * when load balancing needs emerge.
    -                 */
    -                thread->stolen = false;
    -                irq_spinlock_unlock(&thread->lock, false);
    +                irq_spinlock_unlock(&(CPU->rq[i].lock), false);

                     *rq_index = i;
    @@ -341,16 +262,167 @@
     }

    -void scheduler(void)
    -{
    -        ipl_t ipl = interrupts_disable();
    -
    -        if (atomic_load(&haltstate))
    -                halt();
    -
    -        if (THREAD) {
    -                irq_spinlock_lock(&THREAD->lock, false);
    -        }
    -
    -        scheduler_locked(ipl);
    +/**
    + * Do whatever needs to be done with current FPU state before we switch to
    + * another thread.
    + */
    +static void fpu_cleanup(void)
    +{
    +#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
    +        fpu_context_save(&THREAD->fpu_context);
    +#endif
    +}
    +
    +/**
    + * Set correct FPU state for this thread after switch from another thread.
    + */
    +static void fpu_restore(void)
    +{
    +#ifdef CONFIG_FPU_LAZY
    +        /*
    +         * The only concurrent modification possible for fpu_owner here is
    +         * another thread changing it from itself to NULL in its destructor.
    +         */
    +        thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
    +            memory_order_relaxed);
    +
    +        if (THREAD == owner)
    +                fpu_enable();
    +        else
    +                fpu_disable();
    +
    +#elif defined CONFIG_FPU
    +        fpu_enable();
    +        if (THREAD->fpu_context_exists)
    +                fpu_context_restore(&THREAD->fpu_context);
    +        else {
    +                fpu_init();
    +                THREAD->fpu_context_exists = true;
    +        }
    +#endif
    +}
    +
    +void scheduler_run(void)
    +{
    +        assert(interrupts_disabled());
    +        assert(THREAD == NULL);
    +        assert(CPU != NULL);
    +
    +        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    +
    +        context_t ctx;
    +        context_save(&ctx);
    +        context_set(&ctx, FADDR(scheduler_separated_stack),
    +            (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
    +        context_restore(&ctx);
    +
    +        unreachable();
    +}
    +
    +/** Things to do before we switch to THREAD context.
    + */
    +static void prepare_to_run_thread(int rq_index)
    +{
    +        relink_rq(rq_index);
    +
    +        switch_task(THREAD->task);
    +
    +        irq_spinlock_lock(&THREAD->lock, false);
    +        THREAD->state = Running;
    +        THREAD->cpu = CPU;
    +        THREAD->priority = rq_index;  /* Correct rq index */
    +
    +        /*
    +         * Clear the stolen flag so that it can be migrated
    +         * when load balancing needs emerge.
    +         */
    +        THREAD->stolen = false;
    +
    +#ifdef SCHEDULER_VERBOSE
    +        log(LF_OTHER, LVL_DEBUG,
    +            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
    +            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
    +            THREAD->ticks, atomic_load(&CPU->nrdy));
    +#endif
    +
    +        /*
    +         * Some architectures provide late kernel PA2KA(identity)
    +         * mapping in a page fault handler. However, the page fault
    +         * handler uses the kernel stack of the running thread and
    +         * therefore cannot be used to map it. The kernel stack, if
    +         * necessary, is to be mapped in before_thread_runs(). This
    +         * function must be executed before the switch to the new stack.
    +         */
    +        before_thread_runs_arch();
    +
    +#ifdef CONFIG_UDEBUG
    +        if (THREAD->btrace) {
    +                istate_t *istate = THREAD->udebug.uspace_state;
    +                if (istate != NULL) {
    +                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
    +                        stack_trace_istate(istate);
    +                }
    +
    +                THREAD->btrace = false;
    +        }
    +#endif
    +
    +        fpu_restore();
    +
    +        /* Time allocation in microseconds. */
    +        uint64_t time_to_run = (rq_index + 1) * 10000;
    +
    +        /* Set the time of next preemption. */
    +        CPU_LOCAL->preempt_deadline =
    +            CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
    +
    +        /* Save current CPU cycle */
    +        THREAD->last_cycle = get_cycle();
    +}
    +
    +static void cleanup_after_thread(thread_t *thread, state_t out_state)
    +{
    +        assert(CURRENT->mutex_locks == 0);
    +        assert(interrupts_disabled());
    +
    +        int expected;
    +
    +        switch (out_state) {
    +        case Running:
    +                thread_ready(thread);
    +                break;
    +
    +        case Exiting:
    +                waitq_close(&thread->join_wq);
    +
    +                /*
    +                 * Release the reference CPU has for the thread.
    +                 * If there are no other references (e.g. threads calling join),
    +                 * the thread structure is deallocated.
    +                 */
    +                thread_put(thread);
    +                break;
    +
    +        case Sleeping:
    +                expected = SLEEP_INITIAL;
    +
    +                /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    +                if (!atomic_compare_exchange_strong_explicit(&thread->sleep_state,
    +                    &expected, SLEEP_ASLEEP,
    +                    memory_order_acq_rel, memory_order_acquire)) {
    +
    +                        assert(expected == SLEEP_WOKE);
    +                        /* The thread has already been woken up, requeue immediately. */
    +                        thread_ready(thread);
    +                }
    +                break;
    +
    +        default:
    +                /*
    +                 * Entering state is unexpected.
    +                 */
    +                panic("tid%" PRIu64 ": unexpected state %s.",
    +                    thread->tid, thread_states[thread->state]);
    +                break;
    +        }
     }

    @@ -362,36 +434,27 @@
      *
      */
    -void scheduler_locked(ipl_t ipl)
    -{
    +void scheduler_enter(state_t new_state)
    +{
    +        ipl_t ipl = interrupts_disable();
    +
             assert(CPU != NULL);
    -
    -        if (THREAD) {
    -                /* Update thread kernel accounting */
    -                THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    -
    -#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
    -                fpu_context_save(&THREAD->fpu_context);
    -#endif
    -                if (!context_save(&THREAD->saved_context)) {
    -                        /*
    -                         * This is the place where threads leave scheduler();
    -                         */
    -
    -                        /* Save current CPU cycle */
    -                        THREAD->last_cycle = get_cycle();
    -
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        interrupts_restore(THREAD->saved_ipl);
    -
    -                        return;
    -                }
    -
    -                /*
    -                 * Interrupt priority level of preempted thread is recorded
    -                 * here to facilitate scheduler() invocations from
    -                 * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
    -                 *
    -                 */
    -                THREAD->saved_ipl = ipl;
    +        assert(THREAD != NULL);
    +
    +        fpu_cleanup();
    +
    +        irq_spinlock_lock(&THREAD->lock, false);
    +        THREAD->state = new_state;
    +
    +        /* Update thread kernel accounting */
    +        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    +
    +        if (!context_save(&THREAD->saved_context)) {
    +                /*
    +                 * This is the place where threads leave scheduler();
    +                 */
    +
    +                irq_spinlock_unlock(&THREAD->lock, false);
    +                interrupts_restore(ipl);
    +                return;
             }

    @@ -439,42 +502,20 @@
             assert(interrupts_disabled());

    +        if (atomic_load(&haltstate))
    +                halt();
    +
             if (THREAD) {
    -                /* Must be run after the switch to scheduler stack */
    -                after_thread_ran();
    -
    -                switch (THREAD->state) {
    -                case Running:
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        thread_ready(THREAD);
    -                        break;
    -
    -                case Exiting:
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        waitq_close(&THREAD->join_wq);
    -
    -                        /*
    -                         * Release the reference CPU has for the thread.
    -                         * If there are no other references (e.g. threads calling join),
    -                         * the thread structure is deallocated.
    -                         */
    -                        thread_put(THREAD);
    -                        break;
    -
    -                case Sleeping:
    -                        /*
    -                         * Prefer the thread after it's woken up.
    -                         */
    +                after_thread_ran_arch();
    +
    +                state_t state = THREAD->state;
    +
    +                if (state == Sleeping) {
    +                        /* Prefer the thread after it's woken up. */
                             THREAD->priority = -1;
    -                        irq_spinlock_unlock(&THREAD->lock, false);
    -                        break;
    -
    -                default:
    -                        /*
    -                         * Entering state is unexpected.
    -                         */
    -                        panic("tid%" PRIu64 ": unexpected state %s.",
    -                            THREAD->tid, thread_states[THREAD->state]);
    -                        break;
                     }
    +
    +                irq_spinlock_unlock(&THREAD->lock, false);
    +
    +                cleanup_after_thread(THREAD, state);

                     THREAD = NULL;
    @@ -484,27 +525,5 @@
             THREAD = find_best_thread(&rq_index);

    -        relink_rq(rq_index);
    -
    -        switch_task(THREAD->task);
    -
    -        irq_spinlock_lock(&THREAD->lock, false);
    -        THREAD->state = Running;
    -
    -#ifdef SCHEDULER_VERBOSE
    -        log(LF_OTHER, LVL_DEBUG,
    -            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
    -            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
    -            THREAD->ticks, atomic_load(&CPU->nrdy));
    -#endif
    -
    -        /*
    -         * Some architectures provide late kernel PA2KA(identity)
    -         * mapping in a page fault handler. However, the page fault
    -         * handler uses the kernel stack of the running thread and
    -         * therefore cannot be used to map it. The kernel stack, if
    -         * necessary, is to be mapped in before_thread_runs(). This
    -         * function must be executed before the switch to the new stack.
    -         */
    -        before_thread_runs();
    +        prepare_to_run_thread(rq_index);

             /*
    @@ -660,5 +679,5 @@
                      *
                      */
    -                scheduler();
    +                thread_yield();
             } else {
                     /*
  • kernel/generic/src/proc/thread.c

    --- r4760793
    +++ rc7ceacf
    @@ -82,10 +82,4 @@
     };

    -enum sleep_state {
    -        SLEEP_INITIAL,
    -        SLEEP_ASLEEP,
    -        SLEEP_WOKE,
    -};
    -
     /** Lock protecting the @c threads ordered dictionary .
      *
    @@ -127,5 +121,4 @@
             void (*f)(void *) = THREAD->thread_code;
             void *arg = THREAD->thread_arg;
    -        THREAD->last_cycle = get_cycle();

             /* This is where each thread wakes up after its creation */
    @@ -320,8 +313,4 @@

             current_initialize((current_t *) thread->kstack);
    -
    -        ipl_t ipl = interrupts_disable();
    -        thread->saved_ipl = interrupts_read();
    -        interrupts_restore(ipl);

             str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
    @@ -525,11 +514,6 @@
             }

    -        irq_spinlock_lock(&THREAD->lock, true);
    -        THREAD->state = Exiting;
    -        irq_spinlock_unlock(&THREAD->lock, true);
    -
    -        scheduler();
    -
    -        panic("should never be reached");
    +        scheduler_enter(Exiting);
    +        unreachable();
     }

    @@ -579,34 +563,4 @@
     }

    -static void thread_wait_internal(void)
    -{
    -        assert(THREAD != NULL);
    -
    -        ipl_t ipl = interrupts_disable();
    -
    -        if (atomic_load(&haltstate))
    -                halt();
    -
    -        /*
    -         * Lock here to prevent a race between entering the scheduler and another
    -         * thread rescheduling this thread.
    -         */
    -        irq_spinlock_lock(&THREAD->lock, false);
    -
    -        int expected = SLEEP_INITIAL;
    -
    -        /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    -        if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
    -            SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
    -                THREAD->state = Sleeping;
    -                scheduler_locked(ipl);
    -        } else {
    -                assert(expected == SLEEP_WOKE);
    -                /* Return immediately. */
    -                irq_spinlock_unlock(&THREAD->lock, false);
    -                interrupts_restore(ipl);
    -        }
    -}
    -
     static void thread_wait_timeout_callback(void *arg)
     {
    @@ -649,10 +603,10 @@
             timeout_t timeout;

    +        /* Extra check to avoid going to scheduler if we don't need to. */
    +        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    +            SLEEP_INITIAL)
    +                return THREAD_WAIT_SUCCESS;
    +
             if (deadline != DEADLINE_NEVER) {
    -                /* Extra check to avoid setting up a deadline if we don't need to. */
    -                if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    -                    SLEEP_INITIAL)
    -                        return THREAD_WAIT_SUCCESS;
    -
                     timeout_initialize(&timeout);
                     timeout_register_deadline(&timeout, deadline,
    @@ -660,5 +614,5 @@
             }

    -        thread_wait_internal();
    +        scheduler_enter(Sleeping);

             if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
    @@ -674,5 +628,5 @@

             int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    -            memory_order_release);
    +            memory_order_acq_rel);

             if (state == SLEEP_ASLEEP) {
    @@ -770,4 +724,11 @@

             (void) waitq_sleep_timeout(&wq, usec);
    +}
    +
    +/** Allow other threads to run. */
    +void thread_yield(void)
    +{
    +        assert(THREAD != NULL);
    +        scheduler_enter(Running);
     }

  • kernel/generic/src/time/clock.c

    --- r4760793
    +++ rc7ceacf
    @@ -187,5 +187,5 @@
             if (THREAD) {
                     if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
    -                        scheduler();
    +                        thread_yield();
     #ifdef CONFIG_UDEBUG
                             /*