  • kernel/generic/src/proc/scheduler.c

--- kernel/generic/src/proc/scheduler.c (red7e057)
+++ kernel/generic/src/proc/scheduler.c (rd23712e)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
  *
     
@@ -50,5 +51,4 @@
 #include <time/delay.h>
 #include <arch/asm.h>
-#include <arch/faddr.h>
 #include <arch/cycle.h>
 #include <atomic.h>
     
@@ -66,6 +66,4 @@
 #include <stacktrace.h>
 
-static void scheduler_separated_stack(void);
-
 atomic_size_t nrdy;  /**< Number of ready threads in the system. */
 
     
@@ -227,4 +225,6 @@
 static void relink_rq(int start)
 {
+        assert(interrupts_disabled());
+
         if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
                 return;
     
@@ -302,15 +302,4 @@
 }
 
-void scheduler_run(void)
-{
-        assert(interrupts_disabled());
-        assert(THREAD == NULL);
-        assert(CPU != NULL);
-
-        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-        context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
-        unreachable();
-}
-
 /** Things to do before we switch to THREAD context.
  */
     
@@ -321,8 +310,8 @@
         switch_task(THREAD->task);
 
-        irq_spinlock_lock(&THREAD->lock, false);
-        THREAD->state = Running;
-        THREAD->cpu = CPU;
-        THREAD->priority = rq_index;  /* Correct rq index */
+        assert(atomic_get_unordered(&THREAD->cpu) == CPU);
+
+        atomic_set_unordered(&THREAD->state, Running);
+        atomic_set_unordered(&THREAD->priority, rq_index);  /* Correct rq index */
 
         /*
     
@@ -335,5 +324,5 @@
         log(LF_OTHER, LVL_DEBUG,
             "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+            ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
             THREAD->ticks, atomic_load(&CPU->nrdy));
 #endif
     
@@ -350,12 +339,14 @@
 
 #ifdef CONFIG_UDEBUG
-        if (THREAD->btrace) {
+        if (atomic_get_unordered(&THREAD->btrace)) {
                 istate_t *istate = THREAD->udebug.uspace_state;
                 if (istate != NULL) {
                         printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
                         stack_trace_istate(istate);
+                } else {
+                        printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
                 }
 
-                THREAD->btrace = false;
+                atomic_set_unordered(&THREAD->btrace, false);
         }
 #endif
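Throughout these hunks, fields that were previously written under THREAD->lock (state, cpu, priority, btrace) are now accessed through atomic_get_unordered()/atomic_set_unordered(). A minimal stand-alone sketch of the same access pattern, assuming these wrappers behave like C11 relaxed atomic loads and stores (the struct and function names below are invented for the illustration and are not HelenOS code):

/* Illustrative analogy only: per-field relaxed atomics instead of a lock.
 * Assumes atomic_{get,set}_unordered() correspond to relaxed C11 operations. */
#include <stdatomic.h>
#include <stdio.h>

enum state { Ready, Running, Sleeping };

struct demo_thread {
        _Atomic int state;      /* written by the owner, read by observers */
        _Atomic int priority;
};

static void demo_prepare_to_run(struct demo_thread *t, int rq_index)
{
        /* No lock: each word-sized field is updated with a relaxed store. */
        atomic_store_explicit(&t->state, Running, memory_order_relaxed);
        atomic_store_explicit(&t->priority, rq_index, memory_order_relaxed);
}

int main(void)
{
        struct demo_thread t;

        atomic_store_explicit(&t.state, Ready, memory_order_relaxed);
        atomic_store_explicit(&t.priority, 0, memory_order_relaxed);

        demo_prepare_to_run(&t, 3);

        printf("state=%d priority=%d\n",
            atomic_load_explicit(&t.state, memory_order_relaxed),
            atomic_load_explicit(&t.priority, memory_order_relaxed));
        return 0;
}

The point of the pattern is that a single field can be read or written without taking the thread lock; any ordering between fields presumably comes from other synchronization in the scheduler.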
     
@@ -374,5 +365,61 @@
 }
 
-static void cleanup_after_thread(thread_t *thread, state_t out_state)
+static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
+{
+        /* Add to the appropriate runqueue. */
+        runq_t *rq = &cpu->rq[i];
+
+        irq_spinlock_lock(&rq->lock, false);
+        list_append(&thread->rq_link, &rq->rq);
+        rq->n++;
+        irq_spinlock_unlock(&rq->lock, false);
+
+        atomic_inc(&nrdy);
+        atomic_inc(&cpu->nrdy);
+}
+
+/** Requeue a thread that was just preempted on this CPU.
+ */
+static void thread_requeue_preempted(thread_t *thread)
+{
+        assert(interrupts_disabled());
+        assert(atomic_get_unordered(&thread->state) == Running);
+        assert(atomic_get_unordered(&thread->cpu) == CPU);
+
+        int prio = atomic_get_unordered(&thread->priority);
+
+        if (prio < RQ_COUNT - 1) {
+                prio++;
+                atomic_set_unordered(&thread->priority, prio);
+        }
+
+        atomic_set_unordered(&thread->state, Ready);
+
+        add_to_rq(thread, CPU, prio);
+}
+
+void thread_requeue_sleeping(thread_t *thread)
+{
+        ipl_t ipl = interrupts_disable();
+
+        assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
+
+        atomic_set_unordered(&thread->priority, 0);
+        atomic_set_unordered(&thread->state, Ready);
+
+        /* Prefer the CPU on which the thread ran last */
+        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+
+        if (!cpu) {
+                cpu = CPU;
+                atomic_set_unordered(&thread->cpu, CPU);
+        }
+
+        add_to_rq(thread, cpu, 0);
+
+        interrupts_restore(ipl);
+}
+
+static void cleanup_after_thread(thread_t *thread)
 {
         assert(CURRENT->mutex_locks == 0);
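The new helpers split requeueing into a common add_to_rq() plus two policies: thread_requeue_preempted() demotes the thread by one priority level (saturating at RQ_COUNT - 1), while thread_requeue_sleeping() resets it to priority 0 and prefers the CPU the thread last ran on. A minimal single-threaded model of just the priority bookkeeping (locks, atomics and the real runq_t omitted; all names invented for the sketch):

/* Simplified model of the requeue policy shown above; not HelenOS code. */
#include <assert.h>
#include <stdio.h>

#define RQ_COUNT 16

struct model_thread {
        int priority;
};

/* Preempted: demote by one level, but never past the last queue. */
static int requeue_preempted(struct model_thread *t)
{
        if (t->priority < RQ_COUNT - 1)
                t->priority++;
        return t->priority;     /* index of the run queue to append to */
}

/* Woken from sleep: always back to the highest-priority queue. */
static int requeue_sleeping(struct model_thread *t)
{
        t->priority = 0;
        return 0;
}

int main(void)
{
        struct model_thread t = { .priority = 0 };

        for (int i = 0; i < 20; i++)
                requeue_preempted(&t);

        assert(t.priority == RQ_COUNT - 1);     /* demotion saturates */
        printf("after 20 preemptions: %d\n", t.priority);
        printf("after sleeping: %d\n", requeue_sleeping(&t));
        return 0;
}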
     
@@ -381,7 +428,7 @@
         int expected;
 
-        switch (out_state) {
+        switch (atomic_get_unordered(&thread->state)) {
         case Running:
-                thread_ready(thread);
+                thread_requeue_preempted(thread);
                 break;
 
     
@@ -407,5 +454,5 @@
                         assert(expected == SLEEP_WOKE);
                         /* The thread has already been woken up, requeue immediately. */
-                        thread_ready(thread);
+                        thread_requeue_sleeping(thread);
                 }
                 break;
     
@@ -416,16 +463,10 @@
                  */
                 panic("tid%" PRIu64 ": unexpected state %s.",
-                    thread->tid, thread_states[thread->state]);
+                    thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
                 break;
         }
 }
 
-/** The scheduler
- *
- * The thread scheduling procedure.
- * Passes control directly to
- * scheduler_separated_stack().
- *
- */
+/** Switch to scheduler context to let other threads run. */
 void scheduler_enter(state_t new_state)
 {
     
@@ -435,94 +476,137 @@
         assert(THREAD != NULL);
 
-        fpu_cleanup();
-
-        irq_spinlock_lock(&THREAD->lock, false);
-        THREAD->state = new_state;
-
-        /* Update thread kernel accounting */
-        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-        if (new_state == Sleeping) {
-                /* Prefer the thread after it's woken up. */
-                THREAD->priority = -1;
-        }
-
-        /*
-         * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
-         * and preemption counter. At this point CURRENT could be coming either
-         * from THREAD's or CPU's stack.
-         *
-         */
-        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
-        /*
-         * We may not keep the old stack.
-         * Reason: If we kept the old stack and got blocked, for instance, in
-         * find_best_thread(), the old thread could get rescheduled by another
-         * CPU and overwrite the part of its own stack that was also used by
-         * the scheduler on this CPU.
-         *
-         * Moreover, we have to bypass the compiler-generated POP sequence
-         * which is fooled by SP being set to the very top of the stack.
-         * Therefore the scheduler() function continues in
-         * scheduler_separated_stack().
-         *
-         */
-        context_t ctx;
-        context_create(&ctx, scheduler_separated_stack,
-            CPU_LOCAL->stack, STACK_SIZE);
-
-        /* Switch to scheduler context and store current thread's context. */
-        context_swap(&THREAD->saved_context, &ctx);
-
-        /* Returned from scheduler. */
-
-        irq_spinlock_unlock(&THREAD->lock, false);
-        interrupts_restore(ipl);
-}
-
-/** Scheduler stack switch wrapper
- *
- * Second part of the scheduler() function
- * using new stack. Handling the actual context
- * switch to a new thread.
- *
- */
-void scheduler_separated_stack(void)
-{
-        assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
-        assert(CPU != NULL);
-        assert(interrupts_disabled());
-
         if (atomic_load(&haltstate))
                 halt();
 
-        if (THREAD) {
-                /*
-                 * On Sparc, this saves some extra userspace state that's not
-                 * covered by context_save()/context_restore().
-                 */
-                after_thread_ran_arch();
-
-                state_t state = THREAD->state;
-                irq_spinlock_unlock(&THREAD->lock, false);
-
-                cleanup_after_thread(THREAD, state);
-
+        /* Check if we have a thread to switch to. */
+
+        int rq_index;
+        thread_t *new_thread = try_find_thread(&rq_index);
+
+        if (new_thread == NULL && new_state == Running) {
+                /* No other thread to run, but we still have work to do here. */
+                interrupts_restore(ipl);
+                return;
+        }
+
+        atomic_set_unordered(&THREAD->state, new_state);
+
+        /* Update thread kernel accounting */
+        atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
+
+        fpu_cleanup();
+
+        /*
+         * On Sparc, this saves some extra userspace state that's not
+         * covered by context_save()/context_restore().
+         */
+        after_thread_ran_arch();
+
+        if (new_thread) {
+                thread_t *old_thread = THREAD;
+                CPU_LOCAL->prev_thread = old_thread;
+                THREAD = new_thread;
+                /* No waiting necessary, we can switch to the new thread directly. */
+                prepare_to_run_thread(rq_index);
+
+                current_copy(CURRENT, (current_t *) new_thread->kstack);
+                context_swap(&old_thread->saved_context, &new_thread->saved_context);
+        } else {
+                /*
+                 * A new thread isn't immediately available, switch to a separate
+                 * stack to sleep or do other idle stuff.
+                 */
+                current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+                context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
+        }
+
+        assert(CURRENT->mutex_locks == 0);
+        assert(interrupts_disabled());
+
+        /* Check if we need to clean up after another thread. */
+        if (CPU_LOCAL->prev_thread) {
+                cleanup_after_thread(CPU_LOCAL->prev_thread);
+                CPU_LOCAL->prev_thread = NULL;
+        }
+
+        interrupts_restore(ipl);
+}
+
+/** Enter main scheduler loop. Never returns.
+ *
+ * This function switches to a runnable thread as soon as one is available,
+ * after which it is only switched back to if a thread is stopping and there is
+ * no other thread to run in its place. We need a separate context for that
+ * because we're going to block the CPU, which means we need another context
+ * to clean up after the previous thread.
+ */
+void scheduler_run(void)
+{
+        assert(interrupts_disabled());
+
+        assert(CPU != NULL);
+        assert(TASK == NULL);
+        assert(THREAD == NULL);
+        assert(interrupts_disabled());
+
+        while (!atomic_load(&haltstate)) {
+                assert(CURRENT->mutex_locks == 0);
+
+                int rq_index;
+                THREAD = find_best_thread(&rq_index);
+                prepare_to_run_thread(rq_index);
+
+                /*
+                 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+                 * thread's stack.
+                 */
+                current_copy(CURRENT, (current_t *) THREAD->kstack);
+
+                /* Switch to thread context. */
+                context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
+
+                /* Back from another thread. */
+                assert(CPU != NULL);
+                assert(THREAD != NULL);
+                assert(CURRENT->mutex_locks == 0);
+                assert(interrupts_disabled());
+
+                cleanup_after_thread(THREAD);
+
+                /*
+                 * Necessary because we're allowing interrupts in find_best_thread(),
+                 * so we need to avoid other code referencing the thread we left.
+                 */
                 THREAD = NULL;
         }
 
-        int rq_index;
-        THREAD = find_best_thread(&rq_index);
-
-        prepare_to_run_thread(rq_index);
-
-        /*
-         * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
-         * thread's stack.
-         */
-        current_copy(CURRENT, (current_t *) THREAD->kstack);
-
-        context_restore(&THREAD->saved_context);
+        halt();
+}
+
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that a starting thread properly handles
+ * everything it needs to do when first scheduled, and when it exits.
+ */
+void thread_main_func(void)
+{
+        assert(interrupts_disabled());
+
+        void (*f)(void *) = THREAD->thread_code;
+        void *arg = THREAD->thread_arg;
+
+        /* This is where each thread wakes up after its creation */
+
+        /* Check if we need to clean up after another thread. */
+        if (CPU_LOCAL->prev_thread) {
+                cleanup_after_thread(CPU_LOCAL->prev_thread);
+                CPU_LOCAL->prev_thread = NULL;
+        }
+
+        interrupts_enable();
+
+        f(arg);
+
+        thread_exit();
 
         /* Not reached */
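The rewritten scheduler keeps a persistent per-CPU scheduler context instead of the old scheduler_separated_stack() trampoline: a stopping thread switches directly to its successor when one is ready, records itself in CPU_LOCAL->prev_thread, and whichever context runs next (the successor in scheduler_enter()/thread_main_func(), or the loop in scheduler_run()) performs cleanup_after_thread() on its behalf. A user-space analogy of that handoff, sketched with POSIX ucontext; every name below is invented for the illustration and none of it is HelenOS code:

/* User-space analogy of "switch directly to the next thread, let it clean up
 * after the previous one"; illustration only. */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SZ (64 * 1024)

static ucontext_t sched_ctx;      /* plays the role of CPU_LOCAL->scheduler_context */
static ucontext_t worker_ctx[2];
static int prev = -1;             /* plays the role of CPU_LOCAL->prev_thread */

static void cleanup_after(int id)
{
        /* In the kernel this is cleanup_after_thread(): requeue or reap. */
        printf("cleanup after worker %d\n", id);
}

static void worker(int id)
{
        /* A freshly scheduled context first finishes off its predecessor. */
        if (prev >= 0) {
                cleanup_after(prev);
                prev = -1;
        }
        printf("worker %d running\n", id);

        if (id == 0) {
                /* Hand off directly to the next worker; it cleans up after us. */
                prev = 0;
                swapcontext(&worker_ctx[0], &worker_ctx[1]);
        }
        /* Falling off the end returns to sched_ctx via uc_link. */
}

int main(void)
{
        for (int i = 0; i < 2; i++) {
                getcontext(&worker_ctx[i]);
                worker_ctx[i].uc_stack.ss_sp = malloc(STACK_SZ);
                worker_ctx[i].uc_stack.ss_size = STACK_SZ;
                worker_ctx[i].uc_link = &sched_ctx;
                makecontext(&worker_ctx[i], (void (*)(void))worker, 1, i);
        }

        /* The scheduler loop enters the first runnable worker. */
        swapcontext(&sched_ctx, &worker_ctx[0]);
        printf("back in the scheduler context\n");
        return 0;
}

The property mirrored here is that a context's cleanup never runs on its own stack; it is deferred to whatever context is switched to next.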
     
@@ -550,6 +634,4 @@
         list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
 
-                irq_spinlock_lock(&thread->lock, false);
-
                 /*
                  * Do not steal CPU-wired threads, threads
     
@@ -558,14 +640,10 @@
                  * FPU context is still in the CPU.
                  */
-                if (thread->stolen || thread->nomigrate ||
-                    thread == fpu_owner) {
-                        irq_spinlock_unlock(&thread->lock, false);
+                if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
                         continue;
                 }
 
                 thread->stolen = true;
-                thread->cpu = CPU;
-
-                irq_spinlock_unlock(&thread->lock, false);
+                atomic_set_unordered(&thread->cpu, CPU);
 
                 /*
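The work-stealing loop now updates thread->cpu with an unordered atomic store instead of taking thread->lock, but the selection policy is unchanged: walk the other CPU's run queue in reverse and skip threads that are already stolen, cannot migrate, or still own that CPU's FPU context. A minimal model of that scan (plain arrays instead of the kernel's lists and locks; all names invented for the sketch):

/* Simplified model of the "scan in reverse, skip unmovable entries" loop. */
#include <stdbool.h>
#include <stdio.h>

struct model_thread {
        bool stolen;
        bool nomigrate;
};

/* Return the index of the first migratable thread, scanning from the tail. */
static int pick_stealable(const struct model_thread *rq, int n,
    const struct model_thread *fpu_owner)
{
        for (int i = n - 1; i >= 0; i--) {
                if (rq[i].stolen || rq[i].nomigrate || &rq[i] == fpu_owner)
                        continue;
                return i;
        }
        return -1;      /* nothing to steal */
}

int main(void)
{
        struct model_thread rq[4] = {
                { false, false }, { false, false }, { false, true }, { true, false },
        };

        int idx = pick_stealable(rq, 4, &rq[1]);  /* pretend rq[1] owns the FPU */
        printf("steal index: %d\n", idx);         /* prints 0 */
        return 0;
}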
     
@@ -712,5 +790,5 @@
                             thread) {
                                 printf("%" PRIu64 "(%s) ", thread->tid,
-                                    thread_states[thread->state]);
+                                    thread_states[atomic_get_unordered(&thread->state)]);
                         }
                         printf("\n");