--- kernel/generic/src/proc/scheduler.c (rd23712e)
+++ kernel/generic/src/proc/scheduler.c (red7e057)
 /*
  * Copyright (c) 2010 Jakub Jermar
- * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
  *
     
 #include <time/delay.h>
 #include <arch/asm.h>
+#include <arch/faddr.h>
 #include <arch/cycle.h>
 #include <atomic.h>
     
 #include <stacktrace.h>
 
+static void scheduler_separated_stack(void);
+
 atomic_size_t nrdy;  /**< Number of ready threads in the system. */
 
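
The context lines above keep the global nrdy counter, an atomic count of ready threads that the load balancer reads as a hint. As a rough illustration of lock-free ready-thread accounting (not HelenOS code; the names below are invented stand-ins for nrdy and a per-CPU count), a C11 <stdatomic.h> sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's global and per-CPU ready counts. */
    static atomic_size_t nrdy_global = 0;
    static atomic_size_t nrdy_cpu0 = 0;

    /* Called when a thread becomes ready on CPU 0 (sketch only). */
    static void mark_ready(void)
    {
            atomic_fetch_add_explicit(&nrdy_global, 1, memory_order_relaxed);
            atomic_fetch_add_explicit(&nrdy_cpu0, 1, memory_order_relaxed);
    }

    /* Called when a ready thread is taken off a run queue (sketch only). */
    static void mark_running(void)
    {
            atomic_fetch_sub_explicit(&nrdy_cpu0, 1, memory_order_relaxed);
            atomic_fetch_sub_explicit(&nrdy_global, 1, memory_order_relaxed);
    }

    int main(void)
    {
            mark_ready();
            mark_running();
            printf("ready threads: %zu\n", atomic_load(&nrdy_global));
            return 0;
    }

Relaxed ordering is enough for a counter that is only read as a balancing heuristic, which appears to be how nrdy is used here.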
     
 static void relink_rq(int start)
 {
-        assert(interrupts_disabled());
-
         if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
                 return;
     
 }
 
+void scheduler_run(void)
+{
+        assert(interrupts_disabled());
+        assert(THREAD == NULL);
+        assert(CPU != NULL);
+
+        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+        context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
+        unreachable();
+}
+
 /** Things to do before we switch to THREAD context.
  */
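
The added scheduler_run() copies the CURRENT bookkeeping block onto the CPU's scheduler stack and then calls context_replace(), a one-way jump that abandons the current stack and continues in scheduler_separated_stack(). A rough userspace analogue of that one-way stack switch, using POSIX <ucontext.h> (context_replace, CPU_LOCAL and STACK_SIZE are HelenOS-specific; everything below is an invented stand-in):

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    #define SCHED_STACK_SIZE (64 * 1024)   /* illustrative, not the kernel's STACK_SIZE */

    static ucontext_t sched_ctx;

    /* Runs on the dedicated "scheduler" stack; the old stack is never returned to. */
    static void separated_stack(void)
    {
            printf("now running on the dedicated scheduler stack\n");
            exit(0);
    }

    int main(void)
    {
            static char stack[SCHED_STACK_SIZE];

            getcontext(&sched_ctx);
            sched_ctx.uc_stack.ss_sp = stack;
            sched_ctx.uc_stack.ss_size = sizeof(stack);
            sched_ctx.uc_link = NULL;           /* nothing to return to, like context_replace() */
            makecontext(&sched_ctx, separated_stack, 0);

            setcontext(&sched_ctx);             /* one-way jump; the line below is unreachable */
            fprintf(stderr, "unreachable\n");
            return 1;
    }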
     
         switch_task(THREAD->task);
 
-        assert(atomic_get_unordered(&THREAD->cpu) == CPU);
-
-        atomic_set_unordered(&THREAD->state, Running);
-        atomic_set_unordered(&THREAD->priority, rq_index);  /* Correct rq index */
+        irq_spinlock_lock(&THREAD->lock, false);
+        THREAD->state = Running;
+        THREAD->cpu = CPU;
+        THREAD->priority = rq_index;  /* Correct rq index */
 
         /*
     
         log(LF_OTHER, LVL_DEBUG,
             "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-            ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
+            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
             THREAD->ticks, atomic_load(&CPU->nrdy));
 #endif
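
Several hunks in this changeset trade the lock-free atomic_get_unordered()/atomic_set_unordered() accessors for plain field accesses guarded by THREAD->lock. Assuming the "unordered" helpers behave roughly like relaxed atomic loads and stores, the two styles compare as in this sketch (mini_thread_t and both functions are invented for illustration, with a pthread spinlock standing in for the IRQ spinlock):

    #include <pthread.h>
    #include <stdatomic.h>

    /* Invented miniature thread structure, for illustration only. */
    typedef struct {
            pthread_spinlock_t lock;
            int state;                 /* protected by lock in the spinlock variant */
            _Atomic int state_atomic;  /* used by the relaxed-atomic variant */
    } mini_thread_t;

    /* Variant 1: the field is protected by the thread's spinlock (the added code). */
    static void set_state_locked(mini_thread_t *t, int state)
    {
            pthread_spin_lock(&t->lock);
            t->state = state;
            pthread_spin_unlock(&t->lock);
    }

    /* Variant 2: a relaxed atomic store, roughly what atomic_set_unordered() suggests. */
    static void set_state_relaxed(mini_thread_t *t, int state)
    {
            atomic_store_explicit(&t->state_atomic, state, memory_order_relaxed);
    }

    int main(void)
    {
            mini_thread_t t;
            pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);
            atomic_init(&t.state_atomic, 0);
            set_state_locked(&t, 1);
            set_state_relaxed(&t, 2);
            pthread_spin_destroy(&t.lock);
            return 0;
    }

The locked variant lets several fields be updated as one consistent unit; the relaxed variant avoids the lock but gives readers no ordering guarantees beyond the single field.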
     
 
 #ifdef CONFIG_UDEBUG
-        if (atomic_get_unordered(&THREAD->btrace)) {
+        if (THREAD->btrace) {
                 istate_t *istate = THREAD->udebug.uspace_state;
                 if (istate != NULL) {
                         printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
                         stack_trace_istate(istate);
-                } else {
-                        printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
                 }
 
-                atomic_set_unordered(&THREAD->btrace, false);
+                THREAD->btrace = false;
         }
 #endif
     
 }
 
-static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
-{
-        /* Add to the appropriate runqueue. */
-        runq_t *rq = &cpu->rq[i];
-
-        irq_spinlock_lock(&rq->lock, false);
-        list_append(&thread->rq_link, &rq->rq);
-        rq->n++;
-        irq_spinlock_unlock(&rq->lock, false);
-
-        atomic_inc(&nrdy);
-        atomic_inc(&cpu->nrdy);
-}
-
-/** Requeue a thread that was just preempted on this CPU.
- */
-static void thread_requeue_preempted(thread_t *thread)
-{
-        assert(interrupts_disabled());
-        assert(atomic_get_unordered(&thread->state) == Running);
-        assert(atomic_get_unordered(&thread->cpu) == CPU);
-
-        int prio = atomic_get_unordered(&thread->priority);
-
-        if (prio < RQ_COUNT - 1) {
-                prio++;
-                atomic_set_unordered(&thread->priority, prio);
-        }
-
-        atomic_set_unordered(&thread->state, Ready);
-
-        add_to_rq(thread, CPU, prio);
-}
-
-void thread_requeue_sleeping(thread_t *thread)
-{
-        ipl_t ipl = interrupts_disable();
-
-        assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
-
-        atomic_set_unordered(&thread->priority, 0);
-        atomic_set_unordered(&thread->state, Ready);
-
-        /* Prefer the CPU on which the thread ran last */
-        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-
-        if (!cpu) {
-                cpu = CPU;
-                atomic_set_unordered(&thread->cpu, CPU);
-        }
-
-        add_to_rq(thread, cpu, 0);
-
-        interrupts_restore(ipl);
-}
-
-static void cleanup_after_thread(thread_t *thread)
+static void cleanup_after_thread(thread_t *thread, state_t out_state)
 {
         assert(CURRENT->mutex_locks == 0);
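
The removed add_to_rq() shows the enqueue pattern used by the old code: lock the chosen run queue, append the thread, bump the queue length, then bump the global and per-CPU ready counts; thread_requeue_preempted() additionally demotes the thread by one priority level before requeueing it. A minimal userspace sketch of that shape (mutexes instead of IRQ spinlocks, an invented mini_thread_t, and an illustrative RQ_COUNT):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    #define RQ_COUNT 4   /* illustrative; the kernel defines its own RQ_COUNT */

    /* Invented miniature thread and run-queue types, for illustration only. */
    typedef struct mini_thread {
            int tid;
            struct mini_thread *rq_next;
    } mini_thread_t;

    typedef struct {
            pthread_mutex_t lock;
            mini_thread_t *head, *tail;
            size_t n;
    } runq_t;

    static runq_t rq[RQ_COUNT];
    static atomic_size_t nrdy;   /* global count of ready threads */

    /* Mirrors the shape of the removed add_to_rq(): lock, append, bump counters. */
    static void add_to_rq(mini_thread_t *t, int prio)
    {
            runq_t *q = &rq[prio];

            pthread_mutex_lock(&q->lock);
            t->rq_next = NULL;
            if (q->tail)
                    q->tail->rq_next = t;
            else
                    q->head = t;
            q->tail = t;
            q->n++;
            pthread_mutex_unlock(&q->lock);

            atomic_fetch_add(&nrdy, 1);
    }

    /* Preempted threads drop one priority level before being requeued. */
    static void requeue_preempted(mini_thread_t *t, int prio)
    {
            if (prio < RQ_COUNT - 1)
                    prio++;
            add_to_rq(t, prio);
    }

    int main(void)
    {
            for (int i = 0; i < RQ_COUNT; i++)
                    pthread_mutex_init(&rq[i].lock, NULL);

            mini_thread_t t = { .tid = 1 };
            requeue_preempted(&t, 0);
            printf("ready: %zu, rq[1].n = %zu\n", atomic_load(&nrdy), rq[1].n);
            return 0;
    }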
     
         int expected;
 
-        switch (atomic_get_unordered(&thread->state)) {
+        switch (out_state) {
         case Running:
-                thread_requeue_preempted(thread);
+                thread_ready(thread);
                 break;
 
     
                         assert(expected == SLEEP_WOKE);
                         /* The thread has already been woken up, requeue immediately. */
-                        thread_requeue_sleeping(thread);
+                        thread_ready(thread);
                 }
                 break;
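
In the new cleanup_after_thread(thread, out_state), the caller samples the thread's state while still holding THREAD->lock, drops the lock, and then dispatches on the snapshot, instead of re-reading shared state inside the switch as the removed code did. A small sketch of that snapshot-then-dispatch pattern (the enum, struct, and functions below are invented; a mutex stands in for the IRQ spinlock):

    #include <pthread.h>
    #include <stdio.h>

    typedef enum { Running, Sleeping, Exiting } state_t;

    /* Invented miniature thread type, for illustration only. */
    typedef struct {
            pthread_mutex_t lock;
            state_t state;
    } mini_thread_t;

    /* Dispatch on a snapshot taken while the lock was held, like
     * cleanup_after_thread(thread, out_state) in the added code. */
    static void cleanup_after(mini_thread_t *t, state_t out_state)
    {
            (void) t;
            switch (out_state) {
            case Running:
                    printf("requeue as ready\n");
                    break;
            case Sleeping:
                    printf("leave on the wait queue\n");
                    break;
            default:
                    printf("unexpected state %d\n", (int) out_state);
                    break;
            }
    }

    int main(void)
    {
            mini_thread_t t = { .state = Running };
            pthread_mutex_init(&t.lock, NULL);

            pthread_mutex_lock(&t.lock);
            state_t snapshot = t.state;   /* read the field while it is protected */
            pthread_mutex_unlock(&t.lock);

            cleanup_after(&t, snapshot);  /* the lock is no longer needed here */
            return 0;
    }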
     
                  */
                 panic("tid%" PRIu64 ": unexpected state %s.",
-                    thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
+                    thread->tid, thread_states[thread->state]);
                 break;
         }
 }
 
-/** Switch to scheduler context to let other threads run. */
+/** The scheduler
+ *
+ * The thread scheduling procedure.
+ * Passes control directly to
+ * scheduler_separated_stack().
+ *
+ */
 void scheduler_enter(state_t new_state)
 {
     
         assert(THREAD != NULL);
 
+        fpu_cleanup();
+
+        irq_spinlock_lock(&THREAD->lock, false);
+        THREAD->state = new_state;
+
+        /* Update thread kernel accounting */
+        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+
+        if (new_state == Sleeping) {
+                /* Prefer the thread after it's woken up. */
+                THREAD->priority = -1;
+        }
+
+        /*
+         * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
+         * and preemption counter. At this point CURRENT could be coming either
+         * from THREAD's or CPU's stack.
+         *
+         */
+        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+
+        /*
+         * We may not keep the old stack.
+         * Reason: If we kept the old stack and got blocked, for instance, in
+         * find_best_thread(), the old thread could get rescheduled by another
+         * CPU and overwrite the part of its own stack that was also used by
+         * the scheduler on this CPU.
+         *
+         * Moreover, we have to bypass the compiler-generated POP sequence
+         * which is fooled by SP being set to the very top of the stack.
+         * Therefore the scheduler() function continues in
+         * scheduler_separated_stack().
+         *
+         */
+        context_t ctx;
+        context_create(&ctx, scheduler_separated_stack,
+            CPU_LOCAL->stack, STACK_SIZE);
+
+        /* Switch to scheduler context and store current thread's context. */
+        context_swap(&THREAD->saved_context, &ctx);
+
+        /* Returned from scheduler. */
+
+        irq_spinlock_unlock(&THREAD->lock, false);
+        interrupts_restore(ipl);
+}
+
+/** Scheduler stack switch wrapper
+ *
+ * Second part of the scheduler() function
+ * using new stack. Handling the actual context
+ * switch to a new thread.
+ *
+ */
+void scheduler_separated_stack(void)
+{
+        assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
+        assert(CPU != NULL);
+        assert(interrupts_disabled());
+
         if (atomic_load(&haltstate))
                 halt();
 
-        /* Check if we have a thread to switch to. */
+        if (THREAD) {
+                /*
+                 * On Sparc, this saves some extra userspace state that's not
+                 * covered by context_save()/context_restore().
+                 */
+                after_thread_ran_arch();
+
+                state_t state = THREAD->state;
+                irq_spinlock_unlock(&THREAD->lock, false);
+
+                cleanup_after_thread(THREAD, state);
+
+                THREAD = NULL;
+        }
 
         int rq_index;
-        thread_t *new_thread = try_find_thread(&rq_index);
-
-        if (new_thread == NULL && new_state == Running) {
-                /* No other thread to run, but we still have work to do here. */
-                interrupts_restore(ipl);
-                return;
-        }
-
-        atomic_set_unordered(&THREAD->state, new_state);
-
-        /* Update thread kernel accounting */
-        atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
-
-        fpu_cleanup();
-
-        /*
-         * On Sparc, this saves some extra userspace state that's not
-         * covered by context_save()/context_restore().
-         */
-        after_thread_ran_arch();
-
-        if (new_thread) {
-                thread_t *old_thread = THREAD;
-                CPU_LOCAL->prev_thread = old_thread;
-                THREAD = new_thread;
-                /* No waiting necessary, we can switch to the new thread directly. */
-                prepare_to_run_thread(rq_index);
-
-                current_copy(CURRENT, (current_t *) new_thread->kstack);
-                context_swap(&old_thread->saved_context, &new_thread->saved_context);
-        } else {
-                /*
-                 * A new thread isn't immediately available, switch to a separate
-                 * stack to sleep or do other idle stuff.
-                 */
-                current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-                context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
-        }
-
-        assert(CURRENT->mutex_locks == 0);
-        assert(interrupts_disabled());
-
-        /* Check if we need to clean up after another thread. */
-        if (CPU_LOCAL->prev_thread) {
-                cleanup_after_thread(CPU_LOCAL->prev_thread);
-                CPU_LOCAL->prev_thread = NULL;
-        }
-
-        interrupts_restore(ipl);
-}
-
-/** Enter main scheduler loop. Never returns.
- *
- * This function switches to a runnable thread as soon as one is available,
- * after which it is only switched back to if a thread is stopping and there is
- * no other thread to run in its place. We need a separate context for that
- * because we're going to block the CPU, which means we need another context
- * to clean up after the previous thread.
- */
-void scheduler_run(void)
-{
-        assert(interrupts_disabled());
-
-        assert(CPU != NULL);
-        assert(TASK == NULL);
-        assert(THREAD == NULL);
-        assert(interrupts_disabled());
-
-        while (!atomic_load(&haltstate)) {
-                assert(CURRENT->mutex_locks == 0);
-
-                int rq_index;
-                THREAD = find_best_thread(&rq_index);
-                prepare_to_run_thread(rq_index);
-
-                /*
-                 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
-                 * thread's stack.
-                 */
-                current_copy(CURRENT, (current_t *) THREAD->kstack);
-
-                /* Switch to thread context. */
-                context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
-
-                /* Back from another thread. */
-                assert(CPU != NULL);
-                assert(THREAD != NULL);
-                assert(CURRENT->mutex_locks == 0);
-                assert(interrupts_disabled());
-
-                cleanup_after_thread(THREAD);
-
-                /*
-                 * Necessary because we're allowing interrupts in find_best_thread(),
-                 * so we need to avoid other code referencing the thread we left.
-                 */
-                THREAD = NULL;
-        }
-
-        halt();
-}
-
-/** Thread wrapper.
- *
- * This wrapper is provided to ensure that a starting thread properly handles
- * everything it needs to do when first scheduled, and when it exits.
- */
-void thread_main_func(void)
-{
-        assert(interrupts_disabled());
-
-        void (*f)(void *) = THREAD->thread_code;
-        void *arg = THREAD->thread_arg;
-
-        /* This is where each thread wakes up after its creation */
-
-        /* Check if we need to clean up after another thread. */
-        if (CPU_LOCAL->prev_thread) {
-                cleanup_after_thread(CPU_LOCAL->prev_thread);
-                CPU_LOCAL->prev_thread = NULL;
-        }
-
-        interrupts_enable();
-
-        f(arg);
-
-        thread_exit();
+        THREAD = find_best_thread(&rq_index);
+
+        prepare_to_run_thread(rq_index);
+
+        /*
+         * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+         * thread's stack.
+         */
+        current_copy(CURRENT, (current_t *) THREAD->kstack);
+
+        context_restore(&THREAD->saved_context);
 
         /* Not reached */
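
Taken together, the added scheduler_enter()/scheduler_separated_stack() pair implements a two-stack handoff: the outgoing thread saves its own context with context_swap() and continues on the CPU-local scheduler stack, where the next thread is chosen and context_restore()d; when the outgoing thread is eventually picked again, it resumes right after the swap. A userspace analogue of that round trip with <ucontext.h> (all names other than the ucontext API are illustrative; context_swap/context_restore are HelenOS primitives that the calls below only mimic):

    #include <stdio.h>
    #include <ucontext.h>

    #define STACK_SZ (64 * 1024)    /* illustrative stack size */

    static ucontext_t thread_ctx;   /* stands in for THREAD->saved_context */
    static ucontext_t sched_ctx;    /* stands in for the scheduler's context */
    static char sched_stack[STACK_SZ];

    /* Plays the role of scheduler_separated_stack(): picks "the next thread"
     * and restores its saved context. Here the only thread is the caller. */
    static void scheduler_stub(void)
    {
            printf("scheduler: choosing next thread\n");
            setcontext(&thread_ctx);   /* like context_restore(&THREAD->saved_context) */
    }

    int main(void)
    {
            getcontext(&sched_ctx);
            sched_ctx.uc_stack.ss_sp = sched_stack;
            sched_ctx.uc_stack.ss_size = sizeof(sched_stack);
            sched_ctx.uc_link = NULL;
            makecontext(&sched_ctx, scheduler_stub, 0);

            printf("thread: yielding\n");
            /* Like context_swap(&THREAD->saved_context, &ctx): save where we are
             * and continue in the scheduler on its own stack. */
            swapcontext(&thread_ctx, &sched_ctx);

            printf("thread: resumed after the scheduler picked us again\n");
            return 0;
    }

The separate stack matters for the reason given in the added comment: once the thread's context is saved, another CPU may resume it and reuse its stack, so the scheduler must not keep running on it.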
     
         /* Search rq from the back */
         list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
+
+                irq_spinlock_lock(&thread->lock, false);
 
                 /*
     
                  * FPU context is still in the CPU.
                  */
-                if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
+                if (thread->stolen || thread->nomigrate ||
+                    thread == fpu_owner) {
+                        irq_spinlock_unlock(&thread->lock, false);
                         continue;
                 }
 
                 thread->stolen = true;
-                atomic_set_unordered(&thread->cpu, CPU);
+                thread->cpu = CPU;
+
+                irq_spinlock_unlock(&thread->lock, false);
 
                 /*
     
                             thread) {
                                 printf("%" PRIu64 "(%s) ", thread->tid,
-                                    thread_states[atomic_get_unordered(&thread->state)]);
+                                    thread_states[thread->state]);
                         }
                         printf("\n");
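
In the load-balancing hunks above, the stealing CPU walks a remote run queue from the back and skips threads that are already stolen, pinned with nomigrate, or still own the remote CPU's FPU state, then claims the first remaining candidate. A compact sketch of that filtering step over an invented candidate array (mini_thread_t and both helpers are illustrative, not HelenOS code):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Invented miniature thread descriptor, for illustration only. */
    typedef struct {
            int tid;
            bool stolen;      /* already claimed by another CPU in this pass */
            bool nomigrate;   /* pinned to its current CPU */
            bool owns_fpu;    /* its FPU state is still live on the remote CPU */
    } mini_thread_t;

    /* Mirrors the filter in the balancing code: such threads are skipped. */
    static bool can_steal(const mini_thread_t *t)
    {
            return !t->stolen && !t->nomigrate && !t->owns_fpu;
    }

    /* Scan candidates from the back of the queue (the threads least likely to
     * run there soon) and claim the first one that may migrate; returns its
     * index or -1 if none qualifies. */
    static int steal_one(mini_thread_t *cand, size_t n)
    {
            for (size_t i = n; i-- > 0; ) {
                    if (!can_steal(&cand[i]))
                            continue;
                    cand[i].stolen = true;   /* like thread->stolen = true */
                    return (int) i;
            }
            return -1;
    }

    int main(void)
    {
            mini_thread_t q[] = {
                    { .tid = 1 },
                    { .tid = 2, .nomigrate = true },
                    { .tid = 3, .owns_fpu = true },
            };
            int victim = steal_one(q, sizeof(q) / sizeof(q[0]));
            printf("stole index %d (tid %d)\n", victim, victim >= 0 ? q[victim].tid : -1);
            return 0;
    }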