Changeset a5b5f17 in mainline


Timestamp:
2024-01-21T16:36:15Z (11 months ago)
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master
Children:
1a1e124
Parents:
ed7e057 (diff), d23712e (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge scheduler refactoring to remove the need for the thread structure lock

All necessary synchronization is already a product of other operations
that enforce ordering (namely, runqueue manipulation and thread_sleep()/
thread_wakeup()). Some fields formally become atomic, which is only
needed because they are read from other threads to print out statistics.
These atomic operations are limited to relaxed individual reads/writes
of native-sized fields, which should, at least in theory, compile to the
same code as regular volatile accesses; the only difference is that
concurrent accesses from different threads are no longer undefined
behavior.
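
For illustration only, here is a minimal sketch of the relaxed shorthands this
merge adds in kernel/generic/include/atomic.h, together with a hypothetical
reader in the style of the kernel's statistics printouts. stats_print_thread()
is an invented name; thread_t, cpu_t, state_t, thread_states and the kernel
printf are assumed from the HelenOS kernel environment:

    #include <stdatomic.h>

    /* Shorthands introduced by this changeset: relaxed loads/stores only. */
    #define atomic_set_unordered(var, val) \
            atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) \
            atomic_load_explicit((var), memory_order_relaxed)

    /*
     * Hypothetical statistics reader: the loads may race with the owning
     * thread's stores, but the races are no longer undefined behavior.
     */
    static void stats_print_thread(thread_t *thread)
    {
            state_t state = atomic_get_unordered(&thread->state);
            cpu_t *cpu = atomic_get_unordered(&thread->cpu);

            if (cpu != NULL)
                    printf("tid %" PRIu64 ": %s on cpu%u\n", thread->tid,
                        thread_states[state], cpu->id);
            else
                    printf("tid %" PRIu64 ": %s (no cpu)\n", thread->tid,
                        thread_states[state]);
    }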

Additionally, it is now possible to switch directly to the new thread's
context instead of going through a separate scheduler stack. A separate
context is only needed and used when no runnable thread is immediately
available, which means switching is optimized for the limiting case where
many threads are waiting for execution. Switching is also avoided
altogether when there is only one runnable thread and it is being
preempted. Originally, the scheduler would switch to a separate stack,
requeue the thread that was running, retrieve that same thread from the
queue, and switch to it again; all of that work is now avoided.
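
The resulting control flow can be summarized as follows. This is an abridged
paraphrase of the new scheduler_enter() in kernel/generic/src/proc/scheduler.c
(see the diff below); it leaves out accounting, FPU cleanup, CURRENT
bookkeeping, the halt check and the architecture hooks:

    void scheduler_enter(state_t new_state)
    {
            ipl_t ipl = interrupts_disable();

            int rq_index;
            thread_t *new_thread = try_find_thread(&rq_index);

            if (new_thread == NULL && new_state == Running) {
                    /* No other runnable thread and we are merely preempted:
                     * keep running, no switch at all. */
                    interrupts_restore(ipl);
                    return;
            }

            atomic_set_unordered(&THREAD->state, new_state);

            if (new_thread) {
                    /* A runnable thread is available: switch to it directly,
                     * bypassing the separate scheduler stack. */
                    thread_t *old_thread = THREAD;
                    CPU_LOCAL->prev_thread = old_thread;
                    THREAD = new_thread;
                    prepare_to_run_thread(rq_index);
                    context_swap(&old_thread->saved_context,
                        &new_thread->saved_context);
            } else {
                    /* Nothing runnable: fall back to the separate scheduler
                     * context to idle or clean up. */
                    context_swap(&THREAD->saved_context,
                        &CPU_LOCAL->scheduler_context);
            }

            /* Back in this thread: clean up after whichever thread ran here
             * before us. */
            if (CPU_LOCAL->prev_thread) {
                    cleanup_after_thread(CPU_LOCAL->prev_thread);
                    CPU_LOCAL->prev_thread = NULL;
            }

            interrupts_restore(ipl);
    }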

Location:
kernel
Files:
1 deleted
16 edited

  • kernel/arch/sparc64/src/proc/sun4u/scheduler.c

    r ed7e057   r a5b5f17
    7676{
    7777        if (THREAD->uspace) {
     78                asm volatile ("flushw");
     79
    7880                /* sample the state of the userspace window buffer */
    7981                THREAD->arch.uspace_window_buffer =
  • kernel/arch/sparc64/src/proc/sun4v/scheduler.c

    r ed7e057   r a5b5f17
    6868{
    6969        if (THREAD->uspace) {
     70                asm volatile ("flushw");
     71
    7072                /* sample the state of the userspace window buffer */
    7173                THREAD->arch.uspace_window_buffer =
  • kernel/generic/include/atomic.h

    r ed7e057   r a5b5f17
    3939#include <typedefs.h>
    4040#include <stdatomic.h>
     41
     42/*
     43 * Shorthand for relaxed atomic read/write, something that's needed to formally
     44 * avoid undefined behavior in cases where we need to read a variable in
     45 * different threads and we don't particularly care about ordering
     46 * (e.g. statistic printouts). This is most likely translated into the same
     47 * assembly instructions as regular read/writes.
     48 */
     49#define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
     50#define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)
    4151
    4252#define atomic_predec(val) \
  • kernel/generic/include/cpu.h

    r ed7e057   r a5b5f17
    7474        bool idle;
    7575        uint64_t last_cycle;
     76
     77        context_t scheduler_context;
     78
     79        struct thread *prev_thread;
    7680} cpu_local_t;
    7781
  • kernel/generic/include/proc/scheduler.h

    r ed7e057   r a5b5f17
    6464extern void scheduler_enter(state_t);
    6565
     66extern void thread_main_func(void);
     67
    6668/*
    6769 * To be defined by architectures.
  • kernel/generic/include/proc/thread.h

    r ed7e057   r a5b5f17
    9595        waitq_t join_wq;
    9696
    97         /** Lock protecting thread structure.
     97        /** Thread accounting. */
     98        atomic_time_stat_t ucycles;
     99        atomic_time_stat_t kcycles;
     100
     101        /** Architecture-specific data. */
     102        thread_arch_t arch;
     103
     104#ifdef CONFIG_UDEBUG
     105        /**
     106         * If true, the scheduler will print a stack trace
     107         * to the kernel console upon scheduling this thread.
     108         */
     109        atomic_int_fast8_t btrace;
     110
     111        /** Debugging stuff */
     112        udebug_thread_t udebug;
     113#endif /* CONFIG_UDEBUG */
     114
     115        /*
     116         * Immutable fields.
    98117         *
    99          * Protects the whole thread structure except fields listed above.
    100          */
    101         IRQ_SPINLOCK_DECLARE(lock);
    102 
    103         char name[THREAD_NAME_BUFLEN];
     118         * These fields are only modified during initialization, and are not
     119         * changed at any time between initialization and destruction.
     120         * Can be accessed without synchronization in most places.
     121         */
     122
     123        /** Thread ID. */
     124        thread_id_t tid;
    104125
    105126        /** Function implementing the thread. */
     
    108129        void *thread_arg;
    109130
     131        char name[THREAD_NAME_BUFLEN];
     132
     133        /** Thread is executed in user space. */
     134        bool uspace;
     135
     136        /** Thread doesn't affect accumulated accounting. */
     137        bool uncounted;
     138
     139        /** Containing task. */
     140        task_t *task;
     141
     142        /** Thread's kernel stack. */
     143        uint8_t *kstack;
     144
     145        /*
     146         * Local fields.
     147         *
     148         * These fields can be safely accessed from code that _controls execution_
     149         * of this thread. Code controls execution of a thread if either:
     150         *  - it runs in the context of said thread AND interrupts are disabled
     151         *    (interrupts can and will access these fields)
     152         *  - the thread is not running, and the code accessing it can legally
     153         *    add/remove the thread to/from a runqueue, i.e., either:
     154         *    - it is allowed to enqueue thread in a new runqueue
     155         *    - it holds the lock to the runqueue containing the thread
     156         *
     157         */
     158
    110159        /**
    111160         * From here, the stored context is restored
     
    114163        context_t saved_context;
    115164
     165        // TODO: we only need one of the two bools below
     166
    116167        /**
    117168         * True if this thread is executing copy_from_uspace().
     
    126177        bool in_copy_to_uspace;
    127178
     179        /*
     180         * FPU context is a special case. If lazy FPU switching is disabled,
     181         * it acts as a regular local field. However, if lazy switching is enabled,
     182         * the context is synchronized via CPU->fpu_lock
     183         */
    128184#ifdef CONFIG_FPU
    129185        fpu_context_t fpu_context;
     
    134190        unsigned int nomigrate;
    135191
    136         /** Thread state. */
    137         state_t state;
    138 
    139         /** Thread CPU. */
    140         cpu_t *cpu;
    141         /** Containing task. */
    142         task_t *task;
    143192        /** Thread was migrated to another CPU and has not run yet. */
    144193        bool stolen;
    145         /** Thread is executed in user space. */
    146         bool uspace;
    147 
    148         /** Thread accounting. */
    149         uint64_t ucycles;
    150         uint64_t kcycles;
     194
     195        /**
     196         * Thread state (state_t).
     197         * This is atomic because we read it via some commands for debug output,
     198         * otherwise it could just be a regular local.
     199         */
     200        atomic_int_fast32_t state;
     201
     202        /** Thread CPU. */
     203        _Atomic(cpu_t *) cpu;
     204
     205        /** Thread's priority. Implemented as index to CPU->rq */
     206        atomic_int_fast32_t priority;
     207
    151208        /** Last sampled cycle. */
    152209        uint64_t last_cycle;
    153         /** Thread doesn't affect accumulated accounting. */
    154         bool uncounted;
    155 
    156         /** Thread's priority. Implemented as index to CPU->rq */
    157         int priority;
    158         /** Thread ID. */
    159         thread_id_t tid;
    160 
    161         /** Architecture-specific data. */
    162         thread_arch_t arch;
    163 
    164         /** Thread's kernel stack. */
    165         uint8_t *kstack;
    166 
    167 #ifdef CONFIG_UDEBUG
    168         /**
    169          * If true, the scheduler will print a stack trace
    170          * to the kernel console upon scheduling this thread.
    171          */
    172         bool btrace;
    173 
    174         /** Debugging stuff */
    175         udebug_thread_t udebug;
    176 #endif /* CONFIG_UDEBUG */
    177210} thread_t;
    178211
     
    186219extern void thread_attach(thread_t *, task_t *);
    187220extern void thread_start(thread_t *);
    188 extern void thread_ready(thread_t *);
     221extern void thread_requeue_sleeping(thread_t *);
    189222extern void thread_exit(void) __attribute__((noreturn));
    190223extern void thread_interrupt(thread_t *);
  • kernel/generic/meson.build

    r ed7e057   r a5b5f17
    9595        'src/mm/malloc.c',
    9696        'src/mm/reserve.c',
    97         'src/preempt/preemption.c',
    9897        'src/printf/printf.c',
    9998        'src/printf/snprintf.c',
  • kernel/generic/src/interrupt/interrupt.c

    r ed7e057   r a5b5f17
    114114
    115115        /* Account user cycles */
    116         if (THREAD) {
    117                 irq_spinlock_lock(&THREAD->lock, false);
     116        if (THREAD)
    118117                thread_update_accounting(true);
    119                 irq_spinlock_unlock(&THREAD->lock, false);
    120         }
    121118
    122119        /* Account CPU usage if it woke up from sleep */
     
    155152
    156153        /* Do not charge THREAD for exception cycles */
    157         if (THREAD) {
    158                 irq_spinlock_lock(&THREAD->lock, false);
     154        if (THREAD)
    159155                THREAD->last_cycle = end_cycle;
    160                 irq_spinlock_unlock(&THREAD->lock, false);
    161         }
    162156#else
    163157        panic("No space for any exception handler, yet we want to handle some exception.");
  • kernel/generic/src/main/main.c

    r ed7e057   r a5b5f17
    287287         * starting the thread of kernel threads.
    288288         */
    289         scheduler_run();
     289        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     290        context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
    290291        /* not reached */
    291292}
     
    327328        ARCH_OP(post_cpu_init);
    328329
    329         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    330 
    331330        /*
    332331         * If we woke kmp up before we left the kernel stack, we could
     
    334333         * switch to this cpu's private stack prior to waking kmp up.
    335334         */
     335        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    336336        context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    337337        /* not reached */
  • kernel/generic/src/proc/scheduler.c

    r ed7e057   r a5b5f17
    11/*
    22 * Copyright (c) 2010 Jakub Jermar
     3 * Copyright (c) 2023 Jiří Zárevúcky
    34 * All rights reserved.
    45 *
     
    5051#include <time/delay.h>
    5152#include <arch/asm.h>
    52 #include <arch/faddr.h>
    5353#include <arch/cycle.h>
    5454#include <atomic.h>
     
    6666#include <stacktrace.h>
    6767
    68 static void scheduler_separated_stack(void);
    69 
    7068atomic_size_t nrdy;  /**< Number of ready threads in the system. */
    7169
     
    227225static void relink_rq(int start)
    228226{
     227        assert(interrupts_disabled());
     228
    229229        if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
    230230                return;
     
    302302}
    303303
    304 void scheduler_run(void)
    305 {
    306         assert(interrupts_disabled());
    307         assert(THREAD == NULL);
    308         assert(CPU != NULL);
    309 
    310         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    311         context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    312         unreachable();
    313 }
    314 
    315304/** Things to do before we switch to THREAD context.
    316305 */
     
    321310        switch_task(THREAD->task);
    322311
    323         irq_spinlock_lock(&THREAD->lock, false);
    324         THREAD->state = Running;
    325         THREAD->cpu = CPU;
    326         THREAD->priority = rq_index;  /* Correct rq index */
     312        assert(atomic_get_unordered(&THREAD->cpu) == CPU);
     313
     314        atomic_set_unordered(&THREAD->state, Running);
     315        atomic_set_unordered(&THREAD->priority, rq_index);  /* Correct rq index */
    327316
    328317        /*
     
    335324        log(LF_OTHER, LVL_DEBUG,
    336325            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
    337             ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
     326            ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
    338327            THREAD->ticks, atomic_load(&CPU->nrdy));
    339328#endif
     
    350339
    351340#ifdef CONFIG_UDEBUG
    352         if (THREAD->btrace) {
     341        if (atomic_get_unordered(&THREAD->btrace)) {
    353342                istate_t *istate = THREAD->udebug.uspace_state;
    354343                if (istate != NULL) {
    355344                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
    356345                        stack_trace_istate(istate);
     346                } else {
     347                        printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
    357348                }
    358349
    359                 THREAD->btrace = false;
     350                atomic_set_unordered(&THREAD->btrace, false);
    360351        }
    361352#endif
     
    374365}
    375366
    376 static void cleanup_after_thread(thread_t *thread, state_t out_state)
     367static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
     368{
     369        /* Add to the appropriate runqueue. */
     370        runq_t *rq = &cpu->rq[i];
     371
     372        irq_spinlock_lock(&rq->lock, false);
     373        list_append(&thread->rq_link, &rq->rq);
     374        rq->n++;
     375        irq_spinlock_unlock(&rq->lock, false);
     376
     377        atomic_inc(&nrdy);
     378        atomic_inc(&cpu->nrdy);
     379}
     380
     381/** Requeue a thread that was just preempted on this CPU.
     382 */
     383static void thread_requeue_preempted(thread_t *thread)
     384{
     385        assert(interrupts_disabled());
     386        assert(atomic_get_unordered(&thread->state) == Running);
     387        assert(atomic_get_unordered(&thread->cpu) == CPU);
     388
     389        int prio = atomic_get_unordered(&thread->priority);
     390
     391        if (prio < RQ_COUNT - 1) {
     392                prio++;
     393                atomic_set_unordered(&thread->priority, prio);
     394        }
     395
     396        atomic_set_unordered(&thread->state, Ready);
     397
     398        add_to_rq(thread, CPU, prio);
     399}
     400
     401void thread_requeue_sleeping(thread_t *thread)
     402{
     403        ipl_t ipl = interrupts_disable();
     404
     405        assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
     406
     407        atomic_set_unordered(&thread->priority, 0);
     408        atomic_set_unordered(&thread->state, Ready);
     409
     410        /* Prefer the CPU on which the thread ran last */
     411        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     412
     413        if (!cpu) {
     414                cpu = CPU;
     415                atomic_set_unordered(&thread->cpu, CPU);
     416        }
     417
     418        add_to_rq(thread, cpu, 0);
     419
     420        interrupts_restore(ipl);
     421}
     422
     423static void cleanup_after_thread(thread_t *thread)
    377424{
    378425        assert(CURRENT->mutex_locks == 0);
     
    381428        int expected;
    382429
    383         switch (out_state) {
     430        switch (atomic_get_unordered(&thread->state)) {
    384431        case Running:
    385                 thread_ready(thread);
     432                thread_requeue_preempted(thread);
    386433                break;
    387434
     
    407454                        assert(expected == SLEEP_WOKE);
    408455                        /* The thread has already been woken up, requeue immediately. */
    409                         thread_ready(thread);
     456                        thread_requeue_sleeping(thread);
    410457                }
    411458                break;
     
    416463                 */
    417464                panic("tid%" PRIu64 ": unexpected state %s.",
    418                     thread->tid, thread_states[thread->state]);
     465                    thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
    419466                break;
    420467        }
    421468}
    422469
    423 /** The scheduler
    424  *
    425  * The thread scheduling procedure.
    426  * Passes control directly to
    427  * scheduler_separated_stack().
    428  *
    429  */
     470/** Switch to scheduler context to let other threads run. */
    430471void scheduler_enter(state_t new_state)
    431472{
     
    435476        assert(THREAD != NULL);
    436477
    437         fpu_cleanup();
    438 
    439         irq_spinlock_lock(&THREAD->lock, false);
    440         THREAD->state = new_state;
    441 
    442         /* Update thread kernel accounting */
    443         THREAD->kcycles += get_cycle() - THREAD->last_cycle;
    444 
    445         if (new_state == Sleeping) {
    446                 /* Prefer the thread after it's woken up. */
    447                 THREAD->priority = -1;
    448         }
    449 
    450         /*
    451          * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
    452          * and preemption counter. At this point CURRENT could be coming either
    453          * from THREAD's or CPU's stack.
    454          *
    455          */
    456         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    457 
    458         /*
    459          * We may not keep the old stack.
    460          * Reason: If we kept the old stack and got blocked, for instance, in
    461          * find_best_thread(), the old thread could get rescheduled by another
    462          * CPU and overwrite the part of its own stack that was also used by
    463          * the scheduler on this CPU.
    464          *
    465          * Moreover, we have to bypass the compiler-generated POP sequence
    466          * which is fooled by SP being set to the very top of the stack.
    467          * Therefore the scheduler() function continues in
    468          * scheduler_separated_stack().
    469          *
    470          */
    471         context_t ctx;
    472         context_create(&ctx, scheduler_separated_stack,
    473             CPU_LOCAL->stack, STACK_SIZE);
    474 
    475         /* Switch to scheduler context and store current thread's context. */
    476         context_swap(&THREAD->saved_context, &ctx);
    477 
    478         /* Returned from scheduler. */
    479 
    480         irq_spinlock_unlock(&THREAD->lock, false);
    481         interrupts_restore(ipl);
    482 }
    483 
    484 /** Scheduler stack switch wrapper
    485  *
    486  * Second part of the scheduler() function
    487  * using new stack. Handling the actual context
    488  * switch to a new thread.
    489  *
    490  */
    491 void scheduler_separated_stack(void)
    492 {
    493         assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
    494         assert(CPU != NULL);
    495         assert(interrupts_disabled());
    496 
    497478        if (atomic_load(&haltstate))
    498479                halt();
    499480
    500         if (THREAD) {
    501                 /*
    502                  * On Sparc, this saves some extra userspace state that's not
    503                  * covered by context_save()/context_restore().
    504                  */
    505                 after_thread_ran_arch();
    506 
    507                 state_t state = THREAD->state;
    508                 irq_spinlock_unlock(&THREAD->lock, false);
    509 
    510                 cleanup_after_thread(THREAD, state);
    511 
     481        /* Check if we have a thread to switch to. */
     482
     483        int rq_index;
     484        thread_t *new_thread = try_find_thread(&rq_index);
     485
     486        if (new_thread == NULL && new_state == Running) {
     487                /* No other thread to run, but we still have work to do here. */
     488                interrupts_restore(ipl);
     489                return;
     490        }
     491
     492        atomic_set_unordered(&THREAD->state, new_state);
     493
     494        /* Update thread kernel accounting */
     495        atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
     496
     497        fpu_cleanup();
     498
     499        /*
     500         * On Sparc, this saves some extra userspace state that's not
     501         * covered by context_save()/context_restore().
     502         */
     503        after_thread_ran_arch();
     504
     505        if (new_thread) {
     506                thread_t *old_thread = THREAD;
     507                CPU_LOCAL->prev_thread = old_thread;
     508                THREAD = new_thread;
     509                /* No waiting necessary, we can switch to the new thread directly. */
     510                prepare_to_run_thread(rq_index);
     511
     512                current_copy(CURRENT, (current_t *) new_thread->kstack);
     513                context_swap(&old_thread->saved_context, &new_thread->saved_context);
     514        } else {
     515                /*
     516                 * A new thread isn't immediately available, switch to a separate
     517                 * stack to sleep or do other idle stuff.
     518                 */
     519                current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     520                context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
     521        }
     522
     523        assert(CURRENT->mutex_locks == 0);
     524        assert(interrupts_disabled());
     525
     526        /* Check if we need to clean up after another thread. */
     527        if (CPU_LOCAL->prev_thread) {
     528                cleanup_after_thread(CPU_LOCAL->prev_thread);
     529                CPU_LOCAL->prev_thread = NULL;
     530        }
     531
     532        interrupts_restore(ipl);
     533}
     534
     535/** Enter main scheduler loop. Never returns.
     536 *
     537 * This function switches to a runnable thread as soon as one is available,
     538 * after which it is only switched back to if a thread is stopping and there is
     539 * no other thread to run in its place. We need a separate context for that
     540 * because we're going to block the CPU, which means we need another context
     541 * to clean up after the previous thread.
     542 */
     543void scheduler_run(void)
     544{
     545        assert(interrupts_disabled());
     546
     547        assert(CPU != NULL);
     548        assert(TASK == NULL);
     549        assert(THREAD == NULL);
     550        assert(interrupts_disabled());
     551
     552        while (!atomic_load(&haltstate)) {
     553                assert(CURRENT->mutex_locks == 0);
     554
     555                int rq_index;
     556                THREAD = find_best_thread(&rq_index);
     557                prepare_to_run_thread(rq_index);
     558
     559                /*
     560                 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     561                 * thread's stack.
     562                 */
     563                current_copy(CURRENT, (current_t *) THREAD->kstack);
     564
     565                /* Switch to thread context. */
     566                context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
     567
     568                /* Back from another thread. */
     569                assert(CPU != NULL);
     570                assert(THREAD != NULL);
     571                assert(CURRENT->mutex_locks == 0);
     572                assert(interrupts_disabled());
     573
     574                cleanup_after_thread(THREAD);
     575
     576                /*
     577                 * Necessary because we're allowing interrupts in find_best_thread(),
     578                 * so we need to avoid other code referencing the thread we left.
     579                 */
    512580                THREAD = NULL;
    513581        }
    514582
    515         int rq_index;
    516         THREAD = find_best_thread(&rq_index);
    517 
    518         prepare_to_run_thread(rq_index);
    519 
    520         /*
    521          * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
    522          * thread's stack.
    523          */
    524         current_copy(CURRENT, (current_t *) THREAD->kstack);
    525 
    526         context_restore(&THREAD->saved_context);
     583        halt();
     584}
     585
     586/** Thread wrapper.
     587 *
     588 * This wrapper is provided to ensure that a starting thread properly handles
     589 * everything it needs to do when first scheduled, and when it exits.
     590 */
     591void thread_main_func(void)
     592{
     593        assert(interrupts_disabled());
     594
     595        void (*f)(void *) = THREAD->thread_code;
     596        void *arg = THREAD->thread_arg;
     597
     598        /* This is where each thread wakes up after its creation */
     599
     600        /* Check if we need to clean up after another thread. */
     601        if (CPU_LOCAL->prev_thread) {
     602                cleanup_after_thread(CPU_LOCAL->prev_thread);
     603                CPU_LOCAL->prev_thread = NULL;
     604        }
     605
     606        interrupts_enable();
     607
     608        f(arg);
     609
     610        thread_exit();
    527611
    528612        /* Not reached */
     
    550634        list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
    551635
    552                 irq_spinlock_lock(&thread->lock, false);
    553 
    554636                /*
    555637                 * Do not steal CPU-wired threads, threads
     
    558640                 * FPU context is still in the CPU.
    559641                 */
    560                 if (thread->stolen || thread->nomigrate ||
    561                     thread == fpu_owner) {
    562                         irq_spinlock_unlock(&thread->lock, false);
     642                if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
    563643                        continue;
    564644                }
    565645
    566646                thread->stolen = true;
    567                 thread->cpu = CPU;
    568 
    569                 irq_spinlock_unlock(&thread->lock, false);
     647                atomic_set_unordered(&thread->cpu, CPU);
    570648
    571649                /*
     
    712790                            thread) {
    713791                                printf("%" PRIu64 "(%s) ", thread->tid,
    714                                     thread_states[thread->state]);
     792                                    thread_states[atomic_get_unordered(&thread->state)]);
    715793                        }
    716794                        printf("\n");
  • kernel/generic/src/proc/task.c

    r ed7e057   r a5b5f17
    506506        /* Current values of threads */
    507507        list_foreach(task->threads, th_link, thread_t, thread) {
    508                 irq_spinlock_lock(&thread->lock, false);
    509 
    510508                /* Process only counted threads */
    511509                if (!thread->uncounted) {
     
    515513                        }
    516514
    517                         uret += thread->ucycles;
    518                         kret += thread->kcycles;
     515                        uret += atomic_time_read(&thread->ucycles);
     516                        kret += atomic_time_read(&thread->kcycles);
    519517                }
    520 
    521                 irq_spinlock_unlock(&thread->lock, false);
    522518        }
    523519
  • kernel/generic/src/proc/thread.c

    r ed7e057   r a5b5f17
    108108static int threads_cmp(void *, void *);
    109109
    110 /** Thread wrapper.
    111  *
    112  * This wrapper is provided to ensure that every thread makes a call to
    113  * thread_exit() when its implementing function returns.
    114  *
    115  * interrupts_disable() is assumed.
    116  *
    117  */
    118 static void cushion(void)
    119 {
    120         void (*f)(void *) = THREAD->thread_code;
    121         void *arg = THREAD->thread_arg;
    122 
    123         /* This is where each thread wakes up after its creation */
    124         irq_spinlock_unlock(&THREAD->lock, false);
    125         interrupts_enable();
    126 
    127         f(arg);
    128 
    129         thread_exit();
    130 
    131         /* Not reached */
    132 }
    133 
    134110/** Initialization and allocation for thread_t structure
    135111 *
     
    139115        thread_t *thread = (thread_t *) obj;
    140116
    141         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    142117        link_initialize(&thread->rq_link);
    143118        link_initialize(&thread->wq_link);
     
    221196void thread_wire(thread_t *thread, cpu_t *cpu)
    222197{
    223         irq_spinlock_lock(&thread->lock, true);
    224         thread->cpu = cpu;
     198        ipl_t ipl = interrupts_disable();
     199        atomic_set_unordered(&thread->cpu, cpu);
    225200        thread->nomigrate++;
    226         irq_spinlock_unlock(&thread->lock, true);
     201        interrupts_restore(ipl);
    227202}
    228203
     
    233208void thread_start(thread_t *thread)
    234209{
    235         assert(thread->state == Entering);
    236         thread_ready(thread_ref(thread));
    237 }
    238 
    239 /** Make thread ready
    240  *
    241  * Switch thread to the ready state. Consumes reference passed by the caller.
    242  *
    243  * @param thread Thread to make ready.
    244  *
    245  */
    246 void thread_ready(thread_t *thread)
    247 {
    248         irq_spinlock_lock(&thread->lock, true);
    249 
    250         assert(thread->state != Ready);
    251 
    252         int i = (thread->priority < RQ_COUNT - 1) ?
    253             ++thread->priority : thread->priority;
    254 
    255         /* Prefer the CPU on which the thread ran last */
    256         cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
    257 
    258         thread->state = Ready;
    259 
    260         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
    261 
    262         /*
    263          * Append thread to respective ready queue
    264          * on respective processor.
    265          */
    266 
    267         list_append(&thread->rq_link, &cpu->rq[i].rq);
    268         cpu->rq[i].n++;
    269         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    270 
    271         atomic_inc(&nrdy);
    272         atomic_inc(&cpu->nrdy);
     210        assert(atomic_get_unordered(&thread->state) == Entering);
     211        thread_requeue_sleeping(thread_ref(thread));
    273212}
    274213
     
    309248        irq_spinlock_unlock(&tidlock, true);
    310249
    311         context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
     250        context_create(&thread->saved_context, thread_main_func,
     251            thread->kstack, STACK_SIZE);
    312252
    313253        current_initialize((current_t *) thread->kstack);
     
    317257        thread->thread_code = func;
    318258        thread->thread_arg = arg;
    319         thread->ucycles = 0;
    320         thread->kcycles = 0;
     259        thread->ucycles = ATOMIC_TIME_INITIALIZER();
     260        thread->kcycles = ATOMIC_TIME_INITIALIZER();
    321261        thread->uncounted =
    322262            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    323         thread->priority = -1;          /* Start in rq[0] */
    324         thread->cpu = NULL;
     263        atomic_init(&thread->priority, 0);
     264        atomic_init(&thread->cpu, NULL);
    325265        thread->stolen = false;
    326266        thread->uspace =
     
    328268
    329269        thread->nomigrate = 0;
    330         thread->state = Entering;
     270        atomic_init(&thread->state, Entering);
    331271
    332272        atomic_init(&thread->sleep_queue, NULL);
     
    348288#ifdef CONFIG_UDEBUG
    349289        /* Initialize debugging stuff */
    350         thread->btrace = false;
     290        atomic_init(&thread->btrace, false);
    351291        udebug_thread_initialize(&thread->udebug);
    352292#endif
     
    392332
    393333        if (!thread->uncounted) {
    394                 thread->task->ucycles += thread->ucycles;
    395                 thread->task->kcycles += thread->kcycles;
     334                thread->task->ucycles += atomic_time_read(&thread->ucycles);
     335                thread->task->kcycles += atomic_time_read(&thread->kcycles);
    396336        }
    397337
    398338        irq_spinlock_unlock(&thread->task->lock, false);
    399339
    400         assert((thread->state == Exiting) || (thread->state == Lingering));
     340        assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
    401341
    402342        /* Clear cpu->fpu_owner if set to this thread. */
    403343#ifdef CONFIG_FPU_LAZY
    404         if (thread->cpu) {
     344        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     345        if (cpu) {
    405346                /*
    406347                 * We need to lock for this because the old CPU can concurrently try
     
    408349                 * it to finish. An atomic compare-and-swap wouldn't be enough.
    409350                 */
    410                 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
    411 
    412                 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
    413                     memory_order_relaxed);
    414 
    415                 if (owner == thread) {
    416                         atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
    417                             memory_order_relaxed);
    418                 }
    419 
    420                 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
     351                irq_spinlock_lock(&cpu->fpu_lock, false);
     352
     353                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
     354                        atomic_set_unordered(&cpu->fpu_owner, NULL);
     355
     356                irq_spinlock_unlock(&cpu->fpu_lock, false);
    421357        }
    422358#endif
     
    635571                 * the waking thread by the sleeper in thread_wait_finish().
    636572                 */
    637                 thread_ready(thread);
     573                thread_requeue_sleeping(thread);
    638574        }
    639575}
     
    642578void thread_migration_disable(void)
    643579{
     580        ipl_t ipl = interrupts_disable();
     581
    644582        assert(THREAD);
    645 
    646583        THREAD->nomigrate++;
     584
     585        interrupts_restore(ipl);
    647586}
    648587
     
    650589void thread_migration_enable(void)
    651590{
     591        ipl_t ipl = interrupts_disable();
     592
    652593        assert(THREAD);
    653594        assert(THREAD->nomigrate > 0);
     
    655596        if (THREAD->nomigrate > 0)
    656597                THREAD->nomigrate--;
     598
     599        interrupts_restore(ipl);
    657600}
    658601
     
    700643                return EINVAL;
    701644
    702         irq_spinlock_lock(&thread->lock, true);
    703         state_t state = thread->state;
    704         irq_spinlock_unlock(&thread->lock, true);
    705 
    706         errno_t rc = EOK;
    707 
    708         if (state != Exiting)
    709                 rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
     645        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    710646
    711647        if (rc == EOK)
     
    747683        uint64_t ucycles, kcycles;
    748684        char usuffix, ksuffix;
    749         order_suffix(thread->ucycles, &ucycles, &usuffix);
    750         order_suffix(thread->kcycles, &kcycles, &ksuffix);
     685        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
     686        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
     687
     688        state_t state = atomic_get_unordered(&thread->state);
    751689
    752690        char *name;
     
    762700        else
    763701                printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    764                     thread->tid, name, thread, thread_states[thread->state],
     702                    thread->tid, name, thread, thread_states[state],
    765703                    thread->task, thread->task->container);
    766704
    767705        if (additional) {
    768                 if (thread->cpu)
    769                         printf("%-5u", thread->cpu->id);
     706                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     707                if (cpu)
     708                        printf("%-5u", cpu->id);
    770709                else
    771710                        printf("none ");
    772711
    773                 if (thread->state == Sleeping) {
     712                if (state == Sleeping) {
    774713                        printf(" %p", thread->sleep_queue);
    775714                }
     
    850789void thread_update_accounting(bool user)
    851790{
     791        assert(interrupts_disabled());
     792
    852793        uint64_t time = get_cycle();
    853794
    854         assert(interrupts_disabled());
    855         assert(irq_spinlock_locked(&THREAD->lock));
    856 
    857795        if (user)
    858                 THREAD->ucycles += time - THREAD->last_cycle;
     796                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
    859797        else
    860                 THREAD->kcycles += time - THREAD->last_cycle;
     798                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
    861799
    862800        THREAD->last_cycle = time;
     
    969907         */
    970908
    971         irq_spinlock_lock(&thread->lock, true);
    972 
    973         bool sleeping = false;
    974         istate_t *istate = thread->udebug.uspace_state;
    975         if (istate != NULL) {
    976                 printf("Scheduling thread stack trace.\n");
    977                 thread->btrace = true;
    978                 if (thread->state == Sleeping)
    979                         sleeping = true;
    980         } else
    981                 printf("Thread interrupt state not available.\n");
    982 
    983         irq_spinlock_unlock(&thread->lock, true);
    984 
    985         if (sleeping)
    986                 thread_wakeup(thread);
    987 
     909        printf("Scheduling thread stack trace.\n");
     910        atomic_set_unordered(&thread->btrace, true);
     911
     912        thread_wakeup(thread);
    988913        thread_put(thread);
    989914}
     
    10861011                thread_attach(thread, TASK);
    10871012#endif
    1088                 thread_ready(thread);
     1013                thread_start(thread);
     1014                thread_put(thread);
    10891015
    10901016                return 0;
  • kernel/generic/src/syscall/syscall.c

    r ed7e057   r a5b5f17
    141141{
    142142        /* Do userpace accounting */
    143         irq_spinlock_lock(&THREAD->lock, true);
     143        ipl_t ipl = interrupts_disable();
    144144        thread_update_accounting(true);
    145         irq_spinlock_unlock(&THREAD->lock, true);
     145        interrupts_restore(ipl);
    146146
    147147#ifdef CONFIG_UDEBUG
     
    191191
    192192        /* Do kernel accounting */
    193         irq_spinlock_lock(&THREAD->lock, true);
     193        ipl = interrupts_disable();
    194194        thread_update_accounting(false);
    195         irq_spinlock_unlock(&THREAD->lock, true);
     195        interrupts_restore(ipl);
    196196
    197197        return rc;
  • kernel/generic/src/sysinfo/stats.c

    r ed7e057   r a5b5f17
    299299{
    300300        assert(interrupts_disabled());
    301         assert(irq_spinlock_locked(&thread->lock));
    302301
    303302        stats_thread->thread_id = thread->tid;
    304303        stats_thread->task_id = thread->task->taskid;
    305         stats_thread->state = thread->state;
    306         stats_thread->priority = thread->priority;
    307         stats_thread->ucycles = thread->ucycles;
    308         stats_thread->kcycles = thread->kcycles;
    309 
    310         if (thread->cpu != NULL) {
     304        stats_thread->state = atomic_get_unordered(&thread->state);
     305        stats_thread->priority = atomic_get_unordered(&thread->priority);
     306        stats_thread->ucycles = atomic_time_read(&thread->ucycles);
     307        stats_thread->kcycles = atomic_time_read(&thread->kcycles);
     308
     309        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     310
     311        if (cpu != NULL) {
    311312                stats_thread->on_cpu = true;
    312                 stats_thread->cpu = thread->cpu->id;
     313                stats_thread->cpu = cpu->id;
    313314        } else
    314315                stats_thread->on_cpu = false;
     
    361362        thread_t *thread = thread_first();
    362363        while (thread != NULL) {
    363                 /* Interrupts are already disabled */
    364                 irq_spinlock_lock(&thread->lock, false);
    365 
    366364                /* Record the statistics and increment the index */
    367365                produce_stats_thread(thread, &stats_threads[i]);
    368366                i++;
    369 
    370                 irq_spinlock_unlock(&thread->lock, false);
    371367
    372368                thread = thread_next(thread);
     
    624620                ret.data.size = sizeof(stats_thread_t);
    625621
    626                 /*
    627                  * Replaced hand-over-hand locking with regular nested sections
    628                  * to avoid weak reference leak issues.
    629                  */
    630                 irq_spinlock_lock(&thread->lock, false);
    631622                produce_stats_thread(thread, stats_thread);
    632                 irq_spinlock_unlock(&thread->lock, false);
    633623
    634624                irq_spinlock_unlock(&threads_lock, true);
  • kernel/generic/src/time/clock.c

    r ed7e057   r a5b5f17
    123123static void cpu_update_accounting(void)
    124124{
     125        // FIXME: get_cycle() is unimplemented on several platforms
    125126        uint64_t now = get_cycle();
    126127        atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
  • kernel/generic/src/udebug/udebug_ops.c

    r ed7e057   r a5b5f17
    9090        }
    9191
    92         irq_spinlock_lock(&thread->lock, true);
    93 
    9492        /* Verify that 'thread' is a userspace thread. */
    9593        if (!thread->uspace) {
    96                 /* It's not, deny its existence */
    97                 irq_spinlock_unlock(&thread->lock, true);
    9894                mutex_unlock(&TASK->udebug.lock);
    9995                return ENOENT;
    10096        }
    101 
    102         /* Verify debugging state. */
    103         if (thread->udebug.active != true) {
    104                 /* Not in debugging session or undesired GO state */
    105                 irq_spinlock_unlock(&thread->lock, true);
    106                 mutex_unlock(&TASK->udebug.lock);
    107                 return ENOENT;
    108         }
    109 
    110         /* Now verify that the thread belongs to the current task. */
    111         if (thread->task != TASK) {
    112                 /* No such thread belonging this task */
    113                 irq_spinlock_unlock(&thread->lock, true);
    114                 mutex_unlock(&TASK->udebug.lock);
    115                 return ENOENT;
    116         }
    117 
    118         irq_spinlock_unlock(&thread->lock, true);
    119 
    120         /* Only mutex TASK->udebug.lock left. */
    12197
    12298        /*
     
    126102         */
    127103        mutex_lock(&thread->udebug.lock);
     104
     105        /* Verify debugging state. */
     106        if (thread->udebug.active != true) {
     107                /* Not in debugging session or undesired GO state */
     108                mutex_unlock(&thread->udebug.lock);
     109                mutex_unlock(&TASK->udebug.lock);
     110                return ENOENT;
     111        }
     112
     113        /* Now verify that the thread belongs to the current task. */
     114        if (thread->task != TASK) {
     115                /* No such thread belonging this task */
     116                mutex_unlock(&thread->udebug.lock);
     117                mutex_unlock(&TASK->udebug.lock);
     118                return ENOENT;
     119        }
    128120
    129121        /* The big task mutex is no longer needed. */
     
    388380        /* FIXME: make sure the thread isn't past debug shutdown... */
    389381        list_foreach(TASK->threads, th_link, thread_t, thread) {
    390                 irq_spinlock_lock(&thread->lock, false);
    391382                bool uspace = thread->uspace;
    392                 irq_spinlock_unlock(&thread->lock, false);
    393383
    394384                /* Not interested in kernel threads. */