Changes in / [a5b5f17:ed7e057] in mainline


Location: kernel
Files: 1 added, 16 edited

  • kernel/arch/sparc64/src/proc/sun4u/scheduler.c

    ra5b5f17 red7e057  
    7676{
    7777        if (THREAD->uspace) {
    78                 asm volatile ("flushw");
    79 
    8078                /* sample the state of the userspace window buffer */
    8179                THREAD->arch.uspace_window_buffer =
  • kernel/arch/sparc64/src/proc/sun4v/scheduler.c

    ra5b5f17 red7e057  
    6868{
    6969        if (THREAD->uspace) {
    70                 asm volatile ("flushw");
    71 
    7270                /* sample the state of the userspace window buffer */
    7371                THREAD->arch.uspace_window_buffer =
  • kernel/generic/include/atomic.h

    ra5b5f17 red7e057  
    3939#include <typedefs.h>
    4040#include <stdatomic.h>
    41 
    42 /*
    43  * Shorthand for relaxed atomic read/write, something that's needed to formally
    44  * avoid undefined behavior in cases where we need to read a variable in
    45  * different threads and we don't particularly care about ordering
    46  * (e.g. statistic printouts). This is most likely translated into the same
    47  * assembly instructions as regular read/writes.
    48  */
    49 #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    50 #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)
    5141
    5242#define atomic_predec(val) \
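
A minimal, self-contained C11 sketch of the usage the comment on atomic_get_unordered / atomic_set_unordered (a5b5f17 side) describes: formally data-race-free reads and writes of a value shared between threads where no ordering is needed, e.g. statistics printouts. The counter name below is illustrative, not taken from the changeset.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The shorthands exactly as they appear in the hunk above. */
    #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    /* Illustrative statistics counter; any thread may read it at any time. */
    static atomic_uint_fast64_t sample_cycles;

    int main(void)
    {
        /* Writer side: a relaxed store, usually the same instruction as a plain store. */
        atomic_set_unordered(&sample_cycles, 42);

        /*
         * Reader side, e.g. a statistics printout running in another thread:
         * the value read is always well-defined, only its ordering relative
         * to other memory operations is left unspecified.
         */
        printf("cycles = %" PRIu64 "\n", (uint64_t) atomic_get_unordered(&sample_cycles));
        return 0;
    }
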
  • kernel/generic/include/cpu.h

    ra5b5f17 red7e057  
    7474        bool idle;
    7575        uint64_t last_cycle;
    76 
    77         context_t scheduler_context;
    78 
    79         struct thread *prev_thread;
    8076} cpu_local_t;
    8177
  • kernel/generic/include/proc/scheduler.h

    ra5b5f17 red7e057  
    6464extern void scheduler_enter(state_t);
    6565
    66 extern void thread_main_func(void);
    67 
    6866/*
    6967 * To be defined by architectures.
  • kernel/generic/include/proc/thread.h

    ra5b5f17 red7e057  
    9595        waitq_t join_wq;
    9696
    97         /** Thread accounting. */
    98         atomic_time_stat_t ucycles;
    99         atomic_time_stat_t kcycles;
    100 
    101         /** Architecture-specific data. */
    102         thread_arch_t arch;
    103 
    104 #ifdef CONFIG_UDEBUG
    105         /**
    106          * If true, the scheduler will print a stack trace
    107          * to the kernel console upon scheduling this thread.
    108          */
    109         atomic_int_fast8_t btrace;
    110 
    111         /** Debugging stuff */
    112         udebug_thread_t udebug;
    113 #endif /* CONFIG_UDEBUG */
    114 
    115         /*
    116          * Immutable fields.
     97        /** Lock protecting thread structure.
    11798         *
    118          * These fields are only modified during initialization, and are not
    119          * changed at any time between initialization and destruction.
    120          * Can be accessed without synchronization in most places.
    121          */
    122 
    123         /** Thread ID. */
    124         thread_id_t tid;
     99         * Protects the whole thread structure except fields listed above.
     100         */
     101        IRQ_SPINLOCK_DECLARE(lock);
     102
     103        char name[THREAD_NAME_BUFLEN];
    125104
    126105        /** Function implementing the thread. */
     
    129108        void *thread_arg;
    130109
    131         char name[THREAD_NAME_BUFLEN];
    132 
     110        /**
     111         * From here, the stored context is restored
     112         * when the thread is scheduled.
     113         */
     114        context_t saved_context;
     115
     116        /**
     117         * True if this thread is executing copy_from_uspace().
     118         * False otherwise.
     119         */
     120        bool in_copy_from_uspace;
     121
     122        /**
     123         * True if this thread is executing copy_to_uspace().
     124         * False otherwise.
     125         */
     126        bool in_copy_to_uspace;
     127
     128#ifdef CONFIG_FPU
     129        fpu_context_t fpu_context;
     130#endif
     131        bool fpu_context_exists;
     132
     133        /* The thread will not be migrated if nomigrate is non-zero. */
     134        unsigned int nomigrate;
     135
     136        /** Thread state. */
     137        state_t state;
     138
     139        /** Thread CPU. */
     140        cpu_t *cpu;
     141        /** Containing task. */
     142        task_t *task;
     143        /** Thread was migrated to another CPU and has not run yet. */
     144        bool stolen;
    133145        /** Thread is executed in user space. */
    134146        bool uspace;
    135147
     148        /** Thread accounting. */
     149        uint64_t ucycles;
     150        uint64_t kcycles;
     151        /** Last sampled cycle. */
     152        uint64_t last_cycle;
    136153        /** Thread doesn't affect accumulated accounting. */
    137154        bool uncounted;
    138155
    139         /** Containing task. */
    140         task_t *task;
     156        /** Thread's priority. Implemented as index to CPU->rq */
     157        int priority;
     158        /** Thread ID. */
     159        thread_id_t tid;
     160
     161        /** Architecture-specific data. */
     162        thread_arch_t arch;
    141163
    142164        /** Thread's kernel stack. */
    143165        uint8_t *kstack;
    144166
    145         /*
    146          * Local fields.
    147          *
    148          * These fields can be safely accessed from code that _controls execution_
    149          * of this thread. Code controls execution of a thread if either:
    150          *  - it runs in the context of said thread AND interrupts are disabled
    151          *    (interrupts can and will access these fields)
    152          *  - the thread is not running, and the code accessing it can legally
    153          *    add/remove the thread to/from a runqueue, i.e., either:
    154          *    - it is allowed to enqueue thread in a new runqueue
    155          *    - it holds the lock to the runqueue containing the thread
    156          *
    157          */
    158 
    159         /**
    160          * From here, the stored context is restored
    161          * when the thread is scheduled.
    162          */
    163         context_t saved_context;
    164 
    165         // TODO: we only need one of the two bools below
    166 
    167         /**
    168          * True if this thread is executing copy_from_uspace().
    169          * False otherwise.
    170          */
    171         bool in_copy_from_uspace;
    172 
    173         /**
    174          * True if this thread is executing copy_to_uspace().
    175          * False otherwise.
    176          */
    177         bool in_copy_to_uspace;
    178 
    179         /*
    180          * FPU context is a special case. If lazy FPU switching is disabled,
    181          * it acts as a regular local field. However, if lazy switching is enabled,
    182          * the context is synchronized via CPU->fpu_lock
    183          */
    184 #ifdef CONFIG_FPU
    185         fpu_context_t fpu_context;
    186 #endif
    187         bool fpu_context_exists;
    188 
    189         /* The thread will not be migrated if nomigrate is non-zero. */
    190         unsigned int nomigrate;
    191 
    192         /** Thread was migrated to another CPU and has not run yet. */
    193         bool stolen;
    194 
    195         /**
    196          * Thread state (state_t).
    197          * This is atomic because we read it via some commands for debug output,
    198          * otherwise it could just be a regular local.
    199          */
    200         atomic_int_fast32_t state;
    201 
    202         /** Thread CPU. */
    203         _Atomic(cpu_t *) cpu;
    204 
    205         /** Thread's priority. Implemented as index to CPU->rq */
    206         atomic_int_fast32_t priority;
    207 
    208         /** Last sampled cycle. */
    209         uint64_t last_cycle;
     167#ifdef CONFIG_UDEBUG
     168        /**
     169         * If true, the scheduler will print a stack trace
     170         * to the kernel console upon scheduling this thread.
     171         */
     172        bool btrace;
     173
     174        /** Debugging stuff */
     175        udebug_thread_t udebug;
     176#endif /* CONFIG_UDEBUG */
    210177} thread_t;
    211178
     
    219186extern void thread_attach(thread_t *, task_t *);
    220187extern void thread_start(thread_t *);
    221 extern void thread_requeue_sleeping(thread_t *);
     188extern void thread_ready(thread_t *);
    222189extern void thread_exit(void) __attribute__((noreturn));
    223190extern void thread_interrupt(thread_t *);
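
On the a5b5f17 side of this hunk, thread_t fields are grouped by access rule: immutable fields set only during initialization, local fields owned by whichever code controls the thread's execution, and a few fields (state, cpu, priority, the cycle counters, btrace) kept as atomics so debug and statistics code can read them without a lock; the ed7e057 side instead protects most of the structure with the per-thread spinlock. A rough userspace-only sketch of the lock-free discipline, with purely illustrative names:

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        /* Immutable after initialization: readable anywhere without synchronization. */
        uint64_t tid;

        /* Local: touched only by code that controls this thread's execution. */
        uint64_t last_cycle;

        /* Debug-visible: other threads may read these, hence relaxed atomics. */
        atomic_int_fast32_t state;
        atomic_uint_fast64_t kcycles;
    } toy_thread_t;

    /* May run concurrently with the owner; reads only the atomic fields. */
    static void debug_print(toy_thread_t *t)
    {
        printf("tid %" PRIu64 ": state %d, kcycles %" PRIu64 "\n", t->tid,
            (int) atomic_load_explicit(&t->state, memory_order_relaxed),
            (uint64_t) atomic_load_explicit(&t->kcycles, memory_order_relaxed));
    }

    int main(void)
    {
        toy_thread_t t = { .tid = 1, .last_cycle = 0 };
        atomic_init(&t.state, 0);
        atomic_init(&t.kcycles, 0);

        /* The owning context updates its own local fields directly... */
        t.last_cycle = 100;
        /* ...and publishes debug-visible values with relaxed atomic stores. */
        atomic_store_explicit(&t.state, 2, memory_order_relaxed);
        atomic_fetch_add_explicit(&t.kcycles, 100, memory_order_relaxed);

        debug_print(&t);
        return 0;
    }
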
  • kernel/generic/meson.build

    ra5b5f17 red7e057  
    9595        'src/mm/malloc.c',
    9696        'src/mm/reserve.c',
     97        'src/preempt/preemption.c',
    9798        'src/printf/printf.c',
    9899        'src/printf/snprintf.c',
  • kernel/generic/src/interrupt/interrupt.c

    ra5b5f17 red7e057  
    114114
    115115        /* Account user cycles */
    116         if (THREAD)
     116        if (THREAD) {
     117                irq_spinlock_lock(&THREAD->lock, false);
    117118                thread_update_accounting(true);
     119                irq_spinlock_unlock(&THREAD->lock, false);
     120        }
    118121
    119122        /* Account CPU usage if it woke up from sleep */
     
    152155
    153156        /* Do not charge THREAD for exception cycles */
    154         if (THREAD)
     157        if (THREAD) {
     158                irq_spinlock_lock(&THREAD->lock, false);
    155159                THREAD->last_cycle = end_cycle;
     160                irq_spinlock_unlock(&THREAD->lock, false);
     161        }
    156162#else
    157163        panic("No space for any exception handler, yet we want to handle some exception.");
  • kernel/generic/src/main/main.c

    ra5b5f17 red7e057  
    287287         * starting the thread of kernel threads.
    288288         */
    289         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    290         context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
     289        scheduler_run();
    291290        /* not reached */
    292291}
     
    328327        ARCH_OP(post_cpu_init);
    329328
     329        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     330
    330331        /*
    331332         * If we woke kmp up before we left the kernel stack, we could
     
    333334         * switch to this cpu's private stack prior to waking kmp up.
    334335         */
    335         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    336336        context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
    337337        /* not reached */
  • kernel/generic/src/proc/scheduler.c

    ra5b5f17 red7e057  
    11/*
    22 * Copyright (c) 2010 Jakub Jermar
    3  * Copyright (c) 2023 Jiří Zárevúcky
    43 * All rights reserved.
    54 *
     
    5150#include <time/delay.h>
    5251#include <arch/asm.h>
     52#include <arch/faddr.h>
    5353#include <arch/cycle.h>
    5454#include <atomic.h>
     
    6666#include <stacktrace.h>
    6767
     68static void scheduler_separated_stack(void);
     69
    6870atomic_size_t nrdy;  /**< Number of ready threads in the system. */
    6971
     
    225227static void relink_rq(int start)
    226228{
    227         assert(interrupts_disabled());
    228 
    229229        if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
    230230                return;
     
    302302}
    303303
     304void scheduler_run(void)
     305{
     306        assert(interrupts_disabled());
     307        assert(THREAD == NULL);
     308        assert(CPU != NULL);
     309
     310        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     311        context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
     312        unreachable();
     313}
     314
    304315/** Things to do before we switch to THREAD context.
    305316 */
     
    310321        switch_task(THREAD->task);
    311322
    312         assert(atomic_get_unordered(&THREAD->cpu) == CPU);
    313 
    314         atomic_set_unordered(&THREAD->state, Running);
    315         atomic_set_unordered(&THREAD->priority, rq_index);  /* Correct rq index */
     323        irq_spinlock_lock(&THREAD->lock, false);
     324        THREAD->state = Running;
     325        THREAD->cpu = CPU;
     326        THREAD->priority = rq_index;  /* Correct rq index */
    316327
    317328        /*
     
    324335        log(LF_OTHER, LVL_DEBUG,
    325336            "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
    326             ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
     337            ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
    327338            THREAD->ticks, atomic_load(&CPU->nrdy));
    328339#endif
     
    339350
    340351#ifdef CONFIG_UDEBUG
    341         if (atomic_get_unordered(&THREAD->btrace)) {
     352        if (THREAD->btrace) {
    342353                istate_t *istate = THREAD->udebug.uspace_state;
    343354                if (istate != NULL) {
    344355                        printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
    345356                        stack_trace_istate(istate);
    346                 } else {
    347                         printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
    348357                }
    349358
    350                 atomic_set_unordered(&THREAD->btrace, false);
     359                THREAD->btrace = false;
    351360        }
    352361#endif
     
    365374}
    366375
    367 static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
    368 {
    369         /* Add to the appropriate runqueue. */
    370         runq_t *rq = &cpu->rq[i];
    371 
    372         irq_spinlock_lock(&rq->lock, false);
    373         list_append(&thread->rq_link, &rq->rq);
    374         rq->n++;
    375         irq_spinlock_unlock(&rq->lock, false);
    376 
    377         atomic_inc(&nrdy);
    378         atomic_inc(&cpu->nrdy);
    379 }
    380 
    381 /** Requeue a thread that was just preempted on this CPU.
    382  */
    383 static void thread_requeue_preempted(thread_t *thread)
    384 {
    385         assert(interrupts_disabled());
    386         assert(atomic_get_unordered(&thread->state) == Running);
    387         assert(atomic_get_unordered(&thread->cpu) == CPU);
    388 
    389         int prio = atomic_get_unordered(&thread->priority);
    390 
    391         if (prio < RQ_COUNT - 1) {
    392                 prio++;
    393                 atomic_set_unordered(&thread->priority, prio);
    394         }
    395 
    396         atomic_set_unordered(&thread->state, Ready);
    397 
    398         add_to_rq(thread, CPU, prio);
    399 }
    400 
    401 void thread_requeue_sleeping(thread_t *thread)
    402 {
    403         ipl_t ipl = interrupts_disable();
    404 
    405         assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
    406 
    407         atomic_set_unordered(&thread->priority, 0);
    408         atomic_set_unordered(&thread->state, Ready);
    409 
    410         /* Prefer the CPU on which the thread ran last */
    411         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    412 
    413         if (!cpu) {
    414                 cpu = CPU;
    415                 atomic_set_unordered(&thread->cpu, CPU);
    416         }
    417 
    418         add_to_rq(thread, cpu, 0);
    419 
    420         interrupts_restore(ipl);
    421 }
    422 
    423 static void cleanup_after_thread(thread_t *thread)
     376static void cleanup_after_thread(thread_t *thread, state_t out_state)
    424377{
    425378        assert(CURRENT->mutex_locks == 0);
     
    428381        int expected;
    429382
    430         switch (atomic_get_unordered(&thread->state)) {
     383        switch (out_state) {
    431384        case Running:
    432                 thread_requeue_preempted(thread);
     385                thread_ready(thread);
    433386                break;
    434387
     
    454407                        assert(expected == SLEEP_WOKE);
    455408                        /* The thread has already been woken up, requeue immediately. */
    456                         thread_requeue_sleeping(thread);
     409                        thread_ready(thread);
    457410                }
    458411                break;
     
    463416                 */
    464417                panic("tid%" PRIu64 ": unexpected state %s.",
    465                     thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
     418                    thread->tid, thread_states[thread->state]);
    466419                break;
    467420        }
    468421}
    469422
    470 /** Switch to scheduler context to let other threads run. */
     423/** The scheduler
     424 *
     425 * The thread scheduling procedure.
     426 * Passes control directly to
     427 * scheduler_separated_stack().
     428 *
     429 */
    471430void scheduler_enter(state_t new_state)
    472431{
     
    476435        assert(THREAD != NULL);
    477436
     437        fpu_cleanup();
     438
     439        irq_spinlock_lock(&THREAD->lock, false);
     440        THREAD->state = new_state;
     441
     442        /* Update thread kernel accounting */
     443        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
     444
     445        if (new_state == Sleeping) {
     446                /* Prefer the thread after it's woken up. */
     447                THREAD->priority = -1;
     448        }
     449
     450        /*
     451         * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
     452         * and preemption counter. At this point CURRENT could be coming either
     453         * from THREAD's or CPU's stack.
     454         *
     455         */
     456        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     457
     458        /*
     459         * We may not keep the old stack.
     460         * Reason: If we kept the old stack and got blocked, for instance, in
     461         * find_best_thread(), the old thread could get rescheduled by another
     462         * CPU and overwrite the part of its own stack that was also used by
     463         * the scheduler on this CPU.
     464         *
     465         * Moreover, we have to bypass the compiler-generated POP sequence
     466         * which is fooled by SP being set to the very top of the stack.
     467         * Therefore the scheduler() function continues in
     468         * scheduler_separated_stack().
     469         *
     470         */
     471        context_t ctx;
     472        context_create(&ctx, scheduler_separated_stack,
     473            CPU_LOCAL->stack, STACK_SIZE);
     474
     475        /* Switch to scheduler context and store current thread's context. */
     476        context_swap(&THREAD->saved_context, &ctx);
     477
     478        /* Returned from scheduler. */
     479
     480        irq_spinlock_unlock(&THREAD->lock, false);
     481        interrupts_restore(ipl);
     482}
     483
     484/** Scheduler stack switch wrapper
     485 *
     486 * Second part of the scheduler() function
     487 * using new stack. Handling the actual context
     488 * switch to a new thread.
     489 *
     490 */
     491void scheduler_separated_stack(void)
     492{
     493        assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
     494        assert(CPU != NULL);
     495        assert(interrupts_disabled());
     496
    478497        if (atomic_load(&haltstate))
    479498                halt();
    480499
    481         /* Check if we have a thread to switch to. */
     500        if (THREAD) {
     501                /*
     502                 * On Sparc, this saves some extra userspace state that's not
     503                 * covered by context_save()/context_restore().
     504                 */
     505                after_thread_ran_arch();
     506
     507                state_t state = THREAD->state;
     508                irq_spinlock_unlock(&THREAD->lock, false);
     509
     510                cleanup_after_thread(THREAD, state);
     511
     512                THREAD = NULL;
     513        }
    482514
    483515        int rq_index;
    484         thread_t *new_thread = try_find_thread(&rq_index);
    485 
    486         if (new_thread == NULL && new_state == Running) {
    487                 /* No other thread to run, but we still have work to do here. */
    488                 interrupts_restore(ipl);
    489                 return;
    490         }
    491 
    492         atomic_set_unordered(&THREAD->state, new_state);
    493 
    494         /* Update thread kernel accounting */
    495         atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
    496 
    497         fpu_cleanup();
    498 
    499         /*
    500          * On Sparc, this saves some extra userspace state that's not
    501          * covered by context_save()/context_restore().
    502          */
    503         after_thread_ran_arch();
    504 
    505         if (new_thread) {
    506                 thread_t *old_thread = THREAD;
    507                 CPU_LOCAL->prev_thread = old_thread;
    508                 THREAD = new_thread;
    509                 /* No waiting necessary, we can switch to the new thread directly. */
    510                 prepare_to_run_thread(rq_index);
    511 
    512                 current_copy(CURRENT, (current_t *) new_thread->kstack);
    513                 context_swap(&old_thread->saved_context, &new_thread->saved_context);
    514         } else {
    515                 /*
    516                  * A new thread isn't immediately available, switch to a separate
    517                  * stack to sleep or do other idle stuff.
    518                  */
    519                 current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
    520                 context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
    521         }
    522 
    523         assert(CURRENT->mutex_locks == 0);
    524         assert(interrupts_disabled());
    525 
    526         /* Check if we need to clean up after another thread. */
    527         if (CPU_LOCAL->prev_thread) {
    528                 cleanup_after_thread(CPU_LOCAL->prev_thread);
    529                 CPU_LOCAL->prev_thread = NULL;
    530         }
    531 
    532         interrupts_restore(ipl);
    533 }
    534 
    535 /** Enter main scheduler loop. Never returns.
    536  *
    537  * This function switches to a runnable thread as soon as one is available,
    538  * after which it is only switched back to if a thread is stopping and there is
    539  * no other thread to run in its place. We need a separate context for that
    540  * because we're going to block the CPU, which means we need another context
    541  * to clean up after the previous thread.
    542  */
    543 void scheduler_run(void)
    544 {
    545         assert(interrupts_disabled());
    546 
    547         assert(CPU != NULL);
    548         assert(TASK == NULL);
    549         assert(THREAD == NULL);
    550         assert(interrupts_disabled());
    551 
    552         while (!atomic_load(&haltstate)) {
    553                 assert(CURRENT->mutex_locks == 0);
    554 
    555                 int rq_index;
    556                 THREAD = find_best_thread(&rq_index);
    557                 prepare_to_run_thread(rq_index);
    558 
    559                 /*
    560                  * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
    561                  * thread's stack.
    562                  */
    563                 current_copy(CURRENT, (current_t *) THREAD->kstack);
    564 
    565                 /* Switch to thread context. */
    566                 context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
    567 
    568                 /* Back from another thread. */
    569                 assert(CPU != NULL);
    570                 assert(THREAD != NULL);
    571                 assert(CURRENT->mutex_locks == 0);
    572                 assert(interrupts_disabled());
    573 
    574                 cleanup_after_thread(THREAD);
    575 
    576                 /*
    577                  * Necessary because we're allowing interrupts in find_best_thread(),
    578                  * so we need to avoid other code referencing the thread we left.
    579                  */
    580                 THREAD = NULL;
    581         }
    582 
    583         halt();
    584 }
    585 
    586 /** Thread wrapper.
    587  *
    588  * This wrapper is provided to ensure that a starting thread properly handles
    589  * everything it needs to do when first scheduled, and when it exits.
    590  */
    591 void thread_main_func(void)
    592 {
    593         assert(interrupts_disabled());
    594 
    595         void (*f)(void *) = THREAD->thread_code;
    596         void *arg = THREAD->thread_arg;
    597 
    598         /* This is where each thread wakes up after its creation */
    599 
    600         /* Check if we need to clean up after another thread. */
    601         if (CPU_LOCAL->prev_thread) {
    602                 cleanup_after_thread(CPU_LOCAL->prev_thread);
    603                 CPU_LOCAL->prev_thread = NULL;
    604         }
    605 
    606         interrupts_enable();
    607 
    608         f(arg);
    609 
    610         thread_exit();
     516        THREAD = find_best_thread(&rq_index);
     517
     518        prepare_to_run_thread(rq_index);
     519
     520        /*
     521         * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
     522         * thread's stack.
     523         */
     524        current_copy(CURRENT, (current_t *) THREAD->kstack);
     525
     526        context_restore(&THREAD->saved_context);
    611527
    612528        /* Not reached */
     
    633549        /* Search rq from the back */
    634550        list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
     551
     552                irq_spinlock_lock(&thread->lock, false);
    635553
    636554                /*
     
    640558                 * FPU context is still in the CPU.
    641559                 */
    642                 if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
     560                if (thread->stolen || thread->nomigrate ||
     561                    thread == fpu_owner) {
     562                        irq_spinlock_unlock(&thread->lock, false);
    643563                        continue;
    644564                }
    645565
    646566                thread->stolen = true;
    647                 atomic_set_unordered(&thread->cpu, CPU);
     567                thread->cpu = CPU;
     568
     569                irq_spinlock_unlock(&thread->lock, false);
    648570
    649571                /*
     
    790712                            thread) {
    791713                                printf("%" PRIu64 "(%s) ", thread->tid,
    792                                     thread_states[atomic_get_unordered(&thread->state)]);
     714                                    thread_states[thread->state]);
    793715                        }
    794716                        printf("\n");
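
On the a5b5f17 side, scheduler_run() owns a dedicated per-CPU scheduler context: the CPU swaps to a runnable thread as soon as one is found and swaps back only when a stopping thread has no successor, so cleanup after the previous thread runs on a stack that thread no longer uses (the ed7e057 side reaches the equivalent point via scheduler_separated_stack()). A very loose userspace analogue of that shape, using POSIX ucontext in place of the kernel's context routines; every name below is illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>

    #define STACK_SZ (64 * 1024)

    static ucontext_t scheduler_ctx;  /* stands in for CPU_LOCAL->scheduler_context */
    static ucontext_t worker_ctx;     /* stands in for a thread's saved_context */
    static int worker_done;

    static void worker(void)
    {
        puts("worker: running on its own stack");
        worker_done = 1;
        /* Stopping with no successor: swap back to the scheduler context. */
        swapcontext(&worker_ctx, &scheduler_ctx);
    }

    int main(void)
    {
        getcontext(&worker_ctx);
        worker_ctx.uc_stack.ss_sp = malloc(STACK_SZ);
        worker_ctx.uc_stack.ss_size = STACK_SZ;
        worker_ctx.uc_link = NULL;
        makecontext(&worker_ctx, worker, 0);

        /* Scheduler loop: run a "thread", regain control only when it stops. */
        while (!worker_done) {
            swapcontext(&scheduler_ctx, &worker_ctx);
            /*
             * Back on the scheduler's own stack: a safe place to clean up
             * after the context we just left (cf. cleanup_after_thread()).
             */
            puts("scheduler: cleaning up after the previous worker");
        }

        free(worker_ctx.uc_stack.ss_sp);
        return 0;
    }
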
  • kernel/generic/src/proc/task.c

    ra5b5f17 red7e057  
    506506        /* Current values of threads */
    507507        list_foreach(task->threads, th_link, thread_t, thread) {
     508                irq_spinlock_lock(&thread->lock, false);
     509
    508510                /* Process only counted threads */
    509511                if (!thread->uncounted) {
     
    513515                        }
    514516
    515                         uret += atomic_time_read(&thread->ucycles);
    516                         kret += atomic_time_read(&thread->kcycles);
     517                        uret += thread->ucycles;
     518                        kret += thread->kcycles;
    517519                }
     520
     521                irq_spinlock_unlock(&thread->lock, false);
    518522        }
    519523
  • kernel/generic/src/proc/thread.c

    ra5b5f17 red7e057  
    108108static int threads_cmp(void *, void *);
    109109
     110/** Thread wrapper.
     111 *
     112 * This wrapper is provided to ensure that every thread makes a call to
     113 * thread_exit() when its implementing function returns.
     114 *
     115 * interrupts_disable() is assumed.
     116 *
     117 */
     118static void cushion(void)
     119{
     120        void (*f)(void *) = THREAD->thread_code;
     121        void *arg = THREAD->thread_arg;
     122
     123        /* This is where each thread wakes up after its creation */
     124        irq_spinlock_unlock(&THREAD->lock, false);
     125        interrupts_enable();
     126
     127        f(arg);
     128
     129        thread_exit();
     130
     131        /* Not reached */
     132}
     133
    110134/** Initialization and allocation for thread_t structure
    111135 *
     
    115139        thread_t *thread = (thread_t *) obj;
    116140
     141        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    117142        link_initialize(&thread->rq_link);
    118143        link_initialize(&thread->wq_link);
     
    196221void thread_wire(thread_t *thread, cpu_t *cpu)
    197222{
    198         ipl_t ipl = interrupts_disable();
    199         atomic_set_unordered(&thread->cpu, cpu);
     223        irq_spinlock_lock(&thread->lock, true);
     224        thread->cpu = cpu;
    200225        thread->nomigrate++;
    201         interrupts_restore(ipl);
     226        irq_spinlock_unlock(&thread->lock, true);
    202227}
    203228
     
    208233void thread_start(thread_t *thread)
    209234{
    210         assert(atomic_get_unordered(&thread->state) == Entering);
    211         thread_requeue_sleeping(thread_ref(thread));
     235        assert(thread->state == Entering);
     236        thread_ready(thread_ref(thread));
     237}
     238
     239/** Make thread ready
     240 *
     241 * Switch thread to the ready state. Consumes reference passed by the caller.
     242 *
     243 * @param thread Thread to make ready.
     244 *
     245 */
     246void thread_ready(thread_t *thread)
     247{
     248        irq_spinlock_lock(&thread->lock, true);
     249
     250        assert(thread->state != Ready);
     251
     252        int i = (thread->priority < RQ_COUNT - 1) ?
     253            ++thread->priority : thread->priority;
     254
     255        /* Prefer the CPU on which the thread ran last */
     256        cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
     257
     258        thread->state = Ready;
     259
     260        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     261
     262        /*
     263         * Append thread to respective ready queue
     264         * on respective processor.
     265         */
     266
     267        list_append(&thread->rq_link, &cpu->rq[i].rq);
     268        cpu->rq[i].n++;
     269        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
     270
     271        atomic_inc(&nrdy);
     272        atomic_inc(&cpu->nrdy);
    212273}
    213274
     
    248309        irq_spinlock_unlock(&tidlock, true);
    249310
    250         context_create(&thread->saved_context, thread_main_func,
    251             thread->kstack, STACK_SIZE);
     311        context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
    252312
    253313        current_initialize((current_t *) thread->kstack);
     
    257317        thread->thread_code = func;
    258318        thread->thread_arg = arg;
    259         thread->ucycles = ATOMIC_TIME_INITIALIZER();
    260         thread->kcycles = ATOMIC_TIME_INITIALIZER();
     319        thread->ucycles = 0;
     320        thread->kcycles = 0;
    261321        thread->uncounted =
    262322            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    263         atomic_init(&thread->priority, 0);
    264         atomic_init(&thread->cpu, NULL);
     323        thread->priority = -1;          /* Start in rq[0] */
     324        thread->cpu = NULL;
    265325        thread->stolen = false;
    266326        thread->uspace =
     
    268328
    269329        thread->nomigrate = 0;
    270         atomic_init(&thread->state, Entering);
     330        thread->state = Entering;
    271331
    272332        atomic_init(&thread->sleep_queue, NULL);
     
    288348#ifdef CONFIG_UDEBUG
    289349        /* Initialize debugging stuff */
    290         atomic_init(&thread->btrace, false);
     350        thread->btrace = false;
    291351        udebug_thread_initialize(&thread->udebug);
    292352#endif
     
    332392
    333393        if (!thread->uncounted) {
    334                 thread->task->ucycles += atomic_time_read(&thread->ucycles);
    335                 thread->task->kcycles += atomic_time_read(&thread->kcycles);
     394                thread->task->ucycles += thread->ucycles;
     395                thread->task->kcycles += thread->kcycles;
    336396        }
    337397
    338398        irq_spinlock_unlock(&thread->task->lock, false);
    339399
    340         assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
     400        assert((thread->state == Exiting) || (thread->state == Lingering));
    341401
    342402        /* Clear cpu->fpu_owner if set to this thread. */
    343403#ifdef CONFIG_FPU_LAZY
    344         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    345         if (cpu) {
     404        if (thread->cpu) {
    346405                /*
    347406                 * We need to lock for this because the old CPU can concurrently try
     
    349408                 * it to finish. An atomic compare-and-swap wouldn't be enough.
    350409                 */
    351                 irq_spinlock_lock(&cpu->fpu_lock, false);
    352 
    353                 if (atomic_get_unordered(&cpu->fpu_owner) == thread)
    354                         atomic_set_unordered(&cpu->fpu_owner, NULL);
    355 
    356                 irq_spinlock_unlock(&cpu->fpu_lock, false);
     410                irq_spinlock_lock(&thread->cpu->fpu_lock, false);
     411
     412                thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
     413                    memory_order_relaxed);
     414
     415                if (owner == thread) {
     416                        atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
     417                            memory_order_relaxed);
     418                }
     419
     420                irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
    357421        }
    358422#endif
     
    571635                 * the waking thread by the sleeper in thread_wait_finish().
    572636                 */
    573                 thread_requeue_sleeping(thread);
     637                thread_ready(thread);
    574638        }
    575639}
     
    578642void thread_migration_disable(void)
    579643{
    580         ipl_t ipl = interrupts_disable();
    581 
    582644        assert(THREAD);
     645
    583646        THREAD->nomigrate++;
    584 
    585         interrupts_restore(ipl);
    586647}
    587648
     
    589650void thread_migration_enable(void)
    590651{
    591         ipl_t ipl = interrupts_disable();
    592 
    593652        assert(THREAD);
    594653        assert(THREAD->nomigrate > 0);
     
    596655        if (THREAD->nomigrate > 0)
    597656                THREAD->nomigrate--;
    598 
    599         interrupts_restore(ipl);
    600657}
    601658
     
    643700                return EINVAL;
    644701
    645         errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
     702        irq_spinlock_lock(&thread->lock, true);
     703        state_t state = thread->state;
     704        irq_spinlock_unlock(&thread->lock, true);
     705
     706        errno_t rc = EOK;
     707
     708        if (state != Exiting)
     709                rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    646710
    647711        if (rc == EOK)
     
    683747        uint64_t ucycles, kcycles;
    684748        char usuffix, ksuffix;
    685         order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
    686         order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
    687 
    688         state_t state = atomic_get_unordered(&thread->state);
     749        order_suffix(thread->ucycles, &ucycles, &usuffix);
     750        order_suffix(thread->kcycles, &kcycles, &ksuffix);
    689751
    690752        char *name;
     
    700762        else
    701763                printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    702                     thread->tid, name, thread, thread_states[state],
     764                    thread->tid, name, thread, thread_states[thread->state],
    703765                    thread->task, thread->task->container);
    704766
    705767        if (additional) {
    706                 cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    707                 if (cpu)
    708                         printf("%-5u", cpu->id);
     768                if (thread->cpu)
     769                        printf("%-5u", thread->cpu->id);
    709770                else
    710771                        printf("none ");
    711772
    712                 if (state == Sleeping) {
     773                if (thread->state == Sleeping) {
    713774                        printf(" %p", thread->sleep_queue);
    714775                }
     
    789850void thread_update_accounting(bool user)
    790851{
     852        uint64_t time = get_cycle();
     853
    791854        assert(interrupts_disabled());
    792 
    793         uint64_t time = get_cycle();
     855        assert(irq_spinlock_locked(&THREAD->lock));
    794856
    795857        if (user)
    796                 atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
     858                THREAD->ucycles += time - THREAD->last_cycle;
    797859        else
    798                 atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
     860                THREAD->kcycles += time - THREAD->last_cycle;
    799861
    800862        THREAD->last_cycle = time;
     
    907969         */
    908970
    909         printf("Scheduling thread stack trace.\n");
    910         atomic_set_unordered(&thread->btrace, true);
    911 
    912         thread_wakeup(thread);
     971        irq_spinlock_lock(&thread->lock, true);
     972
     973        bool sleeping = false;
     974        istate_t *istate = thread->udebug.uspace_state;
     975        if (istate != NULL) {
     976                printf("Scheduling thread stack trace.\n");
     977                thread->btrace = true;
     978                if (thread->state == Sleeping)
     979                        sleeping = true;
     980        } else
     981                printf("Thread interrupt state not available.\n");
     982
     983        irq_spinlock_unlock(&thread->lock, true);
     984
     985        if (sleeping)
     986                thread_wakeup(thread);
     987
    913988        thread_put(thread);
    914989}
     
    10111086                thread_attach(thread, TASK);
    10121087#endif
    1013                 thread_start(thread);
    1014                 thread_put(thread);
     1088                thread_ready(thread);
    10151089
    10161090                return 0;
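
Both sides implement the same priority-aging policy, just in different places: a thread preempted while Running is moved one run queue lower, capped at RQ_COUNT - 1 (thread_requeue_preempted on the a5b5f17 side, the increment inside thread_ready() on the ed7e057 side), while a thread returning from sleep is requeued at run queue 0 (thread_requeue_sleeping, or priority = -1 before thread_ready()). A small standalone sketch of just that policy; the RQ_COUNT value and helper names are illustrative:

    #include <stdio.h>

    #define RQ_COUNT 16  /* illustrative run-queue count */

    /* Preempted while Running: age the thread toward lower-priority queues. */
    static int requeue_preempted(int prio)
    {
        return (prio < RQ_COUNT - 1) ? prio + 1 : prio;
    }

    /* Woken from sleep (or first run): prefer the thread, start at queue 0. */
    static int requeue_sleeping(void)
    {
        return 0;
    }

    int main(void)
    {
        int prio = 0;
        for (int i = 0; i < 20; i++)
            prio = requeue_preempted(prio);
        printf("after 20 preemptions: rq %d\n", prio);   /* capped at RQ_COUNT - 1 */
        printf("after sleeping:       rq %d\n", requeue_sleeping());
        return 0;
    }
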
  • kernel/generic/src/syscall/syscall.c

    ra5b5f17 red7e057  
    141141{
    142142        /* Do userpace accounting */
    143         ipl_t ipl = interrupts_disable();
     143        irq_spinlock_lock(&THREAD->lock, true);
    144144        thread_update_accounting(true);
    145         interrupts_restore(ipl);
     145        irq_spinlock_unlock(&THREAD->lock, true);
    146146
    147147#ifdef CONFIG_UDEBUG
     
    191191
    192192        /* Do kernel accounting */
    193         ipl = interrupts_disable();
     193        irq_spinlock_lock(&THREAD->lock, true);
    194194        thread_update_accounting(false);
    195         interrupts_restore(ipl);
     195        irq_spinlock_unlock(&THREAD->lock, true);
    196196
    197197        return rc;
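
The accounting arithmetic is identical on both sides, and the hunks differ only in what protects it (THREAD->lock versus disabled interrupts plus atomic counters): thread_update_accounting() charges the cycles elapsed since THREAD->last_cycle to ucycles or kcycles and resets the sample point, so syscall entry books the user portion and syscall exit the kernel portion. A standalone sketch of that bookkeeping, with clock() standing in for get_cycle() and all names illustrative:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Illustrative stand-ins for THREAD->ucycles / kcycles / last_cycle. */
    static uint64_t ucycles, kcycles, last_cycle;

    static uint64_t get_cycle(void)
    {
        return (uint64_t) clock();  /* stand-in for the CPU cycle counter */
    }

    /* Charge the time elapsed since the last sample to user or kernel time. */
    static void update_accounting(int user)
    {
        uint64_t now = get_cycle();
        if (user)
            ucycles += now - last_cycle;
        else
            kcycles += now - last_cycle;
        last_cycle = now;
    }

    int main(void)
    {
        last_cycle = get_cycle();
        update_accounting(1);   /* syscall entry: charge the user-space portion */
        /* ... the syscall work itself would run here ... */
        update_accounting(0);   /* syscall exit: charge the kernel portion */
        printf("ucycles=%" PRIu64 " kcycles=%" PRIu64 "\n", ucycles, kcycles);
        return 0;
    }
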
  • kernel/generic/src/sysinfo/stats.c

    ra5b5f17 red7e057  
    299299{
    300300        assert(interrupts_disabled());
     301        assert(irq_spinlock_locked(&thread->lock));
    301302
    302303        stats_thread->thread_id = thread->tid;
    303304        stats_thread->task_id = thread->task->taskid;
    304         stats_thread->state = atomic_get_unordered(&thread->state);
    305         stats_thread->priority = atomic_get_unordered(&thread->priority);
    306         stats_thread->ucycles = atomic_time_read(&thread->ucycles);
    307         stats_thread->kcycles = atomic_time_read(&thread->kcycles);
    308 
    309         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    310 
    311         if (cpu != NULL) {
     305        stats_thread->state = thread->state;
     306        stats_thread->priority = thread->priority;
     307        stats_thread->ucycles = thread->ucycles;
     308        stats_thread->kcycles = thread->kcycles;
     309
     310        if (thread->cpu != NULL) {
    312311                stats_thread->on_cpu = true;
    313                 stats_thread->cpu = cpu->id;
     312                stats_thread->cpu = thread->cpu->id;
    314313        } else
    315314                stats_thread->on_cpu = false;
     
    362361        thread_t *thread = thread_first();
    363362        while (thread != NULL) {
     363                /* Interrupts are already disabled */
     364                irq_spinlock_lock(&thread->lock, false);
     365
    364366                /* Record the statistics and increment the index */
    365367                produce_stats_thread(thread, &stats_threads[i]);
    366368                i++;
     369
     370                irq_spinlock_unlock(&thread->lock, false);
    367371
    368372                thread = thread_next(thread);
     
    620624                ret.data.size = sizeof(stats_thread_t);
    621625
     626                /*
     627                 * Replaced hand-over-hand locking with regular nested sections
     628                 * to avoid weak reference leak issues.
     629                 */
     630                irq_spinlock_lock(&thread->lock, false);
    622631                produce_stats_thread(thread, stats_thread);
     632                irq_spinlock_unlock(&thread->lock, false);
    623633
    624634                irq_spinlock_unlock(&threads_lock, true);
  • kernel/generic/src/time/clock.c

    ra5b5f17 red7e057  
    123123static void cpu_update_accounting(void)
    124124{
    125         // FIXME: get_cycle() is unimplemented on several platforms
    126125        uint64_t now = get_cycle();
    127126        atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
  • kernel/generic/src/udebug/udebug_ops.c

    ra5b5f17 red7e057  
    9090        }
    9191
     92        irq_spinlock_lock(&thread->lock, true);
     93
    9294        /* Verify that 'thread' is a userspace thread. */
    9395        if (!thread->uspace) {
     96                /* It's not, deny its existence */
     97                irq_spinlock_unlock(&thread->lock, true);
    9498                mutex_unlock(&TASK->udebug.lock);
    9599                return ENOENT;
    96100        }
     101
     102        /* Verify debugging state. */
     103        if (thread->udebug.active != true) {
     104                /* Not in debugging session or undesired GO state */
     105                irq_spinlock_unlock(&thread->lock, true);
     106                mutex_unlock(&TASK->udebug.lock);
     107                return ENOENT;
     108        }
     109
     110        /* Now verify that the thread belongs to the current task. */
     111        if (thread->task != TASK) {
     112                /* No such thread belonging this task */
     113                irq_spinlock_unlock(&thread->lock, true);
     114                mutex_unlock(&TASK->udebug.lock);
     115                return ENOENT;
     116        }
     117
     118        irq_spinlock_unlock(&thread->lock, true);
     119
     120        /* Only mutex TASK->udebug.lock left. */
    97121
    98122        /*
     
    102126         */
    103127        mutex_lock(&thread->udebug.lock);
    104 
    105         /* Verify debugging state. */
    106         if (thread->udebug.active != true) {
    107                 /* Not in debugging session or undesired GO state */
    108                 mutex_unlock(&thread->udebug.lock);
    109                 mutex_unlock(&TASK->udebug.lock);
    110                 return ENOENT;
    111         }
    112 
    113         /* Now verify that the thread belongs to the current task. */
    114         if (thread->task != TASK) {
    115                 /* No such thread belonging this task */
    116                 mutex_unlock(&thread->udebug.lock);
    117                 mutex_unlock(&TASK->udebug.lock);
    118                 return ENOENT;
    119         }
    120128
    121129        /* The big task mutex is no longer needed. */
     
    380388        /* FIXME: make sure the thread isn't past debug shutdown... */
    381389        list_foreach(TASK->threads, th_link, thread_t, thread) {
     390                irq_spinlock_lock(&thread->lock, false);
    382391                bool uspace = thread->uspace;
     392                irq_spinlock_unlock(&thread->lock, false);
    383393
    384394                /* Not interested in kernel threads. */