Changeset 4760793 in mainline


Timestamp: 2024-01-14T18:23:40Z
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: 5663872, c7ceacf
Parents: 3b68542
Message:

Add CPU_LOCAL alongside CPU and segregate fields that are only used locally

This makes it clearer which fields can be used without synchronization
and which need more care.

Location: kernel
Files: 11 edited
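
Reduced to its essentials, the change moves fields that only their owning CPU
ever touches into a nested struct reached through a dedicated macro, so that
any remaining access through CPU signals that cross-CPU synchronization may be
involved. A minimal user-space sketch of the pattern (the demo_ names are
invented for illustration and are not HelenOS identifiers):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Owner-only fields: safe to touch with just interrupts disabled. */
    typedef struct demo_cpu_local {
            size_t missed_clock_ticks;
            uint64_t current_clock_tick;
    } demo_cpu_local_t;

    /* Fields other CPUs may also access: these need atomics or locks. */
    typedef struct demo_cpu {
            atomic_uint_fast64_t busy_cycles;
            demo_cpu_local_t local;
    } demo_cpu_t;

    static demo_cpu_t demo_cpus[4];
    static int demo_current_cpu = 0;

    /* Mirrors the CPU / CPU_LOCAL macro pair introduced in cpu.h. */
    #define DEMO_CPU       (&demo_cpus[demo_current_cpu])
    #define DEMO_CPU_LOCAL (&DEMO_CPU->local)

    int main(void)
    {
            DEMO_CPU_LOCAL->missed_clock_ticks++;          /* owner-only: plain access */
            atomic_fetch_add(&DEMO_CPU->busy_cycles, 100); /* shared: atomic access */
            printf("missed ticks: %zu\n", DEMO_CPU_LOCAL->missed_clock_ticks);
            return 0;
    }

Here only busy_cycles needs atomic operations; everything under local is
touched solely by its owning CPU.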

  • kernel/arch/arm64/src/interrupt.c

    r3b68542 r4760793
             while (drift > timer_increment) {
                     drift -= timer_increment;
    -                CPU->missed_clock_ticks++;
    +                CPU_LOCAL->missed_clock_ticks++;
             }
             CNTV_CVAL_EL0_write(cntvct + timer_increment - drift);
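
The same drift-accounting pattern recurs in the ia64, mips32, and sparc64
drivers below: the handler measures how far the counter ran past the
programmed deadline and books one missed clock tick per whole timer period,
leaving the residue to shorten the next period. A standalone sketch with plain
integers in place of timer registers (all values invented):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t timer_increment = 10000;  /* counter ticks per clock tick */
            uint64_t drift = 34567;            /* how far past the deadline we fired */
            size_t missed_clock_ticks = 0;

            /* Book one missed clock tick per whole timer period we overshot. */
            while (drift > timer_increment) {
                    drift -= timer_increment;
                    missed_clock_ticks++;
            }

            /* The residual drift shortens the next programmed period. */
            printf("missed %zu ticks, residual drift %llu\n",
                missed_clock_ticks, (unsigned long long) drift);
            return 0;
    }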
  • kernel/arch/ia64/src/drivers/it.c

    r3b68542 r4760793
                     itm += IT_DELTA;
                     if (itm - itc < 0)
    -                        CPU->missed_clock_ticks++;
    +                        CPU_LOCAL->missed_clock_ticks++;
                     else
                             break;
  • kernel/arch/mips32/src/interrupt.c

    r3b68542 r4760793
             while (drift > cp0_compare_value) {
                     drift -= cp0_compare_value;
    -                CPU->missed_clock_ticks++;
    +                CPU_LOCAL->missed_clock_ticks++;
             }

  • kernel/arch/sparc64/src/drivers/tick.c

    r3b68542 r4760793
             while (drift > CPU->arch.clock_frequency / HZ) {
                     drift -= CPU->arch.clock_frequency / HZ;
    -                CPU->missed_clock_ticks++;
    +                CPU_LOCAL->missed_clock_ticks++;
             }
             CPU->arch.next_tick_cmpr = tick_counter_read() +
  • kernel/generic/include/cpu.h

    r3b68542 r4760793
     #include <arch.h>

    -#define CPU                  CURRENT->cpu
    +#define CPU                  (CURRENT->cpu)
    +#define CPU_LOCAL            (&CPU->local)
    +
    +/**
    + * Contents of CPU_LOCAL. These are variables that are only ever accessed by
    + * the CPU they belong to, so they don't need any synchronization,
    + * just locally disabled interrupts.
    + */
    +typedef struct cpu_local {
    +        /**
    +         * When system clock loses a tick, it is
    +         * recorded here so that clock() can react.
    +         */
    +        size_t missed_clock_ticks;
    +
    +        uint64_t current_clock_tick;
    +        uint64_t preempt_deadline;  /* < when should the currently running thread be preempted */
    +        uint64_t relink_deadline;
    +
    +        /**
    +         * Stack used by scheduler when there is no running thread.
    +         * This field is unchanged after initialization.
    +         */
    +        uint8_t *stack;
    +
    +        /**
    +         * Processor cycle accounting.
    +         */
    +        bool idle;
    +        uint64_t last_cycle;
    +} cpu_local_t;

     /** CPU structure.
    …
             /**
    -         * When system clock loses a tick, it is
    -         * recorded here so that clock() can react.
    -         * This variable is CPU-local and can be
    -         * only accessed when interrupts are
    -         * disabled.
    -         */
    -        size_t missed_clock_ticks;
    -
    -        /** Can only be accessed by the CPU represented by this structure when interrupts are disabled. */
    -        uint64_t current_clock_tick;
    -        uint64_t preempt_deadline;  /* < when should the currently running thread be preempted */
    -        uint64_t relink_deadline;
    -
    -        /**
              * Processor cycle accounting.
              */
    -        bool idle;
    -        uint64_t last_cycle;
             atomic_time_stat_t idle_cycles;
             atomic_time_stat_t busy_cycles;
    …
             _Atomic(struct thread *) fpu_owner;

    -        /**
    -         * Stack used by scheduler when there is no running thread.
    -         */
    -        uint8_t *stack;
    +        cpu_local_t local;
     } cpu_t;

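
One consequence of embedding cpu_local_t by value rather than allocating it
separately: &CPU->local is a fixed offset from the cpu_t pointer, so
CPU_LOCAL costs no extra memory access over CPU itself. A small sketch
demonstrating this with stand-in types (the demo_ names are illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for cpu_local_t and cpu_t (illustrative only). */
    typedef struct demo_cpu_local {
            size_t missed_clock_ticks;
            uint64_t current_clock_tick;
            uint8_t *stack;
    } demo_cpu_local_t;

    typedef struct demo_cpu {
            uint32_t id;
            demo_cpu_local_t local;
    } demo_cpu_t;

    int main(void)
    {
            /*
             * local is embedded by value, so &cpu->local is a constant
             * offset from the cpu_t pointer: CPU_LOCAL adds no memory
             * access beyond what CPU already performs.
             */
            printf("local fields start at offset %zu within demo_cpu_t\n",
                offsetof(demo_cpu_t, local));

            demo_cpu_t cpu = { .id = 0 };
            demo_cpu_local_t *local = &cpu.local;
            local->missed_clock_ticks = 0;
            return 0;
    }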
  • kernel/generic/src/cpu/cpu.c

    r3b68542 r4760793
                                     panic("Cannot allocate CPU stack.");

    -                        cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
    +                        cpus[i].local.stack = (uint8_t *) PA2KA(stack_phys);
                             cpus[i].id = i;

    …
             CPU->tlb_active = true;

    -        CPU->idle = false;
    -        CPU->last_cycle = get_cycle();
    +        CPU_LOCAL->idle = false;
    +        CPU_LOCAL->last_cycle = get_cycle();
             CPU->idle_cycles = ATOMIC_TIME_INITIALIZER();
             CPU->busy_cycles = ATOMIC_TIME_INITIALIZER();
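
A sketch of the initialization split the second hunk illustrates, in portable
C11: owner-only fields take plain stores, while fields other CPUs will read
go through the atomics machinery (atomic_init here stands in for the kernel's
ATOMIC_TIME_INITIALIZER; the types are simplified stand-ins):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-ins for the kernel types (illustrative only). */
    typedef struct demo_cpu {
            atomic_uint_fast64_t idle_cycles;  /* shared: initialize atomically */
            struct {
                    bool idle;                 /* owner-only: plain stores suffice */
                    uint64_t last_cycle;
            } local;
    } demo_cpu_t;

    static demo_cpu_t demo_cpus[4];

    /* What each CPU does for itself before entering the scheduler (sketch). */
    static void demo_cpu_init(demo_cpu_t *cpu, uint64_t now)
    {
            cpu->local.idle = false;
            cpu->local.last_cycle = now;
            atomic_init(&cpu->idle_cycles, 0);
    }

    int main(void)
    {
            demo_cpu_init(&demo_cpus[0], 12345);
            return 0;
    }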
  • kernel/generic/src/interrupt/interrupt.c

    r3b68542 r4760793

             /* Account CPU usage if it woke up from sleep */
    -        if (CPU && CPU->idle) {
    +        if (CPU && CPU_LOCAL->idle) {
                     uint64_t now = get_cycle();
    -                atomic_time_increment(&CPU->idle_cycles, now - CPU->last_cycle);
    -                CPU->last_cycle = now;
    -                CPU->idle = false;
    +                atomic_time_increment(&CPU->idle_cycles, now - CPU_LOCAL->last_cycle);
    +                CPU_LOCAL->last_cycle = now;
    +                CPU_LOCAL->idle = false;
             }

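
The accounting idea in this hunk: while a CPU sleeps, idle stays true, and the
first interrupt after wake-up charges the elapsed cycles to idle_cycles and
restamps last_cycle. Note the asymmetry the new naming makes visible:
idle_cycles stays on CPU because other processors read it for statistics,
while idle and last_cycle are owner-only. A self-contained sketch with
invented cycle values:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t idle_cycles;  /* readable from other CPUs */
    static bool idle = true;                  /* owner-only */
    static uint64_t last_cycle = 1000;        /* owner-only */

    /* What the interrupt entry path does after waking from sleep (sketch). */
    static void demo_wakeup_accounting(uint64_t now)
    {
            if (idle) {
                    atomic_fetch_add(&idle_cycles, now - last_cycle);
                    last_cycle = now;
                    idle = false;
            }
    }

    int main(void)
    {
            demo_wakeup_accounting(1500);
            printf("idle cycles: %llu\n",
                (unsigned long long) atomic_load(&idle_cycles));
            return 0;
    }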
  • kernel/generic/src/main/main.c

    r3b68542 r4760793
             ARCH_OP(post_cpu_init);

    -        current_copy(CURRENT, (current_t *) CPU->stack);
    +        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);

             /*
    …
             context_save(&ctx);
             context_set(&ctx, FADDR(main_ap_separated_stack),
    -            (uintptr_t) CPU->stack, STACK_SIZE);
    +            (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
             context_restore(&ctx);
             /* not reached */
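
For context on these hunks: the cast in current_copy(CURRENT, (current_t *)
CPU_LOCAL->stack) places a current_t block at the base of the scheduler stack,
so the block must be copied there before execution switches onto that stack.
A rough user-space sketch of that copy, assuming (for illustration only) a
simplified current_t and stack layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define STACK_SIZE 4096  /* invented; the kernel's value is per-architecture */

    /* Simplified stand-in for the kernel's current_t (illustrative only). */
    typedef struct demo_current {
            void *cpu;
            void *thread;
    } demo_current_t;

    /* Rough analogue of current_copy(): the block lives at the stack base. */
    static void demo_current_copy(const demo_current_t *src, demo_current_t *dst)
    {
            memcpy(dst, src, sizeof(*dst));
    }

    int main(void)
    {
            static _Alignas(demo_current_t) uint8_t stack[STACK_SIZE];

            demo_current_t cur = { .cpu = NULL, .thread = NULL };

            /* Seed the new stack's current block before switching onto it. */
            demo_current_copy(&cur, (demo_current_t *) stack);

            printf("current block copied to stack base %p\n", (void *) stack);
            return 0;
    }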
  • kernel/generic/src/proc/scheduler.c

    r3b68542 r4760793

                     /* This is safe because interrupts are disabled. */
    -                CPU->preempt_deadline = CPU->current_clock_tick + us2ticks(time_to_run);
    +                CPU_LOCAL->preempt_deadline =
    +                    CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);

                     /*
    …
                      * This improves energy saving and hyperthreading.
                      */
    -                CPU->idle = true;
    +                CPU_LOCAL->idle = true;

                     /*
    …
     static void relink_rq(int start)
     {
    -        if (CPU->current_clock_tick < CPU->relink_deadline)
    +        if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
                     return;

    -        CPU->relink_deadline = CPU->current_clock_tick + NEEDS_RELINK_MAX;
    +        CPU_LOCAL->relink_deadline = CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX;

             /* Temporary cache for lists we are moving. */
    …
              *
              */
    -        current_copy(CURRENT, (current_t *) CPU->stack);
    +        current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);

             /*
    …
             context_save(&ctx);
             context_set(&ctx, FADDR(scheduler_separated_stack),
    -            (uintptr_t) CPU->stack, STACK_SIZE);
    +            (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
             context_restore(&ctx);

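
The deadline arithmetic in the first hunk, spelled out: the preemption
deadline is the current CPU-local tick plus the thread's quantum converted
from microseconds to clock ticks. A hedged sketch, assuming a us2ticks-style
conversion of usec * HZ / 1,000,000 with HZ ticks per second (demo_us2ticks
and the HZ value are invented for illustration); the same conversion feeds
the timeout deadline in timeout.c further down:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 100  /* invented tick rate: 100 scheduler ticks per second */

    /* Hypothetical stand-in for the kernel's us2ticks() conversion. */
    static uint64_t demo_us2ticks(uint64_t usec)
    {
            return usec * HZ / 1000000;
    }

    int main(void)
    {
            uint64_t current_clock_tick = 500;   /* CPU-local tick counter */
            uint64_t time_to_run = 40000;        /* quantum in microseconds */

            /* Safe with plain stores: only the owning CPU uses the deadline. */
            uint64_t preempt_deadline =
                current_clock_tick + demo_us2ticks(time_to_run);

            printf("preempt at tick %llu (now %llu)\n",
                (unsigned long long) preempt_deadline,
                (unsigned long long) current_clock_tick);
            return 0;
    }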
  • kernel/generic/src/time/clock.c

    r3b68542 r4760793
     {
             uint64_t now = get_cycle();
    -        atomic_time_increment(&CPU->busy_cycles, now - CPU->last_cycle);
    -        CPU->last_cycle = now;
    +        atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
    +        CPU_LOCAL->last_cycle = now;
     }

    …
     void clock(void)
     {
    -        size_t missed_clock_ticks = CPU->missed_clock_ticks;
    -        CPU->missed_clock_ticks = 0;
    -
    -        CPU->current_clock_tick += missed_clock_ticks + 1;
    -        uint64_t current_clock_tick = CPU->current_clock_tick;
    +        size_t missed_clock_ticks = CPU_LOCAL->missed_clock_ticks;
    +        CPU_LOCAL->missed_clock_ticks = 0;
    +
    +        CPU_LOCAL->current_clock_tick += missed_clock_ticks + 1;
    +        uint64_t current_clock_tick = CPU_LOCAL->current_clock_tick;
             clock_update_counters(current_clock_tick);

    …

             if (THREAD) {
    -                if (current_clock_tick >= CPU->preempt_deadline && PREEMPTION_ENABLED) {
    +                if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
                             scheduler();
     #ifdef CONFIG_UDEBUG
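
What the rewritten clock() prologue does with those fields: it drains the
backlog of ticks the timer handlers booked into missed_clock_ticks and
advances current_clock_tick by the backlog plus the tick being processed,
all with plain loads and stores since only the owning CPU touches them.
A sketch with invented values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* CPU-local state as in cpu_local_t (values invented). */
            size_t missed_clock_ticks = 3;  /* booked earlier by the timer handler */
            uint64_t current_clock_tick = 100;

            /* clock() drains the backlog and advances by it plus this tick. */
            size_t missed = missed_clock_ticks;
            missed_clock_ticks = 0;
            current_clock_tick += missed + 1;

            printf("clock tick is now %llu after %zu missed ticks\n",
                (unsigned long long) current_clock_tick, missed);
            return 0;
    }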
  • kernel/generic/src/time/timeout.c

    r3b68542 r4760793
                     return 0;

    -        return CPU->current_clock_tick + us2ticks(usec);
    +        return CPU_LOCAL->current_clock_tick + us2ticks(usec);
     }
