Changeset 4760793 in mainline
- Timestamp: 2024-01-14T18:23:40Z (12 months ago)
- Branches: master
- Children: 5663872, c7ceacf
- Parents: 3b68542
- Location: kernel
- Files: 11 edited
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with +
- Removed: lines prefixed with -
kernel/arch/arm64/src/interrupt.c (r3b68542 → r4760793)

@@ -137,5 +137,5 @@
 	while (drift > timer_increment) {
 		drift -= timer_increment;
-		CPU->missed_clock_ticks++;
+		CPU_LOCAL->missed_clock_ticks++;
 	}
 	CNTV_CVAL_EL0_write(cntvct + timer_increment - drift);
kernel/arch/ia64/src/drivers/it.c (r3b68542 → r4760793)

@@ -122,5 +122,5 @@
 	itm += IT_DELTA;
 	if (itm - itc < 0)
-		CPU->missed_clock_ticks++;
+		CPU_LOCAL->missed_clock_ticks++;
 	else
 		break;
kernel/arch/mips32/src/interrupt.c (r3b68542 → r4760793)

@@ -121,5 +121,5 @@
 	while (drift > cp0_compare_value) {
 		drift -= cp0_compare_value;
-		CPU->missed_clock_ticks++;
+		CPU_LOCAL->missed_clock_ticks++;
 	}
 
kernel/arch/sparc64/src/drivers/tick.c (r3b68542 → r4760793)

@@ -117,5 +117,5 @@
 	while (drift > CPU->arch.clock_frequency / HZ) {
 		drift -= CPU->arch.clock_frequency / HZ;
-		CPU->missed_clock_ticks++;
+		CPU_LOCAL->missed_clock_ticks++;
 	}
 	CPU->arch.next_tick_cmpr = tick_counter_read() +
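The four architecture-specific changes above are instances of one drift-compensation pattern: when the timer interrupt fires late, every whole tick period contained in the delay is recorded in the per-CPU missed_clock_ticks counter so that clock() can catch up later, and the timer is re-armed relative to the residual drift. A minimal sketch of the pattern follows; read_counter(), read_compare(), write_compare() and tick_period are placeholders, not kernel API (each architecture uses its own registers and period):

	/* Hedged sketch, not kernel code: generic form of the handlers above. */
	static void timer_tick_common(uint64_t tick_period)
	{
		/* How far past the programmed compare value the counter already is. */
		uint64_t drift = read_counter() - read_compare();

		/* Every whole period of lateness is one missed clock() tick. */
		while (drift > tick_period) {
			drift -= tick_period;
			/* Interrupts are disabled here, so CPU_LOCAL needs no locking. */
			CPU_LOCAL->missed_clock_ticks++;
		}

		/* Re-arm one period ahead, compensating for the leftover drift. */
		write_compare(read_counter() + tick_period - drift);
	}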
kernel/generic/include/cpu.h (r3b68542 → r4760793)

@@ -44,5 +44,35 @@
 #include <arch.h>
 
-#define CPU CURRENT->cpu
+#define CPU (CURRENT->cpu)
+#define CPU_LOCAL (&CPU->local)
+
+/**
+ * Contents of CPU_LOCAL. These are variables that are only ever accessed by
+ * the CPU they belong to, so they don't need any synchronization,
+ * just locally disabled interrupts.
+ */
+typedef struct cpu_local {
+	/**
+	 * When system clock loses a tick, it is
+	 * recorded here so that clock() can react.
+	 */
+	size_t missed_clock_ticks;
+
+	uint64_t current_clock_tick;
+	uint64_t preempt_deadline; /* < when should the currently running thread be preempted */
+	uint64_t relink_deadline;
+
+	/**
+	 * Stack used by scheduler when there is no running thread.
+	 * This field is unchanged after initialization.
+	 */
+	uint8_t *stack;
+
+	/**
+	 * Processor cycle accounting.
+	 */
+	bool idle;
+	uint64_t last_cycle;
+} cpu_local_t;
 
 /** CPU structure.
@@ -63,22 +93,6 @@
 
 	/**
-	 * When system clock loses a tick, it is
-	 * recorded here so that clock() can react.
-	 * This variable is CPU-local and can be
-	 * only accessed when interrupts are
-	 * disabled.
-	 */
-	size_t missed_clock_ticks;
-
-	/** Can only be accessed by the CPU represented by this structure when interrupts are disabled. */
-	uint64_t current_clock_tick;
-	uint64_t preempt_deadline; /* < when should the currently running thread be preempted */
-	uint64_t relink_deadline;
-
-	/**
 	 * Processor cycle accounting.
 	 */
-	bool idle;
-	uint64_t last_cycle;
 	atomic_time_stat_t idle_cycles;
 	atomic_time_stat_t busy_cycles;
@@ -103,8 +117,5 @@
 	_Atomic(struct thread *) fpu_owner;
 
-	/**
-	 * Stack used by scheduler when there is no running thread.
-	 */
-	uint8_t *stack;
+	cpu_local_t local;
 } cpu_t;
 
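The new cpu_local_t split encodes the synchronization contract in the type: everything reached through CPU_LOCAL is only ever touched by the owning CPU with interrupts disabled, so it needs no locks or atomics, while the members remaining in cpu_t keep their own synchronization (the IRQ spinlock, atomics). A hypothetical illustration of the intended access pattern; the function itself is not part of the changeset, only ipl_t, interrupts_disable() and interrupts_restore() are the kernel's usual primitives:

	/* Illustrative only: the access pattern cpu_local_t is designed for. */
	static void example_local_access(void)
	{
		ipl_t ipl = interrupts_disable();

		/* Safe without locks: no other CPU ever touches our cpu_local_t. */
		CPU_LOCAL->missed_clock_ticks++;

		interrupts_restore(ipl);
	}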
kernel/generic/src/cpu/cpu.c (r3b68542 → r4760793)

@@ -81,5 +81,5 @@
 			panic("Cannot allocate CPU stack.");
 
-		cpus[i].stack = (uint8_t *) PA2KA(stack_phys);
+		cpus[i].local.stack = (uint8_t *) PA2KA(stack_phys);
 		cpus[i].id = i;
 
@@ -104,6 +104,6 @@
 	CPU->tlb_active = true;
 
-	CPU->idle = false;
-	CPU->last_cycle = get_cycle();
+	CPU_LOCAL->idle = false;
+	CPU_LOCAL->last_cycle = get_cycle();
 	CPU->idle_cycles = ATOMIC_TIME_INITIALIZER();
 	CPU->busy_cycles = ATOMIC_TIME_INITIALIZER();
kernel/generic/src/interrupt/interrupt.c (r3b68542 → r4760793)

@@ -121,9 +121,9 @@
 
 	/* Account CPU usage if it woke up from sleep */
-	if (CPU && CPU->idle) {
+	if (CPU && CPU_LOCAL->idle) {
 		uint64_t now = get_cycle();
-		atomic_time_increment(&CPU->idle_cycles, now - CPU->last_cycle);
-		CPU->last_cycle = now;
-		CPU->idle = false;
+		atomic_time_increment(&CPU->idle_cycles, now - CPU_LOCAL->last_cycle);
+		CPU_LOCAL->last_cycle = now;
+		CPU_LOCAL->idle = false;
 	}
 
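Together with the clock.c hunk further below, this completes the cycle-accounting scheme: the CPU-local last_cycle marks where accounting stopped, and whenever the CPU flips between idle and busy, the elapsed span is credited to idle_cycles or busy_cycles, which stay in cpu_t as atomics because other CPUs read them for statistics. A condensed sketch of the shared idea; account_cycles() is a hypothetical name, not a kernel function:

	/* Sketch: credit cycles since last_cycle to the state that just ended. */
	static void account_cycles(bool was_idle)
	{
		uint64_t now = get_cycle();
		atomic_time_stat_t *bucket =
		    was_idle ? &CPU->idle_cycles : &CPU->busy_cycles;

		/* Readable by other CPUs, hence the atomic increment. */
		atomic_time_increment(bucket, now - CPU_LOCAL->last_cycle);
		CPU_LOCAL->last_cycle = now;
	}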
kernel/generic/src/main/main.c (r3b68542 → r4760793)

@@ -328,5 +328,5 @@
 	ARCH_OP(post_cpu_init);
 
-	current_copy(CURRENT, (current_t *) CPU->stack);
+	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
 
 	/*
@@ -338,5 +338,5 @@
 	context_save(&ctx);
 	context_set(&ctx, FADDR(main_ap_separated_stack),
-	    (uintptr_t) CPU->stack, STACK_SIZE);
+	    (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
 	context_restore(&ctx);
 	/* not reached */
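Here, and again in scheduler.c below, the scheduler stack held in CPU_LOCAL->stack does double duty: current_copy() seeds a current_t at the stack's lowest address before context_set()/context_restore() switch execution onto that stack. A rough memory-layout sketch of this handoff, under the assumption (consistent with the casts above) that current_t lives at the stack base:

	/*
	 * Illustrative layout of the per-CPU scheduler stack:
	 *
	 *   CPU_LOCAL->stack                     CPU_LOCAL->stack + STACK_SIZE
	 *   | current_t | ............ free, stack grows downward ........ |
	 *
	 * current_copy(CURRENT, (current_t *) CPU_LOCAL->stack) fills the
	 * base; context_set() then installs this stack in the saved context
	 * that context_restore() jumps into.
	 */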
kernel/generic/src/proc/scheduler.c (r3b68542 → r4760793)

@@ -216,5 +216,6 @@
 
 	/* This is safe because interrupts are disabled. */
-	CPU->preempt_deadline = CPU->current_clock_tick + us2ticks(time_to_run);
+	CPU_LOCAL->preempt_deadline =
+	    CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
 
 	/*
@@ -257,5 +258,5 @@
 	 * This improves energy saving and hyperthreading.
 	 */
-	CPU->idle = true;
+	CPU_LOCAL->idle = true;
 
 	/*
@@ -305,8 +306,8 @@
 static void relink_rq(int start)
 {
-	if (CPU->current_clock_tick < CPU->relink_deadline)
+	if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
 		return;
 
-	CPU->relink_deadline = CPU->current_clock_tick + NEEDS_RELINK_MAX;
+	CPU_LOCAL->relink_deadline = CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX;
 
 	/* Temporary cache for lists we are moving. */
@@ -401,5 +402,5 @@
 	 *
 	 */
-	current_copy(CURRENT, (current_t *) CPU->stack);
+	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
 
 	/*
@@ -419,5 +420,5 @@
 	context_save(&ctx);
 	context_set(&ctx, FADDR(scheduler_separated_stack),
-	    (uintptr_t) CPU->stack, STACK_SIZE);
+	    (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
 	context_restore(&ctx);
 
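Both preempt_deadline and relink_deadline share one representation: an absolute value of current_clock_tick. That makes expiry a single comparison and, because clock() advances the tick count by the missed ticks as well, a deadline cannot be postponed by lost timer interrupts. A hypothetical helper showing the idiom; neither function below exists in the changeset:

	/* Illustrative only: the deadline idiom used throughout this changeset. */
	static inline bool deadline_expired(uint64_t deadline)
	{
		/* Absolute tick values make expiry a single comparison. */
		return CPU_LOCAL->current_clock_tick >= deadline;
	}

	static void relink_rq_sketch(void)
	{
		/* Throttle: do the work at most once per NEEDS_RELINK_MAX ticks. */
		if (!deadline_expired(CPU_LOCAL->relink_deadline))
			return;

		CPU_LOCAL->relink_deadline =
		    CPU_LOCAL->current_clock_tick + NEEDS_RELINK_MAX;

		/* ... run-queue relinking would go here ... */
	}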
kernel/generic/src/time/clock.c (r3b68542 → r4760793)

@@ -124,6 +124,6 @@
 {
 	uint64_t now = get_cycle();
-	atomic_time_increment(&CPU->busy_cycles, now - CPU->last_cycle);
-	CPU->last_cycle = now;
+	atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
+	CPU_LOCAL->last_cycle = now;
 }
 
@@ -137,9 +137,9 @@
 void clock(void)
 {
-	size_t missed_clock_ticks = CPU->missed_clock_ticks;
-	CPU->missed_clock_ticks = 0;
-
-	CPU->current_clock_tick += missed_clock_ticks + 1;
-	uint64_t current_clock_tick = CPU->current_clock_tick;
+	size_t missed_clock_ticks = CPU_LOCAL->missed_clock_ticks;
+	CPU_LOCAL->missed_clock_ticks = 0;
+
+	CPU_LOCAL->current_clock_tick += missed_clock_ticks + 1;
+	uint64_t current_clock_tick = CPU_LOCAL->current_clock_tick;
 	clock_update_counters(current_clock_tick);
 
@@ -186,5 +186,5 @@
 
 	if (THREAD) {
-		if (current_clock_tick >= CPU->preempt_deadline && PREEMPTION_ENABLED) {
+		if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
 			scheduler();
 #ifdef CONFIG_UDEBUG
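The catch-up arithmetic at the top of clock() is worth a worked example. Suppose the timer handler recorded three missed ticks while interrupts were masked (the numbers are hypothetical):

	/* Hypothetical walk-through of clock()'s catch-up, with 3 missed ticks. */
	static void clock_catchup_example(void)
	{
		size_t missed = CPU_LOCAL->missed_clock_ticks;  /* 3 in this example */
		CPU_LOCAL->missed_clock_ticks = 0;              /* consumed */

		/*
		 * Advance by the lost ticks plus the tick being handled now,
		 * e.g. 1000 -> 1004; any absolute-tick deadline inside the gap
		 * is then seen as expired on this very invocation.
		 */
		CPU_LOCAL->current_clock_tick += missed + 1;
	}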
kernel/generic/src/time/timeout.c (r3b68542 → r4760793)

@@ -77,4 +77,4 @@
 		return 0;
 
-	return CPU->current_clock_tick + us2ticks(usec);
+	return CPU_LOCAL->current_clock_tick + us2ticks(usec);
 
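Concretely, a timeout is converted to an absolute tick deadline at registration time. Assuming HZ == 100 (10 ms per tick; the actual value is configuration-dependent) and a current_clock_tick of 1000, a 30 ms timeout computes as follows:

	/* Hypothetical example: 30 ms timeout registered at tick 1000, HZ == 100. */
	static uint64_t timeout_deadline_example(void)
	{
		uint64_t usec = 30000;  /* 30 ms */

		/*
		 * us2ticks(30000) == 3 when one tick is 10 ms, so with
		 * current_clock_tick == 1000 the deadline is tick 1003 and
		 * clock() fires the timeout on that invocation.
		 */
		return CPU_LOCAL->current_clock_tick + us2ticks(usec);
	}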