Changes in kernel/generic/src/time/clock.c [6f7071b:25939997] in mainline
File: 1 edited
Legend:
- Unmodified: no prefix
- Added: lines prefixed with +
- Removed: lines prefixed with -
kernel/generic/src/time/clock.c
--- kernel/generic/src/time/clock.c	(r6f7071b)
+++ kernel/generic/src/time/clock.c	(r25939997)
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2022 Jiří Zárevúcky
  * All rights reserved.
  *
@@ -58,4 +59,5 @@
 #include <ddi/ddi.h>
 #include <arch/cycle.h>
+#include <preemption.h>
 
 /* Pointer to variable with uptime */
@@ -64,11 +66,4 @@
 /** Physical memory area of the real time clock */
 static parea_t clock_parea;
-
-/** Fragment of second
- *
- * For updating seconds correctly.
- *
- */
-static sysarg_t secfrag = 0;
 
 /** Initialize realtime clock counter
@@ -109,20 +104,18 @@
  *
  * Update it only on first processor
- * TODO: Do we really need so many write barriers?
- *
  */
-static void clock_update_counters(void)
+static void clock_update_counters(uint64_t current_tick)
 {
 	if (CPU->id == 0) {
-		secfrag += 1000000 / HZ;
-		if (secfrag >= 1000000) {
-			secfrag -= 1000000;
-			uptime->seconds1++;
-			write_barrier();
-			uptime->useconds = secfrag;
-			write_barrier();
-			uptime->seconds2 = uptime->seconds1;
-		} else
-			uptime->useconds += 1000000 / HZ;
+		uint64_t usec = (1000000 / HZ) * current_tick;
+
+		sysarg_t secs = usec / 1000000;
+		sysarg_t usecs = usec % 1000000;
+
+		uptime->seconds1 = secs;
+		write_barrier();
+		uptime->useconds = usecs;
+		write_barrier();
+		uptime->seconds2 = secs;
 	}
 }
@@ -130,8 +123,7 @@
 static void cpu_update_accounting(void)
 {
-	irq_spinlock_lock(&CPU->lock, false);
+	// FIXME: get_cycle() is unimplemented on several platforms
 	uint64_t now = get_cycle();
-	CPU->busy_cycles += now - CPU->last_cycle;
-	CPU->last_cycle = now;
-	irq_spinlock_unlock(&CPU->lock, false);
+	atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
+	CPU_LOCAL->last_cycle = now;
 }
@@ -146,5 +138,10 @@
 void clock(void)
 {
-	size_t missed_clock_ticks = CPU->missed_clock_ticks;
+	size_t missed_clock_ticks = CPU_LOCAL->missed_clock_ticks;
+	CPU_LOCAL->missed_clock_ticks = 0;
+
+	CPU_LOCAL->current_clock_tick += missed_clock_ticks + 1;
+	uint64_t current_clock_tick = CPU_LOCAL->current_clock_tick;
+	clock_update_counters(current_clock_tick);
 
 	/* Account CPU usage */
@@ -156,39 +153,31 @@
 	 *
 	 */
-	size_t i;
-	for (i = 0; i <= missed_clock_ticks; i++) {
-		/* Update counters and accounting */
-		clock_update_counters();
-		cpu_update_accounting();
+
+	irq_spinlock_lock(&CPU->timeoutlock, false);
+
+	link_t *cur;
+	while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
+		timeout_t *timeout = list_get_instance(cur, timeout_t, link);
+
+		if (current_clock_tick <= timeout->deadline) {
+			break;
+		}
+
+		list_remove(cur);
+		timeout_handler_t handler = timeout->handler;
+		void *arg = timeout->arg;
+		atomic_bool *finished = &timeout->finished;
+
+		irq_spinlock_unlock(&CPU->timeoutlock, false);
+
+		handler(arg);
+
+		/* Signal that the handler is finished. */
+		atomic_store_explicit(finished, true, memory_order_release);
 
 		irq_spinlock_lock(&CPU->timeoutlock, false);
-
-		link_t *cur;
-		while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {
-			timeout_t *timeout = list_get_instance(cur, timeout_t,
-			    link);
-
-			irq_spinlock_lock(&timeout->lock, false);
-			if (timeout->ticks-- != 0) {
-				irq_spinlock_unlock(&timeout->lock, false);
-				break;
-			}
-
-			list_remove(cur);
-			timeout_handler_t handler = timeout->handler;
-			void *arg = timeout->arg;
-			timeout_reinitialize(timeout);
-
-			irq_spinlock_unlock(&timeout->lock, false);
-			irq_spinlock_unlock(&CPU->timeoutlock, false);
-
-			handler(arg);
-
-			irq_spinlock_lock(&CPU->timeoutlock, false);
-		}
-
-		irq_spinlock_unlock(&CPU->timeoutlock, false);
 	}
-	CPU->missed_clock_ticks = 0;
+
+	irq_spinlock_unlock(&CPU->timeoutlock, false);
 
 	/*
@@ -198,21 +187,6 @@
 
 	if (THREAD) {
-		uint64_t ticks;
-
-		irq_spinlock_lock(&CPU->lock, false);
-		CPU->needs_relink += 1 + missed_clock_ticks;
-		irq_spinlock_unlock(&CPU->lock, false);
-
-		irq_spinlock_lock(&THREAD->lock, false);
-		if ((ticks = THREAD->ticks)) {
-			if (ticks >= 1 + missed_clock_ticks)
-				THREAD->ticks -= 1 + missed_clock_ticks;
-			else
-				THREAD->ticks = 0;
-		}
-		irq_spinlock_unlock(&THREAD->lock, false);
-
-		if (ticks == 0 && PREEMPTION_ENABLED) {
-			scheduler();
+		if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
+			thread_yield();
 #ifdef CONFIG_UDEBUG
 		/*
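A note on the update protocol in clock_update_counters(): the seconds1 / useconds / seconds2 triple, with a write barrier between each store, is a seqlock-style publication scheme, which is why the old TODO about "so many write barriers" could simply be dropped. The new conversion (1000000 / HZ) * current_tick is also safe in 64 bits: at HZ = 100 each tick is 10 000 µs, and a 64-bit microsecond count overflows only after about 2^64 / 10^6 seconds, roughly 585 000 years of uptime. A reader of the uptime structure accepts a sample only when both second counters match, reading the fields in the opposite order with read barriers in between. The following is a minimal sketch of such a reader; sysarg_t, read_barrier(), and the uptime_t layout are stand-ins for the real HelenOS definitions, which live elsewhere in the tree:

#include <stdatomic.h>
#include <stdint.h>

typedef uintptr_t sysarg_t;                     /* stand-in for the kernel type */
/* Stand-in for the kernel's read barrier. */
#define read_barrier() atomic_thread_fence(memory_order_acquire)

typedef struct {
	volatile sysarg_t seconds1;
	volatile sysarg_t useconds;
	volatile sysarg_t seconds2;
} uptime_t;

static void uptime_read(uptime_t *up, sysarg_t *secs, sysarg_t *usecs)
{
	sysarg_t s1, s2, us;

	do {
		/* Read in the reverse of the writer's store order. */
		s2 = up->seconds2;
		read_barrier();
		us = up->useconds;
		read_barrier();
		s1 = up->seconds1;
		/* A mismatch means the writer ran in between; retry. */
	} while (s1 != s2);

	*secs = s1;
	*usecs = us;
}

Because useconds is stored strictly between the two second counters, any sample with matching counters pairs a seconds value with a microseconds value published by the same (or a same-second) update, so no torn reading can escape the retry loop.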
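Similarly, the new atomic_store_explicit(finished, true, memory_order_release) after the handler call only establishes ordering if whoever waits for a timeout to complete (for example, code deregistering it) loads the flag with acquire semantics. A hedged sketch of such a waiter follows; timeout_wait_finished() is a hypothetical helper for illustration, not actual HelenOS API:

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical helper: spin until the timeout handler has completed.
 * The acquire load pairs with the release store in clock(), so once
 * true is observed, all of the handler's memory writes are visible
 * to this thread. */
static void timeout_wait_finished(atomic_bool *finished)
{
	while (!atomic_load_explicit(finished, memory_order_acquire)) {
		/* Busy-wait; real code might pause or yield the CPU here. */
	}
}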