Changeset e04b72d6 in mainline
- Timestamp:
- 2023-01-08T21:19:21Z (2 years ago)
- Branches:
- master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- d8503fd
- Parents:
- 5b19d80 (diff), eda43238 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - git-author:
- Jakub Jermář <jakub@…> (2023-01-08 21:19:21)
- git-committer:
- GitHub <noreply@…> (2023-01-08 21:19:21)
- Location:
- kernel/generic
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/cpu.h
r5b19d80 re04b72d6 74 74 size_t missed_clock_ticks; 75 75 76 /** Can only be accessed when interrupts are disabled. */ 77 uint64_t current_clock_tick; 78 76 79 /** 77 80 * Processor cycle accounting. -
kernel/generic/include/time/timeout.h
r5b19d80 re04b72d6 43 43 44 44 typedef struct { 45 IRQ_SPINLOCK_DECLARE(lock); 46 47 /** Link to the list of active timeouts on CURRENT->cpu */ 45 /** Link to the list of active timeouts on timeout->cpu */ 48 46 link_t link; 49 /** Timeout will be activated in this amount of clock() ticks. */50 uint64_t ticks;47 /** Timeout will be activated when current clock tick reaches this value. */ 48 uint64_t deadline; 51 49 /** Function that will be called on timeout activation. */ 52 50 timeout_handler_t handler; … … 61 59 extern void timeout_init(void); 62 60 extern void timeout_initialize(timeout_t *); 63 extern void timeout_reinitialize(timeout_t *);64 61 extern void timeout_register(timeout_t *, uint64_t, timeout_handler_t, void *); 65 62 extern bool timeout_unregister(timeout_t *); -
kernel/generic/src/time/clock.c
r5b19d80 re04b72d6 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2022 Jiří Zárevúcky 3 4 * All rights reserved. 4 5 * … … 65 66 static parea_t clock_parea; 66 67 67 /** Fragment of second68 *69 * For updating seconds correctly.70 *71 */72 static sysarg_t secfrag = 0;73 74 68 /** Initialize realtime clock counter 75 69 * … … 109 103 * 110 104 * Update it only on first processor 111 * TODO: Do we really need so many write barriers? 112 * 113 */ 114 static void clock_update_counters(void) 105 */ 106 static void clock_update_counters(uint64_t current_tick) 115 107 { 116 108 if (CPU->id == 0) { 117 secfrag += 1000000 / HZ;118 if (secfrag >= 1000000) { 119 secfrag -=1000000;120 uptime->seconds1++;121 write_barrier(); 122 uptime->useconds = secfrag;123 124 uptime->seconds2 = uptime->seconds1;125 } else126 uptime->useconds += 1000000 / HZ;109 uint64_t usec = (1000000 / HZ) * current_tick; 110 111 sysarg_t secs = usec / 1000000; 112 sysarg_t usecs = usec % 1000000; 113 114 uptime->seconds1 = secs; 115 write_barrier(); 116 uptime->useconds = usecs; 117 write_barrier(); 118 uptime->seconds2 = secs; 127 119 } 128 120 } … … 147 139 { 148 140 size_t missed_clock_ticks = CPU->missed_clock_ticks; 141 CPU->missed_clock_ticks = 0; 142 143 CPU->current_clock_tick += missed_clock_ticks + 1; 144 uint64_t current_clock_tick = CPU->current_clock_tick; 145 clock_update_counters(current_clock_tick); 149 146 150 147 /* Account CPU usage */ … … 156 153 * 157 154 */ 158 size_t i; 159 for (i = 0; i <= missed_clock_ticks; i++) { 160 /* Update counters and accounting */ 161 clock_update_counters(); 162 cpu_update_accounting(); 155 156 irq_spinlock_lock(&CPU->timeoutlock, false); 157 158 link_t *cur; 159 while ((cur = list_first(&CPU->timeout_active_list)) != NULL) { 160 timeout_t *timeout = list_get_instance(cur, timeout_t, link); 161 162 if (current_clock_tick <= timeout->deadline) { 163 break; 164 } 165 166 list_remove(cur); 167 timeout_handler_t handler = 
timeout->handler; 168 void *arg = timeout->arg; 169 170 irq_spinlock_unlock(&CPU->timeoutlock, false); 171 172 handler(arg); 163 173 164 174 irq_spinlock_lock(&CPU->timeoutlock, false); 165 166 link_t *cur;167 while ((cur = list_first(&CPU->timeout_active_list)) != NULL) {168 timeout_t *timeout = list_get_instance(cur, timeout_t,169 link);170 171 irq_spinlock_lock(&timeout->lock, false);172 if (timeout->ticks-- != 0) {173 irq_spinlock_unlock(&timeout->lock, false);174 break;175 }176 177 list_remove(cur);178 timeout_handler_t handler = timeout->handler;179 void *arg = timeout->arg;180 timeout_reinitialize(timeout);181 182 irq_spinlock_unlock(&timeout->lock, false);183 irq_spinlock_unlock(&CPU->timeoutlock, false);184 185 handler(arg);186 187 irq_spinlock_lock(&CPU->timeoutlock, false);188 }189 190 irq_spinlock_unlock(&CPU->timeoutlock, false);191 175 } 192 CPU->missed_clock_ticks = 0; 176 177 irq_spinlock_unlock(&CPU->timeoutlock, false); 193 178 194 179 /* -
kernel/generic/src/time/timeout.c
r5b19d80 re04b72d6 1 1 /* 2 2 * Copyright (c) 2001-2004 Jakub Jermar 3 * Copyright (c) 2022 Jiří Zárevúcky 3 4 * All rights reserved. 4 5 * … … 57 58 } 58 59 59 /** Reinitialize timeout60 *61 * Initialize all members except the lock.62 *63 * @param timeout Timeout to be initialized.64 *65 */66 void timeout_reinitialize(timeout_t *timeout)67 {68 timeout->cpu = NULL;69 timeout->ticks = 0;70 timeout->handler = NULL;71 timeout->arg = NULL;72 link_initialize(&timeout->link);73 }74 75 60 /** Initialize timeout 76 61 * … … 82 67 void timeout_initialize(timeout_t *timeout) 83 68 { 84 irq_spinlock_initialize(&timeout->lock, "timeout_t_lock");85 timeout _reinitialize(timeout);69 link_initialize(&timeout->link); 70 timeout->cpu = NULL; 86 71 } 87 72 … … 102 87 { 103 88 irq_spinlock_lock(&CPU->timeoutlock, true); 104 irq_spinlock_lock(&timeout->lock, false);105 89 106 if (timeout->cpu) 107 panic("Unexpected: timeout->cpu != 0."); 90 assert(!link_in_use(&timeout->link)); 108 91 109 92 timeout->cpu = CPU; 110 timeout->ticks = us2ticks(time); 111 93 timeout->deadline = CPU->current_clock_tick + us2ticks(time); 112 94 timeout->handler = handler; 113 95 timeout->arg = arg; 114 96 115 /* 116 * Insert timeout into the active timeouts list according to timeout->ticks. 117 */ 118 uint64_t sum = 0; 119 timeout_t *target = NULL; 120 link_t *cur, *prev; 121 prev = NULL; 122 for (cur = list_first(&CPU->timeout_active_list); 123 cur != NULL; cur = list_next(cur, &CPU->timeout_active_list)) { 124 target = list_get_instance(cur, timeout_t, link); 125 irq_spinlock_lock(&target->lock, false); 97 /* Insert timeout into the active timeouts list according to timeout->deadline. 
*/ 126 98 127 if (timeout->ticks < sum + target->ticks) { 128 irq_spinlock_unlock(&target->lock, false); 129 break; 99 link_t *last = list_last(&CPU->timeout_active_list); 100 if (last == NULL || timeout->deadline >= list_get_instance(last, timeout_t, link)->deadline) { 101 list_append(&timeout->link, &CPU->timeout_active_list); 102 } else { 103 for (link_t *cur = list_first(&CPU->timeout_active_list); cur != NULL; 104 cur = list_next(cur, &CPU->timeout_active_list)) { 105 106 if (timeout->deadline < list_get_instance(cur, timeout_t, link)->deadline) { 107 list_insert_before(&timeout->link, cur); 108 break; 109 } 130 110 } 131 132 sum += target->ticks;133 irq_spinlock_unlock(&target->lock, false);134 prev = cur;135 111 } 136 112 137 if (prev == NULL)138 list_prepend(&timeout->link, &CPU->timeout_active_list);139 else140 list_insert_after(&timeout->link, prev);141 142 /*143 * Adjust timeout->ticks according to ticks144 * accumulated in target's predecessors.145 */146 timeout->ticks -= sum;147 148 /*149 * Decrease ticks of timeout's immediate succesor by timeout->ticks.150 */151 if (cur != NULL) {152 irq_spinlock_lock(&target->lock, false);153 target->ticks -= timeout->ticks;154 irq_spinlock_unlock(&target->lock, false);155 }156 157 irq_spinlock_unlock(&timeout->lock, false);158 113 irq_spinlock_unlock(&CPU->timeoutlock, true); 159 114 } … … 170 125 bool timeout_unregister(timeout_t *timeout) 171 126 { 172 DEADLOCK_PROBE_INIT(p_tolock);127 assert(timeout->cpu); 173 128 174 grab_locks: 175 irq_spinlock_lock(&timeout->lock, true); 176 if (!timeout->cpu) {177 irq_spinlock_unlock(&timeout->lock, true);178 return false;129 irq_spinlock_lock(&timeout->cpu->timeoutlock, true); 130 131 bool success = link_in_use(&timeout->link); 132 if (success) { 133 list_remove(&timeout->link); 179 134 } 180 135 181 if (!irq_spinlock_trylock(&timeout->cpu->timeoutlock)) { 182 irq_spinlock_unlock(&timeout->lock, true); 183 DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD); 184 goto grab_locks; 
185 } 186 187 /* 188 * Now we know for sure that timeout hasn't been activated yet 189 * and is lurking in timeout->cpu->timeout_active_list. 190 */ 191 192 link_t *cur = list_next(&timeout->link, 193 &timeout->cpu->timeout_active_list); 194 if (cur != NULL) { 195 timeout_t *tmp = list_get_instance(cur, timeout_t, link); 196 irq_spinlock_lock(&tmp->lock, false); 197 tmp->ticks += timeout->ticks; 198 irq_spinlock_unlock(&tmp->lock, false); 199 } 200 201 list_remove(&timeout->link); 202 irq_spinlock_unlock(&timeout->cpu->timeoutlock, false); 203 204 timeout_reinitialize(timeout); 205 irq_spinlock_unlock(&timeout->lock, true); 206 207 return true; 136 irq_spinlock_unlock(&timeout->cpu->timeoutlock, true); 137 return success; 208 138 } 209 139
Note:
See TracChangeset
for help on using the changeset viewer.