Changes in kernel/generic/src/time/clock.c [d0c82c5:2e4e706] in mainline
- File: kernel/generic/src/time/clock.c (1 edited)
Legend:
- ' ' (space prefix): unmodified
- '+': added in 2e4e706
- '-': removed from d0c82c5
kernel/generic/src/time/clock.c
--- kernel/generic/src/time/clock.c (d0c82c5)
+++ kernel/generic/src/time/clock.c (2e4e706)
@@ -33,12 +33,11 @@
 /**
  * @file
- * @brief
+ * @brief High-level clock interrupt handler.
  *
  * This file contains the clock() function which is the source
  * of preemption. It is also responsible for executing expired
  * timeouts.
- *
  */

 #include <time/clock.h>
 #include <time/timeout.h>
@@ -57,5 +56,4 @@
 #include <mm/frame.h>
 #include <ddi/ddi.h>
-#include <arch/cycle.h>

 /* Pointer to variable with uptime */
@@ -65,8 +63,6 @@
 static parea_t clock_parea;

-/** Fragment of second
- *
- * For updating seconds correctly.
- *
+/* Variable holding fragment of second, so that we would update
+ * seconds correctly
  */
 static unative_t secfrag = 0;
@@ -77,9 +73,10 @@
  * information about realtime data. We allocate 1 page with these
  * data and update it periodically.
- *
  */
 void clock_counter_init(void)
 {
-	void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
+	void *faddr;
+
+	faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
 	if (!faddr)
 		panic("Cannot allocate page for clock.");
@@ -90,13 +87,12 @@
 	uptime->seconds2 = 0;
 	uptime->useconds = 0;

 	clock_parea.pbase = (uintptr_t) faddr;
 	clock_parea.frames = 1;
 	ddi_parea_register(&clock_parea);

 	/*
 	 * Prepare information for the userspace so that it can successfully
 	 * physmem_map() the clock_parea.
-	 *
 	 */
 	sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
@@ -104,9 +100,9 @@
 }

+
 /** Update public counters
  *
  * Update it only on first processor
- * TODO: Do we really need so many write barriers?
- *
+ * TODO: Do we really need so many write barriers?
  */
 static void clock_update_counters(void)
@@ -126,13 +122,4 @@
 }

-static void cpu_update_accounting(void)
-{
-	irq_spinlock_lock(&CPU->lock, false);
-	uint64_t now = get_cycle();
-	CPU->busy_cycles += now - CPU->last_cycle;
-	CPU->last_cycle = now;
-	irq_spinlock_unlock(&CPU->lock, false);
-}
-
 /** Clock routine
  *
@@ -144,62 +131,60 @@
 void clock(void)
 {
+	link_t *l;
+	timeout_t *h;
+	timeout_handler_t f;
+	void *arg;
 	size_t missed_clock_ticks = CPU->missed_clock_ticks;
-
-	/* Account CPU usage */
-	cpu_update_accounting();
-
+	unsigned int i;
+
+	/* Account lost ticks to CPU usage */
+	if (CPU->idle) {
+		CPU->idle_ticks += missed_clock_ticks + 1;
+	} else {
+		CPU->busy_ticks += missed_clock_ticks + 1;
+	}
+	CPU->idle = false;
+
 	/*
 	 * To avoid lock ordering problems,
 	 * run all expired timeouts as you visit them.
-	 *
 	 */
-	size_t i;
 	for (i = 0; i <= missed_clock_ticks; i++) {
-		/* Update counters and accounting */
 		clock_update_counters();
-		cpu_update_accounting();
-
-		irq_spinlock_lock(&CPU->timeoutlock, false);
-
-		link_t *cur;
-		while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-			timeout_t *timeout = list_get_instance(cur, timeout_t, link);
-
-			irq_spinlock_lock(&timeout->lock, false);
-			if (timeout->ticks-- != 0) {
-				irq_spinlock_unlock(&timeout->lock, false);
+		spinlock_lock(&CPU->timeoutlock);
+		while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
+			h = list_get_instance(l, timeout_t, link);
+			spinlock_lock(&h->lock);
+			if (h->ticks-- != 0) {
+				spinlock_unlock(&h->lock);
 				break;
 			}
-
-			list_remove(cur);
-			timeout_handler_t handler = timeout->handler;
-			void *arg = timeout->arg;
-			timeout_reinitialize(timeout);
-
-			irq_spinlock_unlock(&timeout->lock, false);
-			irq_spinlock_unlock(&CPU->timeoutlock, false);
-
-			handler(arg);
-
-			irq_spinlock_lock(&CPU->timeoutlock, false);
+			list_remove(l);
+			f = h->handler;
+			arg = h->arg;
+			timeout_reinitialize(h);
+			spinlock_unlock(&h->lock);
+			spinlock_unlock(&CPU->timeoutlock);
+
+			f(arg);
+
+			spinlock_lock(&CPU->timeoutlock);
 		}
-
-		irq_spinlock_unlock(&CPU->timeoutlock, false);
+		spinlock_unlock(&CPU->timeoutlock);
 	}
 	CPU->missed_clock_ticks = 0;

 	/*
 	 * Do CPU usage accounting and find out whether to preempt THREAD.
-	 *
 	 */

 	if (THREAD) {
 		uint64_t ticks;

-		irq_spinlock_lock(&CPU->lock, false);
+		spinlock_lock(&CPU->lock);
 		CPU->needs_relink += 1 + missed_clock_ticks;
-		irq_spinlock_unlock(&CPU->lock, false);
-
-		irq_spinlock_lock(&THREAD->lock, false);
+		spinlock_unlock(&CPU->lock);
+
+		spinlock_lock(&THREAD->lock);
 		if ((ticks = THREAD->ticks)) {
 			if (ticks >= 1 + missed_clock_ticks)
@@ -208,7 +193,10 @@
 				THREAD->ticks = 0;
 		}
-		irq_spinlock_unlock(&THREAD->lock, false);
+		spinlock_unlock(&THREAD->lock);

 		if ((!ticks) && (!PREEMPTION_DISABLED)) {
+#ifdef CONFIG_UDEBUG
+			istate_t *istate;
+#endif
 			scheduler();
 #ifdef CONFIG_UDEBUG
@@ -217,9 +205,10 @@
 			 * before it begins executing userspace code.
 			 */
-			istate_t *istate = THREAD->udebug.uspace_state;
-			if ((istate) && (istate_from_uspace(istate)))
+			istate = THREAD->udebug.uspace_state;
+			if (istate && istate_from_uspace(istate))
 				udebug_before_thread_runs();
 #endif
 		}
 	}
+
 }
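
Two details of the clock() loop are easy to miss when reading the diff. First, the loop body executes missed_clock_ticks + 1 times: once for the current tick and once for each tick lost while interrupts were disabled, which is why the idle/busy accounting on the 2e4e706 side also adds missed_clock_ticks + 1. Second, both revisions follow the same locking discipline when running expired timeouts: the handler and its argument are copied out while the locks are held, every lock is released before the handler is invoked, and the scan then restarts from the list head, because the handler may have registered or cancelled timeouts in the meantime; only the lock primitive (irq_spinlock versus plain spinlock) differs between the two sides. The following is a minimal userspace sketch of that pattern, not HelenOS code: pthread_mutex_t stands in for the kernel spinlock, and the list and timeout types are simplified stand-ins for the real kernel structures.

/*
 * Sketch of the timeout-draining pattern from clock(): copy out the
 * handler and argument, drop the lock before calling the handler, then
 * re-acquire the lock and restart from the (possibly changed) list head.
 * Build with: cc -o demo demo.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

typedef void (*timeout_handler_t)(void *);

/* Simplified stand-in for the kernel's timeout_t; not the real layout. */
typedef struct timeout {
	struct timeout *next;      /* next entry in the active list */
	unsigned int ticks;        /* ticks left; 0 means expired */
	timeout_handler_t handler; /* called once the timeout expires */
	void *arg;                 /* opaque argument for the handler */
} timeout_t;

/* pthread_mutex_t stands in for the kernel spinlock. */
static pthread_mutex_t timeoutlock = PTHREAD_MUTEX_INITIALIZER;
static timeout_t *active_head = NULL;

/* Called once per simulated clock tick. */
static void run_expired_timeouts(void)
{
	pthread_mutex_lock(&timeoutlock);
	while (active_head != NULL) {
		timeout_t *h = active_head;
		if (h->ticks-- != 0)
			break;  /* head not expired; later entries expire even later */

		/* Unlink and copy what we need while still holding the lock. */
		active_head = h->next;
		timeout_handler_t f = h->handler;
		void *arg = h->arg;

		/*
		 * Drop the lock before invoking the handler: the handler may
		 * itself take the timeout lock (e.g. to re-register itself),
		 * so calling it with the lock held risks deadlock.
		 */
		pthread_mutex_unlock(&timeoutlock);
		f(arg);
		pthread_mutex_lock(&timeoutlock);
		/* Restart from the head: the list may have changed under us. */
	}
	pthread_mutex_unlock(&timeoutlock);
}

static void report(void *arg)
{
	printf("timeout fired: %s\n", (const char *) arg);
}

int main(void)
{
	timeout_t t = { NULL, 0, report, "demo" };
	active_head = &t;
	run_expired_timeouts();  /* prints "timeout fired: demo" */
	return 0;
}

The sketch keeps only the locking discipline, not the kernel's list bookkeeping; note that clock() only ever decrements the head entry's ticks on each pass, which works because the active timeout list keeps expiry times sorted and relative to the preceding entry.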