Changes in kernel/generic/src/time/clock.c [2e4e706:d0c82c5] in mainline
File:
- kernel/generic/src/time/clock.c (1 edited)
Legend:
- Unmodified (lines prefixed with a space)
- Added (lines prefixed with "+")
- Removed (lines prefixed with "-")
kernel/generic/src/time/clock.c
--- r2e4e706
+++ rd0c82c5

 /**
  * @file
- * @brief
+ * @brief High-level clock interrupt handler.
  *
  * This file contains the clock() function which is the source
  * of preemption. It is also responsible for executing expired
  * timeouts.
+ *
  */
 
+#include <time/clock.h>
 #include <time/timeout.h>
…
 #include <mm/frame.h>
 #include <ddi/ddi.h>
+#include <arch/cycle.h>
 
 /* Pointer to variable with uptime */
…
 static parea_t clock_parea;
 
-/* Variable holding fragment of second, so that we would update
- * seconds correctly
+/** Fragment of second
+ *
+ * For updating seconds correctly.
+ *
  */
 static unative_t secfrag = 0;
…
  * information about realtime data. We allocate 1 page with these
  * data and update it periodically.
+ *
  */
 void clock_counter_init(void)
 {
-	void *faddr;
-
-	faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
+	void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
 	if (!faddr)
 		panic("Cannot allocate page for clock.");
…
 	uptime->seconds2 = 0;
 	uptime->useconds = 0;
 
 	clock_parea.pbase = (uintptr_t) faddr;
 	clock_parea.frames = 1;
 	ddi_parea_register(&clock_parea);
 
 	/*
 	 * Prepare information for the userspace so that it can successfully
 	 * physmem_map() the clock_parea.
+	 *
 	 */
 	sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
…
 }
 
-
 /** Update public counters
  *
  * Update it only on first processor
  * TODO: Do we really need so many write barriers?
+ *
  */
 static void clock_update_counters(void)
…
 }
 
+static void cpu_update_accounting(void)
+{
+	irq_spinlock_lock(&CPU->lock, false);
+	uint64_t now = get_cycle();
+	CPU->busy_cycles += now - CPU->last_cycle;
+	CPU->last_cycle = now;
+	irq_spinlock_unlock(&CPU->lock, false);
+}
+
 /** Clock routine
  *
…
 void clock(void)
 {
-	link_t *l;
-	timeout_t *h;
-	timeout_handler_t f;
-	void *arg;
 	size_t missed_clock_ticks = CPU->missed_clock_ticks;
-	unsigned int i;
-
-	/* Account lost ticks to CPU usage */
-	if (CPU->idle) {
-		CPU->idle_ticks += missed_clock_ticks + 1;
-	} else {
-		CPU->busy_ticks += missed_clock_ticks + 1;
-	}
-	CPU->idle = false;
 
+	/* Account CPU usage */
+	cpu_update_accounting();
+
 	/*
 	 * To avoid lock ordering problems,
 	 * run all expired timeouts as you visit them.
+	 *
 	 */
+	size_t i;
 	for (i = 0; i <= missed_clock_ticks; i++) {
+		/* Update counters and accounting */
 		clock_update_counters();
-		spinlock_lock(&CPU->timeoutlock);
-		while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-			h = list_get_instance(l, timeout_t, link);
-			spinlock_lock(&h->lock);
-			if (h->ticks-- != 0) {
-				spinlock_unlock(&h->lock);
+		cpu_update_accounting();
+
+		irq_spinlock_lock(&CPU->timeoutlock, false);
+
+		link_t *cur;
+		while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
+			timeout_t *timeout = list_get_instance(cur, timeout_t, link);
+
+			irq_spinlock_lock(&timeout->lock, false);
+			if (timeout->ticks-- != 0) {
+				irq_spinlock_unlock(&timeout->lock, false);
 				break;
 			}
-			list_remove(l);
-			f = h->handler;
-			arg = h->arg;
-			timeout_reinitialize(h);
-			spinlock_unlock(&h->lock);
-			spinlock_unlock(&CPU->timeoutlock);
-
-			f(arg);
-
-			spinlock_lock(&CPU->timeoutlock);
+
+			list_remove(cur);
+			timeout_handler_t handler = timeout->handler;
+			void *arg = timeout->arg;
+			timeout_reinitialize(timeout);
+
+			irq_spinlock_unlock(&timeout->lock, false);
+			irq_spinlock_unlock(&CPU->timeoutlock, false);
+
+			handler(arg);
+
+			irq_spinlock_lock(&CPU->timeoutlock, false);
 		}
-		spinlock_unlock(&CPU->timeoutlock);
+
+		irq_spinlock_unlock(&CPU->timeoutlock, false);
 	}
 	CPU->missed_clock_ticks = 0;
 
 	/*
 	 * Do CPU usage accounting and find out whether to preempt THREAD.
+	 *
 	 */
 
 	if (THREAD) {
 		uint64_t ticks;
 
-		spinlock_lock(&CPU->lock);
+		irq_spinlock_lock(&CPU->lock, false);
 		CPU->needs_relink += 1 + missed_clock_ticks;
-		spinlock_unlock(&CPU->lock);
-
-		spinlock_lock(&THREAD->lock);
+		irq_spinlock_unlock(&CPU->lock, false);
+
+		irq_spinlock_lock(&THREAD->lock, false);
 		if ((ticks = THREAD->ticks)) {
 			if (ticks >= 1 + missed_clock_ticks)
…
 				THREAD->ticks = 0;
 		}
-		spinlock_unlock(&THREAD->lock);
+		irq_spinlock_unlock(&THREAD->lock, false);
 
 		if ((!ticks) && (!PREEMPTION_DISABLED)) {
-#ifdef CONFIG_UDEBUG
-			istate_t *istate;
-#endif
 			scheduler();
 #ifdef CONFIG_UDEBUG
…
 			 * before it begins executing userspace code.
 			 */
-			istate = THREAD->udebug.uspace_state;
-			if (istate && istate_from_uspace(istate))
+			istate_t *istate = THREAD->udebug.uspace_state;
+			if ((istate) && (istate_from_uspace(istate)))
 				udebug_before_thread_runs();
 #endif
 		}
 	}
-
 }
 
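The most visible change in the diff is the switch from tick-granularity accounting (the removed idle_ticks/busy_ticks bookkeeping) to cycle-granularity accounting through the new cpu_update_accounting() helper, which charges the cycles elapsed since the last update to the CPU. The sketch below replays that pattern in ordinary user-space C so it can be compiled and run; every name in it is an illustrative stand-in rather than the HelenOS API: clock_gettime() plays the role of the kernel's get_cycle(), a plain struct plays cpu_t, and the CPU->lock irq_spinlock is omitted because the sketch is single-threaded.

/*
 * Minimal user-space sketch of cycle-based CPU accounting.
 * Stand-ins, not HelenOS code: clock_gettime() for get_cycle(),
 * a plain struct for cpu_t, no locking (single thread).
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef struct {
	uint64_t busy_cycles;  /* time charged to this CPU so far */
	uint64_t last_cycle;   /* counter value at the previous update */
} cpu_t;

/* Stand-in for get_cycle(): a monotonically increasing nanosecond counter. */
static uint64_t get_cycle(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec;
}

/* Mirrors cpu_update_accounting(): charge the delta, move the watermark. */
static void cpu_update_accounting(cpu_t *cpu)
{
	uint64_t now = get_cycle();
	cpu->busy_cycles += now - cpu->last_cycle;
	cpu->last_cycle = now;
}

int main(void)
{
	cpu_t cpu = { 0, get_cycle() };

	for (volatile long i = 0; i < 10000000; i++);  /* simulated busy work */

	cpu_update_accounting(&cpu);
	printf("busy for %llu ns\n", (unsigned long long) cpu.busy_cycles);
	return 0;
}

The point of charging deltas instead of counting ticks is that work done between two clock interrupts, including any missed ticks, is accounted at full counter resolution rather than being rounded to whole ticks, as the removed "missed_clock_ticks + 1" arithmetic did.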
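The rewritten timeout loop in clock() also preserves a locking discipline that survives the spinlock to irq_spinlock conversion unchanged: the expired timeout is unlinked and its handler and argument are copied out while both locks are held, and only then are the locks dropped so the handler runs without them, since a handler may legitimately take timeoutlock itself, for instance to re-register a timeout. The following self-contained sketch demonstrates that discipline, assuming POSIX threads; pthread mutexes stand in for the kernel's irq_spinlocks and a simple singly-linked list replaces the link_t machinery, so the types and helpers here are hypothetical stand-ins, not HelenOS code.

/* Sketch of the "drop locks before calling the handler" discipline.
 * Compile with: cc sketch.c -pthread
 */
#include <pthread.h>
#include <stdio.h>

typedef void (*timeout_handler_t)(void *);

typedef struct timeout {
	struct timeout *next;      /* singly-linked active list */
	pthread_mutex_t lock;      /* stands in for the timeout's irq_spinlock */
	int ticks;                 /* clock ticks remaining until expiry */
	timeout_handler_t handler;
	void *arg;
} timeout_t;

static pthread_mutex_t timeoutlock = PTHREAD_MUTEX_INITIALIZER;
static timeout_t *timeout_active_head = NULL;

/* One clock tick: age the head of the list, fire every expired timeout. */
static void tick(void)
{
	pthread_mutex_lock(&timeoutlock);

	timeout_t *cur;
	while ((cur = timeout_active_head) != NULL) {
		pthread_mutex_lock(&cur->lock);
		if (cur->ticks-- != 0) {
			/* Head has not expired yet; the list is sorted, so stop. */
			pthread_mutex_unlock(&cur->lock);
			break;
		}

		/* Unlink and copy out everything the handler call needs. */
		timeout_active_head = cur->next;
		timeout_handler_t handler = cur->handler;
		void *arg = cur->arg;

		/*
		 * Drop both locks before invoking the handler, exactly as
		 * clock() does: the handler may take timeoutlock itself.
		 */
		pthread_mutex_unlock(&cur->lock);
		pthread_mutex_unlock(&timeoutlock);

		handler(arg);

		pthread_mutex_lock(&timeoutlock);
	}

	pthread_mutex_unlock(&timeoutlock);
}

static void say(void *arg)
{
	printf("timeout fired: %s\n", (const char *) arg);
}

int main(void)
{
	timeout_t t = { NULL, PTHREAD_MUTEX_INITIALIZER, 1, say, "hello" };

	timeout_active_head = &t;
	tick();  /* ages the timeout: ticks 1 -> 0, nothing fires */
	tick();  /* expired: unlinks the timeout and runs say("hello") */
	return 0;
}

Re-acquiring timeoutlock after each callback, instead of holding it across the call, is what makes it safe for a handler to call a hypothetical timeout_register() from inside the callback; the price, visible in clock() as well, is that the active list must be re-examined from the head on every iteration.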