Changes in kernel/generic/src/time/clock.c [da1bafb:b8f7ea78] in mainline
Files:
- kernel/generic/src/time/clock.c (1 edited)
Legend:
- Unmodified lines carry no prefix
- Removed lines (present only in rda1bafb) are prefixed with "-"
- Added lines (present only in rb8f7ea78) are prefixed with "+"
kernel/generic/src/time/clock.c
--- kernel/generic/src/time/clock.c  (rda1bafb)
+++ kernel/generic/src/time/clock.c  (rb8f7ea78)
@@ -33,12 +33,11 @@
 /**
  * @file
- * @brief
+ * @brief High-level clock interrupt handler.
  *
  * This file contains the clock() function which is the source
  * of preemption. It is also responsible for executing expired
  * timeouts.
- *
- */
-
+ */
+
 #include <time/clock.h>
 #include <time/timeout.h>
@@ -64,8 +63,6 @@
 static parea_t clock_parea;
 
-/** Fragment of second
- *
- * For updating seconds correctly.
- *
+/* Variable holding fragment of second, so that we would update
+ * seconds correctly
  */
 static unative_t secfrag = 0;
@@ -76,9 +73,10 @@
  * information about realtime data. We allocate 1 page with these
  * data and update it periodically.
- *
  */
 void clock_counter_init(void)
 {
-    void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
+    void *faddr;
+
+    faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
     if (!faddr)
         panic("Cannot allocate page for clock.");
@@ -89,13 +87,12 @@
     uptime->seconds2 = 0;
     uptime->useconds = 0;
 
     clock_parea.pbase = (uintptr_t) faddr;
     clock_parea.frames = 1;
     ddi_parea_register(&clock_parea);
 
     /*
      * Prepare information for the userspace so that it can successfully
      * physmem_map() the clock_parea.
-     *
      */
     sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
@@ -103,9 +100,9 @@
 }
 
+
 /** Update public counters
  *
  * Update it only on first processor
- * TODO: Do we really need so many write barriers?
- *
+ * TODO: Do we really need so many write barriers?
  */
 static void clock_update_counters(void)
@@ -134,64 +131,60 @@
 void clock(void)
 {
+    link_t *l;
+    timeout_t *h;
+    timeout_handler_t f;
+    void *arg;
     size_t missed_clock_ticks = CPU->missed_clock_ticks;
-
+    unsigned int i;
+
     /* Account lost ticks to CPU usage */
-    if (CPU->idle)
+    if (CPU->idle) {
         CPU->idle_ticks += missed_clock_ticks + 1;
-    else
+    } else {
         CPU->busy_ticks += missed_clock_ticks + 1;
-
+    }
     CPU->idle = false;
 
     /*
      * To avoid lock ordering problems,
      * run all expired timeouts as you visit them.
-     *
      */
-    size_t i;
     for (i = 0; i <= missed_clock_ticks; i++) {
         clock_update_counters();
-        irq_spinlock_lock(&CPU->timeoutlock, false);
-
-        link_t *cur;
-        while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-            timeout_t *timeout = list_get_instance(cur, timeout_t, link);
-
-            irq_spinlock_lock(&timeout->lock, false);
-            if (timeout->ticks-- != 0) {
-                irq_spinlock_unlock(&timeout->lock, false);
+        spinlock_lock(&CPU->timeoutlock);
+        while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
+            h = list_get_instance(l, timeout_t, link);
+            spinlock_lock(&h->lock);
+            if (h->ticks-- != 0) {
+                spinlock_unlock(&h->lock);
                 break;
             }
-
-            list_remove(cur);
-            timeout_handler_t handler = timeout->handler;
-            void *arg = timeout->arg;
-            timeout_reinitialize(timeout);
-
-            irq_spinlock_unlock(&timeout->lock, false);
-            irq_spinlock_unlock(&CPU->timeoutlock, false);
-
-            handler(arg);
-
-            irq_spinlock_lock(&CPU->timeoutlock, false);
+            list_remove(l);
+            f = h->handler;
+            arg = h->arg;
+            timeout_reinitialize(h);
+            spinlock_unlock(&h->lock);
+            spinlock_unlock(&CPU->timeoutlock);
+
+            f(arg);
+
+            spinlock_lock(&CPU->timeoutlock);
         }
-
-        irq_spinlock_unlock(&CPU->timeoutlock, false);
+        spinlock_unlock(&CPU->timeoutlock);
     }
     CPU->missed_clock_ticks = 0;
 
     /*
      * Do CPU usage accounting and find out whether to preempt THREAD.
-     *
     */
 
     if (THREAD) {
         uint64_t ticks;
 
-        irq_spinlock_lock(&CPU->lock, false);
+        spinlock_lock(&CPU->lock);
         CPU->needs_relink += 1 + missed_clock_ticks;
-        irq_spinlock_unlock(&CPU->lock, false);
-
-        irq_spinlock_lock(&THREAD->lock, false);
+        spinlock_unlock(&CPU->lock);
+
+        spinlock_lock(&THREAD->lock);
         if ((ticks = THREAD->ticks)) {
             if (ticks >= 1 + missed_clock_ticks)
@@ -200,7 +193,10 @@
                 THREAD->ticks = 0;
         }
-        irq_spinlock_unlock(&THREAD->lock, false);
+        spinlock_unlock(&THREAD->lock);
 
-        if ((!ticks) && (!PREEMPTION_DISABLED)) {
+        if (!ticks && !PREEMPTION_DISABLED) {
+#ifdef CONFIG_UDEBUG
+            istate_t *istate;
+#endif
             scheduler();
 #ifdef CONFIG_UDEBUG
@@ -209,10 +205,11 @@
              * before it begins executing userspace code.
              */
-            istate_t *istate = THREAD->udebug.uspace_state;
-            if ((istate) && (istate_from_uspace(istate)))
+            istate = THREAD->udebug.uspace_state;
+            if (istate && istate_from_uspace(istate))
                 udebug_before_thread_runs();
 #endif
         }
     }
+
 }
 
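The central pattern in the clock() hunks above, kept on both sides of the diff, is the one named by the "to avoid lock ordering problems, run all expired timeouts as you visit them" comment: the per-CPU timeout-list lock (and the timeout's own lock) is released before the expired handler is invoked, and the list lock is re-taken afterwards before the walk continues from the list head. The sketch below restates that drop-locks, call, reacquire discipline in isolation. It is an illustrative sketch only, not HelenOS code: every name in it (my_timer_t, pending, list_lock, list_unlock, tick, report) is invented for the example, and the no-op lock functions stand in for real spinlock primitives.

/*
 * Illustrative sketch (not HelenOS code): fire expired timeouts from a
 * lock-protected delta list without holding any lock across a handler
 * call. All identifiers here are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

typedef void (*handler_t)(void *);

typedef struct my_timer {
    struct my_timer *next;
    unsigned ticks;        /* delta ticks until expiry */
    handler_t handler;
    void *arg;
} my_timer_t;

static my_timer_t *pending;    /* head of the pending-timeout list */

static void list_lock(void) { /* placeholder for a real spinlock_lock() */ }
static void list_unlock(void) { /* placeholder for a real spinlock_unlock() */ }

/* One clock tick: mirrors the locking discipline of the clock() loop. */
static void tick(void)
{
    list_lock();
    while (pending != NULL) {
        my_timer_t *t = pending;
        if (t->ticks-- != 0)
            break;                  /* head not expired yet */

        pending = t->next;          /* unlink while still locked */
        handler_t f = t->handler;
        void *arg = t->arg;

        /*
         * Drop the list lock before calling the handler: the handler
         * may take other locks or re-arm timeouts, so calling it with
         * the list lock held could deadlock via lock-order inversion.
         */
        list_unlock();
        f(arg);
        list_lock();                /* reacquire; resume from the head */
    }
    list_unlock();
}

static void report(void *arg)
{
    printf("timeout fired: %s\n", (const char *) arg);
}

int main(void)
{
    my_timer_t t = { NULL, 1, report, "hello" };
    pending = &t;
    tick();    /* delta 1 -> 0: nothing fires yet */
    tick();    /* delta hits 0: report() runs with no locks held */
    return 0;
}

Re-reading the list head after reacquiring the lock, rather than continuing from a saved pointer, matters because the handler ran unlocked and may have removed or re-armed entries. Both revisions in the diff follow this discipline; they differ mainly in which primitives they use, with the rda1bafb side calling the irq_spinlock_* variants that take an extra boolean argument (false throughout clock(), presumably because the clock interrupt handler already executes with interrupts disabled).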