Changes in kernel/generic/src/time/clock.c [b8f7ea78:da1bafb] in mainline
Files: kernel/generic/src/time/clock.c (1 edited)
Legend:
- Unmodified (no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/time/clock.c
--- kernel/generic/src/time/clock.c	rb8f7ea78
+++ kernel/generic/src/time/clock.c	rda1bafb

 /**
  * @file
- * @brief
+ * @brief High-level clock interrupt handler.
  *
  * This file contains the clock() function which is the source
  * of preemption. It is also responsible for executing expired
  * timeouts.
- */
+ *
+ */
 
 #include <time/clock.h>
 #include <time/timeout.h>
…
 static parea_t clock_parea;
 
-/* Variable holding fragment of second, so that we would update
- * seconds correctly
+/** Fragment of second
+ *
+ * For updating seconds correctly.
+ *
  */
 static unative_t secfrag = 0;
…
  * information about realtime data. We allocate 1 page with these
  * data and update it periodically.
+ *
  */
 void clock_counter_init(void)
 {
-    void *faddr;
-    
-    faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
+    void *faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
     if (!faddr)
         panic("Cannot allocate page for clock.");
…
     uptime->seconds2 = 0;
     uptime->useconds = 0;
     
     clock_parea.pbase = (uintptr_t) faddr;
     clock_parea.frames = 1;
     ddi_parea_register(&clock_parea);
     
     /*
      * Prepare information for the userspace so that it can successfully
      * physmem_map() the clock_parea.
+     *
      */
     sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
…
 }
 
-
 /** Update public counters
  *
  * Update it only on first processor
- * TODO: Do we really need so many write barriers?
+ * TODO: Do we really need so many write barriers?
+ *
  */
 static void clock_update_counters(void)
…
 void clock(void)
 {
-    link_t *l;
-    timeout_t *h;
-    timeout_handler_t f;
-    void *arg;
     size_t missed_clock_ticks = CPU->missed_clock_ticks;
-    unsigned int i;
     
     /* Account lost ticks to CPU usage */
-    if (CPU->idle) {
+    if (CPU->idle)
         CPU->idle_ticks += missed_clock_ticks + 1;
-    } else {
+    else
         CPU->busy_ticks += missed_clock_ticks + 1;
-    }
+    
     CPU->idle = false;
     
     /*
      * To avoid lock ordering problems,
      * run all expired timeouts as you visit them.
+     *
      */
+    size_t i;
     for (i = 0; i <= missed_clock_ticks; i++) {
         clock_update_counters();
-        spinlock_lock(&CPU->timeoutlock);
-        while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
-            h = list_get_instance(l, timeout_t, link);
-            spinlock_lock(&h->lock);
-            if (h->ticks-- != 0) {
-                spinlock_unlock(&h->lock);
+        irq_spinlock_lock(&CPU->timeoutlock, false);
+        
+        link_t *cur;
+        while ((cur = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
+            timeout_t *timeout = list_get_instance(cur, timeout_t, link);
+            
+            irq_spinlock_lock(&timeout->lock, false);
+            if (timeout->ticks-- != 0) {
+                irq_spinlock_unlock(&timeout->lock, false);
                 break;
             }
-            list_remove(l);
-            f = h->handler;
-            arg = h->arg;
-            timeout_reinitialize(h);
-            spinlock_unlock(&h->lock);
-            spinlock_unlock(&CPU->timeoutlock);
-            
-            f(arg);
-            
-            spinlock_lock(&CPU->timeoutlock);
+            
+            list_remove(cur);
+            timeout_handler_t handler = timeout->handler;
+            void *arg = timeout->arg;
+            timeout_reinitialize(timeout);
+            
+            irq_spinlock_unlock(&timeout->lock, false);
+            irq_spinlock_unlock(&CPU->timeoutlock, false);
+            
+            handler(arg);
+            
+            irq_spinlock_lock(&CPU->timeoutlock, false);
         }
-        spinlock_unlock(&CPU->timeoutlock);
+        
+        irq_spinlock_unlock(&CPU->timeoutlock, false);
     }
     CPU->missed_clock_ticks = 0;
     
     /*
      * Do CPU usage accounting and find out whether to preempt THREAD.
+     *
      */
     
     if (THREAD) {
         uint64_t ticks;
         
-        spinlock_lock(&CPU->lock);
+        irq_spinlock_lock(&CPU->lock, false);
         CPU->needs_relink += 1 + missed_clock_ticks;
-        spinlock_unlock(&CPU->lock);
-        
-        spinlock_lock(&THREAD->lock);
+        irq_spinlock_unlock(&CPU->lock, false);
+        
+        irq_spinlock_lock(&THREAD->lock, false);
         if ((ticks = THREAD->ticks)) {
             if (ticks >= 1 + missed_clock_ticks)
…
             THREAD->ticks = 0;
         }
-        spinlock_unlock(&THREAD->lock);
-        
-        if (!ticks && !PREEMPTION_DISABLED) {
-#ifdef CONFIG_UDEBUG
-            istate_t *istate;
-#endif
+        irq_spinlock_unlock(&THREAD->lock, false);
+        
+        if ((!ticks) && (!PREEMPTION_DISABLED)) {
             scheduler();
 #ifdef CONFIG_UDEBUG
…
              * before it begins executing userspace code.
              */
-            istate = THREAD->udebug.uspace_state;
-            if (istate && istate_from_uspace(istate))
+            istate_t *istate = THREAD->udebug.uspace_state;
+            if ((istate) && (istate_from_uspace(istate)))
                 udebug_before_thread_runs();
 #endif
         }
     }
-    
 }
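Taken together, the hunks above do three things: they tidy the doxygen-style comment blocks, they move local declarations in clock() to their point of first use, and they replace the plain spinlock_lock()/spinlock_unlock() calls with the irq_spinlock_lock()/irq_spinlock_unlock() API, always passing false as the second argument. A minimal sketch of the before/after locking pattern is shown below; reading the false flag as "do not disable/restore interrupts here" is an assumption of this sketch, not something the diff states, though it would be consistent with clock() being the clock interrupt handler.

    /* Old pattern (rb8f7ea78): plain spinlock. */
    spinlock_lock(&CPU->timeoutlock);
    /* ... critical section ... */
    spinlock_unlock(&CPU->timeoutlock);
    
    /* New pattern (rda1bafb): irq_spinlock with an explicit flag.
     * Assumption: false means the lock operation leaves the interrupt
     * state alone, which would be safe inside the clock interrupt handler. */
    irq_spinlock_lock(&CPU->timeoutlock, false);
    /* ... critical section ... */
    irq_spinlock_unlock(&CPU->timeoutlock, false);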