Changeset 43114c5 in mainline
- Timestamp: 2005-04-09T18:22:53Z (20 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 8262010
- Parents: e6ba9a3f
- Files: 29 edited
arch/ia32/src/cpu/cpu.c
 void cpu_arch_init(void)
 {
-    the->cpu->arch.tss = tss_p;
+    CPU->arch.tss = tss_p;
 }
…
     int i;

-    the->cpu->arch.vendor = VendorUnknown;
+    CPU->arch.vendor = VendorUnknown;
     if (has_cpuid()) {
         cpuid(0, &info);
…
             info.cpuid_edx==AMD_CPUID_EDX) {

-            the->cpu->arch.vendor = VendorAMD;
+            CPU->arch.vendor = VendorAMD;
         }
…
             info.cpuid_edx==INTEL_CPUID_EDX) {

-            the->cpu->arch.vendor = VendorIntel;
+            CPU->arch.vendor = VendorIntel;

         }

         cpuid(1, &info);
-        the->cpu->arch.family = (info.cpuid_eax>>8)&0xf;
-        the->cpu->arch.model = (info.cpuid_eax>>4)&0xf;
-        the->cpu->arch.stepping = (info.cpuid_eax>>0)&0xf;
+        CPU->arch.family = (info.cpuid_eax>>8)&0xf;
+        CPU->arch.model = (info.cpuid_eax>>4)&0xf;
+        CPU->arch.stepping = (info.cpuid_eax>>0)&0xf;
     }
 }
arch/ia32/src/drivers/i8042.c
     trap_virtual_eoi();
     x = inb(0x60);
-    printf("%d", the->cpu->id);;
+    printf("%d", CPU->id);;
 }
arch/ia32/src/drivers/i8254.c
-    the->cpu->delay_loop_const = ((MAGIC_NUMBER*LOOPS)/1000) / ((t1-t2)-(o1-o2)) +
+    CPU->delay_loop_const = ((MAGIC_NUMBER*LOOPS)/1000) / ((t1-t2)-(o1-o2)) +
         (((MAGIC_NUMBER*LOOPS)/1000) % ((t1-t2)-(o1-o2)) ? 1 : 0);
…
     clk2 = rdtsc();

-    the->cpu->frequency_mhz = (clk2-clk1)>>SHIFT;
+    CPU->frequency_mhz = (clk2-clk1)>>SHIFT;

     return;
arch/ia32/src/drivers/i8259.c
 void pic_spurious(__u8 n, __u32 stack[])
 {
-    printf("cpu%d: PIC spurious interrupt\n", the->cpu->id);
+    printf("cpu%d: PIC spurious interrupt\n", CPU->id);
 }
arch/ia32/src/smp/apic.c
 void apic_spurious(__u8 n, __u32 stack[])
 {
-    printf("cpu%d: APIC spurious interrupt\n", the->cpu->id);
+    printf("cpu%d: APIC spurious interrupt\n", CPU->id);
 }
…
 /*
- * Send all CPUs excluding the->cpu IPI vector.
+ * Send all CPUs excluding CPU IPI vector.
  */
 int l_apic_broadcast_custom_ipi(__u8 vector)
…
     l_apic[TPR] &= TPRClear;

-    if (the->cpu->arch.family >= 6)
+    if (CPU->arch.family >= 6)
         enable_l_apic_in_msr();
…
     int i, lint;

-    printf("LVT on cpu%d, LAPIC ID: %d\n", the->cpu->id, (l_apic[L_APIC_ID] >> 24)&0xf);
+    printf("LVT on cpu%d, LAPIC ID: %d\n", CPU->id, (l_apic[L_APIC_ID] >> 24)&0xf);

     printf("LVT_Tm: ");
…
      * This register is supported only on P6 and higher.
      */
-    if (the->cpu->family > 5) {
+    if (CPU->family > 5) {
         printf("LVT_PCINT: ");
         if (l_apic[LVT_PCINT] & (1<<16)) printf("masked"); else printf("not masked"); putchar(',');
arch/ia32/src/userspace.c
      * Prepare TSS stack selector and pointers for next syscall.
      */
-    the->cpu->arch.tss->esp0 = (__address) &the->thread->kstack[THREAD_STACK_SIZE-8];
-    the->cpu->arch.tss->ss0 = selector(KDATA_DES);
+    CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-8];
+    CPU->arch.tss->ss0 = selector(KDATA_DES);

     __asm__ volatile (""
arch/mips/src/cpu/cpu.c
 void cpu_identify(void)
 {
-    the->cpu->arch.rev_num = cp0_prid_read() & 0xff;
-    the->cpu->arch.imp_num = (cp0_prid_read() >> 8) & 0xff;
+    CPU->arch.rev_num = cp0_prid_read() & 0xff;
+    CPU->arch.imp_num = (cp0_prid_read() >> 8) & 0xff;
 }
arch/mips/src/exception.c
     cp0_status_write(cp0_status_read() & ~ cp0_status_exl_exception_bit);

-    if (the->thread) {
-        the->thread->saved_pri = pri;
-        the->thread->saved_epc = epc;
+    if (THREAD) {
+        THREAD->saved_pri = pri;
+        THREAD->saved_epc = epc;
     }
     /* decode exception number and process the exception */
…
     }

-    if (the->thread) {
-        pri = the->thread->saved_pri;
-        epc = the->thread->saved_epc;
+    if (THREAD) {
+        pri = THREAD->saved_pri;
+        epc = THREAD->saved_epc;
     }
arch/mips/src/mm/tlb.c
 void tlb_invalid(void)
 {
-    panic(PANIC "%X: TLB exception at %X", cp0_badvaddr_read(), the->thread ? the->thread->saved_epc : 0);
+    panic(PANIC "%X: TLB exception at %X", cp0_badvaddr_read(), THREAD ? THREAD->saved_epc : 0);
 }
include/arch.h
 #include <cpu.h>

+#define CPU (the->cpu)
+#define THREAD (the->thread)
+#define TASK (the->task)
+
 extern cpu_private_page_t *the;
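The hunk above is the heart of the changeset: every other file merely swaps `the->cpu`, `the->thread` and `the->task` for the new CPU, THREAD and TASK macros, so call sites get shorter without any change in behaviour. A minimal, self-contained sketch of the accessor pattern follows; the struct and field definitions here are simplified stand-ins rather than the kernel's real types, and only the three macros mirror include/arch.h:

/* Sketch only: cpu_t, thread_t, task_t and main() are placeholders;
 * the CPU/THREAD/TASK macros match the new include/arch.h. */
#include <stdio.h>

typedef struct { int id; } cpu_t;
typedef struct { int tid; } thread_t;
typedef struct { int taskid; } task_t;

/* In the kernel, `the` points at a per-processor private page. */
typedef struct {
    cpu_t *cpu;
    thread_t *thread;
    task_t *task;
} cpu_private_page_t;

cpu_private_page_t *the;

#define CPU (the->cpu)
#define THREAD (the->thread)
#define TASK (the->task)

int main(void)
{
    static cpu_t cpu0 = { .id = 0 };
    static cpu_private_page_t priv = { .cpu = &cpu0, .thread = NULL, .task = NULL };

    the = &priv;
    /* Call sites now read CPU->id instead of the->cpu->id. */
    printf("cpu%d\n", CPU->id);
    return 0;
}

Because the macros expand to exactly the same member accesses, the rewrite is purely cosmetic; it just keeps the per-CPU bookkeeping behind one name.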
src/cpu/cpu.c
 #include <cpu.h>
+#include <arch.h>
 #include <arch/cpu.h>
 #include <mm/heap.h>
…
 #endif /* __SMP__ */

-    the->cpu = &cpus[config.cpu_active-1];
+    CPU = &cpus[config.cpu_active-1];
     cpu_identify();
     cpu_arch_init();
src/lib/func.c
     haltstate = 1;
     cpu_priority_high();
-    printf("cpu%d: halted\n", the->cpu->id);
+    printf("cpu%d: halted\n", CPU->id);
     cpu_halt();
 }
src/main/kinit.c
      * Just a beautification.
      */
-    if (t = thread_create(kmp, NULL, the->task, 0)) {
+    if (t = thread_create(kmp, NULL, TASK, 0)) {
         spinlock_lock(&t->lock);
         t->flags |= X_WIRED;
…
     for (i = 0; i < config.cpu_count; i++) {

-        if (t = thread_create(kcpulb, NULL, the->task, 0)) {
+        if (t = thread_create(kcpulb, NULL, TASK, 0)) {
             spinlock_lock(&t->lock);
             t->flags |= X_WIRED;
src/main/main.c
      * switch to this cpu's private stack prior to waking kmp up.
      */
-    the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];
-    the->cpu->saved_context.pc = (__address) main_ap_separated_stack;
-    context_restore(&the->cpu->saved_context);
+    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
+    CPU->saved_context.pc = (__address) main_ap_separated_stack;
+    context_restore(&CPU->saved_context);
     /* not reached */
 }
src/proc/scheduler.c
     cpu_priority_high();

-    spinlock_lock(&the->cpu->lock);
-    n = the->cpu->nrdy;
-    spinlock_unlock(&the->cpu->lock);
+    spinlock_lock(&CPU->lock);
+    n = CPU->nrdy;
+    spinlock_unlock(&CPU->lock);

     cpu_priority_low();
…
      * set CPU-private flag that the kcpulb has been started.
      */
-    if (test_and_set(&the->cpu->kcpulbstarted) == 0) {
-        waitq_wakeup(&the->cpu->kcpulb_wq, 0);
+    if (test_and_set(&CPU->kcpulbstarted) == 0) {
+        waitq_wakeup(&CPU->kcpulb_wq, 0);
         goto loop;
     }
…
     for (i = 0; i<RQ_COUNT; i++) {
-        r = &the->cpu->rq[i];
+        r = &CPU->rq[i];
         spinlock_lock(&r->lock);
         if (r->n == 0) {
…
         spinlock_unlock(&nrdylock);

-        spinlock_lock(&the->cpu->lock);
-        the->cpu->nrdy--;
-        spinlock_unlock(&the->cpu->lock);
+        spinlock_lock(&CPU->lock);
+        CPU->nrdy--;
+        spinlock_unlock(&CPU->lock);

         r->n--;
…
         spinlock_lock(&t->lock);
-        t->cpu = the->cpu;
+        t->cpu = CPU;

         t->ticks = us2ticks((i+1)*10000);
…
     list_initialize(&head);
-    spinlock_lock(&the->cpu->lock);
-    if (the->cpu->needs_relink > NEEDS_RELINK_MAX) {
+    spinlock_lock(&CPU->lock);
+    if (CPU->needs_relink > NEEDS_RELINK_MAX) {
         for (i = start; i<RQ_COUNT-1; i++) {
             /* remember and empty rq[i + 1] */
-            r = &the->cpu->rq[i + 1];
+            r = &CPU->rq[i + 1];
             spinlock_lock(&r->lock);
             list_concat(&head, &r->rq_head);
…
             /* append rq[i + 1] to rq[i] */
-            r = &the->cpu->rq[i];
+            r = &CPU->rq[i];
             spinlock_lock(&r->lock);
             list_concat(&r->rq_head, &head);
…
             spinlock_unlock(&r->lock);
         }
-        the->cpu->needs_relink = 0;
-    }
-    spinlock_unlock(&the->cpu->lock);
+        CPU->needs_relink = 0;
+    }
+    spinlock_unlock(&CPU->lock);

 }
…
         halt();

-    if (the->thread) {
-        spinlock_lock(&the->thread->lock);
-        if (!context_save(&the->thread->saved_context)) {
+    if (THREAD) {
+        spinlock_lock(&THREAD->lock);
+        if (!context_save(&THREAD->saved_context)) {
             /*
              * This is the place where threads leave scheduler();
              */
-            spinlock_unlock(&the->thread->lock);
-            cpu_priority_restore(the->thread->saved_context.pri);
+            spinlock_unlock(&THREAD->lock);
+            cpu_priority_restore(THREAD->saved_context.pri);
             return;
         }
-        the->thread->saved_context.pri = pri;
+        THREAD->saved_context.pri = pri;
     }
…
      * scheduler_separated_stack().
      */
-    context_save(&the->cpu->saved_context);
-    the->cpu->saved_context.sp = (__address) &the->cpu->stack[CPU_STACK_SIZE-8];
-    the->cpu->saved_context.pc = (__address) scheduler_separated_stack;
-    context_restore(&the->cpu->saved_context);
+    context_save(&CPU->saved_context);
+    CPU->saved_context.sp = (__address) &CPU->stack[CPU_STACK_SIZE-8];
+    CPU->saved_context.pc = (__address) scheduler_separated_stack;
+    context_restore(&CPU->saved_context);
     /* not reached */
 }
…
     int priority;

-    if (the->thread) {
-        switch (the->thread->state) {
+    if (THREAD) {
+        switch (THREAD->state) {
             case Running:
-                the->thread->state = Ready;
-                spinlock_unlock(&the->thread->lock);
-                thread_ready(the->thread);
+                THREAD->state = Ready;
+                spinlock_unlock(&THREAD->lock);
+                thread_ready(THREAD);
                 break;

             case Exiting:
-                frame_free((__address) the->thread->kstack);
-                if (the->thread->ustack) {
-                    frame_free((__address) the->thread->ustack);
+                frame_free((__address) THREAD->kstack);
+                if (THREAD->ustack) {
+                    frame_free((__address) THREAD->ustack);
                 }
…
                 * Detach from the containing task.
                 */
-                spinlock_lock(&the->task->lock);
-                list_remove(&the->thread->th_link);
-                spinlock_unlock(&the->task->lock);
-
-                spinlock_unlock(&the->thread->lock);
+                spinlock_lock(&TASK->lock);
+                list_remove(&THREAD->th_link);
+                spinlock_unlock(&TASK->lock);
+
+                spinlock_unlock(&THREAD->lock);

                 spinlock_lock(&threads_lock);
-                list_remove(&the->thread->threads_link);
+                list_remove(&THREAD->threads_link);
                 spinlock_unlock(&threads_lock);

-                free(the->thread);
+                free(THREAD);

                 break;
…
                 * Prefer the thread after it's woken up.
                 */
-                the->thread->pri = -1;
+                THREAD->pri = -1;

                 /*
                  * We need to release wq->lock which we locked in waitq_sleep().
-                 * Address of wq->lock is kept in the->thread->sleep_queue.
+                 * Address of wq->lock is kept in THREAD->sleep_queue.
                  */
-                spinlock_unlock(&the->thread->sleep_queue->lock);
+                spinlock_unlock(&THREAD->sleep_queue->lock);

                 /*
                  * Check for possible requests for out-of-context invocation.
                  */
-                if (the->thread->call_me) {
-                    the->thread->call_me(the->thread->call_me_with);
-                    the->thread->call_me = NULL;
-                    the->thread->call_me_with = NULL;
+                if (THREAD->call_me) {
+                    THREAD->call_me(THREAD->call_me_with);
+                    THREAD->call_me = NULL;
+                    THREAD->call_me_with = NULL;
                 }

-                spinlock_unlock(&the->thread->lock);
+                spinlock_unlock(&THREAD->lock);

                 break;
…
                 * Entering state is unexpected.
                 */
-                panic("tid%d: unexpected state %s\n", the->thread->tid, thread_states[the->thread->state]);
+                panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
                 break;
         }
-        the->thread = NULL;
+        THREAD = NULL;
     }

-    the->thread = find_best_thread();
+    THREAD = find_best_thread();

-    spinlock_lock(&the->thread->lock);
-    priority = the->thread->pri;
-    spinlock_unlock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
+    priority = THREAD->pri;
+    spinlock_unlock(&THREAD->lock);

     relink_rq(priority);

-    spinlock_lock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);

     /*
      * If both the old and the new task are the same, lots of work is avoided.
      */
-    if (the->task != the->thread->task) {
+    if (TASK != THREAD->task) {
         vm_t *m1 = NULL;
         vm_t *m2;

-        if (the->task) {
-            spinlock_lock(&the->task->lock);
-            m1 = the->task->vm;
-            spinlock_unlock(&the->task->lock);
-        }
-
-        spinlock_lock(&the->thread->task->lock);
-        m2 = the->thread->task->vm;
-        spinlock_unlock(&the->thread->task->lock);
+        if (TASK) {
+            spinlock_lock(&TASK->lock);
+            m1 = TASK->vm;
+            spinlock_unlock(&TASK->lock);
+        }
+
+        spinlock_lock(&THREAD->task->lock);
+        m2 = THREAD->task->vm;
+        spinlock_unlock(&THREAD->task->lock);

         /*
…
             vm_install(m2);
         }
-        the->task = the->thread->task;
+        TASK = THREAD->task;
     }

-    the->thread->state = Running;
+    THREAD->state = Running;

 #ifdef SCHEDULER_VERBOSE
-    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", the->cpu->id, the->thread->tid, the->thread->pri, the->thread->ticks, the->cpu->nrdy);
+    printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
 #endif

-    context_restore(&the->thread->saved_context);
+    context_restore(&THREAD->saved_context);
     /* not reached */
 }
…
         * Sleep until there's some work to do.
         */
-        waitq_sleep(&the->cpu->kcpulb_wq);
+        waitq_sleep(&CPU->kcpulb_wq);

 not_satisfied:
…
      */
     pri = cpu_priority_high();
-    spinlock_lock(&the->cpu->lock);
+    spinlock_lock(&CPU->lock);
     count = nrdy / config.cpu_active;
-    count -= the->cpu->nrdy;
-    spinlock_unlock(&the->cpu->lock);
+    count -= CPU->nrdy;
+    spinlock_unlock(&CPU->lock);
     cpu_priority_restore(pri);
…
             * Doesn't require interrupt disabling for kcpulb is X_WIRED.
             */
-            if (the->cpu == cpu)
+            if (CPU == cpu)
                 continue;
…
                 spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", the->cpu->id, t->tid, the->cpu->id, the->cpu->nrdy, nrdy / config.cpu_active);
+                printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, nrdy / config.cpu_active);
 #endif
                 t->flags |= X_STOLEN;
…
     }

-    if (the->cpu->nrdy) {
+    if (CPU->nrdy) {
         /*
          * Be a little bit light-weight and let migrated threads run.
…
         * Tell find_best_thread() to wake us up later again.
         */
-        the->cpu->kcpulbstarted = 0;
+        CPU->kcpulbstarted = 0;
         goto loop;
     }
src/proc/task.c
 void task_init(void)
 {
-    the->task = NULL;
+    TASK = NULL;
     spinlock_initialize(&tasks_lock);
     list_initialize(&tasks_head);
src/proc/thread.c
 void cushion(void)
 {
-    void (*f)(void *) = the->thread->thread_code;
-    void *arg = the->thread->thread_arg;
+    void (*f)(void *) = THREAD->thread_code;
+    void *arg = THREAD->thread_arg;

     /* this is where each thread wakes up after its creation */
-    spinlock_unlock(&the->thread->lock);
+    spinlock_unlock(&THREAD->lock);
     cpu_priority_low();
…
 void thread_init(void)
 {
-    the->thread = NULL;
+    THREAD = NULL;
     nrdy = 0;
     spinlock_initialize(&threads_lock);
…
 restart:
     pri = cpu_priority_high();
-    spinlock_lock(&the->thread->lock);
-    if (the->thread->timeout_pending) { /* busy waiting for timeouts in progress */
-        spinlock_unlock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
+    if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
+        spinlock_unlock(&THREAD->lock);
         cpu_priority_restore(pri);
         goto restart;
     }
-    the->thread->state = Exiting;
-    spinlock_unlock(&the->thread->lock);
+    THREAD->state = Exiting;
+    spinlock_unlock(&THREAD->lock);
     scheduler();
 }
…
     pri = cpu_priority_high();
-    spinlock_lock(&the->thread->lock);
-    the->thread->call_me = call_me;
-    the->thread->call_me_with = call_me_with;
-    spinlock_unlock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
+    THREAD->call_me = call_me;
+    THREAD->call_me_with = call_me_with;
+    spinlock_unlock(&THREAD->lock);
     cpu_priority_restore(pri);
 }
src/synch/rwlock.c
     pri = cpu_priority_high();
-    spinlock_lock(&the->thread->lock);
-    the->thread->rwlock_holder_type = RWLOCK_WRITER;
-    spinlock_unlock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
+    THREAD->rwlock_holder_type = RWLOCK_WRITER;
+    spinlock_unlock(&THREAD->lock);
     cpu_priority_restore(pri);
…
     pri = cpu_priority_high();
-    spinlock_lock(&the->thread->lock);
-    the->thread->rwlock_holder_type = RWLOCK_READER;
-    spinlock_unlock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
+    THREAD->rwlock_holder_type = RWLOCK_READER;
+    spinlock_unlock(&THREAD->lock);

     spinlock_lock(&rwl->lock);
src/synch/spinlock.c
     while (test_and_set(&sl->val)) {
         if (i++ > 300000) {
-            printf("cpu%d: looping on spinlock %X, caller=%X\n", the->cpu->id, sl, caller);
+            printf("cpu%d: looping on spinlock %X, caller=%X\n", CPU->id, sl, caller);
             i = 0;
         }
src/synch/waitq.c
      * there are timeouts in progress.
      */
-    spinlock_lock(&the->thread->lock);
-    if (the->thread->timeout_pending) {
-        spinlock_unlock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
+    if (THREAD->timeout_pending) {
+        spinlock_unlock(&THREAD->lock);
         cpu_priority_restore(pri);
         goto restart;
     }
-    spinlock_unlock(&the->thread->lock);
+    spinlock_unlock(&THREAD->lock);

     spinlock_lock(&wq->lock);
…
      * Now we are firmly decided to go to sleep.
      */
-    spinlock_lock(&the->thread->lock);
+    spinlock_lock(&THREAD->lock);
     if (usec) {
         /* We use the timeout variant. */
-        if (!context_save(&the->thread->sleep_timeout_context)) {
+        if (!context_save(&THREAD->sleep_timeout_context)) {
             /*
              * Short emulation of scheduler() return code.
              */
-            spinlock_unlock(&the->thread->lock);
+            spinlock_unlock(&THREAD->lock);
             cpu_priority_restore(pri);
             return ESYNCH_TIMEOUT;
         }
-        the->thread->timeout_pending = 1;
-        timeout_register(&the->thread->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, the->thread);
+        THREAD->timeout_pending = 1;
+        timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
     }

-    list_append(&the->thread->wq_link, &wq->head);
+    list_append(&THREAD->wq_link, &wq->head);

     /*
      * Suspend execution.
      */
-    the->thread->state = Sleeping;
-    the->thread->sleep_queue = wq;
-
-    spinlock_unlock(&the->thread->lock);
+    THREAD->state = Sleeping;
+    THREAD->sleep_queue = wq;
+
+    spinlock_unlock(&THREAD->lock);

     scheduler(); /* wq->lock is released in scheduler_separated_stack() */
src/time/clock.c
      * run all expired timeouts as you visit them.
      */
-    spinlock_lock(&the->cpu->timeoutlock);
-    while ((l = the->cpu->timeout_active_head.next) != &the->cpu->timeout_active_head) {
+    spinlock_lock(&CPU->timeoutlock);
+    while ((l = CPU->timeout_active_head.next) != &CPU->timeout_active_head) {
         h = list_get_instance(l, timeout_t, link);
         spinlock_lock(&h->lock);
…
         timeout_reinitialize(h);
         spinlock_unlock(&h->lock);
-        spinlock_unlock(&the->cpu->timeoutlock);
+        spinlock_unlock(&CPU->timeoutlock);

         f(arg);

-        spinlock_lock(&the->cpu->timeoutlock);
+        spinlock_lock(&CPU->timeoutlock);
     }
-    spinlock_unlock(&the->cpu->timeoutlock);
+    spinlock_unlock(&CPU->timeoutlock);

     /*
-     * Do CPU usage accounting and find out whether to preempt the->thread.
+     * Do CPU usage accounting and find out whether to preempt THREAD.
      */

-    if (the->thread) {
-        spinlock_lock(&the->cpu->lock);
-        the->cpu->needs_relink++;
-        spinlock_unlock(&the->cpu->lock);
+    if (THREAD) {
+        spinlock_lock(&CPU->lock);
+        CPU->needs_relink++;
+        spinlock_unlock(&CPU->lock);

-        spinlock_lock(&the->thread->lock);
-        if (!the->thread->ticks--) {
-            spinlock_unlock(&the->thread->lock);
+        spinlock_lock(&THREAD->lock);
+        if (!THREAD->ticks--) {
+            spinlock_unlock(&THREAD->lock);
             scheduler();
         }
         else {
-            spinlock_unlock(&the->thread->lock);
+            spinlock_unlock(&THREAD->lock);
         }
     }
src/time/delay.c
     pri = cpu_priority_high();
-    asm_delay_loop(microseconds * the->cpu->delay_loop_const);
+    asm_delay_loop(microseconds * CPU->delay_loop_const);
     cpu_priority_restore(pri);
 }
src/time/timeout.c
 void timeout_init(void)
 {
-    spinlock_initialize(&the->cpu->timeoutlock);
-    list_initialize(&the->cpu->timeout_active_head);
+    spinlock_initialize(&CPU->timeoutlock);
+    list_initialize(&CPU->timeout_active_head);
…
     pri = cpu_priority_high();
-    spinlock_lock(&the->cpu->timeoutlock);
+    spinlock_lock(&CPU->timeoutlock);
     spinlock_lock(&t->lock);
…
         panic("timeout_register: t->cpu != 0");

-    t->cpu = the->cpu;
+    t->cpu = CPU;
     t->ticks = us2ticks(time);
…
      */
     sum = 0;
-    l = the->cpu->timeout_active_head.next;
-    while (l != &the->cpu->timeout_active_head) {
+    l = CPU->timeout_active_head.next;
+    while (l != &CPU->timeout_active_head) {
         hlp = list_get_instance(l, timeout_t, link);
         spinlock_lock(&hlp->lock);
…
      * Decrease ticks of t's immediate succesor by t->ticks.
      */
-    if (l != &the->cpu->timeout_active_head) {
+    if (l != &CPU->timeout_active_head) {
         spinlock_lock(&hlp->lock);
         hlp->ticks -= t->ticks;
…
     spinlock_unlock(&t->lock);
-    spinlock_unlock(&the->cpu->timeoutlock);
+    spinlock_unlock(&CPU->timeoutlock);
     cpu_priority_restore(pri);
 }
test/synch/rwlock2/test.c
     rwlock_read_lock(&rwlock);

-    thrd = thread_create(writer, NULL, the->task, 0);
+    thrd = thread_create(writer, NULL, TASK, 0);
     if (thrd)
         thread_ready(thrd);
test/synch/rwlock3/test.c
 void reader(void *arg)
 {
-    printf("cpu%d, tid %d: trying to lock rwlock for reading....\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d: trying to lock rwlock for reading....\n", CPU->id, THREAD->tid);
     rwlock_read_lock(&rwlock);
     rwlock_read_unlock(&rwlock);
-    printf("cpu%d, tid %d: success\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d: success\n", CPU->id, THREAD->tid);

-    printf("cpu%d, tid %d: trying to lock rwlock for writing....\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d: trying to lock rwlock for writing....\n", CPU->id, THREAD->tid);

     rwlock_write_lock(&rwlock);
     rwlock_write_unlock(&rwlock);
-    printf("cpu%d, tid %d: success\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d: success\n", CPU->id, THREAD->tid);

     printf("Test passed.\n");
…
     for (i=0; i<4; i++) {
-        thrd = thread_create(reader, NULL, the->task, 0);
+        thrd = thread_create(reader, NULL, TASK, 0);
         if (thrd)
             thread_ready(thrd);
test/synch/rwlock4/test.c
     to = random(40000);
-    printf("cpu%d, tid %d w+ (%d)\n", the->cpu->id, the->thread->tid, to);
+    printf("cpu%d, tid %d w+ (%d)\n", CPU->id, THREAD->tid, to);
     rc = rwlock_write_lock_timeout(&rwlock, to);
     if (SYNCH_FAILED(rc)) {
-        printf("cpu%d, tid %d w!\n", the->cpu->id, the->thread->tid);
+        printf("cpu%d, tid %d w!\n", CPU->id, THREAD->tid);
         return;
     };
-    printf("cpu%d, tid %d w=\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d w=\n", CPU->id, THREAD->tid);

     if (rwlock.readers_in) panic("Oops.");
…
     rwlock_write_unlock(&rwlock);
-    printf("cpu%d, tid %d w-\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d w-\n", CPU->id, THREAD->tid);
 }
…
     to = random(2000);
-    printf("cpu%d, tid %d r+ (%d)\n", the->cpu->id, the->thread->tid, to);
+    printf("cpu%d, tid %d r+ (%d)\n", CPU->id, THREAD->tid, to);
     rc = rwlock_read_lock_timeout(&rwlock, to);
     if (SYNCH_FAILED(rc)) {
-        printf("cpu%d, tid %d r!\n", the->cpu->id, the->thread->tid);
+        printf("cpu%d, tid %d r!\n", CPU->id, THREAD->tid);
         return;
     }
-    printf("cpu%d, tid %d r=\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d r=\n", CPU->id, THREAD->tid);
     thread_usleep(30000);
     rwlock_read_unlock(&rwlock);
-    printf("cpu%d, tid %d r-\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d r-\n", CPU->id, THREAD->tid);
 }
…
     printf("Creating %d readers\n", k);
     for (i=0; i<k; i++) {
-        thrd = thread_create(reader, NULL, the->task, 0);
+        thrd = thread_create(reader, NULL, TASK, 0);
         if (thrd)
             thread_ready(thrd);
…
     printf("Creating %d writers\n", k);
     for (i=0; i<k; i++) {
-        thrd = thread_create(writer, NULL, the->task, 0);
+        thrd = thread_create(writer, NULL, TASK, 0);
         if (thrd)
             thread_ready(thrd);
test/synch/rwlock5/test.c
     for (j=0; j<(READERS+WRITERS)/2; j++) {
         for (k=0; k<i; k++) {
-            thrd = thread_create(reader, NULL, the->task, 0);
+            thrd = thread_create(reader, NULL, TASK, 0);
             if (thrd)
                 thread_ready(thrd);
…
         }
         for (k=0; k<(4-i); k++) {
-            thrd = thread_create(writer, NULL, the->task, 0);
+            thrd = thread_create(writer, NULL, TASK, 0);
             if (thrd)
                 thread_ready(thrd);
test/synch/semaphore1/test.c
     for (j=0; j<(CONSUMERS+PRODUCERS)/2; j++) {
         for (k=0; k<i; k++) {
-            thrd = thread_create(consumer, NULL, the->task, 0);
+            thrd = thread_create(consumer, NULL, TASK, 0);
             if (thrd)
                 thread_ready(thrd);
…
         }
         for (k=0; k<(4-i); k++) {
-            thrd = thread_create(producer, NULL, the->task, 0);
+            thrd = thread_create(producer, NULL, TASK, 0);
             if (thrd)
                 thread_ready(thrd);
test/synch/semaphore2/test.c
     to = random(20000);
-    printf("cpu%d, tid %d down+ (%d)\n", the->cpu->id, the->thread->tid, to);
+    printf("cpu%d, tid %d down+ (%d)\n", CPU->id, THREAD->tid, to);
     rc = semaphore_down_timeout(&sem, to);
     if (SYNCH_FAILED(rc)) {
-        printf("cpu%d, tid %d down!\n", the->cpu->id, the->thread->tid);
+        printf("cpu%d, tid %d down!\n", CPU->id, THREAD->tid);
         return;
     }

-    printf("cpu%d, tid %d down=\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d down=\n", CPU->id, THREAD->tid);
     thread_usleep(random(30000));

     semaphore_up(&sem);
-    printf("cpu%d, tid %d up\n", the->cpu->id, the->thread->tid);
+    printf("cpu%d, tid %d up\n", CPU->id, THREAD->tid);
 }
…
     printf("Creating %d consumers\n", k);
     for (i=0; i<k; i++) {
-        thrd = thread_create(consumer, NULL, the->task, 0);
+        thrd = thread_create(consumer, NULL, TASK, 0);
         if (thrd)
             thread_ready(thrd);