Changeset 4e33b6b in mainline
- Timestamp: 2007-01-07T14:44:33Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d78d603
- Parents: c109dd0
- Location: kernel
- Files: 5 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/arch/sparc64/src/drivers/tick.c
rc109dd0 r4e33b6b

     CPU->missed_clock_ticks++;
   }
-  CPU->arch.next_tick_cmpr = tick_read() + (CPU->arch.clock_frequency / HZ)
-      - drift;
+  CPU->arch.next_tick_cmpr = tick_read() + (CPU->arch.clock_frequency /
+      HZ) - drift;
   tick_compare_write(CPU->arch.next_tick_cmpr);
   clock();
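The hunk above only re-wraps the expression that programs the next timer interrupt; the computation itself schedules the compare register one clock period (clock_frequency / HZ) ahead of the current tick counter and subtracts the drift accumulated so far. A minimal standalone sketch of that arithmetic follows; tick_read(), tick_compare_write(), the HZ value and the sample numbers are stand-ins for illustration, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define HZ 100                          /* assumed clock interrupt rate */

static uint64_t fake_tick = 1000000;    /* stand-in for the CPU tick register */

static uint64_t tick_read(void)
{
    return fake_tick;
}

static void tick_compare_write(uint64_t v)
{
    printf("TICK_CMPR <- %llu\n", (unsigned long long) v);
}

int main(void)
{
    uint64_t clock_frequency = 40000000;    /* 40 MHz, assumed */
    uint64_t drift = 120;                   /* ticks lost while handling the interrupt */
    uint64_t next_tick_cmpr;

    /*
     * Program the next interrupt one clock period ahead of now,
     * compensating for the drift already accumulated.
     */
    next_tick_cmpr = tick_read() + (clock_frequency / HZ) - drift;
    tick_compare_write(next_tick_cmpr);
    return 0;
}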
kernel/generic/src/cpu/cpu.c
rc109dd0 r4e33b6b

   int i, j;

 #ifdef CONFIG_SMP
   if (config.cpu_active == 1) {
 #endif /* CONFIG_SMP */
     cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
         FRAME_ATOMIC);
...
   }

 #ifdef CONFIG_SMP
   }
 #endif /* CONFIG_SMP */

   CPU = &cpus[config.cpu_active-1];

(The displayed text is identical in both revisions; the change in this file appears to be whitespace/indentation only.)
kernel/generic/src/proc/scheduler.c
rc109dd0 r4e33b6b

 /*
- * Copyright (C) 2001-2004 Jakub Jermar
+ * Copyright (C) 2001-2007 Jakub Jermar
  * All rights reserved.
  *
...
   spinlock_unlock(&THREAD->lock);
   spinlock_unlock(&CPU->lock);
-  THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
+  THREAD->saved_fpu_context =
+      slab_alloc(fpu_context_slab, 0);
   /* We may have switched CPUs during slab_alloc */
   goto restart;
...
   t->cpu = CPU;

-  t->ticks = us2ticks((i +1)*10000);
+  t->ticks = us2ticks((i + 1) * 10000);
   t->priority = i;  /* correct rq index */
...
   spinlock_lock(&CPU->lock);
   if (CPU->needs_relink > NEEDS_RELINK_MAX) {
-    for (i = start; i <RQ_COUNT-1; i++) {
+    for (i = start; i < RQ_COUNT - 1; i++) {
       /* remember and empty rq[i + 1] */
       r = &CPU->rq[i + 1];
...
   /*
-   * Interrupt priority level of preempted thread is recorded here
-   * to facilitate scheduler() invocations from interrupts_disable()'d
-   * code (e.g. waitq_sleep_timeout()).
+   * Interrupt priority level of preempted thread is recorded
+   * here to facilitate scheduler() invocations from
+   * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
    */
   THREAD->saved_context.ipl = ipl;
...
   } else {
     /*
-     * The thread structure is kept allocated until somebody
-     * calls thread_detach() on it.
+     * The thread structure is kept allocated until
+     * somebody calls thread_detach() on it.
      */
     if (!spinlock_trylock(&THREAD->join_wq.lock)) {
...
   /*
-   * We need to release wq->lock which we locked in waitq_sleep().
-   * Address of wq->lock is kept in THREAD->sleep_queue.
+   * We need to release wq->lock which we locked in
+   * waitq_sleep(). Address of wq->lock is kept in
+   * THREAD->sleep_queue.
    */
   spinlock_unlock(&THREAD->sleep_queue->lock);

   /*
-   * Check for possible requests for out-of-context invocation.
+   * Check for possible requests for out-of-context
+   * invocation.
    */
   if (THREAD->call_me) {
...
    * Entering state is unexpected.
    */
-  panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
+  panic("tid%d: unexpected state %s\n", THREAD->tid,
+      thread_states[THREAD->state]);
   break;
...
   /*
-   * If both the old and the new task are the same, lots of work is avoided.
+   * If both the old and the new task are the same, lots of work is
+   * avoided.
    */
   if (TASK != THREAD->task) {
...
     /*
-     * Note that it is possible for two tasks to share one address space.
+     * Note that it is possible for two tasks to share one address
+     * space.
      */
     if (as1 != as2) {
...
 #ifdef SCHEDULER_VERBOSE
-  printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
-      CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
+  printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
+      CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
+      atomic_get(&CPU->nrdy));
 #endif
...
   /*
-   * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
+   * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+   * thread's stack.
    */
   the_copy(THE, (the_t *) THREAD->kstack);
...
   /*
-   * Searching least priority queues on all CPU's first and most priority queues on all CPU's last.
-   */
-  for (j=RQ_COUNT-1; j >= 0; j--) {
-    for (i=0; i < config.cpu_active; i++) {
+   * Searching least priority queues on all CPU's first and most priority
+   * queues on all CPU's last.
+   */
+  for (j = RQ_COUNT - 1; j >= 0; j--) {
+    for (i = 0; i < config.cpu_active; i++) {
       link_t *l;
       runq_t *r;
...
       /*
        * Not interested in ourselves.
-       * Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
+       * Doesn't require interrupt disabling for kcpulb has
+       * THREAD_FLAG_WIRED.
        */
       if (CPU == cpu)
...
         t = list_get_instance(l, thread_t, rq_link);
         /*
-         * We don't want to steal CPU-wired threads neither threads already
-         * stolen. The latter prevents threads from migrating between CPU's
-         * without ever being run. We don't want to steal threads whose FPU
-         * context is still in CPU.
+         * We don't want to steal CPU-wired threads
+         * neither threads already stolen. The latter
+         * prevents threads from migrating between CPU's
+         * without ever being run. We don't want to
+         * steal threads whose FPU context is still in
+         * CPU.
          */
         spinlock_lock(&t->lock);
-        if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
+        if ((!(t->flags & (THREAD_FLAG_WIRED |
+            THREAD_FLAG_STOLEN))) &&
             (!(t->fpu_context_engaged)) ) {
           /*
...
         spinlock_lock(&t->lock);
 #ifdef KCPULB_VERBOSE
-        printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
-            CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
+        printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
+            "avg=%nd\n", CPU->id, t->tid, CPU->id,
+            atomic_get(&CPU->nrdy),
             atomic_get(&nrdy) / config.cpu_active);
 #endif
...
         /*
-         * We are not satisfied yet, focus on another CPU next time.
+         * We are not satisfied yet, focus on another
+         * CPU next time.
          */
         k++;
...
     spinlock_lock(&cpus[cpu].lock);
     printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
-        cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
+        cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
+        cpus[cpu].needs_relink);

-    for (i =0; i<RQ_COUNT; i++) {
+    for (i = 0; i < RQ_COUNT; i++) {
       r = &cpus[cpu].rq[i];
       spinlock_lock(&r->lock);
...
       }
       printf("\trq[%d]: ", i);
-      for (cur=r->rq_head.next; cur!=&r->rq_head; cur=cur->next) {
+      for (cur = r->rq_head.next; cur != &r->rq_head;
+          cur = cur->next) {
         t = list_get_instance(cur, thread_t, rq_link);
         printf("%d(%s) ", t->tid,
             thread_states[t->state]);
       }
       printf("\n");
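One of the reflowed comments above spells out the kcpulb (kernel CPU load balancer) steal test: a thread may be migrated only if it is neither CPU-wired nor already marked stolen, and its FPU context is not still held by a CPU. The following is an illustrative restatement of that predicate only; the type and flag definitions are simplified stand-ins, not the kernel's own.

#include <stdbool.h>
#include <stdio.h>

/* Assumed flag values; the kernel defines its own. */
#define THREAD_FLAG_WIRED   (1 << 0)
#define THREAD_FLAG_STOLEN  (1 << 1)

typedef struct {
    int flags;
    bool fpu_context_engaged;
} thread_stub_t;

/* True if the balancer may pull this thread onto another CPU's run queue. */
static bool can_steal(const thread_stub_t *t)
{
    return !(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN)) &&
        !t->fpu_context_engaged;
}

int main(void)
{
    thread_stub_t t = { .flags = THREAD_FLAG_WIRED, .fpu_context_engaged = false };
    printf("stealable: %d\n", can_steal(&t));    /* prints 0: wired threads stay put */
    return 0;
}

As the reflowed comment notes, the STOLEN flag keeps a just-migrated thread from being bounced to yet another CPU before it has had a chance to run.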
kernel/generic/src/proc/thread.c
rc109dd0 r4e33b6b

 };

-/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
+/** Lock protecting the threads_btree B+tree.
+ *
+ * For locking rules, see declaration thereof.
+ */
 SPINLOCK_INITIALIZE(threads_lock);

 /** B+tree of all threads.
  *
- * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
- * as the threads_lock is held.
+ * When a thread is found in the threads_btree B+tree, it is guaranteed to
+ * exist as long as the threads_lock is held.
  */
 btree_t threads_btree;
...
 #endif

-/** Thread wrapper
- *
- * This wrapper is provided to ensure that every thread
- * makes a call to thread_exit() when its implementing
- * function returns.
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
  *
  * interrupts_disable() is assumed.
...
   THREAD = NULL;
   atomic_set(&nrdy,0);
-  thread_slab = slab_cache_create("thread_slab",
-      sizeof(thread_t),0,
-      thr_constructor, thr_destructor, 0);
+  thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
+      thr_constructor, thr_destructor, 0);
+
 #ifdef ARCH_HAS_FPU
-  fpu_context_slab = slab_cache_create("fpu_slab",
-      sizeof(fpu_context_t),
-      FPU_CONTEXT_ALIGN,
-      NULL, NULL, 0);
+  fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
+      FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
...
   ASSERT(! (t->state == Ready));

   i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

   cpu = CPU;
   (whitespace-only change in this hunk)
...
 void thread_destroy(thread_t *t)
 {
   bool destroy_task = false;

   ASSERT(t->state == Exiting || t->state == Undead);
   (whitespace-only change in this hunk)
...
   spinlock_lock(&t->cpu->lock);
-  if(t->cpu->fpu_owner ==t)
-    t->cpu->fpu_owner =NULL;
+  if(t->cpu->fpu_owner == t)
+    t->cpu->fpu_owner = NULL;
   spinlock_unlock(&t->cpu->lock);
...
  * @param flags Thread flags.
  * @param name Symbolic name.
- * @param uncounted Thread's accounting doesn't affect accumulated task accounting.
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *     accounting.
  *
  * @return New thread's structure on success, NULL on failure.
  *
  */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name, bool uncounted)
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    int flags, char *name, bool uncounted)
 {
   thread_t *t;
...
   /* Not needed, but good for debugging */
-  memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+  memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
+      0);

   ipl = interrupts_disable();
...
   context_save(&t->saved_context);
-  context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);
+  context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
+      THREAD_STACK_SIZE);

   the_initialize((the_t *) t->kstack);
...
   t->fpu_context_engaged = 0;

-  thread_create_arch(t);  /* might depend on previous initialization */
+  /* might depend on previous initialization */
+  thread_create_arch(t);
...
    */
   spinlock_lock(&threads_lock);
-  btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
+  btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
+      NULL);
   spinlock_unlock(&threads_lock);
...
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting
- * state. All pending timeouts are executed.
- *
+ * End current thread execution and switch it to the exiting state. All pending
+ * timeouts are executed.
  */
 void thread_exit(void)
...
   ipl = interrupts_disable();
   spinlock_lock(&THREAD->lock);
-  if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
+  if (THREAD->timeout_pending) {
+    /* busy waiting for timeouts in progress */
     spinlock_unlock(&THREAD->lock);
     interrupts_restore(ipl);
...
 void thread_sleep(uint32_t sec)
 {
-  thread_usleep(sec *1000000);
+  thread_usleep(sec * 1000000);
 }
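One of the hunks above (around the ASSERT on the Ready state) touches the expression that picks a run queue when a thread becomes ready: the priority index is incremented, saturating at RQ_COUNT - 1, and the scheduler comment reflowed earlier ("Searching least priority queues on all CPU's first...") indicates that a higher index means a less favoured queue. A small sketch of that selection follows; RQ_COUNT and the structure are reduced to placeholders.

#define RQ_COUNT 16    /* assumed number of run queues per CPU */

typedef struct {
    int priority;      /* doubles as the run-queue index */
} thread_stub_t;

/*
 * Mirror of: i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
 * The priority index is bumped by one unless the thread already sits in the
 * last (least favoured) queue.
 */
static int select_rq_index(thread_stub_t *t)
{
    if (t->priority < RQ_COUNT - 1)
        t->priority++;
    return t->priority;
}

int main(void)
{
    thread_stub_t t = { .priority = RQ_COUNT - 1 };
    return select_rq_index(&t) == RQ_COUNT - 1 ? 0 : 1;    /* index saturates */
}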
kernel/generic/src/synch/waitq.c
rc109dd0 r4e33b6b


  * If usec is greater than zero, regardless of the value of the
- * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either timeout,
- * interruption or wakeup comes.
- *
- * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags, the call
- * will not return until wakeup or interruption comes.
- *
- * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the call will
- * immediately return, reporting either success or failure.
- *
- * @return Returns one of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
- *         ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
- *
- * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time
- * of the call there was no pending wakeup.
+ * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
+ * timeout, interruption or wakeup comes.
+ *
+ * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is not set in flags,
+ * the call will not return until wakeup or interruption comes.
+ *
+ * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
+ * call will immediately return, reporting either success or failure.
+ *
+ * @return One of: ESYNCH_WOULD_BLOCK, ESYNCH_TIMEOUT, ESYNCH_INTERRUPTED,
+ *     ESYNCH_OK_ATOMIC, ESYNCH_OK_BLOCKED.
+ *
+ * @li ESYNCH_WOULD_BLOCK means that the sleep failed because at the time of the
+ * call there was no pending wakeup.
  *
  * @li ESYNCH_TIMEOUT means that the sleep timed out.
...
   }
   THREAD->timeout_pending = true;
-  timeout_register(&THREAD->sleep_timeout, (uint64_t) usec, waitq_timeouted_sleep, THREAD);
+  timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
+      waitq_timeouted_sleep, THREAD);
...
   spinlock_unlock(&THREAD->lock);

-  scheduler(); /* wq->lock is released in scheduler_separated_stack() */
+  /* wq->lock is released in scheduler_separated_stack() */
+  scheduler();

   return ESYNCH_OK_BLOCKED;
...
 /** Wake up first thread sleeping in a wait queue
  *
- * Wake up first thread sleeping in a wait queue.
- * This is the SMP- and IRQ-safe wrapper meant for
- * general use.
- *
- * Besides its 'normal' wakeup operation, it attempts
- * to unregister possible timeout.
+ * Wake up first thread sleeping in a wait queue. This is the SMP- and IRQ-safe
+ * wrapper meant for general use.
+ *
+ * Besides its 'normal' wakeup operation, it attempts to unregister possible
+ * timeout.
  *
  * @param wq Pointer to wait queue.
- * @param all If this is non-zero, all sleeping threads
- *        will be woken up and missed count will be zeroed.
+ * @param all If this is non-zero, all sleeping threads will be woken up and
+ *     missed count will be zeroed.
  */
 void waitq_wakeup(waitq_t *wq, bool all)
...
 /** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
  *
- * This is the internal SMP- and IRQ-unsafe version
- * of waitq_wakeup(). It assumes wq->lock is already
- * locked and interrupts are already disabled.
+ * This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
+ * assumes wq->lock is already locked and interrupts are already disabled.
  *
  * @param wq Pointer to wait queue.
- * @param all If this is non-zero, all sleeping threads
- *        will be woken up and missed count will be zeroed.
+ * @param all If this is non-zero, all sleeping threads will be woken up and
+ *     missed count will be zeroed.
  */
 void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
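The reflowed comment above documents the waitq_sleep_timeout() contract: usec > 0 gives a bounded sleep, usec == 0 blocks until wakeup or interruption unless SYNCH_FLAGS_NON_BLOCKING is set, and the result is one of the ESYNCH_* codes. A hedged caller-side sketch of acting on those codes follows; the enum values and the do_sleep() stub are placeholders, and the meanings attributed to the two success codes are my reading, since only ESYNCH_WOULD_BLOCK and ESYNCH_TIMEOUT are spelled out in the visible hunk.

#include <stdio.h>

/* Placeholder result codes; the kernel defines the real values. */
enum {
    ESYNCH_WOULD_BLOCK,     /* non-blocking call, no pending wakeup */
    ESYNCH_TIMEOUT,         /* sleep timed out */
    ESYNCH_INTERRUPTED,     /* sleep was interrupted */
    ESYNCH_OK_ATOMIC,       /* assumed: succeeded without blocking */
    ESYNCH_OK_BLOCKED       /* assumed: blocked and was woken up */
};

/* Stub standing in for a call such as waitq_sleep_timeout(wq, usec, flags). */
static int do_sleep(void)
{
    return ESYNCH_OK_BLOCKED;
}

int main(void)
{
    switch (do_sleep()) {
    case ESYNCH_OK_ATOMIC:
    case ESYNCH_OK_BLOCKED:
        puts("woken up normally");
        break;
    case ESYNCH_TIMEOUT:
        puts("timed out");
        break;
    case ESYNCH_INTERRUPTED:
        puts("interrupted");
        break;
    case ESYNCH_WOULD_BLOCK:
        puts("nothing to wait for (non-blocking mode)");
        break;
    }
    return 0;
}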