Changeset 4e33b6b in mainline for kernel/generic/src/proc/thread.c
- Timestamp: 2007-01-07T14:44:33Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d78d603
- Parents: c109dd0
- File: 1 edited
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c (rc109dd0)
+++ kernel/generic/src/proc/thread.c (r4e33b6b)
@@ -81,11 +81,14 @@
 };
 
-/** Lock protecting the threads_btree B+tree. For locking rules, see declaration thereof. */
+/** Lock protecting the threads_btree B+tree.
+ *
+ * For locking rules, see declaration thereof.
+ */
 SPINLOCK_INITIALIZE(threads_lock);
 
 /** B+tree of all threads.
  *
- * When a thread is found in the threads_btree B+tree, it is guaranteed to exist as long
- * as the threads_lock is held.
+ * When a thread is found in the threads_btree B+tree, it is guaranteed to
+ * exist as long as the threads_lock is held.
  */
 btree_t threads_btree;
@@ -99,9 +102,8 @@
 #endif
 
-/** Thread wrapper
- *
- * This wrapper is provided to ensure that every thread
- * makes a call to thread_exit() when its implementing
- * function returns.
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
  *
  * interrupts_disable() is assumed.
@@ -202,12 +204,10 @@
         THREAD = NULL;
         atomic_set(&nrdy,0);
-        thread_slab = slab_cache_create("thread_slab",
-                                        sizeof(thread_t),0,
-                                        thr_constructor, thr_destructor, 0);
+        thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
+            thr_constructor, thr_destructor, 0);
+
 #ifdef ARCH_HAS_FPU
-        fpu_context_slab = slab_cache_create("fpu_slab",
-                                             sizeof(fpu_context_t),
-                                             FPU_CONTEXT_ALIGN,
-                                             NULL, NULL, 0);
+        fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
+            FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
 
@@ -235,5 +235,5 @@
         ASSERT(! (t->state == Ready));
 
-        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
+        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
 
         cpu = CPU;
@@ -268,5 +268,5 @@
 void thread_destroy(thread_t *t)
 {
-        bool destroy_task = false;
+        bool destroy_task = false;
 
         ASSERT(t->state == Exiting || t->state == Undead);
@@ -275,5 +275,5 @@
 
         spinlock_lock(&t->cpu->lock);
-        if(t->cpu->fpu_owner ==t)
-                t->cpu->fpu_owner =NULL;
+        if(t->cpu->fpu_owner == t)
+                t->cpu->fpu_owner = NULL;
         spinlock_unlock(&t->cpu->lock);
@@ -311,10 +311,12 @@
  * @param flags Thread flags.
  * @param name Symbolic name.
- * @param uncounted Thread's accounting doesn't affect accumulated task accounting.
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *      accounting.
  *
  * @return New thread's structure on success, NULL on failure.
  *
  */
-thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name, bool uncounted)
+thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
+    int flags, char *name, bool uncounted)
 {
         thread_t *t;
@@ -326,5 +328,6 @@
 
         /* Not needed, but good for debugging */
-        memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+        memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
+            0);
 
         ipl = interrupts_disable();
@@ -335,5 +338,6 @@
 
         context_save(&t->saved_context);
-        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);
+        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
+            THREAD_STACK_SIZE);
 
         the_initialize((the_t *) t->kstack);
@@ -377,5 +381,6 @@
         t->fpu_context_engaged = 0;
 
-        thread_create_arch(t);  /* might depend on previous initialization */
+        /* might depend on previous initialization */
+        thread_create_arch(t);
 
         /*
@@ -399,4 +404,5 @@
          */
         spinlock_lock(&threads_lock);
-        btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
+        btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
+            NULL);
         spinlock_unlock(&threads_lock);
@@ -409,7 +415,6 @@
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting
- * state. All pending timeouts are executed.
- *
+ * End current thread execution and switch it to the exiting state. All pending
+ * timeouts are executed.
  */
 void thread_exit(void)
@@ -420,5 +425,6 @@
         ipl = interrupts_disable();
         spinlock_lock(&THREAD->lock);
-        if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
+        if (THREAD->timeout_pending) {
+                /* busy waiting for timeouts in progress */
                 spinlock_unlock(&THREAD->lock);
                 interrupts_restore(ipl);
@@ -444,5 +450,5 @@
 void thread_sleep(uint32_t sec)
 {
-        thread_usleep(sec *1000000);
+        thread_usleep(sec * 1000000);
 }
 
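The rewrapped comment on threads_btree states the central locking rule of this file: a thread found in the B+tree is guaranteed to exist only for as long as threads_lock is held. A minimal sketch of a lookup honoring that rule follows; the helper name thread_known() is hypothetical, and the btree_search() call is an assumption mirroring the btree_insert() key seen in thread_create() above.

/* Illustrative sketch only: check whether t is a known thread.
 * Assumes btree_search(&tree, key, &leaf) returns the stored
 * value or NULL, symmetrically to the btree_insert() call in
 * thread_create(). */
bool thread_known(thread_t *t)
{
        btree_node_t *leaf;
        bool found;

        spinlock_lock(&threads_lock);
        /* Threads are keyed by their kernel address. */
        found = btree_search(&threads_btree,
            (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
        /*
         * Any dereference of t must happen before the lock is
         * dropped; afterwards the thread may be destroyed and
         * the answer is only a hint.
         */
        spinlock_unlock(&threads_lock);

        return found;
}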
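Similarly, the reworded "Thread wrapper" comment documents a contract whose body is elided from the hunk: the wrapper named cushion (see the FADDR(cushion) call in thread_create()) is entered with interrupts disabled and guarantees that thread_exit() runs once the thread's implementing function returns. A hedged sketch of that contract, with the thread_code and thread_arg field names assumed for illustration:

/* Sketch of the wrapper contract described by the doc comment;
 * THREAD->thread_code and THREAD->thread_arg are assumed field
 * names, not taken from this hunk. */
static void cushion(void)
{
        void (*f)(void *) = THREAD->thread_code;
        void *arg = THREAD->thread_arg;

        /* Entered with interrupts disabled (see doc comment);
         * the payload itself runs with interrupts enabled. */
        interrupts_enable();
        f(arg);

        /* Reached when f() returns: every thread terminates
         * through thread_exit(), never by simply returning. */
        thread_exit();
        /* not reached */
}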