Changeset d8431986 in mainline
- Timestamp: 2007-05-31T21:25:54Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 7f043c0
- Parents: e8a0b90
- Location: kernel/generic
- Files: 2 edited
kernel/generic/include/proc/thread.h

--- kernel/generic/include/proc/thread.h	(re8a0b90)
+++ kernel/generic/include/proc/thread.h	(rd8431986)
@@ -64,4 +64,6 @@
 /** Thread executes in userspace. */
 #define THREAD_FLAG_USPACE	(1 << 2)
+/** Thread will be attached by the caller. */
+#define THREAD_FLAG_NOATTACH	(1 << 3)
 
 /** Thread states. */
@@ -221,4 +223,5 @@
 extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
     int flags, char *name, bool uncounted);
+extern void thread_attach(thread_t *t, task_t *task);
 extern void thread_ready(thread_t *t);
 extern void thread_exit(void) __attribute__((noreturn));
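The new flag and prototype enable two-phase thread creation: a caller can ask thread_create() not to attach the new thread, perform work that may still fail, and only then publish the thread with thread_attach(). A minimal sketch of the pattern, modelled on the sys_thread_create() change below; worker() and prepare_resources() are hypothetical, not HelenOS APIs:

	thread_t *t;
	ipl_t ipl;

	/* Create the thread, but do not attach it to TASK yet. */
	t = thread_create(worker, arg, TASK,
	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, "worker", false);
	if (t) {
		if (prepare_resources(t) != 0) {
			/*
			 * The thread was never attached, hence never visible:
			 * free its structure and drop the task reference
			 * taken by thread_create().
			 */
			slab_free(thread_slab, t);

			ipl = interrupts_disable();
			spinlock_lock(&TASK->lock);
			TASK->refcount--;
			spinlock_unlock(&TASK->lock);
			interrupts_restore(ipl);
		} else {
			thread_attach(t, TASK);	/* publish the thread */
			thread_ready(t);	/* and let it run */
		}
	}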
kernel/generic/src/proc/thread.c

--- kernel/generic/src/proc/thread.c	(re8a0b90)
+++ kernel/generic/src/proc/thread.c	(rd8431986)
@@ -158,18 +158,18 @@
 
 #ifdef ARCH_HAS_FPU
-#  ifdef CONFIG_FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 	t->saved_fpu_context = NULL;
-#  else
-	t->saved_fpu_context = slab_alloc(fpu_context_slab,kmflags);
+#else
+	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
 	if (!t->saved_fpu_context)
 		return -1;
-#  endif
+#endif
 #endif
 
 	t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-	if (! t->kstack) {
+	if (!t->kstack) {
 #ifdef ARCH_HAS_FPU
 		if (t->saved_fpu_context)
-			slab_free(fpu_context_slab,t->saved_fpu_context);
+			slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
 		return -1;
@@ -190,5 +190,5 @@
 #ifdef ARCH_HAS_FPU
 	if (t->saved_fpu_context)
-		slab_free(fpu_context_slab,t->saved_fpu_context);
+		slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
 	return 1; /* One page freed */
@@ -233,5 +233,5 @@
 	spinlock_lock(&t->lock);
 
-	ASSERT(! (t->state == Ready));
+	ASSERT(!(t->state == Ready));
 
 	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
@@ -259,54 +259,3 @@
 
 	interrupts_restore(ipl);
-}
-
-/** Destroy thread memory structure
- *
- * Detach thread from all queues, cpus etc. and destroy it.
- *
- * Assume thread->lock is held!!
- */
-void thread_destroy(thread_t *t)
-{
-	bool destroy_task = false;
-
-	ASSERT(t->state == Exiting || t->state == Undead);
-	ASSERT(t->task);
-	ASSERT(t->cpu);
-
-	spinlock_lock(&t->cpu->lock);
-	if(t->cpu->fpu_owner == t)
-		t->cpu->fpu_owner = NULL;
-	spinlock_unlock(&t->cpu->lock);
-
-	spinlock_unlock(&t->lock);
-
-	spinlock_lock(&threads_lock);
-	btree_remove(&threads_btree, (btree_key_t) ((uintptr_t ) t), NULL);
-	spinlock_unlock(&threads_lock);
-
-	/*
-	 * Detach from the containing task.
-	 */
-	spinlock_lock(&t->task->lock);
-	list_remove(&t->th_link);
-	if (--t->task->refcount == 0) {
-		t->task->accept_new_threads = false;
-		destroy_task = true;
-	}
-	spinlock_unlock(&t->task->lock);
-
-	if (destroy_task)
-		task_destroy(t->task);
-
-	/*
-	 * If the thread had a userspace context, free up its kernel_uarg
-	 * structure.
-	 */
-	if (t->flags & THREAD_FLAG_USPACE) {
-		ASSERT(t->thread_arg);
-		free(t->thread_arg);
-	}
-
-	slab_free(thread_slab, t);
 }
@@ -393,18 +342,99 @@
 	/* might depend on previous initialization */
 	thread_create_arch(t);
 
-	/*
-	 * Attach to the containing task.
-	 */
 	ipl = interrupts_disable();
 	spinlock_lock(&task->lock);
 	if (!task->accept_new_threads) {
 		spinlock_unlock(&task->lock);
 		slab_free(thread_slab, t);
 		interrupts_restore(ipl);
 		return NULL;
-	}
+	} else {
+		/*
+		 * Bump the reference count so that this task cannot be
+		 * destroyed while the new thread is being attached to it.
+		 */
+		task->refcount++;
+	}
+	spinlock_unlock(&task->lock);
+	interrupts_restore(ipl);
+
+	if (!(flags & THREAD_FLAG_NOATTACH))
+		thread_attach(t, task);
+
+	return t;
+}
+
+/** Destroy thread memory structure
+ *
+ * Detach thread from all queues, cpus etc. and destroy it.
+ *
+ * Assume thread->lock is held!!
+ */
+void thread_destroy(thread_t *t)
+{
+	bool destroy_task = false;
+
+	ASSERT(t->state == Exiting || t->state == Undead);
+	ASSERT(t->task);
+	ASSERT(t->cpu);
+
+	spinlock_lock(&t->cpu->lock);
+	if (t->cpu->fpu_owner == t)
+		t->cpu->fpu_owner = NULL;
+	spinlock_unlock(&t->cpu->lock);
+
+	spinlock_unlock(&t->lock);
+
+	spinlock_lock(&threads_lock);
+	btree_remove(&threads_btree, (btree_key_t) ((uintptr_t ) t), NULL);
+	spinlock_unlock(&threads_lock);
+
+	/*
+	 * Detach from the containing task.
+	 */
+	spinlock_lock(&t->task->lock);
+	list_remove(&t->th_link);
+	if (--t->task->refcount == 0) {
+		t->task->accept_new_threads = false;
+		destroy_task = true;
+	}
+	spinlock_unlock(&t->task->lock);
+
+	if (destroy_task)
+		task_destroy(t->task);
+
+	/*
+	 * If the thread had a userspace context, free up its kernel_uarg
+	 * structure.
+	 */
+	if (t->flags & THREAD_FLAG_USPACE) {
+		ASSERT(t->thread_arg);
+		free(t->thread_arg);
+	}
+
+	slab_free(thread_slab, t);
+}
+
+/** Make the thread visible to the system.
+ *
+ * Attach the thread structure to the current task and make it visible in the
+ * threads_btree.
+ *
+ * @param t Thread to be attached to the task.
+ * @param task Task to which the thread is to be attached.
+ */
+void thread_attach(thread_t *t, task_t *task)
+{
+	ipl_t ipl;
+
+	/*
+	 * Attach to the current task.
+	 */
+	ipl = interrupts_disable();
+	spinlock_lock(&task->lock);
+	ASSERT(task->refcount);
 	list_append(&t->th_link, &task->th_head);
-	if (task->refcount++ == 0)
+	if (task->refcount == 1)
 		task->main_thread = t;
 	spinlock_unlock(&task->lock);
@@ -419,6 +449,4 @@
 
 	interrupts_restore(ipl);
-
-	return t;
 }
 
@@ -666,13 +694,49 @@
 	}
 
-	t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf,
-	    false);
+	t = thread_create(uinit, kernel_uarg, TASK,
+	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
 	if (t) {
+		if (uspace_thread_id != NULL) {
+			int rc;
+
+			rc = copy_to_uspace(uspace_thread_id, &t->tid,
+			    sizeof(t->tid));
+			if (rc != 0) {
+				ipl_t ipl;
+
+				/*
+				 * We have encountered a failure, but the thread
+				 * has already been created. We need to undo its
+				 * creation now.
+				 */
+
+				/*
+				 * The new thread structure is initialized,
+				 * but is still not visible to the system.
+				 * We can safely deallocate it.
+				 */
+				slab_free(thread_slab, t);
+				free(kernel_uarg);
+
+				/*
+				 * Now we need to decrement the task reference
+				 * counter. Because we are running within the
+				 * same task, thread t is not the last thread
+				 * in the task, so it is safe to merely
+				 * decrement the counter.
+				 */
+				ipl = interrupts_disable();
+				spinlock_lock(&TASK->lock);
+				TASK->refcount--;
+				spinlock_unlock(&TASK->lock);
+				interrupts_restore(ipl);
+
+				return (unative_t) rc;
+			}
+		}
+		thread_attach(t, TASK);
 		thread_ready(t);
-		if (uspace_thread_id != NULL)
-			return (unative_t) copy_to_uspace(uspace_thread_id,
-			    &t->tid, sizeof(t->tid));
-		else
-			return 0;
+
+		return 0;
 	} else
 		free(kernel_uarg);
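For callers that do not pass THREAD_FLAG_NOATTACH, behaviour is unchanged: thread_create() bumps task->refcount and attaches the thread itself, which is what keeps the ASSERT(task->refcount) in thread_attach() satisfied. A sketch of the common kernel-thread path under the new code, assuming a hypothetical kthread_main() routine:

	/*
	 * Default path: no THREAD_FLAG_NOATTACH, so thread_create()
	 * calls thread_attach() internally before returning.
	 */
	thread_t *t = thread_create(kthread_main, NULL, TASK, 0, "kthread", true);
	if (t)
		thread_ready(t);	/* already attached and visible */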