Changeset 7509ddc in mainline
- Timestamp:
- 2006-06-04T21:54:49Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 34dcd3f
- Parents:
- 2cb5e64
- Location:
- generic
- Files:
-
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
generic/include/proc/task.h
r2cb5e64 r7509ddc 41 41 /** Task structure. */ 42 42 struct task { 43 /** Task lock. 44 * 45 * Must be acquired before threads_lock and thread lock of any of its threads. 46 */ 43 47 SPINLOCK_DECLARE(lock); 48 44 49 char *name; 45 50 link_t th_head; /**< List of threads contained in this task. */ 46 51 as_t *as; /**< Address space. */ 47 52 task_id_t taskid; /**< Unique identity of task */ 53 54 /** If this is true, new threads can become part of the task. */ 55 bool accept_new_threads; 56 57 count_t refcount; /**< Number of references (i.e. threads). */ 48 58 49 59 cap_t capabilities; /**< Task capabilities. */ … … 71 81 extern void task_init(void); 72 82 extern task_t *task_create(as_t *as, char *name); 83 extern void task_destroy(task_t *t); 73 84 extern task_t *task_run_program(void *program_addr, char *name); 74 85 extern task_t *task_find_by_id(task_id_t id); 86 extern int task_kill(task_id_t id); 87 75 88 76 89 #ifndef task_create_arch -
generic/include/proc/thread.h
r2cb5e64 r7509ddc 73 73 * 74 74 * Protects the whole thread structure except list links above. 75 * Must be acquired before T.lock for each T of type task_t.76 *77 75 */ 78 76 SPINLOCK_DECLARE(lock); … … 99 97 /** True if this thread is executing copy_to_uspace(). False otherwise. */ 100 98 bool in_copy_to_uspace; 101 99 100 /** 101 * If true, the thread will not go to sleep at all and will 102 * call thread_exit() before returning to userspace. 103 */ 104 bool interrupted; 105 102 106 bool detached; /**< If true, thread_join_timeout() cannot be used on this thread. */ 103 107 waitq_t join_wq; /**< Waitq for thread_join_timeout(). */ -
generic/src/proc/task.c
r2cb5e64 r7509ddc 48 48 #include <print.h> 49 49 #include <elf.h> 50 #include <errno.h> 50 51 #include <syscall/copy.h> 51 52 … … 57 58 btree_t tasks_btree; 58 59 static task_id_t task_counter = 0; 60 61 static void ktask_cleanup(void *); 59 62 60 63 /** Initialize tasks … … 95 98 ta->name = name; 96 99 100 ta->refcount = 0; 101 97 102 ta->capabilities = 0; 103 ta->accept_new_threads = true; 98 104 99 105 ipc_answerbox_init(&ta->answerbox); … … 128 134 } 129 135 136 /** Destroy task. 137 * 138 * @param t Task to be destroyed. 139 */ 140 void task_destroy(task_t *t) 141 { 142 } 143 130 144 /** Create new task with 1 thread and run it 131 145 * … … 206 220 207 221 return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf); 222 } 223 224 /** Kill task. 225 * 226 * @param id ID of the task to be killed. 227 * 228 * @return 0 on success or an error code from errno.h 229 */ 230 int task_kill(task_id_t id) 231 { 232 ipl_t ipl; 233 task_t *ta; 234 thread_t *t; 235 link_t *cur; 236 237 ipl = interrupts_disable(); 238 spinlock_lock(&tasks_lock); 239 240 if (!(ta = task_find_by_id(id))) { 241 spinlock_unlock(&tasks_lock); 242 interrupts_restore(ipl); 243 return ENOENT; 244 } 245 246 spinlock_lock(&ta->lock); 247 ta->refcount++; 248 spinlock_unlock(&ta->lock); 249 250 t = thread_create(ktask_cleanup, NULL, ta, 0, "ktask_cleanup"); 251 252 spinlock_lock(&ta->lock); 253 ta->refcount--; 254 255 for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) { 256 thread_t *thr; 257 bool sleeping = false; 258 259 thr = list_get_instance(cur, thread_t, th_link); 260 if (thr == t) 261 continue; 262 263 spinlock_lock(&thr->lock); 264 thr->interrupted = true; 265 if (thr->state == Sleeping) 266 sleeping = true; 267 spinlock_unlock(&thr->lock); 268 269 if (sleeping) 270 waitq_interrupt_sleep(thr); 271 } 272 273 thread_ready(t); 274 275 return 0; 208 276 } 209 277 … … 244 312 interrupts_restore(ipl); 245 313 } 314 315 /** Kernel thread used to cleanup the task. 
*/ 316 void ktask_cleanup(void *arg) 317 { 318 /* 319 * TODO: 320 * Wait until it is safe to clean up the task (i.e. all other threads exit) 321 * and do the cleanup (e.g. close IPC communication and release used futexes). 322 * When this thread exits, the task refcount drops to zero and the task structure is 323 * cleaned. 324 */ 325 } -
generic/src/proc/thread.c
r2cb5e64 r7509ddc 234 234 void thread_destroy(thread_t *t) 235 235 { 236 bool destroy_task = false; 237 236 238 ASSERT(t->state == Exiting); 237 239 ASSERT(t->task); … … 242 244 t->cpu->fpu_owner=NULL; 243 245 spinlock_unlock(&t->cpu->lock); 246 247 spinlock_unlock(&t->lock); 248 249 spinlock_lock(&threads_lock); 250 btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL); 251 spinlock_unlock(&threads_lock); 244 252 245 253 /* … … 248 256 spinlock_lock(&t->task->lock); 249 257 list_remove(&t->th_link); 250 spinlock_unlock(&t->task->lock); 251 252 spinlock_unlock(&t->lock); 253 254 spinlock_lock(&threads_lock); 255 btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL); 256 spinlock_unlock(&threads_lock); 258 if (--t->task->refcount == 0) { 259 t->task->accept_new_threads = false; 260 destroy_task = true; 261 } 262 spinlock_unlock(&t->task->lock); 263 264 if (destroy_task) 265 task_destroy(t->task); 257 266 258 267 slab_free(thread_slab, t); … … 320 329 t->in_copy_from_uspace = false; 321 330 t->in_copy_to_uspace = false; 322 331 332 t->interrupted = false; 323 333 t->detached = false; 324 334 waitq_initialize(&t->join_wq); … … 330 340 t->fpu_context_exists = 0; 331 341 t->fpu_context_engaged = 0; 332 333 /*334 * Register this thread in the system-wide list.335 */336 ipl = interrupts_disable();337 spinlock_lock(&threads_lock);338 btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);339 spinlock_unlock(&threads_lock);340 342 341 343 /* … … 343 345 */ 344 346 spinlock_lock(&task->lock); 347 if (!task->accept_new_threads) { 348 spinlock_unlock(&task->lock); 349 slab_free(thread_slab, t); 350 return NULL; 351 } 345 352 list_append(&t->th_link, &task->th_head); 353 task->refcount++; 346 354 spinlock_unlock(&task->lock); 355 356 /* 357 * Register this thread in the system-wide list. 
358 */ 359 ipl = interrupts_disable(); 360 spinlock_lock(&threads_lock); 361 btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL); 362 spinlock_unlock(&threads_lock); 347 363 348 364 interrupts_restore(ipl); -
generic/src/synch/waitq.c
r2cb5e64 r7509ddc 312 312 spinlock_lock(&THREAD->lock); 313 313 314 if (THREAD->interrupted) { 315 spinlock_unlock(&THREAD->lock); 316 spinlock_unlock(&wq->lock); 317 return ESYNCH_INTERRUPTED; 318 } 319 314 320 if (flags & SYNCH_FLAGS_INTERRUPTIBLE) { 315 321 /* -
generic/src/syscall/syscall.c
r2cb5e64 r7509ddc 91 91 __native a4, __native id) 92 92 { 93 __native rc; 94 ipl_t ipl; 95 bool exit = false; 96 93 97 if (id < SYSCALL_END) 94 r eturnsyscall_table[id](a1,a2,a3,a4);98 rc = syscall_table[id](a1,a2,a3,a4); 95 99 else 96 100 panic("Undefined syscall %d", id); 101 102 ipl = interrupts_disable(); 103 spinlock_lock(&THREAD->lock); 104 if (THREAD->interrupted) 105 exit = true; 106 spinlock_unlock(&THREAD->lock); 107 interrupts_restore(ipl); 108 109 if (exit) 110 thread_exit(); 111 112 return rc; 97 113 } 98 114
Note:
See TracChangeset
for help on using the changeset viewer.