00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00038 #include <main/uinit.h>
00039 #include <proc/thread.h>
00040 #include <proc/task.h>
00041 #include <proc/uarg.h>
00042 #include <mm/as.h>
00043 #include <mm/slab.h>
00044 #include <synch/spinlock.h>
00045 #include <arch.h>
00046 #include <panic.h>
00047 #include <adt/btree.h>
00048 #include <adt/list.h>
00049 #include <ipc/ipc.h>
00050 #include <security/cap.h>
00051 #include <memstr.h>
00052 #include <print.h>
00053 #include <elf.h>
00054 #include <errno.h>
00055 #include <syscall/copy.h>
00056 #include <console/klog.h>
00057
/* Default number of pages for the stack of a loaded userspace program
 * (see task_run_program()); may be overridden by an earlier definition. */
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif
00061
/** Spinlock protecting the tasks_btree B+tree and task_counter. */
SPINLOCK_INITIALIZE(tasks_lock);

/** B+tree of all tasks in the system, keyed by task ID.
 *  Protected by tasks_lock (taken with interrupts disabled).
 */
btree_t tasks_btree;

/* Source of unique task IDs; incremented under tasks_lock in task_create(). */
static task_id_t task_counter = 0;

/* Kernel threads that clean up after a killed / exited task. */
static void ktaskclnp(void *arg);
static void ktaskgc(void *arg);
00079
00085 void task_init(void)
00086 {
00087 TASK = NULL;
00088 btree_create(&tasks_btree);
00089 }
00090
00091
/** Create a new task.
 *
 * Allocates and initializes a task structure, wires up its IPC
 * answerbox and phones (connecting phone 0 to the global ipc_phone_0
 * if it exists), attaches the task to address space @as (taking a
 * reference on it), and registers the task in the global tasks_btree
 * under a freshly allocated unique ID.
 *
 * @param as   Address space the new task runs in; its refcount is
 *             incremented here.
 * @param name Task name; the pointer is stored, not copied — the
 *             caller must keep the string alive.
 *
 * @return The new task structure (malloc is called with flags 0;
 *         presumably this blocks rather than returning NULL — TODO
 *         confirm the allocator contract, the result is not checked).
 */
task_t *task_create(as_t *as, char *name)
{
	ipl_t ipl;
	task_t *ta;
	int i;

	ta = (task_t *) malloc(sizeof(task_t), 0);

	task_create_arch(ta);

	spinlock_initialize(&ta->lock, "task_ta_lock");
	list_initialize(&ta->th_head);
	ta->as = as;
	ta->name = name;
	ta->main_thread = NULL;
	ta->refcount = 0;

	ta->capabilities = 0;
	ta->accept_new_threads = true;

	/* IPC state: answerbox, phones (phone 0 wired to the naming service
	 * phone if one has been registered), and active-call counter. */
	ipc_answerbox_init(&ta->answerbox);
	for (i=0; i < IPC_MAX_PHONES;i++)
		ipc_phone_init(&ta->phones[i]);
	if (ipc_phone_0)
		ipc_phone_connect(&ta->phones[0], ipc_phone_0);
	atomic_set(&ta->active_calls, 0);

	mutex_initialize(&ta->futexes_lock);
	btree_create(&ta->futexes);

	/* Interrupts stay disabled across both critical sections below. */
	ipl = interrupts_disable();

	/* The task holds a reference to its address space. */
	mutex_lock(&as->lock);
	as->refcount++;
	mutex_unlock(&as->lock);

	spinlock_lock(&tasks_lock);

	/* Allocate a unique ID and publish the task; both are protected
	 * by tasks_lock. */
	ta->taskid = ++task_counter;
	btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);

	return ta;
}
00152
00157 void task_destroy(task_t *t)
00158 {
00159 task_destroy_arch(t);
00160 btree_destroy(&t->futexes);
00161
00162 mutex_lock_active(&t->as->lock);
00163 if (--t->as->refcount == 0) {
00164 mutex_unlock(&t->as->lock);
00165 as_destroy(t->as);
00166
00167
00168
00169 } else {
00170 mutex_unlock(&t->as->lock);
00171 }
00172
00173 free(t);
00174 TASK = NULL;
00175 }
00176
/** Create a task and run an already-loaded ELF program in it.
 *
 * Creates a fresh address space, loads the ELF image at
 * @program_addr into it, creates a userspace stack area, and starts
 * two threads: "uinit", which enters userspace at the ELF entry
 * point, and "ktaskgc", which reaps the task once the uinit thread
 * exits (see ktaskgc()).
 *
 * @param program_addr Kernel-accessible address of the ELF image.
 * @param name         Name for the new task (stored, not copied).
 *
 * @return The new task, or NULL when the ELF image fails to load.
 */
task_t * task_run_program(void *program_addr, char *name)
{
	as_t *as;
	as_area_t *a;
	int rc;
	thread_t *t1, *t2;
	task_t *task;
	uspace_arg_t *kernel_uarg;

	as = as_create(0);
	ASSERT(as);

	rc = elf_load((elf_header_t *) program_addr, as);
	if (rc != EE_OK) {
		as_destroy(as);
		return NULL;
	}

	/*
	 * Arguments for uinit: enter userspace at the ELF entry point
	 * with the stack at USTACK_ADDRESS.
	 * NOTE(review): kernel_uarg is heap-allocated here and handed to
	 * the uinit thread; ownership/freeing is presumably uinit's —
	 * TODO confirm who frees it.
	 */
	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	kernel_uarg->uspace_entry = (void *) ((elf_header_t *) program_addr)->e_entry;
	kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
	kernel_uarg->uspace_thread_function = NULL;
	kernel_uarg->uspace_thread_arg = NULL;
	kernel_uarg->uspace_uarg = NULL;

	task = task_create(as, name);
	ASSERT(task);

	/*
	 * Create the userspace stack area.
	 * NOTE(review): the return value is stored in 'a' but never
	 * checked or used.
	 */
	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
		LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
		USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);

	/* The initial userspace thread. */
	t1 = thread_create(uinit, kernel_uarg, task, 0, "uinit");
	ASSERT(t1);

	/* The garbage collector: joins t1 and kills the task when it exits. */
	t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc");
	ASSERT(t2);
	thread_ready(t2);

	thread_ready(t1);

	return task;
}
00236
00243 __native sys_task_get_id(task_id_t *uspace_task_id)
00244 {
00245
00246
00247
00248
00249 return (__native) copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid));
00250 }
00251
00261 task_t *task_find_by_id(task_id_t id)
00262 {
00263 btree_node_t *leaf;
00264
00265 return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
00266 }
00267
/** Kill a task.
 *
 * Removes the task from tasks_btree so it can no longer be looked
 * up, stops it from accepting new threads, interrupts all of its
 * threads (waking sleeping ones), and spawns a ktaskclnp thread
 * inside the task to join the dying threads and free resources.
 *
 * @param id ID of the task to kill.
 *
 * @return 0 on success, EPERM when asked to kill task 1 (presumably
 *         the initial system task — TODO confirm), ENOENT when no
 *         task with @id exists.
 */
int task_kill(task_id_t id)
{
	ipl_t ipl;
	task_t *ta;
	thread_t *t;
	link_t *cur;

	if (id == 1)
		return EPERM;

	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	if (!(ta = task_find_by_id(id))) {
		spinlock_unlock(&tasks_lock);
		interrupts_restore(ipl);
		return ENOENT;
	}

	/*
	 * Take a reference so the task cannot go away while the cleanup
	 * thread is being created below; dropped again once we hold
	 * ta->lock afterwards.
	 */
	spinlock_lock(&ta->lock);
	ta->refcount++;
	spinlock_unlock(&ta->lock);

	/* Unpublish the task; further task_find_by_id() calls miss it. */
	btree_remove(&tasks_btree, ta->taskid, NULL);
	spinlock_unlock(&tasks_lock);

	/*
	 * The cleanup thread runs in the context of the dying task.
	 * NOTE(review): a NULL return is tolerated (see the guarded
	 * thread_ready() at the end) — presumably thread_create() can
	 * fail here; TODO confirm under what conditions.
	 */
	t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp");

	spinlock_lock(&ta->lock);
	ta->accept_new_threads = false;
	ta->refcount--;

	/*
	 * Interrupt every thread of the task except the cleanup thread;
	 * sleeping threads are additionally woken so they observe the
	 * interruption.
	 */
	for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
		thread_t *thr;
		bool sleeping = false;

		thr = list_get_instance(cur, thread_t, th_link);
		if (thr == t)
			continue;

		spinlock_lock(&thr->lock);
		thr->interrupted = true;
		if (thr->state == Sleeping)
			sleeping = true;
		spinlock_unlock(&thr->lock);

		if (sleeping)
			waitq_interrupt_sleep(thr);
	}

	spinlock_unlock(&ta->lock);
	interrupts_restore(ipl);

	/* Start the cleanup thread only if it was actually created. */
	if (t)
		thread_ready(t);

	return 0;
}
00335
/** Print a one-line summary of every task to the kernel console. */
void task_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Hold tasks_lock (interrupts off) while walking the B+tree. */
	ipl = interrupts_disable();
	spinlock_lock(&tasks_lock);

	/* Iterate over all leaf nodes; each leaf holds several tasks. */
	for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			task_t *t;
			int j;

			t = (task_t *) node->value[i];

			spinlock_lock(&t->lock);
			/*
			 * NOTE(review): t and t->as (pointers) are printed
			 * with %#zx and j (int) with %zd — this relies on
			 * the kernel printf treating these argument types
			 * interchangeably; verify the format specifiers
			 * against the kernel's printf implementation.
			 */
			printf("%s(%lld): address=%#zx, as=%#zx, ActiveCalls: %zd",
				t->name, t->taskid, t, t->as, atomic_get(&t->active_calls));
			for (j=0; j < IPC_MAX_PHONES; j++) {
				if (t->phones[j].callee)
					printf(" Ph(%zd): %#zx ", j, t->phones[j].callee);
			}
			printf("\n");
			spinlock_unlock(&t->lock);
		}
	}

	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
}
00372
/** Kernel thread that cleans up after a killed task (see task_kill()).
 *
 * Runs inside the dying task (operates on TASK).  Repeatedly scans
 * the task's thread list, claims one joinable thread at a time by
 * setting join_type = TaskClnp, joins and detaches it, until only
 * this thread and the task's main thread remain; then releases the
 * task's IPC and futex state.
 *
 * @param arg Unused.
 */
void ktaskclnp(void *arg)
{
	ipl_t ipl;
	thread_t *t = NULL, *main_thread;
	link_t *cur;
	bool again;

	thread_detach(THREAD);

loop:
	ipl = interrupts_disable();
	spinlock_lock(&TASK->lock);

	main_thread = TASK->main_thread;

	/*
	 * Find a thread to join: skip ourselves and the main thread;
	 * a thread someone else is already joining (join_type != None)
	 * forces another pass later.
	 */
	again = false;
	for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
		t = list_get_instance(cur, thread_t, th_link);

		spinlock_lock(&t->lock);
		if (t == THREAD) {
			spinlock_unlock(&t->lock);
			continue;
		} else if (t == main_thread) {
			spinlock_unlock(&t->lock);
			continue;
		} else if (t->join_type != None) {
			spinlock_unlock(&t->lock);
			again = true;
			continue;
		} else {
			/* Claim this thread for ourselves. */
			t->join_type = TaskClnp;
			spinlock_unlock(&t->lock);
			again = false;
			break;
		}
	}

	spinlock_unlock(&TASK->lock);
	interrupts_restore(ipl);

	if (again) {
		/*
		 * Another joiner is still busy with some thread of this
		 * task; yield the CPU and rescan.
		 */
		scheduler();
		goto loop;
	}

	if (t != THREAD) {
		/*
		 * NOTE(review): when the scan found no victim, t holds the
		 * last thread examined; this relies on that being THREAD
		 * (i.e. this cleanup thread sitting last in th_head).  If
		 * main_thread could be last, the ASSERT below would fire —
		 * TODO confirm the list-ordering guarantee.  The main
		 * thread itself is joined by ktaskgc(), not here.
		 */
		ASSERT(t != main_thread);
		thread_join(t);
		thread_detach(t);
		goto loop;	/* go reap another thread */
	}

	/*
	 * Only this thread and the main thread are left; release the
	 * task's IPC and futex resources.
	 */
	ipc_cleanup();
	futex_cleanup();
	klog_printf("Cleanup of task %lld completed.", TASK->taskid);
}
00442
/** Kernel thread that kills its task once the main userspace thread exits.
 *
 * Waits for thread @arg — the task's initial userspace thread, see
 * task_run_program() — with a one-second timeout.  On each timeout
 * it garbage-collects any other threads of the task that have
 * already become Undead.  When @arg finally exits, the whole task
 * is killed via task_kill().
 *
 * @param arg The task's main userspace thread (thread_t *).
 */
void ktaskgc(void *arg)
{
	thread_t *t = (thread_t *) arg;
loop:
	/*
	 * Wait for the main thread; wake up every second (1000000 us)
	 * to reap Undead threads in the meantime.
	 */
	if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {
		ipl_t ipl;
		link_t *cur;
		thr ead_t *thr = NULL;

		/*
		 * Find one Undead thread nobody else is joining and claim
		 * it by setting join_type = TaskGC.
		 */
more_gc:
		ipl = interrupts_disable();
		spinlock_lock(&TASK->lock);

		for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
			thr = list_get_instance(cur, thread_t, th_link);
			spinlock_lock(&thr->lock);
			if (thr != t && thr->state == Undead && thr->join_type == None) {
				thr->join_type = TaskGC;
				spinlock_unlock(&thr->lock);
				break;
			}
			spinlock_unlock(&thr->lock);
			thr = NULL;	/* nothing claimed yet */
		}
		spinlock_unlock(&TASK->lock);
		interrupts_restore(ipl);

		if (thr) {
			/* Reap the claimed thread, yield, look for more. */
			thread_join(thr);
			thread_detach(thr);
			scheduler();
			goto more_gc;
		}

		goto loop;
	}
	/* Main thread has exited: detach it and kill the whole task. */
	thread_detach(t);
	task_kill(TASK->taskid);
}
00497