Changes in kernel/generic/src/proc/task.c [1d432f9:d3808d3] in mainline
1 file edited.
Legend:
- Unmodified lines carry no prefix
- Added lines (rd3808d3) are prefixed with "+"
- Removed lines (r1d432f9) are prefixed with "-"
- A lone "…" marks elided unmodified lines
kernel/generic/src/proc/task.c
--- kernel/generic/src/proc/task.c (r1d432f9)
+++ kernel/generic/src/proc/task.c (rd3808d3)
 /** Spinlock protecting the tasks_tree AVL tree. */
-IRQ_SPINLOCK_INITIALIZE(tasks_lock);
+SPINLOCK_INITIALIZE(tasks_lock);
 
 /** AVL tree of active tasks.
…
 /* Forward declarations. */
 static void task_kill_internal(task_t *);
-static int tsk_constructor(void *, unsigned int);
-
-/** Initialize kernel tasks support.
- *
- */
+static int tsk_constructor(void *, int);
+
+/** Initialize kernel tasks support. */
 void task_init(void)
 {
…
 }
 
-/** Task finish walker.
- *
+/*
  * The idea behind this walker is to kill and count all tasks different from
  * TASK.
- *
  */
 static bool task_done_walker(avltree_node_t *node, void *arg)
 {
-    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-    size_t *cnt = (size_t *) arg;
+    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
+    unsigned *cnt = (unsigned *) arg;
 
-    if (task != TASK) {
+    if (t != TASK) {
         (*cnt)++;
-
 #ifdef CONFIG_DEBUG
-        printf("[%"PRIu64"] ", task->taskid);
+        printf("[%"PRIu64"] ", t->taskid);
 #endif
-
-        task_kill_internal(task);
+        task_kill_internal(t);
     }
 
…
 }
 
-/** Kill all tasks except the current task.
- *
- */
+/** Kill all tasks except the current task. */
 void task_done(void)
 {
-    size_t tasks_left;
-
-    /* Repeat until there are any tasks except TASK */
-    do {
+    unsigned tasks_left;
+
+    do { /* Repeat until there are any tasks except TASK */
+        /* Messing with task structures, avoid deadlock */
 #ifdef CONFIG_DEBUG
         printf("Killing tasks... ");
 #endif
-
-        irq_spinlock_lock(&tasks_lock, true);
+        ipl_t ipl = interrupts_disable();
+        spinlock_lock(&tasks_lock);
         tasks_left = 0;
         avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
-        irq_spinlock_unlock(&tasks_lock, true);
-
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         thread_sleep(1);
-
 #ifdef CONFIG_DEBUG
         printf("\n");
 #endif
-    } while (tasks_left > 0);
-}
-
-int tsk_constructor(void *obj, unsigned int kmflags)
-{
-    task_t *task = (task_t *) obj;
-
-    atomic_set(&task->refcount, 0);
-    atomic_set(&task->lifecount, 0);
-    atomic_set(&task->active_calls, 0);
-
-    irq_spinlock_initialize(&task->lock, "task_t_lock");
-    mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
-
-    list_initialize(&task->th_head);
-    list_initialize(&task->sync_box_head);
-
-    ipc_answerbox_init(&task->answerbox, task);
-
-    size_t i;
+    } while (tasks_left);
+}
+
+int tsk_constructor(void *obj, int kmflags)
+{
+    task_t *ta = obj;
+    int i;
+
+    atomic_set(&ta->refcount, 0);
+    atomic_set(&ta->lifecount, 0);
+    atomic_set(&ta->active_calls, 0);
+
+    spinlock_initialize(&ta->lock, "task_ta_lock");
+    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
+
+    list_initialize(&ta->th_head);
+    list_initialize(&ta->sync_box_head);
+
+    ipc_answerbox_init(&ta->answerbox, ta);
     for (i = 0; i < IPC_MAX_PHONES; i++)
-        ipc_phone_init(&task->phones[i]);
+        ipc_phone_init(&ta->phones[i]);
 
 #ifdef CONFIG_UDEBUG
     /* Init kbox stuff */
-    task->kb.thread = NULL;
-    ipc_answerbox_init(&task->kb.box, task);
-    mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
+    ta->kb.thread = NULL;
+    ipc_answerbox_init(&ta->kb.box, ta);
+    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
 #endif
 
…
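The recurring pattern across these hunks swaps the combined IRQ-aware spinlock API for a plain spinlock bracketed by explicit interrupt-level handling. A minimal sketch of the two equivalent critical sections, using only calls that appear in this changeset; the two stanzas correspond to the two revisions and would not coexist in one build (tasks_lock has a different type on each side), and the wrapper function name is hypothetical:

static void example_critical_section(void)
{
    /* r1d432f9 style: the IRQ-aware spinlock saves and restores the
     * interrupt state itself when its second argument is true. */
    irq_spinlock_lock(&tasks_lock, true);
    /* ... touch tasks_tree here ... */
    irq_spinlock_unlock(&tasks_lock, true);

    /* rd3808d3 style: the caller disables interrupts explicitly, keeps
     * the returned interrupt level (ipl), and restores it only after
     * releasing the plain spinlock. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    /* ... touch tasks_tree here ... */
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
}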
 task_t *task_create(as_t *as, const char *name)
 {
-    task_t *task = (task_t *) slab_alloc(task_slab, 0);
-    task_create_arch(task);
-
-    task->as = as;
-    str_cpy(task->name, TASK_NAME_BUFLEN, name);
-
-    task->context = CONTEXT;
-    task->capabilities = 0;
-    task->ucycles = 0;
-    task->kcycles = 0;
-
-    task->ipc_info.call_sent = 0;
-    task->ipc_info.call_recieved = 0;
-    task->ipc_info.answer_sent = 0;
-    task->ipc_info.answer_recieved = 0;
-    task->ipc_info.irq_notif_recieved = 0;
-    task->ipc_info.forwarded = 0;
-
+    ipl_t ipl;
+    task_t *ta;
+
+    ta = (task_t *) slab_alloc(task_slab, 0);
+    task_create_arch(ta);
+    ta->as = as;
+    memcpy(ta->name, name, TASK_NAME_BUFLEN);
+    ta->name[TASK_NAME_BUFLEN - 1] = 0;
+
+    ta->context = CONTEXT;
+    ta->capabilities = 0;
+    ta->ucycles = 0;
+    ta->kcycles = 0;
+
+    ta->ipc_info.call_sent = 0;
+    ta->ipc_info.call_recieved = 0;
+    ta->ipc_info.answer_sent = 0;
+    ta->ipc_info.answer_recieved = 0;
+    ta->ipc_info.irq_notif_recieved = 0;
+    ta->ipc_info.forwarded = 0;
+
 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
-    udebug_task_init(&task->udebug);
+    udebug_task_init(&ta->udebug);
 
     /* Init kbox stuff */
-    task->kb.finished = false;
+    ta->kb.finished = false;
 #endif
 
     if ((ipc_phone_0) &&
-        (context_check(ipc_phone_0->task->context, task->context)))
-        ipc_phone_connect(&task->phones[0], ipc_phone_0);
+        (context_check(ipc_phone_0->task->context, ta->context)))
+        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
 
-    btree_create(&task->futexes);
+    btree_create(&ta->futexes);
 
     /*
      * Get a reference to the address space.
      */
-    as_hold(task->as);
-
-    irq_spinlock_lock(&tasks_lock, true);
-
-    task->taskid = ++task_counter;
-    avltree_node_initialize(&task->tasks_tree_node);
-    task->tasks_tree_node.key = task->taskid;
-    avltree_insert(&tasks_tree, &task->tasks_tree_node);
-
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return task;
+    as_hold(ta->as);
+
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+    ta->taskid = ++task_counter;
+    avltree_node_initialize(&ta->tasks_tree_node);
+    ta->tasks_tree_node.key = ta->taskid;
+    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
+
+    return ta;
 }
 
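One behavioural detail in task_create(): the r1d432f9 side copies the task name with the bounded string routine str_cpy(), while the rd3808d3 side does a fixed-size memcpy() followed by forced NUL termination. A sketch of the two, assuming the usual bounded-copy semantics of str_cpy() (copy at most the given size and always terminate); note the memcpy() variant reads TASK_NAME_BUFLEN bytes from the source regardless of its string length:

/* r1d432f9: bounded copy that stops at the source terminator. */
str_cpy(task->name, TASK_NAME_BUFLEN, name);

/* rd3808d3: copies the whole buffer length unconditionally, then
 * guarantees a terminator in the last byte. */
memcpy(ta->name, name, TASK_NAME_BUFLEN);
ta->name[TASK_NAME_BUFLEN - 1] = 0;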
 /** Destroy task.
  *
- * @param task Task to be destroyed.
+ * @param t Task to be destroyed.
  *
  */
-void task_destroy(task_t *task)
+void task_destroy(task_t *t)
 {
     /*
      * Remove the task from the task B+tree.
      */
-    irq_spinlock_lock(&tasks_lock, true);
-    avltree_delete(&tasks_tree, &task->tasks_tree_node);
-    irq_spinlock_unlock(&tasks_lock, true);
+    spinlock_lock(&tasks_lock);
+    avltree_delete(&tasks_tree, &t->tasks_tree_node);
+    spinlock_unlock(&tasks_lock);
 
     /*
      * Perform architecture specific task destruction.
      */
-    task_destroy_arch(task);
+    task_destroy_arch(t);
 
     /*
      * Free up dynamically allocated state.
      */
-    btree_destroy(&task->futexes);
+    btree_destroy(&t->futexes);
 
     /*
      * Drop our reference to the address space.
      */
-    as_release(task->as);
-
-    slab_free(task_slab, task);
+    as_release(t->as);
+
+    slab_free(task_slab, t);
 }
…
  * Holding a reference to a task prevents destruction of that task.
  *
- * @param task Task to be held.
- *
- */
-void task_hold(task_t *task)
-{
-    atomic_inc(&task->refcount);
+ * @param t Task to be held.
+ */
+void task_hold(task_t *t)
+{
+    atomic_inc(&t->refcount);
 }
…
  * The last one to release a reference to a task destroys the task.
  *
- * @param task Task to be released.
- *
- */
-void task_release(task_t *task)
-{
-    if ((atomic_predec(&task->refcount)) == 0)
-        task_destroy(task);
+ * @param t Task to be released.
+ */
+void task_release(task_t *t)
+{
+    if ((atomic_predec(&t->refcount)) == 0)
+        task_destroy(t);
 }
…
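task_hold() and task_release() are plain atomic reference counting: atomic_predec() yields the post-decrement value, so whichever caller drops the count to zero runs task_destroy(). A hypothetical caller sketch in the rd3808d3 locking style (the lookup-and-pin sequence and the variable 'id' are illustrative, not part of this changeset):

/* Pin a task so it survives after tasks_lock is dropped. */
ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
task_t *t = task_find_by_id(id);    /* NULL if no such task */
if (t)
    task_hold(t);                   /* t cannot be destroyed while held */
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);

if (t) {
    /* ... safe to dereference t here ... */
    task_release(t);                /* the last release runs task_destroy() */
}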
 task_t *task_find_by_id(task_id_t id)
 {
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&tasks_lock));
-
     avltree_node_t *node =
         avltree_search(&tasks_tree, (avltree_key_t) id);
 
     if (node)
         return avltree_get_instance(node, task_t, tasks_tree_node);
 
     return NULL;
…
 /** Get accounting data of given task.
  *
- * Note that task lock of 'task' must be already held and interrupts must be
+ * Note that task lock of 't' must be already held and interrupts must be
  * already disabled.
  *
- * @param task Pointer to the task.
+ * @param t Pointer to thread.
  * @param ucycles Out pointer to sum of all user cycles.
  * @param kcycles Out pointer to sum of all kernel cycles.
  *
  */
-void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&task->lock));
-
+void task_get_accounting(task_t *t, uint64_t *ucycles, uint64_t *kcycles)
+{
     /* Accumulated values of task */
-    uint64_t uret = task->ucycles;
-    uint64_t kret = task->kcycles;
+    uint64_t uret = t->ucycles;
+    uint64_t kret = t->kcycles;
 
     /* Current values of threads */
     link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
+        thread_t *thr = list_get_instance(cur, thread_t, th_link);
 
-        irq_spinlock_lock(&thread->lock, false);
-
+        spinlock_lock(&thr->lock);
         /* Process only counted threads */
-        if (!thread->uncounted) {
-            if (thread == THREAD) {
+        if (!thr->uncounted) {
+            if (thr == THREAD) {
                 /* Update accounting of current thread */
                 thread_update_accounting(false);
-            }
-
-            uret += thread->ucycles;
-            kret += thread->kcycles;
+            }
+            uret += thr->ucycles;
+            kret += thr->kcycles;
         }
-
-        irq_spinlock_unlock(&thread->lock, false);
+        spinlock_unlock(&thr->lock);
     }
…
 }
 
-static void task_kill_internal(task_t *task)
+static void task_kill_internal(task_t *ta)
 {
     link_t *cur;
…
      * Interrupt all threads.
      */
-    irq_spinlock_lock(&task->lock, false);
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+    spinlock_lock(&ta->lock);
+    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
+        thread_t *thr;
         bool sleeping = false;
 
-        irq_spinlock_lock(&thread->lock, false);
+        thr = list_get_instance(cur, thread_t, th_link);
 
-        thread->interrupted = true;
-        if (thread->state == Sleeping)
+        spinlock_lock(&thr->lock);
+        thr->interrupted = true;
+        if (thr->state == Sleeping)
             sleeping = true;
-
-        irq_spinlock_unlock(&thread->lock, false);
+        spinlock_unlock(&thr->lock);
 
         if (sleeping)
-            waitq_interrupt_sleep(thread);
+            waitq_interrupt_sleep(thr);
     }
-
-    irq_spinlock_unlock(&task->lock, false);
+    spinlock_unlock(&ta->lock);
 }
…
 int task_kill(task_id_t id)
 {
+    ipl_t ipl;
+    task_t *ta;
+
     if (id == 1)
         return EPERM;
 
-    irq_spinlock_lock(&tasks_lock, true);
-
-    task_t *task = task_find_by_id(id);
-    if (!task) {
-        irq_spinlock_unlock(&tasks_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+    if (!(ta = task_find_by_id(id))) {
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         return ENOENT;
     }
-
-    task_kill_internal(task);
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return EOK;
+    task_kill_internal(ta);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
+    return 0;
 }
 
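task_kill() keeps the same error contract on both sides (EPERM for task 1, ENOENT for an unknown id); only the success value changes from the EOK symbol to a literal 0. A hypothetical caller sketch ('tid' is illustrative):

int rc = task_kill(tid);
if (rc == EPERM) {
    /* task 1 is protected and may never be killed */
} else if (rc == ENOENT) {
    /* no task with this id exists */
} else {
    /* success (EOK on r1d432f9, 0 on rd3808d3): every thread of the
     * task has been marked interrupted, sleeping ones woken up */
}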
 static bool task_print_walker(avltree_node_t *node, void *arg)
 {
-    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-    irq_spinlock_lock(&task->lock, false);
+    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
+    int j;
+
+    spinlock_lock(&t->lock);
 
     uint64_t ucycles;
     uint64_t kcycles;
     char usuffix, ksuffix;
-    task_get_accounting(task, &ucycles, &kcycles);
+    task_get_accounting(t, &ucycles, &kcycles);
     order_suffix(ucycles, &ucycles, &usuffix);
     order_suffix(kcycles, &kcycles, &ksuffix);
 
 #ifdef __32_BITS__
     printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %9"
-        PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context,
-        task, task->as, ucycles, usuffix, kcycles, ksuffix,
-        atomic_get(&task->refcount), atomic_get(&task->active_calls));
+        PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
+        ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
+        atomic_get(&t->active_calls));
 #endif
 
 #ifdef __64_BITS__
     printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %9"
-        PRIu64 "%c %7ld %6ld", task->taskid, task->name, task->context,
-        task, task->as, ucycles, usuffix, kcycles, ksuffix,
-        atomic_get(&task->refcount), atomic_get(&task->active_calls));
-#endif
-
-    size_t i;
-    for (i = 0; i < IPC_MAX_PHONES; i++) {
-        if (task->phones[i].callee)
-            printf(" %" PRIs ":%p", i, task->phones[i].callee);
+        PRIu64 "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as,
+        ucycles, usuffix, kcycles, ksuffix, atomic_get(&t->refcount),
+        atomic_get(&t->active_calls));
+#endif
+
+    for (j = 0; j < IPC_MAX_PHONES; j++) {
+        if (t->phones[j].callee)
+            printf(" %d:%p", j, t->phones[j].callee);
     }
     printf("\n");
 
-    irq_spinlock_unlock(&task->lock, false);
+    spinlock_unlock(&t->lock);
     return true;
 }
…
 void task_print_list(void)
 {
+    ipl_t ipl;
+
     /* Messing with task structures, avoid deadlock */
-    irq_spinlock_lock(&tasks_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
 
 #ifdef __32_BITS__
…
     avltree_walk(&tasks_tree, task_print_walker, NULL);
 
-    irq_spinlock_unlock(&tasks_lock, true);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
 }
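Both task_done_walker() and task_print_walker() follow the same avltree_walk() callback shape: receive an avltree_node_t, recover the enclosing task_t with avltree_get_instance(), and return true to continue the walk. A minimal sketch of a custom walker in that style (task_count_walker is a hypothetical name, not part of this changeset):

/* Count every task in the tree; 'arg' carries the counter. */
static bool task_count_walker(avltree_node_t *node, void *arg)
{
    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
    size_t *cnt = (size_t *) arg;

    (void) t;       /* only counting; the instance is otherwise unused */
    (*cnt)++;

    return true;    /* returning false would stop the walk early */
}

/* Usage, with tasks_lock held as in task_print_list():
 *     size_t count = 0;
 *     avltree_walk(&tasks_tree, task_count_walker, &count);
 */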