Changes in kernel/generic/src/proc/task.c [5bcf1f9:5ab1648] in mainline
Files: 1 edited
Legend:
  (no marker)  Unmodified
  -            Removed (present only in r5bcf1f9)
  +            Added (present only in r5ab1648)
kernel/generic/src/proc/task.c
 /*
- * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2001-2004 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief
+ * @brief Task management.
  */

…
 #include <errno.h>
 #include <func.h>
-#include <str.h>
-#include <memstr.h>
+#include <string.h>
 #include <syscall/copy.h>
 #include <macros.h>
…

 /** Spinlock protecting the tasks_tree AVL tree. */
-IRQ_SPINLOCK_INITIALIZE(tasks_lock);
+SPINLOCK_INITIALIZE(tasks_lock);

 /** AVL tree of active tasks.
…
  * The task is guaranteed to exist after it was found in the tasks_tree as
  * long as:
- *
  * @li the tasks_lock is held,
  * @li the task's lock is held when task's lock is acquired before releasing
…
 static task_id_t task_counter = 0;

-static slab_cache_t *task_slab;
-
 /* Forward declarations. */
 static void task_kill_internal(task_t *);
-static int tsk_constructor(void *, unsigned int);
-
-/** Initialize kernel tasks support.
- *
- */
+
+/** Initialize kernel tasks support. */
 void task_init(void)
 {
     TASK = NULL;
     avltree_create(&tasks_tree);
-    task_slab = slab_cache_create("task_slab", sizeof(task_t), 0,
-        tsk_constructor, NULL, 0);
-}
-
-/** Task finish walker.
- *
+}
+
+/*
  * The idea behind this walker is to kill and count all tasks different from
  * TASK.
- *
  */
 static bool task_done_walker(avltree_node_t *node, void *arg)
 {
-    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-    size_t *cnt = (size_t *) arg;
-
-    if (task != TASK) {
+    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
+    unsigned *cnt = (unsigned *) arg;
+
+    if (t != TASK) {
         (*cnt)++;
-
 #ifdef CONFIG_DEBUG
-        printf("[%"PRIu64"] ", task->taskid);
-#endif
-
-        task_kill_internal(task);
+        printf("[%"PRIu64"] ", t->taskid);
+#endif
+        task_kill_internal(t);
     }
-
-    /* Continue the walk */
-    return true;
-}
-
-/** Kill all tasks except the current task.
- *
- */
+
+    return true;    /* continue the walk */
+}
+
+/** Kill all tasks except the current task. */
 void task_done(void)
 {
-    size_t tasks_left;
-
-    /* Repeat until there are any tasks except TASK */
-    do {
+    unsigned tasks_left;
+
+    do { /* Repeat until there are any tasks except TASK */
+        /* Messing with task structures, avoid deadlock */
 #ifdef CONFIG_DEBUG
         printf("Killing tasks... ");
 #endif
-
-        irq_spinlock_lock(&tasks_lock, true);
+        ipl_t ipl = interrupts_disable();
+        spinlock_lock(&tasks_lock);
         tasks_left = 0;
         avltree_walk(&tasks_tree, task_done_walker, &tasks_left);
-        irq_spinlock_unlock(&tasks_lock, true);
-
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         thread_sleep(1);
-
 #ifdef CONFIG_DEBUG
         printf("\n");
 #endif
-    } while (tasks_left > 0);
-}
-
-int tsk_constructor(void *obj, unsigned int kmflags)
-{
-    task_t *task = (task_t *) obj;
-
-    atomic_set(&task->refcount, 0);
-    atomic_set(&task->lifecount, 0);
-
-    irq_spinlock_initialize(&task->lock, "task_t_lock");
-    mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE);
-
-    list_initialize(&task->th_head);
-    list_initialize(&task->sync_box_head);
-
-    ipc_answerbox_init(&task->answerbox, task);
-
-    size_t i;
-    for (i = 0; i < IPC_MAX_PHONES; i++)
-        ipc_phone_init(&task->phones[i]);
-
-#ifdef CONFIG_UDEBUG
-    /* Init kbox stuff */
-    task->kb.thread = NULL;
-    ipc_answerbox_init(&task->kb.box, task);
-    mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE);
-#endif
-
-    return 0;
+    } while (tasks_left);
 }

 /** Create new task with no threads.
  *
- * @param as Task's address space.
- * @param name Symbolic name (a copy is made).
- *
- * @return New task's structure.
- *
- */
-task_t *task_create(as_t *as, const char *name)
-{
-    task_t *task = (task_t *) slab_alloc(task_slab, 0);
-    task_create_arch(task);
-
-    task->as = as;
-    str_cpy(task->name, TASK_NAME_BUFLEN, name);
-
-    task->context = CONTEXT;
-    task->capabilities = 0;
-    task->ucycles = 0;
-    task->kcycles = 0;
-
-    task->ipc_info.call_sent = 0;
-    task->ipc_info.call_received = 0;
-    task->ipc_info.answer_sent = 0;
-    task->ipc_info.answer_received = 0;
-    task->ipc_info.irq_notif_received = 0;
-    task->ipc_info.forwarded = 0;
-
+ * @param as        Task's address space.
+ * @param name        Symbolic name (a copy is made).
+ *
+ * @return        New task's structure.
+ *
+ */
+task_t *task_create(as_t *as, char *name)
+{
+    ipl_t ipl;
+    task_t *ta;
+    int i;
+
+    ta = (task_t *) malloc(sizeof(task_t), 0);
+
+    task_create_arch(ta);
+
+    spinlock_initialize(&ta->lock, "task_ta_lock");
+    list_initialize(&ta->th_head);
+    ta->as = as;
+
+    memcpy(ta->name, name, TASK_NAME_BUFLEN);
+    ta->name[TASK_NAME_BUFLEN - 1] = 0;
+
+    atomic_set(&ta->refcount, 0);
+    atomic_set(&ta->lifecount, 0);
+    ta->context = CONTEXT;
+
+    ta->capabilities = 0;
+    ta->cycles = 0;
+
 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
-    udebug_task_init(&task->udebug);
-
+    udebug_task_init(&ta->udebug);
+
     /* Init kbox stuff */
-    task->kb.finished = false;
-#endif
-
-    if ((ipc_phone_0) &&
-        (context_check(ipc_phone_0->task->context, task->context)))
-        ipc_phone_connect(&task->phones[0], ipc_phone_0);
-
-    btree_create(&task->futexes);
-
-    /*
-     * Get a reference to the address space.
-     */
-    as_hold(task->as);
-
-    irq_spinlock_lock(&tasks_lock, true);
-
-    task->taskid = ++task_counter;
-    avltree_node_initialize(&task->tasks_tree_node);
-    task->tasks_tree_node.key = task->taskid;
-    avltree_insert(&tasks_tree, &task->tasks_tree_node);
-
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return task;
+    ipc_answerbox_init(&ta->kb.box, ta);
+    ta->kb.thread = NULL;
+    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
+    ta->kb.finished = false;
+#endif
+
+    ipc_answerbox_init(&ta->answerbox, ta);
+    for (i = 0; i < IPC_MAX_PHONES; i++)
+        ipc_phone_init(&ta->phones[i]);
+    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
+        ta->context)))
+        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
+    atomic_set(&ta->active_calls, 0);
+
+    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
+    btree_create(&ta->futexes);
+
+    ipl = interrupts_disable();
+
+    /*
+     * Increment address space reference count.
+     */
+    atomic_inc(&as->refcount);
+
+    spinlock_lock(&tasks_lock);
+    ta->taskid = ++task_counter;
+    avltree_node_initialize(&ta->tasks_tree_node);
+    ta->tasks_tree_node.key = ta->taskid;
+    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
+
+    return ta;
 }

 /** Destroy task.
  *
- * @param task Task to be destroyed.
- *
- */
-void task_destroy(task_t *task)
+ * @param t Task to be destroyed.
+ */
+void task_destroy(task_t *t)
 {
     /*
      * Remove the task from the task B+tree.
      */
-    irq_spinlock_lock(&tasks_lock, true);
-    avltree_delete(&tasks_tree, &task->tasks_tree_node);
-    irq_spinlock_unlock(&tasks_lock, true);
-
+    spinlock_lock(&tasks_lock);
+    avltree_delete(&tasks_tree, &t->tasks_tree_node);
+    spinlock_unlock(&tasks_lock);
+
     /*
      * Perform architecture specific task destruction.
      */
-    task_destroy_arch(task);
-
+    task_destroy_arch(t);
+
     /*
      * Free up dynamically allocated state.
      */
-    btree_destroy(&task->futexes);
-
+    btree_destroy(&t->futexes);
+
     /*
      * Drop our reference to the address space.
      */
-    as_release(task->as);
-
-    slab_free(task_slab, task);
-}
-
-/** Hold a reference to a task.
- *
- * Holding a reference to a task prevents destruction of that task.
- *
- * @param task Task to be held.
- *
- */
-void task_hold(task_t *task)
-{
-    atomic_inc(&task->refcount);
-}
-
-/** Release a reference to a task.
- *
- * The last one to release a reference to a task destroys the task.
- *
- * @param task Task to be released.
- *
- */
-void task_release(task_t *task)
-{
-    if ((atomic_predec(&task->refcount)) == 0)
-        task_destroy(task);
-}
-
-#ifdef __32_BITS__
-
-/** Syscall for reading task ID from userspace (32 bits)
- *
- * @param uspace_taskid Pointer to user-space buffer
- *                      where to store current task ID.
- *
- * @return Zero on success or an error code from @ref errno.h.
- *
- */
-sysarg_t sys_task_get_id(sysarg64_t *uspace_taskid)
+    if (atomic_predec(&t->as->refcount) == 0)
+        as_destroy(t->as);
+
+    free(t);
+    TASK = NULL;
+}
+
+/** Syscall for reading task ID from userspace.
+ *
+ * @param uspace_task_id userspace address of 8-byte buffer
+ *     where to store current task ID.
+ *
+ * @return Zero on success or an error code from @ref errno.h.
+ */
+unative_t sys_task_get_id(task_id_t *uspace_task_id)
 {
     /*
…
      * the lifespan of the task.
      */
-    return (sysarg_t) copy_to_uspace(uspace_taskid, &TASK->taskid,
+    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
         sizeof(TASK->taskid));
 }

-#endif /* __32_BITS__ */
-
-#ifdef __64_BITS__
-
-/** Syscall for reading task ID from userspace (64 bits)
- *
- * @return Current task ID.
- *
- */
-sysarg_t sys_task_get_id(void)
-{
-    /*
-     * No need to acquire lock on TASK because taskid remains constant for
-     * the lifespan of the task.
-     */
-    return TASK->taskid;
-}
-
-#endif /* __64_BITS__ */
-
 /** Syscall for setting the task name.
  *
  * The name simplifies identifying the task in the task list.
  *
- * @param name The new name for the task. (typically the same
- *             as the command used to execute it).
+ * @param name    The new name for the task. (typically the same
+ *        as the command used to execute it).
  *
  * @return 0 on success or an error code from @ref errno.h.
- *
- */
-sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len)
-{
+ */
+unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
+{
+    int rc;
     char namebuf[TASK_NAME_BUFLEN];

     /* Cap length of name and copy it from userspace. */
+
     if (name_len > TASK_NAME_BUFLEN - 1)
         name_len = TASK_NAME_BUFLEN - 1;
-
-    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
+
+    rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
-        return (sysarg_t) rc;
-
+        return (unative_t) rc;
+
     namebuf[name_len] = '\0';
-
-    /*
-     * As the task name is referenced also from the
-     * threads, lock the threads' lock for the course
-     * of the update.
-     */
-
-    irq_spinlock_lock(&tasks_lock, true);
-    irq_spinlock_lock(&TASK->lock, false);
-    irq_spinlock_lock(&threads_lock, false);
-
-    /* Set task name */
     str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
-
-    irq_spinlock_unlock(&threads_lock, false);
-    irq_spinlock_unlock(&TASK->lock, false);
-    irq_spinlock_unlock(&tasks_lock, true);
-
+
     return EOK;
-}
-
-/** Syscall to forcefully terminate a task
- *
- * @param uspace_taskid Pointer to task ID in user space.
- *
- * @return 0 on success or an error code from @ref errno.h.
- *
- */
-sysarg_t sys_task_kill(task_id_t *uspace_taskid)
-{
-    task_id_t taskid;
-    int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
-    if (rc != 0)
-        return (sysarg_t) rc;
-
-    return (sysarg_t) task_kill(taskid);
 }

…
  * interrupts must be disabled.
  *
- * @param id Task ID.
- *
- * @return Task structure address or NULL if there is no such task ID.
- *
- */
-task_t *task_find_by_id(task_id_t id)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&tasks_lock));
-
-    avltree_node_t *node =
-        avltree_search(&tasks_tree, (avltree_key_t) id);
-
+ * @param id        Task ID.
+ *
+ * @return        Task structure address or NULL if there is no such task
+ *            ID.
+ */
+task_t *task_find_by_id(task_id_t id)
+{
+    avltree_node_t *node;
+
+    node = avltree_search(&tasks_tree, (avltree_key_t) id);
+
     if (node)
-        return avltree_get_instance(node, task_t, tasks_tree_node);
-
+        return avltree_get_instance(node, task_t, tasks_tree_node);
     return NULL;
 }
…
 /** Get accounting data of given task.
  *
- * Note that task lock of 'task' must be already held and interrupts must be
+ * Note that task lock of 't' must be already held and interrupts must be
  * already disabled.
  *
- * @param task Pointer to the task.
- * @param ucycles Out pointer to sum of all user cycles.
- * @param kcycles Out pointer to sum of all kernel cycles.
- *
- */
-void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&task->lock));
-
-    /* Accumulated values of task */
-    uint64_t uret = task->ucycles;
-    uint64_t kret = task->kcycles;
+ * @param t        Pointer to thread.
+ *
+ * @return        Number of cycles used by the task and all its threads
+ *            so far.
+ */
+uint64_t task_get_accounting(task_t *t)
+{
+    /* Accumulated value of task */
+    uint64_t ret = t->cycles;

     /* Current values of threads */
     link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
+        thread_t *thr = list_get_instance(cur, thread_t, th_link);

-        irq_spinlock_lock(&thread->lock, false);
-
+        spinlock_lock(&thr->lock);
         /* Process only counted threads */
-        if (!thread->uncounted) {
-            if (thread == THREAD) {
+        if (!thr->uncounted) {
+            if (thr == THREAD) {
                 /* Update accounting of current thread */
-                thread_update_accounting(false);
-            }
-
-            uret += thread->ucycles;
-            kret += thread->kcycles;
+                thread_update_accounting();
+            }
+            ret += thr->cycles;
         }
-
-        irq_spinlock_unlock(&thread->lock, false);
+        spinlock_unlock(&thr->lock);
     }

-    *ucycles = uret;
-    *kcycles = kret;
-}
-
-static void task_kill_internal(task_t *task)
-{
-    irq_spinlock_lock(&task->lock, false);
-    irq_spinlock_lock(&threads_lock, false);
-
+    return ret;
+}
+
+static void task_kill_internal(task_t *ta)
+{
+    link_t *cur;
+
     /*
      * Interrupt all threads.
      */
-
-    link_t *cur;
-    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+    spinlock_lock(&ta->lock);
+    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
+        thread_t *thr;
         bool sleeping = false;

-        irq_spinlock_lock(&thread->lock, false);
+        thr = list_get_instance(cur, thread_t, th_link);

-        thread->interrupted = true;
-        if (thread->state == Sleeping)
+        spinlock_lock(&thr->lock);
+        thr->interrupted = true;
+        if (thr->state == Sleeping)
             sleeping = true;
-
-        irq_spinlock_unlock(&thread->lock, false);
+        spinlock_unlock(&thr->lock);

         if (sleeping)
-            waitq_interrupt_sleep(thread);
+            waitq_interrupt_sleep(thr);
     }
-
-    irq_spinlock_unlock(&threads_lock, false);
-    irq_spinlock_unlock(&task->lock, false);
+    spinlock_unlock(&ta->lock);
 }

…
  * It signals all the task's threads to bail it out.
  *
- * @param id ID of the task to be killed.
- *
- * @return Zero on success or an error code from errno.h.
- *
+ * @param id        ID of the task to be killed.
+ *
+ * @return        Zero on success or an error code from errno.h.
  */
 int task_kill(task_id_t id)
 {
+    ipl_t ipl;
+    task_t *ta;
+
     if (id == 1)
         return EPERM;

-    irq_spinlock_lock(&tasks_lock, true);
-
-    task_t *task = task_find_by_id(id);
-    if (!task) {
-        irq_spinlock_unlock(&tasks_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+    if (!(ta = task_find_by_id(id))) {
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         return ENOENT;
     }
-
-    task_kill_internal(task);
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return EOK;
-}
-
-/** Kill the currently running task.
- *
- * @param notify Send out fault notifications.
- *
- * @return Zero on success or an error code from errno.h.
- *
- */
-void task_kill_self(bool notify)
-{
-    /*
-     * User space can subscribe for FAULT events to take action
-     * whenever a task faults (to take a dump, run a debugger, etc.).
-     * The notification is always available, but unless udebug is enabled,
-     * that's all you get.
-     */
-    if (notify) {
-        if (event_is_subscribed(EVENT_FAULT)) {
-            /* Notify the subscriber that a fault occurred. */
-            event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
-                UPPER32(TASK->taskid), (sysarg_t) THREAD);
-
-#ifdef CONFIG_UDEBUG
-            /* Wait for a debugging session. */
-            udebug_thread_fault();
-#endif
-        }
-    }
-
-    irq_spinlock_lock(&tasks_lock, true);
-    task_kill_internal(TASK);
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    thread_exit();
-}
-
-/** Process syscall to terminate the current task.
- *
- * @param notify Send out fault notifications.
- *
- */
-sysarg_t sys_task_exit(sysarg_t notify)
-{
-    task_kill_self(notify);
-
-    /* Unreachable */
-    return EOK;
-}
-
-static bool task_print_walker(avltree_node_t *node, void *arg)
-{
-    bool *additional = (bool *) arg;
-    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
-    irq_spinlock_lock(&task->lock, false);
-
-    uint64_t ucycles;
-    uint64_t kcycles;
-    char usuffix, ksuffix;
-    task_get_accounting(task, &ucycles, &kcycles);
-    order_suffix(ucycles, &ucycles, &usuffix);
-    order_suffix(kcycles, &kcycles, &ksuffix);
-
-#ifdef __32_BITS__
-    if (*additional)
-        printf("%-8" PRIu64 " %9" PRIua, task->taskid,
-            atomic_get(&task->refcount));
-    else
-        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
-            " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
-            task->name, task->context, task, task->as,
-            ucycles, usuffix, kcycles, ksuffix);
-#endif
-
+    task_kill_internal(ta);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
+    return 0;
+}
+
+static bool task_print_walker(avltree_node_t *node, void *arg)
+{
+    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
+    int j;
+
+    spinlock_lock(&t->lock);
+
+    uint64_t cycles;
+    char suffix;
+    order(task_get_accounting(t), &cycles, &suffix);
+
+#ifdef __32_BITS__
+    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
+        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
+        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
+#endif
+
 #ifdef __64_BITS__
-    if (*additional)
-        printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
-            "%9" PRIua, task->taskid, ucycles, usuffix, kcycles,
-            ksuffix, atomic_get(&task->refcount));
-    else
-        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
-            task->taskid, task->name, task->context, task, task->as);
-#endif
-
-    if (*additional) {
-        size_t i;
-        for (i = 0; i < IPC_MAX_PHONES; i++) {
-            if (task->phones[i].callee)
-                printf(" %zu:%p", i, task->phones[i].callee);
-        }
-        printf("\n");
-    }
-
-    irq_spinlock_unlock(&task->lock, false);
-    return true;
-}
-
-/** Print task list
- *
- * @param additional Print additional information.
- *
- */
-void task_print_list(bool additional)
-{
-    /* Messing with task structures, avoid deadlock */
-    irq_spinlock_lock(&tasks_lock, true);
-
-#ifdef __32_BITS__
-    if (additional)
-        printf("[id    ] [threads] [calls] [callee\n");
-    else
-        printf("[id    ] [name       ] [ctx] [address ] [as      ]"
-            " [ucycles ] [kcycles ]\n");
-#endif
-
-#ifdef __64_BITS__
-    if (additional)
-        printf("[id    ] [ucycles ] [kcycles ] [threads] [calls]"
-            " [callee\n");
-    else
-        printf("[id    ] [name       ] [ctx] [address         ]"
-            " [as              ]\n");
-#endif
-
-    avltree_walk(&tasks_tree, task_print_walker, &additional);
-
-    irq_spinlock_unlock(&tasks_lock, true);
+    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
+        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
+        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
+#endif
+
+    for (j = 0; j < IPC_MAX_PHONES; j++) {
+        if (t->phones[j].callee)
+            printf(" %d:%p", j, t->phones[j].callee);
+    }
+    printf("\n");
+
+    spinlock_unlock(&t->lock);
+    return true;
+}
+
+/** Print task list */
+void task_print_list(void)
+{
+    ipl_t ipl;
+
+    /* Messing with task structures, avoid deadlock */
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+
+#ifdef __32_BITS__
+    printf("taskid name         ctx address    as         "
+        "cycles     threads calls  callee\n");
+    printf("------ ------------ --- ---------- ---------- "
+        "---------- ------- ------ ------>\n");
+#endif
+
+#ifdef __64_BITS__
+    printf("taskid name         ctx address            as                 "
+        "cycles     threads calls  callee\n");
+    printf("------ ------------ --- ------------------ ------------------ "
+        "---------- ------- ------ ------>\n");
+#endif
+
+    avltree_walk(&tasks_tree, task_print_walker, NULL);
+
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
 }
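The most pervasive change across this diff is the locking idiom. The r5ab1648 column pairs a plain spinlock with explicit interrupt-level bookkeeping, while the r5bcf1f9 column uses the combined irq_spinlock primitives. A minimal sketch of the two equivalent critical sections, built only from calls that appear in the diff above (the comment on the second form is an inference from how the calls are paired here, not a statement from the changeset):

    /* r5ab1648 idiom: the caller saves the interrupt level, takes a
     * plain spinlock, and must restore the level in the right order
     * on every exit path. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    /* ... critical section ... */
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    /* r5bcf1f9 idiom: passing true asks the lock itself to disable
     * interrupts and remember the previous level, so the lock/unlock
     * pair cannot be mismatched with the interrupt state. */
    irq_spinlock_lock(&tasks_lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&tasks_lock, true);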
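Task allocation also changes shape: r5ab1648 grabs each task_t with malloc() and initializes every field inline in task_create(), whereas r5bcf1f9 allocates from a slab cache whose constructor pre-initializes the fields that are invariant for every task (locks, lists, answerbox, phones). A condensed sketch of that pattern, assembled from the calls visible in the diff:

    /* Done once in task_init(): tsk_constructor runs whenever the
     * cache constructs a fresh slab object. */
    task_slab = slab_cache_create("task_slab", sizeof(task_t), 0,
        tsk_constructor, NULL, 0);

    /* task_create() then receives a pre-constructed object and only
     * fills in per-task state... */
    task_t *task = (task_t *) slab_alloc(task_slab, 0);

    /* ...and task_destroy() returns the object to the cache instead
     * of calling free(). */
    slab_free(task_slab, task);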
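Finally, the accounting interface differs between the two revisions: r5ab1648 keeps one aggregate per-task cycle counter and returns it, while r5bcf1f9 tracks user and kernel cycles separately and reports them through out-parameters. The call sites compare roughly like this (variable names are illustrative):

    /* r5ab1648: a single counter, returned directly and formatted
     * with one order() call in task_print_walker(). */
    uint64_t cycles = task_get_accounting(t);

    /* r5bcf1f9: user and kernel time kept apart; the caller formats
     * each with its own suffix via order_suffix(). */
    uint64_t ucycles, kcycles;
    task_get_accounting(task, &ucycles, &kcycles);
    order_suffix(ucycles, &ucycles, &usuffix);
    order_suffix(kcycles, &kcycles, &ksuffix);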