Changes in kernel/generic/src/proc/task.c [5ab1648:5bcf1f9] in mainline
- File: kernel/generic/src/proc/task.c (1 edited)
Legend:
- Unmodified (prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/proc/task.c
r5ab1648 → r5bcf1f9

 /*
- * Copyright (c) 2001-2004 Jakub Jermar
+ * Copyright (c) 2010 Jakub Jermar
  * All rights reserved.
  *
…
 /**
  * @file
- * @brief
+ * @brief Task management.
  */

…
 #include <errno.h>
 #include <func.h>
-#include <string.h>
+#include <str.h>
+#include <memstr.h>
 #include <syscall/copy.h>
 #include <macros.h>
…

 /** Spinlock protecting the tasks_tree AVL tree. */
-SPINLOCK_INITIALIZE(tasks_lock);
+IRQ_SPINLOCK_INITIALIZE(tasks_lock);

 /** AVL tree of active tasks.
…
  * The task is guaranteed to exist after it was found in the tasks_tree as
  * long as:
+ *
  * @li the tasks_lock is held,
  * @li the task's lock is held when task's lock is acquired before releasing
…
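The dominant change in this file is already visible in the hunk above: open-coded interrupt control around plain spinlocks gives way to the irq_spinlock API. A minimal before/after sketch, using only calls that appear in this changeset (the critical-section body is illustrative):

    /* Before: the caller manages the interrupt level itself. */
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);
    /* ... critical section ... */
    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);

    /* After: true asks the lock to disable interrupts on acquisition
       and restore them on release; false is used further down for
       nested locks taken while interrupts are already off. */
    irq_spinlock_lock(&tasks_lock, true);
    /* ... critical section ... */
    irq_spinlock_unlock(&tasks_lock, true);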
"); 116 132 #endif 117 ipl_t ipl = interrupts_disable();118 spinlock_lock(&tasks_lock);133 134 irq_spinlock_lock(&tasks_lock, true); 119 135 tasks_left = 0; 120 136 avltree_walk(&tasks_tree, task_done_walker, &tasks_left); 121 spinlock_unlock(&tasks_lock);122 interrupts_restore(ipl);137 irq_spinlock_unlock(&tasks_lock, true); 138 123 139 thread_sleep(1); 140 124 141 #ifdef CONFIG_DEBUG 125 142 printf("\n"); 126 143 #endif 127 } while (tasks_left); 144 } while (tasks_left > 0); 145 } 146 147 int tsk_constructor(void *obj, unsigned int kmflags) 148 { 149 task_t *task = (task_t *) obj; 150 151 atomic_set(&task->refcount, 0); 152 atomic_set(&task->lifecount, 0); 153 154 irq_spinlock_initialize(&task->lock, "task_t_lock"); 155 mutex_initialize(&task->futexes_lock, MUTEX_PASSIVE); 156 157 list_initialize(&task->th_head); 158 list_initialize(&task->sync_box_head); 159 160 ipc_answerbox_init(&task->answerbox, task); 161 162 size_t i; 163 for (i = 0; i < IPC_MAX_PHONES; i++) 164 ipc_phone_init(&task->phones[i]); 165 166 #ifdef CONFIG_UDEBUG 167 /* Init kbox stuff */ 168 task->kb.thread = NULL; 169 ipc_answerbox_init(&task->kb.box, task); 170 mutex_initialize(&task->kb.cleanup_lock, MUTEX_PASSIVE); 171 #endif 172 173 return 0; 128 174 } 129 175 130 176 /** Create new task with no threads. 131 177 * 132 * @param as Task's address space. 133 * @param name Symbolic name (a copy is made). 134 * 135 * @return New task's structure. 136 * 137 */ 138 task_t *task_create(as_t *as, char *name) 139 { 140 ipl_t ipl; 141 task_t *ta; 142 int i; 143 144 ta = (task_t *) malloc(sizeof(task_t), 0); 145 146 task_create_arch(ta); 147 148 spinlock_initialize(&ta->lock, "task_ta_lock"); 149 list_initialize(&ta->th_head); 150 ta->as = as; 151 152 memcpy(ta->name, name, TASK_NAME_BUFLEN); 153 ta->name[TASK_NAME_BUFLEN - 1] = 0; 154 155 atomic_set(&ta->refcount, 0); 156 atomic_set(&ta->lifecount, 0); 157 ta->context = CONTEXT; 158 159 ta->capabilities = 0; 160 ta->cycles = 0; 161 178 * @param as Task's address space. 179 * @param name Symbolic name (a copy is made). 180 * 181 * @return New task's structure. 182 * 183 */ 184 task_t *task_create(as_t *as, const char *name) 185 { 186 task_t *task = (task_t *) slab_alloc(task_slab, 0); 187 task_create_arch(task); 188 189 task->as = as; 190 str_cpy(task->name, TASK_NAME_BUFLEN, name); 191 192 task->context = CONTEXT; 193 task->capabilities = 0; 194 task->ucycles = 0; 195 task->kcycles = 0; 196 197 task->ipc_info.call_sent = 0; 198 task->ipc_info.call_received = 0; 199 task->ipc_info.answer_sent = 0; 200 task->ipc_info.answer_received = 0; 201 task->ipc_info.irq_notif_received = 0; 202 task->ipc_info.forwarded = 0; 203 162 204 #ifdef CONFIG_UDEBUG 163 205 /* Init debugging stuff */ 164 udebug_task_init(&ta ->udebug);165 206 udebug_task_init(&task->udebug); 207 166 208 /* Init kbox stuff */ 167 ipc_answerbox_init(&ta->kb.box, ta); 168 ta->kb.thread = NULL; 169 mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE); 170 ta->kb.finished = false; 171 #endif 172 173 ipc_answerbox_init(&ta->answerbox, ta); 174 for (i = 0; i < IPC_MAX_PHONES; i++) 175 ipc_phone_init(&ta->phones[i]); 176 if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context, 177 ta->context))) 178 ipc_phone_connect(&ta->phones[0], ipc_phone_0); 179 atomic_set(&ta->active_calls, 0); 180 181 mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE); 182 btree_create(&ta->futexes); 183 184 ipl = interrupts_disable(); 185 186 /* 187 * Increment address space reference count. 
 /** Create new task with no threads.
  *
- * @param as    Task's address space.
- * @param name  Symbolic name (a copy is made).
- *
- * @return New task's structure.
- *
- */
-task_t *task_create(as_t *as, char *name)
-{
-    ipl_t ipl;
-    task_t *ta;
-    int i;
-
-    ta = (task_t *) malloc(sizeof(task_t), 0);
-
-    task_create_arch(ta);
-
-    spinlock_initialize(&ta->lock, "task_ta_lock");
-    list_initialize(&ta->th_head);
-    ta->as = as;
-
-    memcpy(ta->name, name, TASK_NAME_BUFLEN);
-    ta->name[TASK_NAME_BUFLEN - 1] = 0;
-
-    atomic_set(&ta->refcount, 0);
-    atomic_set(&ta->lifecount, 0);
-    ta->context = CONTEXT;
-
-    ta->capabilities = 0;
-    ta->cycles = 0;
-
+ * @param as   Task's address space.
+ * @param name Symbolic name (a copy is made).
+ *
+ * @return New task's structure.
+ *
+ */
+task_t *task_create(as_t *as, const char *name)
+{
+    task_t *task = (task_t *) slab_alloc(task_slab, 0);
+    task_create_arch(task);
+
+    task->as = as;
+    str_cpy(task->name, TASK_NAME_BUFLEN, name);
+
+    task->context = CONTEXT;
+    task->capabilities = 0;
+    task->ucycles = 0;
+    task->kcycles = 0;
+
+    task->ipc_info.call_sent = 0;
+    task->ipc_info.call_received = 0;
+    task->ipc_info.answer_sent = 0;
+    task->ipc_info.answer_received = 0;
+    task->ipc_info.irq_notif_received = 0;
+    task->ipc_info.forwarded = 0;
+
 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
-    udebug_task_init(&ta->udebug);
+    udebug_task_init(&task->udebug);

     /* Init kbox stuff */
-    ipc_answerbox_init(&ta->kb.box, ta);
-    ta->kb.thread = NULL;
-    mutex_initialize(&ta->kb.cleanup_lock, MUTEX_PASSIVE);
-    ta->kb.finished = false;
-#endif
-
-    ipc_answerbox_init(&ta->answerbox, ta);
-    for (i = 0; i < IPC_MAX_PHONES; i++)
-        ipc_phone_init(&ta->phones[i]);
-    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
-        ta->context)))
-        ipc_phone_connect(&ta->phones[0], ipc_phone_0);
-    atomic_set(&ta->active_calls, 0);
-
-    mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
-    btree_create(&ta->futexes);
-
-    ipl = interrupts_disable();
-
-    /*
-     * Increment address space reference count.
-     */
-    atomic_inc(&as->refcount);
-
-    spinlock_lock(&tasks_lock);
-    ta->taskid = ++task_counter;
-    avltree_node_initialize(&ta->tasks_tree_node);
-    ta->tasks_tree_node.key = ta->taskid;
-    avltree_insert(&tasks_tree, &ta->tasks_tree_node);
-    spinlock_unlock(&tasks_lock);
-    interrupts_restore(ipl);
-
-    return ta;
+    task->kb.finished = false;
+#endif
+
+    if ((ipc_phone_0) &&
+        (context_check(ipc_phone_0->task->context, task->context)))
+        ipc_phone_connect(&task->phones[0], ipc_phone_0);
+
+    btree_create(&task->futexes);
+
+    /*
+     * Get a reference to the address space.
+     */
+    as_hold(task->as);
+
+    irq_spinlock_lock(&tasks_lock, true);
+
+    task->taskid = ++task_counter;
+    avltree_node_initialize(&task->tasks_tree_node);
+    task->tasks_tree_node.key = task->taskid;
+    avltree_insert(&tasks_tree, &task->tasks_tree_node);
+
+    irq_spinlock_unlock(&tasks_lock, true);
+
+    return task;
 }

 /** Destroy task.
  *
- * @param t Task to be destroyed.
- */
-void task_destroy(task_t *t)
+ * @param task Task to be destroyed.
+ *
+ */
+void task_destroy(task_t *task)
 {
     /*
      * Remove the task from the task B+tree.
      */
-    spinlock_lock(&tasks_lock);
-    avltree_delete(&tasks_tree, &t->tasks_tree_node);
-    spinlock_unlock(&tasks_lock);
+    irq_spinlock_lock(&tasks_lock, true);
+    avltree_delete(&tasks_tree, &task->tasks_tree_node);
+    irq_spinlock_unlock(&tasks_lock, true);

     /*
      * Perform architecture specific task destruction.
      */
-    task_destroy_arch(t);
+    task_destroy_arch(task);

     /*
      * Free up dynamically allocated state.
      */
-    btree_destroy(&t->futexes);
+    btree_destroy(&task->futexes);

     /*
      * Drop our reference to the address space.
      */
-    if (atomic_predec(&t->as->refcount) == 0)
-        as_destroy(t->as);
-
-    free(t);
-    TASK = NULL;
-}
-
-/** Syscall for reading task ID from userspace.
- *
- * @param uspace_task_id userspace address of 8-byte buffer
- *        where to store current task ID.
- *
- * @return Zero on success or an error code from @ref errno.h.
- */
-unative_t sys_task_get_id(task_id_t *uspace_task_id)
+    as_release(task->as);
+
+    slab_free(task_slab, task);
+}
+
+/** Hold a reference to a task.
+ *
+ * Holding a reference to a task prevents destruction of that task.
+ *
+ * @param task Task to be held.
+ *
+ */
+void task_hold(task_t *task)
+{
+    atomic_inc(&task->refcount);
+}
+
+/** Release a reference to a task.
+ *
+ * The last one to release a reference to a task destroys the task.
+ *
+ * @param task Task to be released.
+ *
+ */
+void task_release(task_t *task)
+{
+    if ((atomic_predec(&task->refcount)) == 0)
+        task_destroy(task);
+}
+
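The new task_hold()/task_release() pair turns the pre-existing refcount atomic into an explicit lifetime protocol: task_destroy() is no longer called directly but runs when the last reference is dropped. A hedged sketch of a caller that needs a task to outlive tasks_lock (this lookup-then-pin sequence is an assumption, not code from the changeset):

    irq_spinlock_lock(&tasks_lock, true);

    task_t *task = task_find_by_id(id);
    if (task)
        task_hold(task);  /* pin before dropping the lock */

    irq_spinlock_unlock(&tasks_lock, true);

    if (task) {
        /* ... safely work with the task ... */
        task_release(task);  /* last release runs task_destroy() */
    }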
+#ifdef __32_BITS__
+
+/** Syscall for reading task ID from userspace (32 bits)
+ *
+ * @param uspace_taskid Pointer to user-space buffer
+ *                      where to store current task ID.
+ *
+ * @return Zero on success or an error code from @ref errno.h.
+ *
+ */
+sysarg_t sys_task_get_id(sysarg64_t *uspace_taskid)
 {
     /*
…
      * the lifespan of the task.
      */
-    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
+    return (sysarg_t) copy_to_uspace(uspace_taskid, &TASK->taskid,
         sizeof(TASK->taskid));
 }

+#endif /* __32_BITS__ */
+
+#ifdef __64_BITS__
+
+/** Syscall for reading task ID from userspace (64 bits)
+ *
+ * @return Current task ID.
+ *
+ */
+sysarg_t sys_task_get_id(void)
+{
+    /*
+     * No need to acquire lock on TASK because taskid remains constant for
+     * the lifespan of the task.
+     */
+    return TASK->taskid;
+}
+
+#endif /* __64_BITS__ */
+
 /** Syscall for setting the task name.
  *
  * The name simplifies identifying the task in the task list.
  *
- * @param name  The new name for the task. (typically the same
- *              as the command used to execute it).
+ * @param name The new name for the task. (typically the same
+ *             as the command used to execute it).
  *
  * @return 0 on success or an error code from @ref errno.h.
- */
-unative_t sys_task_set_name(const char *uspace_name, size_t name_len)
-{
-    int rc;
+ *
+ */
+sysarg_t sys_task_set_name(const char *uspace_name, size_t name_len)
+{
     char namebuf[TASK_NAME_BUFLEN];

     /* Cap length of name and copy it from userspace. */
-
     if (name_len > TASK_NAME_BUFLEN - 1)
         name_len = TASK_NAME_BUFLEN - 1;
-
-    rc = copy_from_uspace(namebuf, uspace_name, name_len);
+
+    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
-        return (unative_t) rc;
+        return (sysarg_t) rc;

     namebuf[name_len] = '\0';
+
+    /*
+     * As the task name is referenced also from the
+     * threads, lock the threads' lock for the course
+     * of the update.
+     */
+
+    irq_spinlock_lock(&tasks_lock, true);
+    irq_spinlock_lock(&TASK->lock, false);
+    irq_spinlock_lock(&threads_lock, false);
+
+    /* Set task name */
     str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);

+    irq_spinlock_unlock(&threads_lock, false);
+    irq_spinlock_unlock(&TASK->lock, false);
+    irq_spinlock_unlock(&tasks_lock, true);
+
     return EOK;
+}
+
+/** Syscall to forcefully terminate a task
+ *
+ * @param uspace_taskid Pointer to task ID in user space.
+ *
+ * @return 0 on success or an error code from @ref errno.h.
+ *
+ */
+sysarg_t sys_task_kill(task_id_t *uspace_taskid)
+{
+    task_id_t taskid;
+    int rc = copy_from_uspace(&taskid, uspace_taskid, sizeof(taskid));
+    if (rc != 0)
+        return (sysarg_t) rc;
+
+    return (sysarg_t) task_kill(taskid);
 }
…
  * interrupts must be disabled.
  *
- * @param id Task ID.
- *
- * @return Task structure address or NULL if there is no such task
- *         ID.
- */
-task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;
-
-    node = avltree_search(&tasks_tree, (avltree_key_t) id);
-
+ * @param id Task ID.
+ *
+ * @return Task structure address or NULL if there is no such task ID.
+ *
+ */
+task_t *task_find_by_id(task_id_t id)
+{
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&tasks_lock));
+
+    avltree_node_t *node =
+        avltree_search(&tasks_tree, (avltree_key_t) id);
+
     if (node)
-        return avltree_get_instance(node, task_t, tasks_tree_node);
+        return avltree_get_instance(node, task_t, tasks_tree_node);
+
     return NULL;
 }
…
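The sys_task_get_id() split above exists because a task ID is 64 bits wide while a syscall can return only one register: on 32-bit targets the kernel copies the ID into a caller-supplied buffer, on 64-bit targets it returns it directly. A hypothetical user-space wrapper would hide the difference roughly as follows (the __SYSCALL* macros and SYS_TASK_GET_ID constant are assumed names, not part of this diff):

    task_id_t task_get_id(void)
    {
    #ifdef __32_BITS__
        /* The kernel fills the buffer via copy_to_uspace(). */
        sysarg64_t id;
        (void) __SYSCALL1(SYS_TASK_GET_ID, (sysarg_t) &id);
        return (task_id_t) id;
    #endif

    #ifdef __64_BITS__
        /* The ID fits in the syscall return value itself. */
        return (task_id_t) __SYSCALL0(SYS_TASK_GET_ID);
    #endif
    }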
 /** Get accounting data of given task.
  *
- * Note that task lock of 't' must be already held and interrupts must be
+ * Note that task lock of 'task' must be already held and interrupts must be
  * already disabled.
  *
- * @param t Pointer to thread.
- *
- * @return Number of cycles used by the task and all its threads
- *         so far.
- */
-uint64_t task_get_accounting(task_t *t)
-{
-    /* Accumulated value of task */
-    uint64_t ret = t->cycles;
+ * @param task    Pointer to the task.
+ * @param ucycles Out pointer to sum of all user cycles.
+ * @param kcycles Out pointer to sum of all kernel cycles.
+ *
+ */
+void task_get_accounting(task_t *task, uint64_t *ucycles, uint64_t *kcycles)
+{
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&task->lock));
+
+    /* Accumulated values of task */
+    uint64_t uret = task->ucycles;
+    uint64_t kret = task->kcycles;

     /* Current values of threads */
     link_t *cur;
-    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
-        thread_t *thr = list_get_instance(cur, thread_t, th_link);
-
-        spinlock_lock(&thr->lock);
+    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+        thread_t *thread = list_get_instance(cur, thread_t, th_link);
+
+        irq_spinlock_lock(&thread->lock, false);
+
         /* Process only counted threads */
-        if (!thr->uncounted) {
-            if (thr == THREAD) {
+        if (!thread->uncounted) {
+            if (thread == THREAD) {
                 /* Update accounting of current thread */
-                thread_update_accounting();
-            }
-            ret += thr->cycles;
+                thread_update_accounting(false);
+            }
+
+            uret += thread->ucycles;
+            kret += thread->kcycles;
         }
-        spinlock_unlock(&thr->lock);
+
+        irq_spinlock_unlock(&thread->lock, false);
     }

-    return ret;
-}
-
-static void task_kill_internal(task_t *ta)
-{
+    *ucycles = uret;
+    *kcycles = kret;
+}
+
+static void task_kill_internal(task_t *task)
+{
+    irq_spinlock_lock(&task->lock, false);
+    irq_spinlock_lock(&threads_lock, false);
+
+    /*
+     * Interrupt all threads.
+     */
+
     link_t *cur;
-
-    /*
-     * Interrupt all threads.
-     */
-    spinlock_lock(&ta->lock);
-    for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
-        thread_t *thr;
+    for (cur = task->th_head.next; cur != &task->th_head; cur = cur->next) {
+        thread_t *thread = list_get_instance(cur, thread_t, th_link);
         bool sleeping = false;

-        thr = list_get_instance(cur, thread_t, th_link);
-
-        spinlock_lock(&thr->lock);
-        thr->interrupted = true;
-        if (thr->state == Sleeping)
+        irq_spinlock_lock(&thread->lock, false);
+
+        thread->interrupted = true;
+        if (thread->state == Sleeping)
             sleeping = true;
-        spinlock_unlock(&thr->lock);
+
+        irq_spinlock_unlock(&thread->lock, false);

         if (sleeping)
-            waitq_interrupt_sleep(thr);
+            waitq_interrupt_sleep(thread);
     }
-    spinlock_unlock(&ta->lock);
+
+    irq_spinlock_unlock(&threads_lock, false);
+    irq_spinlock_unlock(&task->lock, false);
 }
…
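task_get_accounting() now reports user and kernel cycles separately through out-parameters instead of returning one combined count, which is why every caller changes shape in this diff. The consumption pattern, as task_print_walker() uses it below (note the rename of the old order() helper to order_suffix()):

    uint64_t ucycles;
    uint64_t kcycles;
    char usuffix, ksuffix;

    /* Contract (now ASSERTed): task->lock held, interrupts off. */
    task_get_accounting(task, &ucycles, &kcycles);

    /* Scale the raw counts to printable magnitudes. */
    order_suffix(ucycles, &ucycles, &usuffix);
    order_suffix(kcycles, &kcycles, &ksuffix);

    printf("%" PRIu64 "%c user, %" PRIu64 "%c kernel\n",
        ucycles, usuffix, kcycles, ksuffix);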
  * It signals all the task's threads to bail it out.
  *
- * @param id ID of the task to be killed.
- *
- * @return Zero on success or an error code from errno.h.
+ * @param id ID of the task to be killed.
+ *
+ * @return Zero on success or an error code from errno.h.
+ *
  */
 int task_kill(task_id_t id)
 {
-    ipl_t ipl;
-    task_t *ta;
-
     if (id == 1)
         return EPERM;

-    ipl = interrupts_disable();
-    spinlock_lock(&tasks_lock);
-    if (!(ta = task_find_by_id(id))) {
-        spinlock_unlock(&tasks_lock);
-        interrupts_restore(ipl);
+    irq_spinlock_lock(&tasks_lock, true);
+
+    task_t *task = task_find_by_id(id);
+    if (!task) {
+        irq_spinlock_unlock(&tasks_lock, true);
         return ENOENT;
     }
-    task_kill_internal(ta);
-    spinlock_unlock(&tasks_lock);
-    interrupts_restore(ipl);
-    return 0;
+
+    task_kill_internal(task);
+    irq_spinlock_unlock(&tasks_lock, true);
+
+    return EOK;
+}
+
+/** Kill the currently running task.
+ *
+ * @param notify Send out fault notifications.
+ *
+ * @return Zero on success or an error code from errno.h.
+ *
+ */
+void task_kill_self(bool notify)
+{
+    /*
+     * User space can subscribe for FAULT events to take action
+     * whenever a task faults (to take a dump, run a debugger, etc.).
+     * The notification is always available, but unless udebug is enabled,
+     * that's all you get.
+     */
+    if (notify) {
+        if (event_is_subscribed(EVENT_FAULT)) {
+            /* Notify the subscriber that a fault occurred. */
+            event_notify_3(EVENT_FAULT, LOWER32(TASK->taskid),
+                UPPER32(TASK->taskid), (sysarg_t) THREAD);
+
+#ifdef CONFIG_UDEBUG
+            /* Wait for a debugging session. */
+            udebug_thread_fault();
+#endif
+        }
+    }
+
+    irq_spinlock_lock(&tasks_lock, true);
+    task_kill_internal(TASK);
+    irq_spinlock_unlock(&tasks_lock, true);
+
+    thread_exit();
+}
+
+/** Process syscall to terminate the current task.
+ *
+ * @param notify Send out fault notifications.
+ *
+ */
+sysarg_t sys_task_exit(sysarg_t notify)
+{
+    task_kill_self(notify);
+
+    /* Unreachable */
+    return EOK;
 }

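task_kill_self() gives a thread a sanctioned way to take down its own task from fault-handling code, optionally publishing an EVENT_FAULT notification (and, with udebug, parking for a debugger) first; sys_task_exit() is the thin syscall wrapper over it. A hedged sketch of the intended kernel-side call site (the surrounding exception-handler logic is assumed, not shown in this diff):

    /* In an exception path, once the fault is fatal for the task: */
    if (fatal)
        task_kill_self(true);  /* notifies, then never returns */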
 static bool task_print_walker(avltree_node_t *node, void *arg)
 {
-    task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);
-    int j;
-
-    spinlock_lock(&t->lock);
-
-    uint64_t cycles;
-    char suffix;
-    order(task_get_accounting(t), &cycles, &suffix);
-
-#ifdef __32_BITS__
-    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %10p %10p %9" PRIu64
-        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
-        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
-#endif
+    bool *additional = (bool *) arg;
+    task_t *task = avltree_get_instance(node, task_t, tasks_tree_node);
+    irq_spinlock_lock(&task->lock, false);
+
+    uint64_t ucycles;
+    uint64_t kcycles;
+    char usuffix, ksuffix;
+    task_get_accounting(task, &ucycles, &kcycles);
+    order_suffix(ucycles, &ucycles, &usuffix);
+    order_suffix(kcycles, &kcycles, &ksuffix);
+
+#ifdef __32_BITS__
+    if (*additional)
+        printf("%-8" PRIu64 " %9" PRIua, task->taskid,
+            atomic_get(&task->refcount));
+    else
+        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
+            " %9" PRIu64 "%c %9" PRIu64 "%c\n", task->taskid,
+            task->name, task->context, task, task->as,
+            ucycles, usuffix, kcycles, ksuffix);
+#endif

 #ifdef __64_BITS__
-    printf("%-6" PRIu64 " %-12s %-3" PRIu32 " %18p %18p %9" PRIu64
-        "%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
-        suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
-#endif
-
-    for (j = 0; j < IPC_MAX_PHONES; j++) {
-        if (t->phones[j].callee)
-            printf(" %d:%p", j, t->phones[j].callee);
+    if (*additional)
+        printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
+            "%9" PRIua, task->taskid, ucycles, usuffix, kcycles,
+            ksuffix, atomic_get(&task->refcount));
+    else
+        printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
+            task->taskid, task->name, task->context, task, task->as);
+#endif
+
+    if (*additional) {
+        size_t i;
+        for (i = 0; i < IPC_MAX_PHONES; i++) {
+            if (task->phones[i].callee)
+                printf(" %zu:%p", i, task->phones[i].callee);
+        }
+        printf("\n");
     }
-    printf("\n");
-
-    spinlock_unlock(&t->lock);
+
+    irq_spinlock_unlock(&task->lock, false);
     return true;
 }

-/** Print task list */
-void task_print_list(void)
-{
-    ipl_t ipl;
-
+/** Print task list
+ *
+ * @param additional Print additional information.
+ *
+ */
+void task_print_list(bool additional)
+{
     /* Messing with task structures, avoid deadlock */
-    ipl = interrupts_disable();
-    spinlock_lock(&tasks_lock);
-
-#ifdef __32_BITS__
-    printf("taskid name         ctx address    as        "
-        "cycles     threads calls  callee\n");
-    printf("------ ------------ --- ---------- ----------"
-        "---------- ------- ------ ------>\n");
-#endif
-
+    irq_spinlock_lock(&tasks_lock, true);
+
+#ifdef __32_BITS__
+    if (additional)
+        printf("[id    ] [threads] [calls] [callee\n");
+    else
+        printf("[id    ] [name        ] [ctx] [address ] [as      ]"
+            " [ucycles ] [kcycles ]\n");
+#endif
+
 #ifdef __64_BITS__
-    printf("taskid name         ctx address            as                 "
-        "cycles     threads calls  callee\n");
-    printf("------ ------------ --- ------------------ ------------------ "
-        "---------- ------- ------ ------>\n");
-#endif
-
-    avltree_walk(&tasks_tree, task_print_walker, NULL);
-
-    spinlock_unlock(&tasks_lock);
-    interrupts_restore(ipl);
+    if (additional)
+        printf("[id    ] [ucycles ] [kcycles ] [threads] [calls]"
+            " [callee\n");
+    else
+        printf("[id    ] [name        ] [ctx] [address         ]"
+            " [as              ]\n");
+#endif
+
+    avltree_walk(&tasks_tree, task_print_walker, &additional);
+
+    irq_spinlock_unlock(&tasks_lock, true);
 }
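task_print_list() now takes a bool choosing between the two header layouts, and the walker receives that flag through its arg pointer. An illustrative pair of calls (the kconsole command that would issue them is outside this diff):

    /* Basic view: id, name, context, kernel address, address space. */
    task_print_list(false);

    /* Additional view: reference counts and connected IPC phones. */
    task_print_list(true);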