Changes in kernel/generic/src/proc/task.c [07d4271:40eab9f] in mainline
File: 1 edited
Legend: Unmodified (no marker), Added (+), Removed (-)
kernel/generic/src/proc/task.c
r07d4271  r40eab9f

158 158           return rc;
159 159
    160 +         atomic_store(&task->refcount, 0);
160 161           atomic_store(&task->lifecount, 0);
161 162
  …   …
200 201           if (!task)
201 202                   return NULL;
202     -
203     -         refcount_init(&task->refcount);
204 203
205 204           task_create_arch(task);
  …   …
269 268    *
270 269    */
271     - static void task_destroy(task_t *task)
    270 + void task_destroy(task_t *task)
272 271   {
273 272           /*
  …   …
300 299   void task_hold(task_t *task)
301 300   {
302     -         refcount_up(&task->refcount);
    301 +         atomic_inc(&task->refcount);
303 302   }
304 303
  …   …
312 311   void task_release(task_t *task)
313 312   {
314     -         if (refcount_down(&task->refcount))
    313 +         if ((atomic_predec(&task->refcount)) == 0)
315 314                   task_destroy(task);
316 315   }
  …   …
417 416   /** Find task structure corresponding to task ID.
418 417    *
    418 +  * The tasks_lock must be already held by the caller of this function and
    419 +  * interrupts must be disabled.
    420 +  *
419 421    * @param id Task ID.
420 422    *
421     -  * @return Task reference or NULL if there is no such task ID.
    423 +  * @return Task structure address or NULL if there is no such task ID.
422 424    *
423 425    */
424 426   task_t *task_find_by_id(task_id_t id)
425 427   {
426     -         task_t *task = NULL;
427     -
428     -         irq_spinlock_lock(&tasks_lock, true);
    428 +         assert(interrupts_disabled());
    429 +         assert(irq_spinlock_locked(&tasks_lock));
429 430
430 431           odlink_t *odlink = odict_find_eq(&tasks, &id, NULL);
431     -         if (odlink != NULL) {
432     -                 task = odict_get_instance(odlink, task_t, ltasks);
433     -
434     -                 /*
435     -                  * The directory of tasks can't hold a reference, since that would
436     -                  * prevent task from ever being destroyed. That means we have to
437     -                  * check for the case where the task is already being destroyed, but
438     -                  * not yet removed from the directory.
439     -                  */
440     -                 if (!refcount_try_up(&task->refcount))
441     -                         task = NULL;
442     -         }
443     -
444     -         irq_spinlock_unlock(&tasks_lock, true);
445     -
446     -         return task;
    432 +         if (odlink != NULL)
    433 +                 return odict_get_instance(odlink, task_t, ltasks);
    434 +
    435 +         return NULL;
447 436   }
448 437
  …   …
517 506           /* Current values of threads */
518 507           list_foreach(task->threads, th_link, thread_t, thread) {
    508 +                 irq_spinlock_lock(&thread->lock, false);
    509 +
519 510                   /* Process only counted threads */
520 511                   if (!thread->uncounted) {
  …   …
524 515                           }
525 516
526     -                         uret += atomic_time_read(&thread->ucycles);
527     -                         kret += atomic_time_read(&thread->kcycles);
    517 +                         uret += thread->ucycles;
    518 +                         kret += thread->kcycles;
528 519                   }
    520 +
    521 +                 irq_spinlock_unlock(&thread->lock, false);
529 522           }
530 523
  …   …
535 528   static void task_kill_internal(task_t *task)
536 529   {
537     -         irq_spinlock_lock(&task->lock, true);
    530 +         irq_spinlock_lock(&task->lock, false);
538 531
539 532           /*
  …   …
545 538           }
546 539
547     -         irq_spinlock_unlock(&task->lock, true);
    540 +         irq_spinlock_unlock(&task->lock, false);
548 541   }
549 542
  …   …
563 556                   return EPERM;
564 557
    558 +         irq_spinlock_lock(&tasks_lock, true);
    559 +
565 560           task_t *task = task_find_by_id(id);
566     -         if (!task)
    561 +         if (!task) {
    562 +                 irq_spinlock_unlock(&tasks_lock, true);
567 563                   return ENOENT;
    564 +         }
568 565
569 566           task_kill_internal(task);
570     -         task_release(task);
    567 +         irq_spinlock_unlock(&tasks_lock, true);
    568 +
571 569           return EOK;
572 570   }
  …   …
598 596           }
599 597
    598 +         irq_spinlock_lock(&tasks_lock, true);
600 599           task_kill_internal(TASK);
    600 +         irq_spinlock_unlock(&tasks_lock, true);
    601 +
601 602           thread_exit();
602 603   }
  …   …
627 628           if (additional)
628 629                   printf("%-8" PRIu64 " %9zu", task->taskid,
629     -                     atomic_load(&task->lifecount));
    630 +                     atomic_load(&task->refcount));
630 631           else
631 632                   printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
  …   …
639 640                   printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
640 641                       "%9zu\n", task->taskid, ucycles, usuffix, kcycles,
641     -                     ksuffix, atomic_load(&task->lifecount));
    642 +                     ksuffix, atomic_load(&task->refcount));
642 643           else
643 644                   printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
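For readers comparing the two sides of the diff: on the r40eab9f side the task reference count is a bare atomic counter that starts at zero via atomic_store(&task->refcount, 0). task_hold() raises it with atomic_inc(), and task_release() lowers it with atomic_predec(), calling task_destroy() when the count reaches zero. The stand-alone C11 sketch below illustrates that decrement-to-zero ownership pattern; it is not HelenOS code, the obj_t type and the obj_* names are invented for illustration, and <stdatomic.h> stands in for the kernel's atomic primitives.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative object with a bare atomic reference count, started at
     * zero just as atomic_store(&task->refcount, 0) does on the r40eab9f
     * side. */
    typedef struct {
            atomic_size_t refcount;
            const char *name;
    } obj_t;

    static void obj_destroy(obj_t *obj)
    {
            printf("destroying %s\n", obj->name);
            free(obj);
    }

    /* Analogue of task_hold(): one atomic increment per holder. */
    static void obj_hold(obj_t *obj)
    {
            atomic_fetch_add(&obj->refcount, 1);
    }

    /* Analogue of task_release(): atomic_fetch_sub() returns the previous
     * value, so a result of 1 means this call dropped the count to zero
     * and this holder runs the destructor, exactly once. */
    static void obj_release(obj_t *obj)
    {
            if (atomic_fetch_sub(&obj->refcount, 1) == 1)
                    obj_destroy(obj);
    }

    int main(void)
    {
            obj_t *obj = malloc(sizeof(*obj));
            if (!obj)
                    return 1;
            obj->name = "demo";
            atomic_init(&obj->refcount, 0);

            obj_hold(obj);     /* creator's reference */
            obj_hold(obj);     /* a second holder */
            obj_release(obj);  /* 2 -> 1, nothing happens */
            obj_release(obj);  /* 1 -> 0, obj_destroy() runs */
            return 0;
    }

The locking changes in the later hunks follow from the same design: unlike the refcount_try_up() variant on the r07d4271 side, task_find_by_id() on the r40eab9f side returns a bare pointer without taking a reference, so its callers are required (per the added doc comment and asserts) to hold tasks_lock with interrupts disabled around both the lookup and the subsequent task_kill_internal() call, which is exactly what the added irq_spinlock_lock()/irq_spinlock_unlock() pairs do.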