Changes in kernel/generic/src/proc/task.c [40eab9f:07d4271] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/task.c
r40eab9f r07d4271 158 158 return rc; 159 159 160 atomic_store(&task->refcount, 0);161 160 atomic_store(&task->lifecount, 0); 162 161 … … 201 200 if (!task) 202 201 return NULL; 202 203 refcount_init(&task->refcount); 203 204 204 205 task_create_arch(task); … … 268 269 * 269 270 */ 270 void task_destroy(task_t *task)271 static void task_destroy(task_t *task) 271 272 { 272 273 /* … … 299 300 void task_hold(task_t *task) 300 301 { 301 atomic_inc(&task->refcount);302 refcount_up(&task->refcount); 302 303 } 303 304 … … 311 312 void task_release(task_t *task) 312 313 { 313 if ( (atomic_predec(&task->refcount)) == 0)314 if (refcount_down(&task->refcount)) 314 315 task_destroy(task); 315 316 } … … 416 417 /** Find task structure corresponding to task ID. 417 418 * 418 * The tasks_lock must be already held by the caller of this function and419 * interrupts must be disabled.420 *421 419 * @param id Task ID. 422 420 * 423 * @return Task structure addressor NULL if there is no such task ID.421 * @return Task reference or NULL if there is no such task ID. 424 422 * 425 423 */ 426 424 task_t *task_find_by_id(task_id_t id) 427 425 { 428 assert(interrupts_disabled()); 429 assert(irq_spinlock_locked(&tasks_lock)); 426 task_t *task = NULL; 427 428 irq_spinlock_lock(&tasks_lock, true); 430 429 431 430 odlink_t *odlink = odict_find_eq(&tasks, &id, NULL); 432 if (odlink != NULL) 433 return odict_get_instance(odlink, task_t, ltasks); 434 435 return NULL; 431 if (odlink != NULL) { 432 task = odict_get_instance(odlink, task_t, ltasks); 433 434 /* 435 * The directory of tasks can't hold a reference, since that would 436 * prevent task from ever being destroyed. That means we have to 437 * check for the case where the task is already being destroyed, but 438 * not yet removed from the directory. 
439 */ 440 if (!refcount_try_up(&task->refcount)) 441 task = NULL; 442 } 443 444 irq_spinlock_unlock(&tasks_lock, true); 445 446 return task; 436 447 } 437 448 … … 506 517 /* Current values of threads */ 507 518 list_foreach(task->threads, th_link, thread_t, thread) { 508 irq_spinlock_lock(&thread->lock, false);509 510 519 /* Process only counted threads */ 511 520 if (!thread->uncounted) { … … 515 524 } 516 525 517 uret += thread->ucycles;518 kret += thread->kcycles;526 uret += atomic_time_read(&thread->ucycles); 527 kret += atomic_time_read(&thread->kcycles); 519 528 } 520 521 irq_spinlock_unlock(&thread->lock, false);522 529 } 523 530 … … 528 535 static void task_kill_internal(task_t *task) 529 536 { 530 irq_spinlock_lock(&task->lock, false);537 irq_spinlock_lock(&task->lock, true); 531 538 532 539 /* … … 538 545 } 539 546 540 irq_spinlock_unlock(&task->lock, false);547 irq_spinlock_unlock(&task->lock, true); 541 548 } 542 549 … … 556 563 return EPERM; 557 564 558 irq_spinlock_lock(&tasks_lock, true);559 560 565 task_t *task = task_find_by_id(id); 561 if (!task) { 562 irq_spinlock_unlock(&tasks_lock, true); 566 if (!task) 563 567 return ENOENT; 564 }565 568 566 569 task_kill_internal(task); 567 irq_spinlock_unlock(&tasks_lock, true); 568 570 task_release(task); 569 571 return EOK; 570 572 } … … 596 598 } 597 599 598 irq_spinlock_lock(&tasks_lock, true);599 600 task_kill_internal(TASK); 600 irq_spinlock_unlock(&tasks_lock, true);601 602 601 thread_exit(); 603 602 } … … 628 627 if (additional) 629 628 printf("%-8" PRIu64 " %9zu", task->taskid, 630 atomic_load(&task-> refcount));629 atomic_load(&task->lifecount)); 631 630 else 632 631 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p" … … 640 639 printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c " 641 640 "%9zu\n", task->taskid, ucycles, usuffix, kcycles, 642 ksuffix, atomic_load(&task-> refcount));641 ksuffix, atomic_load(&task->lifecount)); 643 642 else 644 643 printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p 
%18p\n",
Note: See TracChangeset for help on using the changeset viewer.