Changes in kernel/generic/src/proc/task.c [f35749e:5a5269d] in mainline
File:
- kernel/generic/src/proc/task.c (1 edited)
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
kernel/generic/src/proc/task.c

--- rf35749e
+++ r5a5269d

  /*
- * Copyright (c) 2025 Jiri Svoboda
  * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2018 Jiri Svoboda
  * All rights reserved.
  *
…
  *
  */
-void task_done(task_t *cur_task)
+void task_done(void)
 {
     size_t tasks_left;
…
     task_t *task_0 = ipc_box_0->task;
     ipc_box_0 = NULL;
-
     /*
      * The first task is held by kinit(), we need to release it or
…
     task = task_first();
     while (task != NULL) {
-        if (task != cur_task) {
+        if (task != TASK) {
             tasks_left++;
 #ifdef CONFIG_DEBUG
…
         return rc;
 
+    atomic_store(&task->refcount, 0);
     atomic_store(&task->lifecount, 0);
 
…
     if (!task)
         return NULL;
-
-    refcount_init(&task->refcount);
 
     task_create_arch(task);
…
 
     task->answerbox.active = true;
-
-    task->debug_sections = NULL;
 
 #ifdef CONFIG_UDEBUG
…
  *
  */
-static void task_destroy(task_t *task)
+void task_destroy(task_t *task)
 {
     /*
…
 void task_hold(task_t *task)
 {
-    refcount_up(&task->refcount);
+    atomic_inc(&task->refcount);
 }
 
…
 void task_release(task_t *task)
 {
-    if (refcount_down(&task->refcount))
+    if ((atomic_predec(&task->refcount)) == 0)
         task_destroy(task);
 }
…
     irq_spinlock_lock(&tasks_lock, true);
     irq_spinlock_lock(&TASK->lock, false);
+    irq_spinlock_lock(&threads_lock, false);
 
     /* Set task name */
     str_cpy(TASK->name, TASK_NAME_BUFLEN, namebuf);
 
+    irq_spinlock_unlock(&threads_lock, false);
     irq_spinlock_unlock(&TASK->lock, false);
     irq_spinlock_unlock(&tasks_lock, true);
…
 /** Find task structure corresponding to task ID.
  *
+ * The tasks_lock must be already held by the caller of this function and
+ * interrupts must be disabled.
+ *
  * @param id Task ID.
  *
- * @return Task reference or NULL if there is no such task ID.
+ * @return Task structure address or NULL if there is no such task ID.
  *
  */
 task_t *task_find_by_id(task_id_t id)
 {
-    task_t *task = NULL;
-
-    irq_spinlock_lock(&tasks_lock, true);
+    assert(interrupts_disabled());
+    assert(irq_spinlock_locked(&tasks_lock));
 
     odlink_t *odlink = odict_find_eq(&tasks, &id, NULL);
-    if (odlink != NULL) {
-        task = odict_get_instance(odlink, task_t, ltasks);
-
-        /*
-         * The directory of tasks can't hold a reference, since that would
-         * prevent task from ever being destroyed. That means we have to
-         * check for the case where the task is already being destroyed, but
-         * not yet removed from the directory.
-         */
-        if (!refcount_try_up(&task->refcount))
-            task = NULL;
-    }
-
-    irq_spinlock_unlock(&tasks_lock, true);
-
-    return task;
+    if (odlink != NULL)
+        return odict_get_instance(odlink, task_t, ltasks);
+
+    return NULL;
 }
…
     /* Current values of threads */
     list_foreach(task->threads, th_link, thread_t, thread) {
+        irq_spinlock_lock(&thread->lock, false);
+
         /* Process only counted threads */
         if (!thread->uncounted) {
…
             }
 
-            uret += atomic_time_read(&thread->ucycles);
-            kret += atomic_time_read(&thread->kcycles);
+            uret += thread->ucycles;
+            kret += thread->kcycles;
         }
+
+        irq_spinlock_unlock(&thread->lock, false);
     }
…
 static void task_kill_internal(task_t *task)
 {
-    irq_spinlock_lock(&task->lock, true);
+    irq_spinlock_lock(&task->lock, false);
+    irq_spinlock_lock(&threads_lock, false);
 
     /*
…
 
     list_foreach(task->threads, th_link, thread_t, thread) {
-        thread_interrupt(thread);
+        bool sleeping = false;
+
+        irq_spinlock_lock(&thread->lock, false);
+
+        thread->interrupted = true;
+        if (thread->state == Sleeping)
+            sleeping = true;
+
+        irq_spinlock_unlock(&thread->lock, false);
+
+        if (sleeping)
+            waitq_interrupt_sleep(thread);
     }
 
-    irq_spinlock_unlock(&task->lock, true);
+    irq_spinlock_unlock(&threads_lock, false);
+    irq_spinlock_unlock(&task->lock, false);
 }
 
…
         return EPERM;
 
+    irq_spinlock_lock(&tasks_lock, true);
+
     task_t *task = task_find_by_id(id);
-    if (!task)
+    if (!task) {
+        irq_spinlock_unlock(&tasks_lock, true);
         return ENOENT;
+    }
 
     task_kill_internal(task);
-    task_release(task);
+    irq_spinlock_unlock(&tasks_lock, true);
+
     return EOK;
 }
…
     }
 
+    irq_spinlock_lock(&tasks_lock, true);
     task_kill_internal(TASK);
+    irq_spinlock_unlock(&tasks_lock, true);
+
     thread_exit();
 }
…
     if (additional)
         printf("%-8" PRIu64 " %9zu", task->taskid,
-            atomic_load(&task->lifecount));
+            atomic_load(&task->refcount));
     else
         printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %10p %10p"
…
         printf("%-8" PRIu64 " %9" PRIu64 "%c %9" PRIu64 "%c "
             "%9zu\n", task->taskid, ucycles, usuffix, kcycles,
-            ksuffix, atomic_load(&task->lifecount));
+            ksuffix, atomic_load(&task->refcount));
     else
         printf("%-8" PRIu64 " %-14s %-5" PRIu32 " %18p %18p\n",
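For readers comparing the two sides of this diff: the f35749e column manages task lifetime with a dedicated reference-counter type (refcount_init, refcount_up, refcount_try_up, refcount_down), while the r5a5269d column uses a bare atomic counter guarded by tasks_lock and threads_lock. The sketch below is a minimal illustration, written with C11 atomics, of why a lookup path such as task_find_by_id needs a try-style increment when the task directory itself does not hold a reference. The ref_t type and the ref_* names here are invented for this example and are not the HelenOS <refcount.h> API; they only mimic its observable behaviour.

#include <stdatomic.h>
#include <stdbool.h>

/* Simplified reference counter, loosely modelled on the refcount pattern
 * used on the f35749e side of the diff. Illustrative only. */
typedef struct {
    atomic_int count;
} ref_t;

static inline void ref_init(ref_t *r)
{
    /* A new object starts with one reference held by its creator. */
    atomic_store(&r->count, 1);
}

static inline void ref_up(ref_t *r)
{
    /* The caller already holds a reference, so the count cannot be zero. */
    atomic_fetch_add(&r->count, 1);
}

static inline bool ref_try_up(ref_t *r)
{
    /* Acquire a reference only if the object is still alive.
     * Fails (returns false) once the count has dropped to zero,
     * i.e. the object is already being destroyed. */
    int old = atomic_load(&r->count);
    while (old != 0) {
        if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
            return true;
    }
    return false;
}

static inline bool ref_down(ref_t *r)
{
    /* Returns true when the last reference was dropped and the caller
     * is responsible for destroying the object. */
    return atomic_fetch_sub(&r->count, 1) == 1;
}

With this shape, a lookup can find an object in a directory that holds no reference of its own and still return NULL when the try-style acquisition fails, because the last real reference has already been dropped and destruction is underway; that is exactly the case the comment removed from task_find_by_id in this diff describes.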