Changes in kernel/generic/src/proc/thread.c [b169619:dfa4be62] in mainline
Files: 1 edited
Legend:
  ' '  Unmodified (context)
  '+'  Added
  '-'  Removed
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c  (rb169619)
+++ kernel/generic/src/proc/thread.c  (rdfa4be62)

 #include <arch/interrupt.h>
 #include <smp/ipi.h>
-#include <arch/faddr.h>
 #include <atomic.h>
 #include <memw.h>
…
 };

-enum sleep_state {
-    SLEEP_INITIAL,
-    SLEEP_ASLEEP,
-    SLEEP_WOKE,
-};
-
 /** Lock protecting the @c threads ordered dictionary.
  *
…
 static int threads_cmp(void *, void *);

-/** Thread wrapper.
- *
- * This wrapper is provided to ensure that every thread makes a call to
- * thread_exit() when its implementing function returns.
- *
- * interrupts_disable() is assumed.
- *
- */
-static void cushion(void)
-{
-    void (*f)(void *) = THREAD->thread_code;
-    void *arg = THREAD->thread_arg;
-    THREAD->last_cycle = get_cycle();
-
-    /* This is where each thread wakes up after its creation */
-    irq_spinlock_unlock(&THREAD->lock, false);
-    interrupts_enable();
-
-    f(arg);
-
-    thread_exit();
-
-    /* Not reached */
-}
-
 /** Initialization and allocation for thread_t structure
  *
…
     thread_t *thread = (thread_t *) obj;

-    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
     link_initialize(&thread->rq_link);
     link_initialize(&thread->wq_link);
…
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-    irq_spinlock_lock(&thread->lock, true);
-    thread->cpu = cpu;
+    ipl_t ipl = interrupts_disable();
+    atomic_set_unordered(&thread->cpu, cpu);
     thread->nomigrate++;
-    irq_spinlock_unlock(&thread->lock, true);
-}
-
-/** Invoked right before thread_ready() readies the thread. thread is locked. */
-static void before_thread_is_ready(thread_t *thread)
-{
-    assert(irq_spinlock_locked(&thread->lock));
-}
-
-/** Make thread ready
- *
- * Switch thread to the ready state. Consumes reference passed by the caller.
- *
- * @param thread Thread to make ready.
- *
- */
-void thread_ready(thread_t *thread)
-{
-    irq_spinlock_lock(&thread->lock, true);
-
-    assert(thread->state != Ready);
-
-    before_thread_is_ready(thread);
-
-    int i = (thread->priority < RQ_COUNT - 1) ?
-        ++thread->priority : thread->priority;
-
-    /* Prefer the CPU on which the thread ran last */
-    cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
-
-    thread->state = Ready;
-
-    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-
-    /*
-     * Append thread to respective ready queue
-     * on respective processor.
-     */
-
-    list_append(&thread->rq_link, &cpu->rq[i].rq);
-    cpu->rq[i].n++;
-    irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
-    atomic_inc(&nrdy);
-    atomic_inc(&cpu->nrdy);
+    interrupts_restore(ipl);
+}
+
+/** Start a thread that wasn't started yet since it was created.
+ *
+ * @param thread A reference to the newly created thread.
+ */
+void thread_start(thread_t *thread)
+{
+    assert(atomic_get_unordered(&thread->state) == Entering);
+    thread_requeue_sleeping(thread_ref(thread));
 }

…
     irq_spinlock_unlock(&tidlock, true);

-    memset(&thread->saved_context, 0, sizeof(thread->saved_context));
-    context_set(&thread->saved_context, FADDR(cushion),
-        (uintptr_t) thread->kstack, STACK_SIZE);
+    context_create(&thread->saved_context, thread_main_func,
+        thread->kstack, STACK_SIZE);

     current_initialize((current_t *) thread->kstack);
-
-    ipl_t ipl = interrupts_disable();
-    thread->saved_ipl = interrupts_read();
-    interrupts_restore(ipl);

     str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
…
     thread->thread_code = func;
     thread->thread_arg = arg;
-    thread->ucycles = 0;
-    thread->kcycles = 0;
+    thread->ucycles = ATOMIC_TIME_INITIALIZER();
+    thread->kcycles = ATOMIC_TIME_INITIALIZER();
     thread->uncounted =
         ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-    thread->priority = -1;          /* Start in rq[0] */
-    thread->cpu = NULL;
+    atomic_init(&thread->priority, 0);
+    atomic_init(&thread->cpu, NULL);
     thread->stolen = false;
     thread->uspace =
…

     thread->nomigrate = 0;
-    thread->state = Entering;
+    atomic_init(&thread->state, Entering);

     atomic_init(&thread->sleep_queue, NULL);
…
 #ifdef CONFIG_UDEBUG
     /* Initialize debugging stuff */
-    thread->btrace = false;
+    atomic_init(&thread->btrace, false);
     udebug_thread_initialize(&thread->udebug);
 #endif
…

     if (!thread->uncounted) {
-        thread->task->ucycles += thread->ucycles;
-        thread->task->kcycles += thread->kcycles;
+        thread->task->ucycles += atomic_time_read(&thread->ucycles);
+        thread->task->kcycles += atomic_time_read(&thread->kcycles);
     }

     irq_spinlock_unlock(&thread->task->lock, false);

-    assert((thread->state == Exiting) || (thread->state == Lingering));
+    assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));

     /* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-    if (thread->cpu) {
+    cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+    if (cpu) {
         /*
          * We need to lock for this because the old CPU can concurrently try
…
          * it to finish. An atomic compare-and-swap wouldn't be enough.
          */
-        irq_spinlock_lock(&thread->cpu->fpu_lock, false);
-
-        thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
-            memory_order_relaxed);
-
-        if (owner == thread) {
-            atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
-                memory_order_relaxed);
-        }
-
-        irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
+        irq_spinlock_lock(&cpu->fpu_lock, false);
+
+        if (atomic_get_unordered(&cpu->fpu_owner) == thread)
+            atomic_set_unordered(&cpu->fpu_owner, NULL);
+
+        irq_spinlock_unlock(&cpu->fpu_lock, false);
     }
 #endif
…
     }

-    irq_spinlock_lock(&THREAD->lock, true);
-    THREAD->state = Exiting;
-    irq_spinlock_unlock(&THREAD->lock, true);
-
-    scheduler();
-
-    panic("should never be reached");
+    scheduler_enter(Exiting);
+    unreachable();
 }
…
 }

-static void thread_wait_internal(void)
-{
-    assert(THREAD != NULL);
-
-    ipl_t ipl = interrupts_disable();
-
-    if (atomic_load(&haltstate))
-        halt();
-
-    /*
-     * Lock here to prevent a race between entering the scheduler and another
-     * thread rescheduling this thread.
-     */
-    irq_spinlock_lock(&THREAD->lock, false);
-
-    int expected = SLEEP_INITIAL;
-
-    /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
-    if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
-        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
-        THREAD->state = Sleeping;
-        scheduler_locked(ipl);
-    } else {
-        assert(expected == SLEEP_WOKE);
-        /* Return immediately. */
-        irq_spinlock_unlock(&THREAD->lock, false);
-        interrupts_restore(ipl);
-    }
-}
-
 static void thread_wait_timeout_callback(void *arg)
 {
…
     timeout_t timeout;

+    /* Extra check to avoid going to scheduler if we don't need to. */
+    if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+        SLEEP_INITIAL)
+        return THREAD_WAIT_SUCCESS;
+
     if (deadline != DEADLINE_NEVER) {
-        /* Extra check to avoid setting up a deadline if we don't need to. */
-        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
-            SLEEP_INITIAL)
-            return THREAD_WAIT_SUCCESS;
-
         timeout_initialize(&timeout);
         timeout_register_deadline(&timeout, deadline,
…
     }

-    thread_wait_internal();
+    scheduler_enter(Sleeping);

     if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
…

     int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
-        memory_order_release);
+        memory_order_acq_rel);

     if (state == SLEEP_ASLEEP) {
…
          * the waking thread by the sleeper in thread_wait_finish().
          */
-        thread_ready(thread);
+        thread_requeue_sleeping(thread);
     }
 }
…
 void thread_migration_disable(void)
 {
+    ipl_t ipl = interrupts_disable();
+
     assert(THREAD);
-
     THREAD->nomigrate++;
+
+    interrupts_restore(ipl);
 }
…
 void thread_migration_enable(void)
 {
+    ipl_t ipl = interrupts_disable();
+
     assert(THREAD);
     assert(THREAD->nomigrate > 0);
…
     if (THREAD->nomigrate > 0)
         THREAD->nomigrate--;
+
+    interrupts_restore(ipl);
 }
…

 /** Wait for another thread to exit.
- * This function does not destroy the thread. Reference counting handles that.
+ * After successful wait, the thread reference is destroyed.
  *
  * @param thread Thread to join on exit.
…
 errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
 {
+    assert(thread != NULL);
+
     if (thread == THREAD)
         return EINVAL;

-    irq_spinlock_lock(&thread->lock, true);
-    state_t state = thread->state;
-    irq_spinlock_unlock(&thread->lock, true);
-
-    if (state == Exiting) {
-        return EOK;
-    } else {
-        return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
-    }
+    errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+
+    if (rc == EOK)
+        thread_put(thread);
+
+    return rc;
+}
+
+void thread_detach(thread_t *thread)
+{
+    thread_put(thread);
 }
…

     (void) waitq_sleep_timeout(&wq, usec);
+}
+
+/** Allow other threads to run. */
+void thread_yield(void)
+{
+    assert(THREAD != NULL);
+    scheduler_enter(Running);
 }
…
     uint64_t ucycles, kcycles;
     char usuffix, ksuffix;
-    order_suffix(thread->ucycles, &ucycles, &usuffix);
-    order_suffix(thread->kcycles, &kcycles, &ksuffix);
+    order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
+    order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
+
+    state_t state = atomic_get_unordered(&thread->state);

     char *name;
…
     else
         printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-            thread->tid, name, thread, thread_states[thread->state],
+            thread->tid, name, thread, thread_states[state],
             thread->task, thread->task->container);

     if (additional) {
-        if (thread->cpu)
-            printf("%-5u", thread->cpu->id);
+        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+        if (cpu)
+            printf("%-5u", cpu->id);
         else
             printf("none ");

-        if (thread->state == Sleeping) {
+        if (state == Sleeping) {
             printf(" %p", thread->sleep_queue);
         }
…
 void thread_update_accounting(bool user)
 {
+    assert(interrupts_disabled());
+
     uint64_t time = get_cycle();

-    assert(interrupts_disabled());
-    assert(irq_spinlock_locked(&THREAD->lock));
-
     if (user)
-        THREAD->ucycles += time - THREAD->last_cycle;
+        atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
     else
-        THREAD->kcycles += time - THREAD->last_cycle;
+        atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);

     THREAD->last_cycle = time;
…
      */

-    irq_spinlock_lock(&thread->lock, true);
-
-    bool sleeping = false;
-    istate_t *istate = thread->udebug.uspace_state;
-    if (istate != NULL) {
-        printf("Scheduling thread stack trace.\n");
-        thread->btrace = true;
-        if (thread->state == Sleeping)
-            sleeping = true;
-    } else
-        printf("Thread interrupt state not available.\n");
-
-    irq_spinlock_unlock(&thread->lock, true);
-
-    if (sleeping)
-        thread_wakeup(thread);
-
+    printf("Scheduling thread stack trace.\n");
+    atomic_set_unordered(&thread->btrace, true);
+
+    thread_wakeup(thread);
     thread_put(thread);
 }
…
     thread_attach(thread, TASK);
 #endif
-    thread_ready(thread);
+    thread_start(thread);
+    thread_put(thread);

     return 0;
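The thread_wakeup() hunk changes the atomic exchange on sleep_state from memory_order_release to memory_order_acq_rel, and the old compare-and-swap sleeper path moves out of thread_wait_internal() into scheduler_enter(Sleeping). The following standalone C11/pthreads sketch models that handshake for readers of the changeset; it is not HelenOS code, the scheduler is replaced by a condition variable, and every name in it (sleeper, wake, run_lock, runnable) is invented for the illustration.

/* Build with: cc -std=c11 -pthread sleep_model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

static atomic_int sleep_state = SLEEP_INITIAL;

/* Stand-in for the run queues: the waker marks the sleeper runnable. */
static pthread_mutex_t run_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t run_cv = PTHREAD_COND_INITIALIZER;
static bool runnable = false;

/* Sleeper side: roughly the guarantee scheduler_enter(Sleeping) provides. */
static void *sleeper(void *arg)
{
    (void) arg;
    int expected = SLEEP_INITIAL;

    /* Go to sleep only if no wakeup has been recorded yet. */
    if (atomic_compare_exchange_strong_explicit(&sleep_state, &expected,
        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
        pthread_mutex_lock(&run_lock);
        while (!runnable)
            pthread_cond_wait(&run_cv, &run_lock);
        pthread_mutex_unlock(&run_lock);
    }
    /* Otherwise expected == SLEEP_WOKE and we return immediately. */
    puts("sleeper: running again");
    return NULL;
}

/* Waker side: mirrors the structure of thread_wakeup(). */
static void wake(void)
{
    /*
     * acq_rel, as in the changeset: publish the waker's prior writes and
     * also observe whatever the sleeper wrote before it went to sleep.
     */
    int old = atomic_exchange_explicit(&sleep_state, SLEEP_WOKE,
        memory_order_acq_rel);

    if (old == SLEEP_ASLEEP) {
        /* The sleeper really blocked: requeue (here: signal) it. */
        pthread_mutex_lock(&run_lock);
        runnable = true;
        pthread_cond_signal(&run_cv);
        pthread_mutex_unlock(&run_lock);
    }
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, sleeper, NULL);
    wake();
    pthread_join(t, NULL);
    return 0;
}

In this model, the waker requeues the sleeper only after its exchange observes SLEEP_ASLEEP; presumably that is why the changeset strengthens the exchange to acq_rel, since with a release-only exchange the waker would not be guaranteed to see the state the sleeper published before blocking when it goes on to requeue the thread.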