Changes in kernel/generic/src/proc/thread.c [dfa4be62:ed7e057] in mainline
File: kernel/generic/src/proc/thread.c (1 edited)
Legend:
  ' '  unmodified
  '+'  added
  '-'  removed
kernel/generic/src/proc/thread.c
--- revision dfa4be62
+++ revision ed7e057

 static int threads_cmp(void *, void *);
 
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
+ *
+ * interrupts_disable() is assumed.
+ *
+ */
+static void cushion(void)
+{
+	void (*f)(void *) = THREAD->thread_code;
+	void *arg = THREAD->thread_arg;
+
+	/* This is where each thread wakes up after its creation */
+	irq_spinlock_unlock(&THREAD->lock, false);
+	interrupts_enable();
+
+	f(arg);
+
+	thread_exit();
+
+	/* Not reached */
+}
+
 /** Initialization and allocation for thread_t structure
  *
… …
 	thread_t *thread = (thread_t *) obj;
 
+	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
 	link_initialize(&thread->rq_link);
 	link_initialize(&thread->wq_link);
… …
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-	ipl_t ipl = interrupts_disable();
-	atomic_set_unordered(&thread->cpu, cpu);
+	irq_spinlock_lock(&thread->lock, true);
+	thread->cpu = cpu;
 	thread->nomigrate++;
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&thread->lock, true);
 }
… …
 void thread_start(thread_t *thread)
 {
-	assert(atomic_get_unordered(&thread->state) == Entering);
-	thread_requeue_sleeping(thread_ref(thread));
+	assert(thread->state == Entering);
+	thread_ready(thread_ref(thread));
+}
+
+/** Make thread ready
+ *
+ * Switch thread to the ready state. Consumes reference passed by the caller.
+ *
+ * @param thread Thread to make ready.
+ *
+ */
+void thread_ready(thread_t *thread)
+{
+	irq_spinlock_lock(&thread->lock, true);
+
+	assert(thread->state != Ready);
+
+	int i = (thread->priority < RQ_COUNT - 1) ?
+	    ++thread->priority : thread->priority;
+
+	/* Prefer the CPU on which the thread ran last */
+	cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
+
+	thread->state = Ready;
+
+	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
+
+	/*
+	 * Append thread to respective ready queue
+	 * on respective processor.
+	 */
+
+	list_append(&thread->rq_link, &cpu->rq[i].rq);
+	cpu->rq[i].n++;
+	irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+
+	atomic_inc(&nrdy);
+	atomic_inc(&cpu->nrdy);
 }
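The cushion() wrapper restored above exists so that a thread whose implementing function simply returns still leaves through thread_exit(). The same trampoline idea can be sketched in user space with POSIX threads; this is a hedged illustration only, and payload_t, trampoline() and worker() are invented names, not kernel or libc API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented for this sketch: what the new thread should run. */
typedef struct {
	void (*func)(void *);
	void *arg;
} payload_t;

/* Every thread enters through this one routine, so the teardown at the
 * end is unskippable, just as cushion() guarantees thread_exit(). */
static void *trampoline(void *p)
{
	payload_t pl = *(payload_t *) p;
	free(p);

	pl.func(pl.arg);              /* the thread's implementing function */

	puts("tearing down thread");  /* stands in for thread_exit() */
	pthread_exit(NULL);
	/* Not reached */
}

static void worker(void *arg)
{
	printf("hello from %s\n", (const char *) arg);
	/* Returning here is fine: trampoline() still runs the teardown. */
}

int main(void)
{
	payload_t *pl = malloc(sizeof(*pl));
	if (pl == NULL)
		return 1;

	pl->func = worker;
	pl->arg = "worker";

	pthread_t t;
	pthread_create(&t, NULL, trampoline, pl);
	pthread_join(t, NULL);
	return 0;
}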
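thread_ready() also shows a lock hand-off: irq_spinlock_pass() moves from the thread's lock to the chosen run queue's lock without enabling interrupts in between. User space has no interrupt state to pass along, but the effect can be approximated with hand-over-hand mutex locking, as in this minimal sketch (queue_t, sthread_t and make_ready() are invented names; pthread mutexes stand in for irq spinlocks):

#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;
	int n;                  /* queued items, like cpu->rq[i].n */
} queue_t;

typedef struct {
	pthread_mutex_t lock;
	int state;              /* 0 = entering, 1 = ready */
} sthread_t;

static void make_ready(sthread_t *t, queue_t *rq)
{
	pthread_mutex_lock(&t->lock);
	t->state = 1;

	/* Hand over hand: take the queue lock before dropping the
	 * thread lock, so there is no window in which the thread is
	 * marked ready, enqueued nowhere, and unlocked. */
	pthread_mutex_lock(&rq->lock);
	pthread_mutex_unlock(&t->lock);

	rq->n++;                /* enqueue under the queue lock */
	pthread_mutex_unlock(&rq->lock);
}

int main(void)
{
	queue_t rq = { PTHREAD_MUTEX_INITIALIZER, 0 };
	sthread_t t = { PTHREAD_MUTEX_INITIALIZER, 0 };

	make_ready(&t, &rq);
	printf("state=%d queued=%d\n", t.state, rq.n);
	return 0;
}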
… …
 	irq_spinlock_unlock(&tidlock, true);
 
-	context_create(&thread->saved_context, thread_main_func,
-	    thread->kstack, STACK_SIZE);
+	context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
 
 	current_initialize((current_t *) thread->kstack);
… …
 	thread->thread_code = func;
 	thread->thread_arg = arg;
-	thread->ucycles = ATOMIC_TIME_INITIALIZER();
-	thread->kcycles = ATOMIC_TIME_INITIALIZER();
+	thread->ucycles = 0;
+	thread->kcycles = 0;
 	thread->uncounted =
 	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-	atomic_init(&thread->priority, 0);
-	atomic_init(&thread->cpu, NULL);
+	thread->priority = -1; /* Start in rq[0] */
+	thread->cpu = NULL;
 	thread->stolen = false;
 	thread->uspace =
… …
 
 	thread->nomigrate = 0;
-	atomic_init(&thread->state, Entering);
+	thread->state = Entering;
 
 	atomic_init(&thread->sleep_queue, NULL);
… …
 #ifdef CONFIG_UDEBUG
 	/* Initialize debugging stuff */
-	atomic_init(&thread->btrace, false);
+	thread->btrace = false;
 	udebug_thread_initialize(&thread->udebug);
 #endif
… …
 
 	if (!thread->uncounted) {
-		thread->task->ucycles += atomic_time_read(&thread->ucycles);
-		thread->task->kcycles += atomic_time_read(&thread->kcycles);
+		thread->task->ucycles += thread->ucycles;
+		thread->task->kcycles += thread->kcycles;
 	}
 
 	irq_spinlock_unlock(&thread->task->lock, false);
 
-	assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
+	assert((thread->state == Exiting) || (thread->state == Lingering));
 
 	/* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-	if (cpu) {
+	if (thread->cpu) {
 		/*
 		 * We need to lock for this because the old CPU can concurrently try
… …
 		 * it to finish. An atomic compare-and-swap wouldn't be enough.
 		 */
-		irq_spinlock_lock(&cpu->fpu_lock, false);
-
-		if (atomic_get_unordered(&cpu->fpu_owner) == thread)
-			atomic_set_unordered(&cpu->fpu_owner, NULL);
-
-		irq_spinlock_unlock(&cpu->fpu_lock, false);
+		irq_spinlock_lock(&thread->cpu->fpu_lock, false);
+
+		thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
+		    memory_order_relaxed);
+
+		if (owner == thread) {
+			atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
+			    memory_order_relaxed);
+		}
+
+		irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
 	}
 #endif
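Note how the rewritten CONFIG_FPU_LAZY block accesses cpu->fpu_owner with memory_order_relaxed while holding fpu_lock: the spinlock supplies the mutual exclusion and ordering, and the relaxed atomics merely keep the pointer tear-free for any concurrent lock-free readers. A minimal user-space sketch of the same idiom, assuming C11 atomics and a pthread mutex in place of the irq spinlock (fpu_disown() and the trivial thread type are invented for the illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	int id;
} thread_t;

static pthread_mutex_t fpu_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(thread_t *) fpu_owner;

/* Clear ownership, but only if `t` still owns the FPU: another
 * thread may have taken ownership since `t` last ran. */
static void fpu_disown(thread_t *t)
{
	pthread_mutex_lock(&fpu_lock);

	/* Relaxed is enough here: the mutex already orders these
	 * accesses against every other lock holder. */
	thread_t *owner = atomic_load_explicit(&fpu_owner,
	    memory_order_relaxed);
	if (owner == t)
		atomic_store_explicit(&fpu_owner, NULL,
		    memory_order_relaxed);

	pthread_mutex_unlock(&fpu_lock);
}

int main(void)
{
	thread_t me = { .id = 1 };

	atomic_store_explicit(&fpu_owner, &me, memory_order_relaxed);
	fpu_disown(&me);

	printf("owner=%p\n", (void *) atomic_load(&fpu_owner));
	return 0;
}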
… …
 	 * the waking thread by the sleeper in thread_wait_finish().
 	 */
-		thread_requeue_sleeping(thread);
+		thread_ready(thread);
 	}
 }
… …
 void thread_migration_disable(void)
 {
-	ipl_t ipl = interrupts_disable();
-
 	assert(THREAD);
+
 	THREAD->nomigrate++;
-
-	interrupts_restore(ipl);
 }
… …
 void thread_migration_enable(void)
 {
-	ipl_t ipl = interrupts_disable();
-
 	assert(THREAD);
 	assert(THREAD->nomigrate > 0);
… …
 	if (THREAD->nomigrate > 0)
 		THREAD->nomigrate--;
-
-	interrupts_restore(ipl);
 }
… …
 		return EINVAL;
 
-	errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+	irq_spinlock_lock(&thread->lock, true);
+	state_t state = thread->state;
+	irq_spinlock_unlock(&thread->lock, true);
+
+	errno_t rc = EOK;
+
+	if (state != Exiting)
+		rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
 
 	if (rc == EOK)
… …
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
-	order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
-	order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
-
-	state_t state = atomic_get_unordered(&thread->state);
+	order_suffix(thread->ucycles, &ucycles, &usuffix);
+	order_suffix(thread->kcycles, &kcycles, &ksuffix);
 
 	char *name;
… …
 	else
 		printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-		    thread->tid, name, thread, thread_states[state],
+		    thread->tid, name, thread, thread_states[thread->state],
 		    thread->task, thread->task->container);
 
 	if (additional) {
-		cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-		if (cpu)
-			printf("%-5u", cpu->id);
+		if (thread->cpu)
+			printf("%-5u", thread->cpu->id);
 		else
 			printf("none ");
 
-		if (state == Sleeping) {
+		if (thread->state == Sleeping) {
 			printf(" %p", thread->sleep_queue);
 		}
… …
 void thread_update_accounting(bool user)
 {
+	uint64_t time = get_cycle();
+
 	assert(interrupts_disabled());
-
-	uint64_t time = get_cycle();
+	assert(irq_spinlock_locked(&THREAD->lock));
 
 	if (user)
-		atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
+		THREAD->ucycles += time - THREAD->last_cycle;
 	else
-		atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
+		THREAD->kcycles += time - THREAD->last_cycle;
 
 	THREAD->last_cycle = time;
… …
 	 */
 
-	printf("Scheduling thread stack trace.\n");
-	atomic_set_unordered(&thread->btrace, true);
-
-	thread_wakeup(thread);
+	irq_spinlock_lock(&thread->lock, true);
+
+	bool sleeping = false;
+	istate_t *istate = thread->udebug.uspace_state;
+	if (istate != NULL) {
+		printf("Scheduling thread stack trace.\n");
+		thread->btrace = true;
+		if (thread->state == Sleeping)
+			sleeping = true;
+	} else
+		printf("Thread interrupt state not available.\n");
+
+	irq_spinlock_unlock(&thread->lock, true);
+
+	if (sleeping)
+		thread_wakeup(thread);
+
 	thread_put(thread);
 }
… …
 	thread_attach(thread, TASK);
 #endif
-	thread_start(thread);
-	thread_put(thread);
+	thread_ready(thread);
 
 	return 0;
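The thread_update_accounting() hunk above restores plain delta accounting: the cycles elapsed since the last checkpoint are charged to either the user or the kernel counter, and the interval restarts. A self-contained sketch of the pattern, with clock_gettime() standing in for the kernel's get_cycle() and globals standing in for the THREAD fields (all names here are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for get_cycle(): monotonic nanoseconds. */
static uint64_t get_cycle(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000u + (uint64_t) ts.tv_nsec;
}

static uint64_t ucycles, kcycles, last_cycle;

/* Charge the time since the previous checkpoint to the user or the
 * kernel bucket, then restart the interval, mirroring
 * thread_update_accounting(). */
static void update_accounting(bool user)
{
	uint64_t time = get_cycle();

	if (user)
		ucycles += time - last_cycle;
	else
		kcycles += time - last_cycle;

	last_cycle = time;
}

int main(void)
{
	last_cycle = get_cycle();

	update_accounting(false);  /* close a kernel-time interval */
	update_accounting(true);   /* close a user-time interval */

	printf("u=%llu k=%llu\n", (unsigned long long) ucycles,
	    (unsigned long long) kcycles);
	return 0;
}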