Changes in kernel/generic/src/proc/thread.c [ed7e057:dfa4be62] in mainline
Files: 1 edited
Legend:
- Unmodified (no prefix)
- Added (prefixed with "+")
- Removed (prefixed with "-")
kernel/generic/src/proc/thread.c
--- red7e057
+++ rdfa4be62

  static int threads_cmp(void *, void *);

- /** Thread wrapper.
-  *
-  * This wrapper is provided to ensure that every thread makes a call to
-  * thread_exit() when its implementing function returns.
-  *
-  * interrupts_disable() is assumed.
-  *
-  */
- static void cushion(void)
- {
-     void (*f)(void *) = THREAD->thread_code;
-     void *arg = THREAD->thread_arg;
-
-     /* This is where each thread wakes up after its creation */
-     irq_spinlock_unlock(&THREAD->lock, false);
-     interrupts_enable();
-
-     f(arg);
-
-     thread_exit();
-
-     /* Not reached */
- }
-
  /** Initialization and allocation for thread_t structure
   *
…
      thread_t *thread = (thread_t *) obj;

-     irq_spinlock_initialize(&thread->lock, "thread_t_lock");
      link_initialize(&thread->rq_link);
      link_initialize(&thread->wq_link);
…
  void thread_wire(thread_t *thread, cpu_t *cpu)
  {
-     irq_spinlock_lock(&thread->lock, true);
-     thread->cpu = cpu;
+     ipl_t ipl = interrupts_disable();
+     atomic_set_unordered(&thread->cpu, cpu);
      thread->nomigrate++;
-     irq_spinlock_unlock(&thread->lock, true);
+     interrupts_restore(ipl);
  }
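Note on the thread_wire() hunk above: the per-thread spinlock is dropped, so nomigrate is now protected by disabling interrupts rather than by the lock, and thread->cpu moves to the unordered atomic accessors. This changeset does not show the definitions of atomic_get_unordered()/atomic_set_unordered(), but a later hunk replaces explicit memory_order_relaxed loads and stores with them one-for-one, so a relaxed C11 atomic access is a reasonable mental model. The stand-alone sketch below illustrates only that model; the example_* names are invented for illustration and are not the HelenOS API.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for atomic_set_unordered()/atomic_get_unordered():
 * the access itself is atomic, but it imposes no ordering on surrounding
 * memory operations (memory_order_relaxed).
 */
#define example_get_unordered(ptr) \
    atomic_load_explicit((ptr), memory_order_relaxed)
#define example_set_unordered(ptr, val) \
    atomic_store_explicit((ptr), (val), memory_order_relaxed)

static _Atomic int wired_cpu_id;    /* plays the role of thread->cpu */

int main(void)
{
    example_set_unordered(&wired_cpu_id, 3);
    printf("wired to cpu %d\n", example_get_unordered(&wired_cpu_id));
    return 0;
}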
…
  void thread_start(thread_t *thread)
  {
-     assert(thread->state == Entering);
-     thread_ready(thread_ref(thread));
- }
-
- /** Make thread ready
-  *
-  * Switch thread to the ready state. Consumes reference passed by the caller.
-  *
-  * @param thread Thread to make ready.
-  *
-  */
- void thread_ready(thread_t *thread)
- {
-     irq_spinlock_lock(&thread->lock, true);
-
-     assert(thread->state != Ready);
-
-     int i = (thread->priority < RQ_COUNT - 1) ?
-         ++thread->priority : thread->priority;
-
-     /* Prefer the CPU on which the thread ran last */
-     cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
-
-     thread->state = Ready;
-
-     irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-
-     /*
-      * Append thread to respective ready queue
-      * on respective processor.
-      */
-
-     list_append(&thread->rq_link, &cpu->rq[i].rq);
-     cpu->rq[i].n++;
-     irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
-     atomic_inc(&nrdy);
-     atomic_inc(&cpu->nrdy);
+     assert(atomic_get_unordered(&thread->state) == Entering);
+     thread_requeue_sleeping(thread_ref(thread));
  }
…
      irq_spinlock_unlock(&tidlock, true);

-     context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
+     context_create(&thread->saved_context, thread_main_func,
+         thread->kstack, STACK_SIZE);

      current_initialize((current_t *) thread->kstack);
…
      thread->thread_code = func;
      thread->thread_arg = arg;
-     thread->ucycles = 0;
-     thread->kcycles = 0;
+     thread->ucycles = ATOMIC_TIME_INITIALIZER();
+     thread->kcycles = ATOMIC_TIME_INITIALIZER();
      thread->uncounted =
          ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-     thread->priority = -1;    /* Start in rq[0] */
-     thread->cpu = NULL;
+     atomic_init(&thread->priority, 0);
+     atomic_init(&thread->cpu, NULL);
      thread->stolen = false;
      thread->uspace =
…

      thread->nomigrate = 0;
-     thread->state = Entering;
+     atomic_init(&thread->state, Entering);

      atomic_init(&thread->sleep_queue, NULL);
…
  #ifdef CONFIG_UDEBUG
      /* Initialize debugging stuff */
-     thread->btrace = false;
+     atomic_init(&thread->btrace, false);
      udebug_thread_initialize(&thread->udebug);
  #endif
…

      if (!thread->uncounted) {
-         thread->task->ucycles += thread->ucycles;
-         thread->task->kcycles += thread->kcycles;
+         thread->task->ucycles += atomic_time_read(&thread->ucycles);
+         thread->task->kcycles += atomic_time_read(&thread->kcycles);
      }

      irq_spinlock_unlock(&thread->task->lock, false);

-     assert((thread->state == Exiting) || (thread->state == Lingering));
+     assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));

      /* Clear cpu->fpu_owner if set to this thread. */
  #ifdef CONFIG_FPU_LAZY
-     if (thread->cpu) {
+     cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+     if (cpu) {
          /*
           * We need to lock for this because the old CPU can concurrently try
…
           * it to finish. An atomic compare-and-swap wouldn't be enough.
           */
-         irq_spinlock_lock(&thread->cpu->fpu_lock, false);
-
-         thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
-             memory_order_relaxed);
-
-         if (owner == thread) {
-             atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
-                 memory_order_relaxed);
-         }
-
-         irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
+         irq_spinlock_lock(&cpu->fpu_lock, false);
+
+         if (atomic_get_unordered(&cpu->fpu_owner) == thread)
+             atomic_set_unordered(&cpu->fpu_owner, NULL);
+
+         irq_spinlock_unlock(&cpu->fpu_lock, false);
      }
  #endif
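Note on the thread_create()/thread_destroy() hunks above: the ucycles/kcycles counters switch from plain integers guarded by the thread lock to the atomic_time_* helpers (ATOMIC_TIME_INITIALIZER(), atomic_time_read(), and, further below, atomic_time_increment()), so accounting can be read and updated without taking a lock. The helpers' definitions are not part of this diff; the sketch below is a plausible 64-bit model using a relaxed atomic counter, with example_* names invented for illustration (the real implementation may differ, e.g. on 32-bit targets).

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of an atomic cycle counter (not the HelenOS definition). */
typedef struct {
    atomic_uint_fast64_t cycles;
} example_atomic_time_t;

#define EXAMPLE_ATOMIC_TIME_INITIALIZER() { 0 }

static inline uint64_t example_atomic_time_read(example_atomic_time_t *t)
{
    return atomic_load_explicit(&t->cycles, memory_order_relaxed);
}

static inline void example_atomic_time_increment(example_atomic_time_t *t,
    uint64_t delta)
{
    atomic_fetch_add_explicit(&t->cycles, delta, memory_order_relaxed);
}

static example_atomic_time_t ucycles = EXAMPLE_ATOMIC_TIME_INITIALIZER();

int main(void)
{
    example_atomic_time_increment(&ucycles, 1234);    /* e.g. from accounting */
    printf("ucycles = %" PRIu64 "\n", (uint64_t) example_atomic_time_read(&ucycles));
    return 0;
}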
…
           * the waking thread by the sleeper in thread_wait_finish().
           */
-         thread_ready(thread);
+         thread_requeue_sleeping(thread);
      }
  }
…
  void thread_migration_disable(void)
  {
+     ipl_t ipl = interrupts_disable();
+
      assert(THREAD);
-
      THREAD->nomigrate++;
+
+     interrupts_restore(ipl);
  }

…
  void thread_migration_enable(void)
  {
+     ipl_t ipl = interrupts_disable();
+
      assert(THREAD);
      assert(THREAD->nomigrate > 0);
…
      if (THREAD->nomigrate > 0)
          THREAD->nomigrate--;
+
+     interrupts_restore(ipl);
  }

…
          return EINVAL;

-     irq_spinlock_lock(&thread->lock, true);
-     state_t state = thread->state;
-     irq_spinlock_unlock(&thread->lock, true);
-
-     errno_t rc = EOK;
-
-     if (state != Exiting)
-         rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+     errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);

      if (rc == EOK)
…
      uint64_t ucycles, kcycles;
      char usuffix, ksuffix;
-     order_suffix(thread->ucycles, &ucycles, &usuffix);
-     order_suffix(thread->kcycles, &kcycles, &ksuffix);
+     order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
+     order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
+
+     state_t state = atomic_get_unordered(&thread->state);

      char *name;
…
      else
          printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-             thread->tid, name, thread, thread_states[thread->state],
+             thread->tid, name, thread, thread_states[state],
              thread->task, thread->task->container);

      if (additional) {
-         if (thread->cpu)
-             printf("%-5u", thread->cpu->id);
+         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+         if (cpu)
+             printf("%-5u", cpu->id);
          else
              printf("none ");

-         if (thread->state == Sleeping) {
+         if (state == Sleeping) {
              printf(" %p", thread->sleep_queue);
          }
…
  void thread_update_accounting(bool user)
  {
+     assert(interrupts_disabled());
+
      uint64_t time = get_cycle();

-     assert(interrupts_disabled());
-     assert(irq_spinlock_locked(&THREAD->lock));
-
      if (user)
-         THREAD->ucycles += time - THREAD->last_cycle;
+         atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
      else
-         THREAD->kcycles += time - THREAD->last_cycle;
+         atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);

      THREAD->last_cycle = time;
…
       */

-     irq_spinlock_lock(&thread->lock, true);
-
-     bool sleeping = false;
-     istate_t *istate = thread->udebug.uspace_state;
-     if (istate != NULL) {
-         printf("Scheduling thread stack trace.\n");
-         thread->btrace = true;
-         if (thread->state == Sleeping)
-             sleeping = true;
-     } else
-         printf("Thread interrupt state not available.\n");
-
-     irq_spinlock_unlock(&thread->lock, true);
-
-     if (sleeping)
-         thread_wakeup(thread);
-
+     printf("Scheduling thread stack trace.\n");
+     atomic_set_unordered(&thread->btrace, true);
+
+     thread_wakeup(thread);
      thread_put(thread);
  }
…
      thread_attach(thread, TASK);
  #endif
-     thread_ready(thread);
+     thread_start(thread);
+     thread_put(thread);

      return 0;
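Note on the last hunk: with thread_ready() gone, the kernel-thread test path now pairs thread_start() with thread_put(). In the new thread_start() shown earlier, the function takes its own reference via thread_ref(), so the creator still owns, and must drop, the reference obtained from thread_create(). The caller-side sketch below shows that pattern; it is kernel code, not a standalone program, and the thread_create() signature and flag name are assumed from mainline HelenOS and should be checked against the actual headers.

/*
 * Caller-side sketch of the create -> start -> put pattern (assumed
 * thread_create() signature; illustrative only).
 */
static void worker(void *arg)
{
    (void) arg;
    /* Do the work; on return the wrapper (thread_main_func) is expected to
     * call thread_exit(), as the removed cushion() wrapper used to do. */
}

static errno_t spawn_worker(void)
{
    thread_t *thread = thread_create(worker, NULL, TASK,
        THREAD_FLAG_NONE, "worker");
    if (thread == NULL)
        return ENOMEM;

    /* thread_start() grabs its own reference, so ours is still live. */
    thread_start(thread);

    /* Drop the creation reference once we no longer need the pointer. */
    thread_put(thread);

    return EOK;
}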