Files: 1 edited

Legend:

      Unmodified
    + Added
    - Removed
  • kernel/generic/src/proc/thread.c

    rdfa4be62 → red7e057
      static int threads_cmp(void *, void *);

    + /** Thread wrapper.
    +  *
    +  * This wrapper is provided to ensure that every thread makes a call to
    +  * thread_exit() when its implementing function returns.
    +  *
    +  * interrupts_disable() is assumed.
    +  *
    +  */
    + static void cushion(void)
    + {
    +         void (*f)(void *) = THREAD->thread_code;
    +         void *arg = THREAD->thread_arg;
    +
    +         /* This is where each thread wakes up after its creation */
    +         irq_spinlock_unlock(&THREAD->lock, false);
    +         interrupts_enable();
    +
    +         f(arg);
    +
    +         thread_exit();
    +
    +         /* Not reached */
    + }
    +
      /** Initialization and allocation for thread_t structure
       *
    …
              thread_t *thread = (thread_t *) obj;

    +         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
              link_initialize(&thread->rq_link);
              link_initialize(&thread->wq_link);
    …
      void thread_wire(thread_t *thread, cpu_t *cpu)
      {
    -         ipl_t ipl = interrupts_disable();
    -         atomic_set_unordered(&thread->cpu, cpu);
    +         irq_spinlock_lock(&thread->lock, true);
    +         thread->cpu = cpu;
              thread->nomigrate++;
    -         interrupts_restore(ipl);
    +         irq_spinlock_unlock(&thread->lock, true);
      }

    …
      void thread_start(thread_t *thread)
      {
    -         assert(atomic_get_unordered(&thread->state) == Entering);
    -         thread_requeue_sleeping(thread_ref(thread));
    +         assert(thread->state == Entering);
    +         thread_ready(thread_ref(thread));
    + }
    +
    + /** Make thread ready
    +  *
    +  * Switch thread to the ready state. Consumes reference passed by the caller.
    +  *
    +  * @param thread Thread to make ready.
    +  *
    +  */
    + void thread_ready(thread_t *thread)
    + {
    +         irq_spinlock_lock(&thread->lock, true);
    +
    +         assert(thread->state != Ready);
    +
    +         int i = (thread->priority < RQ_COUNT - 1) ?
    +             ++thread->priority : thread->priority;
    +
    +         /* Prefer the CPU on which the thread ran last */
    +         cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
    +
    +         thread->state = Ready;
    +
    +         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
    +
    +         /*
    +          * Append thread to respective ready queue
    +          * on respective processor.
    +          */
    +
    +         list_append(&thread->rq_link, &cpu->rq[i].rq);
    +         cpu->rq[i].n++;
    +         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    +
    +         atomic_inc(&nrdy);
    +         atomic_inc(&cpu->nrdy);
      }

    …
              irq_spinlock_unlock(&tidlock, true);

    -         context_create(&thread->saved_context, thread_main_func,
    -             thread->kstack, STACK_SIZE);
    +         context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);

              current_initialize((current_t *) thread->kstack);
    …
              thread->thread_code = func;
              thread->thread_arg = arg;
    -         thread->ucycles = ATOMIC_TIME_INITIALIZER();
    -         thread->kcycles = ATOMIC_TIME_INITIALIZER();
    +         thread->ucycles = 0;
    +         thread->kcycles = 0;
              thread->uncounted =
                  ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    -         atomic_init(&thread->priority, 0);
    -         atomic_init(&thread->cpu, NULL);
    +         thread->priority = -1;          /* Start in rq[0] */
    +         thread->cpu = NULL;
              thread->stolen = false;
              thread->uspace =
    …

              thread->nomigrate = 0;
    -         atomic_init(&thread->state, Entering);
    +         thread->state = Entering;

              atomic_init(&thread->sleep_queue, NULL);
    …
      #ifdef CONFIG_UDEBUG
              /* Initialize debugging stuff */
    -         atomic_init(&thread->btrace, false);
    +         thread->btrace = false;
              udebug_thread_initialize(&thread->udebug);
      #endif
    …

              if (!thread->uncounted) {
    -                 thread->task->ucycles += atomic_time_read(&thread->ucycles);
    -                 thread->task->kcycles += atomic_time_read(&thread->kcycles);
    +                 thread->task->ucycles += thread->ucycles;
    +                 thread->task->kcycles += thread->kcycles;
              }

              irq_spinlock_unlock(&thread->task->lock, false);

    -         assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
    +         assert((thread->state == Exiting) || (thread->state == Lingering));

              /* Clear cpu->fpu_owner if set to this thread. */
      #ifdef CONFIG_FPU_LAZY
    -         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    -         if (cpu) {
    +         if (thread->cpu) {
                      /*
                       * We need to lock for this because the old CPU can concurrently try
    …
                       * it to finish. An atomic compare-and-swap wouldn't be enough.
                       */
    -                 irq_spinlock_lock(&cpu->fpu_lock, false);
    -
    -                 if (atomic_get_unordered(&cpu->fpu_owner) == thread)
    -                         atomic_set_unordered(&cpu->fpu_owner, NULL);
    -
    -                 irq_spinlock_unlock(&cpu->fpu_lock, false);
    +                 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
    +
    +                 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
    +                     memory_order_relaxed);
    +
    +                 if (owner == thread) {
    +                         atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
    +                             memory_order_relaxed);
    +                 }
    +
    +                 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
              }
      #endif
    …
                       * the waking thread by the sleeper in thread_wait_finish().
                       */
    -                 thread_requeue_sleeping(thread);
    +                 thread_ready(thread);
              }
      }
    …
      void thread_migration_disable(void)
      {
    -         ipl_t ipl = interrupts_disable();
    -
              assert(THREAD);
    +
              THREAD->nomigrate++;
    -
    -         interrupts_restore(ipl);
      }

    …
      void thread_migration_enable(void)
      {
    -         ipl_t ipl = interrupts_disable();
    -
              assert(THREAD);
              assert(THREAD->nomigrate > 0);
    …
              if (THREAD->nomigrate > 0)
                      THREAD->nomigrate--;
    -
    -         interrupts_restore(ipl);
      }

    …
                      return EINVAL;

    -         errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    +         irq_spinlock_lock(&thread->lock, true);
    +         state_t state = thread->state;
    +         irq_spinlock_unlock(&thread->lock, true);
    +
    +         errno_t rc = EOK;
    +
    +         if (state != Exiting)
    +                 rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);

              if (rc == EOK)
    …
              uint64_t ucycles, kcycles;
              char usuffix, ksuffix;
    -         order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
    -         order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
    -
    -         state_t state = atomic_get_unordered(&thread->state);
    +         order_suffix(thread->ucycles, &ucycles, &usuffix);
    +         order_suffix(thread->kcycles, &kcycles, &ksuffix);

              char *name;
    …
              else
                      printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    -                     thread->tid, name, thread, thread_states[state],
    +                     thread->tid, name, thread, thread_states[thread->state],
                          thread->task, thread->task->container);

              if (additional) {
    -                 cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    -                 if (cpu)
    -                         printf("%-5u", cpu->id);
    +                 if (thread->cpu)
    +                         printf("%-5u", thread->cpu->id);
                      else
                              printf("none ");

    -                 if (state == Sleeping) {
    +                 if (thread->state == Sleeping) {
                              printf(" %p", thread->sleep_queue);
                      }
    …
      void thread_update_accounting(bool user)
      {
    +         uint64_t time = get_cycle();
    +
              assert(interrupts_disabled());
    -
    -         uint64_t time = get_cycle();
    +         assert(irq_spinlock_locked(&THREAD->lock));

              if (user)
    -                 atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
    +                 THREAD->ucycles += time - THREAD->last_cycle;
              else
    -                 atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
    +                 THREAD->kcycles += time - THREAD->last_cycle;

              THREAD->last_cycle = time;
    …
               */

    -         printf("Scheduling thread stack trace.\n");
    -         atomic_set_unordered(&thread->btrace, true);
    -
    -         thread_wakeup(thread);
    +         irq_spinlock_lock(&thread->lock, true);
    +
    +         bool sleeping = false;
    +         istate_t *istate = thread->udebug.uspace_state;
    +         if (istate != NULL) {
    +                 printf("Scheduling thread stack trace.\n");
    +                 thread->btrace = true;
    +                 if (thread->state == Sleeping)
    +                         sleeping = true;
    +         } else
    +                 printf("Thread interrupt state not available.\n");
    +
    +         irq_spinlock_unlock(&thread->lock, true);
    +
    +         if (sleeping)
    +                 thread_wakeup(thread);
    +
              thread_put(thread);
      }
    …
                      thread_attach(thread, TASK);
      #endif
    -                 thread_start(thread);
    -                 thread_put(thread);
    +                 thread_ready(thread);

                      return 0;
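
The cushion() wrapper added by this changeset follows a common trampoline pattern: the thread's real entry point is not the user-supplied function but a wrapper that releases creation-time locking, calls the function, and then guarantees that thread_exit() runs when the function returns. As a rough standalone illustration only (not part of the changeset), the sketch below shows the same pattern with plain POSIX threads; the names cushion_arg_t, cushion() and work() are hypothetical stand-ins, and pthread_exit() plays the role of the kernel's thread_exit().

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical argument bundle handed to the wrapper. */
    typedef struct {
            void (*thread_code)(void *);    /* function the thread should run */
            void *thread_arg;               /* its argument */
    } cushion_arg_t;

    /* Trampoline: every thread starts here, so teardown always runs even
     * when thread_code simply returns without an explicit exit call. */
    static void *cushion(void *p)
    {
            cushion_arg_t *c = p;

            c->thread_code(c->thread_arg);

            /* Stand-in for thread_exit(): reached whenever the implementing
             * function returns. */
            free(c);
            pthread_exit(NULL);
    }

    static void work(void *arg)
    {
            printf("worker running, arg = %p\n", arg);
    }

    int main(void)
    {
            cushion_arg_t *c = malloc(sizeof(*c));
            if (c == NULL)
                    return 1;

            c->thread_code = work;
            c->thread_arg = NULL;

            pthread_t t;
            if (pthread_create(&t, NULL, cushion, c) != 0) {
                    free(c);
                    return 1;
            }

            pthread_join(t, NULL);
            return 0;
    }

Built with e.g. cc -pthread, the point of the sketch is that the wrapper, not the worker function, owns the exit path, which is exactly why the changeset points context_create() at cushion rather than at the thread's implementing function.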