  • kernel/generic/src/proc/thread.c

--- kernel/generic/src/proc/thread.c (r ed7e057)
+++ kernel/generic/src/proc/thread.c (r dfa4be62)
@@ -108 +108 @@
 static int threads_cmp(void *, void *);

-/** Thread wrapper.
- *
- * This wrapper is provided to ensure that every thread makes a call to
- * thread_exit() when its implementing function returns.
- *
- * interrupts_disable() is assumed.
- *
- */
-static void cushion(void)
-{
-        void (*f)(void *) = THREAD->thread_code;
-        void *arg = THREAD->thread_arg;
-
-        /* This is where each thread wakes up after its creation */
-        irq_spinlock_unlock(&THREAD->lock, false);
-        interrupts_enable();
-
-        f(arg);
-
-        thread_exit();
-
-        /* Not reached */
-}
-
 /** Initialization and allocation for thread_t structure
  *
     
@@ -139 +115 @@
         thread_t *thread = (thread_t *) obj;

-        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
         link_initialize(&thread->rq_link);
         link_initialize(&thread->wq_link);
     
@@ -221 +196 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-        irq_spinlock_lock(&thread->lock, true);
-        thread->cpu = cpu;
+        ipl_t ipl = interrupts_disable();
+        atomic_set_unordered(&thread->cpu, cpu);
         thread->nomigrate++;
-        irq_spinlock_unlock(&thread->lock, true);
+        interrupts_restore(ipl);
 }

     
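The thread_wire() hunk above is the pattern repeated throughout the changeset: a field that used to be written under the per-thread irq spinlock becomes an unordered atomic, and the lock/unlock pair shrinks to a plain interrupt-disable window. A minimal sketch of that pattern, assuming only the ipl_t, interrupts_disable()/interrupts_restore() and atomic_set_unordered()/atomic_get_unordered() primitives visible in this diff (the helper names are illustrative, not part of the changeset):

    /* Writer: update the field with preemption disabled on the local CPU. */
    static void example_set_cpu(thread_t *thread, cpu_t *cpu)
    {
            ipl_t ipl = interrupts_disable();
            atomic_set_unordered(&thread->cpu, cpu);  /* was: thread->cpu = cpu under thread->lock */
            interrupts_restore(ipl);
    }

    /* Reader: lock-free access; callers tolerate a possibly stale value. */
    static cpu_t *example_get_cpu(thread_t *thread)
    {
            return atomic_get_unordered(&thread->cpu);
    }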
@@ -233 +208 @@
 void thread_start(thread_t *thread)
 {
-        assert(thread->state == Entering);
-        thread_ready(thread_ref(thread));
-}
-
-/** Make thread ready
- *
- * Switch thread to the ready state. Consumes reference passed by the caller.
- *
- * @param thread Thread to make ready.
- *
- */
-void thread_ready(thread_t *thread)
-{
-        irq_spinlock_lock(&thread->lock, true);
-
-        assert(thread->state != Ready);
-
-        int i = (thread->priority < RQ_COUNT - 1) ?
-            ++thread->priority : thread->priority;
-
-        /* Prefer the CPU on which the thread ran last */
-        cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
-
-        thread->state = Ready;
-
-        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-
-        /*
-         * Append thread to respective ready queue
-         * on respective processor.
-         */
-
-        list_append(&thread->rq_link, &cpu->rq[i].rq);
-        cpu->rq[i].n++;
-        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
-        atomic_inc(&nrdy);
-        atomic_inc(&cpu->nrdy);
+        assert(atomic_get_unordered(&thread->state) == Entering);
+        thread_requeue_sleeping(thread_ref(thread));
 }

     
@@ -309 +248 @@
         irq_spinlock_unlock(&tidlock, true);

-        context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
+        context_create(&thread->saved_context, thread_main_func,
+            thread->kstack, STACK_SIZE);

         current_initialize((current_t *) thread->kstack);
     
@@ -317 +257 @@
         thread->thread_code = func;
         thread->thread_arg = arg;
-        thread->ucycles = 0;
-        thread->kcycles = 0;
+        thread->ucycles = ATOMIC_TIME_INITIALIZER();
+        thread->kcycles = ATOMIC_TIME_INITIALIZER();
         thread->uncounted =
             ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-        thread->priority = -1;          /* Start in rq[0] */
-        thread->cpu = NULL;
+        atomic_init(&thread->priority, 0);
+        atomic_init(&thread->cpu, NULL);
         thread->stolen = false;
         thread->uspace =
     
@@ -328 +268 @@

         thread->nomigrate = 0;
-        thread->state = Entering;
+        atomic_init(&thread->state, Entering);

         atomic_init(&thread->sleep_queue, NULL);
     
@@ -348 +288 @@
 #ifdef CONFIG_UDEBUG
         /* Initialize debugging stuff */
-        thread->btrace = false;
+        atomic_init(&thread->btrace, false);
         udebug_thread_initialize(&thread->udebug);
 #endif
     
@@ -392 +332 @@

         if (!thread->uncounted) {
-                thread->task->ucycles += thread->ucycles;
-                thread->task->kcycles += thread->kcycles;
+                thread->task->ucycles += atomic_time_read(&thread->ucycles);
+                thread->task->kcycles += atomic_time_read(&thread->kcycles);
         }

         irq_spinlock_unlock(&thread->task->lock, false);

-        assert((thread->state == Exiting) || (thread->state == Lingering));
+        assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));

         /* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-        if (thread->cpu) {
+        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+        if (cpu) {
                 /*
                  * We need to lock for this because the old CPU can concurrently try
     
@@ -408 +349 @@
                  * it to finish. An atomic compare-and-swap wouldn't be enough.
                  */
-                irq_spinlock_lock(&thread->cpu->fpu_lock, false);
-
-                thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
-                    memory_order_relaxed);
-
-                if (owner == thread) {
-                        atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
-                            memory_order_relaxed);
-                }
-
-                irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
+                irq_spinlock_lock(&cpu->fpu_lock, false);
+
+                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
+                        atomic_set_unordered(&cpu->fpu_owner, NULL);
+
+                irq_spinlock_unlock(&cpu->fpu_lock, false);
         }
 #endif
     
@@ -635 +571 @@
                  * the waking thread by the sleeper in thread_wait_finish().
                  */
-                thread_ready(thread);
+                thread_requeue_sleeping(thread);
         }
 }
     
@@ -642 +578 @@
 void thread_migration_disable(void)
 {
+        ipl_t ipl = interrupts_disable();
+
         assert(THREAD);
-
         THREAD->nomigrate++;
+
+        interrupts_restore(ipl);
 }

     
@@ -650 +589 @@
 void thread_migration_enable(void)
 {
+        ipl_t ipl = interrupts_disable();
+
         assert(THREAD);
         assert(THREAD->nomigrate > 0);
     
@@ -655 +596 @@
         if (THREAD->nomigrate > 0)
                 THREAD->nomigrate--;
+
+        interrupts_restore(ipl);
 }

     
@@ -700 +643 @@
                 return EINVAL;

-        irq_spinlock_lock(&thread->lock, true);
-        state_t state = thread->state;
-        irq_spinlock_unlock(&thread->lock, true);
-
-        errno_t rc = EOK;
-
-        if (state != Exiting)
-                rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);

         if (rc == EOK)
     
@@ -747 +683 @@
         uint64_t ucycles, kcycles;
         char usuffix, ksuffix;
-        order_suffix(thread->ucycles, &ucycles, &usuffix);
-        order_suffix(thread->kcycles, &kcycles, &ksuffix);
+        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
+        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
+
+        state_t state = atomic_get_unordered(&thread->state);

         char *name;
     
@@ -762 +700 @@
         else
                 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-                    thread->tid, name, thread, thread_states[thread->state],
+                    thread->tid, name, thread, thread_states[state],
                     thread->task, thread->task->container);

         if (additional) {
-                if (thread->cpu)
-                        printf("%-5u", thread->cpu->id);
+                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+                if (cpu)
+                        printf("%-5u", cpu->id);
                 else
                         printf("none ");

-                if (thread->state == Sleeping) {
+                if (state == Sleeping) {
                         printf(" %p", thread->sleep_queue);
                 }
     
@@ -850 +789 @@
 void thread_update_accounting(bool user)
 {
+        assert(interrupts_disabled());
+
         uint64_t time = get_cycle();

-        assert(interrupts_disabled());
-        assert(irq_spinlock_locked(&THREAD->lock));
-
         if (user)
-                THREAD->ucycles += time - THREAD->last_cycle;
+                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
         else
-                THREAD->kcycles += time - THREAD->last_cycle;
+                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);

         THREAD->last_cycle = time;
     
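Together with the creation and cleanup hunks earlier, the accounting change above switches ucycles/kcycles from plain counters guarded by thread->lock to the atomic time interface: ATOMIC_TIME_INITIALIZER() at creation, atomic_time_increment() on the charging path, atomic_time_read() when the values are folded into the task or printed. A usage sketch built only from those calls; the helper functions themselves are hypothetical:

    /* Charge elapsed cycles to the right counter; mirrors thread_update_accounting(). */
    static void example_charge(thread_t *thread, uint64_t now, bool user)
    {
            uint64_t delta = now - thread->last_cycle;

            if (user)
                    atomic_time_increment(&thread->ucycles, delta);
            else
                    atomic_time_increment(&thread->kcycles, delta);

            thread->last_cycle = now;
    }

    /* Readers no longer need thread->lock to obtain a consistent 64-bit total. */
    static uint64_t example_total_cycles(thread_t *thread)
    {
            return atomic_time_read(&thread->ucycles) + atomic_time_read(&thread->kcycles);
    }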
@@ -969 +907 @@
          */

-        irq_spinlock_lock(&thread->lock, true);
-
-        bool sleeping = false;
-        istate_t *istate = thread->udebug.uspace_state;
-        if (istate != NULL) {
-                printf("Scheduling thread stack trace.\n");
-                thread->btrace = true;
-                if (thread->state == Sleeping)
-                        sleeping = true;
-        } else
-                printf("Thread interrupt state not available.\n");
-
-        irq_spinlock_unlock(&thread->lock, true);
-
-        if (sleeping)
-                thread_wakeup(thread);
-
+        printf("Scheduling thread stack trace.\n");
+        atomic_set_unordered(&thread->btrace, true);
+
+        thread_wakeup(thread);
         thread_put(thread);
 }
     
@@ -1086 +1011 @@
                 thread_attach(thread, TASK);
 #endif
-                thread_ready(thread);
+                thread_start(thread);
+                thread_put(thread);

                 return 0;
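The final hunk reflects the new reference convention around thread_start(): the removed thread_ready() consumed the reference passed by its caller, while thread_start() takes its own reference internally (thread_ref() inside thread_start(), shown earlier), so the creating code now drops its reference explicitly with thread_put(). A sketch of the resulting creator-side sequence; the thread_create() argument list is abbreviated because it is not part of this changeset:

    thread_t *thread = thread_create(/* ... */);
    if (thread != NULL) {
            thread_attach(thread, TASK);
            thread_start(thread);   /* takes its own reference via thread_ref() internally */
            thread_put(thread);     /* drop the reference obtained from thread_create() */
    }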