  • kernel/generic/src/proc/thread.c

    rb169619 → rdfa4be62
    6060#include <arch/interrupt.h>
    6161#include <smp/ipi.h>
    62 #include <arch/faddr.h>
    6362#include <atomic.h>
    6463#include <memw.h>
     
    8281};
    8382
    84 enum sleep_state {
    85         SLEEP_INITIAL,
    86         SLEEP_ASLEEP,
    87         SLEEP_WOKE,
    88 };
    89 
    9083/** Lock protecting the @c threads ordered dictionary .
    9184 *
     
    115108static int threads_cmp(void *, void *);
    116109
    117 /** Thread wrapper.
    118  *
    119  * This wrapper is provided to ensure that every thread makes a call to
    120  * thread_exit() when its implementing function returns.
    121  *
    122  * interrupts_disable() is assumed.
    123  *
    124  */
    125 static void cushion(void)
    126 {
    127         void (*f)(void *) = THREAD->thread_code;
    128         void *arg = THREAD->thread_arg;
    129         THREAD->last_cycle = get_cycle();
    130 
    131         /* This is where each thread wakes up after its creation */
    132         irq_spinlock_unlock(&THREAD->lock, false);
    133         interrupts_enable();
    134 
    135         f(arg);
    136 
    137         thread_exit();
    138 
    139         /* Not reached */
    140 }
    141 
    142110/** Initialization and allocation for thread_t structure
    143111 *
     
    147115        thread_t *thread = (thread_t *) obj;
    148116
    149         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    150117        link_initialize(&thread->rq_link);
    151118        link_initialize(&thread->wq_link);
     
    229196void thread_wire(thread_t *thread, cpu_t *cpu)
    230197{
    231         irq_spinlock_lock(&thread->lock, true);
    232         thread->cpu = cpu;
     198        ipl_t ipl = interrupts_disable();
     199        atomic_set_unordered(&thread->cpu, cpu);
    233200        thread->nomigrate++;
    234         irq_spinlock_unlock(&thread->lock, true);
    235 }
    236 
    237 /** Invoked right before thread_ready() readies the thread. thread is locked. */
    238 static void before_thread_is_ready(thread_t *thread)
    239 {
    240         assert(irq_spinlock_locked(&thread->lock));
    241 }
    242 
    243 /** Make thread ready
    244  *
    245  * Switch thread to the ready state. Consumes reference passed by the caller.
    246  *
    247  * @param thread Thread to make ready.
    248  *
    249  */
    250 void thread_ready(thread_t *thread)
    251 {
    252         irq_spinlock_lock(&thread->lock, true);
    253 
    254         assert(thread->state != Ready);
    255 
    256         before_thread_is_ready(thread);
    257 
    258         int i = (thread->priority < RQ_COUNT - 1) ?
    259             ++thread->priority : thread->priority;
    260 
    261         /* Prefer the CPU on which the thread ran last */
    262         cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
    263 
    264         thread->state = Ready;
    265 
    266         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
    267 
    268         /*
    269          * Append thread to respective ready queue
    270          * on respective processor.
    271          */
    272 
    273         list_append(&thread->rq_link, &cpu->rq[i].rq);
    274         cpu->rq[i].n++;
    275         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    276 
    277         atomic_inc(&nrdy);
    278         atomic_inc(&cpu->nrdy);
     201        interrupts_restore(ipl);
     202}
     203
     204/** Start a thread that wasn't started yet since it was created.
     205 *
     206 * @param thread A reference to the newly created thread.
     207 */
     208void thread_start(thread_t *thread)
     209{
     210        assert(atomic_get_unordered(&thread->state) == Entering);
     211        thread_requeue_sleeping(thread_ref(thread));
    279212}
    280213
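
    The new thread_start() hands thread_requeue_sleeping() a reference of its own via thread_ref(), while the caller keeps the reference it already holds and drops it separately with thread_put(), as the updated caller at the end of this changeset does. Below is a minimal, self-contained model of that reference hand-off; the names (obj_t, obj_ref, obj_put) are invented for illustration and are not HelenOS API.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
            atomic_int refcount;
    } obj_t;

    /* Take an extra reference (models thread_ref()). */
    static obj_t *obj_ref(obj_t *obj)
    {
            atomic_fetch_add_explicit(&obj->refcount, 1, memory_order_relaxed);
            return obj;
    }

    /* Drop a reference; free on the last one (models thread_put()). */
    static void obj_put(obj_t *obj)
    {
            if (atomic_fetch_sub_explicit(&obj->refcount, 1,
                memory_order_acq_rel) == 1) {
                    printf("last reference dropped, freeing\n");
                    free(obj);
            }
    }

    int main(void)
    {
            obj_t *obj = malloc(sizeof(*obj));
            atomic_init(&obj->refcount, 1);    /* creator's reference */

            obj_t *queued = obj_ref(obj);      /* reference handed to the queue */
            obj_put(obj);                      /* creator drops its reference */
            obj_put(queued);                   /* queue side drops the last one */
            return 0;
    }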
     
    315248        irq_spinlock_unlock(&tidlock, true);
    316249
    317         memset(&thread->saved_context, 0, sizeof(thread->saved_context));
    318         context_set(&thread->saved_context, FADDR(cushion),
    319             (uintptr_t) thread->kstack, STACK_SIZE);
     250        context_create(&thread->saved_context, thread_main_func,
     251            thread->kstack, STACK_SIZE);
    320252
    321253        current_initialize((current_t *) thread->kstack);
    322 
    323         ipl_t ipl = interrupts_disable();
    324         thread->saved_ipl = interrupts_read();
    325         interrupts_restore(ipl);
    326254
    327255        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
     
    329257        thread->thread_code = func;
    330258        thread->thread_arg = arg;
    331         thread->ucycles = 0;
    332         thread->kcycles = 0;
     259        thread->ucycles = ATOMIC_TIME_INITIALIZER();
     260        thread->kcycles = ATOMIC_TIME_INITIALIZER();
    333261        thread->uncounted =
    334262            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    335         thread->priority = -1;          /* Start in rq[0] */
    336         thread->cpu = NULL;
     263        atomic_init(&thread->priority, 0);
     264        atomic_init(&thread->cpu, NULL);
    337265        thread->stolen = false;
    338266        thread->uspace =
     
    340268
    341269        thread->nomigrate = 0;
    342         thread->state = Entering;
     270        atomic_init(&thread->state, Entering);
    343271
    344272        atomic_init(&thread->sleep_queue, NULL);
     
    360288#ifdef CONFIG_UDEBUG
    361289        /* Initialize debugging stuff */
    362         thread->btrace = false;
     290        atomic_init(&thread->btrace, false);
    363291        udebug_thread_initialize(&thread->udebug);
    364292#endif
     
    404332
    405333        if (!thread->uncounted) {
    406                 thread->task->ucycles += thread->ucycles;
    407                 thread->task->kcycles += thread->kcycles;
     334                thread->task->ucycles += atomic_time_read(&thread->ucycles);
     335                thread->task->kcycles += atomic_time_read(&thread->kcycles);
    408336        }
    409337
    410338        irq_spinlock_unlock(&thread->task->lock, false);
    411339
    412         assert((thread->state == Exiting) || (thread->state == Lingering));
     340        assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
    413341
    414342        /* Clear cpu->fpu_owner if set to this thread. */
    415343#ifdef CONFIG_FPU_LAZY
    416         if (thread->cpu) {
     344        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     345        if (cpu) {
    417346                /*
    418347                 * We need to lock for this because the old CPU can concurrently try
     
    420349                 * it to finish. An atomic compare-and-swap wouldn't be enough.
    421350                 */
    422                 irq_spinlock_lock(&thread->cpu->fpu_lock, false);
    423 
    424                 thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
    425                     memory_order_relaxed);
    426 
    427                 if (owner == thread) {
    428                         atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
    429                             memory_order_relaxed);
    430                 }
    431 
    432                 irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
     351                irq_spinlock_lock(&cpu->fpu_lock, false);
     352
     353                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
     354                        atomic_set_unordered(&cpu->fpu_owner, NULL);
     355
     356                irq_spinlock_unlock(&cpu->fpu_lock, false);
    433357        }
    434358#endif
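
    The comment retained above explains why a plain compare-and-swap would not be enough here: the old CPU may be concurrently saving the FPU state, and the exiting thread has to wait for that to finish, which holding fpu_lock provides. Below is a small userspace model of the lock-protected check-and-clear, assuming pthreads; all names are invented for the sketch.

    #include <pthread.h>
    #include <stddef.h>

    typedef struct {
            pthread_mutex_t fpu_lock;
            void *fpu_owner;        /* stands in for a thread_t pointer */
    } cpu_model_t;

    /* Clear the owner only if it is us; the lock serializes this against a
     * concurrent lazy FPU state save on the owning CPU. */
    static void clear_fpu_owner_if(cpu_model_t *cpu, void *thread)
    {
            pthread_mutex_lock(&cpu->fpu_lock);

            if (cpu->fpu_owner == thread)
                    cpu->fpu_owner = NULL;

            pthread_mutex_unlock(&cpu->fpu_lock);
    }

    int main(void)
    {
            cpu_model_t cpu = { PTHREAD_MUTEX_INITIALIZER, NULL };
            int me;

            cpu.fpu_owner = &me;
            clear_fpu_owner_if(&cpu, &me);

            return cpu.fpu_owner != NULL;   /* exits 0: owner was cleared */
    }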
     
    525449        }
    526450
    527         irq_spinlock_lock(&THREAD->lock, true);
    528         THREAD->state = Exiting;
    529         irq_spinlock_unlock(&THREAD->lock, true);
    530 
    531         scheduler();
    532 
    533         panic("should never be reached");
     451        scheduler_enter(Exiting);
     452        unreachable();
    534453}
    535454
     
    579498}
    580499
    581 static void thread_wait_internal(void)
    582 {
    583         assert(THREAD != NULL);
    584 
    585         ipl_t ipl = interrupts_disable();
    586 
    587         if (atomic_load(&haltstate))
    588                 halt();
    589 
    590         /*
    591          * Lock here to prevent a race between entering the scheduler and another
    592          * thread rescheduling this thread.
    593          */
    594         irq_spinlock_lock(&THREAD->lock, false);
    595 
    596         int expected = SLEEP_INITIAL;
    597 
    598         /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
    599         if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
    600             SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
    601                 THREAD->state = Sleeping;
    602                 scheduler_locked(ipl);
    603         } else {
    604                 assert(expected == SLEEP_WOKE);
    605                 /* Return immediately. */
    606                 irq_spinlock_unlock(&THREAD->lock, false);
    607                 interrupts_restore(ipl);
    608         }
    609 }
    610 
    611500static void thread_wait_timeout_callback(void *arg)
    612501{
     
    649538        timeout_t timeout;
    650539
     540        /* Extra check to avoid going to scheduler if we don't need to. */
     541        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
     542            SLEEP_INITIAL)
     543                return THREAD_WAIT_SUCCESS;
     544
    651545        if (deadline != DEADLINE_NEVER) {
    652                 /* Extra check to avoid setting up a deadline if we don't need to. */
    653                 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    654                     SLEEP_INITIAL)
    655                         return THREAD_WAIT_SUCCESS;
    656 
    657546                timeout_initialize(&timeout);
    658547                timeout_register_deadline(&timeout, deadline,
     
    660549        }
    661550
    662         thread_wait_internal();
     551        scheduler_enter(Sleeping);
    663552
    664553        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
     
    674563
    675564        int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    676             memory_order_release);
     565            memory_order_acq_rel);
    677566
    678567        if (state == SLEEP_ASLEEP) {
     
    682571                 * the waking thread by the sleeper in thread_wait_finish().
    683572                 */
    684                 thread_ready(thread);
     573                thread_requeue_sleeping(thread);
    685574        }
    686575}
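
    The removed thread_wait_internal() and the wake path kept above form a small three-state handshake on sleep_state: the sleeper atomically moves SLEEP_INITIAL to SLEEP_ASLEEP and only then enters the scheduler, while the waker swaps in SLEEP_WOKE and requeues the thread only if it was actually asleep; a wakeup that arrives first makes the sleep attempt return immediately. Below is a compact, self-contained model of that protocol using C11 atomics; the helper names are invented for illustration.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

    /* Sleeper side: returns true if the caller should really block. */
    static bool try_sleep(atomic_int *sleep_state)
    {
            int expected = SLEEP_INITIAL;

            /* Mark ourselves asleep only if no wakeup arrived yet. */
            if (atomic_compare_exchange_strong_explicit(sleep_state, &expected,
                SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire))
                    return true;

            /* A wakeup already happened; do not block. */
            return false;
    }

    /* Waker side: returns true if the sleeper must be requeued. */
    static bool wake(atomic_int *sleep_state)
    {
            int prev = atomic_exchange_explicit(sleep_state, SLEEP_WOKE,
                memory_order_acq_rel);

            return prev == SLEEP_ASLEEP;
    }

    int main(void)
    {
            atomic_int sleep_state;
            atomic_init(&sleep_state, SLEEP_INITIAL);

            /* Wakeup races ahead of the sleep attempt. */
            printf("requeue needed: %d\n", wake(&sleep_state));        /* 0 */
            printf("should block:   %d\n", try_sleep(&sleep_state));   /* 0 */

            return 0;
    }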
     
    689578void thread_migration_disable(void)
    690579{
     580        ipl_t ipl = interrupts_disable();
     581
    691582        assert(THREAD);
    692 
    693583        THREAD->nomigrate++;
     584
     585        interrupts_restore(ipl);
    694586}
    695587
     
    697589void thread_migration_enable(void)
    698590{
     591        ipl_t ipl = interrupts_disable();
     592
    699593        assert(THREAD);
    700594        assert(THREAD->nomigrate > 0);
     
    702596        if (THREAD->nomigrate > 0)
    703597                THREAD->nomigrate--;
     598
     599        interrupts_restore(ipl);
    704600}
    705601
     
    731627
    732628/** Wait for another thread to exit.
    733  * This function does not destroy the thread. Reference counting handles that.
     629 * After successful wait, the thread reference is destroyed.
    734630 *
    735631 * @param thread Thread to join on exit.
     
    742638errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    743639{
     640        assert(thread != NULL);
     641
    744642        if (thread == THREAD)
    745643                return EINVAL;
    746644
    747         irq_spinlock_lock(&thread->lock, true);
    748         state_t state = thread->state;
    749         irq_spinlock_unlock(&thread->lock, true);
    750 
    751         if (state == Exiting) {
    752                 return EOK;
    753         } else {
    754                 return _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    755         }
     645        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
     646
     647        if (rc == EOK)
     648                thread_put(thread);
     649
     650        return rc;
     651}
     652
     653void thread_detach(thread_t *thread)
     654{
     655        thread_put(thread);
    756656}
    757657
     
    770670
    771671        (void) waitq_sleep_timeout(&wq, usec);
     672}
     673
     674/** Allow other threads to run. */
     675void thread_yield(void)
     676{
     677        assert(THREAD != NULL);
     678        scheduler_enter(Running);
    772679}
    773680
     
    776683        uint64_t ucycles, kcycles;
    777684        char usuffix, ksuffix;
    778         order_suffix(thread->ucycles, &ucycles, &usuffix);
    779         order_suffix(thread->kcycles, &kcycles, &ksuffix);
     685        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
     686        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
     687
     688        state_t state = atomic_get_unordered(&thread->state);
    780689
    781690        char *name;
     
    791700        else
    792701                printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    793                     thread->tid, name, thread, thread_states[thread->state],
     702                    thread->tid, name, thread, thread_states[state],
    794703                    thread->task, thread->task->container);
    795704
    796705        if (additional) {
    797                 if (thread->cpu)
    798                         printf("%-5u", thread->cpu->id);
     706                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
     707                if (cpu)
     708                        printf("%-5u", cpu->id);
    799709                else
    800710                        printf("none ");
    801711
    802                 if (thread->state == Sleeping) {
     712                if (state == Sleeping) {
    803713                        printf(" %p", thread->sleep_queue);
    804714                }
     
    879789void thread_update_accounting(bool user)
    880790{
     791        assert(interrupts_disabled());
     792
    881793        uint64_t time = get_cycle();
    882794
    883         assert(interrupts_disabled());
    884         assert(irq_spinlock_locked(&THREAD->lock));
    885 
    886795        if (user)
    887                 THREAD->ucycles += time - THREAD->last_cycle;
     796                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
    888797        else
    889                 THREAD->kcycles += time - THREAD->last_cycle;
     798                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
    890799
    891800        THREAD->last_cycle = time;
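
    With the per-thread lock gone, the ucycles/kcycles counters become atomic so that readers such as the thread listing above can sample them without synchronizing with the running thread; only the thread itself writes them, so relaxed increments suffice. A compilable sketch of that pattern with C11 atomics follows; the helper names are invented and only model atomic_time_increment()/atomic_time_read().

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef atomic_uint_fast64_t cycles_t;

    /* Writer side: only the owning thread calls this. */
    static void cycles_increment(cycles_t *c, uint64_t delta)
    {
            atomic_fetch_add_explicit(c, delta, memory_order_relaxed);
    }

    /* Reader side: a slightly stale value is acceptable. */
    static uint64_t cycles_read(cycles_t *c)
    {
            return atomic_load_explicit(c, memory_order_relaxed);
    }

    int main(void)
    {
            cycles_t ucycles;
            atomic_init(&ucycles, 0);

            cycles_increment(&ucycles, 1234);
            printf("%llu\n", (unsigned long long) cycles_read(&ucycles));

            return 0;
    }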
     
    998907         */
    999908
    1000         irq_spinlock_lock(&thread->lock, true);
    1001 
    1002         bool sleeping = false;
    1003         istate_t *istate = thread->udebug.uspace_state;
    1004         if (istate != NULL) {
    1005                 printf("Scheduling thread stack trace.\n");
    1006                 thread->btrace = true;
    1007                 if (thread->state == Sleeping)
    1008                         sleeping = true;
    1009         } else
    1010                 printf("Thread interrupt state not available.\n");
    1011 
    1012         irq_spinlock_unlock(&thread->lock, true);
    1013 
    1014         if (sleeping)
    1015                 thread_wakeup(thread);
    1016 
     909        printf("Scheduling thread stack trace.\n");
     910        atomic_set_unordered(&thread->btrace, true);
     911
     912        thread_wakeup(thread);
    1017913        thread_put(thread);
    1018914}
     
    11151011                thread_attach(thread, TASK);
    11161012#endif
    1117                 thread_ready(thread);
     1013                thread_start(thread);
     1014                thread_put(thread);
    11181015
    11191016                return 0;