File: 1 edited

Legend:

  ' ' unmodified
  '+' added
  '-' removed
  • kernel/generic/src/proc/thread.c (r128359eb → rdfa4be62)
@@ -60 +60 @@
 #include <arch/interrupt.h>
 #include <smp/ipi.h>
-#include <arch/faddr.h>
 #include <atomic.h>
-#include <mem.h>
+#include <memw.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -69 +68 @@
 #include <errno.h>
 #include <debug.h>
+#include <halt.h>

 /** Thread states */
     
@@ -94 +94 @@
  *
  * Members are of type thread_t.
+ *
+ * This structure contains weak references. Any reference from it must not leave
+ * threads_lock critical section unless strengthened via thread_try_ref().
  */
 odict_t threads;
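Note: the new comment documents that the global threads dictionary holds only weak
references. The discipline this implies is embodied by thread_try_get() and
thread_put(), both added later in this changeset; a minimal usage sketch (the
variable names are illustrative only):

    /* Upgrade a weak reference before using it outside threads_lock. */
    thread_t *t = thread_try_get(weak);   /* NULL if the thread is already gone */
    if (t != NULL) {
            /* ... t cannot be destroyed under us while we hold the reference ... */
            thread_put(t);                /* drop the strong reference when done */
    }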
     
@@ -102 +105 @@
 static slab_cache_t *thread_cache;

-#ifdef CONFIG_FPU
-slab_cache_t *fpu_context_cache;
-#endif
-
 static void *threads_getkey(odlink_t *);
 static int threads_cmp(void *, void *);

-/** Thread wrapper.
- *
- * This wrapper is provided to ensure that every thread makes a call to
- * thread_exit() when its implementing function returns.
- *
- * interrupts_disable() is assumed.
- *
- */
-static void cushion(void)
-{
-        void (*f)(void *) = THREAD->thread_code;
-        void *arg = THREAD->thread_arg;
-        THREAD->last_cycle = get_cycle();
-
-        /* This is where each thread wakes up after its creation */
-        irq_spinlock_unlock(&THREAD->lock, false);
-        interrupts_enable();
-
-        f(arg);
-
-        /* Accumulate accounting to the task */
-        irq_spinlock_lock(&THREAD->lock, true);
-        if (!THREAD->uncounted) {
-                thread_update_accounting(true);
-                uint64_t ucycles = THREAD->ucycles;
-                THREAD->ucycles = 0;
-                uint64_t kcycles = THREAD->kcycles;
-                THREAD->kcycles = 0;
-
-                irq_spinlock_pass(&THREAD->lock, &TASK->lock);
-                TASK->ucycles += ucycles;
-                TASK->kcycles += kcycles;
-                irq_spinlock_unlock(&TASK->lock, true);
-        } else
-                irq_spinlock_unlock(&THREAD->lock, true);
-
-        thread_exit();
-
-        /* Not reached */
-}
-
 /** Initialization and allocation for thread_t structure
  *
     
@@ -157 +115 @@
         thread_t *thread = (thread_t *) obj;

-        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
         link_initialize(&thread->rq_link);
         link_initialize(&thread->wq_link);
     
@@ -164 +121 @@
         /* call the architecture-specific part of the constructor */
         thr_constructor_arch(thread);
-
-#ifdef CONFIG_FPU
-        thread->saved_fpu_context = slab_alloc(fpu_context_cache,
-            FRAME_ATOMIC | kmflags);
-        if (!thread->saved_fpu_context)
-                return ENOMEM;
-#endif /* CONFIG_FPU */

         /*
     
@@ -198 +148 @@
         uintptr_t stack_phys =
             frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
-        if (!stack_phys) {
-#ifdef CONFIG_FPU
-                assert(thread->saved_fpu_context);
-                slab_free(fpu_context_cache, thread->saved_fpu_context);
-#endif
+        if (!stack_phys)
                 return ENOMEM;
-        }

         thread->kstack = (uint8_t *) PA2KA(stack_phys);
     
@@ -225 +170 @@
         frame_free(KA2PA(thread->kstack), STACK_FRAMES);

-#ifdef CONFIG_FPU
-        assert(thread->saved_fpu_context);
-        slab_free(fpu_context_cache, thread->saved_fpu_context);
-#endif
-
         return STACK_FRAMES;  /* number of frames freed */
 }
     
@@ -243 +183 @@

         atomic_store(&nrdy, 0);
-        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
+        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
             thr_constructor, thr_destructor, 0);

-#ifdef CONFIG_FPU
-        fpu_context_cache = slab_cache_create("fpu_context_t",
-            sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
-#endif
-
         odict_initialize(&threads, threads_getkey, threads_cmp);
 }
     
@@ -261 +196 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-        irq_spinlock_lock(&thread->lock, true);
-        thread->cpu = cpu;
-        thread->wired = true;
-        irq_spinlock_unlock(&thread->lock, true);
-}
-
-/** Invoked right before thread_ready() readies the thread. thread is locked. */
-static void before_thread_is_ready(thread_t *thread)
-{
-        assert(irq_spinlock_locked(&thread->lock));
-}
-
-/** Make thread ready
- *
- * Switch thread to the ready state.
- *
- * @param thread Thread to make ready.
- *
- */
-void thread_ready(thread_t *thread)
-{
-        irq_spinlock_lock(&thread->lock, true);
-
-        assert(thread->state != Ready);
-
-        before_thread_is_ready(thread);
-
-        int i = (thread->priority < RQ_COUNT - 1) ?
-            ++thread->priority : thread->priority;
-
-        cpu_t *cpu;
-        if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
-                /* Cannot ready to another CPU */
-                assert(thread->cpu != NULL);
-                cpu = thread->cpu;
-        } else if (thread->stolen) {
-                /* Ready to the stealing CPU */
-                cpu = CPU;
-        } else if (thread->cpu) {
-                /* Prefer the CPU on which the thread ran last */
-                assert(thread->cpu != NULL);
-                cpu = thread->cpu;
-        } else {
-                cpu = CPU;
-        }
-
-        thread->state = Ready;
-
-        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-
-        /*
-         * Append thread to respective ready queue
-         * on respective processor.
-         */
-
-        list_append(&thread->rq_link, &cpu->rq[i].rq);
-        cpu->rq[i].n++;
-        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
-        atomic_inc(&nrdy);
-        atomic_inc(&cpu->nrdy);
+        ipl_t ipl = interrupts_disable();
+        atomic_set_unordered(&thread->cpu, cpu);
+        thread->nomigrate++;
+        interrupts_restore(ipl);
+}
+
+/** Start a thread that wasn't started yet since it was created.
+ *
+ * @param thread A reference to the newly created thread.
+ */
+void thread_start(thread_t *thread)
+{
+        assert(atomic_get_unordered(&thread->state) == Entering);
+        thread_requeue_sleeping(thread_ref(thread));
 }

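Note: thread_ready() and its run-queue selection logic leave this file; a new thread
now starts via thread_start(), which passes a fresh reference to
thread_requeue_sleeping(). The creation flow used by kmthread at the end of this
changeset becomes the following sketch (thread_create()'s full parameter list and
THREAD_FLAG_NONE are assumed from the surrounding code, error handling omitted):

    thread_t *t = thread_create(worker, arg, TASK, THREAD_FLAG_NONE, "worker");
    thread_attach(t, TASK);
    thread_start(t);   /* takes its own reference via thread_ref() */
    thread_put(t);     /* drop the creation reference if t is no longer needed */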
     
@@ -346 +234 @@
                 return NULL;

+        refcount_init(&thread->refcount);
+
         if (thread_create_arch(thread, flags) != EOK) {
                 slab_free(thread_cache, thread);
     
@@ -358 +248 @@
         irq_spinlock_unlock(&tidlock, true);

-        memset(&thread->saved_context, 0, sizeof(thread->saved_context));
-        context_set(&thread->saved_context, FADDR(cushion),
-            (uintptr_t) thread->kstack, STACK_SIZE);
+        context_create(&thread->saved_context, thread_main_func,
+            thread->kstack, STACK_SIZE);

         current_initialize((current_t *) thread->kstack);
-
-        ipl_t ipl = interrupts_disable();
-        thread->saved_context.ipl = interrupts_read();
-        interrupts_restore(ipl);

         str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
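Note: the cushion() trampoline removed earlier is superseded by thread_main_func,
installed through the generic context_create() instead of the arch-specific
FADDR()/context_set() pair. thread_main_func itself is defined outside this file;
conceptually such a wrapper still has the same duties as the old cushion(). A
conceptual sketch (names other than THREAD/thread_exit() are hypothetical):

    /* Conceptual entry trampoline: every thread must end in thread_exit(). */
    static void entry_trampoline(void)
    {
            void (*f)(void *) = THREAD->thread_code;
            void *arg = THREAD->thread_arg;

            interrupts_enable();    /* scheduler handoff complete */
            f(arg);                 /* run the thread's payload */
            thread_exit();          /* never returns */
    }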
     
@@ -372 +257 @@
         thread->thread_code = func;
         thread->thread_arg = arg;
-        thread->ticks = -1;
-        thread->ucycles = 0;
-        thread->kcycles = 0;
+        thread->ucycles = ATOMIC_TIME_INITIALIZER();
+        thread->kcycles = ATOMIC_TIME_INITIALIZER();
         thread->uncounted =
             ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-        thread->priority = -1;          /* Start in rq[0] */
-        thread->cpu = NULL;
-        thread->wired = false;
+        atomic_init(&thread->priority, 0);
+        atomic_init(&thread->cpu, NULL);
         thread->stolen = false;
         thread->uspace =
     
@@ -385 +268 @@

         thread->nomigrate = 0;
-        thread->state = Entering;
-
-        timeout_initialize(&thread->sleep_timeout);
-        thread->sleep_interruptible = false;
-        thread->sleep_composable = false;
-        thread->sleep_queue = NULL;
-        thread->timeout_pending = false;
+        atomic_init(&thread->state, Entering);
+
+        atomic_init(&thread->sleep_queue, NULL);

         thread->in_copy_from_uspace = false;
     
@@ -397 +276 @@

         thread->interrupted = false;
-        thread->detached = false;
+        atomic_init(&thread->sleep_state, SLEEP_INITIAL);
+
         waitq_initialize(&thread->join_wq);

     
@@ -403 +283 @@

         thread->fpu_context_exists = false;
-        thread->fpu_context_engaged = false;

         odlink_initialize(&thread->lthreads);
     
@@ -409 +288 @@
 #ifdef CONFIG_UDEBUG
         /* Initialize debugging stuff */
-        thread->btrace = false;
+        atomic_init(&thread->btrace, false);
         udebug_thread_initialize(&thread->udebug);
 #endif
     
@@ -423 +302 @@
  * Detach thread from all queues, cpus etc. and destroy it.
  *
- * @param thread  Thread to be destroyed.
- * @param irq_res Indicate whether it should unlock thread->lock
- *                in interrupts-restore mode.
- *
- */
-void thread_destroy(thread_t *thread, bool irq_res)
-{
-        assert(irq_spinlock_locked(&thread->lock));
-        assert((thread->state == Exiting) || (thread->state == Lingering));
+ * @param obj  Thread to be destroyed.
+ *
+ */
+static void thread_destroy(void *obj)
+{
+        thread_t *thread = (thread_t *) obj;
+
+        assert_link_not_used(&thread->rq_link);
+        assert_link_not_used(&thread->wq_link);
+
         assert(thread->task);
-        assert(thread->cpu);
-
-        irq_spinlock_lock(&thread->cpu->lock, false);
-        if (thread->cpu->fpu_owner == thread)
-                thread->cpu->fpu_owner = NULL;
-        irq_spinlock_unlock(&thread->cpu->lock, false);
-
-        irq_spinlock_pass(&thread->lock, &threads_lock);
-
+
+        ipl_t ipl = interrupts_disable();
+
+        /* Remove thread from global list. */
+        irq_spinlock_lock(&threads_lock, false);
         odict_remove(&thread->lthreads);
-
-        irq_spinlock_pass(&threads_lock, &thread->task->lock);
+        irq_spinlock_unlock(&threads_lock, false);
+
+        /* Remove thread from task's list and accumulate accounting. */
+        irq_spinlock_lock(&thread->task->lock, false);
+
+        list_remove(&thread->th_link);

         /*
-         * Detach from the containing task.
+         * No other CPU has access to this thread anymore, so we don't need
+         * thread->lock for accessing thread's fields after this point.
          */
-        list_remove(&thread->th_link);
-        irq_spinlock_unlock(&thread->task->lock, irq_res);
+
+        if (!thread->uncounted) {
+                thread->task->ucycles += atomic_time_read(&thread->ucycles);
+                thread->task->kcycles += atomic_time_read(&thread->kcycles);
+        }
+
+        irq_spinlock_unlock(&thread->task->lock, false);
+
+        assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
+
+        /* Clear cpu->fpu_owner if set to this thread. */
+#ifdef CONFIG_FPU_LAZY
+        cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+        if (cpu) {
+                /*
+                 * We need to lock for this because the old CPU can concurrently try
+                 * to dump this thread's FPU state, in which case we need to wait for
+                 * it to finish. An atomic compare-and-swap wouldn't be enough.
+                 */
+                irq_spinlock_lock(&cpu->fpu_lock, false);
+
+                if (atomic_get_unordered(&cpu->fpu_owner) == thread)
+                        atomic_set_unordered(&cpu->fpu_owner, NULL);
+
+                irq_spinlock_unlock(&cpu->fpu_lock, false);
+        }
+#endif
+
+        interrupts_restore(ipl);

         /*
     
@@ -456 +364 @@
          */
         task_release(thread->task);
+        thread->task = NULL;
+
         slab_free(thread_cache, thread);
+}
+
+void thread_put(thread_t *thread)
+{
+        if (refcount_down(&thread->refcount)) {
+                thread_destroy(thread);
+        }
 }

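Note: thread_destroy() is now a static destructor reached only through the reference
count: refcount_init() in thread_create() creates the first reference, holders take
more with thread_ref()/thread_try_ref(), and the final thread_put() frees the thread.
A sketch of the ownership rule this introduces:

    /* Keep a thread alive across a window in which it may exit: */
    thread_t *mine = thread_ref(t);     /* take our own strong reference */
    /* ... use mine ... */
    thread_put(mine);                   /* the last put runs thread_destroy() */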
     
@@ -470 +387 @@
 void thread_attach(thread_t *thread, task_t *task)
 {
+        ipl_t ipl = interrupts_disable();
+
         /*
          * Attach to the specified task.
          */
-        irq_spinlock_lock(&task->lock, true);
+        irq_spinlock_lock(&task->lock, false);

         /* Hold a reference to the task. */
     
@@ -484 +403 @@
         list_append(&thread->th_link, &task->threads);

-        irq_spinlock_pass(&task->lock, &threads_lock);
+        irq_spinlock_unlock(&task->lock, false);

         /*
          * Register this thread in the system-wide dictionary.
          */
+        irq_spinlock_lock(&threads_lock, false);
         odict_insert(&thread->lthreads, &threads, NULL);
-        irq_spinlock_unlock(&threads_lock, true);
+        irq_spinlock_unlock(&threads_lock, false);
+
+        interrupts_restore(ipl);
 }

     
@@ -527 +449 @@
         }

-restart:
-        irq_spinlock_lock(&THREAD->lock, true);
-        if (THREAD->timeout_pending) {
-                /* Busy waiting for timeouts in progress */
-                irq_spinlock_unlock(&THREAD->lock, true);
-                goto restart;
-        }
-
-        THREAD->state = Exiting;
-        irq_spinlock_unlock(&THREAD->lock, true);
-
-        scheduler();
-
-        /* Not reached */
-        while (true)
-                ;
+        scheduler_enter(Exiting);
+        unreachable();
 }

     
@@ -551 +459 @@
  * blocking call was interruptable. See waitq_sleep_timeout().
  *
- * The caller must guarantee the thread object is valid during the entire
- * function, eg by holding the threads_lock lock.
- *
  * Interrupted threads automatically exit when returning back to user space.
  *
- * @param thread A valid thread object. The caller must guarantee it
- *               will remain valid until thread_interrupt() exits.
+ * @param thread A valid thread object.
  */
 void thread_interrupt(thread_t *thread)
 {
         assert(thread != NULL);
-
-        irq_spinlock_lock(&thread->lock, true);
-
         thread->interrupted = true;
-        bool sleeping = (thread->state == Sleeping);
-
-        irq_spinlock_unlock(&thread->lock, true);
-
-        if (sleeping)
-                waitq_interrupt_sleep(thread);
-}
-
-/** Returns true if the thread was interrupted.
- *
- * @param thread A valid thread object. User must guarantee it will
- *               be alive during the entire call.
- * @return true if the thread was already interrupted via thread_interrupt().
- */
-bool thread_interrupted(thread_t *thread)
+        thread_wakeup(thread);
+}
+
+/** Prepare for putting the thread to sleep.
+ *
+ * @returns whether the thread is currently terminating. If THREAD_OK
+ * is returned, the thread is guaranteed to be woken up instantly if the thread
+ * is terminated at any time between this function's return and
+ * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
+ * go to sleep, but doing so will delay termination.
+ */
+thread_termination_state_t thread_wait_start(void)
+{
+        assert(THREAD != NULL);
+
+        /*
+         * This is an exchange rather than a store so that we can use the acquire
+         * semantics, which is needed to ensure that code after this operation sees
+         * memory ops made before thread_wakeup() in other thread, if that wakeup
+         * was reset by this operation.
+         *
+         * In particular, we need this to ensure we can't miss the thread being
+         * terminated concurrently with a synchronization primitive preparing to
+         * sleep.
+         */
+        (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
+            memory_order_acquire);
+
+        return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
+}
+
+static void thread_wait_timeout_callback(void *arg)
+{
+        thread_wakeup(arg);
+}
+
+/**
+ * Suspends this thread's execution until thread_wakeup() is called on it,
+ * or deadline is reached.
+ *
+ * The way this would normally be used is that the current thread call
+ * thread_wait_start(), and if interruption has not been signaled, stores
+ * a reference to itself in a synchronized structure (such as waitq).
+ * After that, it releases any spinlocks it might hold and calls this function.
+ *
+ * The thread doing the wakeup will acquire the thread's reference from said
+ * synchronized structure and calls thread_wakeup() on it.
+ *
+ * Notably, there can be more than one thread performing wakeup.
+ * The number of performed calls to thread_wakeup(), or their relative
+ * ordering with thread_wait_finish(), does not matter. However, calls to
+ * thread_wakeup() are expected to be synchronized with thread_wait_start()
+ * with which they are associated, otherwise wakeups may be missed.
+ * However, the operation of thread_wakeup() is defined at any time,
+ * synchronization notwithstanding (in the sense of C un/defined behavior),
+ * and is in fact used to interrupt waiting threads by external events.
+ * The waiting thread must operate correctly in face of spurious wakeups,
+ * and clean up its reference in the synchronization structure if necessary.
+ *
+ * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
+ * for it to have been waken up by the timeout, but the caller must assume
+ * that proper wakeups, timeouts and interrupts may occur concurrently, so
+ * the fact timeout has been registered does not necessarily mean the thread
+ * has not been woken up or interrupted.
+ */
+thread_wait_result_t thread_wait_finish(deadline_t deadline)
+{
+        assert(THREAD != NULL);
+
+        timeout_t timeout;
+
+        /* Extra check to avoid going to scheduler if we don't need to. */
+        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+            SLEEP_INITIAL)
+                return THREAD_WAIT_SUCCESS;
+
+        if (deadline != DEADLINE_NEVER) {
+                timeout_initialize(&timeout);
+                timeout_register_deadline(&timeout, deadline,
+                    thread_wait_timeout_callback, THREAD);
+        }
+
+        scheduler_enter(Sleeping);
+
+        if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
+                return THREAD_WAIT_TIMEOUT;
+        } else {
+                return THREAD_WAIT_SUCCESS;
+        }
+}
+
+void thread_wakeup(thread_t *thread)
 {
         assert(thread != NULL);

-        bool interrupted;
-
-        irq_spinlock_lock(&thread->lock, true);
-        interrupted = thread->interrupted;
-        irq_spinlock_unlock(&thread->lock, true);
-
-        return interrupted;
+        int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
+            memory_order_acq_rel);
+
+        if (state == SLEEP_ASLEEP) {
+                /*
+                 * Only one thread gets to do this.
+                 * The reference consumed here is the reference implicitly passed to
+                 * the waking thread by the sleeper in thread_wait_finish().
+                 */
+                thread_requeue_sleeping(thread);
+        }
 }

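Note: the doc comment above thread_wait_finish() describes the whole protocol;
condensed into code, a primitive built on it would look roughly like the sketch
below (queue_t and the lock/enqueue/dequeue/remove_if_present helpers are
hypothetical stand-ins for e.g. waitq internals):

    errno_t sleep_on(queue_t *q, deadline_t deadline)
    {
            if (thread_wait_start() == THREAD_TERMINATING)
                    return EINTR;           /* don't begin a sleep while exiting */

            lock(q);
            enqueue(q, thread_ref(THREAD)); /* reference for the future waker */
            unlock(q);                      /* drop all spinlocks before waiting */

            if (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT) {
                    /* a real wakeup may still have raced us; clean up our entry */
                    lock(q);
                    remove_if_present(q, THREAD);
                    unlock(q);
            }
            return EOK;
    }

    void wake_one(queue_t *q)
    {
            lock(q);
            thread_t *t = dequeue(q);       /* take over the sleeper's reference */
            unlock(q);
            if (t != NULL) {
                    thread_wakeup(t);       /* the SLEEP_ASLEEP path consumes the
                                             * sleeper's implicit reference, not ours */
                    thread_put(t);          /* drop the queue's reference */
            }
    }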
     
@@ -596 +578 @@
 void thread_migration_disable(void)
 {
+        ipl_t ipl = interrupts_disable();
+
         assert(THREAD);
-
         THREAD->nomigrate++;
+
+        interrupts_restore(ipl);
 }

     
@@ -604 +589 @@
 void thread_migration_enable(void)
 {
+        ipl_t ipl = interrupts_disable();
+
         assert(THREAD);
         assert(THREAD->nomigrate > 0);

         if (THREAD->nomigrate > 0)
                 THREAD->nomigrate--;
+
+        interrupts_restore(ipl);
 }

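Note: nomigrate is only ever modified by its owning thread, so disabling interrupts
(rather than taking the removed thread->lock) suffices to keep the read-modify-write
atomic on this CPU. Typical nestable usage:

    thread_migration_disable();     /* increments THREAD->nomigrate */
    /* ... CPU-bound section, e.g. touching CPU-local data ... */
    thread_migration_enable();      /* migration possible again once count drops to 0 */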
     
@@ -632 +621 @@
 }

+errno_t thread_join(thread_t *thread)
+{
+        return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
+}
+
 /** Wait for another thread to exit.
+ * After successful wait, the thread reference is destroyed.
  *
  * @param thread Thread to join on exit.
     
@@ -643 +638 @@
 errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
 {
+        assert(thread != NULL);
+
         if (thread == THREAD)
                 return EINVAL;

-        /*
-         * Since thread join can only be called once on an undetached thread,
-         * the thread pointer is guaranteed to be still valid.
-         */
-
-        irq_spinlock_lock(&thread->lock, true);
-        assert(!thread->detached);
-        irq_spinlock_unlock(&thread->lock, true);
-
-        return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
-
-        // FIXME: join should deallocate the thread.
-        //        Current code calls detach after join, that's contrary to how
-        //        join is used in other threading APIs.
-}
-
-/** Detach thread.
- *
- * Mark the thread as detached. If the thread is already
- * in the Lingering state, deallocate its resources.
- *
- * @param thread Thread to be detached.
- *
- */
+        errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+
+        if (rc == EOK)
+                thread_put(thread);
+
+        return rc;
+}
+
 void thread_detach(thread_t *thread)
 {
-        /*
-         * Since the thread is expected not to be already detached,
-         * pointer to it must be still valid.
-         */
-        irq_spinlock_lock(&thread->lock, true);
-        assert(!thread->detached);
-
-        if (thread->state == Lingering) {
-                /*
-                 * Unlock &thread->lock and restore
-                 * interrupts in thread_destroy().
-                 */
-                thread_destroy(thread, true);
-                return;
-        } else {
-                thread->detached = true;
-        }
-
-        irq_spinlock_unlock(&thread->lock, true);
+        thread_put(thread);
 }

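Note: this resolves the old FIXME — a successful join now consumes the thread
reference, and thread_detach() is just thread_put() on the creation reference.
Sketch of the two disposal paths (error handling omitted; thread_create()
parameters assumed as in the earlier note):

    /* Either join: */
    thread_t *t = thread_create(worker, arg, TASK, THREAD_FLAG_NONE, "w1");
    thread_attach(t, TASK);
    thread_start(t);
    if (thread_join(t) == EOK) {
            /* reference already released; t must not be touched anymore */
    }

    /* ...or detach and forget: */
    thread_t *u = thread_create(worker, arg, TASK, THREAD_FLAG_NONE, "w2");
    thread_attach(u, TASK);
    thread_start(u);
    thread_detach(u);   /* drops the creation reference */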
     
@@ -706 +669 @@
         waitq_initialize(&wq);

-        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
+        (void) waitq_sleep_timeout(&wq, usec);
+}
+
+/** Allow other threads to run. */
+void thread_yield(void)
+{
+        assert(THREAD != NULL);
+        scheduler_enter(Running);
 }

     
@@ -713 +683 @@
         uint64_t ucycles, kcycles;
         char usuffix, ksuffix;
-        order_suffix(thread->ucycles, &ucycles, &usuffix);
-        order_suffix(thread->kcycles, &kcycles, &ksuffix);
+        order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
+        order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
+
+        state_t state = atomic_get_unordered(&thread->state);

         char *name;
     
@@ -722 +694 @@
                 name = thread->name;

-#ifdef __32_BITS__
         if (additional)
-                printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
+                printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
                     thread->tid, thread->thread_code, thread->kstack,
                     ucycles, usuffix, kcycles, ksuffix);
         else
-                printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
-                    thread->tid, name, thread, thread_states[thread->state],
+                printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
+                    thread->tid, name, thread, thread_states[state],
                     thread->task, thread->task->container);
-#endif
-
-#ifdef __64_BITS__
-        if (additional)
-                printf("%-8" PRIu64 " %18p %18p\n"
-                    "         %9" PRIu64 "%c %9" PRIu64 "%c ",
-                    thread->tid, thread->thread_code, thread->kstack,
-                    ucycles, usuffix, kcycles, ksuffix);
-        else
-                printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
-                    thread->tid, name, thread, thread_states[thread->state],
-                    thread->task, thread->task->container);
-#endif

         if (additional) {
-                if (thread->cpu)
-                        printf("%-5u", thread->cpu->id);
+                cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+                if (cpu)
+                        printf("%-5u", cpu->id);
                 else
                         printf("none ");

-                if (thread->state == Sleeping) {
-#ifdef __32_BITS__
-                        printf(" %10p", thread->sleep_queue);
-#endif
-
-#ifdef __64_BITS__
-                        printf(" %18p", thread->sleep_queue);
-#endif
+                if (state == Sleeping) {
+                        printf(" %p", thread->sleep_queue);
                 }

     
@@ -774 +727 @@
         thread_t *thread;

-        /* Messing with thread structures, avoid deadlock */
+        /* Accessing system-wide threads list through thread_first()/thread_next(). */
         irq_spinlock_lock(&threads_lock, true);

-#ifdef __32_BITS__
-        if (additional)
-                printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
-                    " [cpu] [waitqueue]\n");
-        else
-                printf("[id    ] [name        ] [address ] [state ] [task    ]"
-                    " [ctn]\n");
-#endif
-
-#ifdef __64_BITS__
-        if (additional) {
-                printf("[id    ] [code            ] [stack           ]\n"
-                    "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
-        } else
-                printf("[id    ] [name        ] [address         ] [state ]"
-                    " [task            ] [ctn]\n");
-#endif
+        if (sizeof(void *) <= 4) {
+                if (additional)
+                        printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
+                            " [cpu] [waitqueue]\n");
+                else
+                        printf("[id    ] [name        ] [address ] [state ] [task    ]"
+                            " [ctn]\n");
+        } else {
+                if (additional) {
+                        printf("[id    ] [code            ] [stack           ] [ucycles ] [kcycles ]"
+                            " [cpu] [waitqueue       ]\n");
+                } else
+                        printf("[id    ] [name        ] [address         ] [state ]"
+                            " [task            ] [ctn]\n");
+        }

         thread = thread_first();
     
@@ -804 +755 @@
 }

-/** Check whether thread exists.
- *
- * Note that threads_lock must be already held and
- * interrupts must be already disabled.
- *
- * @param thread Pointer to thread.
- *
- * @return True if thread t is known to the system, false otherwise.
- *
- */
-bool thread_exists(thread_t *thread)
-{
-        assert(interrupts_disabled());
-        assert(irq_spinlock_locked(&threads_lock));
-
+static bool thread_exists(thread_t *thread)
+{
         odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
         return odlink != NULL;
 }

+/** Check whether the thread exists, and if so, return a reference to it.
+ */
+thread_t *thread_try_get(thread_t *thread)
+{
+        irq_spinlock_lock(&threads_lock, true);
+
+        if (thread_exists(thread)) {
+                /* Try to strengthen the reference. */
+                thread = thread_try_ref(thread);
+        } else {
+                thread = NULL;
+        }
+
+        irq_spinlock_unlock(&threads_lock, true);
+
+        return thread;
+}
+
 /** Update accounting of current thread.
  *
     
@@ -833 +789 @@
 void thread_update_accounting(bool user)
 {
+        assert(interrupts_disabled());
+
         uint64_t time = get_cycle();

-        assert(interrupts_disabled());
-        assert(irq_spinlock_locked(&THREAD->lock));
-
         if (user)
-                THREAD->ucycles += time - THREAD->last_cycle;
+                atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
         else
-                THREAD->kcycles += time - THREAD->last_cycle;
+                atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);

         THREAD->last_cycle = time;
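Note: with ucycles/kcycles converted to atomic time counters, readers such as the
thread printout earlier in this changeset no longer need any lock to take a
snapshot. Sketch:

    /* Lock-free read of another thread's accounting. */
    uint64_t u = atomic_time_read(&thread->ucycles);
    uint64_t k = atomic_time_read(&thread->kcycles);
    printf("user: %" PRIu64 "  kernel: %" PRIu64 "\n", u, k);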
     
@@ -850 +805 @@
  * The threads_lock must be already held by the caller of this function and
  * interrupts must be disabled.
+ *
+ * The returned reference is weak.
+ * If the caller needs to keep it, thread_try_ref() must be used to upgrade
+ * to a strong reference _before_ threads_lock is released.
  *
  * @param id Thread ID.
     
@@ -928 +887 @@
 {
         irq_spinlock_lock(&threads_lock, true);
-
-        thread_t *thread = thread_find_by_id(thread_id);
+        thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
+        irq_spinlock_unlock(&threads_lock, true);
+
         if (thread == NULL) {
                 printf("No such thread.\n");
-                irq_spinlock_unlock(&threads_lock, true);
                 return;
         }
-
-        irq_spinlock_lock(&thread->lock, false);

         /*
     
@@ -950 +907 @@
          */

-        bool sleeping = false;
-        istate_t *istate = thread->udebug.uspace_state;
-        if (istate != NULL) {
-                printf("Scheduling thread stack trace.\n");
-                thread->btrace = true;
-                if (thread->state == Sleeping)
-                        sleeping = true;
-        } else
-                printf("Thread interrupt state not available.\n");
-
-        irq_spinlock_unlock(&thread->lock, false);
-
-        if (sleeping)
-                waitq_interrupt_sleep(thread);
-
-        irq_spinlock_unlock(&threads_lock, true);
+        printf("Scheduling thread stack trace.\n");
+        atomic_set_unordered(&thread->btrace, true);
+
+        thread_wakeup(thread);
+        thread_put(thread);
 }

     
@@ -1065 +1011 @@
                 thread_attach(thread, TASK);
 #endif
-                thread_ready(thread);
+                thread_start(thread);
+                thread_put(thread);

                 return 0;