File: 1 edited

Legend: lines showing both the rf35749e and r128359eb line numbers are unmodified; lines with only the rf35749e (left) number were removed; lines with only the r128359eb (right) number were added.
  • kernel/generic/src/proc/thread.c

    rf35749e r128359eb  
    11/*
    2  * Copyright (c) 2025 Jiri Svoboda
    32 * Copyright (c) 2010 Jakub Jermar
     3 * Copyright (c) 2018 Jiri Svoboda
    44 * All rights reserved.
    55 *
     
    6060#include <arch/interrupt.h>
    6161#include <smp/ipi.h>
     62#include <arch/faddr.h>
    6263#include <atomic.h>
    63 #include <memw.h>
     64#include <mem.h>
    6465#include <stdio.h>
    6566#include <stdlib.h>
     
    6869#include <errno.h>
    6970#include <debug.h>
    70 #include <halt.h>
    7171
    7272/** Thread states */
     
    9494 *
    9595 * Members are of type thread_t.
    96  *
    97  * This structure contains weak references. Any reference from it must not leave
    98  * the threads_lock critical section unless strengthened via thread_try_ref().
    9996 */
    10097odict_t threads;
     
    105102static slab_cache_t *thread_cache;
    106103
     104#ifdef CONFIG_FPU
     105slab_cache_t *fpu_context_cache;
     106#endif
     107
    107108static void *threads_getkey(odlink_t *);
    108109static int threads_cmp(void *, void *);
    109110
     111/** Thread wrapper.
     112 *
     113 * This wrapper is provided to ensure that every thread makes a call to
     114 * thread_exit() when its implementing function returns.
     115 *
     116 * interrupts_disable() is assumed.
     117 *
     118 */
     119static void cushion(void)
     120{
     121        void (*f)(void *) = THREAD->thread_code;
     122        void *arg = THREAD->thread_arg;
     123        THREAD->last_cycle = get_cycle();
     124
     125        /* This is where each thread wakes up after its creation */
     126        irq_spinlock_unlock(&THREAD->lock, false);
     127        interrupts_enable();
     128
     129        f(arg);
     130
     131        /* Accumulate accounting to the task */
     132        irq_spinlock_lock(&THREAD->lock, true);
     133        if (!THREAD->uncounted) {
     134                thread_update_accounting(true);
     135                uint64_t ucycles = THREAD->ucycles;
     136                THREAD->ucycles = 0;
     137                uint64_t kcycles = THREAD->kcycles;
     138                THREAD->kcycles = 0;
     139
     140                irq_spinlock_pass(&THREAD->lock, &TASK->lock);
     141                TASK->ucycles += ucycles;
     142                TASK->kcycles += kcycles;
     143                irq_spinlock_unlock(&TASK->lock, true);
     144        } else
     145                irq_spinlock_unlock(&THREAD->lock, true);
     146
     147        thread_exit();
     148
     149        /* Not reached */
     150}
     151
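
A minimal sketch of how control reaches cushion(): a kernel thread is created, attached and made runnable, and when its function returns it falls back into the wrapper above, which settles accounting and calls thread_exit(). Only thread_create()/thread_attach()/thread_ready() (and thread_start() on the rf35749e side) are taken from this file; the worker function and the THREAD_FLAG_NONE flag choice are illustrative assumptions.

    /* Sketch only: a trivial kernel thread whose return path ends in cushion(). */
    static void worker(void *arg)
    {
            printf("worker: running with arg %p\n", arg);
            /* Returning here re-enters cushion(), which calls thread_exit(). */
    }

    static void spawn_worker(void)
    {
            thread_t *t = thread_create(worker, NULL, TASK, THREAD_FLAG_NONE,
                "worker");
            if (t == NULL)
                    return;

            thread_attach(t, TASK);
            thread_ready(t);        /* the rf35749e side would call thread_start(t) */
    }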
    110152/** Initialization and allocation for thread_t structure
    111153 *
     
    115157        thread_t *thread = (thread_t *) obj;
    116158
     159        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    117160        link_initialize(&thread->rq_link);
    118161        link_initialize(&thread->wq_link);
     
    121164        /* call the architecture-specific part of the constructor */
    122165        thr_constructor_arch(thread);
     166
     167#ifdef CONFIG_FPU
     168        thread->saved_fpu_context = slab_alloc(fpu_context_cache,
     169            FRAME_ATOMIC | kmflags);
     170        if (!thread->saved_fpu_context)
     171                return ENOMEM;
     172#endif /* CONFIG_FPU */
    123173
    124174        /*
     
    148198        uintptr_t stack_phys =
    149199            frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
    150         if (!stack_phys)
     200        if (!stack_phys) {
     201#ifdef CONFIG_FPU
     202                assert(thread->saved_fpu_context);
     203                slab_free(fpu_context_cache, thread->saved_fpu_context);
     204#endif
    151205                return ENOMEM;
     206        }
    152207
    153208        thread->kstack = (uint8_t *) PA2KA(stack_phys);
     
    170225        frame_free(KA2PA(thread->kstack), STACK_FRAMES);
    171226
     227#ifdef CONFIG_FPU
     228        assert(thread->saved_fpu_context);
     229        slab_free(fpu_context_cache, thread->saved_fpu_context);
     230#endif
     231
    172232        return STACK_FRAMES;  /* number of frames freed */
    173233}
     
    183243
    184244        atomic_store(&nrdy, 0);
    185         thread_cache = slab_cache_create("thread_t", sizeof(thread_t), _Alignof(thread_t),
     245        thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
    186246            thr_constructor, thr_destructor, 0);
    187247
     248#ifdef CONFIG_FPU
     249        fpu_context_cache = slab_cache_create("fpu_context_t",
     250            sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
     251#endif
     252
    188253        odict_initialize(&threads, threads_getkey, threads_cmp);
    189254}
     
    196261void thread_wire(thread_t *thread, cpu_t *cpu)
    197262{
    198         ipl_t ipl = interrupts_disable();
    199         atomic_set_unordered(&thread->cpu, cpu);
    200         thread->nomigrate++;
    201         interrupts_restore(ipl);
    202 }
    203 
    204 /** Start a thread that wasn't started yet since it was created.
    205  *
    206  * @param thread A reference to the newly created thread.
    207  */
    208 void thread_start(thread_t *thread)
    209 {
    210         assert(atomic_get_unordered(&thread->state) == Entering);
    211         thread_requeue_sleeping(thread_ref(thread));
     263        irq_spinlock_lock(&thread->lock, true);
     264        thread->cpu = cpu;
     265        thread->wired = true;
     266        irq_spinlock_unlock(&thread->lock, true);
     267}
     268
     269/** Invoked right before thread_ready() readies the thread. thread is locked. */
     270static void before_thread_is_ready(thread_t *thread)
     271{
     272        assert(irq_spinlock_locked(&thread->lock));
     273}
     274
     275/** Make thread ready
     276 *
     277 * Switch thread to the ready state.
     278 *
     279 * @param thread Thread to make ready.
     280 *
     281 */
     282void thread_ready(thread_t *thread)
     283{
     284        irq_spinlock_lock(&thread->lock, true);
     285
     286        assert(thread->state != Ready);
     287
     288        before_thread_is_ready(thread);
     289
     290        int i = (thread->priority < RQ_COUNT - 1) ?
     291            ++thread->priority : thread->priority;
     292
     293        cpu_t *cpu;
     294        if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
     295                /* Cannot ready to another CPU */
     296                assert(thread->cpu != NULL);
     297                cpu = thread->cpu;
     298        } else if (thread->stolen) {
     299                /* Ready to the stealing CPU */
     300                cpu = CPU;
     301        } else if (thread->cpu) {
     302                /* Prefer the CPU on which the thread ran last */
     303                assert(thread->cpu != NULL);
     304                cpu = thread->cpu;
     305        } else {
     306                cpu = CPU;
     307        }
     308
     309        thread->state = Ready;
     310
     311        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     312
     313        /*
     314         * Append thread to respective ready queue
     315         * on respective processor.
     316         */
     317
     318        list_append(&thread->rq_link, &cpu->rq[i].rq);
     319        cpu->rq[i].n++;
     320        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
     321
     322        atomic_inc(&nrdy);
     323        atomic_inc(&cpu->nrdy);
    212324}
    213325
     
    234346                return NULL;
    235347
    236         refcount_init(&thread->refcount);
    237 
    238348        if (thread_create_arch(thread, flags) != EOK) {
    239349                slab_free(thread_cache, thread);
     
    248358        irq_spinlock_unlock(&tidlock, true);
    249359
    250         context_create(&thread->saved_context, thread_main_func,
    251             thread->kstack, STACK_SIZE);
     360        memset(&thread->saved_context, 0, sizeof(thread->saved_context));
     361        context_set(&thread->saved_context, FADDR(cushion),
     362            (uintptr_t) thread->kstack, STACK_SIZE);
    252363
    253364        current_initialize((current_t *) thread->kstack);
     365
     366        ipl_t ipl = interrupts_disable();
     367        thread->saved_context.ipl = interrupts_read();
     368        interrupts_restore(ipl);
    254369
    255370        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
     
    257372        thread->thread_code = func;
    258373        thread->thread_arg = arg;
    259         thread->ucycles = ATOMIC_TIME_INITIALIZER();
    260         thread->kcycles = ATOMIC_TIME_INITIALIZER();
     374        thread->ticks = -1;
     375        thread->ucycles = 0;
     376        thread->kcycles = 0;
    261377        thread->uncounted =
    262378            ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
    263         atomic_init(&thread->priority, 0);
    264         atomic_init(&thread->cpu, NULL);
     379        thread->priority = -1;          /* Start in rq[0] */
     380        thread->cpu = NULL;
     381        thread->wired = false;
    265382        thread->stolen = false;
    266383        thread->uspace =
     
    268385
    269386        thread->nomigrate = 0;
    270         atomic_init(&thread->state, Entering);
    271 
    272         atomic_init(&thread->sleep_queue, NULL);
     387        thread->state = Entering;
     388
     389        timeout_initialize(&thread->sleep_timeout);
     390        thread->sleep_interruptible = false;
     391        thread->sleep_composable = false;
     392        thread->sleep_queue = NULL;
     393        thread->timeout_pending = false;
    273394
    274395        thread->in_copy_from_uspace = false;
     
    276397
    277398        thread->interrupted = false;
    278         atomic_init(&thread->sleep_state, SLEEP_INITIAL);
    279 
     399        thread->detached = false;
    280400        waitq_initialize(&thread->join_wq);
    281401
     
    283403
    284404        thread->fpu_context_exists = false;
     405        thread->fpu_context_engaged = false;
    285406
    286407        odlink_initialize(&thread->lthreads);
     
    288409#ifdef CONFIG_UDEBUG
    289410        /* Initialize debugging stuff */
    290         atomic_init(&thread->btrace, false);
     411        thread->btrace = false;
    291412        udebug_thread_initialize(&thread->udebug);
    292413#endif
     
    302423 * Detach thread from all queues, cpus etc. and destroy it.
    303424 *
    304  * @param obj  Thread to be destroyed.
    305  *
    306  */
    307 static void thread_destroy(void *obj)
    308 {
    309         thread_t *thread = (thread_t *) obj;
    310 
    311         assert_link_not_used(&thread->rq_link);
    312         assert_link_not_used(&thread->wq_link);
    313 
     425 * @param thread  Thread to be destroyed.
     426 * @param irq_res Indicate whether it should unlock thread->lock
     427 *                in interrupts-restore mode.
     428 *
     429 */
     430void thread_destroy(thread_t *thread, bool irq_res)
     431{
     432        assert(irq_spinlock_locked(&thread->lock));
     433        assert((thread->state == Exiting) || (thread->state == Lingering));
    314434        assert(thread->task);
    315 
    316         ipl_t ipl = interrupts_disable();
    317 
    318         /* Remove thread from global list. */
    319         irq_spinlock_lock(&threads_lock, false);
     435        assert(thread->cpu);
     436
     437        irq_spinlock_lock(&thread->cpu->lock, false);
     438        if (thread->cpu->fpu_owner == thread)
     439                thread->cpu->fpu_owner = NULL;
     440        irq_spinlock_unlock(&thread->cpu->lock, false);
     441
     442        irq_spinlock_pass(&thread->lock, &threads_lock);
     443
    320444        odict_remove(&thread->lthreads);
    321         irq_spinlock_unlock(&threads_lock, false);
    322 
    323         /* Remove thread from task's list and accumulate accounting. */
    324         irq_spinlock_lock(&thread->task->lock, false);
    325 
     445
     446        irq_spinlock_pass(&threads_lock, &thread->task->lock);
     447
     448        /*
     449         * Detach from the containing task.
     450         */
    326451        list_remove(&thread->th_link);
    327 
    328         /*
    329          * No other CPU has access to this thread anymore, so we don't need
    330          * thread->lock for accessing thread's fields after this point.
    331          */
    332 
    333         if (!thread->uncounted) {
    334                 thread->task->ucycles += atomic_time_read(&thread->ucycles);
    335                 thread->task->kcycles += atomic_time_read(&thread->kcycles);
    336         }
    337 
    338         irq_spinlock_unlock(&thread->task->lock, false);
    339 
    340         assert((atomic_get_unordered(&thread->state) == Entering) ||
    341             (atomic_get_unordered(&thread->state) == Exiting) ||
    342             (atomic_get_unordered(&thread->state) == Lingering));
    343 
    344         /* Clear cpu->fpu_owner if set to this thread. */
    345 #ifdef CONFIG_FPU_LAZY
    346         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    347         if (cpu) {
    348                 /*
    349                  * We need to lock for this because the old CPU can concurrently try
    350                  * to dump this thread's FPU state, in which case we need to wait for
    351                  * it to finish. An atomic compare-and-swap wouldn't be enough.
    352                  */
    353                 irq_spinlock_lock(&cpu->fpu_lock, false);
    354 
    355                 if (atomic_get_unordered(&cpu->fpu_owner) == thread)
    356                         atomic_set_unordered(&cpu->fpu_owner, NULL);
    357 
    358                 irq_spinlock_unlock(&cpu->fpu_lock, false);
    359         }
    360 #endif
    361 
    362         interrupts_restore(ipl);
     452        irq_spinlock_unlock(&thread->task->lock, irq_res);
    363453
    364454        /*
     
    366456         */
    367457        task_release(thread->task);
    368         thread->task = NULL;
    369 
    370458        slab_free(thread_cache, thread);
    371 }
    372 
    373 void thread_put(thread_t *thread)
    374 {
    375         if (refcount_down(&thread->refcount)) {
    376                 thread_destroy(thread);
    377         }
    378459}
    379460
     
    389470void thread_attach(thread_t *thread, task_t *task)
    390471{
    391         ipl_t ipl = interrupts_disable();
    392 
    393472        /*
    394473         * Attach to the specified task.
    395474         */
    396         irq_spinlock_lock(&task->lock, false);
     475        irq_spinlock_lock(&task->lock, true);
    397476
    398477        /* Hold a reference to the task. */
     
    405484        list_append(&thread->th_link, &task->threads);
    406485
    407         irq_spinlock_unlock(&task->lock, false);
     486        irq_spinlock_pass(&task->lock, &threads_lock);
    408487
    409488        /*
    410489         * Register this thread in the system-wide dictionary.
    411490         */
    412         irq_spinlock_lock(&threads_lock, false);
    413491        odict_insert(&thread->lthreads, &threads, NULL);
    414         irq_spinlock_unlock(&threads_lock, false);
    415 
    416         interrupts_restore(ipl);
     492        irq_spinlock_unlock(&threads_lock, true);
    417493}
    418494
     
    451527        }
    452528
    453         scheduler_enter(Exiting);
    454         unreachable();
     529restart:
     530        irq_spinlock_lock(&THREAD->lock, true);
     531        if (THREAD->timeout_pending) {
     532                /* Busy waiting for timeouts in progress */
     533                irq_spinlock_unlock(&THREAD->lock, true);
     534                goto restart;
     535        }
     536
     537        THREAD->state = Exiting;
     538        irq_spinlock_unlock(&THREAD->lock, true);
     539
     540        scheduler();
     541
     542        /* Not reached */
     543        while (true)
     544                ;
    455545}
    456546
     
    461551 * blocking call was interruptible. See waitq_sleep_timeout().
    462552 *
     553 * The caller must guarantee the thread object is valid during the entire
     554 * function, e.g. by holding the threads_lock lock.
     555 *
    463556 * Interrupted threads automatically exit when returning back to user space.
    464557 *
    465  * @param thread A valid thread object.
     558 * @param thread A valid thread object. The caller must guarantee it
     559 *               will remain valid until thread_interrupt() exits.
    466560 */
    467561void thread_interrupt(thread_t *thread)
    468562{
    469563        assert(thread != NULL);
     564
     565        irq_spinlock_lock(&thread->lock, true);
     566
    470567        thread->interrupted = true;
    471         thread_wakeup(thread);
    472 }
    473 
    474 /** Prepare for putting the thread to sleep.
    475  *
    476  * @returns whether the thread is currently terminating. If THREAD_OK
    477  * is returned, the thread is guaranteed to be woken up instantly if the thread
    478  * is terminated at any time between this function's return and
    479  * thread_wait_finish(). If THREAD_TERMINATING is returned, the thread can still
    480  * go to sleep, but doing so will delay termination.
    481  */
    482 thread_termination_state_t thread_wait_start(void)
    483 {
    484         assert(THREAD != NULL);
    485 
    486         /*
    487          * This is an exchange rather than a store so that we can use the acquire
    488          * semantics, which is needed to ensure that code after this operation sees
    489          * memory ops made before thread_wakeup() in other thread, if that wakeup
    490          * was reset by this operation.
    491          *
    492          * In particular, we need this to ensure we can't miss the thread being
    493          * terminated concurrently with a synchronization primitive preparing to
    494          * sleep.
    495          */
    496         (void) atomic_exchange_explicit(&THREAD->sleep_state, SLEEP_INITIAL,
    497             memory_order_acquire);
    498 
    499         return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
    500 }
    501 
    502 static void thread_wait_timeout_callback(void *arg)
    503 {
    504         thread_wakeup(arg);
    505 }
    506 
    507 /**
    508  * Suspends this thread's execution until thread_wakeup() is called on it,
    509  * or deadline is reached.
    510  *
    511  * The way this would normally be used is that the current thread calls
    512  * thread_wait_start(), and if interruption has not been signaled, stores
    513  * a reference to itself in a synchronized structure (such as waitq).
    514  * After that, it releases any spinlocks it might hold and calls this function.
    515  *
    516  * The thread doing the wakeup acquires the thread's reference from said
    517  * synchronized structure and calls thread_wakeup() on it.
    518  *
    519  * Notably, there can be more than one thread performing wakeup.
    520  * The number of performed calls to thread_wakeup(), or their relative
    521  * ordering with thread_wait_finish(), does not matter. However, calls to
    522  * thread_wakeup() are expected to be synchronized with thread_wait_start()
    523  * with which they are associated, otherwise wakeups may be missed.
    524  * However, the operation of thread_wakeup() is defined at any time,
    525  * synchronization notwithstanding (in the sense of C un/defined behavior),
    526  * and is in fact used to interrupt waiting threads by external events.
    527  * The waiting thread must operate correctly in face of spurious wakeups,
    528  * and clean up its reference in the synchronization structure if necessary.
    529  *
    530  * Returns THREAD_WAIT_TIMEOUT if timeout fired, which is a necessary condition
    531  * for it to have been woken up by the timeout, but the caller must assume
    532  * that proper wakeups, timeouts and interrupts may occur concurrently, so
    533  * the fact timeout has been registered does not necessarily mean the thread
    534  * has not been woken up or interrupted.
    535  */
    536 thread_wait_result_t thread_wait_finish(deadline_t deadline)
    537 {
    538         assert(THREAD != NULL);
    539 
    540         timeout_t timeout;
    541 
    542         /* Extra check to avoid going to scheduler if we don't need to. */
    543         if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
    544             SLEEP_INITIAL)
    545                 return THREAD_WAIT_SUCCESS;
    546 
    547         if (deadline != DEADLINE_NEVER) {
    548                 timeout_initialize(&timeout);
    549                 timeout_register_deadline(&timeout, deadline,
    550                     thread_wait_timeout_callback, THREAD);
    551         }
    552 
    553         scheduler_enter(Sleeping);
    554 
    555         if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
    556                 return THREAD_WAIT_TIMEOUT;
    557         } else {
    558                 return THREAD_WAIT_SUCCESS;
    559         }
    560 }
    561 
    562 void thread_wakeup(thread_t *thread)
     568        bool sleeping = (thread->state == Sleeping);
     569
     570        irq_spinlock_unlock(&thread->lock, true);
     571
     572        if (sleeping)
     573                waitq_interrupt_sleep(thread);
     574}
     575
     576/** Returns true if the thread was interrupted.
     577 *
     578 * @param thread A valid thread object. User must guarantee it will
     579 *               be alive during the entire call.
     580 * @return true if the thread was already interrupted via thread_interrupt().
     581 */
     582bool thread_interrupted(thread_t *thread)
    563583{
    564584        assert(thread != NULL);
    565585
    566         int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
    567             memory_order_acq_rel);
    568 
    569         if (state == SLEEP_ASLEEP) {
    570                 /*
    571                  * Only one thread gets to do this.
    572                  * The reference consumed here is the reference implicitly passed to
    573                  * the waking thread by the sleeper in thread_wait_finish().
    574                  */
    575                 thread_requeue_sleeping(thread);
    576         }
     586        bool interrupted;
     587
     588        irq_spinlock_lock(&thread->lock, true);
     589        interrupted = thread->interrupted;
     590        irq_spinlock_unlock(&thread->lock, true);
     591
     592        return interrupted;
    577593}
    578594
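
The rf35749e-side comments above describe a concrete protocol: arm the wait with thread_wait_start(), publish the current thread somewhere a waker can find it, drop any spinlocks, then call thread_wait_finish(); the waker retrieves the thread and calls thread_wakeup(). A rough sketch of that shape, assuming a hypothetical single-slot handoff; the thread_wait_*()/thread_wakeup() calls and their result types come from the rf35749e side shown above, and reference management (thread_ref()/thread_put()) is deliberately omitted:

    /* Waiting side (runs as THREAD). "slot" is a hypothetical rendezvous point. */
    static thread_wait_result_t my_wait(thread_t **slot, deadline_t deadline)
    {
            /* Arm the wait first; a terminating thread should not block. */
            if (thread_wait_start() == THREAD_TERMINATING)
                    return THREAD_WAIT_SUCCESS;

            /* Publish ourselves, then release any locks before sleeping. */
            *slot = THREAD;

            /* Returns on thread_wakeup(), on the deadline, or spuriously. */
            return thread_wait_finish(deadline);
    }

    /* Waking side, possibly running on another CPU. */
    static void my_wake(thread_t **slot)
    {
            thread_t *t = *slot;
            if (t != NULL) {
                    *slot = NULL;
                    thread_wakeup(t);
            }
    }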
     
    580596void thread_migration_disable(void)
    581597{
    582         ipl_t ipl = interrupts_disable();
    583 
    584598        assert(THREAD);
     599
    585600        THREAD->nomigrate++;
    586 
    587         interrupts_restore(ipl);
    588601}
    589602
     
    591604void thread_migration_enable(void)
    592605{
    593         ipl_t ipl = interrupts_disable();
    594 
    595606        assert(THREAD);
    596607        assert(THREAD->nomigrate > 0);
     
    598609        if (THREAD->nomigrate > 0)
    599610                THREAD->nomigrate--;
    600 
    601         interrupts_restore(ipl);
    602611}
    603612
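
On both sides thread_migration_disable()/thread_migration_enable() maintain a nesting counter (nomigrate), so each disable must be paired with exactly one enable. A short sketch of the intended pattern; the CPU-local work in between is a placeholder:

    static void touch_cpu_local_state(void)
    {
            thread_migration_disable();     /* THREAD stays on the current CPU */

            /* ... safely read or update CPU-local data here ... */

            thread_migration_enable();      /* may migrate again once nomigrate drops to 0 */
    }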
     
    623632}
    624633
    625 errno_t thread_join(thread_t *thread)
    626 {
    627         return thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    628 }
    629 
    630634/** Wait for another thread to exit.
    631  * After successful wait, the thread reference is destroyed.
    632635 *
    633636 * @param thread Thread to join on exit.
     
    640643errno_t thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    641644{
    642         assert(thread != NULL);
    643 
    644645        if (thread == THREAD)
    645646                return EINVAL;
    646647
    647         errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
    648 
    649         if (rc == EOK)
    650                 thread_put(thread);
    651 
    652         return rc;
    653 }
    654 
     648        /*
     649         * Since thread join can only be called once on an undetached thread,
     650         * the thread pointer is guaranteed to be still valid.
     651         */
     652
     653        irq_spinlock_lock(&thread->lock, true);
     654        assert(!thread->detached);
     655        irq_spinlock_unlock(&thread->lock, true);
     656
     657        return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
     658
     659        // FIXME: join should deallocate the thread.
     660        //        Current code calls detach after join, that's contrary to how
     661        //        join is used in other threading APIs.
     662}
     663
     664/** Detach thread.
     665 *
     666 * Mark the thread as detached. If the thread is already
     667 * in the Lingering state, deallocate its resources.
     668 *
     669 * @param thread Thread to be detached.
     670 *
     671 */
    655672void thread_detach(thread_t *thread)
    656673{
    657         thread_put(thread);
     674        /*
     675         * Since the thread is expected not to be already detached,
      676         * the pointer to it must still be valid.
     677         */
     678        irq_spinlock_lock(&thread->lock, true);
     679        assert(!thread->detached);
     680
     681        if (thread->state == Lingering) {
     682                /*
     683                 * Unlock &thread->lock and restore
     684                 * interrupts in thread_destroy().
     685                 */
     686                thread_destroy(thread, true);
     687                return;
     688        } else {
     689                thread->detached = true;
     690        }
     691
     692        irq_spinlock_unlock(&thread->lock, true);
    658693}
    659694
     
    671706        waitq_initialize(&wq);
    672707
    673         (void) waitq_sleep_timeout(&wq, usec);
    674 }
    675 
    676 /** Allow other threads to run. */
    677 void thread_yield(void)
    678 {
    679         assert(THREAD != NULL);
    680         scheduler_enter(Running);
     708        (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
    681709}
    682710
     
    685713        uint64_t ucycles, kcycles;
    686714        char usuffix, ksuffix;
    687         order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
    688         order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
    689 
    690         state_t state = atomic_get_unordered(&thread->state);
     715        order_suffix(thread->ucycles, &ucycles, &usuffix);
     716        order_suffix(thread->kcycles, &kcycles, &ksuffix);
    691717
    692718        char *name;
     
    696722                name = thread->name;
    697723
     724#ifdef __32_BITS__
    698725        if (additional)
    699                 printf("%-8" PRIu64 " %p %p %9" PRIu64 "%c %9" PRIu64 "%c ",
     726                printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
    700727                    thread->tid, thread->thread_code, thread->kstack,
    701728                    ucycles, usuffix, kcycles, ksuffix);
    702729        else
    703                 printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
    704                     thread->tid, name, thread, thread_states[state],
     730                printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
     731                    thread->tid, name, thread, thread_states[thread->state],
    705732                    thread->task, thread->task->container);
     733#endif
     734
     735#ifdef __64_BITS__
     736        if (additional)
     737                printf("%-8" PRIu64 " %18p %18p\n"
     738                    "         %9" PRIu64 "%c %9" PRIu64 "%c ",
     739                    thread->tid, thread->thread_code, thread->kstack,
     740                    ucycles, usuffix, kcycles, ksuffix);
     741        else
     742                printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
     743                    thread->tid, name, thread, thread_states[thread->state],
     744                    thread->task, thread->task->container);
     745#endif
    706746
    707747        if (additional) {
    708                 cpu_t *cpu = atomic_get_unordered(&thread->cpu);
    709                 if (cpu)
    710                         printf("%-5u", cpu->id);
     748                if (thread->cpu)
     749                        printf("%-5u", thread->cpu->id);
    711750                else
    712751                        printf("none ");
    713752
    714                 if (state == Sleeping) {
    715                         printf(" %p", thread->sleep_queue);
     753                if (thread->state == Sleeping) {
     754#ifdef __32_BITS__
     755                        printf(" %10p", thread->sleep_queue);
     756#endif
     757
     758#ifdef __64_BITS__
     759                        printf(" %18p", thread->sleep_queue);
     760#endif
    716761                }
    717762
     
    729774        thread_t *thread;
    730775
    731         /* Accessing system-wide threads list through thread_first()/thread_next(). */
     776        /* Messing with thread structures, avoid deadlock */
    732777        irq_spinlock_lock(&threads_lock, true);
    733778
    734         if (sizeof(void *) <= 4) {
    735                 if (additional)
    736                         printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
    737                             " [cpu] [waitqueue]\n");
    738                 else
    739                         printf("[id    ] [name        ] [address ] [state ] [task    ]"
    740                             " [ctn]\n");
    741         } else {
    742                 if (additional) {
    743                         printf("[id    ] [code            ] [stack           ] [ucycles ] [kcycles ]"
    744                             " [cpu] [waitqueue       ]\n");
    745                 } else
    746                         printf("[id    ] [name        ] [address         ] [state ]"
    747                             " [task            ] [ctn]\n");
    748         }
     779#ifdef __32_BITS__
     780        if (additional)
     781                printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
     782                    " [cpu] [waitqueue]\n");
     783        else
     784                printf("[id    ] [name        ] [address ] [state ] [task    ]"
     785                    " [ctn]\n");
     786#endif
     787
     788#ifdef __64_BITS__
     789        if (additional) {
     790                printf("[id    ] [code            ] [stack           ]\n"
     791                    "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
     792        } else
     793                printf("[id    ] [name        ] [address         ] [state ]"
     794                    " [task            ] [ctn]\n");
     795#endif
    749796
    750797        thread = thread_first();
     
    757804}
    758805
    759 static bool thread_exists(thread_t *thread)
    760 {
     806/** Check whether thread exists.
     807 *
     808 * Note that threads_lock must be already held and
     809 * interrupts must be already disabled.
     810 *
     811 * @param thread Pointer to thread.
     812 *
     813 * @return True if thread t is known to the system, false otherwise.
     814 *
     815 */
     816bool thread_exists(thread_t *thread)
     817{
     818        assert(interrupts_disabled());
     819        assert(irq_spinlock_locked(&threads_lock));
     820
    761821        odlink_t *odlink = odict_find_eq(&threads, thread, NULL);
    762822        return odlink != NULL;
    763823}
    764824
    765 /** Check whether the thread exists, and if so, return a reference to it.
    766  */
    767 thread_t *thread_try_get(thread_t *thread)
    768 {
    769         irq_spinlock_lock(&threads_lock, true);
    770 
    771         if (thread_exists(thread)) {
    772                 /* Try to strengthen the reference. */
    773                 thread = thread_try_ref(thread);
    774         } else {
    775                 thread = NULL;
    776         }
    777 
    778         irq_spinlock_unlock(&threads_lock, true);
    779 
    780         return thread;
    781 }
    782 
    783825/** Update accounting of current thread.
    784826 *
     
    791833void thread_update_accounting(bool user)
    792834{
     835        uint64_t time = get_cycle();
     836
    793837        assert(interrupts_disabled());
    794 
    795         uint64_t time = get_cycle();
     838        assert(irq_spinlock_locked(&THREAD->lock));
    796839
    797840        if (user)
    798                 atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
     841                THREAD->ucycles += time - THREAD->last_cycle;
    799842        else
    800                 atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
     843                THREAD->kcycles += time - THREAD->last_cycle;
    801844
    802845        THREAD->last_cycle = time;
     
    807850 * The threads_lock must be already held by the caller of this function and
    808851 * interrupts must be disabled.
    809  *
    810  * The returned reference is weak.
    811  * If the caller needs to keep it, thread_try_ref() must be used to upgrade
    812  * to a strong reference _before_ threads_lock is released.
    813852 *
    814853 * @param id Thread ID.
     
    889928{
    890929        irq_spinlock_lock(&threads_lock, true);
    891         thread_t *thread = thread_try_ref(thread_find_by_id(thread_id));
    892         irq_spinlock_unlock(&threads_lock, true);
    893 
     930
     931        thread_t *thread = thread_find_by_id(thread_id);
    894932        if (thread == NULL) {
    895933                printf("No such thread.\n");
     934                irq_spinlock_unlock(&threads_lock, true);
    896935                return;
    897936        }
     937
     938        irq_spinlock_lock(&thread->lock, false);
    898939
    899940        /*
     
    909950         */
    910951
    911         printf("Scheduling thread stack trace.\n");
    912         atomic_set_unordered(&thread->btrace, true);
    913 
    914         thread_wakeup(thread);
    915         thread_put(thread);
     952        bool sleeping = false;
     953        istate_t *istate = thread->udebug.uspace_state;
     954        if (istate != NULL) {
     955                printf("Scheduling thread stack trace.\n");
     956                thread->btrace = true;
     957                if (thread->state == Sleeping)
     958                        sleeping = true;
     959        } else
     960                printf("Thread interrupt state not available.\n");
     961
     962        irq_spinlock_unlock(&thread->lock, false);
     963
     964        if (sleeping)
     965                waitq_interrupt_sleep(thread);
     966
     967        irq_spinlock_unlock(&threads_lock, true);
    916968}
    917969
     
    946998
    947999/** Process syscall to create new thread.
    948  * The started thread will have initial pc and sp set to the exact values passed
    949  * to the syscall. The kernel will not touch any stack data below the stack
    950  * pointer, but some architectures may require some space to be available
    951  * for use above it. See userspace() in kernel, and <libarch/thread.h> in libc.
    952  *
    953  */
    954 sys_errno_t sys_thread_create(sysarg_t pc, sysarg_t sp,
    955     uspace_ptr_char uspace_name, size_t name_len)
     1000 *
     1001 */
     1002sys_errno_t sys_thread_create(uspace_ptr_uspace_arg_t uspace_uarg, uspace_ptr_char uspace_name,
     1003    size_t name_len, uspace_ptr_thread_id_t uspace_thread_id)
    9561004{
    9571005        if (name_len > THREAD_NAME_BUFLEN - 1)
     
    9691017         * In case of success, kernel_uarg will be freed in uinit().
    9701018         */
    971         uinit_arg_t *kernel_uarg = malloc(sizeof(uinit_arg_t));
     1019        uspace_arg_t *kernel_uarg =
     1020            (uspace_arg_t *) malloc(sizeof(uspace_arg_t));
    9721021        if (!kernel_uarg)
    9731022                return (sys_errno_t) ENOMEM;
    9741023
    975         kernel_uarg->pc = pc;
    976         kernel_uarg->sp = sp;
    977 
    978         // TODO: fix some unnecessary inconsistencies between architectures
     1024        rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
     1025        if (rc != EOK) {
     1026                free(kernel_uarg);
     1027                return (sys_errno_t) rc;
     1028        }
    9791029
    9801030        thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
    9811031            THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
    982         if (!thread) {
     1032        if (thread) {
     1033                if (uspace_thread_id) {
     1034                        rc = copy_to_uspace(uspace_thread_id, &thread->tid,
     1035                            sizeof(thread->tid));
     1036                        if (rc != EOK) {
     1037                                /*
     1038                                 * We have encountered a failure, but the thread
     1039                                 * has already been created. We need to undo its
     1040                                 * creation now.
     1041                                 */
     1042
     1043                                /*
     1044                                 * The new thread structure is initialized, but
     1045                                 * is still not visible to the system.
     1046                                 * We can safely deallocate it.
     1047                                 */
     1048                                slab_free(thread_cache, thread);
     1049                                free(kernel_uarg);
     1050
     1051                                return (sys_errno_t) rc;
     1052                        }
     1053                }
     1054
     1055#ifdef CONFIG_UDEBUG
     1056                /*
     1057                 * Generate udebug THREAD_B event and attach the thread.
     1058                 * This must be done atomically (with the debug locks held),
     1059                 * otherwise we would either miss some thread or receive
     1060                 * THREAD_B events for threads that already existed
     1061                 * and could be detected with THREAD_READ before.
     1062                 */
     1063                udebug_thread_b_event_attach(thread, TASK);
     1064#else
     1065                thread_attach(thread, TASK);
     1066#endif
     1067                thread_ready(thread);
     1068
     1069                return 0;
     1070        } else
    9831071                free(kernel_uarg);
    984                 return (sys_errno_t) ENOMEM;
    985         }
    986 
    987 #ifdef CONFIG_UDEBUG
    988         /*
    989          * Generate udebug THREAD_B event and attach the thread.
    990          * This must be done atomically (with the debug locks held),
    991          * otherwise we would either miss some thread or receive
    992          * THREAD_B events for threads that already existed
    993          * and could be detected with THREAD_READ before.
    994          */
    995         udebug_thread_b_event_attach(thread, TASK);
    996 #else
    997         thread_attach(thread, TASK);
    998 #endif
    999         thread_start(thread);
    1000         thread_put(thread);
    1001 
    1002         return (sys_errno_t) EOK;
     1072
     1073        return (sys_errno_t) ENOMEM;
    10031074}
    10041075