File: 1 edited

Legend:

  ' '  Unmodified
  '+'  Added
  '-'  Removed
  • kernel/generic/src/proc/thread.c

--- kernel/generic/src/proc/thread.c (rae0300b5)
+++ kernel/generic/src/proc/thread.c (rb60c582)
@@ -1 +1 @@
 /*
- * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2001-2004 Jakub Jermar
  * All rights reserved.
  *
@@ -33 +33 @@
 /**
  * @file
- * @brief Thread management functions.
+ * @brief       Thread management functions.
  */
 
@@ -48 +48 @@
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
+#include <synch/rwlock.h>
 #include <cpu.h>
-#include <str.h>
+#include <func.h>
 #include <context.h>
 #include <adt/avl.h>
@@ -75 +76 @@
 
 /** Thread states */
-const char *thread_states[] = {
+char *thread_states[] = {
         "Invalid",
         "Running",
@@ -83 +84 @@
         "Exiting",
         "Lingering"
-};
-
-typedef struct {
-        thread_id_t thread_id;
-        thread_t *thread;
-} thread_iterator_t;
+};
 
 /** Lock protecting the threads_tree AVL tree.
  *
  * For locking rules, see declaration thereof.
- *
- */
-IRQ_SPINLOCK_INITIALIZE(threads_lock);
+ */
+SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
@@ -101 +96 @@
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- *
- */
-avltree_t threads_tree;
-
-IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
-static thread_id_t last_tid = 0;
+ */
+avltree_t threads_tree;
+
+SPINLOCK_INITIALIZE(tidlock);
+thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
-
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
@@ -127 +120 @@
         void *arg = THREAD->thread_arg;
         THREAD->last_cycle = get_cycle();
-
+
         /* This is where each thread wakes up after its creation */
-        irq_spinlock_unlock(&THREAD->lock, false);
+        spinlock_unlock(&THREAD->lock);
         interrupts_enable();
-
+
         f(arg);
 
         /* Accumulate accounting to the task */
-        irq_spinlock_lock(&THREAD->lock, true);
+        ipl_t ipl = interrupts_disable();
+
+        spinlock_lock(&THREAD->lock);
         if (!THREAD->uncounted) {
-                thread_update_accounting(true);
-                uint64_t ucycles = THREAD->ucycles;
-                THREAD->ucycles = 0;
-                uint64_t kcycles = THREAD->kcycles;
-                THREAD->kcycles = 0;
+                thread_update_accounting();
+                uint64_t cycles = THREAD->cycles;
+                THREAD->cycles = 0;
+                spinlock_unlock(&THREAD->lock);
 
-                irq_spinlock_pass(&THREAD->lock, &TASK->lock);
-                TASK->ucycles += ucycles;
-                TASK->kcycles += kcycles;
-                irq_spinlock_unlock(&TASK->lock, true);
+                spinlock_lock(&TASK->lock);
+                TASK->cycles += cycles;
+                spinlock_unlock(&TASK->lock);
         } else
-                irq_spinlock_unlock(&THREAD->lock, true);
+                spinlock_unlock(&THREAD->lock);
+
+        interrupts_restore(ipl);
 
         thread_exit();
-
-        /* Not reached */
-}
-
-/** Initialization and allocation for thread_t structure
- *
- */
-static int thr_constructor(void *obj, unsigned int kmflags)
-{
-        thread_t *thread = (thread_t *) obj;
-
-        irq_spinlock_initialize(&thread->lock, "thread_t_lock");
-        link_initialize(&thread->rq_link);
-        link_initialize(&thread->wq_link);
-        link_initialize(&thread->th_link);
-
+
+        /* not reached */
+}
+
+/** Initialization and allocation for thread_t structure */
+static int thr_constructor(void *obj, int kmflags)
+{
+        thread_t *t = (thread_t *) obj;
+
+        spinlock_initialize(&t->lock, "thread_t_lock");
+        link_initialize(&t->rq_link);
+        link_initialize(&t->wq_link);
+        link_initialize(&t->th_link);
+
         /* call the architecture-specific part of the constructor */
-        thr_constructor_arch(thread);
+        thr_constructor_arch(t);
 
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-        thread->saved_fpu_context = NULL;
-#else /* CONFIG_FPU_LAZY */
-        thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-        if (!thread->saved_fpu_context)
+        t->saved_fpu_context = NULL;
+#else
+        t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+        if (!t->saved_fpu_context)
                 return -1;
-#endif /* CONFIG_FPU_LAZY */
-#endif /* CONFIG_FPU */
-
-        thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-        if (!thread->kstack) {
+#endif
+#endif
+
+        t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+        if (!t->kstack) {
 #ifdef CONFIG_FPU
-                if (thread->saved_fpu_context)
-                        slab_free(fpu_context_slab, thread->saved_fpu_context);
+                if (t->saved_fpu_context)
+                        slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
                 return -1;
         }
-
+
 #ifdef CONFIG_UDEBUG
-        mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
-#endif
-
+        mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
+#endif
+
         return 0;
 }
 
 /** Destruction of thread_t object */
-static size_t thr_destructor(void *obj)
-{
-        thread_t *thread = (thread_t *) obj;
-
+static int thr_destructor(void *obj)
+{
+        thread_t *t = (thread_t *) obj;
+
         /* call the architecture-specific part of the destructor */
-        thr_destructor_arch(thread);
-
-        frame_free(KA2PA(thread->kstack));
-
+        thr_destructor_arch(t);
+
+        frame_free(KA2PA(t->kstack));
 #ifdef CONFIG_FPU
-        if (thread->saved_fpu_context)
-                slab_free(fpu_context_slab, thread->saved_fpu_context);
+        if (t->saved_fpu_context)
+                slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
-
-        return 1;  /* One page freed */
+        return 1; /* One page freed */
 }
 
@@ -222 +212 @@
 {
         THREAD = NULL;
-
         atomic_set(&nrdy, 0);
         thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
             thr_constructor, thr_destructor, 0);
-
+
 #ifdef CONFIG_FPU
         fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
             FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
-
+
         avltree_create(&threads_tree);
 }
@@ -237 +226 @@
 /** Make thread ready
  *
- * Switch thread to the ready state.
- *
- * @param thread Thread to make ready.
- *
- */
-void thread_ready(thread_t *thread)
-{
-        irq_spinlock_lock(&thread->lock, true);
-
-        ASSERT(thread->state != Ready);
-
-        int i = (thread->priority < RQ_COUNT - 1)
-            ? ++thread->priority : thread->priority;
-
-        cpu_t *cpu = CPU;
-        if (thread->flags & THREAD_FLAG_WIRED) {
-                ASSERT(thread->cpu != NULL);
-                cpu = thread->cpu;
+ * Switch thread t to the ready state.
+ *
+ * @param t Thread to make ready.
+ *
+ */
+void thread_ready(thread_t *t)
+{
+        cpu_t *cpu;
+        runq_t *r;
+        ipl_t ipl;
+        int i, avg;
+
+        ipl = interrupts_disable();
+
+        spinlock_lock(&t->lock);
+
+        ASSERT(!(t->state == Ready));
+
+        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
+
+        cpu = CPU;
+        if (t->flags & THREAD_FLAG_WIRED) {
+                ASSERT(t->cpu != NULL);
+                cpu = t->cpu;
         }
-        thread->state = Ready;
-
-        irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
+        t->state = Ready;
+        spinlock_unlock(&t->lock);
 
         /*
-         * Append thread to respective ready queue
-         * on respective processor.
+         * Append t to respective ready queue on respective processor.
          */
-
-        list_append(&thread->rq_link, &cpu->rq[i].rq_head);
-        cpu->rq[i].n++;
-        irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
+        r = &cpu->rq[i];
+        spinlock_lock(&r->lock);
+        list_append(&t->rq_link, &r->rq_head);
+        r->n++;
+        spinlock_unlock(&r->lock);
+
         atomic_inc(&nrdy);
-        // FIXME: Why is the avg value not used
-        // avg = atomic_get(&nrdy) / config.cpu_active;
+        avg = atomic_get(&nrdy) / config.cpu_active;
         atomic_inc(&cpu->nrdy);
+
+        interrupts_restore(ipl);
 }
 
@@ -279 +274 @@
  * Create a new thread.
  *
- * @param func      Thread's implementing function.
- * @param arg       Thread's implementing function argument.
- * @param task      Task to which the thread belongs. The caller must
- *                  guarantee that the task won't cease to exist during the
- *                  call. The task's lock may not be held.
- * @param flags     Thread flags.
- * @param name      Symbolic name (a copy is made).
- * @param uncounted Thread's accounting doesn't affect accumulated task
- *                  accounting.
- *
- * @return New thread's structure on success, NULL on failure.
+ * @param func          Thread's implementing function.
+ * @param arg           Thread's implementing function argument.
+ * @param task          Task to which the thread belongs. The caller must
+ *                      guarantee that the task won't cease to exist during the
+ *                      call. The task's lock may not be held.
+ * @param flags         Thread flags.
+ * @param name          Symbolic name (a copy is made).
+ * @param uncounted     Thread's accounting doesn't affect accumulated task
+ *                      accounting.
+ *
+ * @return              New thread's structure on success, NULL on failure.
  *
  */
 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    unsigned int flags, const char *name, bool uncounted)
-{
-        thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
-        if (!thread)
+    int flags, char *name, bool uncounted)
+{
+        thread_t *t;
+        ipl_t ipl;
+
+        t = (thread_t *) slab_alloc(thread_slab, 0);
+        if (!t)
                 return NULL;
 
         /* Not needed, but good for debugging */
-        memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-
-        irq_spinlock_lock(&tidlock, true);
-        thread->tid = ++last_tid;
-        irq_spinlock_unlock(&tidlock, true);
-
-        context_save(&thread->saved_context);
-        context_set(&thread->saved_context, FADDR(cushion),
-            (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
-
-        the_initialize((the_t *) thread->kstack);
-
-        ipl_t ipl = interrupts_disable();
-        thread->saved_context.ipl = interrupts_read();
+        memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+
+        ipl = interrupts_disable();
+        spinlock_lock(&tidlock);
+        t->tid = ++last_tid;
+        spinlock_unlock(&tidlock);
         interrupts_restore(ipl);
 
-        str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
-
-        thread->thread_code = func;
-        thread->thread_arg = arg;
-        thread->ticks = -1;
-        thread->ucycles = 0;
-        thread->kcycles = 0;
-        thread->uncounted = uncounted;
-        thread->priority = -1;          /* Start in rq[0] */
-        thread->cpu = NULL;
-        thread->flags = flags;
-        thread->state = Entering;
-
-        timeout_initialize(&thread->sleep_timeout);
-        thread->sleep_interruptible = false;
-        thread->sleep_queue = NULL;
-        thread->timeout_pending = false;
-
-        thread->in_copy_from_uspace = false;
-        thread->in_copy_to_uspace = false;
-
-        thread->interrupted = false;
-        thread->detached = false;
-        waitq_initialize(&thread->join_wq);
-
-        thread->task = task;
-
-        thread->fpu_context_exists = 0;
-        thread->fpu_context_engaged = 0;
-
-        avltree_node_initialize(&thread->threads_tree_node);
-        thread->threads_tree_node.key = (uintptr_t) thread;
-
+        context_save(&t->saved_context);
+        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
+            THREAD_STACK_SIZE);
+
+        the_initialize((the_t *) t->kstack);
+
+        ipl = interrupts_disable();
+        t->saved_context.ipl = interrupts_read();
+        interrupts_restore(ipl);
+
+        memcpy(t->name, name, THREAD_NAME_BUFLEN);
+        t->name[THREAD_NAME_BUFLEN - 1] = 0;
+
+        t->thread_code = func;
+        t->thread_arg = arg;
+        t->ticks = -1;
+        t->cycles = 0;
+        t->uncounted = uncounted;
+        t->priority = -1;               /* start in rq[0] */
+        t->cpu = NULL;
+        t->flags = flags;
+        t->state = Entering;
+        t->call_me = NULL;
+        t->call_me_with = NULL;
+
+        timeout_initialize(&t->sleep_timeout);
+        t->sleep_interruptible = false;
+        t->sleep_queue = NULL;
+        t->timeout_pending = 0;
+
+        t->in_copy_from_uspace = false;
+        t->in_copy_to_uspace = false;
+
+        t->interrupted = false;
+        t->detached = false;
+        waitq_initialize(&t->join_wq);
+
+        t->rwlock_holder_type = RWLOCK_NONE;
+
+        t->task = task;
+
+        t->fpu_context_exists = 0;
+        t->fpu_context_engaged = 0;
+
+        avltree_node_initialize(&t->threads_tree_node);
+        t->threads_tree_node.key = (uintptr_t) t;
+
 #ifdef CONFIG_UDEBUG
-        /* Initialize debugging stuff */
-        thread->btrace = false;
-        udebug_thread_initialize(&thread->udebug);
-#endif
-
-        /* Might depend on previous initialization */
-        thread_create_arch(thread);
-
+        /* Init debugging stuff */
+        udebug_thread_initialize(&t->udebug);
+#endif
+
+        /* might depend on previous initialization */
+        thread_create_arch(t);
+
         if (!(flags & THREAD_FLAG_NOATTACH))
-                thread_attach(thread, task);
-
-        return thread;
+                thread_attach(t, task);
+
+        return t;
 }
 
@@ -368 +371 @@
  * Detach thread from all queues, cpus etc. and destroy it.
  *
- * @param thread  Thread to be destroyed.
- * @param irq_res Indicate whether it should unlock thread->lock
- *                in interrupts-restore mode.
- *
- */
-void thread_destroy(thread_t *thread, bool irq_res)
-{
-        ASSERT(irq_spinlock_locked(&thread->lock));
-        ASSERT((thread->state == Exiting) || (thread->state == Lingering));
-        ASSERT(thread->task);
-        ASSERT(thread->cpu);
-
-        irq_spinlock_lock(&thread->cpu->lock, false);
-        if (thread->cpu->fpu_owner == thread)
-                thread->cpu->fpu_owner = NULL;
-        irq_spinlock_unlock(&thread->cpu->lock, false);
-
-        irq_spinlock_pass(&thread->lock, &threads_lock);
-
-        avltree_delete(&threads_tree, &thread->threads_tree_node);
-
-        irq_spinlock_pass(&threads_lock, &thread->task->lock);
-
+ * Assume thread->lock is held!!
+ */
+void thread_destroy(thread_t *t)
+{
+        ASSERT(t->state == Exiting || t->state == Lingering);
+        ASSERT(t->task);
+        ASSERT(t->cpu);
+
+        spinlock_lock(&t->cpu->lock);
+        if (t->cpu->fpu_owner == t)
+                t->cpu->fpu_owner = NULL;
+        spinlock_unlock(&t->cpu->lock);
+
+        spinlock_unlock(&t->lock);
+
+        spinlock_lock(&threads_lock);
+        avltree_delete(&threads_tree, &t->threads_tree_node);
+        spinlock_unlock(&threads_lock);
+
         /*
          * Detach from the containing task.
          */
-        list_remove(&thread->th_link);
-        irq_spinlock_unlock(&thread->task->lock, irq_res);
-
+        spinlock_lock(&t->task->lock);
+        list_remove(&t->th_link);
+        spinlock_unlock(&t->task->lock);
+
         /*
-         * Drop the reference to the containing task.
+         * t is guaranteed to be the very last thread of its task.
+         * It is safe to destroy the task.
          */
-        task_release(thread->task);
-        slab_free(thread_slab, thread);
+        if (atomic_predec(&t->task->refcount) == 0)
+                task_destroy(t->task);
+
+        slab_free(thread_slab, t);
 }
 
@@ -409 +412 @@
  * threads_tree.
  *
- * @param t    Thread to be attached to the task.
- * @param task Task to which the thread is to be attached.
- *
- */
-void thread_attach(thread_t *thread, task_t *task)
-{
+ * @param t     Thread to be attached to the task.
+ * @param task  Task to which the thread is to be attached.
+ */
+void thread_attach(thread_t *t, task_t *task)
+{
+        ipl_t ipl;
+
         /*
          * Attach to the specified task.
          */
-        irq_spinlock_lock(&task->lock, true);
-
-        /* Hold a reference to the task. */
-        task_hold(task);
-
+        ipl = interrupts_disable();
+        spinlock_lock(&task->lock);
+
+        atomic_inc(&task->refcount);
+
         /* Must not count kbox thread into lifecount */
-        if (thread->flags & THREAD_FLAG_USPACE)
+        if (t->flags & THREAD_FLAG_USPACE)
                 atomic_inc(&task->lifecount);
-
-        list_append(&thread->th_link, &task->th_head);
-
-        irq_spinlock_pass(&task->lock, &threads_lock);
-
+
+        list_append(&t->th_link, &task->th_head);
+        spinlock_unlock(&task->lock);
+
         /*
          * Register this thread in the system-wide list.
          */
-        avltree_insert(&threads_tree, &thread->threads_tree_node);
-        irq_spinlock_unlock(&threads_lock, true);
+        spinlock_lock(&threads_lock);
+        avltree_insert(&threads_tree, &t->threads_tree_node);
+        spinlock_unlock(&threads_lock);
+
+        interrupts_restore(ipl);
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state.
- * All pending timeouts are executed.
- *
+ * End current thread execution and switch it to the exiting state. All pending
+ * timeouts are executed.
  */
 void thread_exit(void)
 {
+        ipl_t ipl;
+
         if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
                 /* Generate udebug THREAD_E event */
                 udebug_thread_e_event();
-
-                /*
-                 * This thread will not execute any code or system calls from
-                 * now on.
-                 */
-                udebug_stoppable_begin();
 #endif
                 if (atomic_predec(&TASK->lifecount) == 0) {
@@ -464 +465 @@
                          * can only be created by threads of the same task.
                          * We are safe to perform cleanup.
-                         *
                          */
                         ipc_cleanup();
@@ -471 +471 @@
                 }
         }
-
+
 restart:
-        irq_spinlock_lock(&THREAD->lock, true);
-        if (THREAD->timeout_pending) {
-                /* Busy waiting for timeouts in progress */
-                irq_spinlock_unlock(&THREAD->lock, true);
+        ipl = interrupts_disable();
+        spinlock_lock(&THREAD->lock);
+        if (THREAD->timeout_pending) {
+                /* busy waiting for timeouts in progress */
+                spinlock_unlock(&THREAD->lock);
+                interrupts_restore(ipl);
                 goto restart;
         }
 
         THREAD->state = Exiting;
-        irq_spinlock_unlock(&THREAD->lock, true);
-
+        spinlock_unlock(&THREAD->lock);
         scheduler();
-
+
         /* Not reached */
-        while (true);
-}
+        while (1)
+                ;
+}
+
 
 /** Thread sleep
@@ -498 +501 @@
 void thread_sleep(uint32_t sec)
 {
-        /* Sleep in 1000 second steps to support
-           full argument range */
-        while (sec > 0) {
-                uint32_t period = (sec > 1000) ? 1000 : sec;
-
-                thread_usleep(period * 1000000);
-                sec -= period;
-        }
+        thread_usleep(sec * 1000000);
 }
 
 /** Wait for another thread to exit.
  *
- * @param thread Thread to join on exit.
- * @param usec   Timeout in microseconds.
- * @param flags  Mode of operation.
+ * @param t Thread to join on exit.
+ * @param usec Timeout in microseconds.
+ * @param flags Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- *
- */
-int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
-{
-        if (thread == THREAD)
+ */
+int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
+{
+        ipl_t ipl;
+        int rc;
+
+        if (t == THREAD)
                 return EINVAL;
-
+
         /*
          * Since thread join can only be called once on an undetached thread,
@@ -527 +525 @@
          */
 
-        irq_spinlock_lock(&thread->lock, true);
-        ASSERT(!thread->detached);
-        irq_spinlock_unlock(&thread->lock, true);
-
-        return waitq_sleep_timeout(&thread->join_wq, usec, flags);
+        ipl = interrupts_disable();
+        spinlock_lock(&t->lock);
+        ASSERT(!t->detached);
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+
+        rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
+
+        return rc;
 }
 
 /** Detach thread.
  *
- * Mark the thread as detached. If the thread is already
- * in the Lingering state, deallocate its resources.
- *
- * @param thread Thread to be detached.
- *
- */
-void thread_detach(thread_t *thread)
-{
+ * Mark the thread as detached, if the thread is already in the Lingering
+ * state, deallocate its resources.
+ *
+ * @param t Thread to be detached.
+ */
+void thread_detach(thread_t *t)
+{
+        ipl_t ipl;
+
         /*
          * Since the thread is expected not to be already detached,
          * pointer to it must be still valid.
          */
-        irq_spinlock_lock(&thread->lock, true);
-        ASSERT(!thread->detached);
-
-        if (thread->state == Lingering) {
-                /*
-                 * Unlock &thread->lock and restore
-                 * interrupts in thread_destroy().
-                 */
-                thread_destroy(thread, true);
+        ipl = interrupts_disable();
+        spinlock_lock(&t->lock);
+        ASSERT(!t->detached);
+        if (t->state == Lingering) {
+                thread_destroy(t);      /* unlocks &t->lock */
+                interrupts_restore(ipl);
                 return;
         } else {
-                thread->detached = true;
+                t->detached = true;
         }
-
-        irq_spinlock_unlock(&thread->lock, true);
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
 }
 
@@ -575 +575 @@
 {
         waitq_t wq;
-
+
         waitq_initialize(&wq);
-
+
         (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
 }
 
+/** Register thread out-of-context invocation
+ *
+ * Register a function and its argument to be executed
+ * on next context switch to the current thread.
+ *
+ * @param call_me      Out-of-context function.
+ * @param call_me_with Out-of-context function argument.
+ *
+ */
+void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
+{
+        ipl_t ipl;
+
+        ipl = interrupts_disable();
+        spinlock_lock(&THREAD->lock);
+        THREAD->call_me = call_me;
+        THREAD->call_me_with = call_me_with;
+        spinlock_unlock(&THREAD->lock);
+        interrupts_restore(ipl);
+}
+
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-        bool *additional = (bool *) arg;
-        thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
-
-        uint64_t ucycles, kcycles;
-        char usuffix, ksuffix;
-        order_suffix(thread->ucycles, &ucycles, &usuffix);
-        order_suffix(thread->kcycles, &kcycles, &ksuffix);
-
-        char *name;
-        if (str_cmp(thread->name, "uinit") == 0)
-                name = thread->task->name;
+        thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
+
+        uint64_t cycles;
+        char suffix;
+        order(t->cycles, &cycles, &suffix);
+
+#ifdef __32_BITS__
+        printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ",
+            t->tid, t->name, t, thread_states[t->state], t->task,
+        t->task->context, t->thread_code, t->kstack, cycles, suffix);
+#endif
+
+#ifdef __64_BITS__
+        printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ",
+            t->tid, t->name, t, thread_states[t->state], t->task,
+        t->task->context, t->thread_code, t->kstack, cycles, suffix);
+#endif
+
+        if (t->cpu)
+                printf("%-4u", t->cpu->id);
         else
-                name = thread->name;
-
+                printf("none");
+
+        if (t->state == Sleeping) {
 #ifdef __32_BITS__
-        if (*additional)
-                printf("%-8" PRIu64 " %10p %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
-                    thread->tid, thread->thread_code, thread->kstack,
-                    ucycles, usuffix, kcycles, ksuffix);
-        else
-                printf("%-8" PRIu64 " %-14s %10p %-8s %10p %-5" PRIu32 "\n",
-                    thread->tid, name, thread, thread_states[thread->state],
-                    thread->task, thread->task->context);
-#endif
-
+                printf(" %10p", t->sleep_queue);
+#endif
+
 #ifdef __64_BITS__
-        if (*additional)
-                printf("%-8" PRIu64 " %18p %18p\n"
-                    "         %9" PRIu64 "%c %9" PRIu64 "%c ",
-                    thread->tid, thread->thread_code, thread->kstack,
-                    ucycles, usuffix, kcycles, ksuffix);
-        else
-                printf("%-8" PRIu64 " %-14s %18p %-8s %18p %-5" PRIu32 "\n",
-                    thread->tid, name, thread, thread_states[thread->state],
-                    thread->task, thread->task->context);
-#endif
-
-        if (*additional) {
-                if (thread->cpu)
-                        printf("%-5u", thread->cpu->id);
-                else
-                        printf("none ");
-
-                if (thread->state == Sleeping) {
-#ifdef __32_BITS__
-                        printf(" %10p", thread->sleep_queue);
-#endif
+                printf(" %18p", t->sleep_queue);
+#endif
+        }
 
+        printf("\n");
+
+        return true;
+}
+
+/** Print list of threads debug info */
+void thread_print_list(void)
+{
+        ipl_t ipl;
+
+        /* Messing with thread structures, avoid deadlock */
+        ipl = interrupts_disable();
+        spinlock_lock(&threads_lock);
+
+#ifdef __32_BITS__
+        printf("tid    name       address    state    task       "
+                "ctx code       stack      cycles     cpu  "
+                "waitqueue\n");
+        printf("------ ---------- ---------- -------- ---------- "
+                "--- ---------- ---------- ---------- ---- "
+                "----------\n");
+#endif
+
 #ifdef __64_BITS__
-                        printf(" %18p", thread->sleep_queue);
-#endif
-                }
-
-                printf("\n");
-        }
-
-        return true;
-}
-
-/** Print list of threads debug info
- *
- * @param additional Print additional information.
- *
- */
-void thread_print_list(bool additional)
-{
-        /* Messing with thread structures, avoid deadlock */
-        irq_spinlock_lock(&threads_lock, true);
-
-#ifdef __32_BITS__
-        if (additional)
-                printf("[id    ] [code    ] [stack   ] [ucycles ] [kcycles ]"
-                    " [cpu] [waitqueue]\n");
-        else
-                printf("[id    ] [name        ] [address ] [state ] [task    ]"
-                    " [ctx]\n");
-#endif
-
-#ifdef __64_BITS__
-        if (additional) {
-                printf("[id    ] [code            ] [stack           ]\n"
-                    "         [ucycles ] [kcycles ] [cpu] [waitqueue       ]\n");
-        } else
-                printf("[id    ] [name        ] [address         ] [state ]"
-                    " [task            ] [ctx]\n");
-#endif
-
-        avltree_walk(&threads_tree, thread_walker, &additional);
-
-        irq_spinlock_unlock(&threads_lock, true);
+        printf("tid    name       address            state    task               "
+                "ctx code               stack              cycles     cpu  "
+                "waitqueue\n");
+        printf("------ ---------- ------------------ -------- ------------------ "
+                "--- ------------------ ------------------ ---------- ---- "
+                "------------------\n");
+#endif
+
+        avltree_walk(&threads_tree, thread_walker, NULL);
+
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
 }
 
@@ -680 +680 @@
  * interrupts must be already disabled.
  *
- * @param thread Pointer to thread.
+ * @param t Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- *
- */
-bool thread_exists(thread_t *thread)
-{
-        ASSERT(interrupts_disabled());
-        ASSERT(irq_spinlock_locked(&threads_lock));
-
-        avltree_node_t *node =
-            avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
+ */
+bool thread_exists(thread_t *t)
+{
+        avltree_node_t *node;
+
+        node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
 
         return node != NULL;
@@ -701 +698 @@
  * interrupts must be already disabled.
  *
- * @param user True to update user accounting, false for kernel.
- *
- */
-void thread_update_accounting(bool user)
+ */
+void thread_update_accounting(void)
 {
         uint64_t time = get_cycle();
-
-        ASSERT(interrupts_disabled());
-        ASSERT(irq_spinlock_locked(&THREAD->lock));
-
-        if (user)
-                THREAD->ucycles += time - THREAD->last_cycle;
-        else
-                THREAD->kcycles += time - THREAD->last_cycle;
-
+        THREAD->cycles += time - THREAD->last_cycle;
         THREAD->last_cycle = time;
 }
 
-static bool thread_search_walker(avltree_node_t *node, void *arg)
-{
-        thread_t *thread =
-            (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
-        thread_iterator_t *iterator = (thread_iterator_t *) arg;
-
-        if (thread->tid == iterator->thread_id) {
-                iterator->thread = thread;
-                return false;
-        }
-
-        return true;
-}
-
-/** Find thread structure corresponding to thread ID.
- *
- * The threads_lock must be already held by the caller of this function and
- * interrupts must be disabled.
- *
- * @param id Thread ID.
- *
- * @return Thread structure address or NULL if there is no such thread ID.
- *
- */
-thread_t *thread_find_by_id(thread_id_t thread_id)
-{
-        ASSERT(interrupts_disabled());
-        ASSERT(irq_spinlock_locked(&threads_lock));
-
-        thread_iterator_t iterator;
-
-        iterator.thread_id = thread_id;
-        iterator.thread = NULL;
-
-        avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
-
-        return iterator.thread;
-}
-
-#ifdef CONFIG_UDEBUG
-
-void thread_stack_trace(thread_id_t thread_id)
-{
-        irq_spinlock_lock(&threads_lock, true);
-
-        thread_t *thread = thread_find_by_id(thread_id);
-        if (thread == NULL) {
-                printf("No such thread.\n");
-                irq_spinlock_unlock(&threads_lock, true);
-                return;
-        }
-
-        irq_spinlock_lock(&thread->lock, false);
-
-        /*
-         * Schedule a stack trace to be printed
-         * just before the thread is scheduled next.
-         *
-         * If the thread is sleeping then try to interrupt
-         * the sleep. Any request for printing an uspace stack
-         * trace from within the kernel should be always
-         * considered a last resort debugging means, therefore
-         * forcing the thread's sleep to be interrupted
-         * is probably justifiable.
-         */
-
-        bool sleeping = false;
-        istate_t *istate = thread->udebug.uspace_state;
-        if (istate != NULL) {
-                printf("Scheduling thread stack trace.\n");
-                thread->btrace = true;
-                if (thread->state == Sleeping)
-                        sleeping = true;
-        } else
-                printf("Thread interrupt state not available.\n");
-
-        irq_spinlock_unlock(&thread->lock, false);
-
-        if (sleeping)
-                waitq_interrupt_sleep(thread);
-
-        irq_spinlock_unlock(&threads_lock, true);
-}
-
-#endif /* CONFIG_UDEBUG */
-
 /** Process syscall to create new thread.
  *
  */
-sysarg_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
+unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name,
     size_t name_len, thread_id_t *uspace_thread_id)
 {
+        thread_t *t;
+        char namebuf[THREAD_NAME_BUFLEN];
+        uspace_arg_t *kernel_uarg;
+        int rc;
+
         if (name_len > THREAD_NAME_BUFLEN - 1)
                 name_len = THREAD_NAME_BUFLEN - 1;
-
-        char namebuf[THREAD_NAME_BUFLEN];
-        int rc = copy_from_uspace(namebuf, uspace_name, name_len);
+
+        rc = copy_from_uspace(namebuf, uspace_name, name_len);
         if (rc != 0)
-                return (sysarg_t) rc;
-
+                return (unative_t) rc;
+
         namebuf[name_len] = 0;
-
+
         /*
          * In case of failure, kernel_uarg will be deallocated in this function.
          * In case of success, kernel_uarg will be freed in uinit().
-         *
          */
-        uspace_arg_t *kernel_uarg =
-            (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+        kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
 
         rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
         if (rc != 0) {
                 free(kernel_uarg);
-                return (sysarg_t) rc;
+                return (unative_t) rc;
         }
-
-        thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
+
+        t = thread_create(uinit, kernel_uarg, TASK,
             THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-        if (thread) {
+        if (t) {
                 if (uspace_thread_id != NULL) {
-                        rc = copy_to_uspace(uspace_thread_id, &thread->tid,
-                            sizeof(thread->tid));
+                        int rc;
+
+                        rc = copy_to_uspace(uspace_thread_id, &t->tid,
+                            sizeof(t->tid));
                         if (rc != 0) {
                                 /*
@@ -847 +752 @@
                                  * creation now.
                                  */
-
+
                                 /*
                                  * The new thread structure is initialized, but
@@ -853 +758 @@
                                  * We can safely deallocate it.
                                  */
-                                slab_free(thread_slab, thread);
-                                free(kernel_uarg);
-
-                                return (sysarg_t) rc;
+                                slab_free(thread_slab, t);
+                                free(kernel_uarg);
+
+                                return (unative_t) rc;
                         }
                 }
-
 #ifdef CONFIG_UDEBUG
                 /*
@@ -868 +772 @@
                  * and could be detected with THREAD_READ before.
                  */
-                udebug_thread_b_event_attach(thread, TASK);
+                udebug_thread_b_event_attach(t, TASK);
 #else
-                thread_attach(thread, TASK);
-#endif
-                thread_ready(thread);
-
+                thread_attach(t, TASK);
+#endif
+                thread_ready(t);
+
                 return 0;
         } else
                 free(kernel_uarg);
-
-        return (sysarg_t) ENOMEM;
+
+        return (unative_t) ENOMEM;
 }
 
@@ -884 +788 @@
  *
  */
-sysarg_t sys_thread_exit(int uspace_status)
+unative_t sys_thread_exit(int uspace_status)
 {
         thread_exit();
-
         /* Unreachable */
         return 0;
@@ -898 +801 @@
  *
  * @return 0 on success or an error code from @ref errno.h.
- *
- */
-sysarg_t sys_thread_get_id(thread_id_t *uspace_thread_id)
+ */
+unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
 {
         /*
          * No need to acquire lock on THREAD because tid
          * remains constant for the lifespan of the thread.
-         *
          */
-        return (sysarg_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
+        return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
             sizeof(THREAD->tid));
 }
 
-/** Syscall wrapper for sleeping. */
-sysarg_t sys_thread_usleep(uint32_t usec)
-{
-        thread_usleep(usec);
-        return 0;
-}
-
 /** @}
  */
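Most of this changeset is a mechanical translation between two locking idioms. On the rae0300b5 side, the irq_spinlock_* calls take a bool that tells the lock itself whether to disable and later restore interrupts (irq_spinlock_pass() extends this to hand-over-hand locking, carrying the saved interrupt level from one lock to the next). On the rb60c582 side, every call site manages an explicit ipl_t around a plain spinlock. A minimal sketch of the two call patterns follows; the primitives are toy stand-ins written for illustration, and the detail that irq_spinlock_t stores the saved interrupt level internally is an assumption inferred from the call sites in this diff, not code shown in it.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins: in the real kernel these disable CPU interrupts and
     * spin on an atomic flag. Here they only model the call pattern. */
    typedef unsigned ipl_t;
    typedef struct { bool held; } spinlock_t;

    static ipl_t interrupts_disable(void) { return 0; /* previous level */ }
    static void interrupts_restore(ipl_t ipl) { (void) ipl; }
    static void spinlock_lock(spinlock_t *l) { l->held = true; }
    static void spinlock_unlock(spinlock_t *l) { l->held = false; }

    /* rb60c582-style pattern: the caller keeps an explicit ipl_t around
     * the spinlock in every critical section. */
    static void old_style(spinlock_t *l)
    {
            ipl_t ipl = interrupts_disable();
            spinlock_lock(l);
            /* ... critical section ... */
            spinlock_unlock(l);
            interrupts_restore(ipl);
    }

    /* irq_spinlock pattern (rae0300b5 side): the bool argument tells the
     * lock whether to disable/restore interrupts itself, so the ipl_t
     * bookkeeping disappears from the call sites. Storing the saved
     * level inside the lock is an assumption made for this sketch. */
    typedef struct { spinlock_t lock; ipl_t ipl; } irq_spinlock_t;

    static void irq_spinlock_lock(irq_spinlock_t *l, bool irq_dis)
    {
            if (irq_dis)
                    l->ipl = interrupts_disable();
            spinlock_lock(&l->lock);
    }

    static void irq_spinlock_unlock(irq_spinlock_t *l, bool irq_res)
    {
            spinlock_unlock(&l->lock);
            if (irq_res)
                    interrupts_restore(l->ipl);
    }

    int main(void)
    {
            spinlock_t plain = { false };
            irq_spinlock_t irq = { { false }, 0 };

            old_style(&plain);

            irq_spinlock_lock(&irq, true);
            /* ... critical section ... */
            irq_spinlock_unlock(&irq, true);

            puts("both idioms executed");
            return 0;
    }

The practical effect, visible throughout the diff, is that the local ipl variable and the interrupts_disable()/interrupts_restore() pair vanish from every function that adopts the irq_spinlock_* form.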
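A behavioral detail that is easy to miss sits in thread_sleep(): sec * 1000000 is a 32-bit multiplication, so the single-call form on the rb60c582 side wraps around for any sec greater than 4294, while the rae0300b5 side sleeps in steps of at most 1000 seconds to, as its comment says, support the full argument range. The sketch below is a self-contained demonstration of the difference; thread_usleep() is replaced by a stand-in that merely counts the requested microseconds.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's thread_usleep(): records the requested
     * microseconds so the arithmetic can be checked. */
    static uint64_t slept_usec;
    static void thread_usleep(uint32_t usec) { slept_usec += usec; }

    /* Naive version (rb60c582 side): sec * 1000000 is computed in
     * 32 bits, so any sec > 4294 wraps around. */
    static void thread_sleep_naive(uint32_t sec)
    {
            thread_usleep(sec * 1000000);
    }

    /* Chunked version (rae0300b5 side): sleep in steps of at most 1000
     * seconds so each step fits comfortably in uint32_t. */
    static void thread_sleep_chunked(uint32_t sec)
    {
            while (sec > 0) {
                    uint32_t period = (sec > 1000) ? 1000 : sec;
                    thread_usleep(period * 1000000);
                    sec -= period;
            }
    }

    int main(void)
    {
            slept_usec = 0;
            thread_sleep_naive(5000);   /* 5000 * 1000000 overflows */
            printf("naive:   %" PRIu64 " usec\n", slept_usec);

            slept_usec = 0;
            thread_sleep_chunked(5000);
            printf("chunked: %" PRIu64 " usec\n", slept_usec);
            return 0;
    }

Run it and the naive variant reports 705032704 microseconds for a requested 5000 seconds (5 * 10^9 mod 2^32), while the chunked variant reports the full 5000000000.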