  • kernel/generic/src/proc/thread.c (diff between r2d3ddad and r7ed8530)
    3333/**
    3434 * @file
    35  * @brief Thread management functions.
     35 * @brief       Thread management functions.
    3636 */
    3737
     
    9494 *
    9595 * For locking rules, see declaration thereof.
    96  *
    97  */
    98 IRQ_SPINLOCK_INITIALIZE(threads_lock);
     96 */
     97SPINLOCK_INITIALIZE(threads_lock);
    9998
    10099/** AVL tree of all threads.
     
    102101 * When a thread is found in the threads_tree AVL tree, it is guaranteed to
    103102 * exist as long as the threads_lock is held.
    104  *
    105  */
    106 avltree_t threads_tree;
    107 
    108 IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
    109 static thread_id_t last_tid = 0;
     103 */
     104avltree_t threads_tree;         
     105
     106SPINLOCK_INITIALIZE(tidlock);
     107thread_id_t last_tid = 0;
    110108
    111109static slab_cache_t *thread_slab;
    112 
    113110#ifdef CONFIG_FPU
    114111slab_cache_t *fpu_context_slab;
     
    128125        void *arg = THREAD->thread_arg;
    129126        THREAD->last_cycle = get_cycle();
    130        
     127
    131128        /* This is where each thread wakes up after its creation */
    132         irq_spinlock_unlock(&THREAD->lock, false);
     129        spinlock_unlock(&THREAD->lock);
    133130        interrupts_enable();
    134        
     131
    135132        f(arg);
    136133       
    137134        /* Accumulate accounting to the task */
    138         irq_spinlock_lock(&THREAD->lock, true);
     135        ipl_t ipl = interrupts_disable();
     136       
     137        spinlock_lock(&THREAD->lock);
    139138        if (!THREAD->uncounted) {
    140139                thread_update_accounting(true);
     
    143142                uint64_t kcycles = THREAD->kcycles;
    144143                THREAD->kcycles = 0;
     144
     145                spinlock_unlock(&THREAD->lock);
    145146               
    146                 irq_spinlock_pass(&THREAD->lock, &TASK->lock);
     147                spinlock_lock(&TASK->lock);
    147148                TASK->ucycles += ucycles;
    148149                TASK->kcycles += kcycles;
    149                 irq_spinlock_unlock(&TASK->lock, true);
     150                spinlock_unlock(&TASK->lock);
    150151        } else
    151                 irq_spinlock_unlock(&THREAD->lock, true);
     152                spinlock_unlock(&THREAD->lock);
     153       
     154        interrupts_restore(ipl);
    152155       
    153156        thread_exit();
    154        
    155         /* Not reached */
    156 }
    157 
    158 /** Initialization and allocation for thread_t structure
    159  *
    160  */
    161 static int thr_constructor(void *obj, unsigned int kmflags)
    162 {
    163         thread_t *thread = (thread_t *) obj;
    164        
    165         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
    166         link_initialize(&thread->rq_link);
    167         link_initialize(&thread->wq_link);
    168         link_initialize(&thread->th_link);
    169        
     157        /* not reached */
     158}
     159
     160/** Initialization and allocation for thread_t structure */
     161static int thr_constructor(void *obj, int kmflags)
     162{
     163        thread_t *t = (thread_t *) obj;
     164
     165        spinlock_initialize(&t->lock, "thread_t_lock");
     166        link_initialize(&t->rq_link);
     167        link_initialize(&t->wq_link);
     168        link_initialize(&t->th_link);
     169
    170170        /* call the architecture-specific part of the constructor */
    171         thr_constructor_arch(thread);
     171        thr_constructor_arch(t);
    172172       
    173173#ifdef CONFIG_FPU
    174174#ifdef CONFIG_FPU_LAZY
    175         thread->saved_fpu_context = NULL;
    176 #else /* CONFIG_FPU_LAZY */
    177         thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
    178         if (!thread->saved_fpu_context)
     175        t->saved_fpu_context = NULL;
     176#else
     177        t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
     178        if (!t->saved_fpu_context)
    179179                return -1;
    180 #endif /* CONFIG_FPU_LAZY */
    181 #endif /* CONFIG_FPU */
    182        
    183         thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
    184         if (!thread->kstack) {
     180#endif
     181#endif
     182
     183        t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
     184        if (!t->kstack) {
    185185#ifdef CONFIG_FPU
    186                 if (thread->saved_fpu_context)
    187                         slab_free(fpu_context_slab, thread->saved_fpu_context);
     186                if (t->saved_fpu_context)
     187                        slab_free(fpu_context_slab, t->saved_fpu_context);
    188188#endif
    189189                return -1;
    190190        }
    191        
     191
    192192#ifdef CONFIG_UDEBUG
    193         mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
    194 #endif
    195        
     193        mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
     194#endif
     195
    196196        return 0;
    197197}
    198198
    199199/** Destruction of thread_t object */
    200 static size_t thr_destructor(void *obj)
    201 {
    202         thread_t *thread = (thread_t *) obj;
    203        
     200static int thr_destructor(void *obj)
     201{
     202        thread_t *t = (thread_t *) obj;
     203
    204204        /* call the architecture-specific part of the destructor */
    205         thr_destructor_arch(thread);
    206        
    207         frame_free(KA2PA(thread->kstack));
    208        
     205        thr_destructor_arch(t);
     206
     207        frame_free(KA2PA(t->kstack));
    209208#ifdef CONFIG_FPU
    210         if (thread->saved_fpu_context)
    211                 slab_free(fpu_context_slab, thread->saved_fpu_context);
    212 #endif
    213        
    214         return 1;  /* One page freed */
     209        if (t->saved_fpu_context)
     210                slab_free(fpu_context_slab, t->saved_fpu_context);
     211#endif
     212        return 1; /* One page freed */
    215213}
    216214
     
    223221{
    224222        THREAD = NULL;
    225        
    226223        atomic_set(&nrdy, 0);
    227224        thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
    228225            thr_constructor, thr_destructor, 0);
    229        
     226
    230227#ifdef CONFIG_FPU
    231228        fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
    232229            FPU_CONTEXT_ALIGN, NULL, NULL, 0);
    233230#endif
    234        
     231
    235232        avltree_create(&threads_tree);
    236233}
     
    238235/** Make thread ready
    239236 *
    240  * Switch thread to the ready state.
     237 * Switch thread t to the ready state.
    241238 *
    242239 * @param t Thread to make ready.
    243240 *
    244241 */
    245 void thread_ready(thread_t *thread)
    246 {
    247         irq_spinlock_lock(&thread->lock, true);
    248        
    249         ASSERT(!(thread->state == Ready));
    250        
    251         int i = (thread->priority < RQ_COUNT - 1)
    252             ? ++thread->priority : thread->priority;
    253        
    254         cpu_t *cpu = CPU;
    255         if (thread->flags & THREAD_FLAG_WIRED) {
    256                 ASSERT(thread->cpu != NULL);
    257                 cpu = thread->cpu;
    258         }
    259         thread->state = Ready;
    260        
    261         irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     242void thread_ready(thread_t *t)
     243{
     244        cpu_t *cpu;
     245        runq_t *r;
     246        ipl_t ipl;
     247        int i, avg;
     248
     249        ipl = interrupts_disable();
     250
     251        spinlock_lock(&t->lock);
     252
     253        ASSERT(!(t->state == Ready));
     254
     255        i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
     256       
     257        cpu = CPU;
     258        if (t->flags & THREAD_FLAG_WIRED) {
     259                ASSERT(t->cpu != NULL);
     260                cpu = t->cpu;
     261        }
     262        t->state = Ready;
     263        spinlock_unlock(&t->lock);
    262264       
    263265        /*
    264          * Append thread to respective ready queue
    265          * on respective processor.
     266         * Append t to respective ready queue on respective processor.
    266267         */
    267        
    268         list_append(&thread->rq_link, &cpu->rq[i].rq_head);
    269         cpu->rq[i].n++;
    270         irq_spinlock_unlock(&(cpu->rq[i].lock), true);
    271        
     268        r = &cpu->rq[i];
     269        spinlock_lock(&r->lock);
     270        list_append(&t->rq_link, &r->rq_head);
     271        r->n++;
     272        spinlock_unlock(&r->lock);
     273
    272274        atomic_inc(&nrdy);
    273         // FIXME: Why is the avg value not used
    274         // avg = atomic_get(&nrdy) / config.cpu_active;
     275        // FIXME: Why is the avg value never read?
     276        avg = atomic_get(&nrdy) / config.cpu_active;
    275277        atomic_inc(&cpu->nrdy);
     278
     279        interrupts_restore(ipl);
    276280}
    277281
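
    Most of the changes in this file swap one spinlock idiom for the other: the irq_spinlock_* calls on one side of the diff manage the interrupt state (and, via irq_spinlock_pass(), hand a held lock over to another one) on behalf of the caller, while the plain spinlock_* calls on the other side leave saving and restoring the interrupt level to explicit interrupts_disable()/interrupts_restore() pairs, as in thread_ready() above. A minimal sketch of the two equivalent patterns; the lock and counter names are illustrative only and do not appear in this changeset:

        static size_t demo_count;       /* hypothetical shared state */

        /* Idiom one: the wrapper records and restores the interrupt state itself. */
        IRQ_SPINLOCK_INITIALIZE(demo_irq_lock);

        static void demo_inc_irq(void)
        {
                irq_spinlock_lock(&demo_irq_lock, true);    /* true = also disable interrupts */
                demo_count++;
                irq_spinlock_unlock(&demo_irq_lock, true);  /* restore the saved state */
        }

        /* Idiom two: plain spinlock, the caller handles the interrupt level explicitly. */
        SPINLOCK_INITIALIZE(demo_plain_lock);

        static void demo_inc_plain(void)
        {
                ipl_t ipl = interrupts_disable();
                spinlock_lock(&demo_plain_lock);
                demo_count++;
                spinlock_unlock(&demo_plain_lock);
                interrupts_restore(ipl);
        }

    The irq_spinlock_pass(&a, &b) calls seen in cushion(), thread_destroy() and thread_attach() unlock a and then lock b without touching the interrupt state; the other side of the diff spells this out as an unlock/lock pair inside the same disabled-interrupts window.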
     
    280284 * Create a new thread.
    281285 *
    282  * @param func      Thread's implementing function.
    283  * @param arg       Thread's implementing function argument.
    284  * @param task      Task to which the thread belongs. The caller must
    285  *                  guarantee that the task won't cease to exist during the
    286  *                  call. The task's lock may not be held.
    287  * @param flags     Thread flags.
    288  * @param name      Symbolic name (a copy is made).
    289  * @param uncounted Thread's accounting doesn't affect accumulated task
    290  *                  accounting.
    291  *
    292  * @return New thread's structure on success, NULL on failure.
     286 * @param func          Thread's implementing function.
     287 * @param arg           Thread's implementing function argument.
     288 * @param task          Task to which the thread belongs. The caller must
     289 *                      guarantee that the task won't cease to exist during the
     290 *                      call. The task's lock may not be held.
     291 * @param flags         Thread flags.
     292 * @param name          Symbolic name (a copy is made).
     293 * @param uncounted     Thread's accounting doesn't affect accumulated task
     294 *                      accounting.
     295 *
     296 * @return              New thread's structure on success, NULL on failure.
    293297 *
    294298 */
    295299thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
    296     unsigned int flags, const char *name, bool uncounted)
    297 {
    298         thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
    299         if (!thread)
     300    int flags, const char *name, bool uncounted)
     301{
     302        thread_t *t;
     303        ipl_t ipl;
     304       
     305        t = (thread_t *) slab_alloc(thread_slab, 0);
     306        if (!t)
    300307                return NULL;
    301308       
    302309        /* Not needed, but good for debugging */
    303         memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
    304        
    305         irq_spinlock_lock(&tidlock, true);
    306         thread->tid = ++last_tid;
    307         irq_spinlock_unlock(&tidlock, true);
    308        
    309         context_save(&thread->saved_context);
    310         context_set(&thread->saved_context, FADDR(cushion),
    311             (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
    312        
    313         the_initialize((the_t *) thread->kstack);
    314        
    315         ipl_t ipl = interrupts_disable();
    316         thread->saved_context.ipl = interrupts_read();
     310        memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
     311       
     312        ipl = interrupts_disable();
     313        spinlock_lock(&tidlock);
     314        t->tid = ++last_tid;
     315        spinlock_unlock(&tidlock);
    317316        interrupts_restore(ipl);
    318317       
    319         str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
    320        
    321         thread->thread_code = func;
    322         thread->thread_arg = arg;
    323         thread->ticks = -1;
    324         thread->ucycles = 0;
    325         thread->kcycles = 0;
    326         thread->uncounted = uncounted;
    327         thread->priority = -1;          /* Start in rq[0] */
    328         thread->cpu = NULL;
    329         thread->flags = flags;
    330         thread->state = Entering;
    331         thread->call_me = NULL;
    332         thread->call_me_with = NULL;
    333        
    334         timeout_initialize(&thread->sleep_timeout);
    335         thread->sleep_interruptible = false;
    336         thread->sleep_queue = NULL;
    337         thread->timeout_pending = false;
    338        
    339         thread->in_copy_from_uspace = false;
    340         thread->in_copy_to_uspace = false;
    341        
    342         thread->interrupted = false;
    343         thread->detached = false;
    344         waitq_initialize(&thread->join_wq);
    345        
    346         thread->rwlock_holder_type = RWLOCK_NONE;
    347        
    348         thread->task = task;
    349        
    350         thread->fpu_context_exists = 0;
    351         thread->fpu_context_engaged = 0;
    352        
    353         avltree_node_initialize(&thread->threads_tree_node);
    354         thread->threads_tree_node.key = (uintptr_t) thread;
    355        
     318        context_save(&t->saved_context);
     319        context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
     320            THREAD_STACK_SIZE);
     321       
     322        the_initialize((the_t *) t->kstack);
     323       
     324        ipl = interrupts_disable();
     325        t->saved_context.ipl = interrupts_read();
     326        interrupts_restore(ipl);
     327       
     328        memcpy(t->name, name, THREAD_NAME_BUFLEN);
     329        t->name[THREAD_NAME_BUFLEN - 1] = 0;
     330       
     331        t->thread_code = func;
     332        t->thread_arg = arg;
     333        t->ticks = -1;
     334        t->ucycles = 0;
     335        t->kcycles = 0;
     336        t->uncounted = uncounted;
     337        t->priority = -1;               /* start in rq[0] */
     338        t->cpu = NULL;
     339        t->flags = flags;
     340        t->state = Entering;
     341        t->call_me = NULL;
     342        t->call_me_with = NULL;
     343       
     344        timeout_initialize(&t->sleep_timeout);
     345        t->sleep_interruptible = false;
     346        t->sleep_queue = NULL;
     347        t->timeout_pending = 0;
     348
     349        t->in_copy_from_uspace = false;
     350        t->in_copy_to_uspace = false;
     351
     352        t->interrupted = false;
     353        t->detached = false;
     354        waitq_initialize(&t->join_wq);
     355       
     356        t->rwlock_holder_type = RWLOCK_NONE;
     357               
     358        t->task = task;
     359       
     360        t->fpu_context_exists = 0;
     361        t->fpu_context_engaged = 0;
     362
     363        avltree_node_initialize(&t->threads_tree_node);
     364        t->threads_tree_node.key = (uintptr_t) t;
     365
    356366#ifdef CONFIG_UDEBUG
    357367        /* Init debugging stuff */
    358         udebug_thread_initialize(&thread->udebug);
    359 #endif
    360        
    361         /* Might depend on previous initialization */
    362         thread_create_arch(thread);
    363        
     368        udebug_thread_initialize(&t->udebug);
     369#endif
     370
     371        /* might depend on previous initialization */
     372        thread_create_arch(t); 
     373
    364374        if (!(flags & THREAD_FLAG_NOATTACH))
    365                 thread_attach(thread, task);
    366        
    367         return thread;
     375                thread_attach(t, task);
     376
     377        return t;
    368378}
    369379
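
    The parameters documented above are the same on both sides of the diff; only the comment alignment and the local variable naming change. A hedged usage sketch for a plain kernel thread follows; the worker name, flags value and error handling are illustrative and are not taken from this changeset:

        static void worker(void *arg)
        {
                (void) arg;
                /* ... do the work; returning lands back in cushion(),
                   which accumulates accounting and calls thread_exit() ... */
        }

        static int spawn_worker(void)
        {
                thread_t *t = thread_create(worker, NULL, TASK, 0, "worker", false);
                if (!t)
                        return ENOMEM;  /* thread_t or kernel stack allocation failed */

                thread_ready(t);        /* enqueue on a CPU run queue */
                return EOK;
        }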
     
    372382 * Detach thread from all queues, cpus etc. and destroy it.
    373383 *
    374  * @param thread  Thread to be destroyed.
    375  * @param irq_res Indicate whether it should unlock thread->lock
    376  *                in interrupts-restore mode.
    377  *
    378  */
    379 void thread_destroy(thread_t *thread, bool irq_res)
    380 {
    381         ASSERT(irq_spinlock_locked(&thread->lock));
    382         ASSERT((thread->state == Exiting) || (thread->state == Lingering));
    383         ASSERT(thread->task);
    384         ASSERT(thread->cpu);
    385        
    386         irq_spinlock_lock(&thread->cpu->lock, false);
    387         if (thread->cpu->fpu_owner == thread)
    388                 thread->cpu->fpu_owner = NULL;
    389         irq_spinlock_unlock(&thread->cpu->lock, false);
    390        
    391         irq_spinlock_pass(&thread->lock, &threads_lock);
    392        
    393         avltree_delete(&threads_tree, &thread->threads_tree_node);
    394        
    395         irq_spinlock_pass(&threads_lock, &thread->task->lock);
    396        
     384 * Assume thread->lock is held!!
     385 */
     386void thread_destroy(thread_t *t)
     387{
     388        ASSERT(t->state == Exiting || t->state == Lingering);
     389        ASSERT(t->task);
     390        ASSERT(t->cpu);
     391
     392        spinlock_lock(&t->cpu->lock);
     393        if (t->cpu->fpu_owner == t)
     394                t->cpu->fpu_owner = NULL;
     395        spinlock_unlock(&t->cpu->lock);
     396
     397        spinlock_unlock(&t->lock);
     398
     399        spinlock_lock(&threads_lock);
     400        avltree_delete(&threads_tree, &t->threads_tree_node);
     401        spinlock_unlock(&threads_lock);
     402
    397403        /*
    398404         * Detach from the containing task.
    399405         */
    400         list_remove(&thread->th_link);
    401         irq_spinlock_unlock(&thread->task->lock, irq_res);
    402        
     406        spinlock_lock(&t->task->lock);
     407        list_remove(&t->th_link);
     408        spinlock_unlock(&t->task->lock);       
     409
    403410        /*
    404411         * Drop the reference to the containing task.
    405412         */
    406         task_release(thread->task);
    407         slab_free(thread_slab, thread);
     413        task_release(t->task);
     414       
     415        slab_free(thread_slab, t);
    408416}
    409417
     
    413421 * threads_tree.
    414422 *
    415  * @param t    Thread to be attached to the task.
    416  * @param task Task to which the thread is to be attached.
    417  *
    418  */
    419 void thread_attach(thread_t *thread, task_t *task)
    420 {
     423 * @param t     Thread to be attached to the task.
     424 * @param task  Task to which the thread is to be attached.
     425 */
     426void thread_attach(thread_t *t, task_t *task)
     427{
     428        ipl_t ipl;
     429
    421430        /*
    422431         * Attach to the specified task.
    423432         */
    424         irq_spinlock_lock(&task->lock, true);
    425        
     433        ipl = interrupts_disable();
     434        spinlock_lock(&task->lock);
     435
    426436        /* Hold a reference to the task. */
    427437        task_hold(task);
    428        
     438
    429439        /* Must not count kbox thread into lifecount */
    430         if (thread->flags & THREAD_FLAG_USPACE)
     440        if (t->flags & THREAD_FLAG_USPACE)
    431441                atomic_inc(&task->lifecount);
    432        
    433         list_append(&thread->th_link, &task->th_head);
    434        
    435         irq_spinlock_pass(&task->lock, &threads_lock);
    436        
     442
     443        list_append(&t->th_link, &task->th_head);
     444        spinlock_unlock(&task->lock);
     445
    437446        /*
    438447         * Register this thread in the system-wide list.
    439448         */
    440         avltree_insert(&threads_tree, &thread->threads_tree_node);
    441         irq_spinlock_unlock(&threads_lock, true);
     449        spinlock_lock(&threads_lock);
     450        avltree_insert(&threads_tree, &t->threads_tree_node);
     451        spinlock_unlock(&threads_lock);
     452       
     453        interrupts_restore(ipl);
    442454}
    443455
    444456/** Terminate thread.
    445457 *
    446  * End current thread execution and switch it to the exiting state.
    447  * All pending timeouts are executed.
    448  *
     458 * End current thread execution and switch it to the exiting state. All pending
     459 * timeouts are executed.
    449460 */
    450461void thread_exit(void)
    451462{
     463        ipl_t ipl;
     464
    452465        if (THREAD->flags & THREAD_FLAG_USPACE) {
    453466#ifdef CONFIG_UDEBUG
    454467                /* Generate udebug THREAD_E event */
    455468                udebug_thread_e_event();
    456 
    457                 /*
    458                  * This thread will not execute any code or system calls from
    459                  * now on.
    460                  */
    461                 udebug_stoppable_begin();
    462469#endif
    463470                if (atomic_predec(&TASK->lifecount) == 0) {
     
    468475                         * can only be created by threads of the same task.
    469476                         * We are safe to perform cleanup.
    470                          *
    471477                         */
    472478                        ipc_cleanup();
     
    475481                }
    476482        }
    477        
     483
    478484restart:
    479         irq_spinlock_lock(&THREAD->lock, true);
    480         if (THREAD->timeout_pending) {
    481                 /* Busy waiting for timeouts in progress */
    482                 irq_spinlock_unlock(&THREAD->lock, true);
     485        ipl = interrupts_disable();
     486        spinlock_lock(&THREAD->lock);
     487        if (THREAD->timeout_pending) {
     488                /* busy waiting for timeouts in progress */
     489                spinlock_unlock(&THREAD->lock);
     490                interrupts_restore(ipl);
    483491                goto restart;
    484492        }
    485493       
    486494        THREAD->state = Exiting;
    487         irq_spinlock_unlock(&THREAD->lock, true);
    488        
     495        spinlock_unlock(&THREAD->lock);
    489496        scheduler();
    490        
     497
    491498        /* Not reached */
    492         while (true);
    493 }
     499        while (1)
     500                ;
     501}
     502
    494503
    495504/** Thread sleep
     
    506515        while (sec > 0) {
    507516                uint32_t period = (sec > 1000) ? 1000 : sec;
    508                
     517       
    509518                thread_usleep(period * 1000000);
    510519                sec -= period;
     
    514523/** Wait for another thread to exit.
    515524 *
    516  * @param thread Thread to join on exit.
    517  * @param usec   Timeout in microseconds.
    518  * @param flags  Mode of operation.
     525 * @param t Thread to join on exit.
     526 * @param usec Timeout in microseconds.
     527 * @param flags Mode of operation.
    519528 *
    520529 * @return An error code from errno.h or an error code from synch.h.
    521  *
    522  */
    523 int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
    524 {
    525         if (thread == THREAD)
     530 */
     531int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
     532{
     533        ipl_t ipl;
     534        int rc;
     535
     536        if (t == THREAD)
    526537                return EINVAL;
    527        
     538
    528539        /*
    529540         * Since thread join can only be called once on an undetached thread,
     
    531542         */
    532543       
    533         irq_spinlock_lock(&thread->lock, true);
    534         ASSERT(!thread->detached);
    535         irq_spinlock_unlock(&thread->lock, true);
    536        
    537         return waitq_sleep_timeout(&thread->join_wq, usec, flags);
     544        ipl = interrupts_disable();
     545        spinlock_lock(&t->lock);
     546        ASSERT(!t->detached);
     547        spinlock_unlock(&t->lock);
     548        interrupts_restore(ipl);
     549       
     550        rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
     551       
     552        return rc;     
    538553}
    539554
     
    543558 * state, deallocate its resources.
    544559 *
    545  * @param thread Thread to be detached.
    546  *
    547  */
    548 void thread_detach(thread_t *thread)
    549 {
     560 * @param t Thread to be detached.
     561 */
     562void thread_detach(thread_t *t)
     563{
     564        ipl_t ipl;
     565
    550566        /*
    551567         * Since the thread is expected not to be already detached,
    552568         * pointer to it must be still valid.
    553569         */
    554         irq_spinlock_lock(&thread->lock, true);
    555         ASSERT(!thread->detached);
    556        
    557         if (thread->state == Lingering) {
    558                 /*
    559                  * Unlock &thread->lock and restore
    560                  * interrupts in thread_destroy().
    561                  */
    562                 thread_destroy(thread, true);
     570        ipl = interrupts_disable();
     571        spinlock_lock(&t->lock);
     572        ASSERT(!t->detached);
     573        if (t->state == Lingering) {
     574                thread_destroy(t);      /* unlocks &t->lock */
     575                interrupts_restore(ipl);
    563576                return;
    564577        } else {
    565                 thread->detached = true;
    566         }
    567        
    568         irq_spinlock_unlock(&thread->lock, true);
     578                t->detached = true;
     579        }
     580        spinlock_unlock(&t->lock);
     581        interrupts_restore(ipl);
    569582}
    570583
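
    Per the comments above, a thread can be joined at most once and only while undetached; once the joined thread reaches the Lingering state, thread_detach() deallocates it. A sketch of the join-then-detach pattern, assuming the usual SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE and SYNCH_FAILED() helpers from the synchronization headers (none of which appear in this changeset):

        /* t was created with thread_create() and made ready earlier. */
        int rc = thread_join_timeout(t, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
        if (SYNCH_FAILED(rc)) {
                /* interrupted sleep or other code from synch.h / errno.h */
                return;
        }

        /* The joined thread is now Lingering; detaching it lets
           thread_destroy() free the thread_t and its kernel stack. */
        thread_detach(t);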
     
    588601 *
    589602 * Register a function and its argument to be executed
    590  * on next context switch to the current thread. Must
    591  * be called with interrupts disabled.
     603 * on next context switch to the current thread.
    592604 *
    593605 * @param call_me      Out-of-context function.
     
    597609void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
    598610{
    599         irq_spinlock_lock(&THREAD->lock, false);
     611        ipl_t ipl;
     612       
     613        ipl = interrupts_disable();
     614        spinlock_lock(&THREAD->lock);
    600615        THREAD->call_me = call_me;
    601616        THREAD->call_me_with = call_me_with;
    602         irq_spinlock_unlock(&THREAD->lock, false);
     617        spinlock_unlock(&THREAD->lock);
     618        interrupts_restore(ipl);
    603619}
    604620
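
    A small sketch of the hook registration above; the hook and buffer names are hypothetical, and the callback is run on the next context switch to the registering thread, as the comment describes:

        static char demo_buffer[64];            /* hypothetical per-thread scratch data */

        static void demo_flush_hook(void *arg)
        {
                (void) arg;
                /* invoked on the next context switch to the thread that registered it */
        }

        static void demo_register(void)
        {
                thread_register_call_me(demo_flush_hook, demo_buffer);
        }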
    605621static bool thread_walker(avltree_node_t *node, void *arg)
    606622{
    607         thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
     623        thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
    608624       
    609625        uint64_t ucycles, kcycles;
    610626        char usuffix, ksuffix;
    611         order_suffix(thread->ucycles, &ucycles, &usuffix);
    612         order_suffix(thread->kcycles, &kcycles, &ksuffix);
    613        
     627        order_suffix(t->ucycles, &ucycles, &usuffix);
     628        order_suffix(t->kcycles, &kcycles, &ksuffix);
     629
    614630#ifdef __32_BITS__
    615631        printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
    616                 PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
    617                 thread_states[thread->state], thread->task, thread->task->context,
    618                 thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
    619 #endif
    620        
     632                PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
     633                thread_states[t->state], t->task, t->task->context, t->thread_code,
     634                t->kstack, ucycles, usuffix, kcycles, ksuffix);
     635#endif
     636
    621637#ifdef __64_BITS__
    622638        printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
    623                 PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
    624                 thread_states[thread->state], thread->task, thread->task->context,
    625                 thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
    626 #endif
    627        
    628         if (thread->cpu)
    629                 printf("%-4u", thread->cpu->id);
     639                PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
     640                thread_states[t->state], t->task, t->task->context, t->thread_code,
     641                t->kstack, ucycles, usuffix, kcycles, ksuffix);
     642#endif
     643                       
     644        if (t->cpu)
     645                printf("%-4u", t->cpu->id);
    630646        else
    631647                printf("none");
    632        
    633         if (thread->state == Sleeping) {
     648                       
     649        if (t->state == Sleeping) {
    634650#ifdef __32_BITS__
    635                 printf(" %10p", thread->sleep_queue);
    636 #endif
    637                
     651                printf(" %10p", t->sleep_queue);
     652#endif
     653
    638654#ifdef __64_BITS__
    639                 printf(" %18p", thread->sleep_queue);
    640 #endif
    641         }
    642        
     655                printf(" %18p", t->sleep_queue);
     656#endif
     657        }
     658                       
    643659        printf("\n");
    644        
     660
    645661        return true;
    646662}
    647663
    648 /** Print list of threads debug info
    649  *
    650  */
     664/** Print list of threads debug info */
    651665void thread_print_list(void)
    652666{
     667        ipl_t ipl;
     668       
    653669        /* Messing with thread structures, avoid deadlock */
    654         irq_spinlock_lock(&threads_lock, true);
    655        
    656 #ifdef __32_BITS__
     670        ipl = interrupts_disable();
     671        spinlock_lock(&threads_lock);
     672
     673#ifdef __32_BITS__     
    657674        printf("tid    name       address    state    task       "
    658675                "ctx code       stack      ucycles    kcycles    cpu  "
     
    662679                "----------\n");
    663680#endif
    664        
     681
    665682#ifdef __64_BITS__
    666683        printf("tid    name       address            state    task               "
     
    671688                "------------------\n");
    672689#endif
    673        
     690
    674691        avltree_walk(&threads_tree, thread_walker, NULL);
    675        
    676         irq_spinlock_unlock(&threads_lock, true);
     692
     693        spinlock_unlock(&threads_lock);
     694        interrupts_restore(ipl);
    677695}
    678696
     
    682700 * interrupts must be already disabled.
    683701 *
    684  * @param thread Pointer to thread.
     702 * @param t Pointer to thread.
    685703 *
    686704 * @return True if thread t is known to the system, false otherwise.
    687  *
    688  */
    689 bool thread_exists(thread_t *thread)
    690 {
    691         ASSERT(interrupts_disabled());
    692         ASSERT(irq_spinlock_locked(&threads_lock));
    693 
    694         avltree_node_t *node =
    695             avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
     705 */
     706bool thread_exists(thread_t *t)
     707{
     708        avltree_node_t *node;
     709
     710        node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
    696711       
    697712        return node != NULL;
     
    703718 * interrupts must be already disabled.
    704719 *
    705  * @param user True to update user accounting, false for kernel.
    706  *
     720 * @param user  True to update user accounting, false for kernel.
    707721 */
    708722void thread_update_accounting(bool user)
    709723{
    710724        uint64_t time = get_cycle();
    711 
    712         ASSERT(interrupts_disabled());
    713         ASSERT(irq_spinlock_locked(&THREAD->lock));
    714        
    715         if (user)
     725        if (user) {
    716726                THREAD->ucycles += time - THREAD->last_cycle;
    717         else
     727        } else {
    718728                THREAD->kcycles += time - THREAD->last_cycle;
    719        
     729        }
    720730        THREAD->last_cycle = time;
    721731}
     
    747757thread_t *thread_find_by_id(thread_id_t thread_id)
    748758{
    749         ASSERT(interrupts_disabled());
    750         ASSERT(irq_spinlock_locked(&threads_lock));
    751 
    752759        thread_iterator_t iterator;
    753760       
     
    767774    size_t name_len, thread_id_t *uspace_thread_id)
    768775{
     776        thread_t *t;
     777        char namebuf[THREAD_NAME_BUFLEN];
     778        uspace_arg_t *kernel_uarg;
     779        int rc;
     780
    769781        if (name_len > THREAD_NAME_BUFLEN - 1)
    770782                name_len = THREAD_NAME_BUFLEN - 1;
    771        
    772         char namebuf[THREAD_NAME_BUFLEN];
    773         int rc = copy_from_uspace(namebuf, uspace_name, name_len);
     783
     784        rc = copy_from_uspace(namebuf, uspace_name, name_len);
    774785        if (rc != 0)
    775786                return (unative_t) rc;
    776        
     787
    777788        namebuf[name_len] = 0;
    778        
     789
    779790        /*
    780791         * In case of failure, kernel_uarg will be deallocated in this function.
    781792         * In case of success, kernel_uarg will be freed in uinit().
    782          *
    783793         */
    784         uspace_arg_t *kernel_uarg =
    785             (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
     794        kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
    786795       
    787796        rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
     
    790799                return (unative_t) rc;
    791800        }
    792        
    793         thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
     801
     802        t = thread_create(uinit, kernel_uarg, TASK,
    794803            THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
    795         if (thread) {
     804        if (t) {
    796805                if (uspace_thread_id != NULL) {
    797                         rc = copy_to_uspace(uspace_thread_id, &thread->tid,
    798                             sizeof(thread->tid));
     806                        int rc;
     807
     808                        rc = copy_to_uspace(uspace_thread_id, &t->tid,
     809                            sizeof(t->tid));
    799810                        if (rc != 0) {
    800811                                /*
     
    802813                                 * has already been created. We need to undo its
    803814                                 * creation now.
    804                                  *
    805815                                 */
    806                                
     816
    807817                                /*
    808818                                 * The new thread structure is initialized, but
     
    810820                                 * We can safely deallocate it.
    811821                                 */
    812                                 slab_free(thread_slab, thread);
    813                                 free(kernel_uarg);
    814                                
     822                                slab_free(thread_slab, t);
     823                                free(kernel_uarg);
     824
    815825                                return (unative_t) rc;
    816826                         }
    817827                }
    818                
    819828#ifdef CONFIG_UDEBUG
    820829                /*
     
    824833                 * THREAD_B events for threads that already existed
    825834                 * and could be detected with THREAD_READ before.
    826                  *
    827835                 */
    828                 udebug_thread_b_event_attach(thread, TASK);
     836                udebug_thread_b_event_attach(t, TASK);
    829837#else
    830                 thread_attach(thread, TASK);
    831 #endif
    832                 thread_ready(thread);
    833                
     838                thread_attach(t, TASK);
     839#endif
     840                thread_ready(t);
     841
    834842                return 0;
    835843        } else
    836844                free(kernel_uarg);
    837        
     845
    838846        return (unative_t) ENOMEM;
    839847}
     
    845853{
    846854        thread_exit();
    847        
    848855        /* Unreachable */
    849856        return 0;
     
    856863 *
    857864 * @return 0 on success or an error code from @ref errno.h.
    858  *
    859865 */
    860866unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
     
    863869         * No need to acquire lock on THREAD because tid
    864870         * remains constant for the lifespan of the thread.
    865          *
    866871         */
    867872        return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,