Changeset ea7890e7 in mainline


Ignore:
Timestamp:
2007-06-01T15:47:46Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
07be3c4
Parents:
ff3a34b
Message:

More efficient and simpler task termination.

Based on the assumption that, after its creation, only the task itself can create more threads for itself,
the last thread with userspace context to execute thread_exit() will perform futex and IPC cleanup. When
the task has no threads left, it is destroyed. Both the cleanup and the destruction are controlled by reference
counting.

As for userspace threads, even though there could be a global garbage collector for joining threads, it is
much simpler if the uinit thread detaches itself before switching to userspace.

task_kill() is now an idempotent operation. It just instructs the threads within a task to exit.

Change in the name of a thread state: Undead → JoinMe.

Location:
kernel/generic
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/adt/list.h

    rff3a34b rea7890e7  
    181181}
    182182
    183 #define list_get_instance(link,type,member) \
     183#define list_get_instance(link, type, member) \
    184184        ((type *)(((uint8_t *)(link)) - ((uint8_t *)&(((type *)NULL)->member))))
    185185
  • kernel/generic/include/proc/task.h

    rff3a34b rea7890e7  
    6565       
    6666        char *name;
    67         /** Pointer to the main thread. */
    68         struct thread *main_thread;
    6967        /** List of threads contained in this task. */
    7068        link_t th_head;
     
    7674        context_id_t context;   
    7775
    78         /** If this is true, new threads can become part of the task. */
    79         bool accept_new_threads;
    8076        /** Number of references (i.e. threads). */
    81         count_t refcount;       
     77        atomic_t refcount;
     78        /** Number of threads that haven't exited yet. */
     79        atomic_t lifecount;
    8280
    8381        /** Task capabilities. */
     
    123121extern cap_t cap_get(task_t *t);
    124122
    125 
    126123#ifndef task_create_arch
    127124extern void task_create_arch(task_t *t);
  • kernel/generic/include/proc/thread.h

    rff3a34b rea7890e7  
    11/*
    2  * Copyright (c) 2001-2004 Jakub Jermar
     2 * Copyright (c) 2001-2007 Jakub Jermar
    33 * All rights reserved.
    44 *
     
    4141#include <cpu.h>
    4242#include <synch/rwlock.h>
     43#include <synch/spinlock.h>
    4344#include <adt/btree.h>
    4445#include <mm/slab.h>
     
    8182        /** After a thread calls thread_exit(), it is put into Exiting state. */
    8283        Exiting,
    83         /** Threads that were not detached but exited are in the Undead state. */
    84         Undead
     84        /** Threads that were not detached but exited are in the JoinMe state. */
     85        JoinMe
    8586} state_t;
    86 
    87 /** Join types. */
    88 typedef enum {
    89         None,
    90         TaskClnp,       /**< The thread will be joined by ktaskclnp thread. */
    91         TaskGC          /**< The thread will be joined by ktaskgc thread. */
    92 } thread_join_type_t;
    9387
    9488/** Thread structure. There is one per thread. */
     
    153147        bool interrupted;                       
    154148       
    155         /** Who joinins the thread. */
    156         thread_join_type_t join_type;
    157149        /** If true, thread_join_timeout() cannot be used on this thread. */
    158150        bool detached;
    159151        /** Waitq for thread_join_timeout(). */
    160152        waitq_t join_wq;
     153        /** Link used in the joiner_head list. */
     154        link_t joiner_link;
    161155
    162156        fpu_context_t *saved_fpu_context;
  • kernel/generic/src/main/uinit.c

    rff3a34b rea7890e7  
    4646#include <userspace.h>
    4747#include <mm/slab.h>
     48#include <arch.h>
    4849
    4950/** Thread used to bring up userspace thread.
     
    5556{
    5657        uspace_arg_t uarg;
     58
     59        /*
     60         * So far, we don't have a use for joining userspace threads so we
     61         * immediately detach each uinit thread. If joining of userspace threads
     62         * is required, some userspace API based on the kernel mechanism will
     63         * have to be implemented. Moreover, garbage collecting of threads that
     64         * didn't detach themselves and nobody else joined them will have to be
     65         * deployed for the event of forceful task termination.
     66         */
     67        thread_detach(THREAD);
    5768       
    5869        uarg.uspace_entry = ((uspace_arg_t *) arg)->uspace_entry;
    5970        uarg.uspace_stack = ((uspace_arg_t *) arg)->uspace_stack;
    6071        uarg.uspace_uarg = ((uspace_arg_t *) arg)->uspace_uarg;
    61         uarg.uspace_thread_function = NULL;
    62         uarg.uspace_thread_arg = NULL;
     72        uarg.uspace_thread_function = NULL; uarg.uspace_thread_arg = NULL;
    6373
    6474        free((uspace_arg_t *) arg);
  • kernel/generic/src/proc/scheduler.c

    rff3a34b rea7890e7  
    406406                                         */
    407407                                        spinlock_unlock(&THREAD->lock);
    408                                         delay(10);
     408                                        delay(HZ);
    409409                                        spinlock_lock(&THREAD->lock);
    410410                                        DEADLOCK_PROBE(p_joinwq,
     
    416416                                spinlock_unlock(&THREAD->join_wq.lock);
    417417                               
    418                                 THREAD->state = Undead;
     418                                THREAD->state = JoinMe;
    419419                                spinlock_unlock(&THREAD->lock);
    420420                        }
  • kernel/generic/src/proc/task.c

    rff3a34b rea7890e7  
    5757#include <func.h>
    5858#include <syscall/copy.h>
    59 #include <console/klog.h>
    6059
    6160#ifndef LOADED_PROG_STACK_PAGES_NO
     
    7978
    8079static task_id_t task_counter = 0;
    81 
    82 static void ktaskclnp(void *arg);
    83 static void ktaskgc(void *arg);
    8480
    8581/** Initialize tasks
     
    165161        ta->as = as;
    166162        ta->name = name;
    167         ta->main_thread = NULL;
    168         ta->refcount = 0;
     163        atomic_set(&ta->refcount, 0);
     164        atomic_set(&ta->lifecount, 0);
    169165        ta->context = CONTEXT;
    170166
    171167        ta->capabilities = 0;
    172         ta->accept_new_threads = true;
    173168        ta->cycles = 0;
    174169       
     
    192187
    193188        spinlock_lock(&tasks_lock);
    194 
    195189        ta->taskid = ++task_counter;
    196190        btree_insert(&tasks_btree, (btree_key_t) ta->taskid, (void *) ta, NULL);
    197 
    198191        spinlock_unlock(&tasks_lock);
    199192        interrupts_restore(ipl);
     
    208201void task_destroy(task_t *t)
    209202{
     203        /*
     204         * Remove the task from the task B+tree.
     205         */
     206        spinlock_lock(&tasks_lock);
     207        btree_remove(&tasks_btree, t->taskid, NULL);
     208        spinlock_unlock(&tasks_lock);
     209
     210        /*
     211         * Perform architecture specific task destruction.
     212         */
    210213        task_destroy_arch(t);
     214
     215        /*
     216         * Free up dynamically allocated state.
     217         */
    211218        btree_destroy(&t->futexes);
    212219
     220        /*
     221         * Drop our reference to the address space.
     222         */
    213223        if (atomic_predec(&t->as->refcount) == 0)
    214224                as_destroy(t->as);
     
    230240        as_area_t *a;
    231241        int rc;
    232         thread_t *t1, *t2;
     242        thread_t *t;
    233243        task_t *task;
    234244        uspace_arg_t *kernel_uarg;
     
    264274         * Create the main thread.
    265275         */
    266         t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
     276        t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
    267277            "uinit", false);
    268         ASSERT(t1);
    269        
    270         /*
    271          * Create killer thread for the new task.
    272          */
    273         t2 = thread_create(ktaskgc, t1, task, 0, "ktaskgc", true);
    274         ASSERT(t2);
    275         thread_ready(t2);
    276 
    277         thread_ready(t1);
     278        ASSERT(t);
     279       
     280        thread_ready(t);
    278281
    279282        return task;
     
    348351/** Kill task.
    349352 *
     353 * This function is idempotent.
     354 * It signals all the task's threads to bail it out.
     355 *
    350356 * @param id ID of the task to be killed.
    351357 *
     
    356362        ipl_t ipl;
    357363        task_t *ta;
    358         thread_t *t;
    359364        link_t *cur;
    360365
     
    364369        ipl = interrupts_disable();
    365370        spinlock_lock(&tasks_lock);
    366 
    367371        if (!(ta = task_find_by_id(id))) {
    368372                spinlock_unlock(&tasks_lock);
     
    370374                return ENOENT;
    371375        }
    372 
     376        spinlock_unlock(&tasks_lock);
     377       
     378        /*
     379         * Interrupt all threads except ktaskclnp.
     380         */
    373381        spinlock_lock(&ta->lock);
    374         ta->refcount++;
    375         spinlock_unlock(&ta->lock);
    376 
    377         btree_remove(&tasks_btree, ta->taskid, NULL);
    378         spinlock_unlock(&tasks_lock);
    379        
    380         t = thread_create(ktaskclnp, NULL, ta, 0, "ktaskclnp", true);
    381        
    382         spinlock_lock(&ta->lock);
    383         ta->accept_new_threads = false;
    384         ta->refcount--;
    385 
    386         /*
    387          * Interrupt all threads except ktaskclnp.
    388          */     
    389382        for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
    390383                thread_t *thr;
    391                 bool  sleeping = false;
     384                bool sleeping = false;
    392385               
    393386                thr = list_get_instance(cur, thread_t, th_link);
    394                 if (thr == t)
    395                         continue;
    396387                       
    397388                spinlock_lock(&thr->lock);
     
    404395                        waitq_interrupt_sleep(thr);
    405396        }
    406        
    407397        spinlock_unlock(&ta->lock);
    408398        interrupts_restore(ipl);
    409399       
    410         if (t)
    411                 thread_ready(t);
    412 
    413400        return 0;
    414401}
     
    426413        printf("taskid name       ctx address    as         cycles     threads "
    427414            "calls  callee\n");
    428         printf("------ ---------- --- ---------- ---------- ---------- ------- "            "------ ------>\n");
     415        printf("------ ---------- --- ---------- ---------- ---------- ------- "
     416            "------ ------>\n");
    429417
    430418        for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
     
    465453}
    466454
    467 /** Kernel thread used to cleanup the task after it is killed. */
    468 void ktaskclnp(void *arg)
    469 {
    470         ipl_t ipl;
    471         thread_t *t = NULL, *main_thread;
    472         link_t *cur;
    473         bool again;
    474 
    475         thread_detach(THREAD);
    476 
    477 loop:
    478         ipl = interrupts_disable();
    479         spinlock_lock(&TASK->lock);
    480        
    481         main_thread = TASK->main_thread;
    482        
    483         /*
    484          * Find a thread to join.
    485          */
    486         again = false;
    487         for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
    488                 t = list_get_instance(cur, thread_t, th_link);
    489 
    490                 spinlock_lock(&t->lock);
    491                 if (t == THREAD) {
    492                         spinlock_unlock(&t->lock);
    493                         continue;
    494                 } else if (t == main_thread) {
    495                         spinlock_unlock(&t->lock);
    496                         continue;
    497                 } else if (t->join_type != None) {
    498                         spinlock_unlock(&t->lock);
    499                         again = true;
    500                         continue;
    501                 } else {
    502                         t->join_type = TaskClnp;
    503                         spinlock_unlock(&t->lock);
    504                         again = false;
    505                         break;
    506                 }
    507         }
    508        
    509         spinlock_unlock(&TASK->lock);
    510         interrupts_restore(ipl);
    511        
    512         if (again) {
    513                 /*
    514                  * Other cleanup (e.g. ktaskgc) is in progress.
    515                  */
    516                 scheduler();
    517                 goto loop;
    518         }
    519        
    520         if (t != THREAD) {
    521                 ASSERT(t != main_thread);       /* uninit is joined and detached
    522                                                  * in ktaskgc */
    523                 thread_join(t);
    524                 thread_detach(t);
    525                 goto loop;                      /* go for another thread */
    526         }
    527        
    528         /*
    529          * Now there are no other threads in this task
    530          * and no new threads can be created.
    531          */
    532 
    533         ipc_cleanup();
    534         futex_cleanup();
    535         klog_printf("Cleanup of task %llu completed.", TASK->taskid);
    536 }
    537 
    538 /** Kernel thread used to kill the userspace task when its main thread exits.
    539  *
    540  * This thread waits until the main userspace thread (i.e. uninit) exits.
    541  * When this happens, the task is killed. In the meantime, exited threads
    542  * are garbage collected.
    543  *
    544  * @param arg Pointer to the thread structure of the task's main thread.
    545  */
    546 void ktaskgc(void *arg)
    547 {
    548         thread_t *t = (thread_t *) arg;
    549 loop:   
    550         /*
    551          * Userspace threads cannot detach themselves,
    552          * therefore the thread pointer is guaranteed to be valid.
    553          */
    554         if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
    555             ESYNCH_TIMEOUT) {   /* sleep uninterruptibly here! */
    556                 ipl_t ipl;
    557                 link_t *cur;
    558                 thread_t *thr = NULL;
    559        
    560                 /*
    561                  * The join timed out. Try to do some garbage collection of
    562                  * Undead threads.
    563                  */
    564 more_gc:               
    565                 ipl = interrupts_disable();
    566                 spinlock_lock(&TASK->lock);
    567                
    568                 for (cur = TASK->th_head.next; cur != &TASK->th_head;
    569                     cur = cur->next) {
    570                         thr = list_get_instance(cur, thread_t, th_link);
    571                         spinlock_lock(&thr->lock);
    572                         if (thr != t && thr->state == Undead &&
    573                             thr->join_type == None) {
    574                                 thr->join_type = TaskGC;
    575                                 spinlock_unlock(&thr->lock);
    576                                 break;
    577                         }
    578                         spinlock_unlock(&thr->lock);
    579                         thr = NULL;
    580                 }
    581                 spinlock_unlock(&TASK->lock);
    582                 interrupts_restore(ipl);
    583                
    584                 if (thr) {
    585                         thread_join(thr);
    586                         thread_detach(thr);
    587                         scheduler();
    588                         goto more_gc;
    589                 }
    590                        
    591                 goto loop;
    592         }
    593         thread_detach(t);
    594         task_kill(TASK->taskid);
    595 }
    596 
    597455/** @}
    598456 */
  • kernel/generic/src/proc/thread.c

    rff3a34b rea7890e7  
    6868#include <syscall/copy.h>
    6969#include <errno.h>
     70#include <console/klog.h>
    7071
    7172
     
    7879        "Entering",
    7980        "Exiting",
    80         "Undead"
     81        "JoinMe"
    8182};
    8283
     
    329330
    330331        t->interrupted = false;
    331         t->join_type = None;
    332332        t->detached = false;
    333333        waitq_initialize(&t->join_wq);
     
    343343        thread_create_arch(t); 
    344344
    345         ipl = interrupts_disable();     
    346         spinlock_lock(&task->lock);
    347         if (!task->accept_new_threads) {
    348                 spinlock_unlock(&task->lock);
    349                 slab_free(thread_slab, t);
    350                 interrupts_restore(ipl);
    351                 return NULL;
    352         } else {
    353                 /*
    354                  * Bump the reference count so that this task cannot be
    355                  * destroyed while the new thread is being attached to it.
    356                  */
    357                 task->refcount++;
    358         }
    359         spinlock_unlock(&task->lock);
    360         interrupts_restore(ipl);
    361 
    362345        if (!(flags & THREAD_FLAG_NOATTACH))
    363346                thread_attach(t, task);
     
    374357void thread_destroy(thread_t *t)
    375358{
    376         bool destroy_task = false;
    377 
    378         ASSERT(t->state == Exiting || t->state == Undead);
     359        ASSERT(t->state == Exiting || t->state == JoinMe);
    379360        ASSERT(t->task);
    380361        ASSERT(t->cpu);
     
    396377        spinlock_lock(&t->task->lock);
    397378        list_remove(&t->th_link);
    398         if (--t->task->refcount == 0) {
    399                 t->task->accept_new_threads = false;
    400                 destroy_task = true;
    401         }
    402379        spinlock_unlock(&t->task->lock);       
    403        
    404         if (destroy_task)
     380
     381        /*
     382         * t is guaranteed to be the very last thread of its task.
     383         * It is safe to destroy the task.
     384         */
     385        if (atomic_predec(&t->task->refcount) == 0)
    405386                task_destroy(t->task);
    406387       
     
    432413         * Attach to the current task.
    433414         */
    434         ipl = interrupts_disable();     
     415        ipl = interrupts_disable();
    435416        spinlock_lock(&task->lock);
    436         ASSERT(task->refcount);
     417        atomic_inc(&task->refcount);
     418        atomic_inc(&task->lifecount);
    437419        list_append(&t->th_link, &task->th_head);
    438         if (task->refcount == 1)
    439                 task->main_thread = t;
    440420        spinlock_unlock(&task->lock);
    441421
     
    459439{
    460440        ipl_t ipl;
     441
     442        if (atomic_predec(&TASK->lifecount) == 0) {
     443                /*
     444                 * We are the last thread in the task that still has not exited.
     445                 * With the exception of the moment the task was created, new
     446                 * threads can only be created by threads of the same task.
     447                 * We are safe to perform cleanup.
     448                 */
     449                if (THREAD->flags & THREAD_FLAG_USPACE) {
     450                        ipc_cleanup();
     451                        futex_cleanup();
     452                        klog_printf("Cleanup of task %llu completed.",
     453                            TASK->taskid);
     454                }
     455        }
    461456
    462457restart:
     
    469464                goto restart;
    470465        }
     466       
    471467        THREAD->state = Exiting;
    472468        spinlock_unlock(&THREAD->lock);
     
    525521/** Detach thread.
    526522 *
    527  * Mark the thread as detached, if the thread is already in the Undead state,
     523 * Mark the thread as detached, if the thread is already in the JoinMe state,
    528524 * deallocate its resources.
    529525 *
     
    541537        spinlock_lock(&t->lock);
    542538        ASSERT(!t->detached);
    543         if (t->state == Undead) {
     539        if (t->state == JoinMe) {
    544540                thread_destroy(t);      /* unlocks &t->lock */
    545541                interrupts_restore(ipl);
     
    703699                            sizeof(t->tid));
    704700                        if (rc != 0) {
    705                                 ipl_t ipl;
    706 
    707701                                /*
    708702                                 * We have encountered a failure, but the thread
     
    712706
    713707                                /*
    714                                  * The new thread structure is initialized,
    715                                  * but is still not visible to the system.
     708                                 * The new thread structure is initialized, but
     709                                 * is still not visible to the system.
    716710                                 * We can safely deallocate it.
    717711                                 */
    718712                                slab_free(thread_slab, t);
    719713                                free(kernel_uarg);
    720 
    721                                 /*
    722                                  * Now we need to decrement the task reference
    723                                  * counter. Because we are running within the
    724                                  * same task, thread t is not the last thread
    725                                  * in the task, so it is safe to merely
    726                                  * decrement the counter.
    727                                  */
    728                                 ipl = interrupts_disable();
    729                                 spinlock_lock(&TASK->lock);
    730                                 TASK->refcount--;
    731                                 spinlock_unlock(&TASK->lock);
    732                                 interrupts_restore(ipl);
    733714
    734715                                return (unative_t) rc;
Note: See TracChangeset for help on using the changeset viewer.