Changeset 7509ddc in mainline


Ignore:
Timestamp:
2006-06-04T21:54:49Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
34dcd3f
Parents:
2cb5e64
Message:

Framework for task_kill().
Some pieces (e.g. the implementation of the ktask_cleanup() kernel thread and
the task_destroy() function) are still missing.
Changed the locking order among the task lock, threads_lock, and thread lock:
the old order was threads_lock, then thread lock, then task lock; the new order is task lock, then threads_lock, then thread lock.

Location:
generic
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • generic/include/proc/task.h

    r2cb5e64 r7509ddc  
    4141/** Task structure. */
    4242struct task {
     43        /** Task lock.
     44         *
     45         * Must be acquired before threads_lock and thread lock of any of its threads.
     46         */
    4347        SPINLOCK_DECLARE(lock);
     48       
    4449        char *name;
    4550        link_t th_head;         /**< List of threads contained in this task. */
    4651        as_t *as;               /**< Address space. */
    4752        task_id_t taskid;       /**< Unique identity of task */
     53
     54        /** If this is true, new threads can become part of the task. */
     55        bool accept_new_threads;
     56
     57        count_t refcount;       /**< Number of references (i.e. threads). */
    4858
    4959        cap_t capabilities;     /**< Task capabilities. */
     
    7181extern void task_init(void);
    7282extern task_t *task_create(as_t *as, char *name);
     83extern void task_destroy(task_t *t);
    7384extern task_t *task_run_program(void *program_addr, char *name);
    7485extern task_t *task_find_by_id(task_id_t id);
     86extern int task_kill(task_id_t id);
     87
    7588
    7689#ifndef task_create_arch
  • generic/include/proc/thread.h

    r2cb5e64 r7509ddc  
    7373         *
    7474         * Protects the whole thread structure except list links above.
    75          * Must be acquired before T.lock for each T of type task_t.
    76          *
    7775         */
    7876        SPINLOCK_DECLARE(lock);
     
    9997        /** True if this thread is executing copy_to_uspace(). False otherwise. */
    10098        bool in_copy_to_uspace;
    101 
     99       
     100        /**
     101         * If true, the thread will not go to sleep at all and will
     102         * call thread_exit() before returning to userspace.
     103         */
     104        bool interrupted;                       
     105       
    102106        bool detached;                          /**< If true, thread_join_timeout() cannot be used on this thread. */
    103107        waitq_t join_wq;                        /**< Waitq for thread_join_timeout(). */
  • generic/src/proc/task.c

    r2cb5e64 r7509ddc  
    4848#include <print.h>
    4949#include <elf.h>
     50#include <errno.h>
    5051#include <syscall/copy.h>
    5152
     
    5758btree_t tasks_btree;
    5859static task_id_t task_counter = 0;
     60
     61static void ktask_cleanup(void *);
    5962
    6063/** Initialize tasks
     
    9598        ta->name = name;
    9699
     100        ta->refcount = 0;
     101
    97102        ta->capabilities = 0;
     103        ta->accept_new_threads = true;
    98104       
    99105        ipc_answerbox_init(&ta->answerbox);
     
    128134}
    129135
     136/** Destroy task.
     137 *
     138 * @param t Task to be destroyed.
     139 */
     140void task_destroy(task_t *t)
     141{
     142}
     143
    130144/** Create new task with 1 thread and run it
    131145 *
     
    206220       
    207221        return (task_t *) btree_search(&tasks_btree, (btree_key_t) id, &leaf);
     222}
     223
     224/** Kill task.
     225 *
     226 * @param id ID of the task to be killed.
     227 *
     228 * @return 0 on success or an error code from errno.h
     229 */
     230int task_kill(task_id_t id)
     231{
     232        ipl_t ipl;
     233        task_t *ta;
     234        thread_t *t;
     235        link_t *cur;
     236       
     237        ipl = interrupts_disable();
     238        spinlock_lock(&tasks_lock);
     239
     240        if (!(ta = task_find_by_id(id))) {
     241                spinlock_unlock(&tasks_lock);
     242                interrupts_restore(ipl);
     243                return ENOENT;
     244        }
     245       
     246        spinlock_lock(&ta->lock);
     247        ta->refcount++;
     248        spinlock_unlock(&ta->lock);
     249       
     250        t = thread_create(ktask_cleanup, NULL, ta, 0, "ktask_cleanup");
     251       
     252        spinlock_lock(&ta->lock);
     253        ta->refcount--;
     254       
     255        for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {
     256                thread_t *thr;
     257                bool  sleeping = false;
     258               
     259                thr = list_get_instance(cur, thread_t, th_link);
     260                if (thr == t)
     261                        continue;
     262                       
     263                spinlock_lock(&thr->lock);
     264                thr->interrupted = true;
     265                if (thr->state == Sleeping)
     266                        sleeping = true;
     267                spinlock_unlock(&thr->lock);
     268               
     269                if (sleeping)
     270                        waitq_interrupt_sleep(thr);
     271        }
     272       
     273        thread_ready(t);
     274       
     275        return 0;
    208276}
    209277
     
    244312        interrupts_restore(ipl);
    245313}
     314
     315/** Kernel thread used to cleanup the task. */
     316void ktask_cleanup(void *arg)
     317{
     318        /*
     319         * TODO:
     320         * Wait until it is save to cleanup the task (i.e. all other threads exit)
     321         * and do the cleanup (e.g. close IPC communication and release used futexes).
     322         * When this thread exits, the task refcount drops to zero and the task structure is
     323         * cleaned.
     324         */
     325}
  • generic/src/proc/thread.c

    r2cb5e64 r7509ddc  
    234234void thread_destroy(thread_t *t)
    235235{
     236        bool destroy_task = false;     
     237
    236238        ASSERT(t->state == Exiting);
    237239        ASSERT(t->task);
     
    242244                t->cpu->fpu_owner=NULL;
    243245        spinlock_unlock(&t->cpu->lock);
     246
     247        spinlock_unlock(&t->lock);
     248
     249        spinlock_lock(&threads_lock);
     250        btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
     251        spinlock_unlock(&threads_lock);
    244252
    245253        /*
     
    248256        spinlock_lock(&t->task->lock);
    249257        list_remove(&t->th_link);
    250         spinlock_unlock(&t->task->lock);
    251        
    252         spinlock_unlock(&t->lock);
    253        
    254         spinlock_lock(&threads_lock);
    255         btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
    256         spinlock_unlock(&threads_lock);
     258        if (--t->task->refcount == 0) {
     259                t->task->accept_new_threads = false;
     260                destroy_task = true;
     261        }
     262        spinlock_unlock(&t->task->lock);       
     263       
     264        if (destroy_task)
     265                task_destroy(t->task);
    257266       
    258267        slab_free(thread_slab, t);
     
    320329        t->in_copy_from_uspace = false;
    321330        t->in_copy_to_uspace = false;
    322        
     331
     332        t->interrupted = false;
    323333        t->detached = false;
    324334        waitq_initialize(&t->join_wq);
     
    330340        t->fpu_context_exists = 0;
    331341        t->fpu_context_engaged = 0;
    332        
    333         /*
    334          * Register this thread in the system-wide list.
    335          */
    336         ipl = interrupts_disable();
    337         spinlock_lock(&threads_lock);
    338         btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
    339         spinlock_unlock(&threads_lock);
    340342       
    341343        /*
     
    343345         */
    344346        spinlock_lock(&task->lock);
     347        if (!task->accept_new_threads) {
     348                spinlock_unlock(&task->lock);
     349                slab_free(thread_slab, t);
     350                return NULL;
     351        }
    345352        list_append(&t->th_link, &task->th_head);
     353        task->refcount++;
    346354        spinlock_unlock(&task->lock);
     355
     356        /*
     357         * Register this thread in the system-wide list.
     358         */
     359        ipl = interrupts_disable();
     360        spinlock_lock(&threads_lock);
     361        btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
     362        spinlock_unlock(&threads_lock);
    347363       
    348364        interrupts_restore(ipl);
  • generic/src/synch/waitq.c

    r2cb5e64 r7509ddc  
    312312        spinlock_lock(&THREAD->lock);
    313313
     314        if (THREAD->interrupted) {
     315                spinlock_unlock(&THREAD->lock);
     316                spinlock_unlock(&wq->lock);
     317                return ESYNCH_INTERRUPTED;
     318        }
     319
    314320        if (flags & SYNCH_FLAGS_INTERRUPTIBLE) {
    315321                /*
  • generic/src/syscall/syscall.c

    r2cb5e64 r7509ddc  
    9191                         __native a4, __native id)
    9292{
     93        __native rc;
     94        ipl_t ipl;
     95        bool exit = false;
     96
    9397        if (id < SYSCALL_END)
    94                 return syscall_table[id](a1,a2,a3,a4);
     98                rc = syscall_table[id](a1,a2,a3,a4);
    9599        else
    96100                panic("Undefined syscall %d", id);
     101               
     102        ipl = interrupts_disable();
     103        spinlock_lock(&THREAD->lock);
     104        if (THREAD->interrupted)
     105                exit = true;
     106        spinlock_unlock(&THREAD->lock);
     107        interrupts_restore(ipl);
     108       
     109        if (exit)
     110                thread_exit();
     111       
     112        return rc;
    97113}
    98114
Note: See TracChangeset for help on using the changeset viewer.