  • kernel/generic/include/proc/thread.h

    --- kernel/generic/include/proc/thread.h (r5a5269d)
    +++ kernel/generic/include/proc/thread.h (rd23712e)
    @@ -70,4 +70,6 @@
     /** Thread structure. There is one per thread. */
     typedef struct thread {
    +        atomic_refcount_t refcount;
    +
             link_t rq_link;  /**< Run queue link. */
             link_t wq_link;  /**< Wait queue link. */
    @@ -77,11 +79,48 @@
             odlink_t lthreads;
     
    -        /** Lock protecting thread structure.
    +        /** Tracking variable for thread_wait/thread_wakeup */
    +        atomic_int sleep_state;
    +
    +        /**
    +         * If true, the thread is terminating.
    +         * It will not go to sleep in interruptible synchronization functions
    +         * and will call thread_exit() before returning to userspace.
    +         */
    +        volatile bool interrupted;
    +
    +        /** Wait queue in which this thread sleeps. Used for debug printouts. */
    +        _Atomic(waitq_t *) sleep_queue;
    +
    +        /** Waitq for thread_join_timeout(). */
    +        waitq_t join_wq;
    +
    +        /** Thread accounting. */
    +        atomic_time_stat_t ucycles;
    +        atomic_time_stat_t kcycles;
    +
    +        /** Architecture-specific data. */
    +        thread_arch_t arch;
    +
    +#ifdef CONFIG_UDEBUG
    +        /**
    +         * If true, the scheduler will print a stack trace
    +         * to the kernel console upon scheduling this thread.
    +         */
    +        atomic_int_fast8_t btrace;
    +
    +        /** Debugging stuff */
    +        udebug_thread_t udebug;
    +#endif /* CONFIG_UDEBUG */
    +
    +        /*
    +         * Immutable fields.
              *
    -         * Protects the whole thread structure except list links above.
    -         */
    -        IRQ_SPINLOCK_DECLARE(lock);
    -
    -        char name[THREAD_NAME_BUFLEN];
    +         * These fields are only modified during initialization, and are not
    +         * changed at any time between initialization and destruction.
    +         * Can be accessed without synchronization in most places.
    +         */
    +
    +        /** Thread ID. */
    +        thread_id_t tid;
     
             /** Function implementing the thread. */
    @@ -90,4 +129,32 @@
             void *thread_arg;
     
    +        char name[THREAD_NAME_BUFLEN];
    +
    +        /** Thread is executed in user space. */
    +        bool uspace;
    +
    +        /** Thread doesn't affect accumulated accounting. */
    +        bool uncounted;
    +
    +        /** Containing task. */
    +        task_t *task;
    +
    +        /** Thread's kernel stack. */
    +        uint8_t *kstack;
    +
    +        /*
    +         * Local fields.
    +         *
    +         * These fields can be safely accessed from code that _controls execution_
    +         * of this thread. Code controls execution of a thread if either:
    +         *  - it runs in the context of said thread AND interrupts are disabled
    +         *    (interrupts can and will access these fields)
    +         *  - the thread is not running, and the code accessing it can legally
    +         *    add/remove the thread to/from a runqueue, i.e., either:
    +         *    - it is allowed to enqueue thread in a new runqueue
    +         *    - it holds the lock to the runqueue containing the thread
    +         *
    +         */
    +
             /**
              * From here, the stored context is restored
    @@ -96,32 +163,5 @@
             context_t saved_context;
     
    -        /**
    -         * From here, the stored timeout context
    -         * is restored when sleep times out.
    -         */
    -        context_t sleep_timeout_context;
    -
    -        /**
    -         * From here, the stored interruption context
    -         * is restored when sleep is interrupted.
    -         */
    -        context_t sleep_interruption_context;
    -
    -        /** If true, the thread can be interrupted from sleep. */
    -        bool sleep_interruptible;
    -
    -        /**
    -         * If true, and this thread's sleep returns without a wakeup
    -         * (timed out or interrupted), waitq ignores the next wakeup.
    -         * This is necessary for futex to be able to handle those conditions.
    -         */
    -        bool sleep_composable;
    -
    -        /** Wait queue in which this thread sleeps. */
    -        waitq_t *sleep_queue;
    -        /** Timeout used for timeoutable sleeping.  */
    -        timeout_t sleep_timeout;
    -        /** Flag signalling sleep timeout in progress. */
    -        volatile bool timeout_pending;
    +        // TODO: we only need one of the two bools below
     
             /**
    @@ -137,76 +177,35 @@
             bool in_copy_to_uspace;
     
    -        /**
    -         * If true, the thread will not go to sleep at all and will call
    -         * thread_exit() before returning to userspace.
    -         */
    -        bool interrupted;
    -
    -        /** If true, thread_join_timeout() cannot be used on this thread. */
    -        bool detached;
    -        /** Waitq for thread_join_timeout(). */
    -        waitq_t join_wq;
    -        /** Link used in the joiner_head list. */
    -        link_t joiner_link;
    -
    -        fpu_context_t *saved_fpu_context;
    +        /*
    +         * FPU context is a special case. If lazy FPU switching is disabled,
    +         * it acts as a regular local field. However, if lazy switching is enabled,
    +         * the context is synchronized via CPU->fpu_lock
    +         */
    +#ifdef CONFIG_FPU
    +        fpu_context_t fpu_context;
    +#endif
             bool fpu_context_exists;
    -
    -        /*
    -         * Defined only if thread doesn't run.
    -         * It means that fpu context is in CPU that last time executes this
    -         * thread. This disables migration.
    -         */
    -        bool fpu_context_engaged;
     
             /* The thread will not be migrated if nomigrate is non-zero. */
             unsigned int nomigrate;
     
    -        /** Thread state. */
    -        state_t state;
    -
    -        /** Thread CPU. */
    -        cpu_t *cpu;
    -        /** Containing task. */
    -        task_t *task;
    -        /** Thread is wired to CPU. */
    -        bool wired;
             /** Thread was migrated to another CPU and has not run yet. */
             bool stolen;
    -        /** Thread is executed in user space. */
    -        bool uspace;
    -
    -        /** Ticks before preemption. */
    -        uint64_t ticks;
    -
    -        /** Thread accounting. */
    -        uint64_t ucycles;
    -        uint64_t kcycles;
    +
    +        /**
    +         * Thread state (state_t).
    +         * This is atomic because we read it via some commands for debug output,
    +         * otherwise it could just be a regular local.
    +         */
    +        atomic_int_fast32_t state;
    +
    +        /** Thread CPU. */
    +        _Atomic(cpu_t *) cpu;
    +
    +        /** Thread's priority. Implemented as index to CPU->rq */
    +        atomic_int_fast32_t priority;
    +
             /** Last sampled cycle. */
             uint64_t last_cycle;
    -        /** Thread doesn't affect accumulated accounting. */
    -        bool uncounted;
    -
    -        /** Thread's priority. Implemented as index to CPU->rq */
    -        int priority;
    -        /** Thread ID. */
    -        thread_id_t tid;
    -
    -        /** Architecture-specific data. */
    -        thread_arch_t arch;
    -
    -        /** Thread's kernel stack. */
    -        uint8_t *kstack;
    -
    -#ifdef CONFIG_UDEBUG
    -        /**
    -         * If true, the scheduler will print a stack trace
    -         * to the kernel console upon scheduling this thread.
    -         */
    -        bool btrace;
    -
    -        /** Debugging stuff */
    -        udebug_thread_t udebug;
    -#endif /* CONFIG_UDEBUG */
     } thread_t;
     
    @@ -219,8 +218,44 @@
     extern void thread_wire(thread_t *, cpu_t *);
     extern void thread_attach(thread_t *, task_t *);
    -extern void thread_ready(thread_t *);
    +extern void thread_start(thread_t *);
    +extern void thread_requeue_sleeping(thread_t *);
     extern void thread_exit(void) __attribute__((noreturn));
     extern void thread_interrupt(thread_t *);
    -extern bool thread_interrupted(thread_t *);
    +
    +enum sleep_state {
    +        SLEEP_INITIAL,
    +        SLEEP_ASLEEP,
    +        SLEEP_WOKE,
    +};
    +
    +typedef enum {
    +        THREAD_OK,
    +        THREAD_TERMINATING,
    +} thread_termination_state_t;
    +
    +typedef enum {
    +        THREAD_WAIT_SUCCESS,
    +        THREAD_WAIT_TIMEOUT,
    +} thread_wait_result_t;
    +
    +extern thread_termination_state_t thread_wait_start(void);
    +extern thread_wait_result_t thread_wait_finish(deadline_t);
    +extern void thread_wakeup(thread_t *);
    +
    +static inline thread_t *thread_ref(thread_t *thread)
    +{
    +        refcount_up(&thread->refcount);
    +        return thread;
    +}
    +
    +static inline thread_t *thread_try_ref(thread_t *thread)
    +{
    +        if (refcount_try_up(&thread->refcount))
    +                return thread;
    +        else
    +                return NULL;
    +}
    +
    +extern void thread_put(thread_t *);
     
     #ifndef thread_create_arch
    @@ -239,12 +274,11 @@
     extern void thread_usleep(uint32_t);
     
    -#define thread_join(t) \
    -        thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
    -
    +extern errno_t thread_join(thread_t *);
     extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
     extern void thread_detach(thread_t *);
     
    +extern void thread_yield(void);
    +
     extern void thread_print_list(bool);
    -extern void thread_destroy(thread_t *, bool);
     extern thread_t *thread_find_by_id(thread_id_t);
     extern size_t thread_count(void);
    @@ -252,5 +286,5 @@
     extern thread_t *thread_next(thread_t *);
     extern void thread_update_accounting(bool);
    -extern bool thread_exists(thread_t *);
    +extern thread_t *thread_try_get(thread_t *);
     
     extern void thread_migration_disable(void);
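The thread_wait_start()/thread_wait_finish()/thread_wakeup() trio added above takes over from the per-thread sleep bookkeeping removed from thread_t (sleep_interruptible, sleep_composable, timeout_pending and the extra saved contexts). Below is a minimal caller-side sketch of the shape those declarations suggest; example_block(), the include paths and the error-code mapping are illustrative assumptions, and the exact lost-wakeup protocol is defined by the implementation, not by this sketch.

#include <proc/thread.h>  /* assumed include path for the header above */
#include <errno.h>        /* assumed header for errno_t and EOK/EINTR/ETIMEOUT */

/* Hypothetical caller: block until thread_wakeup() is called on this
 * thread, the deadline passes, or the thread is asked to terminate. */
static errno_t example_block(deadline_t deadline)
{
        /* Refuse to sleep if the thread is already terminating. */
        if (thread_wait_start() == THREAD_TERMINATING)
                return EINTR;

        /* Sleep until woken or until the deadline expires. */
        if (thread_wait_finish(deadline) == THREAD_WAIT_TIMEOUT)
                return ETIMEOUT;

        return EOK;
}

The waking side would publish whatever condition the sleeper checks and then call thread_wakeup() on a thread_t it holds a reference to.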
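Likewise, the new atomic_refcount_t refcount field, together with thread_ref()/thread_try_ref() (defined inline above), thread_put() and thread_try_get(), replaces the thread_exists()/thread_destroy() style of lifetime management. A small sketch of how a caller might pin a thread under the new scheme; example_use_thread() and the conventions it assumes (when thread_try_ref() can fail, that thread_put() drops the last reference) are illustrative, not taken from this header.

#include <proc/thread.h>  /* assumed include path for the header above */

/* Hypothetical caller: obtain a strong reference on a thread_t reached
 * through some other structure, use it, then drop it. */
static void example_use_thread(thread_t *candidate)
{
        /* thread_try_ref() bumps the refcount only if it is still
         * non-zero; NULL means the thread is already being destroyed. */
        thread_t *thread = thread_try_ref(candidate);
        if (thread == NULL)
                return;

        /* Fields grouped as immutable above (tid, name, task, kstack, ...)
         * are documented as safe to read without synchronization. */
        thread_id_t tid = thread->tid;
        (void) tid;

        /* Release the reference; presumably the structure is freed once
         * the last reference is gone. */
        thread_put(thread);
}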
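Finally, thread_join() changes from a macro wrapping thread_join_timeout() into a proper function returning errno_t, thread_ready() is dropped with thread_start() and thread_requeue_sleeping() added in its place, and thread_destroy() leaves the public interface. A hedged sketch of the resulting start/join pattern follows; the creation step is elided because thread_create()'s signature is not part of the hunks shown here, and example_start_and_join() is a hypothetical name.

#include <proc/thread.h>  /* assumed include path for the header above */

/* Hypothetical caller: start a previously created and attached thread,
 * then wait for it to finish. */
static errno_t example_start_and_join(thread_t *worker)
{
        /* thread_start() appears to take over the role of the removed
         * thread_ready() for letting a new thread run. */
        thread_start(worker);

        /* Blocks until the worker exits; the removed comments suggest
         * joining is not usable on a detached thread. */
        return thread_join(worker);
}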