File:
1 edited

Legend:

Unmodified: lines with no prefix
Added: lines prefixed with +
Removed: lines prefixed with -
  • kernel/generic/include/proc/thread.h

    r3fcea34 → r5a5269d

     #include <arch/cpu.h>
     #include <mm/tlb.h>
    +#include <abi/proc/uarg.h>
     #include <udebug/udebug.h>
     #include <abi/proc/thread.h>
     
     /** Thread structure. There is one per thread. */
     typedef struct thread {
    -        atomic_refcount_t refcount;
    -
             link_t rq_link;  /**< Run queue link. */
             link_t wq_link;  /**< Wait queue link. */
     
             odlink_t lthreads;

    -        /** Tracking variable for thread_wait/thread_wakeup */
    -        atomic_int sleep_state;
    -
    -        /**
    -         * If true, the thread is terminating.
    -         * It will not go to sleep in interruptible synchronization functions
    -         * and will call thread_exit() before returning to userspace.
    -         */
    -        volatile bool interrupted;
    -
    -        /** Wait queue in which this thread sleeps. Used for debug printouts. */
    -        _Atomic(waitq_t *) sleep_queue;
    -
    -        /** Waitq for thread_join_timeout(). */
    -        waitq_t join_wq;
    -
    -        /** Thread accounting. */
    -        atomic_time_stat_t ucycles;
    -        atomic_time_stat_t kcycles;
    -
    -        /** Architecture-specific data. */
    -        thread_arch_t arch;
    -
    -#ifdef CONFIG_UDEBUG
    -        /**
    -         * If true, the scheduler will print a stack trace
    -         * to the kernel console upon scheduling this thread.
    -         */
    -        atomic_int_fast8_t btrace;
    -
    -        /** Debugging stuff */
    -        udebug_thread_t udebug;
    -#endif /* CONFIG_UDEBUG */
    -
    -        /*
    -         * Immutable fields.
    +        /** Lock protecting thread structure.
              *
    -         * These fields are only modified during initialization, and are not
    -         * changed at any time between initialization and destruction.
    -         * Can be accessed without synchronization in most places.
    -         */
    -
    -        /** Thread ID. */
    -        thread_id_t tid;
    +         * Protects the whole thread structure except list links above.
    +         */
    +        IRQ_SPINLOCK_DECLARE(lock);
    +
    +        char name[THREAD_NAME_BUFLEN];

             /** Function implementing the thread. */
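
    The re-added lock is an IRQ spinlock, so the fields it protects are meant to be
    accessed with interrupts disabled while the lock is held. A minimal sketch of that
    pattern, assuming the irq_spinlock_lock()/irq_spinlock_unlock() interface declared
    elsewhere in the kernel; the accessor name below is hypothetical and not part of
    this changeset:

        /* Hypothetical helper, for illustration only. */
        static state_t thread_get_state(thread_t *thread)
        {
                /* Second argument: also disable interrupts while the lock is held. */
                irq_spinlock_lock(&thread->lock, true);
                state_t state = thread->state;
                irq_spinlock_unlock(&thread->lock, true);
                return state;
        }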
     
             void *thread_arg;

    -        char name[THREAD_NAME_BUFLEN];
    -
    +        /**
    +         * From here, the stored context is restored
    +         * when the thread is scheduled.
    +         */
    +        context_t saved_context;
    +
    +        /**
    +         * From here, the stored timeout context
    +         * is restored when sleep times out.
    +         */
    +        context_t sleep_timeout_context;
    +
    +        /**
    +         * From here, the stored interruption context
    +         * is restored when sleep is interrupted.
    +         */
    +        context_t sleep_interruption_context;
    +
    +        /** If true, the thread can be interrupted from sleep. */
    +        bool sleep_interruptible;
    +
    +        /**
    +         * If true, and this thread's sleep returns without a wakeup
    +         * (timed out or interrupted), waitq ignores the next wakeup.
    +         * This is necessary for futex to be able to handle those conditions.
    +         */
    +        bool sleep_composable;
    +
    +        /** Wait queue in which this thread sleeps. */
    +        waitq_t *sleep_queue;
    +        /** Timeout used for timeoutable sleeping.  */
    +        timeout_t sleep_timeout;
    +        /** Flag signalling sleep timeout in progress. */
    +        volatile bool timeout_pending;
    +
    +        /**
    +         * True if this thread is executing copy_from_uspace().
    +         * False otherwise.
    +         */
    +        bool in_copy_from_uspace;
    +
    +        /**
    +         * True if this thread is executing copy_to_uspace().
    +         * False otherwise.
    +         */
    +        bool in_copy_to_uspace;
    +
    +        /**
    +         * If true, the thread will not go to sleep at all and will call
    +         * thread_exit() before returning to userspace.
    +         */
    +        bool interrupted;
    +
    +        /** If true, thread_join_timeout() cannot be used on this thread. */
    +        bool detached;
    +        /** Waitq for thread_join_timeout(). */
    +        waitq_t join_wq;
    +        /** Link used in the joiner_head list. */
    +        link_t joiner_link;
    +
    +        fpu_context_t *saved_fpu_context;
    +        bool fpu_context_exists;
    +
    +        /*
    +         * Defined only if thread doesn't run.
    +         * It means that fpu context is in CPU that last time executes this
    +         * thread. This disables migration.
    +         */
    +        bool fpu_context_engaged;
    +
    +        /* The thread will not be migrated if nomigrate is non-zero. */
    +        unsigned int nomigrate;
    +
    +        /** Thread state. */
    +        state_t state;
    +
    +        /** Thread CPU. */
    +        cpu_t *cpu;
    +        /** Containing task. */
    +        task_t *task;
    +        /** Thread is wired to CPU. */
    +        bool wired;
    +        /** Thread was migrated to another CPU and has not run yet. */
    +        bool stolen;
             /** Thread is executed in user space. */
             bool uspace;

    +        /** Ticks before preemption. */
    +        uint64_t ticks;
    +
    +        /** Thread accounting. */
    +        uint64_t ucycles;
    +        uint64_t kcycles;
    +        /** Last sampled cycle. */
    +        uint64_t last_cycle;
             /** Thread doesn't affect accumulated accounting. */
             bool uncounted;

    -        /** Containing task. */
    -        task_t *task;
    +        /** Thread's priority. Implemented as index to CPU->rq */
    +        int priority;
    +        /** Thread ID. */
    +        thread_id_t tid;
    +
    +        /** Architecture-specific data. */
    +        thread_arch_t arch;

             /** Thread's kernel stack. */
             uint8_t *kstack;

    -        /*
    -         * Local fields.
    -         *
    -         * These fields can be safely accessed from code that _controls execution_
    -         * of this thread. Code controls execution of a thread if either:
    -         *  - it runs in the context of said thread AND interrupts are disabled
    -         *    (interrupts can and will access these fields)
    -         *  - the thread is not running, and the code accessing it can legally
    -         *    add/remove the thread to/from a runqueue, i.e., either:
    -         *    - it is allowed to enqueue thread in a new runqueue
    -         *    - it holds the lock to the runqueue containing the thread
    -         *
    -         */
    -
    -        /**
    -         * From here, the stored context is restored
    -         * when the thread is scheduled.
    -         */
    -        context_t saved_context;
    -
    -        // TODO: we only need one of the two bools below
    -
    -        /**
    -         * True if this thread is executing copy_from_uspace().
    -         * False otherwise.
    -         */
    -        bool in_copy_from_uspace;
    -
    -        /**
    -         * True if this thread is executing copy_to_uspace().
    -         * False otherwise.
    -         */
    -        bool in_copy_to_uspace;
    -
    -        /*
    -         * FPU context is a special case. If lazy FPU switching is disabled,
    -         * it acts as a regular local field. However, if lazy switching is enabled,
    -         * the context is synchronized via CPU->fpu_lock
    -         */
    -#ifdef CONFIG_FPU
    -        fpu_context_t fpu_context;
    -#endif
    -        bool fpu_context_exists;
    -
    -        /* The thread will not be migrated if nomigrate is non-zero. */
    -        unsigned int nomigrate;
    -
    -        /** Thread was migrated to another CPU and has not run yet. */
    -        bool stolen;
    -
    -        /**
    -         * Thread state (state_t).
    -         * This is atomic because we read it via some commands for debug output,
    -         * otherwise it could just be a regular local.
    -         */
    -        atomic_int_fast32_t state;
    -
    -        /** Thread CPU. */
    -        _Atomic(cpu_t *) cpu;
    -
    -        /** Thread's priority. Implemented as index to CPU->rq */
    -        atomic_int_fast32_t priority;
    -
    -        /** Last sampled cycle. */
    -        uint64_t last_cycle;
    +#ifdef CONFIG_UDEBUG
    +        /**
    +         * If true, the scheduler will print a stack trace
    +         * to the kernel console upon scheduling this thread.
    +         */
    +        bool btrace;
    +
    +        /** Debugging stuff */
    +        udebug_thread_t udebug;
    +#endif /* CONFIG_UDEBUG */
     } thread_t;

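    The three context_t fields restored on wakeup correspond to the three ways a
    suspended thread can resume: regular scheduling (saved_context), sleep timeout
    (sleep_timeout_context), and sleep interruption (sleep_interruption_context).
    A rough sketch of how a sleeping path arms them, assuming the setjmp-like
    context_save()/context_restore() pair (returning zero on the resumed path),
    the THREAD macro for the current thread, and illustrative error codes; this is
    not code touched by the changeset:

        /* Illustration only: arming the sleep contexts before suspending. */
        if (!context_save(&THREAD->sleep_timeout_context)) {
                /* Resumed here by the timeout handler. */
                return ETIMEOUT;
        }

        if (!context_save(&THREAD->sleep_interruption_context)) {
                /* Resumed here when the sleep is interrupted. */
                return EINTR;
        }

        /* A regular wakeup resumes from saved_context via the scheduler. */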
     
     extern void thread_wire(thread_t *, cpu_t *);
     extern void thread_attach(thread_t *, task_t *);
    -extern void thread_start(thread_t *);
    -extern void thread_requeue_sleeping(thread_t *);
    +extern void thread_ready(thread_t *);
     extern void thread_exit(void) __attribute__((noreturn));
     extern void thread_interrupt(thread_t *);
    -
    -enum sleep_state {
    -        SLEEP_INITIAL,
    -        SLEEP_ASLEEP,
    -        SLEEP_WOKE,
    -};
    -
    -typedef enum {
    -        THREAD_OK,
    -        THREAD_TERMINATING,
    -} thread_termination_state_t;
    -
    -typedef enum {
    -        THREAD_WAIT_SUCCESS,
    -        THREAD_WAIT_TIMEOUT,
    -} thread_wait_result_t;
    -
    -extern thread_termination_state_t thread_wait_start(void);
    -extern thread_wait_result_t thread_wait_finish(deadline_t);
    -extern void thread_wakeup(thread_t *);
    -
    -static inline thread_t *thread_ref(thread_t *thread)
    -{
    -        refcount_up(&thread->refcount);
    -        return thread;
    -}
    -
    -static inline thread_t *thread_try_ref(thread_t *thread)
    -{
    -        if (refcount_try_up(&thread->refcount))
    -                return thread;
    -        else
    -                return NULL;
    -}
    -
    -extern void thread_put(thread_t *);
    +extern bool thread_interrupted(thread_t *);

     #ifndef thread_create_arch
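
    For context, the inline helpers removed above implemented a try-ref pattern:
    thread_try_ref() presumably succeeds only while the reference count has not yet
    dropped to zero, so a looked-up thread_t cannot be destroyed underneath the
    caller. A sketch of how callers would have used it; the candidate pointer and
    use_thread() are hypothetical:

        thread_t *thread = thread_try_ref(candidate);
        if (thread != NULL) {
                /* The reference stays valid until the matching thread_put(). */
                use_thread(thread);
                thread_put(thread);
        }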
     
     extern void thread_usleep(uint32_t);

    -extern errno_t thread_join(thread_t *);
    +#define thread_join(t) \
    +        thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
    +
     extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
     extern void thread_detach(thread_t *);

    -extern void thread_yield(void);
    -
     extern void thread_print_list(bool);
    +extern void thread_destroy(thread_t *, bool);
     extern thread_t *thread_find_by_id(thread_id_t);
     extern size_t thread_count(void);
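
    With thread_join() redefined as a macro, a plain join is simply a
    thread_join_timeout() call with no timeout. For illustration (the child
    variable is hypothetical):

        errno_t rc = thread_join(child);
        /* expands to: thread_join_timeout((child), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE) */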
     
     extern thread_t *thread_next(thread_t *);
     extern void thread_update_accounting(bool);
    -extern thread_t *thread_try_get(thread_t *);
    +extern bool thread_exists(thread_t *);

     extern void thread_migration_disable(void);