Changeset diff (Trac changeset viewer) — files changed: 1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/proc/thread.h

    rd23712e r5a5269d  
    7070/** Thread structure. There is one per thread. */
    7171typedef struct thread {
    72         atomic_refcount_t refcount;
    73 
    7472        link_t rq_link;  /**< Run queue link. */
    7573        link_t wq_link;  /**< Wait queue link. */
     
    7977        odlink_t lthreads;
    8078
    81         /** Tracking variable for thread_wait/thread_wakeup */
    82         atomic_int sleep_state;
    83 
    84         /**
    85          * If true, the thread is terminating.
    86          * It will not go to sleep in interruptible synchronization functions
    87          * and will call thread_exit() before returning to userspace.
    88          */
    89         volatile bool interrupted;
    90 
    91         /** Wait queue in which this thread sleeps. Used for debug printouts. */
    92         _Atomic(waitq_t *) sleep_queue;
    93 
    94         /** Waitq for thread_join_timeout(). */
    95         waitq_t join_wq;
    96 
    97         /** Thread accounting. */
    98         atomic_time_stat_t ucycles;
    99         atomic_time_stat_t kcycles;
    100 
    101         /** Architecture-specific data. */
    102         thread_arch_t arch;
    103 
    104 #ifdef CONFIG_UDEBUG
    105         /**
    106          * If true, the scheduler will print a stack trace
    107          * to the kernel console upon scheduling this thread.
    108          */
    109         atomic_int_fast8_t btrace;
    110 
    111         /** Debugging stuff */
    112         udebug_thread_t udebug;
    113 #endif /* CONFIG_UDEBUG */
    114 
    115         /*
    116          * Immutable fields.
     79        /** Lock protecting thread structure.
    11780         *
    118          * These fields are only modified during initialization, and are not
    119          * changed at any time between initialization and destruction.
    120          * Can be accessed without synchronization in most places.
    121          */
    122 
    123         /** Thread ID. */
    124         thread_id_t tid;
     81         * Protects the whole thread structure except list links above.
     82         */
     83        IRQ_SPINLOCK_DECLARE(lock);
     84
     85        char name[THREAD_NAME_BUFLEN];
    12586
    12687        /** Function implementing the thread. */
     
    12990        void *thread_arg;
    13091
    131         char name[THREAD_NAME_BUFLEN];
    132 
     92        /**
     93         * From here, the stored context is restored
     94         * when the thread is scheduled.
     95         */
     96        context_t saved_context;
     97
     98        /**
     99         * From here, the stored timeout context
     100         * is restored when sleep times out.
     101         */
     102        context_t sleep_timeout_context;
     103
     104        /**
     105         * From here, the stored interruption context
     106         * is restored when sleep is interrupted.
     107         */
     108        context_t sleep_interruption_context;
     109
     110        /** If true, the thread can be interrupted from sleep. */
     111        bool sleep_interruptible;
     112
     113        /**
     114         * If true, and this thread's sleep returns without a wakeup
     115         * (timed out or interrupted), waitq ignores the next wakeup.
     116         * This is necessary for futex to be able to handle those conditions.
     117         */
     118        bool sleep_composable;
     119
     120        /** Wait queue in which this thread sleeps. */
     121        waitq_t *sleep_queue;
     122        /** Timeout used for timeoutable sleeping.  */
     123        timeout_t sleep_timeout;
     124        /** Flag signalling sleep timeout in progress. */
     125        volatile bool timeout_pending;
     126
     127        /**
     128         * True if this thread is executing copy_from_uspace().
     129         * False otherwise.
     130         */
     131        bool in_copy_from_uspace;
     132
     133        /**
     134         * True if this thread is executing copy_to_uspace().
     135         * False otherwise.
     136         */
     137        bool in_copy_to_uspace;
     138
     139        /**
     140         * If true, the thread will not go to sleep at all and will call
     141         * thread_exit() before returning to userspace.
     142         */
     143        bool interrupted;
     144
     145        /** If true, thread_join_timeout() cannot be used on this thread. */
     146        bool detached;
     147        /** Waitq for thread_join_timeout(). */
     148        waitq_t join_wq;
     149        /** Link used in the joiner_head list. */
     150        link_t joiner_link;
     151
     152        fpu_context_t *saved_fpu_context;
     153        bool fpu_context_exists;
     154
     155        /*
     156         * Defined only if thread doesn't run.
     157         * It means that fpu context is in CPU that last time executes this
     158         * thread. This disables migration.
     159         */
     160        bool fpu_context_engaged;
     161
     162        /* The thread will not be migrated if nomigrate is non-zero. */
     163        unsigned int nomigrate;
     164
     165        /** Thread state. */
     166        state_t state;
     167
     168        /** Thread CPU. */
     169        cpu_t *cpu;
     170        /** Containing task. */
     171        task_t *task;
     172        /** Thread is wired to CPU. */
     173        bool wired;
     174        /** Thread was migrated to another CPU and has not run yet. */
     175        bool stolen;
    133176        /** Thread is executed in user space. */
    134177        bool uspace;
    135178
     179        /** Ticks before preemption. */
     180        uint64_t ticks;
     181
     182        /** Thread accounting. */
     183        uint64_t ucycles;
     184        uint64_t kcycles;
     185        /** Last sampled cycle. */
     186        uint64_t last_cycle;
    136187        /** Thread doesn't affect accumulated accounting. */
    137188        bool uncounted;
    138189
    139         /** Containing task. */
    140         task_t *task;
     190        /** Thread's priority. Implemented as index to CPU->rq */
     191        int priority;
     192        /** Thread ID. */
     193        thread_id_t tid;
     194
     195        /** Architecture-specific data. */
     196        thread_arch_t arch;
    141197
    142198        /** Thread's kernel stack. */
    143199        uint8_t *kstack;
    144200
    145         /*
    146          * Local fields.
    147          *
    148          * These fields can be safely accessed from code that _controls execution_
    149          * of this thread. Code controls execution of a thread if either:
    150          *  - it runs in the context of said thread AND interrupts are disabled
    151          *    (interrupts can and will access these fields)
    152          *  - the thread is not running, and the code accessing it can legally
    153          *    add/remove the thread to/from a runqueue, i.e., either:
    154          *    - it is allowed to enqueue thread in a new runqueue
    155          *    - it holds the lock to the runqueue containing the thread
    156          *
    157          */
    158 
    159         /**
    160          * From here, the stored context is restored
    161          * when the thread is scheduled.
    162          */
    163         context_t saved_context;
    164 
    165         // TODO: we only need one of the two bools below
    166 
    167         /**
    168          * True if this thread is executing copy_from_uspace().
    169          * False otherwise.
    170          */
    171         bool in_copy_from_uspace;
    172 
    173         /**
    174          * True if this thread is executing copy_to_uspace().
    175          * False otherwise.
    176          */
    177         bool in_copy_to_uspace;
    178 
    179         /*
    180          * FPU context is a special case. If lazy FPU switching is disabled,
    181          * it acts as a regular local field. However, if lazy switching is enabled,
    182          * the context is synchronized via CPU->fpu_lock
    183          */
    184 #ifdef CONFIG_FPU
    185         fpu_context_t fpu_context;
    186 #endif
    187         bool fpu_context_exists;
    188 
    189         /* The thread will not be migrated if nomigrate is non-zero. */
    190         unsigned int nomigrate;
    191 
    192         /** Thread was migrated to another CPU and has not run yet. */
    193         bool stolen;
    194 
    195         /**
    196          * Thread state (state_t).
    197          * This is atomic because we read it via some commands for debug output,
    198          * otherwise it could just be a regular local.
    199          */
    200         atomic_int_fast32_t state;
    201 
    202         /** Thread CPU. */
    203         _Atomic(cpu_t *) cpu;
    204 
    205         /** Thread's priority. Implemented as index to CPU->rq */
    206         atomic_int_fast32_t priority;
    207 
    208         /** Last sampled cycle. */
    209         uint64_t last_cycle;
     201#ifdef CONFIG_UDEBUG
     202        /**
     203         * If true, the scheduler will print a stack trace
     204         * to the kernel console upon scheduling this thread.
     205         */
     206        bool btrace;
     207
     208        /** Debugging stuff */
     209        udebug_thread_t udebug;
     210#endif /* CONFIG_UDEBUG */
    210211} thread_t;
    211212
     
    218219extern void thread_wire(thread_t *, cpu_t *);
    219220extern void thread_attach(thread_t *, task_t *);
    220 extern void thread_start(thread_t *);
    221 extern void thread_requeue_sleeping(thread_t *);
     221extern void thread_ready(thread_t *);
    222222extern void thread_exit(void) __attribute__((noreturn));
    223223extern void thread_interrupt(thread_t *);
    224 
    225 enum sleep_state {
    226         SLEEP_INITIAL,
    227         SLEEP_ASLEEP,
    228         SLEEP_WOKE,
    229 };
    230 
    231 typedef enum {
    232         THREAD_OK,
    233         THREAD_TERMINATING,
    234 } thread_termination_state_t;
    235 
    236 typedef enum {
    237         THREAD_WAIT_SUCCESS,
    238         THREAD_WAIT_TIMEOUT,
    239 } thread_wait_result_t;
    240 
    241 extern thread_termination_state_t thread_wait_start(void);
    242 extern thread_wait_result_t thread_wait_finish(deadline_t);
    243 extern void thread_wakeup(thread_t *);
    244 
    245 static inline thread_t *thread_ref(thread_t *thread)
    246 {
    247         refcount_up(&thread->refcount);
    248         return thread;
    249 }
    250 
    251 static inline thread_t *thread_try_ref(thread_t *thread)
    252 {
    253         if (refcount_try_up(&thread->refcount))
    254                 return thread;
    255         else
    256                 return NULL;
    257 }
    258 
    259 extern void thread_put(thread_t *);
     224extern bool thread_interrupted(thread_t *);
    260225
    261226#ifndef thread_create_arch
     
    274239extern void thread_usleep(uint32_t);
    275240
    276 extern errno_t thread_join(thread_t *);
     241#define thread_join(t) \
     242        thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
     243
    277244extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
    278245extern void thread_detach(thread_t *);
    279246
    280 extern void thread_yield(void);
    281 
    282247extern void thread_print_list(bool);
     248extern void thread_destroy(thread_t *, bool);
    283249extern thread_t *thread_find_by_id(thread_id_t);
    284250extern size_t thread_count(void);
     
    286252extern thread_t *thread_next(thread_t *);
    287253extern void thread_update_accounting(bool);
    288 extern thread_t *thread_try_get(thread_t *);
     254extern bool thread_exists(thread_t *);
    289255
    290256extern void thread_migration_disable(void);
Note: See TracChangeset for help on using the changeset viewer.