Files:
1 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/proc/thread.h

    r0f4f1b2 rd23712e  
    9595        waitq_t join_wq;
    9696
    97         /** Lock protecting thread structure.
     97        /** Thread accounting. */
     98        atomic_time_stat_t ucycles;
     99        atomic_time_stat_t kcycles;
     100
     101        /** Architecture-specific data. */
     102        thread_arch_t arch;
     103
     104#ifdef CONFIG_UDEBUG
     105        /**
     106         * If true, the scheduler will print a stack trace
     107         * to the kernel console upon scheduling this thread.
     108         */
     109        atomic_int_fast8_t btrace;
     110
     111        /** Debugging stuff */
     112        udebug_thread_t udebug;
     113#endif /* CONFIG_UDEBUG */
     114
     115        /*
     116         * Immutable fields.
    98117         *
    99          * Protects the whole thread structure except fields listed above.
    100          */
    101         IRQ_SPINLOCK_DECLARE(lock);
    102 
    103         char name[THREAD_NAME_BUFLEN];
     118         * These fields are only modified during initialization, and are not
     119         * changed at any time between initialization and destruction.
     120         * Can be accessed without synchronization in most places.
     121         */
     122
     123        /** Thread ID. */
     124        thread_id_t tid;
    104125
    105126        /** Function implementing the thread. */
     
    108129        void *thread_arg;
    109130
     131        char name[THREAD_NAME_BUFLEN];
     132
     133        /** Thread is executed in user space. */
     134        bool uspace;
     135
     136        /** Thread doesn't affect accumulated accounting. */
     137        bool uncounted;
     138
     139        /** Containing task. */
     140        task_t *task;
     141
     142        /** Thread's kernel stack. */
     143        uint8_t *kstack;
     144
     145        /*
     146         * Local fields.
     147         *
     148         * These fields can be safely accessed from code that _controls execution_
     149         * of this thread. Code controls execution of a thread if either:
     150         *  - it runs in the context of said thread AND interrupts are disabled
     151         *    (interrupts can and will access these fields)
     152         *  - the thread is not running, and the code accessing it can legally
     153         *    add/remove the thread to/from a runqueue, i.e., either:
     154         *    - it is allowed to enqueue thread in a new runqueue
     155         *    - it holds the lock to the runqueue containing the thread
     156         *
     157         */
     158
    110159        /**
    111160         * From here, the stored context is restored
     
    114163        context_t saved_context;
    115164
     165        // TODO: we only need one of the two bools below
     166
    116167        /**
    117168         * True if this thread is executing copy_from_uspace().
     
    126177        bool in_copy_to_uspace;
    127178
     179        /*
     180         * FPU context is a special case. If lazy FPU switching is disabled,
     181         * it acts as a regular local field. However, if lazy switching is enabled,
     182         * the context is synchronized via CPU->fpu_lock
     183         */
    128184#ifdef CONFIG_FPU
    129185        fpu_context_t fpu_context;
     
    134190        unsigned int nomigrate;
    135191
    136         /** Thread state. */
    137         state_t state;
    138 
    139         /** Thread CPU. */
    140         cpu_t *cpu;
    141         /** Containing task. */
    142         task_t *task;
    143192        /** Thread was migrated to another CPU and has not run yet. */
    144193        bool stolen;
    145         /** Thread is executed in user space. */
    146         bool uspace;
    147 
    148         /** Thread accounting. */
    149         uint64_t ucycles;
    150         uint64_t kcycles;
     194
     195        /**
     196         * Thread state (state_t).
     197         * This is atomic because we read it via some commands for debug output,
     198         * otherwise it could just be a regular local.
     199         */
     200        atomic_int_fast32_t state;
     201
     202        /** Thread CPU. */
     203        _Atomic(cpu_t *) cpu;
     204
     205        /** Thread's priority. Implemented as index to CPU->rq */
     206        atomic_int_fast32_t priority;
     207
    151208        /** Last sampled cycle. */
    152209        uint64_t last_cycle;
    153         /** Thread doesn't affect accumulated accounting. */
    154         bool uncounted;
    155 
    156         /** Thread's priority. Implemented as index to CPU->rq */
    157         int priority;
    158         /** Thread ID. */
    159         thread_id_t tid;
    160 
    161         /** Architecture-specific data. */
    162         thread_arch_t arch;
    163 
    164         /** Thread's kernel stack. */
    165         uint8_t *kstack;
    166 
    167 #ifdef CONFIG_UDEBUG
    168         /**
    169          * If true, the scheduler will print a stack trace
    170          * to the kernel console upon scheduling this thread.
    171          */
    172         bool btrace;
    173 
    174         /** Debugging stuff */
    175         udebug_thread_t udebug;
    176 #endif /* CONFIG_UDEBUG */
    177210} thread_t;
    178211
     
    186219extern void thread_attach(thread_t *, task_t *);
    187220extern void thread_start(thread_t *);
    188 extern void thread_ready(thread_t *);
     221extern void thread_requeue_sleeping(thread_t *);
    189222extern void thread_exit(void) __attribute__((noreturn));
    190223extern void thread_interrupt(thread_t *);
Note: See TracChangeset for help on using the changeset viewer.