Changes in kernel/generic/include/proc/thread.h [d23712e:06f81c4] in mainline
File:
- 1 edited
Legend:
- Unmodified: no prefix
- Added (r06f81c4): prefixed with +
- Removed (rd23712e): prefixed with -
kernel/generic/include/proc/thread.h
--- kernel/generic/include/proc/thread.h (rd23712e)
+++ kernel/generic/include/proc/thread.h (r06f81c4)
@@ -95,32 +95,11 @@
 	waitq_t join_wq;
 
-	/** Thread accounting. */
-	atomic_time_stat_t ucycles;
-	atomic_time_stat_t kcycles;
-
-	/** Architecture-specific data. */
-	thread_arch_t arch;
-
-#ifdef CONFIG_UDEBUG
-	/**
-	 * If true, the scheduler will print a stack trace
-	 * to the kernel console upon scheduling this thread.
-	 */
-	atomic_int_fast8_t btrace;
-
-	/** Debugging stuff */
-	udebug_thread_t udebug;
-#endif /* CONFIG_UDEBUG */
-
-	/*
-	 * Immutable fields.
+	/** Lock protecting thread structure.
 	 *
-	 * These fields are only modified during initialization, and are not
-	 * changed at any time between initialization and destruction.
-	 * Can be accessed without synchronization in most places.
-	 */
-
-	/** Thread ID. */
-	thread_id_t tid;
+	 * Protects the whole thread structure except fields listed above.
+	 */
+	IRQ_SPINLOCK_DECLARE(lock);
+
+	char name[THREAD_NAME_BUFLEN];
 
 	/** Function implementing the thread. */
@@ -129,83 +108,72 @@
 	void *thread_arg;
 
-	char name[THREAD_NAME_BUFLEN];
-
+	/**
+	 * From here, the stored context is restored
+	 * when the thread is scheduled.
+	 */
+	context_t saved_context;
+	ipl_t saved_ipl;
+
+	/**
+	 * True if this thread is executing copy_from_uspace().
+	 * False otherwise.
+	 */
+	bool in_copy_from_uspace;
+
+	/**
+	 * True if this thread is executing copy_to_uspace().
+	 * False otherwise.
+	 */
+	bool in_copy_to_uspace;
+
+#ifdef CONFIG_FPU
+	fpu_context_t fpu_context;
+#endif
+	bool fpu_context_exists;
+
+	/* The thread will not be migrated if nomigrate is non-zero. */
+	unsigned int nomigrate;
+
+	/** Thread state. */
+	state_t state;
+
+	/** Thread CPU. */
+	cpu_t *cpu;
+	/** Containing task. */
+	task_t *task;
+	/** Thread was migrated to another CPU and has not run yet. */
+	bool stolen;
 	/** Thread is executed in user space. */
 	bool uspace;
 
+	/** Thread accounting. */
+	uint64_t ucycles;
+	uint64_t kcycles;
+	/** Last sampled cycle. */
+	uint64_t last_cycle;
 	/** Thread doesn't affect accumulated accounting. */
 	bool uncounted;
 
-	/** Containing task. */
-	task_t *task;
+	/** Thread's priority. Implemented as index to CPU->rq */
+	int priority;
+	/** Thread ID. */
+	thread_id_t tid;
+
+	/** Architecture-specific data. */
+	thread_arch_t arch;
 
 	/** Thread's kernel stack. */
 	uint8_t *kstack;
 
-	/*
-	 * Local fields.
-	 *
-	 * These fields can be safely accessed from code that _controls execution_
-	 * of this thread. Code controls execution of a thread if either:
-	 * - it runs in the context of said thread AND interrupts are disabled
-	 *   (interrupts can and will access these fields)
-	 * - the thread is not running, and the code accessing it can legally
-	 *   add/remove the thread to/from a runqueue, i.e., either:
-	 *   - it is allowed to enqueue thread in a new runqueue
-	 *   - it holds the lock to the runqueue containing the thread
-	 *
-	 */
-
-	/**
-	 * From here, the stored context is restored
-	 * when the thread is scheduled.
-	 */
-	context_t saved_context;
-
-	// TODO: we only need one of the two bools below
-
-	/**
-	 * True if this thread is executing copy_from_uspace().
-	 * False otherwise.
-	 */
-	bool in_copy_from_uspace;
-
-	/**
-	 * True if this thread is executing copy_to_uspace().
-	 * False otherwise.
-	 */
-	bool in_copy_to_uspace;
-
-	/*
-	 * FPU context is a special case. If lazy FPU switching is disabled,
-	 * it acts as a regular local field. However, if lazy switching is enabled,
-	 * the context is synchronized via CPU->fpu_lock
-	 */
-#ifdef CONFIG_FPU
-	fpu_context_t fpu_context;
-#endif
-	bool fpu_context_exists;
-
-	/* The thread will not be migrated if nomigrate is non-zero. */
-	unsigned int nomigrate;
-
-	/** Thread was migrated to another CPU and has not run yet. */
-	bool stolen;
-
-	/**
-	 * Thread state (state_t).
-	 * This is atomic because we read it via some commands for debug output,
-	 * otherwise it could just be a regular local.
-	 */
-	atomic_int_fast32_t state;
-
-	/** Thread CPU. */
-	_Atomic(cpu_t *) cpu;
-
-	/** Thread's priority. Implemented as index to CPU->rq */
-	atomic_int_fast32_t priority;
-
-	/** Last sampled cycle. */
-	uint64_t last_cycle;
+#ifdef CONFIG_UDEBUG
+	/**
+	 * If true, the scheduler will print a stack trace
+	 * to the kernel console upon scheduling this thread.
	 */
+	bool btrace;
+
+	/** Debugging stuff */
+	udebug_thread_t udebug;
+#endif /* CONFIG_UDEBUG */
 } thread_t;
 
@@ -218,14 +186,7 @@
 extern void thread_wire(thread_t *, cpu_t *);
 extern void thread_attach(thread_t *, task_t *);
-extern void thread_start(thread_t *);
-extern void thread_requeue_sleeping(thread_t *);
+extern void thread_ready(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
 extern void thread_interrupt(thread_t *);
-
-enum sleep_state {
-	SLEEP_INITIAL,
-	SLEEP_ASLEEP,
-	SLEEP_WOKE,
-};
 
 typedef enum {
@@ -276,7 +237,4 @@
 extern errno_t thread_join(thread_t *);
 extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
-extern void thread_detach(thread_t *);
-
-extern void thread_yield(void);
 
 extern void thread_print_list(bool);
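Note: the substantive change in this diff is the synchronization discipline of thread_t. The rd23712e side partitions the structure into immutable fields, lock-free atomic fields (ucycles, kcycles, state, cpu, priority, btrace), and fields local to whatever code controls the thread's execution; the r06f81c4 side instead guards the whole structure with a single IRQ spinlock and plain field types. The sketch below contrasts the two disciplines in portable C11. It is not HelenOS code: pthread_spinlock_t and _Atomic uint64_t are hypothetical stand-ins for the kernel's IRQ_SPINLOCK_DECLARE and atomic_time_stat_t.

/*
 * Illustrative sketch only -- not HelenOS code. Build with e.g.
 * cc -std=c11 sketch.c -lpthread
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* rd23712e-style: per-field atomics, readable without any lock. */
typedef struct {
	_Atomic uint64_t ucycles;
	_Atomic uint64_t kcycles;
} thread_lockfree_t;

static uint64_t total_cycles_lockfree(thread_lockfree_t *t)
{
	/* Each load is atomic on its own, but the pair is not a
	 * consistent snapshot: an update may land between the loads. */
	return atomic_load(&t->ucycles) + atomic_load(&t->kcycles);
}

/* r06f81c4-style: plain fields, one structure-wide lock. */
typedef struct {
	pthread_spinlock_t lock;  /* stand-in for IRQ_SPINLOCK_DECLARE(lock) */
	uint64_t ucycles;
	uint64_t kcycles;
} thread_locked_t;

static uint64_t total_cycles_locked(thread_locked_t *t)
{
	/* Holding the structure lock yields a mutually consistent
	 * snapshot of both counters, at the cost of serializing
	 * readers against the scheduler's writers. */
	pthread_spin_lock(&t->lock);
	uint64_t total = t->ucycles + t->kcycles;
	pthread_spin_unlock(&t->lock);
	return total;
}

int main(void)
{
	thread_lockfree_t a;
	atomic_init(&a.ucycles, 700);
	atomic_init(&a.kcycles, 300);

	thread_locked_t b = { .ucycles = 700, .kcycles = 300 };
	pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);

	printf("lock-free total: %" PRIu64 "\n", total_cycles_lockfree(&a));
	printf("locked total:    %" PRIu64 "\n", total_cycles_locked(&b));

	pthread_spin_destroy(&b.lock);
	return 0;
}

Consistent with that shift, the remaining hunks of the diff revert state, cpu, and priority to plain state_t, cpu_t *, and int, and replace the thread_start()/thread_requeue_sleeping() entry points and the sleep_state enum with the earlier thread_ready() interface.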