Changes in kernel/generic/include/proc/thread.h [5a5269d:3fcea34] in mainline
File: 1 edited
Legend:
    ' '  Unmodified
    '+'  Added
    '-'  Removed
kernel/generic/include/proc/thread.h
--- r5a5269d
+++ r3fcea34

 #include <arch/cpu.h>
 #include <mm/tlb.h>
-#include <abi/proc/uarg.h>
 #include <udebug/udebug.h>
 #include <abi/proc/thread.h>
…
 /** Thread structure. There is one per thread. */
 typedef struct thread {
+        atomic_refcount_t refcount;
+
         link_t rq_link;  /**< Run queue link. */
         link_t wq_link;  /**< Wait queue link. */
…
         odlink_t lthreads;
 
-        /** Lock protecting thread structure.
+        /** Tracking variable for thread_wait/thread_wakeup */
+        atomic_int sleep_state;
+
+        /**
+         * If true, the thread is terminating.
+         * It will not go to sleep in interruptible synchronization functions
+         * and will call thread_exit() before returning to userspace.
+         */
+        volatile bool interrupted;
+
+        /** Wait queue in which this thread sleeps. Used for debug printouts. */
+        _Atomic(waitq_t *) sleep_queue;
+
+        /** Waitq for thread_join_timeout(). */
+        waitq_t join_wq;
+
+        /** Thread accounting. */
+        atomic_time_stat_t ucycles;
+        atomic_time_stat_t kcycles;
+
+        /** Architecture-specific data. */
+        thread_arch_t arch;
+
+#ifdef CONFIG_UDEBUG
+        /**
+         * If true, the scheduler will print a stack trace
+         * to the kernel console upon scheduling this thread.
+         */
+        atomic_int_fast8_t btrace;
+
+        /** Debugging stuff */
+        udebug_thread_t udebug;
+#endif /* CONFIG_UDEBUG */
+
+        /*
+         * Immutable fields.
          *
-         * Protects the whole thread structure except list links above.
-         */
-        IRQ_SPINLOCK_DECLARE(lock);
-
-        char name[THREAD_NAME_BUFLEN];
+         * These fields are only modified during initialization, and are not
+         * changed at any time between initialization and destruction.
+         * Can be accessed without synchronization in most places.
+         */
+
+        /** Thread ID. */
+        thread_id_t tid;
 
         /** Function implementing the thread. */
…
         void *thread_arg;
 
+        char name[THREAD_NAME_BUFLEN];
+
+        /** Thread is executed in user space. */
+        bool uspace;
+
+        /** Thread doesn't affect accumulated accounting. */
+        bool uncounted;
+
+        /** Containing task. */
+        task_t *task;
+
+        /** Thread's kernel stack. */
+        uint8_t *kstack;
+
+        /*
+         * Local fields.
+         *
+         * These fields can be safely accessed from code that _controls execution_
+         * of this thread. Code controls execution of a thread if either:
+         *  - it runs in the context of said thread AND interrupts are disabled
+         *    (interrupts can and will access these fields)
+         *  - the thread is not running, and the code accessing it can legally
+         *    add/remove the thread to/from a runqueue, i.e., either:
+         *    - it is allowed to enqueue thread in a new runqueue
+         *    - it holds the lock to the runqueue containing the thread
+         *
+         */
+
         /**
          * From here, the stored context is restored
…
         context_t saved_context;
 
-        /**
-         * From here, the stored timeout context
-         * is restored when sleep times out.
-         */
-        context_t sleep_timeout_context;
-
-        /**
-         * From here, the stored interruption context
-         * is restored when sleep is interrupted.
-         */
-        context_t sleep_interruption_context;
-
-        /** If true, the thread can be interrupted from sleep. */
-        bool sleep_interruptible;
-
-        /**
-         * If true, and this thread's sleep returns without a wakeup
-         * (timed out or interrupted), waitq ignores the next wakeup.
-         * This is necessary for futex to be able to handle those conditions.
-         */
-        bool sleep_composable;
-
-        /** Wait queue in which this thread sleeps. */
-        waitq_t *sleep_queue;
-        /** Timeout used for timeoutable sleeping. */
-        timeout_t sleep_timeout;
-        /** Flag signalling sleep timeout in progress. */
-        volatile bool timeout_pending;
+        // TODO: we only need one of the two bools below
 
         /**
…
         bool in_copy_to_uspace;
 
-        /**
-         * If true, the thread will not go to sleep at all and will call
-         * thread_exit() before returning to userspace.
-         */
-        bool interrupted;
-
-        /** If true, thread_join_timeout() cannot be used on this thread. */
-        bool detached;
-        /** Waitq for thread_join_timeout(). */
-        waitq_t join_wq;
-        /** Link used in the joiner_head list. */
-        link_t joiner_link;
-
-        fpu_context_t *saved_fpu_context;
+        /*
+         * FPU context is a special case. If lazy FPU switching is disabled,
+         * it acts as a regular local field. However, if lazy switching is enabled,
+         * the context is synchronized via CPU->fpu_lock
+         */
+#ifdef CONFIG_FPU
+        fpu_context_t fpu_context;
+#endif
         bool fpu_context_exists;
-
-        /*
-         * Defined only if thread doesn't run.
-         * It means that fpu context is in CPU that last time executes this
-         * thread. This disables migration.
-         */
-        bool fpu_context_engaged;
 
         /* The thread will not be migrated if nomigrate is non-zero. */
         unsigned int nomigrate;
 
-        /** Thread state. */
-        state_t state;
-
-        /** Thread CPU. */
-        cpu_t *cpu;
-        /** Containing task. */
-        task_t *task;
-        /** Thread is wired to CPU. */
-        bool wired;
         /** Thread was migrated to another CPU and has not run yet. */
         bool stolen;
-        /** Thread is executed in user space. */
-        bool uspace;
-
-        /** Ticks before preemption. */
-        uint64_t ticks;
-
-        /** Thread accounting. */
-        uint64_t ucycles;
-        uint64_t kcycles;
+
+        /**
+         * Thread state (state_t).
+         * This is atomic because we read it via some commands for debug output,
+         * otherwise it could just be a regular local.
+         */
+        atomic_int_fast32_t state;
+
+        /** Thread CPU. */
+        _Atomic(cpu_t *) cpu;
+
+        /** Thread's priority. Implemented as index to CPU->rq */
+        atomic_int_fast32_t priority;
+
         /** Last sampled cycle. */
         uint64_t last_cycle;
-        /** Thread doesn't affect accumulated accounting. */
-        bool uncounted;
-
-        /** Thread's priority. Implemented as index to CPU->rq */
-        int priority;
-        /** Thread ID. */
-        thread_id_t tid;
-
-        /** Architecture-specific data. */
-        thread_arch_t arch;
-
-        /** Thread's kernel stack. */
-        uint8_t *kstack;
-
-#ifdef CONFIG_UDEBUG
-        /**
-         * If true, the scheduler will print a stack trace
-         * to the kernel console upon scheduling this thread.
-         */
-        bool btrace;
-
-        /** Debugging stuff */
-        udebug_thread_t udebug;
-#endif /* CONFIG_UDEBUG */
 } thread_t;
 
…
 extern void thread_wire(thread_t *, cpu_t *);
 extern void thread_attach(thread_t *, task_t *);
-extern void thread_ready(thread_t *);
+extern void thread_start(thread_t *);
+extern void thread_requeue_sleeping(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
 extern void thread_interrupt(thread_t *);
-extern bool thread_interrupted(thread_t *);
+
+enum sleep_state {
+        SLEEP_INITIAL,
+        SLEEP_ASLEEP,
+        SLEEP_WOKE,
+};
+
+typedef enum {
+        THREAD_OK,
+        THREAD_TERMINATING,
+} thread_termination_state_t;
+
+typedef enum {
+        THREAD_WAIT_SUCCESS,
+        THREAD_WAIT_TIMEOUT,
+} thread_wait_result_t;
+
+extern thread_termination_state_t thread_wait_start(void);
+extern thread_wait_result_t thread_wait_finish(deadline_t);
+extern void thread_wakeup(thread_t *);
+
+static inline thread_t *thread_ref(thread_t *thread)
+{
+        refcount_up(&thread->refcount);
+        return thread;
+}
+
+static inline thread_t *thread_try_ref(thread_t *thread)
+{
+        if (refcount_try_up(&thread->refcount))
+                return thread;
+        else
+                return NULL;
+}
+
+extern void thread_put(thread_t *);
 
 #ifndef thread_create_arch
…
 extern void thread_usleep(uint32_t);
 
-#define thread_join(t) \
-        thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
-
+extern errno_t thread_join(thread_t *);
 extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
 extern void thread_detach(thread_t *);
 
+extern void thread_yield(void);
+
 extern void thread_print_list(bool);
-extern void thread_destroy(thread_t *, bool);
 extern thread_t *thread_find_by_id(thread_id_t);
 extern size_t thread_count(void);
…
 extern thread_t *thread_next(thread_t *);
 extern void thread_update_accounting(bool);
-extern bool thread_exists(thread_t *);
+extern thread_t *thread_try_get(thread_t *);
 
 extern void thread_migration_disable(void);
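The changeset drops the per-thread IRQ spinlock in favour of reference counting: the new atomic_refcount_t refcount field together with thread_ref(), thread_try_ref(), thread_put() and thread_try_get() replaces thread_exists()/thread_destroy(). The following is a minimal sketch of how a caller might pin a thread_t while inspecting it, based only on the declarations above; inspect_thread() and the candidate pointer are hypothetical, and the exact rules for obtaining the initial pointer live in thread.c, not in this header.

    #include <proc/thread.h>

    /*
     * Hypothetical helper: 'candidate' is a thread_t pointer observed in some
     * shared structure (e.g. a task's thread list), under whatever lock
     * protects that structure.
     */
    static void inspect_thread(thread_t *candidate)
    {
            /* Take a strong reference, unless the thread is already being destroyed. */
            thread_t *thread = thread_try_ref(candidate);
            if (thread == NULL)
                    return;

            /*
             * While the reference is held, the thread_t cannot be freed, so the
             * immutable fields (tid, name, task, kstack, ...) can be read without
             * further synchronization, as the new comments in the struct state.
             */

            /* Drop the reference; the last put is what releases the structure. */
            thread_put(thread);
    }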
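The removed sleep_timeout_context/sleep_interruption_context machinery is superseded by the sleep_state tracking variable and the thread_wait_start()/thread_wait_finish()/thread_wakeup() declarations. The sketch below is an assumption about the intended calling pattern, derived only from the signatures and enums in this header; wait_for_event() is hypothetical, and the authoritative usage is in the kernel's synchronization code (waitq), which may differ in detail.

    #include <proc/thread.h>

    /*
     * Hypothetical blocking helper: returns true when woken by thread_wakeup(),
     * false when the current thread is terminating or the deadline passes.
     * Publishing THREAD (the current thread) to the waker and re-checking the
     * wait condition are elided.
     */
    static bool wait_for_event(deadline_t deadline)
    {
            /* Announce the intention to sleep; refuse if the thread is exiting. */
            if (thread_wait_start() == THREAD_TERMINATING)
                    return false;

            /* ... make the sleeping thread visible to the waker here ... */

            /* Sleep until thread_wakeup() is called or the deadline expires. */
            return thread_wait_finish(deadline) == THREAD_WAIT_SUCCESS;
    }

On the waker side, a thread kept alive via thread_try_ref() or thread_try_get() would then be passed to thread_wakeup().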