Changes in kernel/generic/include/proc/thread.h [d23712e:0f4f1b2] in mainline
File: 1 edited
Legend:
  ' '  Unmodified
  '+'  Added
  '-'  Removed
kernel/generic/include/proc/thread.h
--- kernel/generic/include/proc/thread.h (revision d23712e)
+++ kernel/generic/include/proc/thread.h (revision 0f4f1b2)
@@ -95,32 +95,11 @@
 	waitq_t join_wq;
 
-	/** Thread accounting. */
-	atomic_time_stat_t ucycles;
-	atomic_time_stat_t kcycles;
-
-	/** Architecture-specific data. */
-	thread_arch_t arch;
-
-#ifdef CONFIG_UDEBUG
-	/**
-	 * If true, the scheduler will print a stack trace
-	 * to the kernel console upon scheduling this thread.
-	 */
-	atomic_int_fast8_t btrace;
-
-	/** Debugging stuff */
-	udebug_thread_t udebug;
-#endif /* CONFIG_UDEBUG */
-
-	/*
-	 * Immutable fields.
+	/** Lock protecting thread structure.
 	 *
-	 * These fields are only modified during initialization, and are not
-	 * changed at any time between initialization and destruction.
-	 * Can be accessed without synchronization in most places.
-	 */
-
-	/** Thread ID. */
-	thread_id_t tid;
+	 * Protects the whole thread structure except fields listed above.
+	 */
+	IRQ_SPINLOCK_DECLARE(lock);
+
+	char name[THREAD_NAME_BUFLEN];
 
 	/** Function implementing the thread. */
@@ -129,83 +108,71 @@
 	void *thread_arg;
 
-	char name[THREAD_NAME_BUFLEN];
-
+	/**
+	 * From here, the stored context is restored
+	 * when the thread is scheduled.
+	 */
+	context_t saved_context;
+
+	/**
+	 * True if this thread is executing copy_from_uspace().
+	 * False otherwise.
+	 */
+	bool in_copy_from_uspace;
+
+	/**
+	 * True if this thread is executing copy_to_uspace().
+	 * False otherwise.
+	 */
+	bool in_copy_to_uspace;
+
+#ifdef CONFIG_FPU
+	fpu_context_t fpu_context;
+#endif
+	bool fpu_context_exists;
+
+	/* The thread will not be migrated if nomigrate is non-zero. */
+	unsigned int nomigrate;
+
+	/** Thread state. */
+	state_t state;
+
+	/** Thread CPU. */
+	cpu_t *cpu;
+	/** Containing task. */
+	task_t *task;
+	/** Thread was migrated to another CPU and has not run yet. */
+	bool stolen;
 	/** Thread is executed in user space. */
 	bool uspace;
 
+	/** Thread accounting. */
+	uint64_t ucycles;
+	uint64_t kcycles;
+	/** Last sampled cycle. */
+	uint64_t last_cycle;
 	/** Thread doesn't affect accumulated accounting. */
 	bool uncounted;
 
-	/** Containing task. */
-	task_t *task;
+	/** Thread's priority. Implemented as index to CPU->rq */
+	int priority;
+	/** Thread ID. */
+	thread_id_t tid;
+
+	/** Architecture-specific data. */
+	thread_arch_t arch;
 
 	/** Thread's kernel stack. */
 	uint8_t *kstack;
 
-	/*
-	 * Local fields.
-	 *
-	 * These fields can be safely accessed from code that _controls execution_
-	 * of this thread. Code controls execution of a thread if either:
-	 * - it runs in the context of said thread AND interrupts are disabled
-	 *   (interrupts can and will access these fields)
-	 * - the thread is not running, and the code accessing it can legally
-	 *   add/remove the thread to/from a runqueue, i.e., either:
-	 *   - it is allowed to enqueue thread in a new runqueue
-	 *   - it holds the lock to the runqueue containing the thread
-	 *
-	 */
-
-	/**
-	 * From here, the stored context is restored
-	 * when the thread is scheduled.
-	 */
-	context_t saved_context;
-
-	// TODO: we only need one of the two bools below
-
-	/**
-	 * True if this thread is executing copy_from_uspace().
-	 * False otherwise.
-	 */
-	bool in_copy_from_uspace;
-
-	/**
-	 * True if this thread is executing copy_to_uspace().
-	 * False otherwise.
-	 */
-	bool in_copy_to_uspace;
-
-	/*
-	 * FPU context is a special case. If lazy FPU switching is disabled,
-	 * it acts as a regular local field. However, if lazy switching is enabled,
-	 * the context is synchronized via CPU->fpu_lock
-	 */
-#ifdef CONFIG_FPU
-	fpu_context_t fpu_context;
-#endif
-	bool fpu_context_exists;
-
-	/* The thread will not be migrated if nomigrate is non-zero. */
-	unsigned int nomigrate;
-
-	/** Thread was migrated to another CPU and has not run yet. */
-	bool stolen;
-
-	/**
-	 * Thread state (state_t).
-	 * This is atomic because we read it via some commands for debug output,
-	 * otherwise it could just be a regular local.
-	 */
-	atomic_int_fast32_t state;
-
-	/** Thread CPU. */
-	_Atomic(cpu_t *) cpu;
-
-	/** Thread's priority. Implemented as index to CPU->rq */
-	atomic_int_fast32_t priority;
-
-	/** Last sampled cycle. */
-	uint64_t last_cycle;
+#ifdef CONFIG_UDEBUG
+	/**
+	 * If true, the scheduler will print a stack trace
+	 * to the kernel console upon scheduling this thread.
+	 */
+	bool btrace;
+
+	/** Debugging stuff */
+	udebug_thread_t udebug;
+#endif /* CONFIG_UDEBUG */
 } thread_t;
 
@@ -219,5 +186,5 @@
 extern void thread_attach(thread_t *, task_t *);
 extern void thread_start(thread_t *);
-extern void thread_requeue_sleeping(thread_t *);
+extern void thread_ready(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
 extern void thread_interrupt(thread_t *);
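The gist of the difference between the two revisions: in r0f4f1b2 the whole thread_t is guarded by a single IRQ spinlock (lock), while in rd23712e the structure is partitioned into immutable, local, and atomic fields that can mostly be read without locking. Below is a minimal sketch of what each convention means for code that samples thread->state. The helper names are hypothetical; irq_spinlock_lock()/irq_spinlock_unlock() belong to the same kernel spinlock API as the IRQ_SPINLOCK_DECLARE() used in the diff, and atomic_load() is C11. Each helper matches one revision of thread_t and would not compile against the other.

/* Sketch only, not part of the changeset. */
#include <proc/thread.h>
#include <synch/spinlock.h>
#include <stdatomic.h>

/* r0f4f1b2: state is a plain state_t guarded by thread->lock, so a
 * reader takes the lock; the second argument asks the spinlock to
 * also disable interrupts, making the read safe in any context. */
static state_t thread_state_locked(thread_t *thread)
{
	state_t state;

	irq_spinlock_lock(&thread->lock, true);
	state = thread->state;
	irq_spinlock_unlock(&thread->lock, true);

	return state;
}

/* rd23712e: state is atomic_int_fast32_t precisely so that debug
 * output can sample it without holding any lock. */
static state_t thread_state_sampled(thread_t *thread)
{
	return (state_t) atomic_load(&thread->state);
}

Read left to right, the diff thus trades the fine-grained scheme of d23712e for the one coarse lock of 0f4f1b2; the declaration of thread_requeue_sleeping() correspondingly becomes thread_ready().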