Changes in kernel/generic/src/proc/thread.c [c0699467:f22dc820] in mainline
File: 1 edited
Legend:
- Unmodified: context line, no prefix
- Added: line prefixed with '+'
- Removed: line prefixed with '-'
kernel/generic/src/proc/thread.c
--- c0699467
+++ f22dc820

@@ -173 +173 @@
 #endif /* CONFIG_FPU */
 
+	/*
+	 * Allocate the kernel stack from the low-memory to prevent an infinite
+	 * nesting of TLB-misses when accessing the stack from the part of the
+	 * TLB-miss handler written in C.
+	 *
+	 * Note that low-memory is safe to be used for the stack as it will be
+	 * covered by the kernel identity mapping, which guarantees not to
+	 * nest TLB-misses infinitely (either via some hardware mechanism or
+	 * by the construction of the assembly-language part of the TLB-miss
+	 * handler).
+	 *
+	 * This restriction can be lifted once each architecture provides
+	 * a similar guarantee, for example by locking the kernel stack
+	 * in the TLB whenever it is allocated from the high-memory and the
+	 * thread is being scheduled to run.
+	 */
+	kmflags |= FRAME_LOWMEM;
+	kmflags &= ~FRAME_HIGHMEM;
+
 	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
 	if (!thread->kstack) {

@@ -217 +236 @@
 
 	atomic_set(&nrdy, 0);
-	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
+	thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0,
 	    thr_constructor, thr_destructor, 0);
 
 #ifdef CONFIG_FPU
-	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
-	    FPU_CONTEXT_ALIGN, NULL, NULL, 0);
+	fpu_context_slab = slab_cache_create("fpu_context_t",
+	    sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
 

@@ -228 +247 @@
 }
 
+/** Wire thread to the given CPU
+ *
+ * @param cpu CPU to wire the thread to.
+ *
+ */
+void thread_wire(thread_t *thread, cpu_t *cpu)
+{
+	irq_spinlock_lock(&thread->lock, true);
+	thread->cpu = cpu;
+	thread->wired = true;
+	irq_spinlock_unlock(&thread->lock, true);
+}
+
 /** Make thread ready
  *

@@ -241 +273 @@
 	ASSERT(thread->state != Ready);
 
-	int i = (thread->priority < RQ_COUNT - 1)
-	    ? ++thread->priority : thread->priority;
+	int i = (thread->priority < RQ_COUNT - 1) ?
+	    ++thread->priority : thread->priority;
 
-	cpu_t *cpu = CPU;
-	if (thread->flags & THREAD_FLAG_WIRED) {
+	cpu_t *cpu;
+	if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
 		ASSERT(thread->cpu != NULL);
 		cpu = thread->cpu;
-	}
+	} else
+		cpu = CPU;
+
 	thread->state = Ready;
 

@@ -279 +313 @@
  * @param flags     Thread flags.
  * @param name      Symbolic name (a copy is made).
- * @param uncounted Thread's accounting doesn't affect accumulated task
- *                  accounting.
  *
  * @return New thread's structure on success, NULL on failure.
@@ -286 +318 @@
  */
 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    unsigned int flags, const char *name, bool uncounted)
+    thread_flags_t flags, const char *name)
 {
 	thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);

@@ -316 +348 @@
 	thread->ucycles = 0;
 	thread->kcycles = 0;
-	thread->uncounted = uncounted;
+	thread->uncounted =
+	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
 	thread->priority = -1;          /* Start in rq[0] */
 	thread->cpu = NULL;
-	thread->flags = flags;
+	thread->wired = false;
+	thread->stolen = false;
+	thread->uspace =
+	    ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
+
 	thread->nomigrate = 0;
 	thread->state = Entering;

@@ -337 +374 @@
 	thread->task = task;
 
-	thread->fpu_context_exists = 0;
-	thread->fpu_context_engaged = 0;
+	thread->fpu_context_exists = false;
+	thread->fpu_context_engaged = false;
 
 	avltree_node_initialize(&thread->threads_tree_node);

@@ -352 +389 @@
 	thread_create_arch(thread);
 
-	if (!(flags & THREAD_FLAG_NOATTACH))
+	if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
 		thread_attach(thread, task);
 

@@ -418 +455 @@
 
 	/* Must not count kbox thread into lifecount */
-	if (thread->flags & THREAD_FLAG_USPACE)
+	if (thread->uspace)
 		atomic_inc(&task->lifecount);
 

@@ -440 +477 @@
 void thread_exit(void)
 {
-	if (THREAD->flags & THREAD_FLAG_USPACE) {
+	if (THREAD->uspace) {
 #ifdef CONFIG_UDEBUG
 		/* Generate udebug THREAD_E event */
 		udebug_thread_e_event();
 
 		/*
 		 * This thread will not execute any code or system calls from

@@ -487 +524 @@
 {
 	ASSERT(THREAD);
 
 	THREAD->nomigrate++;
 }

@@ -496 +533 @@
 	ASSERT(THREAD);
 	ASSERT(THREAD->nomigrate > 0);
-
-	THREAD->nomigrate--;
+
+	if (THREAD->nomigrate > 0)
+		THREAD->nomigrate--;
 }
 

@@ -835 +873 @@
  * In case of failure, kernel_uarg will be deallocated in this function.
  * In case of success, kernel_uarg will be freed in uinit().
- *
  */
 	uspace_arg_t *kernel_uarg =

@@ -847 +884 @@
 
 	thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
-	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
+	    THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
 	if (thread) {
 		if (uspace_thread_id != NULL) {
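The changeset replaces the old THREAD_FLAG_WIRED flag with an explicit thread->wired field that is set through the new thread_wire() routine before the thread is made ready; thread_ready() then keeps the thread on thread->cpu whenever it is wired, has migration disabled, or has an FPU context engaged. The usage sketch below is not part of the changeset: the worker() helper, the "wired_worker" name, the THREAD_FLAG_NONE value and the header paths are assumptions, and thread_ready() is the routine documented as "Make thread ready" above.

#include <proc/thread.h>	/* assumed header for thread_t, thread_create(), thread_wire() */
#include <cpu.h>		/* assumed header for cpu_t */

/* Hedged sketch: pin a kernel worker to a given CPU with the new
 * thread_wire() API instead of the removed THREAD_FLAG_WIRED flag. */
static void worker(void *arg)
{
	/* ... per-CPU work ... */
}

static void start_wired_worker(cpu_t *cpu)
{
	/* No userspace, normal accounting; THREAD_FLAG_NONE is an assumed
	 * "no flags" value of thread_flags_t. */
	thread_t *thread = thread_create(worker, NULL, TASK,
	    THREAD_FLAG_NONE, "wired_worker");
	if (!thread)
		return;

	/* Wire before making the thread ready: thread_ready() consults
	 * thread->wired (set under thread->lock by thread_wire()) when
	 * choosing the CPU whose run queue receives the thread. */
	thread_wire(thread, cpu);
	thread_ready(thread);
}

Since thread_wire() only takes thread->lock as an IRQ spinlock, the wiring step is cheap and can be done from ordinary kernel code before the new thread first runs.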
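thread_create() now takes a thread_flags_t and folds the former trailing uncounted bool into THREAD_FLAG_UNCOUNTED, so call sites change along the lines of the thread_create_uspace() hunk above. A hedged before/after sketch follows; the kmain_thread() helper, the spawn_uncounted_worker() wrapper and the "kmain" name are made up for illustration.

#include <proc/thread.h>	/* assumed header for thread_create() */

static void kmain_thread(void *arg)
{
	/* ... */
}

static void spawn_uncounted_worker(void)
{
	/*
	 * Old API: the property was a separate trailing bool:
	 *     thread_create(kmain_thread, NULL, TASK, 0, "kmain", true);
	 * New API: it is expressed as a flag instead:
	 */
	thread_t *thread = thread_create(kmain_thread, NULL, TASK,
	    THREAD_FLAG_UNCOUNTED, "kmain");
	if (thread)
		thread_ready(thread);
}

Inside thread_create() the flag word is immediately decomposed into the bool fields shown in the hunk starting at new line 348 (uncounted, uspace, wired, stolen), so the rest of the kernel tests fields such as thread->uspace instead of re-checking the flags.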
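The nomigrate counter is driven by a disable/enable pair of routines; their names are not visible in the hunks above, so thread_migration_disable() and thread_migration_enable() in the sketch below should be treated as assumptions. The counter nests, and thread_ready() keeps a thread with a non-zero count on its current CPU.

/* Hedged sketch: function names are assumed, the counter values in the
 * comments follow the increments and decrements in the hunks above. */
static void migration_sensitive_operation(void)
{
	thread_migration_disable();	/* THREAD->nomigrate: 0 -> 1 */

	/* Nested use (e.g. by a callee) is fine, the counter just grows. */
	thread_migration_disable();	/* 1 -> 2 */
	/* ... work that must stay on the current CPU ... */
	thread_migration_enable();	/* 2 -> 1 */

	thread_migration_enable();	/* 1 -> 0, migration allowed again */
}

With the added guard, one enable too many leaves the counter at 0 instead of driving it negative; the ASSERT(THREAD->nomigrate > 0) still catches the imbalance in debug builds.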