Changes in [a5b5f17:ed7e057] in mainline

Location: kernel
Files: 1 added, 16 edited
kernel/arch/sparc64/src/proc/sun4u/scheduler.c
--- a5b5f17
+++ ed7e057
@@ -76,6 +76,4 @@
 {
 	if (THREAD->uspace) {
-		asm volatile ("flushw");
-
 		/* sample the state of the userspace window buffer */
 		THREAD->arch.uspace_window_buffer =
kernel/arch/sparc64/src/proc/sun4v/scheduler.c
--- a5b5f17
+++ ed7e057
@@ -68,6 +68,4 @@
 {
 	if (THREAD->uspace) {
-		asm volatile ("flushw");
-
 		/* sample the state of the userspace window buffer */
 		THREAD->arch.uspace_window_buffer =
kernel/generic/include/atomic.h
--- a5b5f17
+++ ed7e057
@@ -39,14 +39,4 @@
 #include <typedefs.h>
 #include <stdatomic.h>
-
-/*
- * Shorthand for relaxed atomic read/write, something that's needed to formally
- * avoid undefined behavior in cases where we need to read a variable in
- * different threads and we don't particularly care about ordering
- * (e.g. statistic printouts). This is most likely translated into the same
- * assembly instructions as regular read/writes.
- */
-#define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
-#define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)
 
 #define atomic_predec(val) \
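The macros removed above (the "-" lines) are thin wrappers over C11 <stdatomic.h> relaxed operations. A minimal stand-alone sketch of what they expand to; the variable "counter" is invented for the example and is not a kernel field:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Same definitions as the removed kernel macros. */
    #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

    static atomic_int counter;  /* e.g. a statistic read by other threads */

    int main(void)
    {
        /* Relaxed accesses: race-free under the C memory model, no ordering implied. */
        atomic_set_unordered(&counter, 42);
        printf("%d\n", atomic_get_unordered(&counter));
        return 0;
    }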
kernel/generic/include/cpu.h
--- a5b5f17
+++ ed7e057
@@ -74,8 +74,4 @@
 	bool idle;
 	uint64_t last_cycle;
-
-	context_t scheduler_context;
-
-	struct thread *prev_thread;
 } cpu_local_t;
 
kernel/generic/include/proc/scheduler.h
--- a5b5f17
+++ ed7e057
@@ -64,6 +64,4 @@
 extern void scheduler_enter(state_t);
 
-extern void thread_main_func(void);
-
 /*
  * To be defined by architectures.
kernel/generic/include/proc/thread.h
--- a5b5f17
+++ ed7e057
@@ -95,32 +95,11 @@
 	waitq_t join_wq;
 
-	/** Thread accounting. */
-	atomic_time_stat_t ucycles;
-	atomic_time_stat_t kcycles;
-
-	/** Architecture-specific data. */
-	thread_arch_t arch;
-
-#ifdef CONFIG_UDEBUG
-	/**
-	 * If true, the scheduler will print a stack trace
-	 * to the kernel console upon scheduling this thread.
-	 */
-	atomic_int_fast8_t btrace;
-
-	/** Debugging stuff */
-	udebug_thread_t udebug;
-#endif /* CONFIG_UDEBUG */
-
-	/*
-	 * Immutable fields.
+	/** Lock protecting thread structure.
 	 *
-	 * These fields are only modified during initialization, and are not
-	 * changed at any time between initialization and destruction.
-	 * Can be accessed without synchronization in most places.
-	 */
-
-	/** Thread ID. */
-	thread_id_t tid;
+	 * Protects the whole thread structure except fields listed above.
+	 */
+	IRQ_SPINLOCK_DECLARE(lock);
+
+	char name[THREAD_NAME_BUFLEN];
 
 	/** Function implementing the thread. */
@@ -129,83 +108,71 @@
 	void *thread_arg;
 
-	char name[THREAD_NAME_BUFLEN];
-
+	/**
+	 * From here, the stored context is restored
+	 * when the thread is scheduled.
+	 */
+	context_t saved_context;
+
+	/**
+	 * True if this thread is executing copy_from_uspace().
+	 * False otherwise.
+	 */
+	bool in_copy_from_uspace;
+
+	/**
+	 * True if this thread is executing copy_to_uspace().
+	 * False otherwise.
+	 */
+	bool in_copy_to_uspace;
+
+#ifdef CONFIG_FPU
+	fpu_context_t fpu_context;
+#endif
+	bool fpu_context_exists;
+
+	/* The thread will not be migrated if nomigrate is non-zero. */
+	unsigned int nomigrate;
+
+	/** Thread state. */
+	state_t state;
+
+	/** Thread CPU. */
+	cpu_t *cpu;
+	/** Containing task. */
+	task_t *task;
+	/** Thread was migrated to another CPU and has not run yet. */
+	bool stolen;
 	/** Thread is executed in user space. */
 	bool uspace;
 
+	/** Thread accounting. */
+	uint64_t ucycles;
+	uint64_t kcycles;
+	/** Last sampled cycle. */
+	uint64_t last_cycle;
 	/** Thread doesn't affect accumulated accounting. */
 	bool uncounted;
 
-	/** Containing task. */
-	task_t *task;
+	/** Thread's priority. Implemented as index to CPU->rq */
+	int priority;
+	/** Thread ID. */
+	thread_id_t tid;
+
+	/** Architecture-specific data. */
+	thread_arch_t arch;
 
 	/** Thread's kernel stack. */
 	uint8_t *kstack;
 
-	/*
-	 * Local fields.
-	 *
-	 * These fields can be safely accessed from code that _controls execution_
-	 * of this thread. Code controls execution of a thread if either:
-	 * - it runs in the context of said thread AND interrupts are disabled
-	 *   (interrupts can and will access these fields)
-	 * - the thread is not running, and the code accessing it can legally
-	 *   add/remove the thread to/from a runqueue, i.e., either:
-	 *   - it is allowed to enqueue thread in a new runqueue
-	 *   - it holds the lock to the runqueue containing the thread
-	 *
-	 */
-
-	/**
-	 * From here, the stored context is restored
-	 * when the thread is scheduled.
-	 */
-	context_t saved_context;
-
-	// TODO: we only need one of the two bools below
-
-	/**
-	 * True if this thread is executing copy_from_uspace().
-	 * False otherwise.
-	 */
-	bool in_copy_from_uspace;
-
-	/**
-	 * True if this thread is executing copy_to_uspace().
-	 * False otherwise.
-	 */
-	bool in_copy_to_uspace;
-
-	/*
-	 * FPU context is a special case. If lazy FPU switching is disabled,
-	 * it acts as a regular local field. However, if lazy switching is enabled,
-	 * the context is synchronized via CPU->fpu_lock
-	 */
-#ifdef CONFIG_FPU
-	fpu_context_t fpu_context;
-#endif
-	bool fpu_context_exists;
-
-	/* The thread will not be migrated if nomigrate is non-zero. */
-	unsigned int nomigrate;
-
-	/** Thread was migrated to another CPU and has not run yet. */
-	bool stolen;
-
-	/**
-	 * Thread state (state_t).
-	 * This is atomic because we read it via some commands for debug output,
-	 * otherwise it could just be a regular local.
-	 */
-	atomic_int_fast32_t state;
-
-	/** Thread CPU. */
-	_Atomic(cpu_t *) cpu;
-
-	/** Thread's priority. Implemented as index to CPU->rq */
-	atomic_int_fast32_t priority;
-
-	/** Last sampled cycle. */
-	uint64_t last_cycle;
+#ifdef CONFIG_UDEBUG
+	/**
+	 * If true, the scheduler will print a stack trace
+	 * to the kernel console upon scheduling this thread.
+	 */
+	bool btrace;
+
+	/** Debugging stuff */
+	udebug_thread_t udebug;
+#endif /* CONFIG_UDEBUG */
 } thread_t;
 
@@ -219,5 +186,5 @@
 extern void thread_attach(thread_t *, task_t *);
 extern void thread_start(thread_t *);
-extern void thread_requeue_sleeping(thread_t *);
+extern void thread_ready(thread_t *);
 extern void thread_exit(void) __attribute__((noreturn));
 extern void thread_interrupt(thread_t *);
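The two layouts of thread_t above differ mainly in how readers on other CPUs are synchronized: the "+" side guards the whole structure with one IRQ spinlock, while the "-" side turns the fields that other CPUs only read for reporting into relaxed atomics. A rough user-space analogy of the two styles; the toy_* names are invented for this sketch and are not kernel API:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    typedef enum { TOY_ENTERING, TOY_READY, TOY_RUNNING } toy_state_t;

    /* Style of the "+" side: one lock protects every mutable field. */
    typedef struct {
        pthread_mutex_t lock;
        toy_state_t state;
        unsigned long kcycles;
    } toy_thread_locked_t;

    static toy_state_t locked_state(toy_thread_locked_t *t)
    {
        pthread_mutex_lock(&t->lock);
        toy_state_t s = t->state;
        pthread_mutex_unlock(&t->lock);
        return s;
    }

    /* Style of the "-" side: fields other threads only read for reporting
     * become atomics read with relaxed ordering, so no lock is needed. */
    typedef struct {
        _Atomic toy_state_t state;
        _Atomic unsigned long kcycles;
    } toy_thread_atomic_t;

    static toy_state_t peeked_state(toy_thread_atomic_t *t)
    {
        return atomic_load_explicit(&t->state, memory_order_relaxed);
    }

    int main(void)
    {
        toy_thread_locked_t a = { PTHREAD_MUTEX_INITIALIZER, TOY_READY, 0 };
        toy_thread_atomic_t b;
        atomic_init(&b.state, TOY_RUNNING);
        atomic_init(&b.kcycles, 0);

        printf("%d %d\n", locked_state(&a), peeked_state(&b));
        return 0;
    }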
kernel/generic/meson.build
--- a5b5f17
+++ ed7e057
@@ -95,4 +95,5 @@
 	'src/mm/malloc.c',
 	'src/mm/reserve.c',
+	'src/preempt/preemption.c',
 	'src/printf/printf.c',
 	'src/printf/snprintf.c',
kernel/generic/src/interrupt/interrupt.c
--- a5b5f17
+++ ed7e057
@@ -114,6 +114,9 @@
 
 	/* Account user cycles */
-	if (THREAD)
+	if (THREAD) {
+		irq_spinlock_lock(&THREAD->lock, false);
 		thread_update_accounting(true);
+		irq_spinlock_unlock(&THREAD->lock, false);
+	}
 
 	/* Account CPU usage if it woke up from sleep */
@@ -152,6 +155,9 @@
 
 	/* Do not charge THREAD for exception cycles */
-	if (THREAD)
+	if (THREAD) {
+		irq_spinlock_lock(&THREAD->lock, false);
 		THREAD->last_cycle = end_cycle;
+		irq_spinlock_unlock(&THREAD->lock, false);
+	}
 #else
 	panic("No space for any exception handler, yet we want to handle some exception.");
kernel/generic/src/main/main.c
--- a5b5f17
+++ ed7e057
@@ -287,6 +287,5 @@
 	 * starting the thread of kernel threads.
 	 */
-	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-	context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
+	scheduler_run();
 	/* not reached */
 }
@@ -328,4 +327,6 @@
 	ARCH_OP(post_cpu_init);
 
+	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+
 	/*
 	 * If we woke kmp up before we left the kernel stack, we could
@@ -333,5 +334,4 @@
 	 * switch to this cpu's private stack prior to waking kmp up.
 	 */
-	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
 	context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
 	/* not reached */
kernel/generic/src/proc/scheduler.c
--- a5b5f17
+++ ed7e057
@@ -1,5 +1,4 @@
 /*
  * Copyright (c) 2010 Jakub Jermar
- * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
 *
@@ -51,4 +50,5 @@
 #include <time/delay.h>
 #include <arch/asm.h>
+#include <arch/faddr.h>
 #include <arch/cycle.h>
 #include <atomic.h>
@@ -66,4 +66,6 @@
 #include <stacktrace.h>
 
+static void scheduler_separated_stack(void);
+
 atomic_size_t nrdy;	/**< Number of ready threads in the system. */
 
@@ -225,6 +227,4 @@
 static void relink_rq(int start)
 {
-	assert(interrupts_disabled());
-
 	if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
 		return;
@@ -302,4 +302,15 @@
 }
 
+void scheduler_run(void)
+{
+	assert(interrupts_disabled());
+	assert(THREAD == NULL);
+	assert(CPU != NULL);
+
+	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+	context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
+	unreachable();
+}
+
 /** Things to do before we switch to THREAD context.
  */
@@ -310,8 +321,8 @@
 	switch_task(THREAD->task);
 
-	assert(atomic_get_unordered(&THREAD->cpu) == CPU);
-
-	atomic_set_unordered(&THREAD->state, Running);
-	atomic_set_unordered(&THREAD->priority, rq_index);	/* Correct rq index */
+	irq_spinlock_lock(&THREAD->lock, false);
+	THREAD->state = Running;
+	THREAD->cpu = CPU;
+	THREAD->priority = rq_index;	/* Correct rq index */
 
 	/*
@@ -324,5 +335,5 @@
 		log(LF_OTHER, LVL_DEBUG,
 		    "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-		    ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
+		    ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
 		    THREAD->ticks, atomic_load(&CPU->nrdy));
 #endif
@@ -339,14 +350,12 @@
 
 #ifdef CONFIG_UDEBUG
-	if (atomic_get_unordered(&THREAD->btrace)) {
+	if (THREAD->btrace) {
 		istate_t *istate = THREAD->udebug.uspace_state;
 		if (istate != NULL) {
 			printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
 			stack_trace_istate(istate);
-		} else {
-			printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
 		}
 
-		atomic_set_unordered(&THREAD->btrace, false);
+		THREAD->btrace = false;
 	}
 #endif
@@ -365,61 +374,5 @@
 }
 
-static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
-{
-	/* Add to the appropriate runqueue. */
-	runq_t *rq = &cpu->rq[i];
-
-	irq_spinlock_lock(&rq->lock, false);
-	list_append(&thread->rq_link, &rq->rq);
-	rq->n++;
-	irq_spinlock_unlock(&rq->lock, false);
-
-	atomic_inc(&nrdy);
-	atomic_inc(&cpu->nrdy);
-}
-
-/** Requeue a thread that was just preempted on this CPU.
- */
-static void thread_requeue_preempted(thread_t *thread)
-{
-	assert(interrupts_disabled());
-	assert(atomic_get_unordered(&thread->state) == Running);
-	assert(atomic_get_unordered(&thread->cpu) == CPU);
-
-	int prio = atomic_get_unordered(&thread->priority);
-
-	if (prio < RQ_COUNT - 1) {
-		prio++;
-		atomic_set_unordered(&thread->priority, prio);
-	}
-
-	atomic_set_unordered(&thread->state, Ready);
-
-	add_to_rq(thread, CPU, prio);
-}
-
-void thread_requeue_sleeping(thread_t *thread)
-{
-	ipl_t ipl = interrupts_disable();
-
-	assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
-
-	atomic_set_unordered(&thread->priority, 0);
-	atomic_set_unordered(&thread->state, Ready);
-
-	/* Prefer the CPU on which the thread ran last */
-	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-
-	if (!cpu) {
-		cpu = CPU;
-		atomic_set_unordered(&thread->cpu, CPU);
-	}
-
-	add_to_rq(thread, cpu, 0);
-
-	interrupts_restore(ipl);
-}
-
-static void cleanup_after_thread(thread_t *thread)
+static void cleanup_after_thread(thread_t *thread, state_t out_state)
 {
 	assert(CURRENT->mutex_locks == 0);
@@ -428,7 +381,7 @@
 	int expected;
 
-	switch (atomic_get_unordered(&thread->state)) {
+	switch (out_state) {
 	case Running:
-		thread_requeue_preempted(thread);
+		thread_ready(thread);
 		break;
 
@@ -454,5 +407,5 @@
 			assert(expected == SLEEP_WOKE);
 			/* The thread has already been woken up, requeue immediately. */
-			thread_requeue_sleeping(thread);
+			thread_ready(thread);
 		}
 		break;
@@ -463,10 +416,16 @@
 		 */
 		panic("tid%" PRIu64 ": unexpected state %s.",
-		    thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
+		    thread->tid, thread_states[thread->state]);
 		break;
 	}
 }
 
-/** Switch to scheduler context to let other threads run. */
+/** The scheduler
+ *
+ * The thread scheduling procedure.
+ * Passes control directly to
+ * scheduler_separated_stack().
+ *
+ */
 void scheduler_enter(state_t new_state)
 {
@@ -476,137 +435,94 @@
 	assert(THREAD != NULL);
 
+	fpu_cleanup();
+
+	irq_spinlock_lock(&THREAD->lock, false);
+	THREAD->state = new_state;
+
+	/* Update thread kernel accounting */
+	THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+
+	if (new_state == Sleeping) {
+		/* Prefer the thread after it's woken up. */
+		THREAD->priority = -1;
+	}
+
+	/*
+	 * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
+	 * and preemption counter. At this point CURRENT could be coming either
+	 * from THREAD's or CPU's stack.
+	 *
+	 */
+	current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+
+	/*
+	 * We may not keep the old stack.
+	 * Reason: If we kept the old stack and got blocked, for instance, in
+	 * find_best_thread(), the old thread could get rescheduled by another
+	 * CPU and overwrite the part of its own stack that was also used by
+	 * the scheduler on this CPU.
+	 *
+	 * Moreover, we have to bypass the compiler-generated POP sequence
+	 * which is fooled by SP being set to the very top of the stack.
+	 * Therefore the scheduler() function continues in
+	 * scheduler_separated_stack().
+	 *
+	 */
+	context_t ctx;
+	context_create(&ctx, scheduler_separated_stack,
+	    CPU_LOCAL->stack, STACK_SIZE);
+
+	/* Switch to scheduler context and store current thread's context. */
+	context_swap(&THREAD->saved_context, &ctx);
+
+	/* Returned from scheduler. */
+
+	irq_spinlock_unlock(&THREAD->lock, false);
+	interrupts_restore(ipl);
+}
+
+/** Scheduler stack switch wrapper
+ *
+ * Second part of the scheduler() function
+ * using new stack. Handling the actual context
+ * switch to a new thread.
+ *
+ */
+void scheduler_separated_stack(void)
+{
+	assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
+	assert(CPU != NULL);
+	assert(interrupts_disabled());
+
 	if (atomic_load(&haltstate))
 		halt();
 
-	/* Check if we have a thread to switch to. */
+	if (THREAD) {
+		/*
+		 * On Sparc, this saves some extra userspace state that's not
+		 * covered by context_save()/context_restore().
+		 */
+		after_thread_ran_arch();
+
+		state_t state = THREAD->state;
+		irq_spinlock_unlock(&THREAD->lock, false);
+
+		cleanup_after_thread(THREAD, state);
+
+		THREAD = NULL;
+	}
 
 	int rq_index;
-	thread_t *new_thread = try_find_thread(&rq_index);
-
-	if (new_thread == NULL && new_state == Running) {
-		/* No other thread to run, but we still have work to do here. */
-		interrupts_restore(ipl);
-		return;
-	}
-
-	atomic_set_unordered(&THREAD->state, new_state);
-
-	/* Update thread kernel accounting */
-	atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
-
-	fpu_cleanup();
-
-	/*
-	 * On Sparc, this saves some extra userspace state that's not
-	 * covered by context_save()/context_restore().
-	 */
-	after_thread_ran_arch();
-
-	if (new_thread) {
-		thread_t *old_thread = THREAD;
-		CPU_LOCAL->prev_thread = old_thread;
-		THREAD = new_thread;
-		/* No waiting necessary, we can switch to the new thread directly. */
-		prepare_to_run_thread(rq_index);
-
-		current_copy(CURRENT, (current_t *) new_thread->kstack);
-		context_swap(&old_thread->saved_context, &new_thread->saved_context);
-	} else {
-		/*
-		 * A new thread isn't immediately available, switch to a separate
-		 * stack to sleep or do other idle stuff.
-		 */
-		current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-		context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
-	}
-
-	assert(CURRENT->mutex_locks == 0);
-	assert(interrupts_disabled());
-
-	/* Check if we need to clean up after another thread. */
-	if (CPU_LOCAL->prev_thread) {
-		cleanup_after_thread(CPU_LOCAL->prev_thread);
-		CPU_LOCAL->prev_thread = NULL;
-	}
-
-	interrupts_restore(ipl);
-}
-
-/** Enter main scheduler loop. Never returns.
- *
- * This function switches to a runnable thread as soon as one is available,
- * after which it is only switched back to if a thread is stopping and there is
- * no other thread to run in its place. We need a separate context for that
- * because we're going to block the CPU, which means we need another context
- * to clean up after the previous thread.
- */
-void scheduler_run(void)
-{
-	assert(interrupts_disabled());
-
-	assert(CPU != NULL);
-	assert(TASK == NULL);
-	assert(THREAD == NULL);
-	assert(interrupts_disabled());
-
-	while (!atomic_load(&haltstate)) {
-		assert(CURRENT->mutex_locks == 0);
-
-		int rq_index;
-		THREAD = find_best_thread(&rq_index);
-		prepare_to_run_thread(rq_index);
-
-		/*
-		 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
-		 * thread's stack.
-		 */
-		current_copy(CURRENT, (current_t *) THREAD->kstack);
-
-		/* Switch to thread context. */
-		context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
-
-		/* Back from another thread. */
-		assert(CPU != NULL);
-		assert(THREAD != NULL);
-		assert(CURRENT->mutex_locks == 0);
-		assert(interrupts_disabled());
-
-		cleanup_after_thread(THREAD);
-
-		/*
-		 * Necessary because we're allowing interrupts in find_best_thread(),
-		 * so we need to avoid other code referencing the thread we left.
-		 */
-		THREAD = NULL;
-	}
-
-	halt();
-}
-
-/** Thread wrapper.
- *
- * This wrapper is provided to ensure that a starting thread properly handles
- * everything it needs to do when first scheduled, and when it exits.
- */
-void thread_main_func(void)
-{
-	assert(interrupts_disabled());
-
-	void (*f)(void *) = THREAD->thread_code;
-	void *arg = THREAD->thread_arg;
-
-	/* This is where each thread wakes up after its creation */
-
-	/* Check if we need to clean up after another thread. */
-	if (CPU_LOCAL->prev_thread) {
-		cleanup_after_thread(CPU_LOCAL->prev_thread);
-		CPU_LOCAL->prev_thread = NULL;
-	}
-
-	interrupts_enable();
-
-	f(arg);
-
-	thread_exit();
+	THREAD = find_best_thread(&rq_index);
+
+	prepare_to_run_thread(rq_index);
+
+	/*
+	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+	 * thread's stack.
+	 */
+	current_copy(CURRENT, (current_t *) THREAD->kstack);
+
+	context_restore(&THREAD->saved_context);
 
 	/* Not reached */
@@ -633,4 +549,6 @@
 	/* Search rq from the back */
 	list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
+
+		irq_spinlock_lock(&thread->lock, false);
 
 		/*
@@ -640,10 +558,14 @@
 		 * FPU context is still in the CPU.
 		 */
-		if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
+		if (thread->stolen || thread->nomigrate ||
+		    thread == fpu_owner) {
+			irq_spinlock_unlock(&thread->lock, false);
 			continue;
 		}
 
 		thread->stolen = true;
-		atomic_set_unordered(&thread->cpu, CPU);
+		thread->cpu = CPU;
+
+		irq_spinlock_unlock(&thread->lock, false);
 
 		/*
@@ -790,5 +712,5 @@
 		    thread) {
 			printf("%" PRIu64 "(%s) ", thread->tid,
-			    thread_states[atomic_get_unordered(&thread->state)]);
+			    thread_states[thread->state]);
 		}
 		printf("\n");
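Both versions of scheduler.c above revolve around the same idea: each CPU keeps a dedicated scheduler context and stack and switches into and out of thread contexts explicitly (context_create()/context_swap()/context_restore() in the kernel). A rough user-space sketch of that pattern using POSIX <ucontext.h> in place of the kernel's context API; sched_ctx, thread_ctx and thread_body are invented names for illustration, not HelenOS code:

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t sched_ctx;   /* the scheduler's own context */
    static ucontext_t thread_ctx;  /* context of the single "thread" we run */

    static void thread_body(void)
    {
        printf("thread: running, yielding back to scheduler\n");
        /* analogous to scheduler_enter(): save our context, resume scheduler */
        swapcontext(&thread_ctx, &sched_ctx);
        printf("thread: resumed, exiting\n");
        /* falling off the end returns to uc_link (the scheduler) */
    }

    int main(void)
    {
        static char stack[64 * 1024];

        getcontext(&thread_ctx);
        thread_ctx.uc_stack.ss_sp = stack;
        thread_ctx.uc_stack.ss_size = sizeof(stack);
        thread_ctx.uc_link = &sched_ctx;       /* where to go when thread_body returns */
        makecontext(&thread_ctx, thread_body, 0);

        printf("scheduler: dispatching thread\n");
        swapcontext(&sched_ctx, &thread_ctx);  /* switch into the thread */

        printf("scheduler: thread yielded, dispatching it again\n");
        swapcontext(&sched_ctx, &thread_ctx);

        printf("scheduler: thread finished\n");
        return 0;
    }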
kernel/generic/src/proc/task.c
--- a5b5f17
+++ ed7e057
@@ -506,4 +506,6 @@
 	/* Current values of threads */
 	list_foreach(task->threads, th_link, thread_t, thread) {
+		irq_spinlock_lock(&thread->lock, false);
+
 		/* Process only counted threads */
 		if (!thread->uncounted) {
@@ -513,7 +515,9 @@
 			}
 
-			uret += atomic_time_read(&thread->ucycles);
-			kret += atomic_time_read(&thread->kcycles);
+			uret += thread->ucycles;
+			kret += thread->kcycles;
 		}
+
+		irq_spinlock_unlock(&thread->lock, false);
 	}
 
kernel/generic/src/proc/thread.c
--- a5b5f17
+++ ed7e057
@@ -108,4 +108,28 @@
 
 static int threads_cmp(void *, void *);
 
+/** Thread wrapper.
+ *
+ * This wrapper is provided to ensure that every thread makes a call to
+ * thread_exit() when its implementing function returns.
+ *
+ * interrupts_disable() is assumed.
+ *
+ */
+static void cushion(void)
+{
+	void (*f)(void *) = THREAD->thread_code;
+	void *arg = THREAD->thread_arg;
+
+	/* This is where each thread wakes up after its creation */
+	irq_spinlock_unlock(&THREAD->lock, false);
+	interrupts_enable();
+
+	f(arg);
+
+	thread_exit();
+
+	/* Not reached */
+}
+
 /** Initialization and allocation for thread_t structure
  *
@@ -115,4 +139,5 @@
 	thread_t *thread = (thread_t *) obj;
 
+	irq_spinlock_initialize(&thread->lock, "thread_t_lock");
 	link_initialize(&thread->rq_link);
 	link_initialize(&thread->wq_link);
@@ -196,8 +221,8 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-	ipl_t ipl = interrupts_disable();
-	atomic_set_unordered(&thread->cpu, cpu);
+	irq_spinlock_lock(&thread->lock, true);
+	thread->cpu = cpu;
 	thread->nomigrate++;
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&thread->lock, true);
 }
 
@@ -208,6 +233,42 @@
 void thread_start(thread_t *thread)
 {
-	assert(atomic_get_unordered(&thread->state) == Entering);
-	thread_requeue_sleeping(thread_ref(thread));
+	assert(thread->state == Entering);
+	thread_ready(thread_ref(thread));
+}
+
+/** Make thread ready
+ *
+ * Switch thread to the ready state. Consumes reference passed by the caller.
+ *
+ * @param thread Thread to make ready.
+ *
+ */
+void thread_ready(thread_t *thread)
+{
+	irq_spinlock_lock(&thread->lock, true);
+
+	assert(thread->state != Ready);
+
+	int i = (thread->priority < RQ_COUNT - 1) ?
+	    ++thread->priority : thread->priority;
+
+	/* Prefer the CPU on which the thread ran last */
+	cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
+
+	thread->state = Ready;
+
+	irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
+
+	/*
+	 * Append thread to respective ready queue
+	 * on respective processor.
+	 */
+
+	list_append(&thread->rq_link, &cpu->rq[i].rq);
+	cpu->rq[i].n++;
+	irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+
+	atomic_inc(&nrdy);
+	atomic_inc(&cpu->nrdy);
 }
 
@@ -248,6 +309,5 @@
 	irq_spinlock_unlock(&tidlock, true);
 
-	context_create(&thread->saved_context, thread_main_func,
-	    thread->kstack, STACK_SIZE);
+	context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
 
 	current_initialize((current_t *) thread->kstack);
@@ -257,10 +317,10 @@
 	thread->thread_code = func;
 	thread->thread_arg = arg;
-	thread->ucycles = ATOMIC_TIME_INITIALIZER();
-	thread->kcycles = ATOMIC_TIME_INITIALIZER();
+	thread->ucycles = 0;
+	thread->kcycles = 0;
 	thread->uncounted =
 	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-	atomic_init(&thread->priority, 0);
-	atomic_init(&thread->cpu, NULL);
+	thread->priority = -1;	/* Start in rq[0] */
+	thread->cpu = NULL;
 	thread->stolen = false;
 	thread->uspace =
@@ -268,5 +328,5 @@
 
 	thread->nomigrate = 0;
-	atomic_init(&thread->state, Entering);
+	thread->state = Entering;
 
 	atomic_init(&thread->sleep_queue, NULL);
@@ -288,5 +348,5 @@
 #ifdef CONFIG_UDEBUG
 	/* Initialize debugging stuff */
-	atomic_init(&thread->btrace, false);
+	thread->btrace = false;
 	udebug_thread_initialize(&thread->udebug);
 #endif
@@ -332,16 +392,15 @@
 
 	if (!thread->uncounted) {
-		thread->task->ucycles += atomic_time_read(&thread->ucycles);
-		thread->task->kcycles += atomic_time_read(&thread->kcycles);
+		thread->task->ucycles += thread->ucycles;
+		thread->task->kcycles += thread->kcycles;
 	}
 
 	irq_spinlock_unlock(&thread->task->lock, false);
 
-	assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));
+	assert((thread->state == Exiting) || (thread->state == Lingering));
 
 	/* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-	if (cpu) {
+	if (thread->cpu) {
 		/*
 		 * We need to lock for this because the old CPU can concurrently try
@@ -349,10 +408,15 @@
 		 * it to finish. An atomic compare-and-swap wouldn't be enough.
 		 */
-		irq_spinlock_lock(&cpu->fpu_lock, false);
-
-		if (atomic_get_unordered(&cpu->fpu_owner) == thread)
-			atomic_set_unordered(&cpu->fpu_owner, NULL);
-
-		irq_spinlock_unlock(&cpu->fpu_lock, false);
+		irq_spinlock_lock(&thread->cpu->fpu_lock, false);
+
+		thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
+		    memory_order_relaxed);
+
+		if (owner == thread) {
+			atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
+			    memory_order_relaxed);
+		}
+
+		irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
 	}
 #endif
@@ -571,5 +635,5 @@
 		 * the waking thread by the sleeper in thread_wait_finish().
 		 */
-		thread_requeue_sleeping(thread);
+		thread_ready(thread);
 	}
 }
@@ -578,10 +642,7 @@
 void thread_migration_disable(void)
 {
-	ipl_t ipl = interrupts_disable();
-
 	assert(THREAD);
+
 	THREAD->nomigrate++;
-
-	interrupts_restore(ipl);
 }
 
@@ -589,6 +650,4 @@
 void thread_migration_enable(void)
 {
-	ipl_t ipl = interrupts_disable();
-
 	assert(THREAD);
 	assert(THREAD->nomigrate > 0);
@@ -596,6 +655,4 @@
 	if (THREAD->nomigrate > 0)
 		THREAD->nomigrate--;
-
-	interrupts_restore(ipl);
 }
 
@@ -643,5 +700,12 @@
 		return EINVAL;
 
-	errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+	irq_spinlock_lock(&thread->lock, true);
+	state_t state = thread->state;
+	irq_spinlock_unlock(&thread->lock, true);
+
+	errno_t rc = EOK;
+
+	if (state != Exiting)
+		rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
 
 	if (rc == EOK)
@@ -683,8 +747,6 @@
 	uint64_t ucycles, kcycles;
 	char usuffix, ksuffix;
-	order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
-	order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
-
-	state_t state = atomic_get_unordered(&thread->state);
+	order_suffix(thread->ucycles, &ucycles, &usuffix);
+	order_suffix(thread->kcycles, &kcycles, &ksuffix);
 
 	char *name;
@@ -700,15 +762,14 @@
 	else
 		printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-		    thread->tid, name, thread, thread_states[state],
+		    thread->tid, name, thread, thread_states[thread->state],
 		    thread->task, thread->task->container);
 
 	if (additional) {
-		cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-		if (cpu)
-			printf("%-5u", cpu->id);
+		if (thread->cpu)
+			printf("%-5u", thread->cpu->id);
 		else
 			printf("none ");
 
-		if (state == Sleeping) {
+		if (thread->state == Sleeping) {
 			printf(" %p", thread->sleep_queue);
 		}
@@ -789,12 +850,13 @@
 void thread_update_accounting(bool user)
 {
+	uint64_t time = get_cycle();
+
 	assert(interrupts_disabled());
-
-	uint64_t time = get_cycle();
+	assert(irq_spinlock_locked(&THREAD->lock));
 
 	if (user)
-		atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
+		THREAD->ucycles += time - THREAD->last_cycle;
 	else
-		atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);
+		THREAD->kcycles += time - THREAD->last_cycle;
 
 	THREAD->last_cycle = time;
@@ -907,8 +969,21 @@
 	 */
 
-	printf("Scheduling thread stack trace.\n");
-	atomic_set_unordered(&thread->btrace, true);
-
-	thread_wakeup(thread);
+	irq_spinlock_lock(&thread->lock, true);
+
+	bool sleeping = false;
+	istate_t *istate = thread->udebug.uspace_state;
+	if (istate != NULL) {
+		printf("Scheduling thread stack trace.\n");
+		thread->btrace = true;
+		if (thread->state == Sleeping)
+			sleeping = true;
+	} else
+		printf("Thread interrupt state not available.\n");
+
+	irq_spinlock_unlock(&thread->lock, true);
+
+	if (sleeping)
+		thread_wakeup(thread);
+
 	thread_put(thread);
 }
@@ -1011,6 +1086,5 @@
 	thread_attach(thread, TASK);
 #endif
-	thread_start(thread);
-	thread_put(thread);
+	thread_ready(thread);
 
 	return 0;
kernel/generic/src/syscall/syscall.c
--- a5b5f17
+++ ed7e057
@@ -141,7 +141,7 @@
 {
 	/* Do userpace accounting */
-	ipl_t ipl = interrupts_disable();
+	irq_spinlock_lock(&THREAD->lock, true);
 	thread_update_accounting(true);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&THREAD->lock, true);
 
 #ifdef CONFIG_UDEBUG
@@ -191,7 +191,7 @@
 
 	/* Do kernel accounting */
-	ipl = interrupts_disable();
+	irq_spinlock_lock(&THREAD->lock, true);
 	thread_update_accounting(false);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&THREAD->lock, true);
 
 	return rc;
kernel/generic/src/sysinfo/stats.c
--- a5b5f17
+++ ed7e057
@@ -299,17 +299,16 @@
 {
 	assert(interrupts_disabled());
+	assert(irq_spinlock_locked(&thread->lock));
 
 	stats_thread->thread_id = thread->tid;
 	stats_thread->task_id = thread->task->taskid;
-	stats_thread->state = atomic_get_unordered(&thread->state);
-	stats_thread->priority = atomic_get_unordered(&thread->priority);
-	stats_thread->ucycles = atomic_time_read(&thread->ucycles);
-	stats_thread->kcycles = atomic_time_read(&thread->kcycles);
-
-	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
-
-	if (cpu != NULL) {
+	stats_thread->state = thread->state;
+	stats_thread->priority = thread->priority;
+	stats_thread->ucycles = thread->ucycles;
+	stats_thread->kcycles = thread->kcycles;
+
+	if (thread->cpu != NULL) {
 		stats_thread->on_cpu = true;
-		stats_thread->cpu = cpu->id;
+		stats_thread->cpu = thread->cpu->id;
 	} else
 		stats_thread->on_cpu = false;
@@ -362,7 +361,12 @@
 	thread_t *thread = thread_first();
 	while (thread != NULL) {
+		/* Interrupts are already disabled */
+		irq_spinlock_lock(&thread->lock, false);
+
 		/* Record the statistics and increment the index */
 		produce_stats_thread(thread, &stats_threads[i]);
 		i++;
+
+		irq_spinlock_unlock(&thread->lock, false);
 
 		thread = thread_next(thread);
@@ -620,5 +624,11 @@
 		ret.data.size = sizeof(stats_thread_t);
 
+		/*
+		 * Replaced hand-over-hand locking with regular nested sections
+		 * to avoid weak reference leak issues.
+		 */
+		irq_spinlock_lock(&thread->lock, false);
 		produce_stats_thread(thread, stats_thread);
+		irq_spinlock_unlock(&thread->lock, false);
 
 		irq_spinlock_unlock(&threads_lock, true);
kernel/generic/src/time/clock.c
--- a5b5f17
+++ ed7e057
@@ -123,5 +123,4 @@
 static void cpu_update_accounting(void)
 {
-	// FIXME: get_cycle() is unimplemented on several platforms
 	uint64_t now = get_cycle();
 	atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
kernel/generic/src/udebug/udebug_ops.c
--- a5b5f17
+++ ed7e057
@@ -90,9 +90,33 @@
 	}
 
+	irq_spinlock_lock(&thread->lock, true);
+
 	/* Verify that 'thread' is a userspace thread. */
 	if (!thread->uspace) {
+		/* It's not, deny its existence */
+		irq_spinlock_unlock(&thread->lock, true);
 		mutex_unlock(&TASK->udebug.lock);
 		return ENOENT;
 	}
+
+	/* Verify debugging state. */
+	if (thread->udebug.active != true) {
+		/* Not in debugging session or undesired GO state */
+		irq_spinlock_unlock(&thread->lock, true);
+		mutex_unlock(&TASK->udebug.lock);
+		return ENOENT;
+	}
+
+	/* Now verify that the thread belongs to the current task. */
+	if (thread->task != TASK) {
+		/* No such thread belonging this task */
+		irq_spinlock_unlock(&thread->lock, true);
+		mutex_unlock(&TASK->udebug.lock);
+		return ENOENT;
+	}
+
+	irq_spinlock_unlock(&thread->lock, true);
+
+	/* Only mutex TASK->udebug.lock left. */
 
 	/*
@@ -102,20 +126,4 @@
 	 */
 	mutex_lock(&thread->udebug.lock);
-
-	/* Verify debugging state. */
-	if (thread->udebug.active != true) {
-		/* Not in debugging session or undesired GO state */
-		mutex_unlock(&thread->udebug.lock);
-		mutex_unlock(&TASK->udebug.lock);
-		return ENOENT;
-	}
-
-	/* Now verify that the thread belongs to the current task. */
-	if (thread->task != TASK) {
-		/* No such thread belonging this task */
-		mutex_unlock(&thread->udebug.lock);
-		mutex_unlock(&TASK->udebug.lock);
-		return ENOENT;
-	}
 
 	/* The big task mutex is no longer needed. */
@@ -380,5 +388,7 @@
 	/* FIXME: make sure the thread isn't past debug shutdown... */
 	list_foreach(TASK->threads, th_link, thread_t, thread) {
+		irq_spinlock_lock(&thread->lock, false);
 		bool uspace = thread->uspace;
+		irq_spinlock_unlock(&thread->lock, false);
 
 		/* Not interested in kernel threads. */