Changeset a5b5f17 in mainline
- Timestamp: 2024-01-21T16:36:15Z (11 months ago)
- Branches: master
- Children: 1a1e124
- Parents: ed7e057 (diff), d23712e (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: kernel
- Files: 1 deleted, 16 edited
Legend: unchanged context lines are unmarked, added lines are prefixed with "+", removed lines with "-".
kernel/arch/sparc64/src/proc/sun4u/scheduler.c
  {
      if (THREAD->uspace) {
+         asm volatile ("flushw");
+
          /* sample the state of the userspace window buffer */
          THREAD->arch.uspace_window_buffer =
kernel/arch/sparc64/src/proc/sun4v/scheduler.c
  {
      if (THREAD->uspace) {
+         asm volatile ("flushw");
+
          /* sample the state of the userspace window buffer */
          THREAD->arch.uspace_window_buffer =
kernel/generic/include/atomic.h
  #include <typedefs.h>
  #include <stdatomic.h>
+
+ /*
+  * Shorthand for relaxed atomic read/write, something that's needed to formally
+  * avoid undefined behavior in cases where we need to read a variable in
+  * different threads and we don't particularly care about ordering
+  * (e.g. statistic printouts). This is most likely translated into the same
+  * assembly instructions as regular read/writes.
+  */
+ #define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
+ #define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)

  #define atomic_predec(val) \
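The comment above describes when the new shorthands are appropriate. As a self-contained illustration (not part of the changeset; the irq_count statistic and the two helper functions are made-up names), the same pattern can be exercised in plain C11: a single writer updates a counter with relaxed stores while readers in other threads use relaxed loads, which avoids a formal data race yet normally compiles to ordinary load/store instructions.

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Same shorthands as introduced in kernel/generic/include/atomic.h. */
    #define atomic_set_unordered(var, val) \
        atomic_store_explicit((var), (val), memory_order_relaxed)
    #define atomic_get_unordered(var) \
        atomic_load_explicit((var), memory_order_relaxed)

    /* Illustrative statistic: updated by a single writer, printed elsewhere. */
    static atomic_uint_fast64_t irq_count;

    static void irq_count_bump(void)
    {
        /* Single-writer update: atomicity of the read-modify-write is not
         * needed here, only freedom from data races. */
        atomic_set_unordered(&irq_count, atomic_get_unordered(&irq_count) + 1);
    }

    static void irq_count_print(void)
    {
        /* Readers may observe a slightly stale value; that is acceptable
         * for statistic printouts. */
        printf("irqs: %" PRIuFAST64 "\n", atomic_get_unordered(&irq_count));
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            irq_count_bump();
        irq_count_print();
        return 0;
    }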
kernel/generic/include/cpu.h
      bool idle;
      uint64_t last_cycle;
+
+     context_t scheduler_context;
+
+     struct thread *prev_thread;
  } cpu_local_t;

kernel/generic/include/proc/scheduler.h
  extern void scheduler_enter(state_t);

+ extern void thread_main_func(void);
+
  /*
   * To be defined by architectures.
kernel/generic/include/proc/thread.h
      waitq_t join_wq;

-     /** Lock protecting thread structure.
+     /** Thread accounting. */
+     atomic_time_stat_t ucycles;
+     atomic_time_stat_t kcycles;
+
+     /** Architecture-specific data. */
+     thread_arch_t arch;
+
+ #ifdef CONFIG_UDEBUG
+     /**
+      * If true, the scheduler will print a stack trace
+      * to the kernel console upon scheduling this thread.
+      */
+     atomic_int_fast8_t btrace;
+
+     /** Debugging stuff */
+     udebug_thread_t udebug;
+ #endif /* CONFIG_UDEBUG */
+
+     /*
+      * Immutable fields.
       *
-      * Protects the whole thread structure except fields listed above.
-      */
-     IRQ_SPINLOCK_DECLARE(lock);
-
-     char name[THREAD_NAME_BUFLEN];
+      * These fields are only modified during initialization, and are not
+      * changed at any time between initialization and destruction.
+      * Can be accessed without synchronization in most places.
+      */
+
+     /** Thread ID. */
+     thread_id_t tid;

      /** Function implementing the thread. */
  …
      void *thread_arg;

+     char name[THREAD_NAME_BUFLEN];
+
+     /** Thread is executed in user space. */
+     bool uspace;
+
+     /** Thread doesn't affect accumulated accounting. */
+     bool uncounted;
+
+     /** Containing task. */
+     task_t *task;
+
+     /** Thread's kernel stack. */
+     uint8_t *kstack;
+
+     /*
+      * Local fields.
+      *
+      * These fields can be safely accessed from code that _controls execution_
+      * of this thread. Code controls execution of a thread if either:
+      * - it runs in the context of said thread AND interrupts are disabled
+      *   (interrupts can and will access these fields)
+      * - the thread is not running, and the code accessing it can legally
+      *   add/remove the thread to/from a runqueue, i.e., either:
+      *   - it is allowed to enqueue thread in a new runqueue
+      *   - it holds the lock to the runqueue containing the thread
+      *
+      */
+
      /**
       * From here, the stored context is restored
  …
      context_t saved_context;

+     // TODO: we only need one of the two bools below
+
      /**
       * True if this thread is executing copy_from_uspace().
  …
      bool in_copy_to_uspace;

+     /*
+      * FPU context is a special case. If lazy FPU switching is disabled,
+      * it acts as a regular local field. However, if lazy switching is enabled,
+      * the context is synchronized via CPU->fpu_lock
+      */
  #ifdef CONFIG_FPU
      fpu_context_t fpu_context;
  …
      unsigned int nomigrate;

-     /** Thread state. */
-     state_t state;
-
-     /** Thread CPU. */
-     cpu_t *cpu;
-     /** Containing task. */
-     task_t *task;
      /** Thread was migrated to another CPU and has not run yet. */
      bool stolen;
-     /** Thread is executed in user space. */
-     bool uspace;
-
-     /** Thread accounting. */
-     uint64_t ucycles;
-     uint64_t kcycles;
+
+     /**
+      * Thread state (state_t).
+      * This is atomic because we read it via some commands for debug output,
+      * otherwise it could just be a regular local.
+      */
+     atomic_int_fast32_t state;
+
+     /** Thread CPU. */
+     _Atomic(cpu_t *) cpu;
+
+     /** Thread's priority. Implemented as index to CPU->rq */
+     atomic_int_fast32_t priority;
+
      /** Last sampled cycle. */
      uint64_t last_cycle;
-     /** Thread doesn't affect accumulated accounting. */
-     bool uncounted;
-
-     /** Thread's priority. Implemented as index to CPU->rq */
-     int priority;
-     /** Thread ID. */
-     thread_id_t tid;
-
-     /** Architecture-specific data. */
-     thread_arch_t arch;
-
-     /** Thread's kernel stack. */
-     uint8_t *kstack;
-
- #ifdef CONFIG_UDEBUG
-     /**
-      * If true, the scheduler will print a stack trace
-      * to the kernel console upon scheduling this thread.
-      */
-     bool btrace;
-
-     /** Debugging stuff */
-     udebug_thread_t udebug;
- #endif /* CONFIG_UDEBUG */
  } thread_t;

  …
  extern void thread_attach(thread_t *, task_t *);
  extern void thread_start(thread_t *);
- extern void thread_ready(thread_t *);
+ extern void thread_requeue_sleeping(thread_t *);
  extern void thread_exit(void) __attribute__((noreturn));
  extern void thread_interrupt(thread_t *);
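The new comment blocks split thread_t fields into immutable, local, and atomically accessed categories. The following standalone C11 sketch (illustrative only; toy_thread_t and its fields are not HelenOS types, and it assumes a toolchain with <threads.h>) mimics that convention: write-once fields are read directly, while fields that other threads may read for reporting go through relaxed atomics, just like atomic_get_unordered()/atomic_set_unordered() above.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <threads.h>

    typedef struct {
        /* "Immutable" fields: written once before the worker starts. */
        unsigned tid;
        const char *name;

        /* Fields other threads read for debug output: relaxed atomics. */
        atomic_int state;             /* 0 = entering, 1 = running, 2 = exited */
        atomic_uint_fast64_t cycles;
    } toy_thread_t;

    static int worker(void *arg)
    {
        toy_thread_t *self = arg;
        atomic_store_explicit(&self->state, 1, memory_order_relaxed);
        for (int i = 0; i < 1000; i++)
            atomic_fetch_add_explicit(&self->cycles, 1, memory_order_relaxed);
        atomic_store_explicit(&self->state, 2, memory_order_relaxed);
        return 0;
    }

    int main(void)
    {
        toy_thread_t t = { .tid = 1, .name = "worker" };
        atomic_init(&t.state, 0);
        atomic_init(&t.cycles, 0);

        thrd_t handle;
        thrd_create(&handle, worker, &t);

        /* A "monitor" reads immutable fields directly and shared counters
         * through relaxed loads: possibly stale, but never undefined. */
        printf("tid %u (%s): state=%d cycles=%lu\n", t.tid, t.name,
            atomic_load_explicit(&t.state, memory_order_relaxed),
            (unsigned long) atomic_load_explicit(&t.cycles, memory_order_relaxed));

        thrd_join(handle, NULL);
        return 0;
    }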
kernel/generic/meson.build
  'src/mm/malloc.c',
  'src/mm/reserve.c',
- 'src/preempt/preemption.c',
  'src/printf/printf.c',
  'src/printf/snprintf.c',
kernel/generic/src/interrupt/interrupt.c

      /* Account user cycles */
-     if (THREAD) {
-         irq_spinlock_lock(&THREAD->lock, false);
+     if (THREAD)
          thread_update_accounting(true);
-         irq_spinlock_unlock(&THREAD->lock, false);
-     }

      /* Account CPU usage if it woke up from sleep */
  …

      /* Do not charge THREAD for exception cycles */
-     if (THREAD) {
-         irq_spinlock_lock(&THREAD->lock, false);
+     if (THREAD)
          THREAD->last_cycle = end_cycle;
-         irq_spinlock_unlock(&THREAD->lock, false);
-     }
  #else
      panic("No space for any exception handler, yet we want to handle some exception.");
kernel/generic/src/main/main.c
   * starting the thread of kernel threads.
   */
- scheduler_run();
+ current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+ context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
  /* not reached */
  }
  …
      ARCH_OP(post_cpu_init);

-     current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
      /*
       * If we woke kmp up before we left the kernel stack, we could
  …
       * switch to this cpu's private stack prior to waking kmp up.
       */
+     current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
      context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
      /* not reached */
kernel/generic/src/proc/scheduler.c
  /*
   * Copyright (c) 2010 Jakub Jermar
+  * Copyright (c) 2023 Jiří Zárevúcky
   * All rights reserved.
   *
  …
  #include <time/delay.h>
  #include <arch/asm.h>
- #include <arch/faddr.h>
  #include <arch/cycle.h>
  #include <atomic.h>
  …
  #include <stacktrace.h>

- static void scheduler_separated_stack(void);
-
  atomic_size_t nrdy;  /**< Number of ready threads in the system. */

  …
  static void relink_rq(int start)
  {
+     assert(interrupts_disabled());
+
      if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
          return;
  …
  }

- void scheduler_run(void)
- {
-     assert(interrupts_disabled());
-     assert(THREAD == NULL);
-     assert(CPU != NULL);
-
-     current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-     context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
-     unreachable();
- }
-
  /** Things to do before we switch to THREAD context.
   */
  …
      switch_task(THREAD->task);

-     irq_spinlock_lock(&THREAD->lock, false);
-     THREAD->state = Running;
-     THREAD->cpu = CPU;
-     THREAD->priority = rq_index; /* Correct rq index */
+     assert(atomic_get_unordered(&THREAD->cpu) == CPU);
+
+     atomic_set_unordered(&THREAD->state, Running);
+     atomic_set_unordered(&THREAD->priority, rq_index); /* Correct rq index */

      /*
  …
      log(LF_OTHER, LVL_DEBUG,
          "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-         ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+         ", nrdy=%zu)", CPU->id, THREAD->tid, rq_index,
          THREAD->ticks, atomic_load(&CPU->nrdy));
  #endif
  …

  #ifdef CONFIG_UDEBUG
-     if (THREAD->btrace) {
+     if (atomic_get_unordered(&THREAD->btrace)) {
          istate_t *istate = THREAD->udebug.uspace_state;
          if (istate != NULL) {
              printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
              stack_trace_istate(istate);
+         } else {
+             printf("Thread %" PRIu64 " interrupt state not available\n", THREAD->tid);
          }

-         THREAD->btrace = false;
+         atomic_set_unordered(&THREAD->btrace, false);
      }
  #endif
  …
  }

- static void cleanup_after_thread(thread_t *thread, state_t out_state)
+ static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
+ {
+     /* Add to the appropriate runqueue. */
+     runq_t *rq = &cpu->rq[i];
+
+     irq_spinlock_lock(&rq->lock, false);
+     list_append(&thread->rq_link, &rq->rq);
+     rq->n++;
+     irq_spinlock_unlock(&rq->lock, false);
+
+     atomic_inc(&nrdy);
+     atomic_inc(&cpu->nrdy);
+ }
+
+ /** Requeue a thread that was just preempted on this CPU.
+  */
+ static void thread_requeue_preempted(thread_t *thread)
+ {
+     assert(interrupts_disabled());
+     assert(atomic_get_unordered(&thread->state) == Running);
+     assert(atomic_get_unordered(&thread->cpu) == CPU);
+
+     int prio = atomic_get_unordered(&thread->priority);
+
+     if (prio < RQ_COUNT - 1) {
+         prio++;
+         atomic_set_unordered(&thread->priority, prio);
+     }
+
+     atomic_set_unordered(&thread->state, Ready);
+
+     add_to_rq(thread, CPU, prio);
+ }
+
+ void thread_requeue_sleeping(thread_t *thread)
+ {
+     ipl_t ipl = interrupts_disable();
+
+     assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
+
+     atomic_set_unordered(&thread->priority, 0);
+     atomic_set_unordered(&thread->state, Ready);
+
+     /* Prefer the CPU on which the thread ran last */
+     cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+
+     if (!cpu) {
+         cpu = CPU;
+         atomic_set_unordered(&thread->cpu, CPU);
+     }
+
+     add_to_rq(thread, cpu, 0);
+
+     interrupts_restore(ipl);
+ }
+
+ static void cleanup_after_thread(thread_t *thread)
  {
      assert(CURRENT->mutex_locks == 0);
  …
      int expected;

-     switch (out_state) {
+     switch (atomic_get_unordered(&thread->state)) {
      case Running:
-         thread_ready(thread);
+         thread_requeue_preempted(thread);
          break;

  …
          assert(expected == SLEEP_WOKE);
          /* The thread has already been woken up, requeue immediately. */
-         thread_ready(thread);
+         thread_requeue_sleeping(thread);
      }
      break;
  …
       */
      panic("tid%" PRIu64 ": unexpected state %s.",
-         thread->tid, thread_states[thread->state]);
+         thread->tid, thread_states[atomic_get_unordered(&thread->state)]);
      break;
      }
  }

- /** The scheduler
-  *
-  * The thread scheduling procedure.
-  * Passes control directly to
-  * scheduler_separated_stack().
-  *
-  */
+ /** Switch to scheduler context to let other threads run. */
  void scheduler_enter(state_t new_state)
  {
  …
      assert(THREAD != NULL);

-     fpu_cleanup();
-
-     irq_spinlock_lock(&THREAD->lock, false);
-     THREAD->state = new_state;
-
-     /* Update thread kernel accounting */
-     THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-     if (new_state == Sleeping) {
-         /* Prefer the thread after it's woken up. */
-         THREAD->priority = -1;
-     }
-
-     /*
-      * Through the 'CURRENT' structure, we keep track of THREAD, TASK, CPU, AS
-      * and preemption counter. At this point CURRENT could be coming either
-      * from THREAD's or CPU's stack.
-      *
-      */
-     current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
-     /*
-      * We may not keep the old stack.
-      * Reason: If we kept the old stack and got blocked, for instance, in
-      * find_best_thread(), the old thread could get rescheduled by another
-      * CPU and overwrite the part of its own stack that was also used by
-      * the scheduler on this CPU.
-      *
-      * Moreover, we have to bypass the compiler-generated POP sequence
-      * which is fooled by SP being set to the very top of the stack.
-      * Therefore the scheduler() function continues in
-      * scheduler_separated_stack().
-      *
-      */
-     context_t ctx;
-     context_create(&ctx, scheduler_separated_stack,
-         CPU_LOCAL->stack, STACK_SIZE);
-
-     /* Switch to scheduler context and store current thread's context. */
-     context_swap(&THREAD->saved_context, &ctx);
-
-     /* Returned from scheduler. */
-
-     irq_spinlock_unlock(&THREAD->lock, false);
-     interrupts_restore(ipl);
- }
-
- /** Scheduler stack switch wrapper
-  *
-  * Second part of the scheduler() function
-  * using new stack. Handling the actual context
-  * switch to a new thread.
-  *
-  */
- void scheduler_separated_stack(void)
- {
-     assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
-     assert(CPU != NULL);
-     assert(interrupts_disabled());
-
      if (atomic_load(&haltstate))
          halt();

-     if (THREAD) {
-         /*
-          * On Sparc, this saves some extra userspace state that's not
-          * covered by context_save()/context_restore().
-          */
-         after_thread_ran_arch();
-
-         state_t state = THREAD->state;
-         irq_spinlock_unlock(&THREAD->lock, false);
-
-         cleanup_after_thread(THREAD, state);
-
+     /* Check if we have a thread to switch to. */
+
+     int rq_index;
+     thread_t *new_thread = try_find_thread(&rq_index);
+
+     if (new_thread == NULL && new_state == Running) {
+         /* No other thread to run, but we still have work to do here. */
+         interrupts_restore(ipl);
+         return;
+     }
+
+     atomic_set_unordered(&THREAD->state, new_state);
+
+     /* Update thread kernel accounting */
+     atomic_time_increment(&THREAD->kcycles, get_cycle() - THREAD->last_cycle);
+
+     fpu_cleanup();
+
+     /*
+      * On Sparc, this saves some extra userspace state that's not
+      * covered by context_save()/context_restore().
+      */
+     after_thread_ran_arch();
+
+     if (new_thread) {
+         thread_t *old_thread = THREAD;
+         CPU_LOCAL->prev_thread = old_thread;
+         THREAD = new_thread;
+         /* No waiting necessary, we can switch to the new thread directly. */
+         prepare_to_run_thread(rq_index);
+
+         current_copy(CURRENT, (current_t *) new_thread->kstack);
+         context_swap(&old_thread->saved_context, &new_thread->saved_context);
+     } else {
+         /*
+          * A new thread isn't immediately available, switch to a separate
+          * stack to sleep or do other idle stuff.
+          */
+         current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+         context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
+     }
+
+     assert(CURRENT->mutex_locks == 0);
+     assert(interrupts_disabled());
+
+     /* Check if we need to clean up after another thread. */
+     if (CPU_LOCAL->prev_thread) {
+         cleanup_after_thread(CPU_LOCAL->prev_thread);
+         CPU_LOCAL->prev_thread = NULL;
+     }
+
+     interrupts_restore(ipl);
+ }
+
+ /** Enter main scheduler loop. Never returns.
+  *
+  * This function switches to a runnable thread as soon as one is available,
+  * after which it is only switched back to if a thread is stopping and there is
+  * no other thread to run in its place. We need a separate context for that
+  * because we're going to block the CPU, which means we need another context
+  * to clean up after the previous thread.
+  */
+ void scheduler_run(void)
+ {
+     assert(interrupts_disabled());
+
+     assert(CPU != NULL);
+     assert(TASK == NULL);
+     assert(THREAD == NULL);
+     assert(interrupts_disabled());
+
+     while (!atomic_load(&haltstate)) {
+         assert(CURRENT->mutex_locks == 0);
+
+         int rq_index;
+         THREAD = find_best_thread(&rq_index);
+         prepare_to_run_thread(rq_index);
+
+         /*
+          * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+          * thread's stack.
+          */
+         current_copy(CURRENT, (current_t *) THREAD->kstack);
+
+         /* Switch to thread context. */
+         context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
+
+         /* Back from another thread. */
+         assert(CPU != NULL);
+         assert(THREAD != NULL);
+         assert(CURRENT->mutex_locks == 0);
+         assert(interrupts_disabled());
+
+         cleanup_after_thread(THREAD);
+
+         /*
+          * Necessary because we're allowing interrupts in find_best_thread(),
+          * so we need to avoid other code referencing the thread we left.
+          */
          THREAD = NULL;
      }

-     int rq_index;
-     THREAD = find_best_thread(&rq_index);
-
-     prepare_to_run_thread(rq_index);
-
-     /*
-      * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
-      * thread's stack.
-      */
-     current_copy(CURRENT, (current_t *) THREAD->kstack);
-
-     context_restore(&THREAD->saved_context);
+     halt();
+ }
+
+ /** Thread wrapper.
+  *
+  * This wrapper is provided to ensure that a starting thread properly handles
+  * everything it needs to do when first scheduled, and when it exits.
+  */
+ void thread_main_func(void)
+ {
+     assert(interrupts_disabled());
+
+     void (*f)(void *) = THREAD->thread_code;
+     void *arg = THREAD->thread_arg;
+
+     /* This is where each thread wakes up after its creation */
+
+     /* Check if we need to clean up after another thread. */
+     if (CPU_LOCAL->prev_thread) {
+         cleanup_after_thread(CPU_LOCAL->prev_thread);
+         CPU_LOCAL->prev_thread = NULL;
+     }
+
+     interrupts_enable();
+
+     f(arg);
+
+     thread_exit();

      /* Not reached */
  …
      list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {

-         irq_spinlock_lock(&thread->lock, false);
-
          /*
           * Do not steal CPU-wired threads, threads
  …
           * FPU context is still in the CPU.
           */
-         if (thread->stolen || thread->nomigrate ||
-             thread == fpu_owner) {
-             irq_spinlock_unlock(&thread->lock, false);
+         if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
              continue;
          }

          thread->stolen = true;
-         thread->cpu = CPU;
-
-         irq_spinlock_unlock(&thread->lock, false);
+         atomic_set_unordered(&thread->cpu, CPU);

          /*
  …
              thread) {
              printf("%" PRIu64 "(%s) ", thread->tid,
-                 thread_states[thread->state]);
+                 thread_states[atomic_get_unordered(&thread->state)]);
          }
          printf("\n");
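The rewritten scheduler keeps one scheduler context per CPU and normally switches from thread to thread directly, deferring cleanup of the previous thread (CPU_LOCAL->prev_thread) until code is running on another stack. The following userspace sketch (illustrative only, not HelenOS code; it uses the obsolescent but widely available POSIX <ucontext.h> API, and all names are made up) models the scheduler_run() side of that idea: a scheduler context repeatedly swaps into a runnable toy thread and performs per-thread bookkeeping only after control returns to the scheduler's own stack.

    #include <stdio.h>
    #include <ucontext.h>

    #define NTOY 2
    #define STACK_SZ (64 * 1024)

    typedef struct {
        ucontext_t ctx;
        char stack[STACK_SZ];
        int id;
        int done;
    } toy_thread_t;

    static ucontext_t scheduler_ctx;   /* rough analogue of CPU_LOCAL->scheduler_context */
    static toy_thread_t toys[NTOY];
    static toy_thread_t *current;

    /* Cooperative yield: swap back into the scheduler context. */
    static void toy_yield(void)
    {
        swapcontext(&current->ctx, &scheduler_ctx);
    }

    static void toy_main(void)
    {
        for (int step = 0; step < 3; step++) {
            printf("thread %d, step %d\n", current->id, step);
            toy_yield();
        }
        current->done = 1;
        /* Returning lands in uc_link, i.e. the scheduler context. */
    }

    int main(void)
    {
        for (int i = 0; i < NTOY; i++) {
            toys[i].id = i;
            getcontext(&toys[i].ctx);
            toys[i].ctx.uc_stack.ss_sp = toys[i].stack;
            toys[i].ctx.uc_stack.ss_size = STACK_SZ;
            toys[i].ctx.uc_link = &scheduler_ctx;
            makecontext(&toys[i].ctx, toy_main, 0);
        }

        /* Analogue of scheduler_run(): pick a runnable thread, switch to it,
         * and do per-thread bookkeeping only once we are back on the
         * scheduler's own stack. */
        int remaining = NTOY;
        while (remaining > 0) {
            for (int i = 0; i < NTOY; i++) {
                if (toys[i].done)
                    continue;
                current = &toys[i];
                swapcontext(&scheduler_ctx, &toys[i].ctx);
                /* Back on the scheduler stack: safe to clean up. */
                if (toys[i].done) {
                    printf("cleaning up after thread %d\n", i);
                    remaining--;
                }
            }
        }
        return 0;
    }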
kernel/generic/src/proc/task.c
      /* Current values of threads */
      list_foreach(task->threads, th_link, thread_t, thread) {
-         irq_spinlock_lock(&thread->lock, false);
-
          /* Process only counted threads */
          if (!thread->uncounted) {
  …
              }

-             uret += thread->ucycles;
-             kret += thread->kcycles;
+             uret += atomic_time_read(&thread->ucycles);
+             kret += atomic_time_read(&thread->kcycles);
          }
-
-         irq_spinlock_unlock(&thread->lock, false);
      }

kernel/generic/src/proc/thread.c
  static int threads_cmp(void *, void *);

- /** Thread wrapper.
-  *
-  * This wrapper is provided to ensure that every thread makes a call to
-  * thread_exit() when its implementing function returns.
-  *
-  * interrupts_disable() is assumed.
-  *
-  */
- static void cushion(void)
- {
-     void (*f)(void *) = THREAD->thread_code;
-     void *arg = THREAD->thread_arg;
-
-     /* This is where each thread wakes up after its creation */
-     irq_spinlock_unlock(&THREAD->lock, false);
-     interrupts_enable();
-
-     f(arg);
-
-     thread_exit();
-
-     /* Not reached */
- }
-
  /** Initialization and allocation for thread_t structure
   *
  …
      thread_t *thread = (thread_t *) obj;

-     irq_spinlock_initialize(&thread->lock, "thread_t_lock");
      link_initialize(&thread->rq_link);
      link_initialize(&thread->wq_link);
  …
  void thread_wire(thread_t *thread, cpu_t *cpu)
  {
-     irq_spinlock_lock(&thread->lock, true);
-     thread->cpu = cpu;
+     ipl_t ipl = interrupts_disable();
+     atomic_set_unordered(&thread->cpu, cpu);
      thread->nomigrate++;
-     irq_spinlock_unlock(&thread->lock, true);
+     interrupts_restore(ipl);
  }

  …
  void thread_start(thread_t *thread)
  {
-     assert(thread->state == Entering);
-     thread_ready(thread_ref(thread));
- }
-
- /** Make thread ready
-  *
-  * Switch thread to the ready state. Consumes reference passed by the caller.
-  *
-  * @param thread Thread to make ready.
-  *
-  */
- void thread_ready(thread_t *thread)
- {
-     irq_spinlock_lock(&thread->lock, true);
-
-     assert(thread->state != Ready);
-
-     int i = (thread->priority < RQ_COUNT - 1) ?
-         ++thread->priority : thread->priority;
-
-     /* Prefer the CPU on which the thread ran last */
-     cpu_t *cpu = thread->cpu ? thread->cpu : CPU;
-
-     thread->state = Ready;
-
-     irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
-
-     /*
-      * Append thread to respective ready queue
-      * on respective processor.
-      */
-
-     list_append(&thread->rq_link, &cpu->rq[i].rq);
-     cpu->rq[i].n++;
-     irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
-     atomic_inc(&nrdy);
-     atomic_inc(&cpu->nrdy);
+     assert(atomic_get_unordered(&thread->state) == Entering);
+     thread_requeue_sleeping(thread_ref(thread));
  }

  …
      irq_spinlock_unlock(&tidlock, true);

-     context_create(&thread->saved_context, cushion, thread->kstack, STACK_SIZE);
+     context_create(&thread->saved_context, thread_main_func,
+         thread->kstack, STACK_SIZE);

      current_initialize((current_t *) thread->kstack);
  …
      thread->thread_code = func;
      thread->thread_arg = arg;
-     thread->ucycles = 0;
-     thread->kcycles = 0;
+     thread->ucycles = ATOMIC_TIME_INITIALIZER();
+     thread->kcycles = ATOMIC_TIME_INITIALIZER();
      thread->uncounted =
          ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
-     thread->priority = -1; /* Start in rq[0] */
-     thread->cpu = NULL;
+     atomic_init(&thread->priority, 0);
+     atomic_init(&thread->cpu, NULL);
      thread->stolen = false;
      thread->uspace =
  …

      thread->nomigrate = 0;
-     thread->state = Entering;
+     atomic_init(&thread->state, Entering);

      atomic_init(&thread->sleep_queue, NULL);
  …
  #ifdef CONFIG_UDEBUG
      /* Initialize debugging stuff */
-     thread->btrace = false;
+     atomic_init(&thread->btrace, false);
      udebug_thread_initialize(&thread->udebug);
  #endif
  …

      if (!thread->uncounted) {
-         thread->task->ucycles += thread->ucycles;
-         thread->task->kcycles += thread->kcycles;
+         thread->task->ucycles += atomic_time_read(&thread->ucycles);
+         thread->task->kcycles += atomic_time_read(&thread->kcycles);
      }

      irq_spinlock_unlock(&thread->task->lock, false);

-     assert((thread->state == Exiting) || (thread->state == Lingering));
+     assert((atomic_get_unordered(&thread->state) == Exiting) || (atomic_get_unordered(&thread->state) == Lingering));

      /* Clear cpu->fpu_owner if set to this thread. */
  #ifdef CONFIG_FPU_LAZY
-     if (thread->cpu) {
+     cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+     if (cpu) {
          /*
           * We need to lock for this because the old CPU can concurrently try
  …
           * it to finish. An atomic compare-and-swap wouldn't be enough.
           */
-         irq_spinlock_lock(&thread->cpu->fpu_lock, false);
-
-         thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
-             memory_order_relaxed);
-
-         if (owner == thread) {
-             atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
-                 memory_order_relaxed);
-         }
-
-         irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
+         irq_spinlock_lock(&cpu->fpu_lock, false);
+
+         if (atomic_get_unordered(&cpu->fpu_owner) == thread)
+             atomic_set_unordered(&cpu->fpu_owner, NULL);
+
+         irq_spinlock_unlock(&cpu->fpu_lock, false);
      }
  #endif
  …
       * the waking thread by the sleeper in thread_wait_finish().
       */
-         thread_ready(thread);
+         thread_requeue_sleeping(thread);
      }
  }
  …
  void thread_migration_disable(void)
  {
+     ipl_t ipl = interrupts_disable();
+
      assert(THREAD);
-
      THREAD->nomigrate++;
+
+     interrupts_restore(ipl);
  }

  …
  void thread_migration_enable(void)
  {
+     ipl_t ipl = interrupts_disable();
+
      assert(THREAD);
      assert(THREAD->nomigrate > 0);
  …
      if (THREAD->nomigrate > 0)
          THREAD->nomigrate--;
+
+     interrupts_restore(ipl);
  }

  …
          return EINVAL;

-     irq_spinlock_lock(&thread->lock, true);
-     state_t state = thread->state;
-     irq_spinlock_unlock(&thread->lock, true);
-
-     errno_t rc = EOK;
-
-     if (state != Exiting)
-         rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);
+     errno_t rc = _waitq_sleep_timeout(&thread->join_wq, usec, flags);

      if (rc == EOK)
  …
      uint64_t ucycles, kcycles;
      char usuffix, ksuffix;
-     order_suffix(thread->ucycles, &ucycles, &usuffix);
-     order_suffix(thread->kcycles, &kcycles, &ksuffix);
+     order_suffix(atomic_time_read(&thread->ucycles), &ucycles, &usuffix);
+     order_suffix(atomic_time_read(&thread->kcycles), &kcycles, &ksuffix);
+
+     state_t state = atomic_get_unordered(&thread->state);

      char *name;
  …
      else
          printf("%-8" PRIu64 " %-14s %p %-8s %p %-5" PRIu32 "\n",
-             thread->tid, name, thread, thread_states[thread->state],
+             thread->tid, name, thread, thread_states[state],
              thread->task, thread->task->container);

      if (additional) {
-         if (thread->cpu)
-             printf("%-5u", thread->cpu->id);
+         cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+         if (cpu)
+             printf("%-5u", cpu->id);
          else
              printf("none ");

-         if (thread->state == Sleeping) {
+         if (state == Sleeping) {
              printf(" %p", thread->sleep_queue);
          }
  …
  void thread_update_accounting(bool user)
  {
+     assert(interrupts_disabled());
+
      uint64_t time = get_cycle();

-     assert(interrupts_disabled());
-     assert(irq_spinlock_locked(&THREAD->lock));
-
      if (user)
-         THREAD->ucycles += time - THREAD->last_cycle;
+         atomic_time_increment(&THREAD->ucycles, time - THREAD->last_cycle);
      else
-         THREAD->kcycles += time - THREAD->last_cycle;
+         atomic_time_increment(&THREAD->kcycles, time - THREAD->last_cycle);

      THREAD->last_cycle = time;
  …
       */

-     irq_spinlock_lock(&thread->lock, true);
-
-     bool sleeping = false;
-     istate_t *istate = thread->udebug.uspace_state;
-     if (istate != NULL) {
-         printf("Scheduling thread stack trace.\n");
-         thread->btrace = true;
-         if (thread->state == Sleeping)
-             sleeping = true;
-     } else
-         printf("Thread interrupt state not available.\n");
-
-     irq_spinlock_unlock(&thread->lock, true);
-
-     if (sleeping)
-         thread_wakeup(thread);
-
+     printf("Scheduling thread stack trace.\n");
+     atomic_set_unordered(&thread->btrace, true);
+
+     thread_wakeup(thread);
      thread_put(thread);
  }
  …
      thread_attach(thread, TASK);
  #endif
-     thread_ready(thread);
+     thread_start(thread);
+     thread_put(thread);

      return 0;
kernel/generic/src/syscall/syscall.c
  {
      /* Do userpace accounting */
-     irq_spinlock_lock(&THREAD->lock, true);
+     ipl_t ipl = interrupts_disable();
      thread_update_accounting(true);
-     irq_spinlock_unlock(&THREAD->lock, true);
+     interrupts_restore(ipl);

  #ifdef CONFIG_UDEBUG
  …

      /* Do kernel accounting */
-     irq_spinlock_lock(&THREAD->lock, true);
+     ipl = interrupts_disable();
      thread_update_accounting(false);
-     irq_spinlock_unlock(&THREAD->lock, true);
+     interrupts_restore(ipl);

      return rc;
kernel/generic/src/sysinfo/stats.c
  {
      assert(interrupts_disabled());
-     assert(irq_spinlock_locked(&thread->lock));

      stats_thread->thread_id = thread->tid;
      stats_thread->task_id = thread->task->taskid;
-     stats_thread->state = thread->state;
-     stats_thread->priority = thread->priority;
-     stats_thread->ucycles = thread->ucycles;
-     stats_thread->kcycles = thread->kcycles;
-
-     if (thread->cpu != NULL) {
+     stats_thread->state = atomic_get_unordered(&thread->state);
+     stats_thread->priority = atomic_get_unordered(&thread->priority);
+     stats_thread->ucycles = atomic_time_read(&thread->ucycles);
+     stats_thread->kcycles = atomic_time_read(&thread->kcycles);
+
+     cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+
+     if (cpu != NULL) {
          stats_thread->on_cpu = true;
-         stats_thread->cpu = thread->cpu->id;
+         stats_thread->cpu = cpu->id;
      } else
          stats_thread->on_cpu = false;
  …
      thread_t *thread = thread_first();
      while (thread != NULL) {
-         /* Interrupts are already disabled */
-         irq_spinlock_lock(&thread->lock, false);
-
          /* Record the statistics and increment the index */
          produce_stats_thread(thread, &stats_threads[i]);
          i++;
-
-         irq_spinlock_unlock(&thread->lock, false);

          thread = thread_next(thread);
  …
      ret.data.size = sizeof(stats_thread_t);

-     /*
-      * Replaced hand-over-hand locking with regular nested sections
-      * to avoid weak reference leak issues.
-      */
-     irq_spinlock_lock(&thread->lock, false);
      produce_stats_thread(thread, stats_thread);
-     irq_spinlock_unlock(&thread->lock, false);

      irq_spinlock_unlock(&threads_lock, true);
kernel/generic/src/time/clock.c
  static void cpu_update_accounting(void)
  {
+     // FIXME: get_cycle() is unimplemented on several platforms
      uint64_t now = get_cycle();
      atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);
kernel/generic/src/udebug/udebug_ops.c
  }

-     irq_spinlock_lock(&thread->lock, true);
-
      /* Verify that 'thread' is a userspace thread. */
      if (!thread->uspace) {
-         /* It's not, deny its existence */
-         irq_spinlock_unlock(&thread->lock, true);
          mutex_unlock(&TASK->udebug.lock);
          return ENOENT;
      }
-
-     /* Verify debugging state. */
-     if (thread->udebug.active != true) {
-         /* Not in debugging session or undesired GO state */
-         irq_spinlock_unlock(&thread->lock, true);
-         mutex_unlock(&TASK->udebug.lock);
-         return ENOENT;
-     }
-
-     /* Now verify that the thread belongs to the current task. */
-     if (thread->task != TASK) {
-         /* No such thread belonging this task */
-         irq_spinlock_unlock(&thread->lock, true);
-         mutex_unlock(&TASK->udebug.lock);
-         return ENOENT;
-     }
-
-     irq_spinlock_unlock(&thread->lock, true);
-
-     /* Only mutex TASK->udebug.lock left. */

      /*
  …
       */
      mutex_lock(&thread->udebug.lock);
+
+     /* Verify debugging state. */
+     if (thread->udebug.active != true) {
+         /* Not in debugging session or undesired GO state */
+         mutex_unlock(&thread->udebug.lock);
+         mutex_unlock(&TASK->udebug.lock);
+         return ENOENT;
+     }
+
+     /* Now verify that the thread belongs to the current task. */
+     if (thread->task != TASK) {
+         /* No such thread belonging this task */
+         mutex_unlock(&thread->udebug.lock);
+         mutex_unlock(&TASK->udebug.lock);
+         return ENOENT;
+     }

      /* The big task mutex is no longer needed. */
  …
      /* FIXME: make sure the thread isn't past debug shutdown... */
      list_foreach(TASK->threads, th_link, thread_t, thread) {
-         irq_spinlock_lock(&thread->lock, false);
          bool uspace = thread->uspace;
-         irq_spinlock_unlock(&thread->lock, false);

          /* Not interested in kernel threads. */