Changeset 25939997 in mainline
- Timestamp:
- 2024-01-19T16:21:20Z (10 months ago)
- Branches:
- master
- Children:
- c1eaec4
- Parents:
- 1c1767f
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-16 19:36:25)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-19 16:21:20)
- Location:
- kernel/generic
- Files:
- 5 edited
Legend:
- Unmodified: prefixed with a space
- Added: prefixed with "+"
- Removed: prefixed with "-"
kernel/generic/include/cpu.h
@@ -74,3 +74,5 @@
     bool idle;
     uint64_t last_cycle;
+
+    context_t scheduler_context;
 } cpu_local_t;
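The new scheduler_context field gives every CPU a persistent saved context for its scheduler loop, so a thread can hand control back to the scheduler with a plain context swap instead of the scheduler context being rebuilt on each pass. A rough user-space analogue of the resulting structure, using POSIX ucontext.h in place of the kernel's context_t (the cpu_sim_t name and layout below are illustrative only, not HelenOS code):

/* Minimal sketch: a per-"CPU" record that keeps a persistent scheduler
 * context alongside its other bookkeeping fields. */
#include <stdbool.h>
#include <stdint.h>
#include <ucontext.h>

typedef struct {
    bool idle;
    uint64_t last_cycle;
    /* Persistent context of this CPU's scheduler loop; threads swap back
     * into it instead of a fresh scheduler context being created each time. */
    ucontext_t sched_ctx;
} cpu_sim_t;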
kernel/generic/src/main/main.c
@@ -287,5 +287,6 @@
      * starting the thread of kernel threads.
      */
-    scheduler_run();
+    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+    context_replace(scheduler_run, CPU_LOCAL->stack, STACK_SIZE);
     /* not reached */
 }
@@ -327,6 +328,4 @@
     ARCH_OP(post_cpu_init);
 
-    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
     /*
      * If we woke kmp up before we left the kernel stack, we could
@@ -334,4 +333,5 @@
      * switch to this cpu's private stack prior to waking kmp up.
      */
+    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
     context_replace(main_ap_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
     /* not reached */
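On both the bootstrap and application-processor paths, the kernel now copies CURRENT onto the CPU's private stack and does a one-way context_replace() onto that stack, instead of calling into the scheduler while still on the boot stack. The sketch below shows the same "abandon the current stack and jump to an entry function on a fresh one" idiom using POSIX ucontext.h; jump_to_fresh_stack() and entry() are made-up names for illustration, not HelenOS APIs:

/* Hypothetical user-space analogue of a one-way stack switch. */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)

static void entry(void)
{
    puts("running on the new stack; the old stack is abandoned");
    exit(EXIT_SUCCESS);   /* never return into the discarded frame */
}

static void jump_to_fresh_stack(void (*fn)(void), void *stack, size_t size)
{
    ucontext_t ctx;
    getcontext(&ctx);            /* template for the new context */
    ctx.uc_stack.ss_sp = stack;
    ctx.uc_stack.ss_size = size;
    ctx.uc_link = NULL;          /* nothing to return to */
    makecontext(&ctx, fn, 0);
    setcontext(&ctx);            /* one-way switch, like context_replace() */
}

int main(void)
{
    void *stack = malloc(STACK_SIZE);
    jump_to_fresh_stack(entry, stack, STACK_SIZE);
    /* not reached */
}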
kernel/generic/src/proc/scheduler.c
@@ -1,4 +1,5 @@
 /*
  * Copyright (c) 2010 Jakub Jermar
+ * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
  *
@@ -50,5 +51,4 @@
 #include <time/delay.h>
 #include <arch/asm.h>
-#include <arch/faddr.h>
 #include <arch/cycle.h>
 #include <atomic.h>
@@ -66,6 +66,4 @@
 #include <stacktrace.h>
 
-static void scheduler_separated_stack(void);
-
 atomic_size_t nrdy;  /**< Number of ready threads in the system. */
 
@@ -227,4 +225,6 @@
 static void relink_rq(int start)
 {
+    assert(interrupts_disabled());
+
     if (CPU_LOCAL->current_clock_tick < CPU_LOCAL->relink_deadline)
         return;
@@ -302,15 +302,4 @@
 }
 
-void scheduler_run(void)
-{
-    assert(interrupts_disabled());
-    assert(THREAD == NULL);
-    assert(CPU != NULL);
-
-    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-    context_replace(scheduler_separated_stack, CPU_LOCAL->stack, STACK_SIZE);
-    unreachable();
-}
-
 /** Things to do before we switch to THREAD context.
  */
@@ -421,11 +410,5 @@
 }
 
-/** The scheduler
- *
- * The thread scheduling procedure.
- * Passes control directly to
- * scheduler_separated_stack().
- *
- */
+/** Switch to scheduler context to let other threads run. */
 void scheduler_enter(state_t new_state)
 {
@@ -436,4 +419,7 @@
 
     fpu_cleanup();
+
+    if (atomic_load(&haltstate))
+        halt();
 
     irq_spinlock_lock(&THREAD->lock, false);
@@ -460,27 +446,7 @@
      *
      */
+
     current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
-    /*
-     * We may not keep the old stack.
-     * Reason: If we kept the old stack and got blocked, for instance, in
-     * find_best_thread(), the old thread could get rescheduled by another
-     * CPU and overwrite the part of its own stack that was also used by
-     * the scheduler on this CPU.
-     *
-     * Moreover, we have to bypass the compiler-generated POP sequence
-     * which is fooled by SP being set to the very top of the stack.
-     * Therefore the scheduler() function continues in
-     * scheduler_separated_stack().
-     *
-     */
-    context_t ctx;
-    context_create(&ctx, scheduler_separated_stack,
-        CPU_LOCAL->stack, STACK_SIZE);
-
-    /* Switch to scheduler context and store current thread's context. */
-    context_swap(&THREAD->saved_context, &ctx);
-
-    /* Returned from scheduler. */
+    context_swap(&THREAD->saved_context, &CPU_LOCAL->scheduler_context);
 
     irq_spinlock_unlock(&THREAD->lock, false);
@@ -488,21 +454,45 @@
 }
 
-/** Scheduler stack switch wrapper
- *
- * Second part of the scheduler() function
- * using new stack. Handling the actual context
- * switch to a new thread.
- *
- */
-void scheduler_separated_stack(void)
-{
-    assert((!THREAD) || (irq_spinlock_locked(&THREAD->lock)));
+/** Enter main scheduler loop. Never returns.
+ *
+ * This function switches to a runnable thread as soon as one is available,
+ * after which it is only switched back to if a thread is stopping and there is
+ * no other thread to run in its place. We need a separate context for that
+ * because we're going to block the CPU, which means we need another context
+ * to clean up after the previous thread.
+ */
+void scheduler_run(void)
+{
+    assert(interrupts_disabled());
+
     assert(CPU != NULL);
+    assert(TASK == NULL);
+    assert(THREAD == NULL);
     assert(interrupts_disabled());
 
-    if (atomic_load(&haltstate))
-        halt();
-
-    if (THREAD) {
+    while (!atomic_load(&haltstate)) {
+        assert(CURRENT->mutex_locks == 0);
+
+        int rq_index;
+        THREAD = find_best_thread(&rq_index);
+
+        prepare_to_run_thread(rq_index);
+
+        /*
+         * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
+         * thread's stack.
+         */
+        current_copy(CURRENT, (current_t *) THREAD->kstack);
+
+        /* Switch to thread context. */
+        context_swap(&CPU_LOCAL->scheduler_context, &THREAD->saved_context);
+
+        /* Back from the thread. */
+
+        assert(CPU != NULL);
+        assert(THREAD != NULL);
+        assert(irq_spinlock_locked(&THREAD->lock));
+        assert(interrupts_disabled());
+
         state_t state = THREAD->state;
         irq_spinlock_unlock(&THREAD->lock, false);
@@ -510,21 +500,12 @@
         cleanup_after_thread(THREAD, state);
 
+        /*
+         * Necessary because we're allowing interrupts in find_best_thread(),
+         * so we need to avoid other code referencing the thread we left.
+         */
         THREAD = NULL;
     }
 
-    int rq_index;
-    THREAD = find_best_thread(&rq_index);
-
-    prepare_to_run_thread(rq_index);
-
-    /*
-     * Copy the knowledge of CPU, TASK, THREAD and preemption counter to
-     * thread's stack.
-     */
-    current_copy(CURRENT, (current_t *) THREAD->kstack);
-
-    context_restore(&THREAD->saved_context);
-
-    /* Not reached */
+    halt();
 }
 
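The core of the change: instead of a one-shot context_restore() into the next thread and a scheduler_separated_stack() helper that is re-entered from scratch, each CPU now runs a long-lived scheduler_run() loop and exchanges control with threads via context_swap() against CPU_LOCAL->scheduler_context. The following self-contained user-space sketch mimics that structure with POSIX ucontext.h; sched_ctx stands in for the per-CPU scheduler context and worker() for a kernel thread, and all names are illustrative rather than HelenOS code:

/* Hypothetical analogue of the reworked scheduler loop: a persistent
 * scheduler context that "threads" swap back into when they yield or stop. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <ucontext.h>

#define NTHREADS   2
#define STACK_SIZE (64 * 1024)

static ucontext_t sched_ctx;            /* persistent "scheduler context" */
static ucontext_t thr_ctx[NTHREADS];
static bool finished[NTHREADS];
static int current = -1;

/* A thread yields by swapping back into the scheduler context. */
static void yield(void)
{
    swapcontext(&thr_ctx[current], &sched_ctx);
}

static void worker(void)
{
    for (int i = 0; i < 3; i++) {
        printf("thread %d, step %d\n", current, i);
        yield();
    }
    finished[current] = true;           /* scheduler cleans up after us */
}

int main(void)
{
    for (int t = 0; t < NTHREADS; t++) {
        getcontext(&thr_ctx[t]);
        thr_ctx[t].uc_stack.ss_sp = malloc(STACK_SIZE);
        thr_ctx[t].uc_stack.ss_size = STACK_SIZE;
        thr_ctx[t].uc_link = &sched_ctx;  /* fall back to the scheduler on return */
        makecontext(&thr_ctx[t], worker, 0);
    }

    /* Scheduler loop: pick a runnable "thread", swap into it, and when it
     * swaps (or returns) back, clean up or pick the next one. */
    bool any;
    do {
        any = false;
        for (int t = 0; t < NTHREADS; t++) {
            if (finished[t])
                continue;
            any = true;
            current = t;
            swapcontext(&sched_ctx, &thr_ctx[t]);   /* like context_swap() */
        }
    } while (any);

    for (int t = 0; t < NTHREADS; t++)
        free(thr_ctx[t].uc_stack.ss_sp);

    puts("all threads done; scheduler halts");
    return 0;
}

The design point mirrored here is that the scheduler context survives across reschedules, so returning to the scheduler is symmetric with dispatching a thread: both are ordinary swaps rather than a special stack-replacing entry path.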
kernel/generic/src/proc/thread.c
@@ -246,4 +246,5 @@
 void thread_ready(thread_t *thread)
 {
+    // TODO: move this to scheduler.c
     irq_spinlock_lock(&thread->lock, true);
 
kernel/generic/src/time/clock.c
@@ -123,4 +123,5 @@
 static void cpu_update_accounting(void)
 {
+    // FIXME: get_cycle() is unimplemented on several platforms
     uint64_t now = get_cycle();
     atomic_time_increment(&CPU->busy_cycles, now - CPU_LOCAL->last_cycle);