Changeset 151c050 in mainline
- Timestamp:
- 2024-01-15T14:33:03Z (12 months ago)
- Branches:
- master
- Children:
- c7ceacf
- Parents:
- 8996582
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2023-04-15 16:15:29)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-15 14:33:03)
- Location:
- kernel/generic
- Files:
- 6 edited
Legend:
- Unmodified (prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/include/proc/scheduler.h
r8996582 → r151c050

 #include <atomic.h>
 #include <adt/list.h>
+#include <abi/proc/thread.h>
 
 #define RQ_COUNT 16
…
 extern void scheduler_fpu_lazy_request(void);
-extern void scheduler(void);
-extern void scheduler_locked(ipl_t);
 extern void kcpulb(void *arg);
 
 extern void sched_print_list(void);
+
+extern void scheduler_run(void) __attribute__((noreturn));
+extern void scheduler_enter(state_t);
 
 /*
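The old scheduler()/scheduler_locked() pair is replaced by two entry points with narrower contracts: scheduler_run() starts scheduling on a CPU that has no current thread and never returns, while scheduler_enter() suspends the calling thread and takes its next state as an argument (hence the new <abi/proc/thread.h> include, which supplies state_t). A hedged sketch of the two contracts, with made-up caller names; the include path assumes the kernel/generic include tree shown above:

    /* Illustrative only -- not part of the changeset. */
    #include <proc/scheduler.h>   /* scheduler_run(), scheduler_enter() */

    static void cpu_becomes_idle(void)      /* hypothetical name */
    {
        /* No current thread: hand the CPU to the scheduler for good. */
        scheduler_run();
        /* not reached */
    }

    static void thread_blocks(void)         /* hypothetical name */
    {
        /* Record the new state and switch away; this call returns
         * only when the thread is next scheduled. */
        scheduler_enter(Sleeping);
    }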
kernel/generic/include/proc/thread.h
r8996582 → r151c050

      */
     context_t saved_context;
-    ipl_t saved_ipl;
 
     /**
…
 extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);
 
+extern void thread_yield(void);
+
 extern void thread_print_list(bool);
 extern thread_t *thread_find_by_id(thread_id_t);
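The per-thread saved_ipl field can go away because scheduler_enter() now keeps the caller's interrupt level in a local variable across the switch, and thread_yield() becomes the exported way to relinquish the CPU voluntarily. A small hedged example of the new call (wait_for_flag() is a made-up name):

    /* Illustrative only: polling politely instead of spinning. */
    #include <stdbool.h>
    #include <proc/thread.h>

    static void wait_for_flag(volatile bool *flag)
    {
        while (!*flag)
            thread_yield();   /* let other ready threads run between polls */
    }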
kernel/generic/src/main/main.c
r8996582 → r151c050

 
     /*
-     * This call to scheduler() will return to kinit,
+     * This call to scheduler_run() will return to kinit,
      * starting the thread of kernel threads.
      */
-    scheduler();
+    scheduler_run();
     /* not reached */
 }
…
 
     semaphore_up(&ap_completion_semaphore);
-    scheduler();
+    scheduler_run();
     /* not reached */
 }
kernel/generic/src/proc/scheduler.c
r8996582 → r151c050

 }
 
-void scheduler(void)
-{
-    ipl_t ipl = interrupts_disable();
-
-    if (atomic_load(&haltstate))
-        halt();
-
-    if (THREAD) {
-        irq_spinlock_lock(&THREAD->lock, false);
-    }
-
-    scheduler_locked(ipl);
+void scheduler_run(void)
+{
+    assert(interrupts_disabled());
+    assert(THREAD == NULL);
+    assert(CPU != NULL);
+
+    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
+
+    context_t ctx;
+    context_save(&ctx);
+    context_set(&ctx, FADDR(scheduler_separated_stack),
+        (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
+    context_restore(&ctx);
+
+    unreachable();
 }
 
…
  *
  */
-void scheduler_locked(ipl_t ipl)
-{
+void scheduler_enter(state_t new_state)
+{
+    ipl_t ipl = interrupts_disable();
+
     assert(CPU != NULL);
-
-    if (THREAD) {
-        /* Update thread kernel accounting */
-        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-        fpu_cleanup();
-
-        if (!context_save(&THREAD->saved_context)) {
-            /*
-             * This is the place where threads leave scheduler();
-             */
-
-            irq_spinlock_unlock(&THREAD->lock, false);
-            interrupts_restore(THREAD->saved_ipl);
-
-            return;
-        }
-
-        /*
-         * Interrupt priority level of preempted thread is recorded
-         * here to facilitate scheduler() invocations from
-         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
-         *
-         */
-        THREAD->saved_ipl = ipl;
+    assert(THREAD != NULL);
+
+    fpu_cleanup();
+
+    irq_spinlock_lock(&THREAD->lock, false);
+    THREAD->state = new_state;
+
+    /* Update thread kernel accounting */
+    THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+
+    if (!context_save(&THREAD->saved_context)) {
+        /*
+         * This is the place where threads leave scheduler();
+         */
+
+        irq_spinlock_unlock(&THREAD->lock, false);
+        interrupts_restore(ipl);
+        return;
     }
 
…
     assert(interrupts_disabled());
 
+    if (atomic_load(&haltstate))
+        halt();
+
     if (THREAD) {
         after_thread_ran_arch();
…
          *
          */
-        scheduler();
+        thread_yield();
     } else {
         /*
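Two techniques carry this hunk. First, scheduler_run() manufactures a context with context_set() so that context_restore() lands in scheduler_separated_stack() on the dedicated per-CPU stack. Second, scheduler_enter() relies on context_save() returning twice: 1 on the direct call, and 0 later, when another context restores it, which is "the place where threads leave scheduler()". The same two-return control flow can be demonstrated in ordinary user space with setjmp()/longjmp(), whose convention is inverted (0 on the direct call). A runnable analogue, purely illustrative and not HelenOS code:

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf saved_context;

    int main(void)
    {
        if (setjmp(saved_context) == 0) {
            /* First return, direct: analogous to context_save()
             * returning 1 inside scheduler_enter(). */
            printf("context saved; scheduler would pick another thread\n");
            longjmp(saved_context, 1);  /* analogous to context_restore() */
        }
        /* Second return, via longjmp: analogous to context_save()
         * returning 0, where the resumed thread leaves the scheduler. */
        printf("context restored; thread resumes here\n");
        return 0;
    }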
kernel/generic/src/proc/thread.c
r8996582 → r151c050

     current_initialize((current_t *) thread->kstack);
 
-    ipl_t ipl = interrupts_disable();
-    thread->saved_ipl = interrupts_read();
-    interrupts_restore(ipl);
-
     str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
 
…
     }
 
-    irq_spinlock_lock(&THREAD->lock, true);
-    THREAD->state = Exiting;
-    irq_spinlock_unlock(&THREAD->lock, true);
-
-    scheduler();
-
-    panic("should never be reached");
+    scheduler_enter(Exiting);
+    unreachable();
 }
…
     }
 
-    ipl_t ipl = interrupts_disable();
-    irq_spinlock_lock(&THREAD->lock, false);
-    THREAD->state = Sleeping;
-    scheduler_locked(ipl);
+    scheduler_enter(Sleeping);
 
     if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
…
 
     (void) waitq_sleep_timeout(&wq, usec);
+}
+
+/** Allow other threads to run. */
+void thread_yield(void)
+{
+    assert(THREAD != NULL);
+    scheduler_enter(Running);
 }
 
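All three call sites now compress to a single scheduler_enter(new_state) call: the lock acquisition and state assignment that thread_exit() and thread_sleep() used to do by hand happen inside the scheduler. The state argument alone distinguishes the cases, as in this hedged sketch (thread_yield() is verbatim from the hunk above; block_self() is a made-up name, and the include paths assume the HelenOS kernel tree):

    #include <assert.h>
    #include <proc/scheduler.h>

    /* Running: the thread is requeued immediately and will run again
     * without outside help. */
    void thread_yield(void)
    {
        assert(THREAD != NULL);
        scheduler_enter(Running);
    }

    /* Sleeping: the thread stays off the run queues until some other
     * thread wakes it (e.g. via a wait queue). */
    static void block_self(void)   /* hypothetical name */
    {
        scheduler_enter(Sleeping);
    }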
kernel/generic/src/time/clock.c
r8996582 → r151c050

     if (THREAD) {
         if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
-            scheduler();
+            thread_yield();
 #ifdef CONFIG_UDEBUG
             /*