Changes in / [c7ceacf:4760793] in mainline
- Location: kernel/generic
- Files: 6 edited
Legend: lines prefixed with - appear only in c7ceacf, lines prefixed with + only in 4760793; unprefixed lines are unchanged context, and … marks omitted unchanged lines.
kernel/generic/include/proc/scheduler.h
 #include <atomic.h>
 #include <adt/list.h>
-#include <abi/proc/thread.h>

 #define RQ_COUNT 16
…
 extern void scheduler_fpu_lazy_request(void);
+extern void scheduler(void);
+extern void scheduler_locked(ipl_t);
 extern void kcpulb(void *arg);

 extern void sched_print_list(void);

-extern void scheduler_run(void) __attribute__((noreturn));
-extern void scheduler_enter(state_t);

 /*
kernel/generic/include/proc/thread.h
      */
     context_t saved_context;
+    ipl_t saved_ipl;

     /**
…
 extern void thread_interrupt(thread_t *);

-enum sleep_state {
-    SLEEP_INITIAL,
-    SLEEP_ASLEEP,
-    SLEEP_WOKE,
-};
-
 typedef enum {
     THREAD_OK,
…
 extern errno_t thread_join_timeout(thread_t *, uint32_t, unsigned int);

-extern void thread_yield(void);
-
 extern void thread_print_list(bool);
 extern thread_t *thread_find_by_id(thread_id_t);
kernel/generic/src/main/main.c
     /*
-     * This call to scheduler_run() will return to kinit,
+     * This call to scheduler() will return to kinit,
      * starting the thread of kernel threads.
      */
-    scheduler_run();
+    scheduler();
     /* not reached */
 }
…
     semaphore_up(&ap_completion_semaphore);
-    scheduler_run();
+    scheduler();
     /* not reached */
 }
kernel/generic/src/proc/scheduler.c
 atomic_size_t nrdy;  /**< Number of ready threads in the system. */

+/** Take actions before new thread runs.
+ *
+ * Perform actions that need to be
+ * taken before the newly selected
+ * thread is passed control.
+ *
+ * THREAD->lock is locked on entry
+ *
+ */
+static void before_thread_runs(void)
+{
+    before_thread_runs_arch();
+
+#ifdef CONFIG_FPU_LAZY
+    /*
+     * The only concurrent modification possible for fpu_owner here is
+     * another thread changing it from itself to NULL in its destructor.
+     */
+    thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
+        memory_order_relaxed);
+
+    if (THREAD == owner)
+        fpu_enable();
+    else
+        fpu_disable();
+#elif defined CONFIG_FPU
+    fpu_enable();
+    if (THREAD->fpu_context_exists)
+        fpu_context_restore(&THREAD->fpu_context);
+    else {
+        fpu_init();
+        THREAD->fpu_context_exists = true;
+    }
+#endif
+
+#ifdef CONFIG_UDEBUG
+    if (THREAD->btrace) {
+        istate_t *istate = THREAD->udebug.uspace_state;
+        if (istate != NULL) {
+            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
+            stack_trace_istate(istate);
+        }
+
+        THREAD->btrace = false;
+    }
+#endif
+}
+
+/** Take actions after THREAD had run.
+ *
+ * Perform actions that need to be
+ * taken after the running thread
+ * had been preempted by the scheduler.
+ *
+ * THREAD->lock is locked on entry
+ *
+ */
+static void after_thread_ran(void)
+{
+    after_thread_ran_arch();
+}
+
 #ifdef CONFIG_FPU_LAZY
 void scheduler_fpu_lazy_request(void)
…
     list_remove(&thread->rq_link);

-    irq_spinlock_unlock(&(CPU->rq[i].lock), false);
+    irq_spinlock_pass(&(CPU->rq[i].lock), &thread->lock);
+
+    thread->cpu = CPU;
+    thread->priority = i;  /* Correct rq index */
+
+    /* Time allocation in microseconds. */
+    uint64_t time_to_run = (i + 1) * 10000;
+
+    /* This is safe because interrupts are disabled. */
+    CPU_LOCAL->preempt_deadline =
+        CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
+
+    /*
+     * Clear the stolen flag so that it can be migrated
+     * when load balancing needs emerge.
+     */
+    thread->stolen = false;
+    irq_spinlock_unlock(&thread->lock, false);

     *rq_index = i;
…
 }

-/**
- * Do whatever needs to be done with current FPU state before we switch to
- * another thread.
- */
-static void fpu_cleanup(void)
-{
-#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
-    fpu_context_save(&THREAD->fpu_context);
-#endif
-}
-
-/**
- * Set correct FPU state for this thread after switch from another thread.
- */
-static void fpu_restore(void)
-{
-#ifdef CONFIG_FPU_LAZY
-    /*
-     * The only concurrent modification possible for fpu_owner here is
-     * another thread changing it from itself to NULL in its destructor.
-     */
-    thread_t *owner = atomic_load_explicit(&CPU->fpu_owner,
-        memory_order_relaxed);
-
-    if (THREAD == owner)
-        fpu_enable();
-    else
-        fpu_disable();
-
-#elif defined CONFIG_FPU
-    fpu_enable();
-    if (THREAD->fpu_context_exists)
-        fpu_context_restore(&THREAD->fpu_context);
-    else {
-        fpu_init();
-        THREAD->fpu_context_exists = true;
-    }
-#endif
-}
-
-void scheduler_run(void)
-{
-    assert(interrupts_disabled());
-    assert(THREAD == NULL);
-    assert(CPU != NULL);
-
-    current_copy(CURRENT, (current_t *) CPU_LOCAL->stack);
-
-    context_t ctx;
-    context_save(&ctx);
-    context_set(&ctx, FADDR(scheduler_separated_stack),
-        (uintptr_t) CPU_LOCAL->stack, STACK_SIZE);
-    context_restore(&ctx);
-
-    unreachable();
-}
-
-/** Things to do before we switch to THREAD context.
- */
-static void prepare_to_run_thread(int rq_index)
-{
-    relink_rq(rq_index);
-
-    switch_task(THREAD->task);
-
-    irq_spinlock_lock(&THREAD->lock, false);
-    THREAD->state = Running;
-    THREAD->cpu = CPU;
-    THREAD->priority = rq_index;  /* Correct rq index */
-
-    /*
-     * Clear the stolen flag so that it can be migrated
-     * when load balancing needs emerge.
-     */
-    THREAD->stolen = false;
-
-#ifdef SCHEDULER_VERBOSE
-    log(LF_OTHER, LVL_DEBUG,
-        "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
-        ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
-        THREAD->ticks, atomic_load(&CPU->nrdy));
-#endif
-
-    /*
-     * Some architectures provide late kernel PA2KA(identity)
-     * mapping in a page fault handler. However, the page fault
-     * handler uses the kernel stack of the running thread and
-     * therefore cannot be used to map it. The kernel stack, if
-     * necessary, is to be mapped in before_thread_runs(). This
-     * function must be executed before the switch to the new stack.
-     */
-    before_thread_runs_arch();
-
-#ifdef CONFIG_UDEBUG
-    if (THREAD->btrace) {
-        istate_t *istate = THREAD->udebug.uspace_state;
-        if (istate != NULL) {
-            printf("Thread %" PRIu64 " stack trace:\n", THREAD->tid);
-            stack_trace_istate(istate);
-        }
-
-        THREAD->btrace = false;
-    }
-#endif
-
-    fpu_restore();
-
-    /* Time allocation in microseconds. */
-    uint64_t time_to_run = (rq_index + 1) * 10000;
-
-    /* Set the time of next preemption. */
-    CPU_LOCAL->preempt_deadline =
-        CPU_LOCAL->current_clock_tick + us2ticks(time_to_run);
-
-    /* Save current CPU cycle */
-    THREAD->last_cycle = get_cycle();
-}
-
-static void cleanup_after_thread(thread_t *thread, state_t out_state)
-{
-    assert(CURRENT->mutex_locks == 0);
-    assert(interrupts_disabled());
-
-    int expected;
-
-    switch (out_state) {
-    case Running:
-        thread_ready(thread);
-        break;
-
-    case Exiting:
-        waitq_close(&thread->join_wq);
-
-        /*
-         * Release the reference CPU has for the thread.
-         * If there are no other references (e.g. threads calling join),
-         * the thread structure is deallocated.
-         */
-        thread_put(thread);
-        break;
-
-    case Sleeping:
-        expected = SLEEP_INITIAL;
-
-        /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
-        if (!atomic_compare_exchange_strong_explicit(&thread->sleep_state,
-            &expected, SLEEP_ASLEEP,
-            memory_order_acq_rel, memory_order_acquire)) {
-
-            assert(expected == SLEEP_WOKE);
-            /* The thread has already been woken up, requeue immediately. */
-            thread_ready(thread);
-        }
-        break;
-
-    default:
-        /*
-         * Entering state is unexpected.
-         */
-        panic("tid%" PRIu64 ": unexpected state %s.",
-            thread->tid, thread_states[thread->state]);
-        break;
-    }
+void scheduler(void)
+{
+    ipl_t ipl = interrupts_disable();
+
+    if (atomic_load(&haltstate))
+        halt();
+
+    if (THREAD) {
+        irq_spinlock_lock(&THREAD->lock, false);
+    }
+
+    scheduler_locked(ipl);
 }

…
  *
  */
-void scheduler_enter(state_t new_state)
-{
-    ipl_t ipl = interrupts_disable();
-
+void scheduler_locked(ipl_t ipl)
+{
     assert(CPU != NULL);
-    assert(THREAD != NULL);
-
-    fpu_cleanup();
-
-    irq_spinlock_lock(&THREAD->lock, false);
-    THREAD->state = new_state;
-
-    /* Update thread kernel accounting */
-    THREAD->kcycles += get_cycle() - THREAD->last_cycle;
-
-    if (!context_save(&THREAD->saved_context)) {
-        /*
-         * This is the place where threads leave scheduler();
-         */
-
-        irq_spinlock_unlock(&THREAD->lock, false);
-        interrupts_restore(ipl);
-        return;
+
+    if (THREAD) {
+        /* Update thread kernel accounting */
+        THREAD->kcycles += get_cycle() - THREAD->last_cycle;
+
+#if (defined CONFIG_FPU) && (!defined CONFIG_FPU_LAZY)
+        fpu_context_save(&THREAD->fpu_context);
+#endif
+        if (!context_save(&THREAD->saved_context)) {
+            /*
+             * This is the place where threads leave scheduler();
+             */
+
+            /* Save current CPU cycle */
+            THREAD->last_cycle = get_cycle();
+
+            irq_spinlock_unlock(&THREAD->lock, false);
+            interrupts_restore(THREAD->saved_ipl);
+
+            return;
+        }
+
+        /*
+         * Interrupt priority level of preempted thread is recorded
+         * here to facilitate scheduler() invocations from
+         * interrupts_disable()'d code (e.g. waitq_sleep_timeout()).
+         *
+         */
+        THREAD->saved_ipl = ipl;
     }

…
     assert(interrupts_disabled());

-    if (atomic_load(&haltstate))
-        halt();
-
     if (THREAD) {
-        after_thread_ran_arch();
-
-        state_t state = THREAD->state;
-
-        if (state == Sleeping) {
-            /* Prefer the thread after it's woken up. */
+        /* Must be run after the switch to scheduler stack */
+        after_thread_ran();
+
+        switch (THREAD->state) {
+        case Running:
+            irq_spinlock_unlock(&THREAD->lock, false);
+            thread_ready(THREAD);
+            break;
+
+        case Exiting:
+            irq_spinlock_unlock(&THREAD->lock, false);
+            waitq_close(&THREAD->join_wq);
+
+            /*
+             * Release the reference CPU has for the thread.
+             * If there are no other references (e.g. threads calling join),
+             * the thread structure is deallocated.
+             */
+            thread_put(THREAD);
+            break;
+
+        case Sleeping:
+            /*
+             * Prefer the thread after it's woken up.
+             */
             THREAD->priority = -1;
+            irq_spinlock_unlock(&THREAD->lock, false);
+            break;
+
+        default:
+            /*
+             * Entering state is unexpected.
+             */
+            panic("tid%" PRIu64 ": unexpected state %s.",
+                THREAD->tid, thread_states[THREAD->state]);
+            break;
         }
-
-        irq_spinlock_unlock(&THREAD->lock, false);
-
-        cleanup_after_thread(THREAD, state);

         THREAD = NULL;
…
     THREAD = find_best_thread(&rq_index);

-    prepare_to_run_thread(rq_index);
+    relink_rq(rq_index);
+
+    switch_task(THREAD->task);
+
+    irq_spinlock_lock(&THREAD->lock, false);
+    THREAD->state = Running;
+
+#ifdef SCHEDULER_VERBOSE
+    log(LF_OTHER, LVL_DEBUG,
+        "cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
+        ", nrdy=%zu)", CPU->id, THREAD->tid, THREAD->priority,
+        THREAD->ticks, atomic_load(&CPU->nrdy));
+#endif
+
+    /*
+     * Some architectures provide late kernel PA2KA(identity)
+     * mapping in a page fault handler. However, the page fault
+     * handler uses the kernel stack of the running thread and
+     * therefore cannot be used to map it. The kernel stack, if
+     * necessary, is to be mapped in before_thread_runs(). This
+     * function must be executed before the switch to the new stack.
+     */
+    before_thread_runs();

     /*
…
      *
      */
-    thread_yield();
+    scheduler();
 } else {
     /*
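The run-queue hunks above size a thread's time slice from its run-queue index, time_to_run = (i + 1) * 10000 microseconds, and convert it into a preemption deadline in clock ticks that clock.c later compares against current_clock_tick. A minimal user-space sketch of that arithmetic follows; the HZ value and the local us2ticks() helper are assumptions made only for this illustration and are not taken from the changeset.

/*
 * Standalone illustration only -- not HelenOS kernel code. It mirrors the
 * time-slice arithmetic from the changeset: run queue i gets
 * (i + 1) * 10000 microseconds before the clock interrupt preempts the
 * thread. HZ and us2ticks() below are local assumptions for the demo.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RQ_COUNT 16
#define HZ       100  /* assumed clock tick rate: one tick per 10 ms */

static uint64_t us2ticks(uint64_t us)
{
    return us * HZ / 1000000;
}

int main(void)
{
    uint64_t current_clock_tick = 1000;  /* arbitrary starting tick */

    for (int i = 0; i < RQ_COUNT; i++) {
        /* Time allocation in microseconds, as in the diff above. */
        uint64_t time_to_run = (i + 1) * 10000;
        uint64_t preempt_deadline =
            current_clock_tick + us2ticks(time_to_run);

        printf("rq %2d: slice %3" PRIu64 " ms, preempt at tick %" PRIu64 "\n",
            i, time_to_run / 1000, preempt_deadline);
    }
    return 0;
}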
kernel/generic/src/proc/thread.c
 };

+enum sleep_state {
+    SLEEP_INITIAL,
+    SLEEP_ASLEEP,
+    SLEEP_WOKE,
+};
+
 /** Lock protecting the @c threads ordered dictionary.
  *
…
     void (*f)(void *) = THREAD->thread_code;
     void *arg = THREAD->thread_arg;
+    THREAD->last_cycle = get_cycle();

     /* This is where each thread wakes up after its creation */
…

     current_initialize((current_t *) thread->kstack);
+
+    ipl_t ipl = interrupts_disable();
+    thread->saved_ipl = interrupts_read();
+    interrupts_restore(ipl);

     str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
…
     }

-    scheduler_enter(Exiting);
-    unreachable();
+    irq_spinlock_lock(&THREAD->lock, true);
+    THREAD->state = Exiting;
+    irq_spinlock_unlock(&THREAD->lock, true);
+
+    scheduler();
+
+    panic("should never be reached");
 }
…

     return THREAD->interrupted ? THREAD_TERMINATING : THREAD_OK;
+}
+
+static void thread_wait_internal(void)
+{
+    assert(THREAD != NULL);
+
+    ipl_t ipl = interrupts_disable();
+
+    if (atomic_load(&haltstate))
+        halt();
+
+    /*
+     * Lock here to prevent a race between entering the scheduler and another
+     * thread rescheduling this thread.
+     */
+    irq_spinlock_lock(&THREAD->lock, false);
+
+    int expected = SLEEP_INITIAL;
+
+    /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */
+    if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,
+        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
+        THREAD->state = Sleeping;
+        scheduler_locked(ipl);
+    } else {
+        assert(expected == SLEEP_WOKE);
+        /* Return immediately. */
+        irq_spinlock_unlock(&THREAD->lock, false);
+        interrupts_restore(ipl);
+    }
 }
…
     timeout_t timeout;

-    /* Extra check to avoid going to scheduler if we don't need to. */
-    if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
-        SLEEP_INITIAL)
-        return THREAD_WAIT_SUCCESS;
-
     if (deadline != DEADLINE_NEVER) {
+        /* Extra check to avoid setting up a deadline if we don't need to. */
+        if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=
+            SLEEP_INITIAL)
+            return THREAD_WAIT_SUCCESS;
+
         timeout_initialize(&timeout);
         timeout_register_deadline(&timeout, deadline,
…
     }

-    scheduler_enter(Sleeping);
+    thread_wait_internal();

     if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) {
…

     int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE,
-        memory_order_acq_rel);
+        memory_order_release);

     if (state == SLEEP_ASLEEP) {
…

     (void) waitq_sleep_timeout(&wq, usec);
-}
-
-/** Allow other threads to run. */
-void thread_yield(void)
-{
-    assert(THREAD != NULL);
-    scheduler_enter(Running);
 }
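The sleep_state changes above (thread_wait_internal() on the sleeper side, thread_wakeup() on the waker side) resolve the sleep/wakeup race with a single atomic variable: the sleeper parks only if it can move SLEEP_INITIAL to SLEEP_ASLEEP, and the waker unparks it only if its exchange to SLEEP_WOKE observed SLEEP_ASLEEP. The following user-space sketch mirrors that handshake with C11 atomics; the POSIX threads and the semaphore standing in for the scheduler's context switch are illustration-only assumptions, not HelenOS APIs.

/*
 * Standalone sketch of the SLEEP_INITIAL/SLEEP_ASLEEP/SLEEP_WOKE handshake.
 * The semaphore models "the thread is off the run queue"; everything else
 * follows the compare-exchange/exchange pattern from the changeset.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

enum { SLEEP_INITIAL, SLEEP_ASLEEP, SLEEP_WOKE };

static atomic_int sleep_state = SLEEP_INITIAL;
static sem_t parked;

static void *sleeper(void *arg)
{
    (void) arg;
    int expected = SLEEP_INITIAL;

    /* Only park if nobody has woken us up in the meantime. */
    if (atomic_compare_exchange_strong_explicit(&sleep_state, &expected,
        SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {
        puts("sleeper: parked, waiting for wakeup");
        sem_wait(&parked);
    } else {
        /* expected == SLEEP_WOKE: wakeup already happened, do not block. */
        puts("sleeper: woken before parking, returning immediately");
    }
    return NULL;
}

static void *waker(void *arg)
{
    (void) arg;
    int prev = atomic_exchange_explicit(&sleep_state, SLEEP_WOKE,
        memory_order_release);

    /* Only unpark if the sleeper actually managed to park. */
    if (prev == SLEEP_ASLEEP)
        sem_post(&parked);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    sem_init(&parked, 0, 0);
    pthread_create(&a, NULL, sleeper, NULL);
    pthread_create(&b, NULL, waker, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    sem_destroy(&parked);
    return 0;
}

Whichever thread touches sleep_state first wins: either the sleeper parks and is later released, or the waker marks SLEEP_WOKE first and the sleeper never blocks, so no wakeup is lost in any interleaving.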
kernel/generic/src/time/clock.c
     if (THREAD) {
         if (current_clock_tick >= CPU_LOCAL->preempt_deadline && PREEMPTION_ENABLED) {
-            thread_yield();
+            scheduler();
 #ifdef CONFIG_UDEBUG
             /*