Changeset c7ceacf in mainline for kernel/generic/src/proc/thread.c
- Timestamp:
- 2024-01-15T14:54:17Z (12 months ago)
- Branches:
- master
- Children:
- 5861b60
- Parents:
- 4760793 (diff), 151c050 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/proc/thread.c
r4760793 rc7ceacf 82 82 }; 83 83 84 enum sleep_state {85 SLEEP_INITIAL,86 SLEEP_ASLEEP,87 SLEEP_WOKE,88 };89 90 84 /** Lock protecting the @c threads ordered dictionary . 91 85 * … … 127 121 void (*f)(void *) = THREAD->thread_code; 128 122 void *arg = THREAD->thread_arg; 129 THREAD->last_cycle = get_cycle();130 123 131 124 /* This is where each thread wakes up after its creation */ … … 320 313 321 314 current_initialize((current_t *) thread->kstack); 322 323 ipl_t ipl = interrupts_disable();324 thread->saved_ipl = interrupts_read();325 interrupts_restore(ipl);326 315 327 316 str_cpy(thread->name, THREAD_NAME_BUFLEN, name); … … 525 514 } 526 515 527 irq_spinlock_lock(&THREAD->lock, true); 528 THREAD->state = Exiting; 529 irq_spinlock_unlock(&THREAD->lock, true); 530 531 scheduler(); 532 533 panic("should never be reached"); 516 scheduler_enter(Exiting); 517 unreachable(); 534 518 } 535 519 … … 579 563 } 580 564 581 static void thread_wait_internal(void)582 {583 assert(THREAD != NULL);584 585 ipl_t ipl = interrupts_disable();586 587 if (atomic_load(&haltstate))588 halt();589 590 /*591 * Lock here to prevent a race between entering the scheduler and another592 * thread rescheduling this thread.593 */594 irq_spinlock_lock(&THREAD->lock, false);595 596 int expected = SLEEP_INITIAL;597 598 /* Only set SLEEP_ASLEEP in sleep pad if it's still in initial state */599 if (atomic_compare_exchange_strong_explicit(&THREAD->sleep_state, &expected,600 SLEEP_ASLEEP, memory_order_acq_rel, memory_order_acquire)) {601 THREAD->state = Sleeping;602 scheduler_locked(ipl);603 } else {604 assert(expected == SLEEP_WOKE);605 /* Return immediately. */606 irq_spinlock_unlock(&THREAD->lock, false);607 interrupts_restore(ipl);608 }609 }610 611 565 static void thread_wait_timeout_callback(void *arg) 612 566 { … … 649 603 timeout_t timeout; 650 604 605 /* Extra check to avoid going to scheduler if we don't need to. 
*/ 606 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) != 607 SLEEP_INITIAL) 608 return THREAD_WAIT_SUCCESS; 609 651 610 if (deadline != DEADLINE_NEVER) { 652 /* Extra check to avoid setting up a deadline if we don't need to. */653 if (atomic_load_explicit(&THREAD->sleep_state, memory_order_acquire) !=654 SLEEP_INITIAL)655 return THREAD_WAIT_SUCCESS;656 657 611 timeout_initialize(&timeout); 658 612 timeout_register_deadline(&timeout, deadline, … … 660 614 } 661 615 662 thread_wait_internal();616 scheduler_enter(Sleeping); 663 617 664 618 if (deadline != DEADLINE_NEVER && !timeout_unregister(&timeout)) { … … 674 628 675 629 int state = atomic_exchange_explicit(&thread->sleep_state, SLEEP_WOKE, 676 memory_order_ release);630 memory_order_acq_rel); 677 631 678 632 if (state == SLEEP_ASLEEP) { … … 770 724 771 725 (void) waitq_sleep_timeout(&wq, usec); 726 } 727 728 /** Allow other threads to run. */ 729 void thread_yield(void) 730 { 731 assert(THREAD != NULL); 732 scheduler_enter(Running); 772 733 } 773 734
Note:
See TracChangeset
for help on using the changeset viewer.