Changeset bea6233 in mainline


Timestamp:
2023-02-26T15:27:13Z
Author:
Jiří Zárevúcky <zarevucky.jiri@…>
Branches:
master, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
7cf5ddb
Parents:
deed510
git-author:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-26 15:18:02)
git-committer:
Jiří Zárevúcky <zarevucky.jiri@…> (2023-02-26 15:27:13)
Message:

Replace cpu_sleep() with cpu_interruptible_sleep()

The new function combines re-enabling of interrupts with going to sleep,
so that a platform can implement the sequence atomically.
An atomic implementation is currently provided only on ia32 and amd64.

Location:
kernel
Files:
11 edited
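
The essence of the change, condensed from the hunks below: the generic fallback cannot rule out an interrupt slipping in between enabling interrupts and halting, while the ia32/amd64 sequence can. The following sketch is distilled from the diffs in this changeset (both bodies appear verbatim below); it is illustrative only, since the two variants are alternative definitions of the same function on different architectures and would not compile in one translation unit.

    /* Generic fallback (abs32le, arm32, arm64, ia64, mips32, sparc64):
     * NOT atomic. A wakeup interrupt delivered in the window below is
     * handled and dismissed before cpu_sleep() runs, so the CPU halts
     * even though a thread has just become ready. */
    _NO_TRACE static inline void cpu_interruptible_sleep(void)
    {
            interrupts_enable();
            /* <-- window: a wakeup arriving here is consumed too early */
            cpu_sleep();
            interrupts_disable();
    }

    /* ia32/amd64: atomic. "sti" takes effect only after the following
     * instruction, so no interrupt can be taken between it and "hlt";
     * a pending wakeup instead terminates the "hlt" itself. */
    _NO_TRACE static inline void cpu_interruptible_sleep(void)
    {
            asm volatile (
                "sti\n"
                "hlt\n"
                "cli\n"
            );
    }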

  • kernel/arch/abs32le/include/arch/asm.h

     }

    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        interrupts_enable();
    +        cpu_sleep();
    +        interrupts_disable();
    +}
    +
     #endif
  • kernel/arch/amd64/include/arch/asm.h

     #define IO_SPACE_BOUNDARY       ((void *) (64 * 1024))

    -_NO_TRACE static inline void cpu_sleep(void)
    -{
    -        asm volatile (
    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        /*
    +         * On x86, "sti" enables interrupts after the next instruction.
    +         * Therefore, this sequence is guaranteed to be atomic.
    +         */
    +        asm volatile (
    +            "sti\n"
                 "hlt\n"
    +            "cli\n"
             );
     }
  • kernel/arch/arm32/include/arch/asm.h

     }

    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        // FIXME: do this atomically
    +        interrupts_enable();
    +        cpu_sleep();
    +        interrupts_disable();
    +}
    +
     _NO_TRACE static inline void cpu_spin_hint(void)
     {
  • kernel/arch/arm64/include/arch/asm.h

     {
             asm volatile ("wfe");
    +}
    +
    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        // FIXME: do this atomically
    +        interrupts_enable();
    +        cpu_sleep();
    +        interrupts_disable();
     }
  • kernel/arch/ia32/include/arch/asm.h

     }

    -_NO_TRACE static inline void cpu_sleep(void)
    -{
    -        asm volatile (
    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        asm volatile (
    +            "sti\n"
                 "hlt\n"
    +            "cli\n"
             );
     }
  • kernel/arch/ia64/include/arch/asm.h

         uint64_t, uint64_t);

    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        // FIXME: do this properly
    +        interrupts_enable();
    +        cpu_sleep();
    +        interrupts_disable();
    +}
    +
     #endif
  • kernel/arch/mips32/include/arch/asm.h

     extern bool interrupts_disabled(void);

    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        // FIXME: do this atomically
    +        interrupts_enable();
    +        cpu_sleep();
    +        interrupts_disable();
    +}
    +
     #endif
  • kernel/arch/ppc32/include/arch/asm.h

     }

    -_NO_TRACE static inline void cpu_sleep(void)
    -{
    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        // FIXME: do this properly
    +        interrupts_enable();
    +        interrupts_disable();
     }
  • kernel/arch/riscv64/include/arch/asm.h

     }

    -_NO_TRACE static inline void cpu_sleep(void)
    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
     {
    +        // FIXME: do this properly
    +        interrupts_enable();
    +        interrupts_disable();
     }
  • kernel/arch/sparc64/include/arch/asm.h

     extern void switch_to_userspace(uint64_t pc, uint64_t sp, uint64_t uarg);

    +/** Enables interrupts and blocks until an interrupt arrives,
    + * atomically if possible on target architecture.
    + * Disables interrupts again before returning to caller.
    + */
    +_NO_TRACE static inline void cpu_interruptible_sleep(void)
    +{
    +        // FIXME: do this atomically
    +        interrupts_enable();
    +        cpu_sleep();
    +        interrupts_disable();
    +}
    +
     #endif
  • kernel/generic/src/proc/scheduler.c

     loop:
    -
             if (atomic_load(&CPU->nrdy) == 0) {
                     /*
     …
                     CPU->idle = true;
                     irq_spinlock_unlock(&CPU->lock, false);
    -                interrupts_enable();

                     /*
    -                 * An interrupt might occur right now and wake up a thread.
    -                 * In such case, the CPU will continue to go to sleep
    -                 * even though there is a runnable thread.
    +                 * Go to sleep with interrupts enabled.
    +                 * Ideally, this should be atomic, but this is not guaranteed
    +                 * on all platforms yet, so it is possible that we will go to
    +                 * sleep when a thread has just become available.
                      */
    -                cpu_sleep();
    -                interrupts_disable();
    +                cpu_interruptible_sleep();
    +
    +                /* Interrupts are disabled again. */
                     goto loop;
             }
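
For readability, the scheduler's idle path after this changeset reads roughly as follows. This is reassembled from the hunk above, not additional code from the changeset; the lines Trac elides between the two hunks are represented by the ellipsis comment.

    loop:
            if (atomic_load(&CPU->nrdy) == 0) {
                    /* ... lines elided in the changeset view ... */
                    CPU->idle = true;
                    irq_spinlock_unlock(&CPU->lock, false);

                    /*
                     * Go to sleep with interrupts enabled.
                     * Ideally, this should be atomic, but this is not
                     * guaranteed on all platforms yet, so it is possible
                     * that we will go to sleep when a thread has just
                     * become available.
                     */
                    cpu_interruptible_sleep();

                    /* Interrupts are disabled again. */
                    goto loop;
            }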