Changeset [c0d814a:a92290d] in mainline


Location: kernel/generic
Files: 11 edited

Legend (markers used in the diffs below):

      unchanged line
    + line added in a92290d
    - line removed in a92290d
    … elided unchanged lines
  • kernel/generic/include/console/console.h

      extern sysarg_t sys_debug_console(void);

    - extern void console_lock(void);
    - extern void console_unlock(void);
    -
      #endif /* KERN_CONSOLE_H_ */
  • kernel/generic/include/synch/condvar.h

      /*
       * Copyright (c) 2001-2004 Jakub Jermar
    -  * Copyright (c) 2025 Jiří Zárevúcky
       * All rights reserved.
       *
    …
              condvar_t name = CONDVAR_INITIALIZER(name)

    + #ifdef CONFIG_SMP
    + #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
    +         _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags))
    + #else
    + #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
    +         _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags))
    + #endif
    +
      extern void condvar_initialize(condvar_t *cv);
      extern void condvar_signal(condvar_t *cv);
      extern void condvar_broadcast(condvar_t *cv);

    - extern errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx);
    - extern errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx);
    - extern errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx);
    - extern errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec);
    - extern errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *mtx, uint32_t usec);
    - extern errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx, uint32_t usec);
    + extern errno_t condvar_wait(condvar_t *cv, mutex_t *mtx);
    + extern errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec);

    - #define condvar_wait(cv, mtx) (_Generic((mtx), \
    -         mutex_t *: __condvar_wait_mutex, \
    -         spinlock_t *: __condvar_wait_spinlock, \
    -         irq_spinlock_t *: __condvar_wait_irq_spinlock \
    - )(cv, mtx))
    -
    - #define condvar_wait_timeout(cv, mtx, usec) (_Generic((mtx), \
    -         mutex_t *: __condvar_wait_timeout_mutex, \
    -         spinlock_t *: __condvar_wait_timeout_spinlock, \
    -         irq_spinlock_t *: __condvar_wait_timeout_irq_spinlock \
    - )(cv, mtx))
    + extern errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    +     uint32_t usec, int flags);
    + extern errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv,
    +     irq_spinlock_t *irq_lock, uint32_t usec, int flags);

      #endif
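
The removed block is the `_Generic`-based API from c0d814a: a single `condvar_wait(cv, mtx)` macro dispatched at compile time to a type-specific wait function, based on the static type of the lock pointer. A minimal standalone sketch of that dispatch technique (the types and functions below are hypothetical stand-ins, not the kernel's own):

    #include <stdio.h>

    /* Stand-ins for the kernel's lock types (for illustration only). */
    typedef struct { int _x; } mutex_t;
    typedef struct { int _x; } spinlock_t;

    static int wait_mutex(mutex_t *m) { (void) m; return puts("mutex path"); }
    static int wait_spinlock(spinlock_t *s) { (void) s; return puts("spinlock path"); }

    /* One macro name, resolved at compile time on the lock's static type,
     * the same shape as the removed condvar_wait()/condvar_wait_timeout(). */
    #define wait_any(lock) (_Generic((lock), \
            mutex_t *: wait_mutex, \
            spinlock_t *: wait_spinlock \
    )(lock))

    int main(void)
    {
            mutex_t m = { 0 };
            spinlock_t s = { 0 };
            wait_any(&m);   /* selects wait_mutex */
            wait_any(&s);   /* selects wait_spinlock */
            return 0;
    }
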
  • kernel/generic/include/synch/mutex.h

              MUTEX_PASSIVE,
              MUTEX_RECURSIVE,
    +         MUTEX_ACTIVE
      } mutex_type_t;

    …
      typedef struct {
              mutex_type_t type;
    -         int nesting;
              semaphore_t sem;
    -         _Atomic(struct thread *) owner;
    +         struct thread *owner;
    +         unsigned nesting;
      } mutex_t;

      #define MUTEX_INITIALIZER(name, mtype) (mutex_t) { \
              .type = (mtype), \
    -         .nesting = 0, \
              .sem = SEMAPHORE_INITIALIZER((name).sem, 1), \
              .owner = NULL, \
    +         .nesting = 0, \
      }
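
After this change, `owner` is again a plain pointer and `nesting` an unsigned counter updated only while the mutex is held. A userspace model of how these two fields implement recursive locking (simplified; where the kernel would block on `mtx->sem`, this sketch just asserts the mutex is free):

    #include <assert.h>
    #include <stddef.h>

    /* Model of the owner/nesting bookkeeping in mutex_t above. */
    typedef struct {
            void *owner;       /* holding thread, NULL when unlocked */
            unsigned nesting;  /* lock depth for MUTEX_RECURSIVE */
    } rec_mutex_t;

    static void rec_lock(rec_mutex_t *m, void *self)
    {
            if (m->owner == self) {
                    m->nesting++;        /* recursive re-entry: just count */
                    return;
            }
            assert(m->owner == NULL);    /* kernel: semaphore_down(&mtx->sem) */
            m->owner = self;
            m->nesting = 1;
    }

    static void rec_unlock(rec_mutex_t *m, void *self)
    {
            assert(m->owner == self);
            if (--m->nesting == 0)
                    m->owner = NULL;     /* kernel: semaphore_up(&mtx->sem) */
    }
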
  • kernel/generic/src/console/console.c

       */

    - #include <abi/kio.h>
    - #include <arch.h>
      #include <assert.h>
    - #include <atomic.h>
    + #include <console/console.h>
      #include <console/chardev.h>
    - #include <console/console.h>
    + #include <sysinfo/sysinfo.h>
    + #include <synch/waitq.h>
    + #include <synch/spinlock.h>
    + #include <typedefs.h>
    + #include <ddi/irq.h>
      #include <ddi/ddi.h>
    - #include <ddi/irq.h>
    - #include <errno.h>
      #include <ipc/event.h>
      #include <ipc/irq.h>
    + #include <arch.h>
    + #include <panic.h>
    + #include <stdio.h>
    + #include <putchar.h>
    + #include <atomic.h>
    + #include <syscall/copy.h>
    + #include <errno.h>
    + #include <str.h>
    + #include <stdatomic.h>
    + #include <abi/kio.h>
      #include <mm/frame.h> /* SIZE2FRAMES */
    - #include <panic.h>
    - #include <preemption.h>
    - #include <proc/thread.h>
    - #include <putchar.h>
    - #include <stdatomic.h>
    - #include <stdio.h>
      #include <stdlib.h>  /* malloc */
    - #include <str.h>
    - #include <synch/mutex.h>
    - #include <synch/spinlock.h>
    - #include <synch/waitq.h>
    - #include <syscall/copy.h>
    - #include <sysinfo/sysinfo.h>
    - #include <typedefs.h>

      #define KIO_PAGES    8
    …
      /** Kernel log initialized */
      static atomic_bool kio_inited = ATOMIC_VAR_INIT(false);
    -
    - /** A mutex for preventing interleaving of output lines from different threads.
    -  * May not be held in some circumstances, so locking of any internal shared
    -  * structures is still necessary.
    -  */
    - static MUTEX_INITIALIZE(console_mutex, MUTEX_RECURSIVE);

      /** First kernel log characters */
    …
      }

    - /** Lock console output, ensuring that lines from different threads don't
    -  * interleave. Does nothing when preemption is disabled, so that debugging
    -  * and error printouts in sensitive areas still work.
    -  */
    - void console_lock(void)
    - {
    -         if (!PREEMPTION_DISABLED)
    -                 mutex_lock(&console_mutex);
    - }
    -
    - /** Unlocks console output. See console_lock()
    -  */
    - void console_unlock(void)
    - {
    -         if (!PREEMPTION_DISABLED)
    -                 mutex_unlock(&console_mutex);
    - }
    -
      /** @}
       */
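
The removed `console_lock()`/`console_unlock()` pair bracketed whole printouts rather than individual characters, and deliberately degraded to a no-op whenever preemption was disabled, so panic and debugging paths could still print. A hypothetical caller, to show the intended shape (the report function and its output are invented):

    /* Hold the console lock across a whole multi-line report so output
     * from other threads cannot interleave with it. In a panic or other
     * preemption-disabled context, console_lock() silently does nothing,
     * so this cannot deadlock there. */
    static void print_report(void)
    {
            console_lock();
            printf("subsystem state:\n");
            printf("  item A: ...\n");
            printf("  item B: ...\n");
            console_unlock();
    }
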
  • kernel/generic/src/log/log.c

      void log_begin(log_facility_t fac, log_level_t level)
      {
    -         console_lock();
              spinlock_lock(&log_lock);
              spinlock_lock(&kio_lock);
    …
              kio_update(NULL);
              log_update(NULL);
    -         console_unlock();
      }
  • kernel/generic/src/mm/frame.c

       * available.
       */
    - static IRQ_SPINLOCK_INITIALIZE(mem_avail_lock);
    + static MUTEX_INITIALIZE(mem_avail_mtx, MUTEX_ACTIVE);
      static CONDVAR_INITIALIZE(mem_avail_cv);
      static size_t mem_avail_req = 0;  /**< Number of frames requested. */
    …
      #endif

    -                 /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
    -                 irq_spinlock_lock(&mem_avail_lock, true);
    +                 /*
    +                  * Since the mem_avail_mtx is an active mutex, we need to
    +                  * disable interrupts to prevent deadlock with TLB shootdown.
    +                  */
    +                 ipl_t ipl = interrupts_disable();
    +                 mutex_lock(&mem_avail_mtx);

                      if (mem_avail_req > 0)
    …
                      while (gen == mem_avail_gen)
    -                         condvar_wait(&mem_avail_cv, &mem_avail_lock);
    -
    -                 irq_spinlock_unlock(&mem_avail_lock, true);
    +                         condvar_wait(&mem_avail_cv, &mem_avail_mtx);
    +
    +                 mutex_unlock(&mem_avail_mtx);
    +                 interrupts_restore(ipl);

      #ifdef CONFIG_DEBUG
    …
              irq_spinlock_unlock(&zones.lock, true);

    -         /* Signal that some memory has been freed. */
    -
    -         /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
    -         irq_spinlock_lock(&mem_avail_lock, true);
    +         /*
    +          * Signal that some memory has been freed.
    +          * Since the mem_avail_mtx is an active mutex,
    +          * we need to disable interruptsto prevent deadlock
    +          * with TLB shootdown.
    +          */
    +
    +         ipl_t ipl = interrupts_disable();
    +         mutex_lock(&mem_avail_mtx);

              if (mem_avail_req > 0)
    …
              }

    -         irq_spinlock_unlock(&mem_avail_lock, true);
    +         mutex_unlock(&mem_avail_mtx);
    +         interrupts_restore(ipl);

              if (!(flags & FRAME_NO_RESERVE))
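
Both hunks in frame.c replace the irq_spinlock with the same two-step discipline: disable interrupts by hand, then take the active (spinning) mutex; per the comments in the hunks, taking the lock with interrupts enabled could deadlock against TLB shootdown. Condensed from the added lines (the middle of the second hunk is elided in the diff and not reproduced here):

    /* Waiter side (allocation path), condensed from the first hunk: */
    ipl_t ipl = interrupts_disable();
    mutex_lock(&mem_avail_mtx);

    while (gen == mem_avail_gen)
            condvar_wait(&mem_avail_cv, &mem_avail_mtx);

    mutex_unlock(&mem_avail_mtx);
    interrupts_restore(ipl);

    /* Signaler side (free path), the same bracket around the elided logic: */
    ipl = interrupts_disable();
    mutex_lock(&mem_avail_mtx);

    if (mem_avail_req > 0) {
            /* ... elided in the hunk: account freed frames, wake waiters ... */
    }

    mutex_unlock(&mem_avail_mtx);
    interrupts_restore(ipl);
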
  • kernel/generic/src/printf/vprintf.c

       */

    - #include <arch/asm.h>
    - #include <console/console.h>
      #include <print.h>
      #include <printf/printf_core.h>
      #include <putchar.h>
    + #include <synch/spinlock.h>
    + #include <arch/asm.h>
    + #include <typedefs.h>
      #include <str.h>
    - #include <synch/spinlock.h>
    - #include <typedefs.h>

      static int vprintf_str_write(const char *str, size_t size, void *data)
    …
              char32_t uc;

    -         console_lock();
    -
              while ((uc = str_decode(str, &offset, STR_NO_LIMIT)) != 0) {
                      putuchar(uc);
    …

              putuchar('\n');
    -
    -         console_unlock();
              return chars;
      }
    …
              };

    -         console_lock();
    -         int ret = printf_core(fmt, &ps, ap);
    -         console_unlock();
    -         return ret;
    +         return printf_core(fmt, &ps, ap);
      }
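
vprintf is built around a sink callback: `printf_core()` hands formatted chunks to `vprintf_str_write()`, which decodes them and forwards characters to `putuchar()`. With the console lock gone from this layer, any serialization has to happen at or below `putuchar()`. A standalone model of that callback shape (only the callback signature mirrors `vprintf_str_write()`; the other names are invented):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Same shape as vprintf_str_write(): raw chunk in, count out. */
    typedef int (*str_write_fn)(const char *str, size_t size, void *data);

    static int stdout_write(const char *str, size_t size, void *data)
    {
            (void) data;
            return (int) fwrite(str, 1, size, stdout);
    }

    /* Toy formatter core: hands finished text to the sink, the way
     * printf_core() drives its write callbacks in the kernel. */
    static int format_core(const char *msg, str_write_fn write, void *data)
    {
            return write(msg, strlen(msg), data);
    }

    int main(void)
    {
            return format_core("hello\n", stdout_write, NULL) < 0;
    }
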
  • kernel/generic/src/synch/condvar.c

       * @return              See comment for waitq_sleep_timeout().
       */
    - errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec)
    + errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
      {
              wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    …
      }

    - errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx)
    + errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
      {
              wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    …
      }

    - /** Same as __condvar_wait_timeout_mutex(), except for spinlock_t. */
    - errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *lock,
    -     uint32_t usec)
    + /** Wait for the condition to become true with a locked spinlock.
    +  *
    +  * The function is not aware of irq_spinlock. Therefore do not even
    +  * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
    +  * instead.
    +  *
    +  * @param cv            Condition variable.
    +  * @param lock          Locked spinlock.
    +  * @param usec          Timeout value in microseconds.
    +  * @param flags         Select mode of operation.
    +  *
    +  * For exact description of meaning of possible combinations of usec and flags,
    +  * see comment for waitq_sleep_timeout().  Note that when
    +  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    +  * returned.
    +  *
    +  * @return See comment for waitq_sleep_timeout().
    +  */
    + errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    +     uint32_t usec, int flags)
      {
              wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    …
              spinlock_unlock(lock);

    -         errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec,
    -             SYNCH_FLAGS_NON_BLOCKING, guard);
    +         errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, guard);

              spinlock_lock(lock);
    …
      }

    - errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx)
    - {
    -         wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    -
    -         /* Unlock only after the waitq is locked so we don't miss a wakeup. */
    -         spinlock_unlock(mtx);
    -
    -         errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
    -
    -         spinlock_lock(mtx);
    -         return rc;
    - }
    -
    - /** Same as __condvar_wait_timeout_mutex(), except for irq_spinlock_t. */
    - errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv,
    -     irq_spinlock_t *irq_lock, uint32_t usec)
    + /** Wait for the condition to become true with a locked irq spinlock.
    +  *
    +  * @param cv            Condition variable.
    +  * @param lock          Locked irq spinlock.
    +  * @param usec          Timeout value in microseconds.
    +  * @param flags         Select mode of operation.
    +  *
    +  * For exact description of meaning of possible combinations of usec and flags,
    +  * see comment for waitq_sleep_timeout().  Note that when
    +  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    +  * returned.
    +  *
    +  * @return See comment for waitq_sleep_timeout().
    +  */
    + errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    +     uint32_t usec, int flags)
      {
              errno_t rc;
    …
               * running) and there is no danger of a deadlock.
               */
    -         rc = __condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec);
    -
    -         irq_lock->guard = guard;
    -         irq_lock->ipl = ipl;
    -
    -         return rc;
    - }
    -
    - /** Same as __condvar_wait_mutex(), except for irq_spinlock_t. */
    - errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock)
    - {
    -         errno_t rc;
    -         /* Save spinlock's state so we can restore it correctly later on. */
    -         ipl_t ipl = irq_lock->ipl;
    -         bool guard = irq_lock->guard;
    -
    -         irq_lock->guard = false;
    -
    -         rc = __condvar_wait_spinlock(cv, &irq_lock->lock);
    +         rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);

              irq_lock->guard = guard;
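
Both variants follow the standard condition-variable contract visible in the removed `__condvar_wait_spinlock()`: release the lock only after the wait queue is locked (so no wakeup can be lost), sleep, then re-acquire the lock before returning. Callers therefore re-check their predicate in a loop. The same contract in portable C11 threads, for comparison (a userspace analogue, not kernel code):

    #include <threads.h>

    static mtx_t lock;
    static cnd_t cond;
    static int ready = 0;

    /* Waiter: cnd_wait() atomically unlocks, sleeps, and relocks, so the
     * predicate must be re-checked in a loop (wakeups may be spurious or
     * consumed by another waiter). Compare the kernel's
     * "while (gen == mem_avail_gen) condvar_wait(...)" in frame.c above. */
    static void wait_ready(void)
    {
            mtx_lock(&lock);
            while (!ready)
                    cnd_wait(&cond, &lock);
            mtx_unlock(&lock);
    }

    /* Signaler: change the predicate under the lock, then wake a waiter. */
    static void set_ready(void)
    {
            mtx_lock(&lock);
            ready = 1;
            cnd_signal(&cond);
            mtx_unlock(&lock);
    }

    int main(void)
    {
            mtx_init(&lock, mtx_plain);
            cnd_init(&cond);
            set_ready();
            wait_ready();   /* returns immediately: predicate already true */
            return 0;
    }
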
  • kernel/generic/src/synch/mutex.c

      /*
       * Copyright (c) 2001-2004 Jakub Jermar
    -  * Copyright (c) 2025 Jiří Zárevúcky
    +  * Copyright (c) 2023 Jiří Zárevúcky
       * All rights reserved.
       *
    …
      #include <assert.h>
      #include <errno.h>
    - #include <proc/thread.h>
    - #include <stdatomic.h>
      #include <synch/mutex.h>
      #include <synch/semaphore.h>
    + #include <arch.h>
    + #include <stacktrace.h>
    + #include <cpu.h>
    + #include <proc/thread.h>

      /** Initialize mutex.
    …
      }

    - /** A race in mtx->owner access is unavoidable, so we have to make
    -  * access to it formally atomic. These are convenience functions to
    -  * read/write the variable without memory barriers, since we don't need
    -  * them and C11 atomics default to the strongest possible memory ordering
    -  * by default, which is utterly ridiculous.
    -  */
    - static inline thread_t *_get_owner(mutex_t *mtx)
    - {
    -         return atomic_load_explicit(&mtx->owner, memory_order_relaxed);
    - }
    -
    - /** Counterpart to _get_owner(). */
    - static inline void _set_owner(mutex_t *mtx, thread_t *owner)
    - {
    -         atomic_store_explicit(&mtx->owner, owner, memory_order_relaxed);
    - }
    -
      /** Find out whether the mutex is currently locked.
       *
    …
      bool mutex_locked(mutex_t *mtx)
      {
    -         if (!THREAD)
    -                 return mtx->nesting > 0;
    +         errno_t rc = semaphore_trydown(&mtx->sem);
    +         if (rc == EOK) {
    +                 semaphore_up(&mtx->sem);
    +         }
    +         return rc != EOK;
    + }

    -         return _get_owner(mtx) == THREAD;
    + static void mutex_lock_active(mutex_t *mtx)
    + {
    +         assert((mtx->type == MUTEX_ACTIVE) || !THREAD);
    +
    +         const unsigned deadlock_treshold = 100000000;
    +         unsigned int cnt = 0;
    +         bool deadlock_reported = false;
    +
    +         while (semaphore_trydown(&mtx->sem) != EOK) {
    +                 if (cnt++ > deadlock_treshold) {
    +                         printf("cpu%u: looping on active mutex %p\n", CPU->id, mtx);
    +                         stack_trace();
    +                         cnt = 0;
    +                         deadlock_reported = true;
    +                 }
    +         }
    +
    +         if (deadlock_reported)
    +                 printf("cpu%u: not deadlocked\n", CPU->id);
      }

    …
      void mutex_lock(mutex_t *mtx)
      {
    -         if (!THREAD) {
    -                 assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
    +         if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
    +                 assert(THREAD);
                      mtx->nesting++;
                      return;
              }

    -         if (_get_owner(mtx) == THREAD) {
    -                 /* This will also detect nested locks on a non-recursive mutex. */
    -                 assert(mtx->type == MUTEX_RECURSIVE);
    -                 assert(mtx->nesting > 0);
    -                 mtx->nesting++;
    +         if (mtx->type == MUTEX_ACTIVE || !THREAD) {
    +                 mutex_lock_active(mtx);
                      return;
              }

              semaphore_down(&mtx->sem);
    -
    -         _set_owner(mtx, THREAD);
    -         assert(mtx->nesting == 0);
    +         mtx->owner = THREAD;
              mtx->nesting = 1;
      }
    …
      errno_t mutex_lock_timeout(mutex_t *mtx, uint32_t usec)
      {
    -         if (!THREAD) {
    -                 assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
    -                 mtx->nesting++;
    -                 return EOK;
    +         if (usec != 0) {
    +                 assert(mtx->type != MUTEX_ACTIVE);
    +                 assert(THREAD);
              }

    -         if (_get_owner(mtx) == THREAD) {
    -                 assert(mtx->type == MUTEX_RECURSIVE);
    -                 assert(mtx->nesting > 0);
    +         if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
    +                 assert(THREAD);
                      mtx->nesting++;
                      return EOK;
    …

              errno_t rc = semaphore_down_timeout(&mtx->sem, usec);
    -         if (rc != EOK)
    -                 return rc;
    -
    -         _set_owner(mtx, THREAD);
    -         assert(mtx->nesting == 0);
    -         mtx->nesting = 1;
    -         return EOK;
    +         if (rc == EOK) {
    +                 mtx->owner = THREAD;
    +                 mtx->nesting = 1;
    +         }
    +         return rc;
      }
    …
      void mutex_unlock(mutex_t *mtx)
      {
    -         if (--mtx->nesting > 0) {
    -                 assert(mtx->type == MUTEX_RECURSIVE);
    -                 return;
    +         if (mtx->type == MUTEX_RECURSIVE) {
    +                 assert(mtx->owner == THREAD);
    +                 if (--mtx->nesting > 0)
    +                         return;
    +                 mtx->owner = NULL;
              }
    -
    -         assert(mtx->nesting == 0);
    -
    -         if (!THREAD)
    -                 return;
    -
    -         assert(_get_owner(mtx) == THREAD);
    -         _set_owner(mtx, NULL);
    -
              semaphore_up(&mtx->sem);
      }
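
The removed `_get_owner()`/`_set_owner()` helpers are the core of the c0d814a side: `mutex_locked()` may read `owner` while another thread stores it, so the field was declared `_Atomic` and accessed with relaxed ordering, which makes the race well-defined in C11 without emitting memory barriers. A standalone sketch of that idiom (simplified to a single global field):

    #include <stdatomic.h>
    #include <stddef.h>

    struct thread;

    /* Read and written by different threads without a common lock, so a
     * plain pointer would be a data race (undefined behavior in C11).
     * Relaxed atomics define the race away without adding any barriers,
     * which is all the removed helpers were for. */
    static _Atomic(struct thread *) owner = NULL;

    static inline struct thread *get_owner(void)
    {
            return atomic_load_explicit(&owner, memory_order_relaxed);
    }

    static inline void set_owner(struct thread *t)
    {
            atomic_store_explicit(&owner, t, memory_order_relaxed);
    }
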
  • kernel/generic/src/synch/spinlock.c

                       * which are directly used to report deadlocks
                       * via printf() (and recursively other functions).
    -                  * This concerns especially printf_lock and the
    +                  * This conserns especially printf_lock and the
                       * framebuffer lock.
                       *
  • kernel/generic/src/sysinfo/sysinfo.c

      /** Sysinfo lock */
    - static IRQ_SPINLOCK_INITIALIZE(sysinfo_lock);
    + static MUTEX_INITIALIZE(sysinfo_lock, MUTEX_ACTIVE);

      /** Sysinfo item constructor
    …
      {
              /* Protect sysinfo tree consistency */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
              }

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
      {
              /* Protect sysinfo tree consistency */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
              }

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
      {
              /* Protect sysinfo tree consistency */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
              }

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
      {
              /* Protect sysinfo tree consistency */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
              }

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
      {
              /* Protect sysinfo tree consistency */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
                      printf("Could not set sysinfo item %s.\n", name);

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
      {
              /* Protect sysinfo tree consistency */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
              }

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
               * while we are dumping it
               */
    -         irq_spinlock_lock(&sysinfo_lock, true);
    +         mutex_lock(&sysinfo_lock);

              if (root == NULL)
    …
                      sysinfo_dump_internal(root, 0);

    -         irq_spinlock_unlock(&sysinfo_lock, true);
    +         mutex_unlock(&sysinfo_lock);
      }
    …
                       * are reading it.
                       */
    -                 irq_spinlock_lock(&sysinfo_lock, true);
    +                 mutex_lock(&sysinfo_lock);
                      ret = sysinfo_get_item(path, NULL, dry_run);
    -                 irq_spinlock_unlock(&sysinfo_lock, true);
    +                 mutex_unlock(&sysinfo_lock);
              }
    …
                       * are reading it.
                       */
    -                 irq_spinlock_lock(&sysinfo_lock, true);
    +                 mutex_lock(&sysinfo_lock);
                      ret = sysinfo_get_keys(path, NULL, dry_run);
    -                 irq_spinlock_unlock(&sysinfo_lock, true);
    +                 mutex_unlock(&sysinfo_lock);
              }
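
Every hunk in sysinfo.c is the same mechanical swap, which highlights the semantic difference between the two primitives: `irq_spinlock_lock(&l, true)` disables interrupts itself, while a `MUTEX_ACTIVE` mutex busy-waits like a spinlock but leaves interrupt state to the caller. Side by side (illustrative; the mutex variant is presumably safe because the sysinfo tree is only touched from thread context):

    /* c0d814a: the spinlock manages interrupt state itself. */
    irq_spinlock_lock(&sysinfo_lock, true);   /* true: also disable interrupts */
    /* ... walk or modify the sysinfo tree ... */
    irq_spinlock_unlock(&sysinfo_lock, true);

    /* a92290d: the active mutex spins via semaphore_trydown(), but
     * interrupt state is left untouched for the caller to manage. */
    mutex_lock(&sysinfo_lock);
    /* ... walk or modify the sysinfo tree ... */
    mutex_unlock(&sysinfo_lock);
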