Changeset c0d814a in mainline


Timestamp:      2025-04-10T20:02:30Z
Author:         GitHub <noreply@…>
Children:       b8b031f
Parents:        a92290d (diff), 90dd8aee (diff)
git-author:     Wayne Thornton <wmthornton-dev@…> (2025-04-10 20:02:30)
git-committer:  GitHub <noreply@…> (2025-04-10 20:02:30)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge pull request #6 from HelenOS/master

Merge pending changes from downstream helenos/master

Location:  kernel/generic
Files:     11 edited

Legend: each diff below compares parent a92290d with this changeset c0d814a.
Lines beginning with '-' were removed, lines beginning with '+' were added,
unmarked lines are unchanged context, and '…' marks lines elided from the
listing.
  • kernel/generic/include/console/console.h

      extern sysarg_t sys_debug_console(void);

    + extern void console_lock(void);
    + extern void console_unlock(void);
    +
      #endif /* KERN_CONSOLE_H_ */
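
    A minimal usage sketch of the new pair (hypothetical caller, not part of
    the changeset; dump_stats() is a made-up function): any kernel code that
    emits a multi-line report can bracket it so that output from concurrent
    threads does not interleave.

        #include <console/console.h>
        #include <stddef.h>
        #include <stdio.h>

        static void dump_stats(size_t frames_free, size_t frames_total)
        {
                console_lock();
                printf("memory: %zu of %zu frames free\n", frames_free, frames_total);
                printf("memory: %zu frames in use\n", frames_total - frames_free);
                console_unlock();
        }
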
  • kernel/generic/include/synch/condvar.h

      /*
       * Copyright (c) 2001-2004 Jakub Jermar
    +  * Copyright (c) 2025 Jiří Zárevúcky
       * All rights reserved.
       *
    …
              condvar_t name = CONDVAR_INITIALIZER(name)

    - #ifdef CONFIG_SMP
    - #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
    -         _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags))
    - #else
    - #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
    -         _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags))
    - #endif
    -
      extern void condvar_initialize(condvar_t *cv);
      extern void condvar_signal(condvar_t *cv);
      extern void condvar_broadcast(condvar_t *cv);

    - extern errno_t condvar_wait(condvar_t *cv, mutex_t *mtx);
    - extern errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec);
    + extern errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx);
    + extern errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx);
    + extern errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx);
    + extern errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec);
    + extern errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *mtx, uint32_t usec);
    + extern errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx, uint32_t usec);

    - extern errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    -     uint32_t usec, int flags);
    - extern errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv,
    -     irq_spinlock_t *irq_lock, uint32_t usec, int flags);
    + #define condvar_wait(cv, mtx) (_Generic((mtx), \
    +         mutex_t *: __condvar_wait_mutex, \
    +         spinlock_t *: __condvar_wait_spinlock, \
    +         irq_spinlock_t *: __condvar_wait_irq_spinlock \
    + )(cv, mtx))
    +
    + #define condvar_wait_timeout(cv, mtx, usec) (_Generic((mtx), \
    +         mutex_t *: __condvar_wait_timeout_mutex, \
    +         spinlock_t *: __condvar_wait_timeout_spinlock, \
    +         irq_spinlock_t *: __condvar_wait_timeout_irq_spinlock \
    + )(cv, mtx, usec))

      #endif
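
    The macros above use C11 _Generic to dispatch on the static type of the
    lock argument at compile time. A standalone illustration of the same
    technique (dummy types, not HelenOS code):

        #include <stdio.h>

        typedef struct { int x; } mutex_t;
        typedef struct { int x; } spinlock_t;

        static void wait_mutex(mutex_t *m) { (void) m; puts("mutex path"); }
        static void wait_spinlock(spinlock_t *s) { (void) s; puts("spinlock path"); }

        /* Picks the function matching the pointer type; a type with no
         * association is a compile-time error, not a runtime one. */
        #define wait(lock) (_Generic((lock), \
                mutex_t *: wait_mutex, \
                spinlock_t *: wait_spinlock \
        )(lock))

        int main(void)
        {
                mutex_t m;
                spinlock_t s;
                wait(&m);   /* prints "mutex path" */
                wait(&s);   /* prints "spinlock path" */
                return 0;
        }
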
  • kernel/generic/include/synch/mutex.h

              MUTEX_PASSIVE,
              MUTEX_RECURSIVE,
    -         MUTEX_ACTIVE
      } mutex_type_t;

    …
      typedef struct {
              mutex_type_t type;
    +         int nesting;
              semaphore_t sem;
    -         struct thread *owner;
    -         unsigned nesting;
    +         _Atomic(struct thread *) owner;
      } mutex_t;

      #define MUTEX_INITIALIZER(name, mtype) (mutex_t) { \
              .type = (mtype), \
    +         .nesting = 0, \
              .sem = SEMAPHORE_INITIALIZER((name).sem, 1), \
              .owner = NULL, \
    -         .nesting = 0, \
      }
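
    For context, a sketch of how the revised type is typically used
    (hypothetical example, assuming the kernel's mutex_lock()/mutex_unlock()
    API; report() is made up):

        /* File-scope recursive mutex; MUTEX_INITIALIZER is a compound literal. */
        static mutex_t report_lock = MUTEX_INITIALIZER(report_lock, MUTEX_RECURSIVE);

        static void report(void)
        {
                mutex_lock(&report_lock);
                mutex_lock(&report_lock);    /* recursive: nesting 1 -> 2 */
                /* ... */
                mutex_unlock(&report_lock);  /* nesting 2 -> 1 */
                mutex_unlock(&report_lock);  /* nesting 1 -> 0, semaphore released */
        }
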
  • kernel/generic/src/console/console.c

       */

    + #include <abi/kio.h>
    + #include <arch.h>
      #include <assert.h>
    + #include <atomic.h>
    + #include <console/chardev.h>
      #include <console/console.h>
    - #include <console/chardev.h>
    - #include <sysinfo/sysinfo.h>
    - #include <synch/waitq.h>
    - #include <synch/spinlock.h>
    - #include <typedefs.h>
    + #include <ddi/ddi.h>
      #include <ddi/irq.h>
    - #include <ddi/ddi.h>
    + #include <errno.h>
      #include <ipc/event.h>
      #include <ipc/irq.h>
    - #include <arch.h>
    + #include <mm/frame.h> /* SIZE2FRAMES */
      #include <panic.h>
    + #include <preemption.h>
    + #include <proc/thread.h>
    + #include <putchar.h>
    + #include <stdatomic.h>
      #include <stdio.h>
    - #include <putchar.h>
    - #include <atomic.h>
    + #include <stdlib.h>  /* malloc */
    + #include <str.h>
    + #include <synch/mutex.h>
    + #include <synch/spinlock.h>
    + #include <synch/waitq.h>
      #include <syscall/copy.h>
    - #include <errno.h>
    - #include <str.h>
    - #include <stdatomic.h>
    - #include <abi/kio.h>
    - #include <mm/frame.h> /* SIZE2FRAMES */
    - #include <stdlib.h>  /* malloc */
    + #include <sysinfo/sysinfo.h>
    + #include <typedefs.h>

      #define KIO_PAGES    8
    …
      /** Kernel log initialized */
      static atomic_bool kio_inited = ATOMIC_VAR_INIT(false);
    +
    + /** A mutex for preventing interleaving of output lines from different threads.
    +  * May not be held in some circumstances, so locking of any internal shared
    +  * structures is still necessary.
    +  */
    + static MUTEX_INITIALIZE(console_mutex, MUTEX_RECURSIVE);

      /** First kernel log characters */
    …
      }

    + /** Lock console output, ensuring that lines from different threads don't
    +  * interleave. Does nothing when preemption is disabled, so that debugging
    +  * and error printouts in sensitive areas still work.
    +  */
    + void console_lock(void)
    + {
    +         if (!PREEMPTION_DISABLED)
    +                 mutex_lock(&console_mutex);
    + }
    +
    + /** Unlocks console output. See console_lock()
    +  */
    + void console_unlock(void)
    + {
    +         if (!PREEMPTION_DISABLED)
    +                 mutex_unlock(&console_mutex);
    + }
    +
      /** @}
       */
  • kernel/generic/src/log/log.c

      void log_begin(log_facility_t fac, log_level_t level)
      {
    +         console_lock();
              spinlock_lock(&log_lock);
              spinlock_lock(&kio_lock);
    …
              kio_update(NULL);
              log_update(NULL);
    +         console_unlock();
      }
  • kernel/generic/src/mm/frame.c

       * available.
       */
    - static MUTEX_INITIALIZE(mem_avail_mtx, MUTEX_ACTIVE);
    + static IRQ_SPINLOCK_INITIALIZE(mem_avail_lock);
      static CONDVAR_INITIALIZE(mem_avail_cv);
      static size_t mem_avail_req = 0;  /**< Number of frames requested. */
    …
      #endif

    -         /*
    -          * Since the mem_avail_mtx is an active mutex, we need to
    -          * disable interrupts to prevent deadlock with TLB shootdown.
    -          */
    -         ipl_t ipl = interrupts_disable();
    -         mutex_lock(&mem_avail_mtx);
    +         /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
    +         irq_spinlock_lock(&mem_avail_lock, true);

              if (mem_avail_req > 0)
    …

              while (gen == mem_avail_gen)
    -                 condvar_wait(&mem_avail_cv, &mem_avail_mtx);
    -
    -         mutex_unlock(&mem_avail_mtx);
    -         interrupts_restore(ipl);
    +                 condvar_wait(&mem_avail_cv, &mem_avail_lock);
    +
    +         irq_spinlock_unlock(&mem_avail_lock, true);

      #ifdef CONFIG_DEBUG
    …
              irq_spinlock_unlock(&zones.lock, true);

    -         /*
    -          * Signal that some memory has been freed.
    -          * Since the mem_avail_mtx is an active mutex,
    -          * we need to disable interruptsto prevent deadlock
    -          * with TLB shootdown.
    -          */
    -
    -         ipl_t ipl = interrupts_disable();
    -         mutex_lock(&mem_avail_mtx);
    +         /* Signal that some memory has been freed. */
    +
    +         /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
    +         irq_spinlock_lock(&mem_avail_lock, true);

              if (mem_avail_req > 0)
    …
              }

    -         mutex_unlock(&mem_avail_mtx);
    -         interrupts_restore(ipl);
    +         irq_spinlock_unlock(&mem_avail_lock, true);

              if (!(flags & FRAME_NO_RESERVE))
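
    The waiter/signaler protocol above relies on a generation counter so that
    a wakeup arriving between unlocking and sleeping is never lost. A
    userspace analogue of the same pattern (pthreads, not kernel code;
    function names are made up):

        #include <pthread.h>

        static pthread_mutex_t avail_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t avail_cv = PTHREAD_COND_INITIALIZER;
        static unsigned long avail_gen = 0;

        static void wait_for_memory(void)
        {
                pthread_mutex_lock(&avail_lock);
                unsigned long gen = avail_gen;
                /* The loop guards against spurious wakeups; a changed generation
                 * proves at least one free happened after we started waiting. */
                while (gen == avail_gen)
                        pthread_cond_wait(&avail_cv, &avail_lock);
                pthread_mutex_unlock(&avail_lock);
        }

        static void memory_was_freed(void)
        {
                pthread_mutex_lock(&avail_lock);
                avail_gen++;   /* new generation: progress happened */
                pthread_cond_broadcast(&avail_cv);
                pthread_mutex_unlock(&avail_lock);
        }
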
  • kernel/generic/src/printf/vprintf.c

       */

    + #include <arch/asm.h>
    + #include <console/console.h>
      #include <print.h>
      #include <printf/printf_core.h>
      #include <putchar.h>
    + #include <str.h>
      #include <synch/spinlock.h>
    - #include <arch/asm.h>
      #include <typedefs.h>
    - #include <str.h>

      static int vprintf_str_write(const char *str, size_t size, void *data)
    …
              char32_t uc;

    +         console_lock();
    +
              while ((uc = str_decode(str, &offset, STR_NO_LIMIT)) != 0) {
                      putuchar(uc);
    …

              putuchar('\n');
    +
    +         console_unlock();
              return chars;
      }
    …
              };

    -         return printf_core(fmt, &ps, ap);
    +         console_lock();
    +         int ret = printf_core(fmt, &ps, ap);
    +         console_unlock();
    +         return ret;
      }
  • kernel/generic/src/synch/condvar.c

       * @return              See comment for waitq_sleep_timeout().
       */
    - errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
    + errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec)
      {
              wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    …
      }

    - errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
    + errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx)
      {
              wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    …
      }

    - /** Wait for the condition to become true with a locked spinlock.
    -  *
    -  * The function is not aware of irq_spinlock. Therefore do not even
    -  * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
    -  * instead.
    -  *
    -  * @param cv            Condition variable.
    -  * @param lock          Locked spinlock.
    -  * @param usec          Timeout value in microseconds.
    -  * @param flags         Select mode of operation.
    -  *
    -  * For exact description of meaning of possible combinations of usec and flags,
    -  * see comment for waitq_sleep_timeout().  Note that when
    -  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    -  * returned.
    -  *
    -  * @return See comment for waitq_sleep_timeout().
    -  */
    - errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
    -     uint32_t usec, int flags)
    + /** Same as __condvar_wait_timeout_mutex(), except for spinlock_t. */
    + errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *lock,
    +     uint32_t usec)
      {
              wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    …
              spinlock_unlock(lock);

    -         errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, guard);
    +         errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec,
    +             SYNCH_FLAGS_NON_BLOCKING, guard);

              spinlock_lock(lock);
    …
      }

    - /** Wait for the condition to become true with a locked irq spinlock.
    -  *
    -  * @param cv            Condition variable.
    -  * @param lock          Locked irq spinlock.
    -  * @param usec          Timeout value in microseconds.
    -  * @param flags         Select mode of operation.
    -  *
    -  * For exact description of meaning of possible combinations of usec and flags,
    -  * see comment for waitq_sleep_timeout().  Note that when
    -  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
    -  * returned.
    -  *
    -  * @return See comment for waitq_sleep_timeout().
    -  */
    - errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
    -     uint32_t usec, int flags)
    + errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx)
    + {
    +         wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
    +
    +         /* Unlock only after the waitq is locked so we don't miss a wakeup. */
    +         spinlock_unlock(mtx);
    +
    +         errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
    +
    +         spinlock_lock(mtx);
    +         return rc;
    + }
    +
    + /** Same as __condvar_wait_timeout_mutex(), except for irq_spinlock_t. */
    + errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv,
    +     irq_spinlock_t *irq_lock, uint32_t usec)
      {
              errno_t rc;
    …
               * running) and there is no danger of a deadlock.
               */
    -         rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
    +         rc = __condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec);
    +
    +         irq_lock->guard = guard;
    +         irq_lock->ipl = ipl;
    +
    +         return rc;
    + }
    +
    + /** Same as __condvar_wait_mutex(), except for irq_spinlock_t. */
    + errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock)
    + {
    +         errno_t rc;
    +         /* Save spinlock's state so we can restore it correctly later on. */
    +         ipl_t ipl = irq_lock->ipl;
    +         bool guard = irq_lock->guard;
    +
    +         irq_lock->guard = false;
    +
    +         rc = __condvar_wait_spinlock(cv, &irq_lock->lock);

              irq_lock->guard = guard;
  • kernel/generic/src/synch/mutex.c

      /*
       * Copyright (c) 2001-2004 Jakub Jermar
    -  * Copyright (c) 2023 Jiří Zárevúcky
    +  * Copyright (c) 2025 Jiří Zárevúcky
       * All rights reserved.
       *
    …
      #include <assert.h>
      #include <errno.h>
    + #include <proc/thread.h>
    + #include <stdatomic.h>
      #include <synch/mutex.h>
      #include <synch/semaphore.h>
    - #include <arch.h>
    - #include <stacktrace.h>
    - #include <cpu.h>
    - #include <proc/thread.h>

      /** Initialize mutex.
    …
      }

    + /** A race in mtx->owner access is unavoidable, so we have to make
    +  * access to it formally atomic. These are convenience functions to
    +  * read/write the variable without memory barriers, since we don't need
    +  * them and C11 atomics default to the strongest possible memory ordering
    +  * by default, which is utterly ridiculous.
    +  */
    + static inline thread_t *_get_owner(mutex_t *mtx)
    + {
    +         return atomic_load_explicit(&mtx->owner, memory_order_relaxed);
    + }
    +
    + /** Counterpart to _get_owner(). */
    + static inline void _set_owner(mutex_t *mtx, thread_t *owner)
    + {
    +         atomic_store_explicit(&mtx->owner, owner, memory_order_relaxed);
    + }
    +
      /** Find out whether the mutex is currently locked.
       *
    …
      bool mutex_locked(mutex_t *mtx)
      {
    -         errno_t rc = semaphore_trydown(&mtx->sem);
    -         if (rc == EOK) {
    -                 semaphore_up(&mtx->sem);
    -         }
    -         return rc != EOK;
    - }
    +         if (!THREAD)
    +                 return mtx->nesting > 0;

    - static void mutex_lock_active(mutex_t *mtx)
    - {
    -         assert((mtx->type == MUTEX_ACTIVE) || !THREAD);
    -
    -         const unsigned deadlock_treshold = 100000000;
    -         unsigned int cnt = 0;
    -         bool deadlock_reported = false;
    -
    -         while (semaphore_trydown(&mtx->sem) != EOK) {
    -                 if (cnt++ > deadlock_treshold) {
    -                         printf("cpu%u: looping on active mutex %p\n", CPU->id, mtx);
    -                         stack_trace();
    -                         cnt = 0;
    -                         deadlock_reported = true;
    -                 }
    -         }
    -
    -         if (deadlock_reported)
    -                 printf("cpu%u: not deadlocked\n", CPU->id);
    +         return _get_owner(mtx) == THREAD;
      }

    …
      void mutex_lock(mutex_t *mtx)
      {
    -         if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
    -                 assert(THREAD);
    +         if (!THREAD) {
    +                 assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
                      mtx->nesting++;
                      return;
              }

    -         if (mtx->type == MUTEX_ACTIVE || !THREAD) {
    -                 mutex_lock_active(mtx);
    +         if (_get_owner(mtx) == THREAD) {
    +                 /* This will also detect nested locks on a non-recursive mutex. */
    +                 assert(mtx->type == MUTEX_RECURSIVE);
    +                 assert(mtx->nesting > 0);
    +                 mtx->nesting++;
                      return;
              }

              semaphore_down(&mtx->sem);
    -         mtx->owner = THREAD;
    +
    +         _set_owner(mtx, THREAD);
    +         assert(mtx->nesting == 0);
              mtx->nesting = 1;
      }
    …
      errno_t mutex_lock_timeout(mutex_t *mtx, uint32_t usec)
      {
    -         if (usec != 0) {
    -                 assert(mtx->type != MUTEX_ACTIVE);
    -                 assert(THREAD);
    +         if (!THREAD) {
    +                 assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
    +                 mtx->nesting++;
    +                 return EOK;
              }

    -         if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
    -                 assert(THREAD);
    +         if (_get_owner(mtx) == THREAD) {
    +                 assert(mtx->type == MUTEX_RECURSIVE);
    +                 assert(mtx->nesting > 0);
                      mtx->nesting++;
                      return EOK;
    …

              errno_t rc = semaphore_down_timeout(&mtx->sem, usec);
    -         if (rc == EOK) {
    -                 mtx->owner = THREAD;
    -                 mtx->nesting = 1;
    -         }
    -         return rc;
    +         if (rc != EOK)
    +                 return rc;
    +
    +         _set_owner(mtx, THREAD);
    +         assert(mtx->nesting == 0);
    +         mtx->nesting = 1;
    +         return EOK;
      }

    …
      void mutex_unlock(mutex_t *mtx)
      {
    -         if (mtx->type == MUTEX_RECURSIVE) {
    -                 assert(mtx->owner == THREAD);
    -                 if (--mtx->nesting > 0)
    -                         return;
    -                 mtx->owner = NULL;
    +         if (--mtx->nesting > 0) {
    +                 assert(mtx->type == MUTEX_RECURSIVE);
    +                 return;
              }
    +
    +         assert(mtx->nesting == 0);
    +
    +         if (!THREAD)
    +                 return;
    +
    +         assert(_get_owner(mtx) == THREAD);
    +         _set_owner(mtx, NULL);
    +
              semaphore_up(&mtx->sem);
      }
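
    A standalone sketch of the relaxed-atomics idiom introduced above (dummy
    types, not HelenOS code): the owner field is only ever compared for
    identity, so atomic loads and stores without ordering guarantees suffice.

        #include <stdatomic.h>

        typedef struct thread thread_t;   /* opaque; stands in for the kernel's type */

        typedef struct {
                _Atomic(thread_t *) owner;
        } owned_lock_t;

        static inline thread_t *owner_get(owned_lock_t *l)
        {
                /* relaxed: an atomic read with no implied synchronization */
                return atomic_load_explicit(&l->owner, memory_order_relaxed);
        }

        static inline void owner_set(owned_lock_t *l, thread_t *t)
        {
                atomic_store_explicit(&l->owner, t, memory_order_relaxed);
        }
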
  • kernel/generic/src/synch/spinlock.c

               * which are directly used to report deadlocks
               * via printf() (and recursively other functions).
    -          * This conserns especially printf_lock and the
    +          * This concerns especially printf_lock and the
               * framebuffer lock.
               *
  • kernel/generic/src/sysinfo/sysinfo.c


      /** Sysinfo lock */
    - static MUTEX_INITIALIZE(sysinfo_lock, MUTEX_ACTIVE);
    + static IRQ_SPINLOCK_INITIALIZE(sysinfo_lock);

      /** Sysinfo item constructor
    …
      {
              /* Protect sysinfo tree consistency */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
              }

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
      {
              /* Protect sysinfo tree consistency */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
              }

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
      {
              /* Protect sysinfo tree consistency */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
              }

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
      {
              /* Protect sysinfo tree consistency */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
              }

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
      {
              /* Protect sysinfo tree consistency */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
                      printf("Could not set sysinfo item %s.\n", name);

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
      {
              /* Protect sysinfo tree consistency */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
              }

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
               * while we are dumping it
               */
    -         mutex_lock(&sysinfo_lock);
    +         irq_spinlock_lock(&sysinfo_lock, true);

              if (root == NULL)
    …
                      sysinfo_dump_internal(root, 0);

    -         mutex_unlock(&sysinfo_lock);
    +         irq_spinlock_unlock(&sysinfo_lock, true);
      }

    …
                       * are reading it.
                       */
    -                 mutex_lock(&sysinfo_lock);
    +                 irq_spinlock_lock(&sysinfo_lock, true);
                      ret = sysinfo_get_item(path, NULL, dry_run);
    -                 mutex_unlock(&sysinfo_lock);
    +                 irq_spinlock_unlock(&sysinfo_lock, true);
              }

    …
                       * are reading it.
                       */
    -                 mutex_lock(&sysinfo_lock);
    +                 irq_spinlock_lock(&sysinfo_lock, true);
                      ret = sysinfo_get_keys(path, NULL, dry_run);
    -                 mutex_unlock(&sysinfo_lock);
    +                 irq_spinlock_unlock(&sysinfo_lock, true);
              }