Changeset c0d814a in mainline
- Timestamp: 2025-04-10T20:02:30Z (2 days ago)
- Children: b8b031f
- Parents: a92290d (diff), 90dd8aee (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- git-author: Wayne Thornton <wmthornton-dev@…> (2025-04-10 20:02:30)
- git-committer: GitHub <noreply@…> (2025-04-10 20:02:30)
- Location: kernel/generic
- Files: 11 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
kernel/generic/include/console/console.h
  extern sysarg_t sys_debug_console(void);

+ extern void console_lock(void);
+ extern void console_unlock(void);
+
  #endif /* KERN_CONSOLE_H_ */
kernel/generic/include/synch/condvar.h
  /*
   * Copyright (c) 2001-2004 Jakub Jermar
+  * Copyright (c) 2025 Jiří Zárevúcky
   * All rights reserved.
   *
…
      condvar_t name = CONDVAR_INITIALIZER(name)

- #ifdef CONFIG_SMP
- #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
-     _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags))
- #else
- #define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
-     _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags))
- #endif
-
  extern void condvar_initialize(condvar_t *cv);
  extern void condvar_signal(condvar_t *cv);
  extern void condvar_broadcast(condvar_t *cv);

- extern errno_t condvar_wait(condvar_t *cv, mutex_t *mtx);
- extern errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec);
+ extern errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx);
+ extern errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx);
+ extern errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx);
+ extern errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec);
+ extern errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *mtx, uint32_t usec);
+ extern errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx, uint32_t usec);

- extern errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
-     uint32_t usec, int flags);
- extern errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv,
-     irq_spinlock_t *irq_lock, uint32_t usec, int flags);
+ #define condvar_wait(cv, mtx) (_Generic((mtx), \
+     mutex_t *: __condvar_wait_mutex, \
+     spinlock_t *: __condvar_wait_spinlock, \
+     irq_spinlock_t *: __condvar_wait_irq_spinlock \
+ )(cv, mtx))
+
+ #define condvar_wait_timeout(cv, mtx, usec) (_Generic((mtx), \
+     mutex_t *: __condvar_wait_timeout_mutex, \
+     spinlock_t *: __condvar_wait_timeout_spinlock, \
+     irq_spinlock_t *: __condvar_wait_timeout_irq_spinlock \
+ )(cv, mtx, usec))

  #endif
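The new condvar_wait()/condvar_wait_timeout() macros use C11 _Generic selection to dispatch on the static type of the lock, so a single call form covers mutexes, plain spinlocks, and IRQ spinlocks. The following sketch of a caller is illustrative only and not part of the changeset; the example_* identifiers are hypothetical, while the lock, condition-variable, and wait primitives are the ones declared above.

    /* Hypothetical caller: the same condvar_wait() macro works for
     * mutex_t *, spinlock_t *, and irq_spinlock_t * guards. */
    static IRQ_SPINLOCK_INITIALIZE(example_lock);
    static CONDVAR_INITIALIZE(example_cv);
    static bool example_ready = false;

    static void example_wait_until_ready(void)
    {
        irq_spinlock_lock(&example_lock, true);

        while (!example_ready) {
            /* Expands to __condvar_wait_irq_spinlock(&example_cv, &example_lock). */
            condvar_wait(&example_cv, &example_lock);
        }

        irq_spinlock_unlock(&example_lock, true);
    }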
kernel/generic/include/synch/mutex.h
      MUTEX_PASSIVE,
      MUTEX_RECURSIVE,
-     MUTEX_ACTIVE
  } mutex_type_t;

…
  typedef struct {
      mutex_type_t type;
+     int nesting;
      semaphore_t sem;
-     struct thread *owner;
-     unsigned nesting;
+     _Atomic(struct thread *) owner;
  } mutex_t;

  #define MUTEX_INITIALIZER(name, mtype) (mutex_t) { \
      .type = (mtype), \
+     .nesting = 0, \
      .sem = SEMAPHORE_INITIALIZER((name).sem, 1), \
      .owner = NULL, \
-     .nesting = 0, \
  }
kernel/generic/src/console/console.c
   */

+ #include <abi/kio.h>
+ #include <arch.h>
  #include <assert.h>
+ #include <atomic.h>
+ #include <console/chardev.h>
  #include <console/console.h>
- #include <console/chardev.h>
- #include <sysinfo/sysinfo.h>
- #include <synch/waitq.h>
- #include <synch/spinlock.h>
- #include <typedefs.h>
+ #include <ddi/ddi.h>
  #include <ddi/irq.h>
- #include <ddi/ddi.h>
+ #include <errno.h>
  #include <ipc/event.h>
  #include <ipc/irq.h>
- #include <arch.h>
+ #include <mm/frame.h>  /* SIZE2FRAMES */
  #include <panic.h>
+ #include <preemption.h>
+ #include <proc/thread.h>
+ #include <putchar.h>
+ #include <stdatomic.h>
  #include <stdio.h>
- #include <putchar.h>
- #include <atomic.h>
+ #include <stdlib.h>  /* malloc */
+ #include <str.h>
+ #include <synch/mutex.h>
+ #include <synch/spinlock.h>
+ #include <synch/waitq.h>
  #include <syscall/copy.h>
- #include <errno.h>
- #include <str.h>
- #include <stdatomic.h>
- #include <abi/kio.h>
- #include <mm/frame.h>  /* SIZE2FRAMES */
- #include <stdlib.h>  /* malloc */
+ #include <sysinfo/sysinfo.h>
+ #include <typedefs.h>

  #define KIO_PAGES 8
…
  /** Kernel log initialized */
  static atomic_bool kio_inited = ATOMIC_VAR_INIT(false);
+
+ /** A mutex for preventing interleaving of output lines from different threads.
+  * May not be held in some circumstances, so locking of any internal shared
+  * structures is still necessary.
+  */
+ static MUTEX_INITIALIZE(console_mutex, MUTEX_RECURSIVE);

  /** First kernel log characters */
…
  }

+ /** Lock console output, ensuring that lines from different threads don't
+  * interleave. Does nothing when preemption is disabled, so that debugging
+  * and error printouts in sensitive areas still work.
+  */
+ void console_lock(void)
+ {
+     if (!PREEMPTION_DISABLED)
+         mutex_lock(&console_mutex);
+ }
+
+ /** Unlocks console output. See console_lock().
+  */
+ void console_unlock(void)
+ {
+     if (!PREEMPTION_DISABLED)
+         mutex_unlock(&console_mutex);
+ }
+
  /** @}
   */
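The new console_lock()/console_unlock() pair brackets a burst of related output so lines produced by other threads cannot interleave with it; because both calls are no-ops while preemption is disabled, they remain safe around debugging and panic-path printouts. A hypothetical caller (illustrative only; example_print_report is not part of the changeset) might look like this:

    /* Hypothetical caller: keep a multi-line report together in the kernel log. */
    static void example_print_report(size_t free_frames)
    {
        console_lock();
        printf("memory report:\n");
        printf("  free frames: %zu\n", free_frames);
        console_unlock();
    }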
kernel/generic/src/log/log.c
  void log_begin(log_facility_t fac, log_level_t level)
  {
+     console_lock();
      spinlock_lock(&log_lock);
      spinlock_lock(&kio_lock);
…
      kio_update(NULL);
      log_update(NULL);
+     console_unlock();
  }
kernel/generic/src/mm/frame.c
   * available.
   */
- static MUTEX_INITIALIZE(mem_avail_mtx, MUTEX_ACTIVE);
+ static IRQ_SPINLOCK_INITIALIZE(mem_avail_lock);
  static CONDVAR_INITIALIZE(mem_avail_cv);
  static size_t mem_avail_req = 0;  /**< Number of frames requested. */
…
  #endif

-     /*
-      * Since the mem_avail_mtx is an active mutex, we need to
-      * disable interrupts to prevent deadlock with TLB shootdown.
-      */
-     ipl_t ipl = interrupts_disable();
-     mutex_lock(&mem_avail_mtx);
+     /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
+     irq_spinlock_lock(&mem_avail_lock, true);

      if (mem_avail_req > 0)
…

      while (gen == mem_avail_gen)
-         condvar_wait(&mem_avail_cv, &mem_avail_mtx);
-
-     mutex_unlock(&mem_avail_mtx);
-     interrupts_restore(ipl);
+         condvar_wait(&mem_avail_cv, &mem_avail_lock);
+
+     irq_spinlock_unlock(&mem_avail_lock, true);

  #ifdef CONFIG_DEBUG
…
      irq_spinlock_unlock(&zones.lock, true);

-     /*
-      * Signal that some memory has been freed.
-      * Since the mem_avail_mtx is an active mutex,
-      * we need to disable interrupts to prevent deadlock
-      * with TLB shootdown.
-      */
-
-     ipl_t ipl = interrupts_disable();
-     mutex_lock(&mem_avail_mtx);
+     /* Signal that some memory has been freed. */
+
+     /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
+     irq_spinlock_lock(&mem_avail_lock, true);

      if (mem_avail_req > 0)
…
      }

-     mutex_unlock(&mem_avail_mtx);
-     interrupts_restore(ipl);
+     irq_spinlock_unlock(&mem_avail_lock, true);

      if (!(flags & FRAME_NO_RESERVE))
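The waiter above sleeps on mem_avail_cv while holding the IRQ spinlock, relying on the condition-variable implementation to release and re-acquire it. The complementary signalling side follows the usual pattern: update the shared state under the same lock, then wake the sleepers. The sketch below is illustrative only, with hypothetical example_* names; it is not the changeset's actual wake-up code, which is not shown in the displayed hunks.

    /* Hypothetical signalling side: update shared state under the lock,
     * then wake every thread sleeping on the condition variable. */
    static IRQ_SPINLOCK_INITIALIZE(example_avail_lock);
    static CONDVAR_INITIALIZE(example_avail_cv);
    static size_t example_avail_gen = 0;

    static void example_signal_memory_freed(void)
    {
        irq_spinlock_lock(&example_avail_lock, true);
        example_avail_gen++;
        condvar_broadcast(&example_avail_cv);
        irq_spinlock_unlock(&example_avail_lock, true);
    }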
kernel/generic/src/printf/vprintf.c
   */

+ #include <arch/asm.h>
+ #include <console/console.h>
  #include <print.h>
  #include <printf/printf_core.h>
  #include <putchar.h>
+ #include <str.h>
  #include <synch/spinlock.h>
- #include <arch/asm.h>
  #include <typedefs.h>
- #include <str.h>

  static int vprintf_str_write(const char *str, size_t size, void *data)
…
      char32_t uc;

+     console_lock();
+
      while ((uc = str_decode(str, &offset, STR_NO_LIMIT)) != 0) {
          putuchar(uc);
…

      putuchar('\n');
+
+     console_unlock();
      return chars;
  }
…
      };

-     return printf_core(fmt, &ps, ap);
+     console_lock();
+     int ret = printf_core(fmt, &ps, ap);
+     console_unlock();
+     return ret;
  }
kernel/generic/src/synch/condvar.c
   * @return See comment for waitq_sleep_timeout().
   */
- errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
+ errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec)
  {
      wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
…
  }

- errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
+ errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx)
  {
      wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
…
  }

- /** Wait for the condition to become true with a locked spinlock.
-  *
-  * The function is not aware of irq_spinlock. Therefore do not even
-  * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
-  * instead.
-  *
-  * @param cv Condition variable.
-  * @param lock Locked spinlock.
-  * @param usec Timeout value in microseconds.
-  * @param flags Select mode of operation.
-  *
-  * For exact description of meaning of possible combinations of usec and flags,
-  * see comment for waitq_sleep_timeout(). Note that when
-  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
-  * returned.
-  *
-  * @return See comment for waitq_sleep_timeout().
-  */
- errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
-     uint32_t usec, int flags)
+ /** Same as __condvar_wait_timeout_mutex(), except for spinlock_t. */
+ errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *lock,
+     uint32_t usec)
  {
      wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
…
      spinlock_unlock(lock);

-     errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, guard);
+     errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec,
+         SYNCH_FLAGS_NON_BLOCKING, guard);

      spinlock_lock(lock);
…
  }

- /** Wait for the condition to become true with a locked irq spinlock.
-  *
-  * @param cv Condition variable.
-  * @param lock Locked irq spinlock.
-  * @param usec Timeout value in microseconds.
-  * @param flags Select mode of operation.
-  *
-  * For exact description of meaning of possible combinations of usec and flags,
-  * see comment for waitq_sleep_timeout(). Note that when
-  * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
-  * returned.
-  *
-  * @return See comment for waitq_sleep_timeout().
-  */
- errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
-     uint32_t usec, int flags)
+ errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx)
+ {
+     wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
+
+     /* Unlock only after the waitq is locked so we don't miss a wakeup. */
+     spinlock_unlock(mtx);
+
+     errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
+
+     spinlock_lock(mtx);
+     return rc;
+ }
+
+ /** Same as __condvar_wait_timeout_mutex(), except for irq_spinlock_t. */
+ errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv,
+     irq_spinlock_t *irq_lock, uint32_t usec)
  {
      errno_t rc;
…
       * running) and there is no danger of a deadlock.
       */
-     rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);
+     rc = __condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec);
+
+     irq_lock->guard = guard;
+     irq_lock->ipl = ipl;
+
+     return rc;
+ }
+
+ /** Same as __condvar_wait_mutex(), except for irq_spinlock_t. */
+ errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock)
+ {
+     errno_t rc;
+     /* Save spinlock's state so we can restore it correctly later on. */
+     ipl_t ipl = irq_lock->ipl;
+     bool guard = irq_lock->guard;
+
+     irq_lock->guard = false;
+
+     rc = __condvar_wait_spinlock(cv, &irq_lock->lock);

      irq_lock->guard = guard;
kernel/generic/src/synch/mutex.c
  /*
   * Copyright (c) 2001-2004 Jakub Jermar
-  * Copyright (c) 2023 Jiří Zárevúcky
+  * Copyright (c) 2025 Jiří Zárevúcky
   * All rights reserved.
   *
…
  #include <assert.h>
  #include <errno.h>
+ #include <proc/thread.h>
+ #include <stdatomic.h>
  #include <synch/mutex.h>
  #include <synch/semaphore.h>
- #include <arch.h>
- #include <stacktrace.h>
- #include <cpu.h>
- #include <proc/thread.h>

  /** Initialize mutex.
…
  }

+ /** A race in mtx->owner access is unavoidable, so we have to make
+  * access to it formally atomic. These are convenience functions to
+  * read/write the variable without memory barriers, since we don't need
+  * them and C11 atomics default to the strongest possible memory ordering
+  * by default, which is utterly ridiculous.
+  */
+ static inline thread_t *_get_owner(mutex_t *mtx)
+ {
+     return atomic_load_explicit(&mtx->owner, memory_order_relaxed);
+ }
+
+ /** Counterpart to _get_owner(). */
+ static inline void _set_owner(mutex_t *mtx, thread_t *owner)
+ {
+     atomic_store_explicit(&mtx->owner, owner, memory_order_relaxed);
+ }
+
  /** Find out whether the mutex is currently locked.
…
  bool mutex_locked(mutex_t *mtx)
  {
-     errno_t rc = semaphore_trydown(&mtx->sem);
-     if (rc == EOK) {
-         semaphore_up(&mtx->sem);
-     }
-     return rc != EOK;
- }
+     if (!THREAD)
+         return mtx->nesting > 0;

- static void mutex_lock_active(mutex_t *mtx)
- {
-     assert((mtx->type == MUTEX_ACTIVE) || !THREAD);
-
-     const unsigned deadlock_treshold = 100000000;
-     unsigned int cnt = 0;
-     bool deadlock_reported = false;
-
-     while (semaphore_trydown(&mtx->sem) != EOK) {
-         if (cnt++ > deadlock_treshold) {
-             printf("cpu%u: looping on active mutex %p\n", CPU->id, mtx);
-             stack_trace();
-             cnt = 0;
-             deadlock_reported = true;
-         }
-     }
-
-     if (deadlock_reported)
-         printf("cpu%u: not deadlocked\n", CPU->id);
+     return _get_owner(mtx) == THREAD;
  }
…
  void mutex_lock(mutex_t *mtx)
  {
-     if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
-         assert(THREAD);
+     if (!THREAD) {
+         assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
          mtx->nesting++;
          return;
      }

-     if (mtx->type == MUTEX_ACTIVE || !THREAD) {
-         mutex_lock_active(mtx);
+     if (_get_owner(mtx) == THREAD) {
+         /* This will also detect nested locks on a non-recursive mutex. */
+         assert(mtx->type == MUTEX_RECURSIVE);
+         assert(mtx->nesting > 0);
+         mtx->nesting++;
          return;
      }

      semaphore_down(&mtx->sem);
-     mtx->owner = THREAD;
+
+     _set_owner(mtx, THREAD);
+     assert(mtx->nesting == 0);
      mtx->nesting = 1;
  }
…
  errno_t mutex_lock_timeout(mutex_t *mtx, uint32_t usec)
  {
-     if (usec != 0) {
-         assert(mtx->type != MUTEX_ACTIVE);
-         assert(THREAD);
+     if (!THREAD) {
+         assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
+         mtx->nesting++;
+         return EOK;
      }

-     if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
-         assert(THREAD);
+     if (_get_owner(mtx) == THREAD) {
+         assert(mtx->type == MUTEX_RECURSIVE);
+         assert(mtx->nesting > 0);
          mtx->nesting++;
          return EOK;
…

      errno_t rc = semaphore_down_timeout(&mtx->sem, usec);
-     if (rc == EOK) {
-         mtx->owner = THREAD;
-         mtx->nesting = 1;
-     }
-     return rc;
+     if (rc != EOK)
+         return rc;
+
+     _set_owner(mtx, THREAD);
+     assert(mtx->nesting == 0);
+     mtx->nesting = 1;
+     return EOK;
  }
…
  void mutex_unlock(mutex_t *mtx)
  {
-     if (mtx->type == MUTEX_RECURSIVE) {
-         assert(mtx->owner == THREAD);
-         if (--mtx->nesting > 0)
-             return;
-         mtx->owner = NULL;
+     if (--mtx->nesting > 0) {
+         assert(mtx->type == MUTEX_RECURSIVE);
+         return;
      }
+
+     assert(mtx->nesting == 0);
+
+     if (!THREAD)
+         return;
+
+     assert(_get_owner(mtx) == THREAD);
+     _set_owner(mtx, NULL);
+
      semaphore_up(&mtx->sem);
  }
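With the owner recorded through relaxed C11 atomics and nesting counted explicitly, re-acquisition by the owning thread never touches the semaphore, and mutex_locked() reduces to an ownership check. A small illustrative sketch of a recursive mutex under these semantics (hypothetical example_* names, not part of the changeset):

    /* Hypothetical usage of a recursive mutex with the new semantics. */
    static MUTEX_INITIALIZE(example_mtx, MUTEX_RECURSIVE);

    static void example_inner(void)
    {
        mutex_lock(&example_mtx);    /* owner == THREAD: nesting 1 -> 2, no semaphore */
        /* ... */
        mutex_unlock(&example_mtx);  /* nesting 2 -> 1, semaphore untouched */
    }

    static void example_outer(void)
    {
        mutex_lock(&example_mtx);    /* semaphore_down(), nesting = 1 */
        example_inner();
        mutex_unlock(&example_mtx);  /* nesting = 0, semaphore_up() */
    }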
kernel/generic/src/synch/spinlock.c
   * which are directly used to report deadlocks
   * via printf() (and recursively other functions).
-  * This conserns especially printf_lock and the
+  * This concerns especially printf_lock and the
   * framebuffer lock.
   *
kernel/generic/src/sysinfo/sysinfo.c
  /** Sysinfo lock */
- static MUTEX_INITIALIZE(sysinfo_lock, MUTEX_ACTIVE);
+ static IRQ_SPINLOCK_INITIALIZE(sysinfo_lock);

  /** Sysinfo item constructor
…
  {
      /* Protect sysinfo tree consistency */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
      }

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
  {
      /* Protect sysinfo tree consistency */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
      }

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
  {
      /* Protect sysinfo tree consistency */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
      }

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
  {
      /* Protect sysinfo tree consistency */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
      }

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
  {
      /* Protect sysinfo tree consistency */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
          printf("Could not set sysinfo item %s.\n", name);

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
  {
      /* Protect sysinfo tree consistency */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
      }

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
       * while we are dumping it
       */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);

      if (root == NULL)
…
      sysinfo_dump_internal(root, 0);

-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
       * are reading it.
       */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);
      ret = sysinfo_get_item(path, NULL, dry_run);
-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }

…
       * are reading it.
       */
-     mutex_lock(&sysinfo_lock);
+     irq_spinlock_lock(&sysinfo_lock, true);
      ret = sysinfo_get_keys(path, NULL, dry_run);
-     mutex_unlock(&sysinfo_lock);
+     irq_spinlock_unlock(&sysinfo_lock, true);
  }