Changes in / [c0d814a:a92290d] in mainline

Location: kernel/generic
Files: 11 edited
Legend:
  ' '  unmodified
  '+'  added
  '-'  removed
kernel/generic/include/console/console.h
(c0d814a → a92290d)

 extern sysarg_t sys_debug_console(void);

-extern void console_lock(void);
-extern void console_unlock(void);
-
 #endif /* KERN_CONSOLE_H_ */
kernel/generic/include/synch/condvar.h
(c0d814a → a92290d)

 /*
  * Copyright (c) 2001-2004 Jakub Jermar
- * Copyright (c) 2025 Jiří Zárevúcky
  * All rights reserved.
  *
…
     condvar_t name = CONDVAR_INITIALIZER(name)

+#ifdef CONFIG_SMP
+#define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
+    _condvar_wait_timeout_spinlock_impl((cv), (lock), (usec), (flags))
+#else
+#define _condvar_wait_timeout_spinlock(cv, lock, usec, flags) \
+    _condvar_wait_timeout_spinlock_impl((cv), NULL, (usec), (flags))
+#endif
+
 extern void condvar_initialize(condvar_t *cv);
 extern void condvar_signal(condvar_t *cv);
 extern void condvar_broadcast(condvar_t *cv);

-extern errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx);
-extern errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx);
-extern errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx);
-extern errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec);
-extern errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *mtx, uint32_t usec);
-extern errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *mtx, uint32_t usec);
+extern errno_t condvar_wait(condvar_t *cv, mutex_t *mtx);
+extern errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec);

-#define condvar_wait(cv, mtx) (_Generic((mtx), \
-    mutex_t *: __condvar_wait_mutex, \
-    spinlock_t *: __condvar_wait_spinlock, \
-    irq_spinlock_t *: __condvar_wait_irq_spinlock \
-)(cv, mtx))
-
-#define condvar_wait_timeout(cv, mtx, usec) (_Generic((mtx), \
-    mutex_t *: __condvar_wait_timeout_mutex, \
-    spinlock_t *: __condvar_wait_timeout_spinlock, \
-    irq_spinlock_t *: __condvar_wait_timeout_irq_spinlock \
-)(cv, mtx, usec))
+extern errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
+    uint32_t usec, int flags);
+extern errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv,
+    irq_spinlock_t *irq_lock, uint32_t usec, int flags);

 #endif
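For orientation, here is a minimal sketch of how the restored mutex-based interface is used; the names queue_mtx, queue_cv and queue_empty() are illustrative, not part of this changeset:

    static MUTEX_INITIALIZE(queue_mtx, MUTEX_PASSIVE);
    static CONDVAR_INITIALIZE(queue_cv);

    /* Consumer: sleeps until the producer signals the condition. */
    void consume(void)
    {
        mutex_lock(&queue_mtx);
        while (queue_empty())  /* re-check: wakeups may be spurious */
            condvar_wait(&queue_cv, &queue_mtx);
        /* ... take an item; the mutex is held again here ... */
        mutex_unlock(&queue_mtx);
    }

    /* Producer: makes the condition true, then wakes one waiter. */
    void produce(void)
    {
        mutex_lock(&queue_mtx);
        /* ... add an item ... */
        condvar_signal(&queue_cv);
        mutex_unlock(&queue_mtx);
    }

Spinlock-based waiting goes through the _condvar_wait_timeout_spinlock() macro above; note that the non-SMP variant passes NULL for the lock, presumably because uniprocessor builds reduce spinlock operations to no-ops.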
kernel/generic/include/synch/mutex.h
(c0d814a → a92290d)

     MUTEX_PASSIVE,
     MUTEX_RECURSIVE,
+    MUTEX_ACTIVE
 } mutex_type_t;
…
 typedef struct {
     mutex_type_t type;
-    int nesting;
     semaphore_t sem;
-    _Atomic(struct thread *) owner;
+    struct thread *owner;
+    unsigned nesting;
 } mutex_t;

 #define MUTEX_INITIALIZER(name, mtype) (mutex_t) { \
     .type = (mtype), \
-    .nesting = 0, \
     .sem = SEMAPHORE_INITIALIZER((name).sem, 1), \
     .owner = NULL, \
+    .nesting = 0, \
 }
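This restores the distinction between three mutex flavors: MUTEX_PASSIVE blocks the caller on the semaphore, MUTEX_RECURSIVE additionally lets the owning thread nest locks, and MUTEX_ACTIVE busy-waits, which makes it usable where sleeping is impossible (see the mutex.c diff below). A short sketch under that assumption; the early_stat_* names are hypothetical:

    /* An active mutex spins instead of blocking, so it can be taken
     * with interrupts disabled or before threading is up. */
    static MUTEX_INITIALIZE(early_stat_mtx, MUTEX_ACTIVE);
    static size_t early_stat_count = 0;

    void early_stat_inc(void)
    {
        ipl_t ipl = interrupts_disable();
        mutex_lock(&early_stat_mtx);  /* ends up in mutex_lock_active() */
        early_stat_count++;
        mutex_unlock(&early_stat_mtx);
        interrupts_restore(ipl);
    }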
kernel/generic/src/console/console.c
(c0d814a → a92290d)

  */

-#include <abi/kio.h>
-#include <arch.h>
 #include <assert.h>
-#include <atomic.h>
+#include <console/console.h>
 #include <console/chardev.h>
-#include <console/console.h>
+#include <sysinfo/sysinfo.h>
+#include <synch/waitq.h>
+#include <synch/spinlock.h>
+#include <typedefs.h>
+#include <ddi/irq.h>
 #include <ddi/ddi.h>
-#include <ddi/irq.h>
-#include <errno.h>
 #include <ipc/event.h>
 #include <ipc/irq.h>
+#include <arch.h>
+#include <panic.h>
+#include <stdio.h>
+#include <putchar.h>
+#include <atomic.h>
+#include <syscall/copy.h>
+#include <errno.h>
+#include <str.h>
+#include <stdatomic.h>
+#include <abi/kio.h>
 #include <mm/frame.h> /* SIZE2FRAMES */
-#include <panic.h>
-#include <preemption.h>
-#include <proc/thread.h>
-#include <putchar.h>
-#include <stdatomic.h>
-#include <stdio.h>
 #include <stdlib.h> /* malloc */
-#include <str.h>
-#include <synch/mutex.h>
-#include <synch/spinlock.h>
-#include <synch/waitq.h>
-#include <syscall/copy.h>
-#include <sysinfo/sysinfo.h>
-#include <typedefs.h>

 #define KIO_PAGES 8
…
 /** Kernel log initialized */
 static atomic_bool kio_inited = ATOMIC_VAR_INIT(false);
-
-/** A mutex for preventing interleaving of output lines from different threads.
- * May not be held in some circumstances, so locking of any internal shared
- * structures is still necessary.
- */
-static MUTEX_INITIALIZE(console_mutex, MUTEX_RECURSIVE);

 /** First kernel log characters */
…
 }

-/** Lock console output, ensuring that lines from different threads don't
- * interleave. Does nothing when preemption is disabled, so that debugging
- * and error printouts in sensitive areas still work.
- */
-void console_lock(void)
-{
-    if (!PREEMPTION_DISABLED)
-        mutex_lock(&console_mutex);
-}
-
-/** Unlocks console output. See console_lock()
- */
-void console_unlock(void)
-{
-    if (!PREEMPTION_DISABLED)
-        mutex_unlock(&console_mutex);
-}
-
 /** @}
  */
kernel/generic/src/log/log.c
(c0d814a → a92290d)

 void log_begin(log_facility_t fac, log_level_t level)
 {
-    console_lock();
     spinlock_lock(&log_lock);
     spinlock_lock(&kio_lock);
…
     kio_update(NULL);
     log_update(NULL);
-    console_unlock();
 }
kernel/generic/src/mm/frame.c
(c0d814a → a92290d)

  * available.
  */
-static IRQ_SPINLOCK_INITIALIZE(mem_avail_lock);
+static MUTEX_INITIALIZE(mem_avail_mtx, MUTEX_ACTIVE);
 static CONDVAR_INITIALIZE(mem_avail_cv);
 static size_t mem_avail_req = 0;  /**< Number of frames requested. */
…
 #endif

-    /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
-    irq_spinlock_lock(&mem_avail_lock, true);
+    /*
+     * Since the mem_avail_mtx is an active mutex, we need to
+     * disable interrupts to prevent deadlock with TLB shootdown.
+     */
+    ipl_t ipl = interrupts_disable();
+    mutex_lock(&mem_avail_mtx);

     if (mem_avail_req > 0)
…
     while (gen == mem_avail_gen)
-        condvar_wait(&mem_avail_cv, &mem_avail_lock);
-
-    irq_spinlock_unlock(&mem_avail_lock, true);
+        condvar_wait(&mem_avail_cv, &mem_avail_mtx);
+
+    mutex_unlock(&mem_avail_mtx);
+    interrupts_restore(ipl);

 #ifdef CONFIG_DEBUG
…
     irq_spinlock_unlock(&zones.lock, true);

-    /* Signal that some memory has been freed. */
-
-    /* Disabled interrupts needed to prevent deadlock with TLB shootdown. */
-    irq_spinlock_lock(&mem_avail_lock, true);
+    /*
+     * Signal that some memory has been freed.
+     * Since the mem_avail_mtx is an active mutex,
+     * we need to disable interrupts to prevent deadlock
+     * with TLB shootdown.
+     */
+
+    ipl_t ipl = interrupts_disable();
+    mutex_lock(&mem_avail_mtx);

     if (mem_avail_req > 0)
…
     }

-    irq_spinlock_unlock(&mem_avail_lock, true);
+    mutex_unlock(&mem_avail_mtx);
+    interrupts_restore(ipl);

     if (!(flags & FRAME_NO_RESERVE))
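The waiter/signaler pairing above uses a generation counter so that a wakeup arriving between the availability check and the sleep is not lost, and so that spurious wakeups are tolerated. A condensed sketch of both sides; the parts elided in the diff are approximated here (the exact bookkeeping around mem_avail_req is not shown above):

    /* Waiter: allocation failed, wait for someone to free frames. */
    ipl_t ipl = interrupts_disable();  /* active mutex vs. TLB shootdown */
    mutex_lock(&mem_avail_mtx);

    mem_avail_req += count;            /* announce the outstanding request */
    size_t gen = mem_avail_gen;
    while (gen == mem_avail_gen)       /* loop until the generation moves */
        condvar_wait(&mem_avail_cv, &mem_avail_mtx);

    mutex_unlock(&mem_avail_mtx);
    interrupts_restore(ipl);

    /* Signaler: frames were freed; bump the generation and wake everyone. */
    ipl = interrupts_disable();
    mutex_lock(&mem_avail_mtx);
    if (mem_avail_req > 0) {
        mem_avail_gen++;
        condvar_broadcast(&mem_avail_cv);
    }
    mutex_unlock(&mem_avail_mtx);
    interrupts_restore(ipl);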
kernel/generic/src/printf/vprintf.c
(c0d814a → a92290d)

  */

-#include <arch/asm.h>
-#include <console/console.h>
 #include <print.h>
 #include <printf/printf_core.h>
 #include <putchar.h>
+#include <synch/spinlock.h>
+#include <arch/asm.h>
+#include <typedefs.h>
 #include <str.h>
-#include <synch/spinlock.h>
-#include <typedefs.h>

 static int vprintf_str_write(const char *str, size_t size, void *data)
…
     char32_t uc;

-    console_lock();
-
     while ((uc = str_decode(str, &offset, STR_NO_LIMIT)) != 0) {
         putuchar(uc);
…

     putuchar('\n');
-
-    console_unlock();
     return chars;
 }
…
     };

-    console_lock();
-    int ret = printf_core(fmt, &ps, ap);
-    console_unlock();
-    return ret;
+    return printf_core(fmt, &ps, ap);
 }
kernel/generic/src/synch/condvar.c
(c0d814a → a92290d)

  * @return See comment for waitq_sleep_timeout().
  */
-errno_t __condvar_wait_timeout_mutex(condvar_t *cv, mutex_t *mtx, uint32_t usec)
+errno_t condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec)
 {
     wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
…
 }

-errno_t __condvar_wait_mutex(condvar_t *cv, mutex_t *mtx)
+errno_t condvar_wait(condvar_t *cv, mutex_t *mtx)
 {
     wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
…
 }

-/** Same as __condvar_wait_timeout_mutex(), except for spinlock_t. */
-errno_t __condvar_wait_timeout_spinlock(condvar_t *cv, spinlock_t *lock,
-    uint32_t usec)
+/** Wait for the condition to become true with a locked spinlock.
+ *
+ * The function is not aware of irq_spinlock. Therefore do not even
+ * try passing irq_spinlock_t to it. Use _condvar_wait_timeout_irq_spinlock()
+ * instead.
+ *
+ * @param cv    Condition variable.
+ * @param lock  Locked spinlock.
+ * @param usec  Timeout value in microseconds.
+ * @param flags Select mode of operation.
+ *
+ * For exact description of meaning of possible combinations of usec and flags,
+ * see comment for waitq_sleep_timeout(). Note that when
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always returned.
+ *
+ * @return See comment for waitq_sleep_timeout().
+ */
+errno_t _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
+    uint32_t usec, int flags)
 {
     wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
…
     spinlock_unlock(lock);

-    errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec,
-        SYNCH_FLAGS_NON_BLOCKING, guard);
+    errno_t rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, guard);

     spinlock_lock(lock);
…
 }

-errno_t __condvar_wait_spinlock(condvar_t *cv, spinlock_t *mtx)
-{
-    wait_guard_t guard = waitq_sleep_prepare(&cv->wq);
-
-    /* Unlock only after the waitq is locked so we don't miss a wakeup. */
-    spinlock_unlock(mtx);
-
-    errno_t rc = waitq_sleep_unsafe(&cv->wq, guard);
-
-    spinlock_lock(mtx);
-    return rc;
-}
-
-/** Same as __condvar_wait_timeout_mutex(), except for irq_spinlock_t. */
-errno_t __condvar_wait_timeout_irq_spinlock(condvar_t *cv,
-    irq_spinlock_t *irq_lock, uint32_t usec)
+/** Wait for the condition to become true with a locked irq spinlock.
+ *
+ * @param cv       Condition variable.
+ * @param lock     Locked irq spinlock.
+ * @param usec     Timeout value in microseconds.
+ * @param flags    Select mode of operation.
+ *
+ * For exact description of meaning of possible combinations of usec and flags,
+ * see comment for waitq_sleep_timeout(). Note that when
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always returned.
+ *
+ * @return See comment for waitq_sleep_timeout().
+ */
+errno_t _condvar_wait_timeout_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock,
+    uint32_t usec, int flags)
 {
     errno_t rc;
…
  * running) and there is no danger of a deadlock.
  */
-    rc = __condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec);
-
-    irq_lock->guard = guard;
-    irq_lock->ipl = ipl;
-
-    return rc;
-}
-
-/** Same as __condvar_wait_mutex(), except for irq_spinlock_t. */
-errno_t __condvar_wait_irq_spinlock(condvar_t *cv, irq_spinlock_t *irq_lock)
-{
-    errno_t rc;
-    /* Save spinlock's state so we can restore it correctly later on. */
-    ipl_t ipl = irq_lock->ipl;
-    bool guard = irq_lock->guard;
-
-    irq_lock->guard = false;
-
-    rc = __condvar_wait_spinlock(cv, &irq_lock->lock);
+    rc = _condvar_wait_timeout_spinlock(cv, &irq_lock->lock, usec, flags);

     irq_lock->guard = guard;
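A usage sketch for the irq-spinlock variant; dev_lock, dev_cv and the 10 ms timeout are hypothetical, and SYNCH_FLAGS_NONE is assumed to be the usual "no special behavior" flag value:

    static IRQ_SPINLOCK_INITIALIZE(dev_lock);
    static CONDVAR_INITIALIZE(dev_cv);

    errno_t wait_for_device_event(void)
    {
        irq_spinlock_lock(&dev_lock, true);

        /* The helper saves and clears the lock's guard/ipl bookkeeping,
         * waits on the inner plain spinlock, then restores the state. */
        errno_t rc = _condvar_wait_timeout_irq_spinlock(&dev_cv, &dev_lock,
            10000, SYNCH_FLAGS_NONE);

        irq_spinlock_unlock(&dev_lock, true);
        return rc;
    }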
kernel/generic/src/synch/mutex.c
(c0d814a → a92290d)

 /*
  * Copyright (c) 2001-2004 Jakub Jermar
- * Copyright (c) 2025 Jiří Zárevúcky
+ * Copyright (c) 2023 Jiří Zárevúcky
  * All rights reserved.
  *
…
 #include <assert.h>
 #include <errno.h>
-#include <proc/thread.h>
-#include <stdatomic.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
+#include <arch.h>
+#include <stacktrace.h>
+#include <cpu.h>
+#include <proc/thread.h>

 /** Initialize mutex.
…
 }

-/** A race in mtx->owner access is unavoidable, so we have to make
- * access to it formally atomic. These are convenience functions to
- * read/write the variable without memory barriers, since we don't need
- * them and C11 atomics default to the strongest possible memory ordering
- * by default, which is utterly ridiculous.
- */
-static inline thread_t *_get_owner(mutex_t *mtx)
-{
-    return atomic_load_explicit(&mtx->owner, memory_order_relaxed);
-}
-
-/** Counterpart to _get_owner(). */
-static inline void _set_owner(mutex_t *mtx, thread_t *owner)
-{
-    atomic_store_explicit(&mtx->owner, owner, memory_order_relaxed);
-}
-
 /** Find out whether the mutex is currently locked.
  *
…
  */
 bool mutex_locked(mutex_t *mtx)
 {
-    if (!THREAD)
-        return mtx->nesting > 0;
+    errno_t rc = semaphore_trydown(&mtx->sem);
+    if (rc == EOK) {
+        semaphore_up(&mtx->sem);
+    }
+    return rc != EOK;
+}

-    return _get_owner(mtx) == THREAD;
+static void mutex_lock_active(mutex_t *mtx)
+{
+    assert((mtx->type == MUTEX_ACTIVE) || !THREAD);
+
+    const unsigned deadlock_treshold = 100000000;
+    unsigned int cnt = 0;
+    bool deadlock_reported = false;
+
+    while (semaphore_trydown(&mtx->sem) != EOK) {
+        if (cnt++ > deadlock_treshold) {
+            printf("cpu%u: looping on active mutex %p\n", CPU->id, mtx);
+            stack_trace();
+            cnt = 0;
+            deadlock_reported = true;
+        }
+    }
+
+    if (deadlock_reported)
+        printf("cpu%u: not deadlocked\n", CPU->id);
 }
…
 void mutex_lock(mutex_t *mtx)
 {
-    if (!THREAD) {
-        assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
+    if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
+        assert(THREAD);
         mtx->nesting++;
         return;
     }

-    if (_get_owner(mtx) == THREAD) {
-        /* This will also detect nested locks on a non-recursive mutex. */
-        assert(mtx->type == MUTEX_RECURSIVE);
-        assert(mtx->nesting > 0);
-        mtx->nesting++;
+    if (mtx->type == MUTEX_ACTIVE || !THREAD) {
+        mutex_lock_active(mtx);
         return;
     }

     semaphore_down(&mtx->sem);
-
-    _set_owner(mtx, THREAD);
-    assert(mtx->nesting == 0);
+    mtx->owner = THREAD;
     mtx->nesting = 1;
 }
…
 errno_t mutex_lock_timeout(mutex_t *mtx, uint32_t usec)
 {
-    if (!THREAD) {
-        assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
-        mtx->nesting++;
-        return EOK;
+    if (usec != 0) {
+        assert(mtx->type != MUTEX_ACTIVE);
+        assert(THREAD);
     }

-    if (_get_owner(mtx) == THREAD) {
-        assert(mtx->type == MUTEX_RECURSIVE);
-        assert(mtx->nesting > 0);
+    if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
+        assert(THREAD);
         mtx->nesting++;
         return EOK;
     }

     errno_t rc = semaphore_down_timeout(&mtx->sem, usec);
-    if (rc != EOK)
-        return rc;
-
-    _set_owner(mtx, THREAD);
-    assert(mtx->nesting == 0);
-    mtx->nesting = 1;
-    return EOK;
+    if (rc == EOK) {
+        mtx->owner = THREAD;
+        mtx->nesting = 1;
+    }
+    return rc;
 }
…
 void mutex_unlock(mutex_t *mtx)
 {
-    if (--mtx->nesting > 0) {
-        assert(mtx->type == MUTEX_RECURSIVE);
-        return;
+    if (mtx->type == MUTEX_RECURSIVE) {
+        assert(mtx->owner == THREAD);
+        if (--mtx->nesting > 0)
+            return;
+        mtx->owner = NULL;
     }
-
-    assert(mtx->nesting == 0);
-
-    if (!THREAD)
-        return;
-
-    assert(_get_owner(mtx) == THREAD);
-    _set_owner(mtx, NULL);
-
     semaphore_up(&mtx->sem);
 }
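To make the restored owner/nesting bookkeeping concrete, a small sketch of recursive locking; the tree_* names are hypothetical:

    static MUTEX_INITIALIZE(tree_mtx, MUTEX_RECURSIVE);

    static void subtree_fixup(void)
    {
        mutex_lock(&tree_mtx);   /* owner == THREAD: nesting 1 -> 2 */
        /* ... */
        mutex_unlock(&tree_mtx); /* nesting 2 -> 1, semaphore still held */
    }

    void tree_insert(void)
    {
        mutex_lock(&tree_mtx);   /* semaphore taken, owner = THREAD, nesting = 1 */
        subtree_fixup();
        mutex_unlock(&tree_mtx); /* nesting 0, owner cleared, semaphore released */
    }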
kernel/generic/src/synch/spinlock.c
(c0d814a → a92290d)

  * which are directly used to report deadlocks
  * via printf() (and recursively other functions).
- * This concerns especially printf_lock and the
+ * This conserns especially printf_lock and the
  * framebuffer lock.
  *
kernel/generic/src/sysinfo/sysinfo.c
(c0d814a → a92290d)

 /** Sysinfo lock */
-static IRQ_SPINLOCK_INITIALIZE(sysinfo_lock);
+static MUTEX_INITIALIZE(sysinfo_lock, MUTEX_ACTIVE);

 /** Sysinfo item constructor
…
 {
     /* Protect sysinfo tree consistency */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     }

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
 {
     /* Protect sysinfo tree consistency */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     }

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
 {
     /* Protect sysinfo tree consistency */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     }

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
 {
     /* Protect sysinfo tree consistency */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     }

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
 {
     /* Protect sysinfo tree consistency */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     printf("Could not set sysinfo item %s.\n", name);

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
 {
     /* Protect sysinfo tree consistency */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     }

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
  * while we are dumping it
  */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);

     if (root == NULL)
…
     sysinfo_dump_internal(root, 0);

-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
  * are reading it.
  */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);
     ret = sysinfo_get_item(path, NULL, dry_run);
-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
…
  * are reading it.
  */
-    irq_spinlock_lock(&sysinfo_lock, true);
+    mutex_lock(&sysinfo_lock);
     ret = sysinfo_get_keys(path, NULL, dry_run);
-    irq_spinlock_unlock(&sysinfo_lock, true);
+    mutex_unlock(&sysinfo_lock);
 }
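Replacing the irq spinlock with an active mutex keeps sysinfo usable from contexts that must not sleep; per the mutex.c change above, mutex_lock() on a MUTEX_ACTIVE mutex (or with THREAD == NULL) busy-waits instead of blocking. A hypothetical early-boot registration relying on that ("hw.cpus" and config.cpu_count are illustrative):

    void early_platform_sysinfo(void)
    {
        /* May run before the scheduler: sysinfo_lock is MUTEX_ACTIVE,
         * so this spins rather than trying to put a thread to sleep. */
        sysinfo_set_item_val("hw.cpus", NULL, config.cpu_count);
    }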