Changes in kernel/generic/src/syscall/syscall.c [2fa10f6:5e984f2] in mainline
File: 1 edited
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/syscall/syscall.c
kernel/generic/src/syscall/syscall.c (diff r2fa10f6 → r5e984f2)

      unative_t a4, unative_t a5, unative_t a6, unative_t id)
  {
+     unative_t rc;
+     ipl_t ipl;
+
      /* Do userpace accounting */
-     irq_spinlock_lock(&THREAD->lock, true);
+     ipl = interrupts_disable();
+     spinlock_lock(&THREAD->lock);
      thread_update_accounting(true);
-     irq_spinlock_unlock(&THREAD->lock, true);
-
+     spinlock_unlock(&THREAD->lock);
+     interrupts_restore(ipl);
+
  #ifdef CONFIG_UDEBUG
      /*
       * Early check for undebugged tasks. We do not lock anything as this
       * test need not be precise in either direction.
-      *
       */
-     if (THREAD->udebug.active)
+     if (THREAD->udebug.active) {
          udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, 0, false);
+     }
  #endif

-     unative_t rc;
      if (id < SYSCALL_END) {
          rc = syscall_table[id](a1, a2, a3, a4, a5, a6);
  …
      if (THREAD->udebug.active) {
          udebug_syscall_event(a1, a2, a3, a4, a5, a6, id, rc, true);

          /*
           * Stopping point needed for tasks that only invoke
  …
      }
  #endif

      /* Do kernel accounting */
-     irq_spinlock_lock(&THREAD->lock, true);
+     ipl = interrupts_disable();
+     spinlock_lock(&THREAD->lock);
      thread_update_accounting(false);
-     irq_spinlock_unlock(&THREAD->lock, true);
+     spinlock_unlock(&THREAD->lock);
+     interrupts_restore(ipl);

      return rc;
  …
      (syshandler_t) sys_physmem_map,
      (syshandler_t) sys_iospace_enable,
+     (syshandler_t) sys_preempt_control,

      /* Sysinfo syscalls */
Note:
See TracChangeset
for help on using the changeset viewer.