Changeset 9f491d7 in mainline
- Timestamp:
- 2008-06-16T21:42:48Z (17 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 4a23cb6
- Parents:
- ad2e39b
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/ia32/include/atomic.h
rad2e39b r9f491d7 42 42 static inline void atomic_inc(atomic_t *val) { 43 43 #ifdef CONFIG_SMP 44 asm volatile ("lock incl %0\n" : " =m" (val->count));44 asm volatile ("lock incl %0\n" : "+m" (val->count)); 45 45 #else 46 asm volatile ("incl %0\n" : " =m" (val->count));46 asm volatile ("incl %0\n" : "+m" (val->count)); 47 47 #endif /* CONFIG_SMP */ 48 48 } … … 50 50 static inline void atomic_dec(atomic_t *val) { 51 51 #ifdef CONFIG_SMP 52 asm volatile ("lock decl %0\n" : " =m" (val->count));52 asm volatile ("lock decl %0\n" : "+m" (val->count)); 53 53 #else 54 asm volatile ("decl %0\n" : " =m" (val->count));54 asm volatile ("decl %0\n" : "+m" (val->count)); 55 55 #endif /* CONFIG_SMP */ 56 56 } … … 62 62 asm volatile ( 63 63 "lock xaddl %1, %0\n" 64 : " =m" (val->count), "+r" (r)64 : "+m" (val->count), "+r" (r) 65 65 ); 66 66 … … 74 74 asm volatile ( 75 75 "lock xaddl %1, %0\n" 76 : " =m" (val->count), "+r"(r)76 : "+m" (val->count), "+r"(r) 77 77 ); 78 78 … … 80 80 } 81 81 82 #define atomic_preinc(val) (atomic_postinc(val) +1)83 #define atomic_predec(val) (atomic_postdec(val) -1)82 #define atomic_preinc(val) (atomic_postinc(val) + 1) 83 #define atomic_predec(val) (atomic_postdec(val) - 1) 84 84 85 85 static inline uint32_t test_and_set(atomic_t *val) { … … 89 89 "movl $1, %0\n" 90 90 "xchgl %0, %1\n" 91 : "=r" (v)," =m" (val->count)91 : "=r" (v),"+m" (val->count) 92 92 ); 93 93 … … 102 102 preemption_disable(); 103 103 asm volatile ( 104 "0: ;"104 "0:\n" 105 105 #ifdef CONFIG_HT 106 "pause ;" /* Pentium 4's HT love this instruction */106 "pause\n" /* Pentium 4's HT love this instruction */ 107 107 #endif 108 "mov %0, %1 ;"109 "testl %1, %1 ;"110 "jnz 0b ;" /* Lightweight looping on locked spinlock */108 "mov %0, %1\n" 109 "testl %1, %1\n" 110 "jnz 0b\n" /* lightweight looping on locked spinlock */ 111 111 112 "incl %1 ;" /* now use the atomic operation */113 "xchgl %0, %1 ;"114 "testl %1, %1 ;"115 "jnz 0b ;"116 : " =m"(val->count),"=r"(tmp)117 112 "incl %1\n" /* now use the atomic operation */ 113 "xchgl %0, %1\n" 114 "testl %1, %1\n" 115 "jnz 0b\n" 116 : "+m" (val->count), "=r"(tmp) 117 ); 118 118 /* 119 119 * Prevent critical section code from bleeding out this way up. -
uspace/lib/libc/arch/ia32/include/atomic.h
rad2e39b r9f491d7 37 37 38 38 static inline void atomic_inc(atomic_t *val) { 39 asm volatile ("lock incl %0\n" : " =m" (val->count));39 asm volatile ("lock incl %0\n" : "+m" (val->count)); 40 40 } 41 41 42 42 static inline void atomic_dec(atomic_t *val) { 43 asm volatile ("lock decl %0\n" : " =m" (val->count));43 asm volatile ("lock decl %0\n" : "+m" (val->count)); 44 44 } 45 45 … … 51 51 "movl $1, %0\n" 52 52 "lock xaddl %0, %1\n" 53 : "=r" (r), " =m" (val->count)53 : "=r" (r), "+m" (val->count) 54 54 ); 55 55 … … 64 64 "movl $-1, %0\n" 65 65 "lock xaddl %0, %1\n" 66 : "=r" (r), " =m" (val->count)66 : "=r" (r), "+m" (val->count) 67 67 ); 68 68 … … 70 70 } 71 71 72 #define atomic_preinc(val) (atomic_postinc(val) +1)73 #define atomic_predec(val) (atomic_postdec(val) -1)72 #define atomic_preinc(val) (atomic_postinc(val) + 1) 73 #define atomic_predec(val) (atomic_postdec(val) - 1) 74 74 75 75 #endif
Note:
See TracChangeset
for help on using the changeset viewer.