Changeset b17518e in mainline
- Timestamp: 2012-08-05T01:18:21Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: bc216a0
- Parents: f1c7755
- Location: kernel
- Files: 3 edited
Legend:
- Unmodified: context lines, prefixed with a single space
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
- "..." marks an omitted unchanged stretch of the file
kernel/arch/amd64/include/atomic.h
Changes from f1c7755 to b17518e:

-#define _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, prefix) \
+#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
 	asm volatile ( \
 		prefix " cmpxchgq %[newval], %[ptr]\n" \
...
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "lock\n");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
 	return old_val;
 }
...
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
 	return old_val;
 }

-/** Atomicaly sets *ptr to new_val and returns the previous value. */
-NO_TRACE static inline void * atomic_swap_ptr(void **pptr, void *new_val)
-{
-	void *new_in_old_out = new_val;
-
-	asm volatile (
-		"xchgq %[val], %[pptr]\n"
-		: [val] "+r" (new_in_old_out),
-		  [pptr] "+m" (*pptr)
-	);
-
-	return new_in_old_out;
+#define _atomic_swap_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) new_in_old_out = new_val; \
+	asm volatile ( \
+		"xchgq %[val], %[p_ptr]\n" \
+		: [val] "+r" (new_in_old_out), \
+		  [p_ptr] "+m" (*pptr) \
+	); \
+	\
+	new_in_old_out; \
+})
+
+/*
+ * Issuing a xchg instruction always implies lock prefix semantics.
+ * Therefore, it is cheaper to use a cmpxchg without a lock prefix
+ * in a loop.
+ */
+#define _atomic_swap_local_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) exp_val; \
+	typeof(*(pptr)) old_val; \
+	\
+	do { \
+		exp_val = *pptr; \
+		_atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
+	} while (old_val != exp_val); \
+	\
+	old_val; \
+})
+
+/** Atomicaly sets *ptr to val and returns the previous value. */
+NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
+{
+	return _atomic_swap_impl(pptr, val);
 }
...
  * NOT atomic wrt to other cpus.
  */
-NO_TRACE static inline void * atomic_swap_ptr_local(void **pptr, void *new_val)
-{
-	/*
-	 * Issuing a xchg instruction always implies lock prefix semantics.
-	 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
-	 * in a loop.
-	 */
-	void *exp_val;
-	void *old_val;
-
-	do {
-		exp_val = *pptr;
-		old_val = atomic_cas_ptr_local(pptr, exp_val, new_val);
-	} while (old_val != exp_val);
-
-	return old_val;
+NO_TRACE static inline void * atomic_set_return_ptr_local(
+    void **pptr, void *new_val)
+{
+	return _atomic_swap_local_impl(pptr, new_val);
+}
+
+/** Atomicaly sets *ptr to val and returns the previous value. */
+NO_TRACE static inline native_t atomic_set_return_native_t(
+    native_t *p, native_t val)
+{
+	return _atomic_swap_impl(p, val);
+}
+
+/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
+ *
+ * This function is only atomic wrt to local interrupts and it is
+ * NOT atomic wrt to other cpus.
+ */
+NO_TRACE static inline native_t atomic_set_return_native_t_local(
+    native_t *p, native_t new_val)
+{
+	return _atomic_swap_local_impl(p, new_val);
 }
...
 #undef _atomic_cas_ptr_impl
+#undef _atomic_swap_impl
+#undef _atomic_swap_local_impl

 #endif
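The change replaces the pointer-only atomic_swap_ptr() inline with the type-generic statement-expression macro _atomic_swap_impl, so a single body can back both the void * and native_t wrappers. Below is a minimal user-space sketch of that pattern, assuming an LP64 x86-64 target and GCC or Clang; demo_swap and the use of long are illustrative stand-ins, not part of the kernel header.

    #include <stdio.h>

    /*
     * Type-generic exchange via a GCC statement expression, mirroring the
     * amd64 _atomic_swap_impl above. xchg with a memory operand is
     * implicitly locked, so no explicit "lock" prefix is needed.
     */
    #define demo_swap(pptr, new_val) \
    ({ \
        typeof(*(pptr)) new_in_old_out = (new_val); \
        asm volatile ( \
            "xchgq %[val], %[p_ptr]\n" \
            : [val] "+r" (new_in_old_out), \
              [p_ptr] "+m" (*(pptr)) \
        ); \
        new_in_old_out; /* the value of the whole ({ ... }) expression */ \
    })

    int main(void)
    {
        long counter = 5;
        long old = demo_swap(&counter, 9L);
        printf("old=%ld new=%ld\n", old, counter);   /* prints: old=5 new=9 */
        return 0;
    }

The statement expression is what lets a macro both execute the asm and yield the previous value, something a plain expression macro cannot do, and typeof() keeps it working for any scalar type of the right width.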
kernel/arch/ia32/include/atomic.h
Changes from f1c7755 to b17518e:

-#define _atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, prefix) \
+#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
 	asm volatile ( \
 		prefix " cmpxchgl %[newval], %[ptr]\n" \
...
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "lock\n");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
 	return old_val;
 }
...
 {
 	void *old_val;
-	_atomic_cas_ptr_impl(pptr, exp_val, new_val, old_val, "");
+	_atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
 	return old_val;
 }

-/** Atomicaly sets *ptr to new_val and returns the previous value. */
-NO_TRACE static inline void * atomic_swap_ptr(void **pptr, void *new_val)
-{
-	void *new_in_old_out = new_val;
-
-	asm volatile (
-		"xchgl %[val], %[pptr]\n"
-		: [val] "+r" (new_in_old_out),
-		  [pptr] "+m" (*pptr)
-	);
-
-	return new_in_old_out;
+#define _atomic_swap_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) new_in_old_out = new_val; \
+	asm volatile ( \
+		"xchgl %[val], %[p_ptr]\n" \
+		: [val] "+r" (new_in_old_out), \
+		  [p_ptr] "+m" (*pptr) \
+	); \
+	\
+	new_in_old_out; \
+})
+
+/*
+ * Issuing a xchg instruction always implies lock prefix semantics.
+ * Therefore, it is cheaper to use a cmpxchg without a lock prefix
+ * in a loop.
+ */
+#define _atomic_swap_local_impl(pptr, new_val) \
+({ \
+	typeof(*(pptr)) exp_val; \
+	typeof(*(pptr)) old_val; \
+	\
+	do { \
+		exp_val = *pptr; \
+		_atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
+	} while (old_val != exp_val); \
+	\
+	old_val; \
+})
+
+/** Atomicaly sets *ptr to val and returns the previous value. */
+NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
+{
+	return _atomic_swap_impl(pptr, val);
 }
...
  * NOT atomic wrt to other cpus.
  */
-NO_TRACE static inline void * atomic_swap_ptr_local(void **pptr, void *new_val)
-{
-	/*
-	 * Issuing a xchg instruction always implies lock prefix semantics.
-	 * Therefore, it is cheaper to use a cmpxchg without a lock prefix
-	 * in a loop.
-	 */
-	void *exp_val;
-	void *old_val;
-
-	do {
-		exp_val = *pptr;
-		old_val = atomic_cas_ptr_local(pptr, exp_val, new_val);
-	} while (old_val != exp_val);
-
-	return old_val;
-}
+NO_TRACE static inline void * atomic_set_return_ptr_local(
+    void **pptr, void *new_val)
+{
+	return _atomic_swap_local_impl(pptr, new_val);
+}
+
+/** Atomicaly sets *ptr to val and returns the previous value. */
+NO_TRACE static inline native_t atomic_set_return_native_t(
+    native_t *p, native_t val)
+{
+	return _atomic_swap_impl(p, val);
+}
+
+/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
+ *
+ * This function is only atomic wrt to local interrupts and it is
+ * NOT atomic wrt to other cpus.
+ */
+NO_TRACE static inline native_t atomic_set_return_native_t_local(
+    native_t *p, native_t new_val)
+{
+	return _atomic_swap_local_impl(p, new_val);
+}
+
...
 #undef _atomic_cas_ptr_impl
+#undef _atomic_swap_impl
+#undef _atomic_swap_local_impl

 #endif
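The ia32 version differs from the amd64 one only in the 32-bit mnemonics (cmpxchgl, xchgl). The in-code comment captures the design choice behind _atomic_swap_local_impl: since xchg with a memory operand always behaves as if it carried a lock prefix, the interrupt-only variant is cheaper as a retry loop around an unlocked cmpxchg. The sketch below shows that loop; it substitutes GCC's __sync_val_compare_and_swap() builtin for the kernel's _atomic_cas_impl (whose full constraint list is not visible in this changeset), so unlike the kernel variant it happens to be SMP-safe as well.

    #include <assert.h>

    /*
     * Swap built from a compare-and-swap retry loop, mirroring
     * _atomic_swap_local_impl. The kernel uses an unlocked cmpxchg here;
     * this sketch uses the (locked) __sync builtin only to stay
     * self-contained and portable.
     */
    #define demo_swap_local(pptr, new_val) \
    ({ \
        typeof(*(pptr)) exp_val; \
        typeof(*(pptr)) old_val; \
        \
        do { \
            exp_val = *(pptr); \
            old_val = __sync_val_compare_and_swap((pptr), exp_val, (new_val)); \
        } while (old_val != exp_val); \
        \
        old_val; \
    })

    int main(void)
    {
        int flag = 1;
        assert(demo_swap_local(&flag, 0) == 1);   /* returns the previous value */
        assert(flag == 0);                        /* new value was stored */
        return 0;
    }

The loop retries until the CAS observes exactly the value it read, at which point the store has happened and the read value is the swap's result.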
kernel/test/atomic/atomic1.c
Changes from f1c7755 to b17518e:

 	ptr = 0;
-	if (atomic_swap_ptr(&ptr, a_ptr) != 0)
-		return "Failed atomic_swap_ptr()";
-	if (atomic_swap_ptr_local(&ptr, 0) != a_ptr || ptr != 0)
-		return "Failed atomic_swap_ptr_local()";
+	if (atomic_set_return_ptr(&ptr, a_ptr) != 0)
+		return "Failed atomic_set_return_ptr()";
+	if (atomic_set_return_ptr_local(&ptr, 0) != a_ptr || ptr != 0)
+		return "Failed atomic_set_return_ptr_local()";

 	return NULL;
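The test change only tracks the renames. For reference, the semantics it checks, that an exchange stores the new value and returns the previous one, match C11's atomic_exchange(); this standalone analogue is illustrative only and not part of the changeset:

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
        int x;
        void *a_ptr = &x;
        _Atomic(void *) ptr = NULL;

        /* analogue of the atomic_set_return_ptr() check */
        assert(atomic_exchange(&ptr, a_ptr) == NULL);

        /* analogue of the atomic_set_return_ptr_local() check */
        assert(atomic_exchange(&ptr, NULL) == a_ptr);
        assert(atomic_load(&ptr) == NULL);

        return 0;
    }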