Changeset 2708f6a in mainline for kernel/arch/ia32/include/atomic.h


Timestamp:
2012-11-07T10:59:34Z (12 years ago)
Author:
Adam Hraska <adam.hraska+hos@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
cc106e4
Parents:
c8fccf5
Message:

Removed ia32 and amd64 specific atomic compare-and-swap operations (use compiler builtins instead).
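
The changeset message does not name the specific builtins adopted in place of the removed code. Purely as an illustrative sketch, the removed pointer CAS shown in the diff below maps naturally onto the GCC/Clang __atomic_compare_exchange_n builtin; the function name and the memory orders here are assumptions, not the actual HelenOS replacement:

    /* Hypothetical sketch of a builtin-based CAS; not the code this changeset introduces. */
    static inline void *cas_ptr_builtin(void **pptr, void *exp_val, void *new_val)
    {
            /*
             * On failure the builtin writes the observed value back into exp_val,
             * so the previous value of *pptr can be returned in either case,
             * matching the contract of the removed atomic_cas_ptr().
             */
            __atomic_compare_exchange_n(pptr, &exp_val, new_val,
                false /* strong CAS */, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
            return exp_val;
    }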

File:
1 edited

  • kernel/arch/ia32/include/atomic.h

    rc8fccf5 r2708f6a
     }
     
    -
    -#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
    -        asm volatile ( \
    -                prefix " cmpxchgl %[newval], %[ptr]\n" \
    -                : /* Output operands. */ \
    -                /* Old/current value is returned in eax. */ \
    -                [oldval] "=a" (old_val), \
    -                /* (*ptr) will be read and written to, hence "+" */ \
    -                [ptr] "+m" (*pptr) \
    -                : /* Input operands. */ \
    -                /* Expected value must be in eax. */ \
    -                [expval] "a" (exp_val), \
    -                /* The new value may be in any register. */ \
    -                [newval] "r" (new_val) \
    -                : "memory" \
    -        )
    -
    -/** Atomically compares and swaps the pointer at pptr. */
    -NO_TRACE static inline void * atomic_cas_ptr(void **pptr,
    -        void *exp_val, void *new_val)
    -{
    -        void *old_val;
    -        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "lock\n");
    -        return old_val;
    -}
    -
    -/** Compare-and-swap of a pointer that is atomic with respect to the local cpu's interrupts.
    - *
    - * This function is NOT smp safe and is not atomic with respect to other cpus.
    - */
    -NO_TRACE static inline void * atomic_cas_ptr_local(void **pptr,
    -        void *exp_val, void *new_val)
    -{
    -        void *old_val;
    -        _atomic_cas_impl(pptr, exp_val, new_val, old_val, "");
    -        return old_val;
    -}
    -
    -
    -#define _atomic_swap_impl(pptr, new_val) \
    -({ \
    -        typeof(*(pptr)) new_in_old_out = new_val; \
    -        asm volatile ( \
    -                "xchgl %[val], %[p_ptr]\n" \
    -                : [val] "+r" (new_in_old_out), \
    -                  [p_ptr] "+m" (*pptr) \
    -        ); \
    -        \
    -        new_in_old_out; \
    -})
    -
    -/*
    - * Issuing an xchg instruction always implies lock prefix semantics.
    - * Therefore, it is cheaper to use a cmpxchg without a lock prefix
    - * in a loop.
    - */
    -#define _atomic_swap_local_impl(pptr, new_val) \
    -({ \
    -        typeof(*(pptr)) exp_val; \
    -        typeof(*(pptr)) old_val; \
    -        \
    -        do { \
    -                exp_val = *pptr; \
    -                _atomic_cas_impl(pptr, exp_val, new_val, old_val, ""); \
    -        } while (old_val != exp_val); \
    -        \
    -        old_val; \
    -})
    -
    -
    -/** Atomically sets *ptr to val and returns the previous value. */
    -NO_TRACE static inline void * atomic_set_return_ptr(void **pptr, void *val)
    -{
    -        return _atomic_swap_impl(pptr, val);
    -}
    -
    -/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
    - *
    - * This function is only atomic with respect to local interrupts and it is
    - * NOT atomic with respect to other cpus.
    - */
    -NO_TRACE static inline void * atomic_set_return_ptr_local(
    -        void **pptr, void *new_val)
    -{
    -        return _atomic_swap_local_impl(pptr, new_val);
    -}
    -
    -/** Atomically sets *ptr to val and returns the previous value. */
    -NO_TRACE static inline native_t atomic_set_return_native_t(
    -        native_t *p, native_t val)
    -{
    -        return _atomic_swap_impl(p, val);
    -}
    -
    -/** Sets *ptr to new_val and returns the previous value. NOT smp safe.
    - *
    - * This function is only atomic with respect to local interrupts and it is
    - * NOT atomic with respect to other cpus.
    - */
    -NO_TRACE static inline native_t atomic_set_return_native_t_local(
    -        native_t *p, native_t new_val)
    -{
    -        return _atomic_swap_local_impl(p, new_val);
    -}
    -
    -
    -#undef _atomic_cas_impl
    -#undef _atomic_swap_impl
    -#undef _atomic_swap_local_impl
    -
     #endif
     
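
Besides the CAS helpers, the removed block also contained the xchgl-based swap helpers (atomic_set_return_ptr and friends). For comparison only, a minimal sketch of the same pointer swap expressed with the __atomic_exchange_n builtin; the name and memory order are illustrative assumptions, not the replacement the kernel actually adopted:

    /* Hypothetical sketch of a builtin-based swap; not the code this changeset introduces. */
    static inline void *swap_ptr_builtin(void **pptr, void *new_val)
    {
            /* Atomically stores new_val into *pptr and returns the previous value. */
            return __atomic_exchange_n(pptr, new_val, __ATOMIC_SEQ_CST);
    }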