File: 1 edited

  • kernel/arch/ia32/include/atomic.h

--- r340ba25c
+++ r9d58539
@@ -1,5 +1,4 @@
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
- * Copyright (c) 2012      Adam Hraska
  * All rights reserved.
  *
@@ -114,5 +113,4 @@
 }
 
-
 /** ia32 specific fast spinlock */
 NO_TRACE static inline void atomic_lock_arch(atomic_t *val)
@@ -144,106 +142,4 @@
 }
 
-
-#define _atomic_cas_impl(pptr, exp_val, new_val, old_val, prefix) \
-({ \
-        switch (sizeof(typeof(*(pptr)))) { \
-        case 1: \
-                asm volatile ( \
-                        prefix " cmpxchgb %[newval], %[ptr]\n" \
-                        : /* Output operands. */ \
-                        /* Old/current value is returned in eax. */ \
-                        [oldval] "=a" (old_val), \
-                        /* (*ptr) will be read and written to, hence "+" */ \
-                        [ptr] "+m" (*pptr) \
-                        : /* Input operands. */ \
-                        /* Expected value must be in eax. */ \
-                        [expval] "a" (exp_val), \
-                        /* The new value may be in any register. */ \
-                        [newval] "r" (new_val) \
-                        : "memory" \
-                ); \
-                break; \
-        case 2: \
-                asm volatile ( \
-                        prefix " cmpxchgw %[newval], %[ptr]\n" \
-                        : /* Output operands. */ \
-                        /* Old/current value is returned in eax. */ \
-                        [oldval] "=a" (old_val), \
-                        /* (*ptr) will be read and written to, hence "+" */ \
-                        [ptr] "+m" (*pptr) \
-                        : /* Input operands. */ \
-                        /* Expected value must be in eax. */ \
-                        [expval] "a" (exp_val), \
-                        /* The new value may be in any register. */ \
-                        [newval] "r" (new_val) \
-                        : "memory" \
-                ); \
-                break; \
-        case 4: \
-                asm volatile ( \
-                        prefix " cmpxchgl %[newval], %[ptr]\n" \
-                        : /* Output operands. */ \
-                        /* Old/current value is returned in eax. */ \
-                        [oldval] "=a" (old_val), \
-                        /* (*ptr) will be read and written to, hence "+" */ \
-                        [ptr] "+m" (*pptr) \
-                        : /* Input operands. */ \
-                        /* Expected value must be in eax. */ \
-                        [expval] "a" (exp_val), \
-                        /* The new value may be in any register. */ \
-                        [newval] "r" (new_val) \
-                        : "memory" \
-                ); \
-                break; \
-        } \
-})
-
-
-#ifndef local_atomic_cas
-
-#define local_atomic_cas(pptr, exp_val, new_val) \
-({ \
-        /* Use proper types and avoid name clashes */ \
-        typeof(*(pptr)) _old_val_cas; \
-        typeof(*(pptr)) _exp_val_cas = exp_val; \
-        typeof(*(pptr)) _new_val_cas = new_val; \
-        _atomic_cas_impl(pptr, _exp_val_cas, _new_val_cas, _old_val_cas, ""); \
-        \
-        _old_val_cas; \
-})
-
-#else
-/* Check if arch/atomic.h does not accidentally include /atomic.h .*/
-#error Architecture specific cpu local atomics already defined! Check your includes.
-#endif
-
-
-#ifndef local_atomic_exchange
-/*
- * Issuing a xchg instruction always implies lock prefix semantics.
- * Therefore, it is cheaper to use a cmpxchg without a lock prefix
- * in a loop.
- */
-#define local_atomic_exchange(pptr, new_val) \
-({ \
-        /* Use proper types and avoid name clashes */ \
-        typeof(*(pptr)) _exp_val_x; \
-        typeof(*(pptr)) _old_val_x; \
-        typeof(*(pptr)) _new_val_x = new_val; \
-        \
-        do { \
-                _exp_val_x = *pptr; \
-                _old_val_x = local_atomic_cas(pptr, _exp_val_x, _new_val_x); \
-        } while (_old_val_x != _exp_val_x); \
-        \
-        _old_val_x; \
-})
-
-#else
-/* Check if arch/atomic.h does not accidentally include /atomic.h .*/
-#error Architecture specific cpu local atomics already defined! Check your includes.
-#endif
-
-
 #endif
 
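The changeset removes the ia32 definitions of _atomic_cas_impl(), local_atomic_cas() and local_atomic_exchange(), the CPU-local compare-and-swap and exchange macros built on cmpxchg without the lock prefix. As a minimal usage sketch (not part of the changeset; the counter variable and the surrounding function are hypothetical, only the macro names come from the removed code), a caller of these macros would have looked roughly like this:

    /*
     * Hypothetical caller of the removed macros. The variable and the
     * function below are illustrative only; they assume the data is
     * accessed by a single CPU, which is why the non-LOCK-prefixed
     * cmpxchg inside local_atomic_cas() is sufficient.
     */
    static size_t cpu_local_counter;

    static size_t example_reset_and_bump(void)
    {
            /* Unconditionally store 0 and obtain the previous value. */
            size_t old = local_atomic_exchange(&cpu_local_counter, 0);

            /* Store old + 1 only if the counter still contains 0. */
            size_t seen = local_atomic_cas(&cpu_local_counter, 0, old + 1);

            /* On success the returned (old) value equals the expected one. */
            return (seen == 0) ? old : seen;
    }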