Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tile: rework <asm/cmpxchg.h>

The macrology in cmpxchg.h was designed to allow arbitrary pointer
and integer values to be passed through the routines. To support
cmpxchg() on 64-bit values on the 32-bit tilepro architecture, we
used the idiom "(typeof(val))(typeof(val-val))". This way, in the
"size 8" branch of the switch, when the underlying cmpxchg routine
returns a 64-bit quantity, we cast it first to a typeof(val-val)
quantity (i.e. size_t if "val" is a pointer) with no warnings about
casting between pointers and integers of different sizes, then cast
onwards to typeof(val), again with no warnings. If val is not a
pointer type, the additional cast is a no-op. We can't replace the
typeof(val-val) cast with (for example) unsigned long, since then if
"val" is really a 64-bit type, we cast away the high bits.

HOWEVER, this fails with current gcc (through 4.7 at least) if "val"
is a pointer to an incomplete type. Unfortunately gcc isn't smart
enough to realize that "val - val" will always be a size_t type
even if it's an incomplete type pointer.

Accordingly, I've reworked the way we handle the casting. We have
given up the ability to use cmpxchg() on 64-bit values on tilepro,
which is OK in the kernel since we should use cmpxchg64() explicitly
on such values anyway. As a result, I can just use simple "unsigned
long" casts internally.

As I reworked it, I realized it would be cleaner to move the
architecture-specific conditionals for cmpxchg and xchg out of the
atomic.h headers and into cmpxchg.h, and then use the cmpxchg() and
xchg() primitives directly in atomic.h and elsewhere. This allowed
the cmpxchg.h header to stand on its own without relying on the
implicit include of it that is performed by <asm/atomic.h>.
It also allowed collapsing the atomic_xchg/atomic_cmpxchg routines
from atomic_{32,64}.h into atomic.h.

I improved the tests that guard the allowed size of the arguments
to the routines to use a __compiletime_error() test. (By avoiding
the use of BUILD_BUG, I could include cmpxchg.h into bitops.h as
well and use the macros there, which is otherwise impossible due
to include order dependency issues.)

The tilepro _atomic_xxx internal methods were previously set up to
take atomic_t and atomic64_t arguments, which isn't as convenient
with the new model, so I modified them to take int or u64 arguments,
which is consistent with how they used the arguments internally
anyway; this provided some nice simplification there too.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+156 -164
+52
arch/tile/include/asm/atomic.h
··· 114 114 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 115 115 116 116 /** 117 + * atomic_xchg - atomically exchange contents of memory with a new value 118 + * @v: pointer of type atomic_t 119 + * @i: integer value to store in memory 120 + * 121 + * Atomically sets @v to @i and returns old @v 122 + */ 123 + static inline int atomic_xchg(atomic_t *v, int n) 124 + { 125 + return xchg(&v->counter, n); 126 + } 127 + 128 + /** 129 + * atomic_cmpxchg - atomically exchange contents of memory if it matches 130 + * @v: pointer of type atomic_t 131 + * @o: old value that memory should have 132 + * @n: new value to write to memory if it matches 133 + * 134 + * Atomically checks if @v holds @o and replaces it with @n if so. 135 + * Returns the old value at @v. 136 + */ 137 + static inline int atomic_cmpxchg(atomic_t *v, int o, int n) 138 + { 139 + return cmpxchg(&v->counter, o, n); 140 + } 141 + 142 + /** 117 143 * atomic_add_negative - add and test if negative 118 144 * @v: pointer of type atomic_t 119 145 * @i: integer value to add ··· 158 132 #endif 159 133 160 134 #ifndef __ASSEMBLY__ 135 + 136 + /** 137 + * atomic64_xchg - atomically exchange contents of memory with a new value 138 + * @v: pointer of type atomic64_t 139 + * @i: integer value to store in memory 140 + * 141 + * Atomically sets @v to @i and returns old @v 142 + */ 143 + static inline u64 atomic64_xchg(atomic64_t *v, u64 n) 144 + { 145 + return xchg64(&v->counter, n); 146 + } 147 + 148 + /** 149 + * atomic64_cmpxchg - atomically exchange contents of memory if it matches 150 + * @v: pointer of type atomic64_t 151 + * @o: old value that memory should have 152 + * @n: new value to write to memory if it matches 153 + * 154 + * Atomically checks if @v holds @o and replaces it with @n if so. 155 + * Returns the old value at @v. 
156 + */ 157 + static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 158 + { 159 + return cmpxchg64(&v->counter, o, n); 160 + } 161 161 162 162 static inline long long atomic64_dec_if_positive(atomic64_t *v) 163 163 {
+9 -76
arch/tile/include/asm/atomic_32.h
··· 22 22 23 23 #ifndef __ASSEMBLY__ 24 24 25 - /* Tile-specific routines to support <linux/atomic.h>. */ 26 - int _atomic_xchg(atomic_t *v, int n); 27 - int _atomic_xchg_add(atomic_t *v, int i); 28 - int _atomic_xchg_add_unless(atomic_t *v, int a, int u); 29 - int _atomic_cmpxchg(atomic_t *v, int o, int n); 30 - 31 - /** 32 - * atomic_xchg - atomically exchange contents of memory with a new value 33 - * @v: pointer of type atomic_t 34 - * @i: integer value to store in memory 35 - * 36 - * Atomically sets @v to @i and returns old @v 37 - */ 38 - static inline int atomic_xchg(atomic_t *v, int n) 39 - { 40 - smp_mb(); /* barrier for proper semantics */ 41 - return _atomic_xchg(v, n); 42 - } 43 - 44 - /** 45 - * atomic_cmpxchg - atomically exchange contents of memory if it matches 46 - * @v: pointer of type atomic_t 47 - * @o: old value that memory should have 48 - * @n: new value to write to memory if it matches 49 - * 50 - * Atomically checks if @v holds @o and replaces it with @n if so. 51 - * Returns the old value at @v. 
52 - */ 53 - static inline int atomic_cmpxchg(atomic_t *v, int o, int n) 54 - { 55 - smp_mb(); /* barrier for proper semantics */ 56 - return _atomic_cmpxchg(v, o, n); 57 - } 58 - 59 25 /** 60 26 * atomic_add - add integer to atomic variable 61 27 * @i: integer value to add ··· 31 65 */ 32 66 static inline void atomic_add(int i, atomic_t *v) 33 67 { 34 - _atomic_xchg_add(v, i); 68 + _atomic_xchg_add(&v->counter, i); 35 69 } 36 70 37 71 /** ··· 44 78 static inline int atomic_add_return(int i, atomic_t *v) 45 79 { 46 80 smp_mb(); /* barrier for proper semantics */ 47 - return _atomic_xchg_add(v, i) + i; 81 + return _atomic_xchg_add(&v->counter, i) + i; 48 82 } 49 83 50 84 /** ··· 59 93 static inline int __atomic_add_unless(atomic_t *v, int a, int u) 60 94 { 61 95 smp_mb(); /* barrier for proper semantics */ 62 - return _atomic_xchg_add_unless(v, a, u); 96 + return _atomic_xchg_add_unless(&v->counter, a, u); 63 97 } 64 98 65 99 /** ··· 74 108 */ 75 109 static inline void atomic_set(atomic_t *v, int n) 76 110 { 77 - _atomic_xchg(v, n); 111 + _atomic_xchg(&v->counter, n); 78 112 } 79 113 80 114 /* A 64bit atomic type */ ··· 84 118 } atomic64_t; 85 119 86 120 #define ATOMIC64_INIT(val) { (val) } 87 - 88 - u64 _atomic64_xchg(atomic64_t *v, u64 n); 89 - u64 _atomic64_xchg_add(atomic64_t *v, u64 i); 90 - u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u); 91 - u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n); 92 121 93 122 /** 94 123 * atomic64_read - read atomic variable ··· 98 137 * Casting away const is safe since the atomic support routines 99 138 * do not write to memory if the value has not been modified. 
100 139 */ 101 - return _atomic64_xchg_add((atomic64_t *)v, 0); 102 - } 103 - 104 - /** 105 - * atomic64_xchg - atomically exchange contents of memory with a new value 106 - * @v: pointer of type atomic64_t 107 - * @i: integer value to store in memory 108 - * 109 - * Atomically sets @v to @i and returns old @v 110 - */ 111 - static inline u64 atomic64_xchg(atomic64_t *v, u64 n) 112 - { 113 - smp_mb(); /* barrier for proper semantics */ 114 - return _atomic64_xchg(v, n); 115 - } 116 - 117 - /** 118 - * atomic64_cmpxchg - atomically exchange contents of memory if it matches 119 - * @v: pointer of type atomic64_t 120 - * @o: old value that memory should have 121 - * @n: new value to write to memory if it matches 122 - * 123 - * Atomically checks if @v holds @o and replaces it with @n if so. 124 - * Returns the old value at @v. 125 - */ 126 - static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 127 - { 128 - smp_mb(); /* barrier for proper semantics */ 129 - return _atomic64_cmpxchg(v, o, n); 140 + return _atomic64_xchg_add((u64 *)&v->counter, 0); 130 141 } 131 142 132 143 /** ··· 110 177 */ 111 178 static inline void atomic64_add(u64 i, atomic64_t *v) 112 179 { 113 - _atomic64_xchg_add(v, i); 180 + _atomic64_xchg_add(&v->counter, i); 114 181 } 115 182 116 183 /** ··· 123 190 static inline u64 atomic64_add_return(u64 i, atomic64_t *v) 124 191 { 125 192 smp_mb(); /* barrier for proper semantics */ 126 - return _atomic64_xchg_add(v, i) + i; 193 + return _atomic64_xchg_add(&v->counter, i) + i; 127 194 } 128 195 129 196 /** ··· 138 205 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) 139 206 { 140 207 smp_mb(); /* barrier for proper semantics */ 141 - return _atomic64_xchg_add_unless(v, a, u) != u; 208 + return _atomic64_xchg_add_unless(&v->counter, a, u) != u; 142 209 } 143 210 144 211 /** ··· 153 220 */ 154 221 static inline void atomic64_set(atomic64_t *v, u64 n) 155 222 { 156 - _atomic64_xchg(v, n); 223 + _atomic64_xchg(&v->counter, n); 157 
224 } 158 225 159 226 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+2 -40
arch/tile/include/asm/atomic_64.h
··· 32 32 * on any routine which updates memory and returns a value. 33 33 */ 34 34 35 - static inline int atomic_cmpxchg(atomic_t *v, int o, int n) 36 - { 37 - int val; 38 - __insn_mtspr(SPR_CMPEXCH_VALUE, o); 39 - smp_mb(); /* barrier for proper semantics */ 40 - val = __insn_cmpexch4((void *)&v->counter, n); 41 - smp_mb(); /* barrier for proper semantics */ 42 - return val; 43 - } 44 - 45 - static inline int atomic_xchg(atomic_t *v, int n) 46 - { 47 - int val; 48 - smp_mb(); /* barrier for proper semantics */ 49 - val = __insn_exch4((void *)&v->counter, n); 50 - smp_mb(); /* barrier for proper semantics */ 51 - return val; 52 - } 53 - 54 35 static inline void atomic_add(int i, atomic_t *v) 55 36 { 56 37 __insn_fetchadd4((void *)&v->counter, i); ··· 53 72 if (oldval == u) 54 73 break; 55 74 guess = oldval; 56 - oldval = atomic_cmpxchg(v, guess, guess + a); 75 + oldval = cmpxchg(&v->counter, guess, guess + a); 57 76 } while (guess != oldval); 58 77 return oldval; 59 78 } ··· 64 83 65 84 #define atomic64_read(v) ((v)->counter) 66 85 #define atomic64_set(v, i) ((v)->counter = (i)) 67 - 68 - static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n) 69 - { 70 - long val; 71 - smp_mb(); /* barrier for proper semantics */ 72 - __insn_mtspr(SPR_CMPEXCH_VALUE, o); 73 - val = __insn_cmpexch((void *)&v->counter, n); 74 - smp_mb(); /* barrier for proper semantics */ 75 - return val; 76 - } 77 - 78 - static inline long atomic64_xchg(atomic64_t *v, long n) 79 - { 80 - long val; 81 - smp_mb(); /* barrier for proper semantics */ 82 - val = __insn_exch((void *)&v->counter, n); 83 - smp_mb(); /* barrier for proper semantics */ 84 - return val; 85 - } 86 86 87 87 static inline void atomic64_add(long i, atomic64_t *v) 88 88 { ··· 86 124 if (oldval == u) 87 125 break; 88 126 guess = oldval; 89 - oldval = atomic64_cmpxchg(v, guess, guess + a); 127 + oldval = cmpxchg(&v->counter, guess, guess + a); 90 128 } while (guess != oldval); 91 129 return oldval != u; 92 130 }
+1 -1
arch/tile/include/asm/bitops_32.h
··· 16 16 #define _ASM_TILE_BITOPS_32_H 17 17 18 18 #include <linux/compiler.h> 19 - #include <linux/atomic.h> 19 + #include <asm/barrier.h> 20 20 21 21 /* Tile-specific routines to support <asm/bitops.h>. */ 22 22 unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
+3 -5
arch/tile/include/asm/bitops_64.h
··· 16 16 #define _ASM_TILE_BITOPS_64_H 17 17 18 18 #include <linux/compiler.h> 19 - #include <linux/atomic.h> 19 + #include <asm/cmpxchg.h> 20 20 21 21 /* See <asm/bitops.h> for API comments. */ 22 22 ··· 44 44 oldval = *addr; 45 45 do { 46 46 guess = oldval; 47 - oldval = atomic64_cmpxchg((atomic64_t *)addr, 48 - guess, guess ^ mask); 47 + oldval = cmpxchg(addr, guess, guess ^ mask); 49 48 } while (guess != oldval); 50 49 } 51 50 ··· 89 90 oldval = *addr; 90 91 do { 91 92 guess = oldval; 92 - oldval = atomic64_cmpxchg((atomic64_t *)addr, 93 - guess, guess ^ mask); 93 + oldval = cmpxchg(addr, guess, guess ^ mask); 94 94 } while (guess != oldval); 95 95 return (oldval & mask) != 0; 96 96 }
+73 -24
arch/tile/include/asm/cmpxchg.h
··· 20 20 21 21 #ifndef __ASSEMBLY__ 22 22 23 - /* Nonexistent functions intended to cause link errors. */ 24 - extern unsigned long __xchg_called_with_bad_pointer(void); 25 - extern unsigned long __cmpxchg_called_with_bad_pointer(void); 23 + #include <asm/barrier.h> 26 24 27 - #define xchg(ptr, x) \ 25 + /* Nonexistent functions intended to cause compile errors. */ 26 + extern void __xchg_called_with_bad_pointer(void) 27 + __compiletime_error("Bad argument size for xchg"); 28 + extern void __cmpxchg_called_with_bad_pointer(void) 29 + __compiletime_error("Bad argument size for cmpxchg"); 30 + 31 + #ifndef __tilegx__ 32 + 33 + /* Note the _atomic_xxx() routines include a final mb(). */ 34 + int _atomic_xchg(int *ptr, int n); 35 + int _atomic_xchg_add(int *v, int i); 36 + int _atomic_xchg_add_unless(int *v, int a, int u); 37 + int _atomic_cmpxchg(int *ptr, int o, int n); 38 + u64 _atomic64_xchg(u64 *v, u64 n); 39 + u64 _atomic64_xchg_add(u64 *v, u64 i); 40 + u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u); 41 + u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n); 42 + 43 + #define xchg(ptr, n) \ 44 + ({ \ 45 + if (sizeof(*(ptr)) != 4) \ 46 + __xchg_called_with_bad_pointer(); \ 47 + smp_mb(); \ 48 + (typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \ 49 + }) 50 + 51 + #define cmpxchg(ptr, o, n) \ 52 + ({ \ 53 + if (sizeof(*(ptr)) != 4) \ 54 + __cmpxchg_called_with_bad_pointer(); \ 55 + smp_mb(); \ 56 + (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \ 57 + }) 58 + 59 + #define xchg64(ptr, n) \ 60 + ({ \ 61 + if (sizeof(*(ptr)) != 8) \ 62 + __xchg_called_with_bad_pointer(); \ 63 + smp_mb(); \ 64 + (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \ 65 + }) 66 + 67 + #define cmpxchg64(ptr, o, n) \ 68 + ({ \ 69 + if (sizeof(*(ptr)) != 8) \ 70 + __cmpxchg_called_with_bad_pointer(); \ 71 + smp_mb(); \ 72 + (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ 73 + }) 74 + 75 + #else 76 + 77 + #define xchg(ptr, n) \ 28 78 ({ \ 29 79 
typeof(*(ptr)) __x; \ 80 + smp_mb(); \ 30 81 switch (sizeof(*(ptr))) { \ 31 82 case 4: \ 32 - __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \ 33 - (atomic_t *)(ptr), \ 34 - (u32)(typeof((x)-(x)))(x)); \ 83 + __x = (typeof(__x))(unsigned long) \ 84 + __insn_exch4((ptr), (u32)(unsigned long)(n)); \ 35 85 break; \ 36 86 case 8: \ 37 - __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \ 38 - (atomic64_t *)(ptr), \ 39 - (u64)(typeof((x)-(x)))(x)); \ 87 + __x = (typeof(__x)) \ 88 + __insn_exch((ptr), (unsigned long)(n)); \ 40 89 break; \ 41 90 default: \ 42 91 __xchg_called_with_bad_pointer(); \ 92 + break; \ 43 93 } \ 94 + smp_mb(); \ 44 95 __x; \ 45 96 }) 46 97 47 98 #define cmpxchg(ptr, o, n) \ 48 99 ({ \ 49 100 typeof(*(ptr)) __x; \ 101 + __insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \ 102 + smp_mb(); \ 50 103 switch (sizeof(*(ptr))) { \ 51 104 case 4: \ 52 - __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \ 53 - (atomic_t *)(ptr), \ 54 - (u32)(typeof((o)-(o)))(o), \ 55 - (u32)(typeof((n)-(n)))(n)); \ 105 + __x = (typeof(__x))(unsigned long) \ 106 + __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \ 56 107 break; \ 57 108 case 8: \ 58 - __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \ 59 - (atomic64_t *)(ptr), \ 60 - (u64)(typeof((o)-(o)))(o), \ 61 - (u64)(typeof((n)-(n)))(n)); \ 109 + __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \ 62 110 break; \ 63 111 default: \ 64 112 __cmpxchg_called_with_bad_pointer(); \ 113 + break; \ 65 114 } \ 115 + smp_mb(); \ 66 116 __x; \ 67 117 }) 68 118 69 - #define tas(ptr) (xchg((ptr), 1)) 119 + #define xchg64 xchg 120 + #define cmpxchg64 cmpxchg 70 121 71 - #define cmpxchg64(ptr, o, n) \ 72 - ({ \ 73 - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 74 - cmpxchg((ptr), (o), (n)); \ 75 - }) 122 + #endif 123 + 124 + #define tas(ptr) xchg((ptr), 1) 76 125 77 126 #endif /* __ASSEMBLY__ */ 78 127
+16 -18
arch/tile/lib/atomic_32.c
··· 59 59 return __atomic_hashed_lock(v); 60 60 } 61 61 62 - int _atomic_xchg(atomic_t *v, int n) 62 + int _atomic_xchg(int *v, int n) 63 63 { 64 - return __atomic_xchg(&v->counter, __atomic_setup(v), n).val; 64 + return __atomic_xchg(v, __atomic_setup(v), n).val; 65 65 } 66 66 EXPORT_SYMBOL(_atomic_xchg); 67 67 68 - int _atomic_xchg_add(atomic_t *v, int i) 68 + int _atomic_xchg_add(int *v, int i) 69 69 { 70 - return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val; 70 + return __atomic_xchg_add(v, __atomic_setup(v), i).val; 71 71 } 72 72 EXPORT_SYMBOL(_atomic_xchg_add); 73 73 74 - int _atomic_xchg_add_unless(atomic_t *v, int a, int u) 74 + int _atomic_xchg_add_unless(int *v, int a, int u) 75 75 { 76 76 /* 77 77 * Note: argument order is switched here since it is easier 78 78 * to use the first argument consistently as the "old value" 79 79 * in the assembly, as is done for _atomic_cmpxchg(). 80 80 */ 81 - return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a) 82 - .val; 81 + return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val; 83 82 } 84 83 EXPORT_SYMBOL(_atomic_xchg_add_unless); 85 84 86 - int _atomic_cmpxchg(atomic_t *v, int o, int n) 85 + int _atomic_cmpxchg(int *v, int o, int n) 87 86 { 88 - return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val; 87 + return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val; 89 88 } 90 89 EXPORT_SYMBOL(_atomic_cmpxchg); 91 90 ··· 107 108 EXPORT_SYMBOL(_atomic_xor); 108 109 109 110 110 - u64 _atomic64_xchg(atomic64_t *v, u64 n) 111 + u64 _atomic64_xchg(u64 *v, u64 n) 111 112 { 112 - return __atomic64_xchg(&v->counter, __atomic_setup(v), n); 113 + return __atomic64_xchg(v, __atomic_setup(v), n); 113 114 } 114 115 EXPORT_SYMBOL(_atomic64_xchg); 115 116 116 - u64 _atomic64_xchg_add(atomic64_t *v, u64 i) 117 + u64 _atomic64_xchg_add(u64 *v, u64 i) 117 118 { 118 - return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i); 119 + return __atomic64_xchg_add(v, __atomic_setup(v), i); 119 
120 } 120 121 EXPORT_SYMBOL(_atomic64_xchg_add); 121 122 122 - u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u) 123 + u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) 123 124 { 124 125 /* 125 126 * Note: argument order is switched here since it is easier 126 127 * to use the first argument consistently as the "old value" 127 128 * in the assembly, as is done for _atomic_cmpxchg(). 128 129 */ 129 - return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v), 130 - u, a); 130 + return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a); 131 131 } 132 132 EXPORT_SYMBOL(_atomic64_xchg_add_unless); 133 133 134 - u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 134 + u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) 135 135 { 136 - return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n); 136 + return __atomic64_cmpxchg(v, __atomic_setup(v), o, n); 137 137 } 138 138 EXPORT_SYMBOL(_atomic64_cmpxchg); 139 139