Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic: make atomic*_{cmp,}xchg optional

Most architectures define the atomic/atomic64 xchg and cmpxchg
operations in terms of arch_xchg and arch_cmpxchg respectively.

Add fallbacks for these cases and remove the trivial cases from arch
code. On some architectures the existing definitions are kept as these
are used to build other arch_atomic*() operations.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-5-mark.rutland@arm.com

authored by

Mark Rutland and committed by
Peter Zijlstra
d12157ef a7bafa79

+179 -265
-10
arch/alpha/include/asm/atomic.h
··· 200 200 #undef ATOMIC_OP_RETURN 201 201 #undef ATOMIC_OP 202 202 203 - #define arch_atomic64_cmpxchg(v, old, new) \ 204 - (arch_cmpxchg(&((v)->counter), old, new)) 205 - #define arch_atomic64_xchg(v, new) \ 206 - (arch_xchg(&((v)->counter), new)) 207 - 208 - #define arch_atomic_cmpxchg(v, old, new) \ 209 - (arch_cmpxchg(&((v)->counter), old, new)) 210 - #define arch_atomic_xchg(v, new) \ 211 - (arch_xchg(&((v)->counter), new)) 212 - 213 203 /** 214 204 * arch_atomic_fetch_add_unless - add unless the number is a given value 215 205 * @v: pointer of type atomic_t
-24
arch/arc/include/asm/atomic.h
··· 22 22 #include <asm/atomic-spinlock.h> 23 23 #endif 24 24 25 - #define arch_atomic_cmpxchg(v, o, n) \ 26 - ({ \ 27 - arch_cmpxchg(&((v)->counter), (o), (n)); \ 28 - }) 29 - 30 - #ifdef arch_cmpxchg_relaxed 31 - #define arch_atomic_cmpxchg_relaxed(v, o, n) \ 32 - ({ \ 33 - arch_cmpxchg_relaxed(&((v)->counter), (o), (n)); \ 34 - }) 35 - #endif 36 - 37 - #define arch_atomic_xchg(v, n) \ 38 - ({ \ 39 - arch_xchg(&((v)->counter), (n)); \ 40 - }) 41 - 42 - #ifdef arch_xchg_relaxed 43 - #define arch_atomic_xchg_relaxed(v, n) \ 44 - ({ \ 45 - arch_xchg_relaxed(&((v)->counter), (n)); \ 46 - }) 47 - #endif 48 - 49 25 /* 50 26 * 64-bit atomics 51 27 */
+2
arch/arc/include/asm/atomic64-arcv2.h
··· 159 159 160 160 return prev; 161 161 } 162 + #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg 162 163 163 164 static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new) 164 165 { ··· 180 179 181 180 return prev; 182 181 } 182 + #define arch_atomic64_xchg arch_atomic64_xchg 183 183 184 184 /** 185 185 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
+1 -2
arch/arm/include/asm/atomic.h
··· 210 210 211 211 return ret; 212 212 } 213 + #define arch_atomic_cmpxchg arch_atomic_cmpxchg 213 214 214 215 #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot 215 216 ··· 240 239 #undef ATOMIC_FETCH_OP 241 240 #undef ATOMIC_OP_RETURN 242 241 #undef ATOMIC_OP 243 - 244 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 245 242 246 243 #ifndef CONFIG_GENERIC_ATOMIC64 247 244 typedef struct {
-28
arch/arm64/include/asm/atomic.h
··· 142 142 #define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release 143 143 #define arch_atomic_fetch_xor arch_atomic_fetch_xor 144 144 145 - #define arch_atomic_xchg_relaxed(v, new) \ 146 - arch_xchg_relaxed(&((v)->counter), (new)) 147 - #define arch_atomic_xchg_acquire(v, new) \ 148 - arch_xchg_acquire(&((v)->counter), (new)) 149 - #define arch_atomic_xchg_release(v, new) \ 150 - arch_xchg_release(&((v)->counter), (new)) 151 - #define arch_atomic_xchg(v, new) \ 152 - arch_xchg(&((v)->counter), (new)) 153 - 154 - #define arch_atomic_cmpxchg_relaxed(v, old, new) \ 155 - arch_cmpxchg_relaxed(&((v)->counter), (old), (new)) 156 - #define arch_atomic_cmpxchg_acquire(v, old, new) \ 157 - arch_cmpxchg_acquire(&((v)->counter), (old), (new)) 158 - #define arch_atomic_cmpxchg_release(v, old, new) \ 159 - arch_cmpxchg_release(&((v)->counter), (old), (new)) 160 - #define arch_atomic_cmpxchg(v, old, new) \ 161 - arch_cmpxchg(&((v)->counter), (old), (new)) 162 - 163 145 #define arch_atomic_andnot arch_atomic_andnot 164 146 165 147 /* ··· 190 208 #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire 191 209 #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release 192 210 #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor 193 - 194 - #define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed 195 - #define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire 196 - #define arch_atomic64_xchg_release arch_atomic_xchg_release 197 - #define arch_atomic64_xchg arch_atomic_xchg 198 - 199 - #define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed 200 - #define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire 201 - #define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release 202 - #define arch_atomic64_cmpxchg arch_atomic_cmpxchg 203 211 204 212 #define arch_atomic64_andnot arch_atomic64_andnot 205 213
-35
arch/csky/include/asm/atomic.h
··· 195 195 } 196 196 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive 197 197 198 - #define ATOMIC_OP() \ 199 - static __always_inline \ 200 - int arch_atomic_xchg_relaxed(atomic_t *v, int n) \ 201 - { \ 202 - return __xchg_relaxed(n, &(v->counter), 4); \ 203 - } \ 204 - static __always_inline \ 205 - int arch_atomic_cmpxchg_relaxed(atomic_t *v, int o, int n) \ 206 - { \ 207 - return __cmpxchg_relaxed(&(v->counter), o, n, 4); \ 208 - } \ 209 - static __always_inline \ 210 - int arch_atomic_cmpxchg_acquire(atomic_t *v, int o, int n) \ 211 - { \ 212 - return __cmpxchg_acquire(&(v->counter), o, n, 4); \ 213 - } \ 214 - static __always_inline \ 215 - int arch_atomic_cmpxchg(atomic_t *v, int o, int n) \ 216 - { \ 217 - return __cmpxchg(&(v->counter), o, n, 4); \ 218 - } 219 - 220 - #define ATOMIC_OPS() \ 221 - ATOMIC_OP() 222 - 223 - ATOMIC_OPS() 224 - 225 - #define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed 226 - #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed 227 - #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire 228 - #define arch_atomic_cmpxchg arch_atomic_cmpxchg 229 - 230 - #undef ATOMIC_OPS 231 - #undef ATOMIC_OP 232 - 233 198 #else 234 199 #include <asm-generic/atomic.h> 235 200 #endif
-6
arch/hexagon/include/asm/atomic.h
··· 36 36 */ 37 37 #define arch_atomic_read(v) READ_ONCE((v)->counter) 38 38 39 - #define arch_atomic_xchg(v, new) \ 40 - (arch_xchg(&((v)->counter), (new))) 41 - 42 - #define arch_atomic_cmpxchg(v, old, new) \ 43 - (arch_cmpxchg(&((v)->counter), (old), (new))) 44 - 45 39 #define ATOMIC_OP(op) \ 46 40 static inline void arch_atomic_##op(int i, atomic_t *v) \ 47 41 { \
-7
arch/ia64/include/asm/atomic.h
··· 207 207 #undef ATOMIC64_FETCH_OP 208 208 #undef ATOMIC64_OP 209 209 210 - #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) 211 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 212 - 213 - #define arch_atomic64_cmpxchg(v, old, new) \ 214 - (arch_cmpxchg(&((v)->counter), old, new)) 215 - #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) 216 - 217 210 #define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v)) 218 211 #define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v)) 219 212
-7
arch/loongarch/include/asm/atomic.h
··· 181 181 return result; 182 182 } 183 183 184 - #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n))) 185 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new))) 186 - 187 184 /* 188 185 * arch_atomic_dec_if_positive - decrement by 1 if old value positive 189 186 * @v: pointer of type atomic_t ··· 338 341 339 342 return result; 340 343 } 341 - 342 - #define arch_atomic64_cmpxchg(v, o, n) \ 343 - ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n))) 344 - #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new))) 345 344 346 345 /* 347 346 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
+3 -6
arch/m68k/include/asm/atomic.h
··· 158 158 } 159 159 #define arch_atomic_inc_and_test arch_atomic_inc_and_test 160 160 161 - #ifdef CONFIG_RMW_INSNS 162 - 163 - #define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n))) 164 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 165 - 166 - #else /* !CONFIG_RMW_INSNS */ 161 + #ifndef CONFIG_RMW_INSNS 167 162 168 163 static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) 169 164 { ··· 172 177 local_irq_restore(flags); 173 178 return prev; 174 179 } 180 + #define arch_atomic_cmpxchg arch_atomic_cmpxchg 175 181 176 182 static inline int arch_atomic_xchg(atomic_t *v, int new) 177 183 { ··· 185 189 local_irq_restore(flags); 186 190 return prev; 187 191 } 192 + #define arch_atomic_xchg arch_atomic_xchg 188 193 189 194 #endif /* !CONFIG_RMW_INSNS */ 190 195
-11
arch/mips/include/asm/atomic.h
··· 33 33 { \ 34 34 WRITE_ONCE(v->counter, i); \ 35 35 } \ 36 - \ 37 - static __always_inline type \ 38 - arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \ 39 - { \ 40 - return arch_cmpxchg(&v->counter, o, n); \ 41 - } \ 42 - \ 43 - static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \ 44 - { \ 45 - return arch_xchg(&v->counter, n); \ 46 - } 47 36 48 37 ATOMIC_OPS(atomic, int) 49 38
-3
arch/openrisc/include/asm/atomic.h
··· 130 130 131 131 #include <asm/cmpxchg.h> 132 132 133 - #define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v))) 134 - #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new))) 135 - 136 133 #endif /* __ASM_OPENRISC_ATOMIC_H */
-9
arch/parisc/include/asm/atomic.h
··· 73 73 return READ_ONCE((v)->counter); 74 74 } 75 75 76 - /* exported interface */ 77 - #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n))) 78 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 79 - 80 76 #define ATOMIC_OP(op, c_op) \ 81 77 static __inline__ void arch_atomic_##op(int i, atomic_t *v) \ 82 78 { \ ··· 213 217 { 214 218 return READ_ONCE((v)->counter); 215 219 } 216 - 217 - /* exported interface */ 218 - #define arch_atomic64_cmpxchg(v, o, n) \ 219 - ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n))) 220 - #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) 221 220 222 221 #endif /* !CONFIG_64BIT */ 223 222
-24
arch/powerpc/include/asm/atomic.h
··· 126 126 #undef ATOMIC_OP_RETURN_RELAXED 127 127 #undef ATOMIC_OP 128 128 129 - #define arch_atomic_cmpxchg(v, o, n) \ 130 - (arch_cmpxchg(&((v)->counter), (o), (n))) 131 - #define arch_atomic_cmpxchg_relaxed(v, o, n) \ 132 - arch_cmpxchg_relaxed(&((v)->counter), (o), (n)) 133 - #define arch_atomic_cmpxchg_acquire(v, o, n) \ 134 - arch_cmpxchg_acquire(&((v)->counter), (o), (n)) 135 - 136 - #define arch_atomic_xchg(v, new) \ 137 - (arch_xchg(&((v)->counter), new)) 138 - #define arch_atomic_xchg_relaxed(v, new) \ 139 - arch_xchg_relaxed(&((v)->counter), (new)) 140 - 141 129 /** 142 130 * atomic_fetch_add_unless - add unless the number is a given value 143 131 * @v: pointer of type atomic_t ··· 383 395 return t; 384 396 } 385 397 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive 386 - 387 - #define arch_atomic64_cmpxchg(v, o, n) \ 388 - (arch_cmpxchg(&((v)->counter), (o), (n))) 389 - #define arch_atomic64_cmpxchg_relaxed(v, o, n) \ 390 - arch_cmpxchg_relaxed(&((v)->counter), (o), (n)) 391 - #define arch_atomic64_cmpxchg_acquire(v, o, n) \ 392 - arch_cmpxchg_acquire(&((v)->counter), (o), (n)) 393 - 394 - #define arch_atomic64_xchg(v, new) \ 395 - (arch_xchg(&((v)->counter), new)) 396 - #define arch_atomic64_xchg_relaxed(v, new) \ 397 - arch_xchg_relaxed(&((v)->counter), (new)) 398 398 399 399 /** 400 400 * atomic64_fetch_add_unless - add unless the number is a given value
-72
arch/riscv/include/asm/atomic.h
··· 238 238 #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless 239 239 #endif 240 240 241 - /* 242 - * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as 243 - * {cmp,}xchg and the operations that return, so they need a full barrier. 244 - */ 245 - #define ATOMIC_OP(c_t, prefix, size) \ 246 - static __always_inline \ 247 - c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \ 248 - { \ 249 - return __xchg_relaxed(&(v->counter), n, size); \ 250 - } \ 251 - static __always_inline \ 252 - c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \ 253 - { \ 254 - return __xchg_acquire(&(v->counter), n, size); \ 255 - } \ 256 - static __always_inline \ 257 - c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \ 258 - { \ 259 - return __xchg_release(&(v->counter), n, size); \ 260 - } \ 261 - static __always_inline \ 262 - c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \ 263 - { \ 264 - return __arch_xchg(&(v->counter), n, size); \ 265 - } \ 266 - static __always_inline \ 267 - c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \ 268 - c_t o, c_t n) \ 269 - { \ 270 - return __cmpxchg_relaxed(&(v->counter), o, n, size); \ 271 - } \ 272 - static __always_inline \ 273 - c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \ 274 - c_t o, c_t n) \ 275 - { \ 276 - return __cmpxchg_acquire(&(v->counter), o, n, size); \ 277 - } \ 278 - static __always_inline \ 279 - c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \ 280 - c_t o, c_t n) \ 281 - { \ 282 - return __cmpxchg_release(&(v->counter), o, n, size); \ 283 - } \ 284 - static __always_inline \ 285 - c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ 286 - { \ 287 - return __cmpxchg(&(v->counter), o, n, size); \ 288 - } 289 - 290 - #ifdef CONFIG_GENERIC_ATOMIC64 291 - #define ATOMIC_OPS() \ 292 - ATOMIC_OP(int, , 4) 293 - #else 294 - #define ATOMIC_OPS() \ 295 
- ATOMIC_OP(int, , 4) \ 296 - ATOMIC_OP(s64, 64, 8) 297 - #endif 298 - 299 - ATOMIC_OPS() 300 - 301 - #define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed 302 - #define arch_atomic_xchg_acquire arch_atomic_xchg_acquire 303 - #define arch_atomic_xchg_release arch_atomic_xchg_release 304 - #define arch_atomic_xchg arch_atomic_xchg 305 - #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed 306 - #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire 307 - #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release 308 - #define arch_atomic_cmpxchg arch_atomic_cmpxchg 309 - 310 - #undef ATOMIC_OPS 311 - #undef ATOMIC_OP 312 - 313 241 static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) 314 242 { 315 243 int prev, rc;
-3
arch/sh/include/asm/atomic.h
··· 30 30 #include <asm/atomic-irq.h> 31 31 #endif 32 32 33 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 34 - #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n))) 35 - 36 33 #endif /* CONFIG_CPU_J2 */ 37 34 38 35 #endif /* __ASM_SH_ATOMIC_H */
+2
arch/sparc/include/asm/atomic_32.h
··· 24 24 int arch_atomic_fetch_or(int, atomic_t *); 25 25 int arch_atomic_fetch_xor(int, atomic_t *); 26 26 int arch_atomic_cmpxchg(atomic_t *, int, int); 27 + #define arch_atomic_cmpxchg arch_atomic_cmpxchg 27 28 int arch_atomic_xchg(atomic_t *, int); 29 + #define arch_atomic_xchg arch_atomic_xchg 28 30 int arch_atomic_fetch_add_unless(atomic_t *, int, int); 29 31 void arch_atomic_set(atomic_t *, int); 30 32
-11
arch/sparc/include/asm/atomic_64.h
··· 49 49 #undef ATOMIC_OP_RETURN 50 50 #undef ATOMIC_OP 51 51 52 - #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n))) 53 - 54 - static inline int arch_atomic_xchg(atomic_t *v, int new) 55 - { 56 - return arch_xchg(&v->counter, new); 57 - } 58 - 59 - #define arch_atomic64_cmpxchg(v, o, n) \ 60 - ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n))) 61 - #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) 62 - 63 52 s64 arch_atomic64_dec_if_positive(atomic64_t *v); 64 53 #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive 65 54
-3
arch/xtensa/include/asm/atomic.h
··· 257 257 #undef ATOMIC_OP_RETURN 258 258 #undef ATOMIC_OP 259 259 260 - #define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n))) 261 - #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) 262 - 263 260 #endif /* _XTENSA_ATOMIC_H */
-3
include/asm-generic/atomic.h
··· 130 130 #define arch_atomic_read(v) READ_ONCE((v)->counter) 131 131 #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) 132 132 133 - #define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (u32)(v))) 134 - #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (u32)(old), (u32)(new))) 135 - 136 133 #endif /* __ASM_GENERIC_ATOMIC_H */
+157 -1
include/linux/atomic/atomic-arch-fallback.h
··· 1091 1091 #endif /* arch_atomic_fetch_xor_relaxed */ 1092 1092 1093 1093 #ifndef arch_atomic_xchg_relaxed 1094 + #ifdef arch_atomic_xchg 1094 1095 #define arch_atomic_xchg_acquire arch_atomic_xchg 1095 1096 #define arch_atomic_xchg_release arch_atomic_xchg 1096 1097 #define arch_atomic_xchg_relaxed arch_atomic_xchg 1098 + #endif /* arch_atomic_xchg */ 1099 + 1100 + #ifndef arch_atomic_xchg 1101 + static __always_inline int 1102 + arch_atomic_xchg(atomic_t *v, int new) 1103 + { 1104 + return arch_xchg(&v->counter, new); 1105 + } 1106 + #define arch_atomic_xchg arch_atomic_xchg 1107 + #endif 1108 + 1109 + #ifndef arch_atomic_xchg_acquire 1110 + static __always_inline int 1111 + arch_atomic_xchg_acquire(atomic_t *v, int new) 1112 + { 1113 + return arch_xchg_acquire(&v->counter, new); 1114 + } 1115 + #define arch_atomic_xchg_acquire arch_atomic_xchg_acquire 1116 + #endif 1117 + 1118 + #ifndef arch_atomic_xchg_release 1119 + static __always_inline int 1120 + arch_atomic_xchg_release(atomic_t *v, int new) 1121 + { 1122 + return arch_xchg_release(&v->counter, new); 1123 + } 1124 + #define arch_atomic_xchg_release arch_atomic_xchg_release 1125 + #endif 1126 + 1127 + #ifndef arch_atomic_xchg_relaxed 1128 + static __always_inline int 1129 + arch_atomic_xchg_relaxed(atomic_t *v, int new) 1130 + { 1131 + return arch_xchg_relaxed(&v->counter, new); 1132 + } 1133 + #define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed 1134 + #endif 1135 + 1097 1136 #else /* arch_atomic_xchg_relaxed */ 1098 1137 1099 1138 #ifndef arch_atomic_xchg_acquire ··· 1172 1133 #endif /* arch_atomic_xchg_relaxed */ 1173 1134 1174 1135 #ifndef arch_atomic_cmpxchg_relaxed 1136 + #ifdef arch_atomic_cmpxchg 1175 1137 #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg 1176 1138 #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg 1177 1139 #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg 1140 + #endif /* arch_atomic_cmpxchg */ 1141 + 1142 + #ifndef arch_atomic_cmpxchg 1143 + static 
__always_inline int 1144 + arch_atomic_cmpxchg(atomic_t *v, int old, int new) 1145 + { 1146 + return arch_cmpxchg(&v->counter, old, new); 1147 + } 1148 + #define arch_atomic_cmpxchg arch_atomic_cmpxchg 1149 + #endif 1150 + 1151 + #ifndef arch_atomic_cmpxchg_acquire 1152 + static __always_inline int 1153 + arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) 1154 + { 1155 + return arch_cmpxchg_acquire(&v->counter, old, new); 1156 + } 1157 + #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire 1158 + #endif 1159 + 1160 + #ifndef arch_atomic_cmpxchg_release 1161 + static __always_inline int 1162 + arch_atomic_cmpxchg_release(atomic_t *v, int old, int new) 1163 + { 1164 + return arch_cmpxchg_release(&v->counter, old, new); 1165 + } 1166 + #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release 1167 + #endif 1168 + 1169 + #ifndef arch_atomic_cmpxchg_relaxed 1170 + static __always_inline int 1171 + arch_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) 1172 + { 1173 + return arch_cmpxchg_relaxed(&v->counter, old, new); 1174 + } 1175 + #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed 1176 + #endif 1177 + 1178 1178 #else /* arch_atomic_cmpxchg_relaxed */ 1179 1179 1180 1180 #ifndef arch_atomic_cmpxchg_acquire ··· 2303 2225 #endif /* arch_atomic64_fetch_xor_relaxed */ 2304 2226 2305 2227 #ifndef arch_atomic64_xchg_relaxed 2228 + #ifdef arch_atomic64_xchg 2306 2229 #define arch_atomic64_xchg_acquire arch_atomic64_xchg 2307 2230 #define arch_atomic64_xchg_release arch_atomic64_xchg 2308 2231 #define arch_atomic64_xchg_relaxed arch_atomic64_xchg 2232 + #endif /* arch_atomic64_xchg */ 2233 + 2234 + #ifndef arch_atomic64_xchg 2235 + static __always_inline s64 2236 + arch_atomic64_xchg(atomic64_t *v, s64 new) 2237 + { 2238 + return arch_xchg(&v->counter, new); 2239 + } 2240 + #define arch_atomic64_xchg arch_atomic64_xchg 2241 + #endif 2242 + 2243 + #ifndef arch_atomic64_xchg_acquire 2244 + static __always_inline s64 2245 + 
arch_atomic64_xchg_acquire(atomic64_t *v, s64 new) 2246 + { 2247 + return arch_xchg_acquire(&v->counter, new); 2248 + } 2249 + #define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire 2250 + #endif 2251 + 2252 + #ifndef arch_atomic64_xchg_release 2253 + static __always_inline s64 2254 + arch_atomic64_xchg_release(atomic64_t *v, s64 new) 2255 + { 2256 + return arch_xchg_release(&v->counter, new); 2257 + } 2258 + #define arch_atomic64_xchg_release arch_atomic64_xchg_release 2259 + #endif 2260 + 2261 + #ifndef arch_atomic64_xchg_relaxed 2262 + static __always_inline s64 2263 + arch_atomic64_xchg_relaxed(atomic64_t *v, s64 new) 2264 + { 2265 + return arch_xchg_relaxed(&v->counter, new); 2266 + } 2267 + #define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed 2268 + #endif 2269 + 2309 2270 #else /* arch_atomic64_xchg_relaxed */ 2310 2271 2311 2272 #ifndef arch_atomic64_xchg_acquire ··· 2384 2267 #endif /* arch_atomic64_xchg_relaxed */ 2385 2268 2386 2269 #ifndef arch_atomic64_cmpxchg_relaxed 2270 + #ifdef arch_atomic64_cmpxchg 2387 2271 #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg 2388 2272 #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg 2389 2273 #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg 2274 + #endif /* arch_atomic64_cmpxchg */ 2275 + 2276 + #ifndef arch_atomic64_cmpxchg 2277 + static __always_inline s64 2278 + arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) 2279 + { 2280 + return arch_cmpxchg(&v->counter, old, new); 2281 + } 2282 + #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg 2283 + #endif 2284 + 2285 + #ifndef arch_atomic64_cmpxchg_acquire 2286 + static __always_inline s64 2287 + arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) 2288 + { 2289 + return arch_cmpxchg_acquire(&v->counter, old, new); 2290 + } 2291 + #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire 2292 + #endif 2293 + 2294 + #ifndef arch_atomic64_cmpxchg_release 2295 + static __always_inline s64 2296 + 
arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) 2297 + { 2298 + return arch_cmpxchg_release(&v->counter, old, new); 2299 + } 2300 + #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release 2301 + #endif 2302 + 2303 + #ifndef arch_atomic64_cmpxchg_relaxed 2304 + static __always_inline s64 2305 + arch_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) 2306 + { 2307 + return arch_cmpxchg_relaxed(&v->counter, old, new); 2308 + } 2309 + #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed 2310 + #endif 2311 + 2390 2312 #else /* arch_atomic64_cmpxchg_relaxed */ 2391 2313 2392 2314 #ifndef arch_atomic64_cmpxchg_acquire ··· 2753 2597 #endif 2754 2598 2755 2599 #endif /* _LINUX_ATOMIC_FALLBACK_H */ 2756 - // 9f0fd6ed53267c6ec64e36cd18e6fd8df57ea277 2600 + // e1cee558cc61cae887890db30fcdf93baca9f498
+7
scripts/atomic/fallbacks/cmpxchg
··· 1 + cat <<EOF 2 + static __always_inline ${int} 3 + arch_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new) 4 + { 5 + return arch_cmpxchg${order}(&v->counter, old, new); 6 + } 7 + EOF
+7
scripts/atomic/fallbacks/xchg
··· 1 + cat <<EOF 2 + static __always_inline ${int} 3 + arch_${atomic}_xchg${order}(${atomic}_t *v, ${int} new) 4 + { 5 + return arch_xchg${order}(&v->counter, new); 6 + } 7 + EOF