Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'locking-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull atomics rework from Thomas Gleixner:
"Peter Zijlstras rework of atomics and fallbacks. This solves two
problems:

1) Compilers uninline small atomic_* static inline functions, which
can expose them to instrumentation.

2) The instrumentation of atomic primitives was done at the
architecture level while composites or fallbacks were provided at
the generic level. As a result there are no uninstrumented
variants of the fallbacks.

Both issues were in the way of fully isolating fragile entry code
paths, and especially the text poke int3 handler, which is prone to
endless recursion when anything in that code path is about to be
instrumented. This was always a problem, but it got elevated due to
the new batch mode updates of tracing.

The solution is to mark the functions __always_inline and to flip the
fallback and instrumentation so the non-instrumented variants are at
the architecture level and the instrumentation is done in generic
code.

The latter introduces another fallback variant which will go away once
all architectures have been moved over to arch_atomic_*"

* tag 'locking-urgent-2020-06-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/atomics: Flip fallbacks and instrumentation
asm-generic/atomic: Use __always_inline for fallback wrappers
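
The flip is easiest to see as a two-layer scheme. Below is a minimal sketch of
the resulting layering, not the literal kernel code; instrument_atomic_read()
stands in here for whatever KASAN/KCSAN hook the generated
asm-generic/atomic-instrumented.h actually emits:

/* arch level: raw and uninstrumented, safe for fragile entry code */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return __READ_ONCE(v->counter);
}

/* generic level: the instrumented variant that normal kernel code calls */
static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* sanitizer hook */
	return arch_atomic_read(v);
}

Entry code and the int3 handler can stay on the arch_atomic_*/__READ_ONCE()
layer and never reach the instrumentation that caused the recursion.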

+2594 -269
+3 -3
arch/arm64/include/asm/atomic.h
···
 #define ATOMIC_INIT(i)	{ (i) }

-#define arch_atomic_read(v)	READ_ONCE((v)->counter)
-#define arch_atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v)	__READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i)	__WRITE_ONCE(((v)->counter), (i))

 #define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
 #define arch_atomic_add_return_acquire	arch_atomic_add_return_acquire
···

 #define arch_atomic64_dec_if_positive	arch_atomic64_dec_if_positive

-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC

 #endif /* __ASM_ATOMIC_H */
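The new ARCH_ATOMIC define replaces the per-arch #include of
asm-generic/atomic-instrumented.h: the architecture now only advertises that
it implements the arch_atomic_* namespace, and generic code assembles the
rest. Roughly, as a sketch of the intended include layering rather than the
literal <linux/atomic.h> text:

#ifdef ARCH_ATOMIC
/* uninstrumented fallbacks, built in the arch_atomic_* namespace ... */
#include <linux/atomic-arch-fallback.h>
/* ... with instrumented atomic_* wrappers generated on top */
#include <asm-generic/atomic-instrumented.h>
#else
/* legacy path: fallbacks built on top of the plain atomic_* ops */
#include <linux/atomic-fallback.h>
#endif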
+13 -4
arch/x86/include/asm/atomic.h
···
  * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
  * it's non-inlined function that increases binary size and stack usage.
  */
-	return READ_ONCE((v)->counter);
+	return __READ_ONCE((v)->counter);
 }

 /**
···
  */
 static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-	WRITE_ONCE(v->counter, i);
+	__WRITE_ONCE(v->counter, i);
 }

 /**
···
 {
 	return i + xadd(&v->counter, i);
 }
+#define arch_atomic_add_return arch_atomic_add_return

 /**
  * arch_atomic_sub_return - subtract integer and return
···
 {
 	return arch_atomic_add_return(-i, v);
 }
+#define arch_atomic_sub_return arch_atomic_sub_return

 static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return xadd(&v->counter, i);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add

 static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
 {
 	return xadd(&v->counter, -i);
 }
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub

 static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg

-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

 static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
 	return arch_xchg(&v->counter, new);
 }
+#define arch_atomic_xchg arch_atomic_xchg

 static inline void arch_atomic_and(int i, atomic_t *v)
 {
···

 	return val;
 }
+#define arch_atomic_fetch_and arch_atomic_fetch_and

 static inline void arch_atomic_or(int i, atomic_t *v)
 {
···

 	return val;
 }
+#define arch_atomic_fetch_or arch_atomic_fetch_or

 static inline void arch_atomic_xor(int i, atomic_t *v)
 {
···

 	return val;
 }
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor

 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
···
 # include <asm/atomic64_64.h>
 #endif

-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC

 #endif /* _ASM_X86_ATOMIC_H */
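Note the pattern this diff adds after every x86 op (and why the pre-existing
arch_atomic_try_cmpxchg marker moves below its function): defining the symbol
to itself makes the C function visible to the preprocessor, so the generated
fallback header can test for it with a plain #ifndef. In outline:

static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return	/* "op exists" marker */

/* later, in the generated atomic-arch-fallback.h: */
#ifndef arch_atomic_add_return
/* ... synthesize the op only when the arch did not provide it ... */
#endif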
+9
arch/x86/include/asm/atomic64_32.h
···
 {
 	return arch_cmpxchg64(&v->counter, o, n);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

 /**
  * arch_atomic64_xchg - xchg atomic64 variable
···
 		     : "memory");
 	return o;
 }
+#define arch_atomic64_xchg arch_atomic64_xchg

 /**
  * arch_atomic64_set - set atomic64 variable
···
 			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return

 /*
  * Other variants with different arithmetic operators:
···
 			     ASM_NO_INPUT_CLOBBER("memory"));
 	return i;
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return

 static inline s64 arch_atomic64_inc_return(atomic64_t *v)
 {
···
 		     "S" (v) : "memory");
 	return (int)a;
 }
+#define arch_atomic64_add_unless arch_atomic64_add_unless

 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
···

 	return old;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and

 static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
···

 	return old;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or

 static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
···

 	return old;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

 static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
···

 	return old;
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add

 #define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

+12 -3
arch/x86/include/asm/atomic64_64.h
···
  */
 static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-	return READ_ONCE((v)->counter);
+	return __READ_ONCE((v)->counter);
 }

 /**
···
  */
 static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-	WRITE_ONCE(v->counter, i);
+	__WRITE_ONCE(v->counter, i);
 }

 /**
···
 {
 	return i + xadd(&v->counter, i);
 }
+#define arch_atomic64_add_return arch_atomic64_add_return

 static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
 {
 	return arch_atomic64_add_return(-i, v);
 }
+#define arch_atomic64_sub_return arch_atomic64_sub_return

 static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, i);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add

 static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
 {
 	return xadd(&v->counter, -i);
 }
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub

 static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return arch_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
 static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

 static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	return arch_xchg(&v->counter, new);
 }
+#define arch_atomic64_xchg arch_atomic64_xchg

 static inline void arch_atomic64_and(s64 i, atomic64_t *v)
 {
···
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 	return val;
 }
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and

 static inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
···
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
 	return val;
 }
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or

 static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
···
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 	return val;
 }
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

 #endif /* _ASM_X86_ATOMIC64_64_H */
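As a usage note on the try_cmpxchg-based loops above: arch_atomic64_try_cmpxchg()
stores the observed counter value back through *old on failure, so retry loops
re-read the variable for free. A hedged sketch of the idiom (example_inc_clamped()
is a hypothetical illustration, not kernel code):

/* increment v, but never past max: the canonical try_cmpxchg loop */
static inline void example_inc_clamped(atomic64_t *v, s64 max)
{
	s64 old = arch_atomic64_read(v);

	do {
		if (old >= max)
			return;		/* already at the clamp */
	} while (!arch_atomic64_try_cmpxchg(v, &old, old + 1));
}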
+2291
include/linux/atomic-arch-fallback.h
// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-fallback.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

#include <linux/compiler.h>

#ifndef arch_xchg_relaxed
#define arch_xchg_relaxed arch_xchg
#define arch_xchg_acquire arch_xchg
#define arch_xchg_release arch_xchg
#else /* arch_xchg_relaxed */

#ifndef arch_xchg_acquire
#define arch_xchg_acquire(...) \
	__atomic_op_acquire(arch_xchg, __VA_ARGS__)
#endif

#ifndef arch_xchg_release
#define arch_xchg_release(...) \
	__atomic_op_release(arch_xchg, __VA_ARGS__)
#endif

#ifndef arch_xchg
#define arch_xchg(...) \
	__atomic_op_fence(arch_xchg, __VA_ARGS__)
#endif

#endif /* arch_xchg_relaxed */

#ifndef arch_cmpxchg_relaxed
#define arch_cmpxchg_relaxed arch_cmpxchg
#define arch_cmpxchg_acquire arch_cmpxchg
#define arch_cmpxchg_release arch_cmpxchg
#else /* arch_cmpxchg_relaxed */

#ifndef arch_cmpxchg_acquire
#define arch_cmpxchg_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
#endif

#ifndef arch_cmpxchg_release
#define arch_cmpxchg_release(...) \
	__atomic_op_release(arch_cmpxchg, __VA_ARGS__)
#endif

#ifndef arch_cmpxchg
#define arch_cmpxchg(...) \
	__atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
#endif

#endif /* arch_cmpxchg_relaxed */

#ifndef arch_cmpxchg64_relaxed
#define arch_cmpxchg64_relaxed arch_cmpxchg64
#define arch_cmpxchg64_acquire arch_cmpxchg64
#define arch_cmpxchg64_release arch_cmpxchg64
#else /* arch_cmpxchg64_relaxed */

#ifndef arch_cmpxchg64_acquire
#define arch_cmpxchg64_acquire(...) \
	__atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
#endif

#ifndef arch_cmpxchg64_release
#define arch_cmpxchg64_release(...) \
	__atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
#endif

#ifndef arch_cmpxchg64
#define arch_cmpxchg64(...) \
	__atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
#endif

#endif /* arch_cmpxchg64_relaxed */

#ifndef arch_atomic_read_acquire
static __always_inline int
arch_atomic_read_acquire(const atomic_t *v)
{
	return smp_load_acquire(&(v)->counter);
}
#define arch_atomic_read_acquire arch_atomic_read_acquire
#endif

#ifndef arch_atomic_set_release
static __always_inline void
arch_atomic_set_release(atomic_t *v, int i)
{
	smp_store_release(&(v)->counter, i);
}
#define arch_atomic_set_release arch_atomic_set_release
#endif

#ifndef arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return
#define arch_atomic_add_return_release arch_atomic_add_return
#define arch_atomic_add_return_relaxed arch_atomic_add_return
#else /* arch_atomic_add_return_relaxed */

#ifndef arch_atomic_add_return_acquire
static __always_inline int
arch_atomic_add_return_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
#endif

#ifndef arch_atomic_add_return_release
static __always_inline int
arch_atomic_add_return_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_add_return_relaxed(i, v);
}
#define arch_atomic_add_return_release arch_atomic_add_return_release
#endif

#ifndef arch_atomic_add_return
static __always_inline int
arch_atomic_add_return(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_add_return arch_atomic_add_return
#endif

#endif /* arch_atomic_add_return_relaxed */

#ifndef arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
#define arch_atomic_fetch_add_release arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
#else /* arch_atomic_fetch_add_relaxed */

#ifndef arch_atomic_fetch_add_acquire
static __always_inline int
arch_atomic_fetch_add_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
#endif

#ifndef arch_atomic_fetch_add_release
static __always_inline int
arch_atomic_fetch_add_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
}
#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
#endif

#ifndef arch_atomic_fetch_add
static __always_inline int
arch_atomic_fetch_add(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_add arch_atomic_fetch_add
#endif

#endif /* arch_atomic_fetch_add_relaxed */

#ifndef arch_atomic_sub_return_relaxed
#define arch_atomic_sub_return_acquire arch_atomic_sub_return
#define arch_atomic_sub_return_release arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
#else /* arch_atomic_sub_return_relaxed */

#ifndef arch_atomic_sub_return_acquire
static __always_inline int
arch_atomic_sub_return_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
#endif

#ifndef arch_atomic_sub_return_release
static __always_inline int
arch_atomic_sub_return_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_sub_return_relaxed(i, v);
}
#define arch_atomic_sub_return_release arch_atomic_sub_return_release
#endif

#ifndef arch_atomic_sub_return
static __always_inline int
arch_atomic_sub_return(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_sub_return arch_atomic_sub_return
#endif

#endif /* arch_atomic_sub_return_relaxed */

#ifndef arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
#else /* arch_atomic_fetch_sub_relaxed */

#ifndef arch_atomic_fetch_sub_acquire
static __always_inline int
arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
#endif

#ifndef arch_atomic_fetch_sub_release
static __always_inline int
arch_atomic_fetch_sub_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_sub_relaxed(i, v);
}
#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
#endif

#ifndef arch_atomic_fetch_sub
static __always_inline int
arch_atomic_fetch_sub(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#endif

#endif /* arch_atomic_fetch_sub_relaxed */

#ifndef arch_atomic_inc
static __always_inline void
arch_atomic_inc(atomic_t *v)
{
	arch_atomic_add(1, v);
}
#define arch_atomic_inc arch_atomic_inc
#endif

#ifndef arch_atomic_inc_return_relaxed
#ifdef arch_atomic_inc_return
#define arch_atomic_inc_return_acquire arch_atomic_inc_return
#define arch_atomic_inc_return_release arch_atomic_inc_return
#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
#endif /* arch_atomic_inc_return */

#ifndef arch_atomic_inc_return
static __always_inline int
arch_atomic_inc_return(atomic_t *v)
{
	return arch_atomic_add_return(1, v);
}
#define arch_atomic_inc_return arch_atomic_inc_return
#endif

#ifndef arch_atomic_inc_return_acquire
static __always_inline int
arch_atomic_inc_return_acquire(atomic_t *v)
{
	return arch_atomic_add_return_acquire(1, v);
}
#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
#endif

#ifndef arch_atomic_inc_return_release
static __always_inline int
arch_atomic_inc_return_release(atomic_t *v)
{
	return arch_atomic_add_return_release(1, v);
}
#define arch_atomic_inc_return_release arch_atomic_inc_return_release
#endif

#ifndef arch_atomic_inc_return_relaxed
static __always_inline int
arch_atomic_inc_return_relaxed(atomic_t *v)
{
	return arch_atomic_add_return_relaxed(1, v);
}
#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
#endif

#else /* arch_atomic_inc_return_relaxed */

#ifndef arch_atomic_inc_return_acquire
static __always_inline int
arch_atomic_inc_return_acquire(atomic_t *v)
{
	int ret = arch_atomic_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
#endif

#ifndef arch_atomic_inc_return_release
static __always_inline int
arch_atomic_inc_return_release(atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_inc_return_relaxed(v);
}
#define arch_atomic_inc_return_release arch_atomic_inc_return_release
#endif

#ifndef arch_atomic_inc_return
static __always_inline int
arch_atomic_inc_return(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_inc_return arch_atomic_inc_return
#endif

#endif /* arch_atomic_inc_return_relaxed */

#ifndef arch_atomic_fetch_inc_relaxed
#ifdef arch_atomic_fetch_inc
#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
#endif /* arch_atomic_fetch_inc */

#ifndef arch_atomic_fetch_inc
static __always_inline int
arch_atomic_fetch_inc(atomic_t *v)
{
	return arch_atomic_fetch_add(1, v);
}
#define arch_atomic_fetch_inc arch_atomic_fetch_inc
#endif

#ifndef arch_atomic_fetch_inc_acquire
static __always_inline int
arch_atomic_fetch_inc_acquire(atomic_t *v)
{
	return arch_atomic_fetch_add_acquire(1, v);
}
#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
#endif

#ifndef arch_atomic_fetch_inc_release
static __always_inline int
arch_atomic_fetch_inc_release(atomic_t *v)
{
	return arch_atomic_fetch_add_release(1, v);
}
#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
#endif

#ifndef arch_atomic_fetch_inc_relaxed
static __always_inline int
arch_atomic_fetch_inc_relaxed(atomic_t *v)
{
	return arch_atomic_fetch_add_relaxed(1, v);
}
#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
#endif

#else /* arch_atomic_fetch_inc_relaxed */

#ifndef arch_atomic_fetch_inc_acquire
static __always_inline int
arch_atomic_fetch_inc_acquire(atomic_t *v)
{
	int ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
#endif

#ifndef arch_atomic_fetch_inc_release
static __always_inline int
arch_atomic_fetch_inc_release(atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_inc_relaxed(v);
}
#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
#endif

#ifndef arch_atomic_fetch_inc
static __always_inline int
arch_atomic_fetch_inc(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_inc arch_atomic_fetch_inc
#endif

#endif /* arch_atomic_fetch_inc_relaxed */

#ifndef arch_atomic_dec
static __always_inline void
arch_atomic_dec(atomic_t *v)
{
	arch_atomic_sub(1, v);
}
#define arch_atomic_dec arch_atomic_dec
#endif

#ifndef arch_atomic_dec_return_relaxed
#ifdef arch_atomic_dec_return
#define arch_atomic_dec_return_acquire arch_atomic_dec_return
#define arch_atomic_dec_return_release arch_atomic_dec_return
#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
#endif /* arch_atomic_dec_return */

#ifndef arch_atomic_dec_return
static __always_inline int
arch_atomic_dec_return(atomic_t *v)
{
	return arch_atomic_sub_return(1, v);
}
#define arch_atomic_dec_return arch_atomic_dec_return
#endif

#ifndef arch_atomic_dec_return_acquire
static __always_inline int
arch_atomic_dec_return_acquire(atomic_t *v)
{
	return arch_atomic_sub_return_acquire(1, v);
}
#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
#endif

#ifndef arch_atomic_dec_return_release
static __always_inline int
arch_atomic_dec_return_release(atomic_t *v)
{
	return arch_atomic_sub_return_release(1, v);
}
#define arch_atomic_dec_return_release arch_atomic_dec_return_release
#endif

#ifndef arch_atomic_dec_return_relaxed
static __always_inline int
arch_atomic_dec_return_relaxed(atomic_t *v)
{
	return arch_atomic_sub_return_relaxed(1, v);
}
#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
#endif

#else /* arch_atomic_dec_return_relaxed */

#ifndef arch_atomic_dec_return_acquire
static __always_inline int
arch_atomic_dec_return_acquire(atomic_t *v)
{
	int ret = arch_atomic_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
#endif

#ifndef arch_atomic_dec_return_release
static __always_inline int
arch_atomic_dec_return_release(atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_dec_return_relaxed(v);
}
#define arch_atomic_dec_return_release arch_atomic_dec_return_release
#endif

#ifndef arch_atomic_dec_return
static __always_inline int
arch_atomic_dec_return(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_dec_return arch_atomic_dec_return
#endif

#endif /* arch_atomic_dec_return_relaxed */

#ifndef arch_atomic_fetch_dec_relaxed
#ifdef arch_atomic_fetch_dec
#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
#endif /* arch_atomic_fetch_dec */

#ifndef arch_atomic_fetch_dec
static __always_inline int
arch_atomic_fetch_dec(atomic_t *v)
{
	return arch_atomic_fetch_sub(1, v);
}
#define arch_atomic_fetch_dec arch_atomic_fetch_dec
#endif

#ifndef arch_atomic_fetch_dec_acquire
static __always_inline int
arch_atomic_fetch_dec_acquire(atomic_t *v)
{
	return arch_atomic_fetch_sub_acquire(1, v);
}
#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
#endif

#ifndef arch_atomic_fetch_dec_release
static __always_inline int
arch_atomic_fetch_dec_release(atomic_t *v)
{
	return arch_atomic_fetch_sub_release(1, v);
}
#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
#endif

#ifndef arch_atomic_fetch_dec_relaxed
static __always_inline int
arch_atomic_fetch_dec_relaxed(atomic_t *v)
{
	return arch_atomic_fetch_sub_relaxed(1, v);
}
#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
#endif

#else /* arch_atomic_fetch_dec_relaxed */

#ifndef arch_atomic_fetch_dec_acquire
static __always_inline int
arch_atomic_fetch_dec_acquire(atomic_t *v)
{
	int ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
#endif

#ifndef arch_atomic_fetch_dec_release
static __always_inline int
arch_atomic_fetch_dec_release(atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_dec_relaxed(v);
}
#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
#endif

#ifndef arch_atomic_fetch_dec
static __always_inline int
arch_atomic_fetch_dec(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_dec arch_atomic_fetch_dec
#endif

#endif /* arch_atomic_fetch_dec_relaxed */

#ifndef arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
#define arch_atomic_fetch_and_release arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
#else /* arch_atomic_fetch_and_relaxed */

#ifndef arch_atomic_fetch_and_acquire
static __always_inline int
arch_atomic_fetch_and_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
#endif

#ifndef arch_atomic_fetch_and_release
static __always_inline int
arch_atomic_fetch_and_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_and_relaxed(i, v);
}
#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
#endif

#ifndef arch_atomic_fetch_and
static __always_inline int
arch_atomic_fetch_and(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and
#endif

#endif /* arch_atomic_fetch_and_relaxed */

#ifndef arch_atomic_andnot
static __always_inline void
arch_atomic_andnot(int i, atomic_t *v)
{
	arch_atomic_and(~i, v);
}
#define arch_atomic_andnot arch_atomic_andnot
#endif

#ifndef arch_atomic_fetch_andnot_relaxed
#ifdef arch_atomic_fetch_andnot
#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
#endif /* arch_atomic_fetch_andnot */

#ifndef arch_atomic_fetch_andnot
static __always_inline int
arch_atomic_fetch_andnot(int i, atomic_t *v)
{
	return arch_atomic_fetch_and(~i, v);
}
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif

#ifndef arch_atomic_fetch_andnot_acquire
static __always_inline int
arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_and_acquire(~i, v);
}
#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
#endif

#ifndef arch_atomic_fetch_andnot_release
static __always_inline int
arch_atomic_fetch_andnot_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_and_release(~i, v);
}
#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
#endif

#ifndef arch_atomic_fetch_andnot_relaxed
static __always_inline int
arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_and_relaxed(~i, v);
}
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#endif

#else /* arch_atomic_fetch_andnot_relaxed */

#ifndef arch_atomic_fetch_andnot_acquire
static __always_inline int
arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
#endif

#ifndef arch_atomic_fetch_andnot_release
static __always_inline int
arch_atomic_fetch_andnot_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_andnot_relaxed(i, v);
}
#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
#endif

#ifndef arch_atomic_fetch_andnot
static __always_inline int
arch_atomic_fetch_andnot(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif

#endif /* arch_atomic_fetch_andnot_relaxed */

#ifndef arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
#define arch_atomic_fetch_or_release arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
#else /* arch_atomic_fetch_or_relaxed */

#ifndef arch_atomic_fetch_or_acquire
static __always_inline int
arch_atomic_fetch_or_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
#endif

#ifndef arch_atomic_fetch_or_release
static __always_inline int
arch_atomic_fetch_or_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_or_relaxed(i, v);
}
#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
#endif

#ifndef arch_atomic_fetch_or
static __always_inline int
arch_atomic_fetch_or(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or
#endif

#endif /* arch_atomic_fetch_or_relaxed */

#ifndef arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
#else /* arch_atomic_fetch_xor_relaxed */

#ifndef arch_atomic_fetch_xor_acquire
static __always_inline int
arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
#endif

#ifndef arch_atomic_fetch_xor_release
static __always_inline int
arch_atomic_fetch_xor_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_fetch_xor_relaxed(i, v);
}
#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
#endif

#ifndef arch_atomic_fetch_xor
static __always_inline int
arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#endif

#endif /* arch_atomic_fetch_xor_relaxed */

#ifndef arch_atomic_xchg_relaxed
#define arch_atomic_xchg_acquire arch_atomic_xchg
#define arch_atomic_xchg_release arch_atomic_xchg
#define arch_atomic_xchg_relaxed arch_atomic_xchg
#else /* arch_atomic_xchg_relaxed */

#ifndef arch_atomic_xchg_acquire
static __always_inline int
arch_atomic_xchg_acquire(atomic_t *v, int i)
{
	int ret = arch_atomic_xchg_relaxed(v, i);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
#endif

#ifndef arch_atomic_xchg_release
static __always_inline int
arch_atomic_xchg_release(atomic_t *v, int i)
{
	__atomic_release_fence();
	return arch_atomic_xchg_relaxed(v, i);
}
#define arch_atomic_xchg_release arch_atomic_xchg_release
#endif

#ifndef arch_atomic_xchg
static __always_inline int
arch_atomic_xchg(atomic_t *v, int i)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_xchg_relaxed(v, i);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_xchg arch_atomic_xchg
#endif

#endif /* arch_atomic_xchg_relaxed */

#ifndef arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
#else /* arch_atomic_cmpxchg_relaxed */

#ifndef arch_atomic_cmpxchg_acquire
static __always_inline int
arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
	int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#endif

#ifndef arch_atomic_cmpxchg_release
static __always_inline int
arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
	__atomic_release_fence();
	return arch_atomic_cmpxchg_relaxed(v, old, new);
}
#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
#endif

#ifndef arch_atomic_cmpxchg
static __always_inline int
arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif

#endif /* arch_atomic_cmpxchg_relaxed */

#ifndef arch_atomic_try_cmpxchg_relaxed
#ifdef arch_atomic_try_cmpxchg
#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
#endif /* arch_atomic_try_cmpxchg */

#ifndef arch_atomic_try_cmpxchg
static __always_inline bool
arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = arch_atomic_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
#endif

#ifndef arch_atomic_try_cmpxchg_acquire
static __always_inline bool
arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = arch_atomic_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
#endif

#ifndef arch_atomic_try_cmpxchg_release
static __always_inline bool
arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = arch_atomic_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
#endif

#ifndef arch_atomic_try_cmpxchg_relaxed
static __always_inline bool
arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = arch_atomic_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
#endif

#else /* arch_atomic_try_cmpxchg_relaxed */

#ifndef arch_atomic_try_cmpxchg_acquire
static __always_inline bool
arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
#endif

#ifndef arch_atomic_try_cmpxchg_release
static __always_inline bool
arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
	__atomic_release_fence();
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}
#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
#endif

#ifndef arch_atomic_try_cmpxchg
static __always_inline bool
arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
#endif

#endif /* arch_atomic_try_cmpxchg_relaxed */

#ifndef arch_atomic_sub_and_test
/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool
arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return arch_atomic_sub_return(i, v) == 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
#endif

#ifndef arch_atomic_dec_and_test
/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool
arch_atomic_dec_and_test(atomic_t *v)
{
	return arch_atomic_dec_return(v) == 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
#endif

#ifndef arch_atomic_inc_and_test
/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool
arch_atomic_inc_and_test(atomic_t *v)
{
	return arch_atomic_inc_return(v) == 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#endif

#ifndef arch_atomic_add_negative
/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic_add_negative(int i, atomic_t *v)
{
	return arch_atomic_add_return(i, v) < 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative
#endif

#ifndef arch_atomic_fetch_add_unless
/**
 * arch_atomic_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns original value of @v
 */
static __always_inline int
arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!arch_atomic_try_cmpxchg(v, &c, c + a));

	return c;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif

#ifndef arch_atomic_add_unless
/**
 * arch_atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if @v was not already @u.
 * Returns true if the addition was done.
 */
static __always_inline bool
arch_atomic_add_unless(atomic_t *v, int a, int u)
{
	return arch_atomic_fetch_add_unless(v, a, u) != u;
}
#define arch_atomic_add_unless arch_atomic_add_unless
#endif

#ifndef arch_atomic_inc_not_zero
/**
 * arch_atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, if @v is non-zero.
 * Returns true if the increment was done.
 */
static __always_inline bool
arch_atomic_inc_not_zero(atomic_t *v)
{
	return arch_atomic_add_unless(v, 1, 0);
}
#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
#endif

#ifndef arch_atomic_inc_unless_negative
static __always_inline bool
arch_atomic_inc_unless_negative(atomic_t *v)
{
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!arch_atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
#endif

#ifndef arch_atomic_dec_unless_positive
static __always_inline bool
arch_atomic_dec_unless_positive(atomic_t *v)
{
	int c = arch_atomic_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!arch_atomic_try_cmpxchg(v, &c, c - 1));

	return true;
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
#endif

#ifndef arch_atomic_dec_if_positive
static __always_inline int
arch_atomic_dec_if_positive(atomic_t *v)
{
	int dec, c = arch_atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic_try_cmpxchg(v, &c, dec));

	return dec;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#endif

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef arch_atomic64_read_acquire
static __always_inline s64
arch_atomic64_read_acquire(const atomic64_t *v)
{
	return smp_load_acquire(&(v)->counter);
}
#define arch_atomic64_read_acquire arch_atomic64_read_acquire
#endif

#ifndef arch_atomic64_set_release
static __always_inline void
arch_atomic64_set_release(atomic64_t *v, s64 i)
{
	smp_store_release(&(v)->counter, i);
}
#define arch_atomic64_set_release arch_atomic64_set_release
#endif

#ifndef arch_atomic64_add_return_relaxed
#define arch_atomic64_add_return_acquire arch_atomic64_add_return
#define arch_atomic64_add_return_release arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
#else /* arch_atomic64_add_return_relaxed */

#ifndef arch_atomic64_add_return_acquire
static __always_inline s64
arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
	s64 ret = arch_atomic64_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
#endif

#ifndef arch_atomic64_add_return_release
static __always_inline s64
arch_atomic64_add_return_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_add_return_relaxed(i, v);
}
#define arch_atomic64_add_return_release arch_atomic64_add_return_release
#endif

#ifndef arch_atomic64_add_return
static __always_inline s64
arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_add_return arch_atomic64_add_return
#endif

#endif /* arch_atomic64_add_return_relaxed */

#ifndef arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
#else /* arch_atomic64_fetch_add_relaxed */

#ifndef arch_atomic64_fetch_add_acquire
static __always_inline s64
arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
	s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
#endif

#ifndef arch_atomic64_fetch_add_release
static __always_inline s64
arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_fetch_add_relaxed(i, v);
}
#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
#endif

#ifndef arch_atomic64_fetch_add
static __always_inline s64
arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#endif

#endif /* arch_atomic64_fetch_add_relaxed */

#ifndef arch_atomic64_sub_return_relaxed
#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
#define arch_atomic64_sub_return_release arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
#else /* arch_atomic64_sub_return_relaxed */

#ifndef arch_atomic64_sub_return_acquire
static __always_inline s64
arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
	s64 ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
#endif

#ifndef arch_atomic64_sub_return_release
static __always_inline s64
arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_sub_return_relaxed(i, v);
}
#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
#endif

#ifndef arch_atomic64_sub_return
static __always_inline s64
arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
#endif

#endif /* arch_atomic64_sub_return_relaxed */

#ifndef arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
#else /* arch_atomic64_fetch_sub_relaxed */

#ifndef arch_atomic64_fetch_sub_acquire
static __always_inline s64
arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
	s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
#endif

#ifndef arch_atomic64_fetch_sub_release
static __always_inline s64
arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_fetch_sub_relaxed(i, v);
}
#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
#endif

#ifndef arch_atomic64_fetch_sub
static __always_inline s64
arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif

#endif /* arch_atomic64_fetch_sub_relaxed */

#ifndef arch_atomic64_inc
static __always_inline void
arch_atomic64_inc(atomic64_t *v)
{
	arch_atomic64_add(1, v);
}
#define arch_atomic64_inc arch_atomic64_inc
#endif

#ifndef arch_atomic64_inc_return_relaxed
#ifdef arch_atomic64_inc_return
#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
#define arch_atomic64_inc_return_release arch_atomic64_inc_return
#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
#endif /* arch_atomic64_inc_return */

#ifndef arch_atomic64_inc_return
static __always_inline s64
arch_atomic64_inc_return(atomic64_t *v)
{
	return arch_atomic64_add_return(1, v);
}
#define arch_atomic64_inc_return arch_atomic64_inc_return
#endif

#ifndef arch_atomic64_inc_return_acquire
static __always_inline s64
arch_atomic64_inc_return_acquire(atomic64_t *v)
{
	return arch_atomic64_add_return_acquire(1, v);
}
#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
#endif

#ifndef arch_atomic64_inc_return_release
static __always_inline s64
arch_atomic64_inc_return_release(atomic64_t *v)
{
	return arch_atomic64_add_return_release(1, v);
}
#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
#endif

#ifndef arch_atomic64_inc_return_relaxed
static __always_inline s64
arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	return arch_atomic64_add_return_relaxed(1, v);
}
#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#endif

#else /* arch_atomic64_inc_return_relaxed */

#ifndef arch_atomic64_inc_return_acquire
static __always_inline s64
arch_atomic64_inc_return_acquire(atomic64_t *v)
{
	s64 ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
#endif

#ifndef arch_atomic64_inc_return_release
static __always_inline s64
arch_atomic64_inc_return_release(atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_inc_return_relaxed(v);
}
#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
#endif

#ifndef arch_atomic64_inc_return
static __always_inline s64
arch_atomic64_inc_return(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return
#endif

#endif /* arch_atomic64_inc_return_relaxed */

#ifndef arch_atomic64_fetch_inc_relaxed
#ifdef arch_atomic64_fetch_inc
#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
#endif /* arch_atomic64_fetch_inc */

#ifndef arch_atomic64_fetch_inc
static __always_inline s64
arch_atomic64_fetch_inc(atomic64_t *v)
{
	return arch_atomic64_fetch_add(1, v);
}
#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
#endif

#ifndef arch_atomic64_fetch_inc_acquire
static __always_inline s64
arch_atomic64_fetch_inc_acquire(atomic64_t *v)
{
	return arch_atomic64_fetch_add_acquire(1, v);
}
#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
#endif

#ifndef arch_atomic64_fetch_inc_release
static __always_inline s64
arch_atomic64_fetch_inc_release(atomic64_t *v)
{
	return arch_atomic64_fetch_add_release(1, v);
}
#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
#endif

#ifndef arch_atomic64_fetch_inc_relaxed
static __always_inline s64
arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
	return arch_atomic64_fetch_add_relaxed(1, v);
}
#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
#endif

#else /* arch_atomic64_fetch_inc_relaxed */

#ifndef arch_atomic64_fetch_inc_acquire
static __always_inline s64
arch_atomic64_fetch_inc_acquire(atomic64_t *v)
{
	s64 ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
#endif

#ifndef arch_atomic64_fetch_inc_release
static __always_inline s64
arch_atomic64_fetch_inc_release(atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_fetch_inc_relaxed(v);
}
#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
#endif

#ifndef arch_atomic64_fetch_inc
static __always_inline s64
arch_atomic64_fetch_inc(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
#endif

#endif /* arch_atomic64_fetch_inc_relaxed */

#ifndef arch_atomic64_dec
static __always_inline void
arch_atomic64_dec(atomic64_t *v)
{
	arch_atomic64_sub(1, v);
}
#define arch_atomic64_dec arch_atomic64_dec
#endif

#ifndef arch_atomic64_dec_return_relaxed
#ifdef arch_atomic64_dec_return
#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
#define arch_atomic64_dec_return_release arch_atomic64_dec_return
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
#endif /* arch_atomic64_dec_return */

#ifndef arch_atomic64_dec_return
static __always_inline s64
arch_atomic64_dec_return(atomic64_t *v)
{
	return arch_atomic64_sub_return(1, v);
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
#endif

#ifndef arch_atomic64_dec_return_acquire
static __always_inline s64
arch_atomic64_dec_return_acquire(atomic64_t *v)
{
	return arch_atomic64_sub_return_acquire(1, v);
}
#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
#endif

#ifndef arch_atomic64_dec_return_release
static __always_inline s64
arch_atomic64_dec_return_release(atomic64_t *v)
{
	return arch_atomic64_sub_return_release(1, v);
}
#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
#endif

#ifndef arch_atomic64_dec_return_relaxed
static __always_inline s64
arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	return arch_atomic64_sub_return_relaxed(1, v);
}
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
#endif

#else /* arch_atomic64_dec_return_relaxed */

#ifndef arch_atomic64_dec_return_acquire
static __always_inline s64
arch_atomic64_dec_return_acquire(atomic64_t *v)
{
	s64 ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
#endif

#ifndef arch_atomic64_dec_return_release
static __always_inline s64
arch_atomic64_dec_return_release(atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_dec_return_relaxed(v);
}
#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
#endif

#ifndef arch_atomic64_dec_return
static __always_inline s64
arch_atomic64_dec_return(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
#endif

#endif /* arch_atomic64_dec_return_relaxed */

#ifndef arch_atomic64_fetch_dec_relaxed
#ifdef arch_atomic64_fetch_dec
#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
#endif /* arch_atomic64_fetch_dec */

#ifndef arch_atomic64_fetch_dec
static __always_inline s64
arch_atomic64_fetch_dec(atomic64_t *v)
{
	return arch_atomic64_fetch_sub(1, v);
}
#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
#endif

#ifndef arch_atomic64_fetch_dec_acquire
static __always_inline s64
arch_atomic64_fetch_dec_acquire(atomic64_t *v)
{
	return arch_atomic64_fetch_sub_acquire(1, v);
}
#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
#endif

#ifndef arch_atomic64_fetch_dec_release
static __always_inline s64
arch_atomic64_fetch_dec_release(atomic64_t *v)
{
	return arch_atomic64_fetch_sub_release(1, v);
}
#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
#endif

#ifndef arch_atomic64_fetch_dec_relaxed
static __always_inline s64
arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
	return arch_atomic64_fetch_sub_relaxed(1, v);
}
#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
#endif

#else /* arch_atomic64_fetch_dec_relaxed */

#ifndef arch_atomic64_fetch_dec_acquire
static __always_inline s64
arch_atomic64_fetch_dec_acquire(atomic64_t *v)
{
	s64 ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
#endif

#ifndef arch_atomic64_fetch_dec_release
static __always_inline s64
arch_atomic64_fetch_dec_release(atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_fetch_dec_relaxed(v);
}
#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
#endif

#ifndef arch_atomic64_fetch_dec
static __always_inline s64
arch_atomic64_fetch_dec(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
#endif

#endif /* arch_atomic64_fetch_dec_relaxed */

#ifndef arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
#else /* arch_atomic64_fetch_and_relaxed */

#ifndef arch_atomic64_fetch_and_acquire
static __always_inline s64
arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
	s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
#endif

#ifndef arch_atomic64_fetch_and_release
static __always_inline s64
arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_fetch_and_relaxed(i, v);
}
#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
#endif

#ifndef arch_atomic64_fetch_and
static __always_inline s64
arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
#endif

#endif /* arch_atomic64_fetch_and_relaxed */

#ifndef arch_atomic64_andnot
static __always_inline void
arch_atomic64_andnot(s64 i, atomic64_t *v)
{
	arch_atomic64_and(~i, v);
}
#define arch_atomic64_andnot arch_atomic64_andnot
arch_atomic64_andnot arch_atomic64_andnot 1764 + #endif 1765 + 1766 + #ifndef arch_atomic64_fetch_andnot_relaxed 1767 + #ifdef arch_atomic64_fetch_andnot 1768 + #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot 1769 + #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot 1770 + #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot 1771 + #endif /* arch_atomic64_fetch_andnot */ 1772 + 1773 + #ifndef arch_atomic64_fetch_andnot 1774 + static __always_inline s64 1775 + arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) 1776 + { 1777 + return arch_atomic64_fetch_and(~i, v); 1778 + } 1779 + #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot 1780 + #endif 1781 + 1782 + #ifndef arch_atomic64_fetch_andnot_acquire 1783 + static __always_inline s64 1784 + arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) 1785 + { 1786 + return arch_atomic64_fetch_and_acquire(~i, v); 1787 + } 1788 + #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire 1789 + #endif 1790 + 1791 + #ifndef arch_atomic64_fetch_andnot_release 1792 + static __always_inline s64 1793 + arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) 1794 + { 1795 + return arch_atomic64_fetch_and_release(~i, v); 1796 + } 1797 + #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release 1798 + #endif 1799 + 1800 + #ifndef arch_atomic64_fetch_andnot_relaxed 1801 + static __always_inline s64 1802 + arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) 1803 + { 1804 + return arch_atomic64_fetch_and_relaxed(~i, v); 1805 + } 1806 + #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed 1807 + #endif 1808 + 1809 + #else /* arch_atomic64_fetch_andnot_relaxed */ 1810 + 1811 + #ifndef arch_atomic64_fetch_andnot_acquire 1812 + static __always_inline s64 1813 + arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) 1814 + { 1815 + s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); 1816 + __atomic_acquire_fence(); 1817 + return ret; 1818 + } 1819 + #define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire 1820 + #endif 1821 + 1822 + #ifndef arch_atomic64_fetch_andnot_release 1823 + static __always_inline s64 1824 + arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) 1825 + { 1826 + __atomic_release_fence(); 1827 + return arch_atomic64_fetch_andnot_relaxed(i, v); 1828 + } 1829 + #define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release 1830 + #endif 1831 + 1832 + #ifndef arch_atomic64_fetch_andnot 1833 + static __always_inline s64 1834 + arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) 1835 + { 1836 + s64 ret; 1837 + __atomic_pre_full_fence(); 1838 + ret = arch_atomic64_fetch_andnot_relaxed(i, v); 1839 + __atomic_post_full_fence(); 1840 + return ret; 1841 + } 1842 + #define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot 1843 + #endif 1844 + 1845 + #endif /* arch_atomic64_fetch_andnot_relaxed */ 1846 + 1847 + #ifndef arch_atomic64_fetch_or_relaxed 1848 + #define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or 1849 + #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or 1850 + #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or 1851 + #else /* arch_atomic64_fetch_or_relaxed */ 1852 + 1853 + #ifndef arch_atomic64_fetch_or_acquire 1854 + static __always_inline s64 1855 + arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) 1856 + { 1857 + s64 ret = arch_atomic64_fetch_or_relaxed(i, v); 1858 + __atomic_acquire_fence(); 1859 + return ret; 1860 + } 1861 + 
#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire 1862 + #endif 1863 + 1864 + #ifndef arch_atomic64_fetch_or_release 1865 + static __always_inline s64 1866 + arch_atomic64_fetch_or_release(s64 i, atomic64_t *v) 1867 + { 1868 + __atomic_release_fence(); 1869 + return arch_atomic64_fetch_or_relaxed(i, v); 1870 + } 1871 + #define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release 1872 + #endif 1873 + 1874 + #ifndef arch_atomic64_fetch_or 1875 + static __always_inline s64 1876 + arch_atomic64_fetch_or(s64 i, atomic64_t *v) 1877 + { 1878 + s64 ret; 1879 + __atomic_pre_full_fence(); 1880 + ret = arch_atomic64_fetch_or_relaxed(i, v); 1881 + __atomic_post_full_fence(); 1882 + return ret; 1883 + } 1884 + #define arch_atomic64_fetch_or arch_atomic64_fetch_or 1885 + #endif 1886 + 1887 + #endif /* arch_atomic64_fetch_or_relaxed */ 1888 + 1889 + #ifndef arch_atomic64_fetch_xor_relaxed 1890 + #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor 1891 + #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor 1892 + #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor 1893 + #else /* arch_atomic64_fetch_xor_relaxed */ 1894 + 1895 + #ifndef arch_atomic64_fetch_xor_acquire 1896 + static __always_inline s64 1897 + arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) 1898 + { 1899 + s64 ret = arch_atomic64_fetch_xor_relaxed(i, v); 1900 + __atomic_acquire_fence(); 1901 + return ret; 1902 + } 1903 + #define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire 1904 + #endif 1905 + 1906 + #ifndef arch_atomic64_fetch_xor_release 1907 + static __always_inline s64 1908 + arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v) 1909 + { 1910 + __atomic_release_fence(); 1911 + return arch_atomic64_fetch_xor_relaxed(i, v); 1912 + } 1913 + #define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release 1914 + #endif 1915 + 1916 + #ifndef arch_atomic64_fetch_xor 1917 + static __always_inline s64 1918 + arch_atomic64_fetch_xor(s64 i, atomic64_t *v) 1919 + { 1920 + s64 ret; 1921 + __atomic_pre_full_fence(); 1922 + ret = arch_atomic64_fetch_xor_relaxed(i, v); 1923 + __atomic_post_full_fence(); 1924 + return ret; 1925 + } 1926 + #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor 1927 + #endif 1928 + 1929 + #endif /* arch_atomic64_fetch_xor_relaxed */ 1930 + 1931 + #ifndef arch_atomic64_xchg_relaxed 1932 + #define arch_atomic64_xchg_acquire arch_atomic64_xchg 1933 + #define arch_atomic64_xchg_release arch_atomic64_xchg 1934 + #define arch_atomic64_xchg_relaxed arch_atomic64_xchg 1935 + #else /* arch_atomic64_xchg_relaxed */ 1936 + 1937 + #ifndef arch_atomic64_xchg_acquire 1938 + static __always_inline s64 1939 + arch_atomic64_xchg_acquire(atomic64_t *v, s64 i) 1940 + { 1941 + s64 ret = arch_atomic64_xchg_relaxed(v, i); 1942 + __atomic_acquire_fence(); 1943 + return ret; 1944 + } 1945 + #define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire 1946 + #endif 1947 + 1948 + #ifndef arch_atomic64_xchg_release 1949 + static __always_inline s64 1950 + arch_atomic64_xchg_release(atomic64_t *v, s64 i) 1951 + { 1952 + __atomic_release_fence(); 1953 + return arch_atomic64_xchg_relaxed(v, i); 1954 + } 1955 + #define arch_atomic64_xchg_release arch_atomic64_xchg_release 1956 + #endif 1957 + 1958 + #ifndef arch_atomic64_xchg 1959 + static __always_inline s64 1960 + arch_atomic64_xchg(atomic64_t *v, s64 i) 1961 + { 1962 + s64 ret; 1963 + __atomic_pre_full_fence(); 1964 + ret = arch_atomic64_xchg_relaxed(v, i); 1965 + __atomic_post_full_fence(); 1966 + 
return ret; 1967 + } 1968 + #define arch_atomic64_xchg arch_atomic64_xchg 1969 + #endif 1970 + 1971 + #endif /* arch_atomic64_xchg_relaxed */ 1972 + 1973 + #ifndef arch_atomic64_cmpxchg_relaxed 1974 + #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg 1975 + #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg 1976 + #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg 1977 + #else /* arch_atomic64_cmpxchg_relaxed */ 1978 + 1979 + #ifndef arch_atomic64_cmpxchg_acquire 1980 + static __always_inline s64 1981 + arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) 1982 + { 1983 + s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new); 1984 + __atomic_acquire_fence(); 1985 + return ret; 1986 + } 1987 + #define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire 1988 + #endif 1989 + 1990 + #ifndef arch_atomic64_cmpxchg_release 1991 + static __always_inline s64 1992 + arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) 1993 + { 1994 + __atomic_release_fence(); 1995 + return arch_atomic64_cmpxchg_relaxed(v, old, new); 1996 + } 1997 + #define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release 1998 + #endif 1999 + 2000 + #ifndef arch_atomic64_cmpxchg 2001 + static __always_inline s64 2002 + arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) 2003 + { 2004 + s64 ret; 2005 + __atomic_pre_full_fence(); 2006 + ret = arch_atomic64_cmpxchg_relaxed(v, old, new); 2007 + __atomic_post_full_fence(); 2008 + return ret; 2009 + } 2010 + #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg 2011 + #endif 2012 + 2013 + #endif /* arch_atomic64_cmpxchg_relaxed */ 2014 + 2015 + #ifndef arch_atomic64_try_cmpxchg_relaxed 2016 + #ifdef arch_atomic64_try_cmpxchg 2017 + #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg 2018 + #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg 2019 + #define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg 2020 + #endif /* arch_atomic64_try_cmpxchg */ 2021 + 2022 + #ifndef arch_atomic64_try_cmpxchg 2023 + static __always_inline bool 2024 + arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) 2025 + { 2026 + s64 r, o = *old; 2027 + r = arch_atomic64_cmpxchg(v, o, new); 2028 + if (unlikely(r != o)) 2029 + *old = r; 2030 + return likely(r == o); 2031 + } 2032 + #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg 2033 + #endif 2034 + 2035 + #ifndef arch_atomic64_try_cmpxchg_acquire 2036 + static __always_inline bool 2037 + arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) 2038 + { 2039 + s64 r, o = *old; 2040 + r = arch_atomic64_cmpxchg_acquire(v, o, new); 2041 + if (unlikely(r != o)) 2042 + *old = r; 2043 + return likely(r == o); 2044 + } 2045 + #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire 2046 + #endif 2047 + 2048 + #ifndef arch_atomic64_try_cmpxchg_release 2049 + static __always_inline bool 2050 + arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) 2051 + { 2052 + s64 r, o = *old; 2053 + r = arch_atomic64_cmpxchg_release(v, o, new); 2054 + if (unlikely(r != o)) 2055 + *old = r; 2056 + return likely(r == o); 2057 + } 2058 + #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release 2059 + #endif 2060 + 2061 + #ifndef arch_atomic64_try_cmpxchg_relaxed 2062 + static __always_inline bool 2063 + arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) 2064 + { 2065 + s64 r, o = *old; 2066 + r = arch_atomic64_cmpxchg_relaxed(v, o, new); 2067 + if (unlikely(r != o)) 2068 + *old = r; 2069 
+ return likely(r == o); 2070 + } 2071 + #define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed 2072 + #endif 2073 + 2074 + #else /* arch_atomic64_try_cmpxchg_relaxed */ 2075 + 2076 + #ifndef arch_atomic64_try_cmpxchg_acquire 2077 + static __always_inline bool 2078 + arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) 2079 + { 2080 + bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); 2081 + __atomic_acquire_fence(); 2082 + return ret; 2083 + } 2084 + #define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire 2085 + #endif 2086 + 2087 + #ifndef arch_atomic64_try_cmpxchg_release 2088 + static __always_inline bool 2089 + arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) 2090 + { 2091 + __atomic_release_fence(); 2092 + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); 2093 + } 2094 + #define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release 2095 + #endif 2096 + 2097 + #ifndef arch_atomic64_try_cmpxchg 2098 + static __always_inline bool 2099 + arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) 2100 + { 2101 + bool ret; 2102 + __atomic_pre_full_fence(); 2103 + ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); 2104 + __atomic_post_full_fence(); 2105 + return ret; 2106 + } 2107 + #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg 2108 + #endif 2109 + 2110 + #endif /* arch_atomic64_try_cmpxchg_relaxed */ 2111 + 2112 + #ifndef arch_atomic64_sub_and_test 2113 + /** 2114 + * arch_atomic64_sub_and_test - subtract value from variable and test result 2115 + * @i: integer value to subtract 2116 + * @v: pointer of type atomic64_t 2117 + * 2118 + * Atomically subtracts @i from @v and returns 2119 + * true if the result is zero, or false for all 2120 + * other cases. 2121 + */ 2122 + static __always_inline bool 2123 + arch_atomic64_sub_and_test(s64 i, atomic64_t *v) 2124 + { 2125 + return arch_atomic64_sub_return(i, v) == 0; 2126 + } 2127 + #define arch_atomic64_sub_and_test arch_atomic64_sub_and_test 2128 + #endif 2129 + 2130 + #ifndef arch_atomic64_dec_and_test 2131 + /** 2132 + * arch_atomic64_dec_and_test - decrement and test 2133 + * @v: pointer of type atomic64_t 2134 + * 2135 + * Atomically decrements @v by 1 and 2136 + * returns true if the result is 0, or false for all other 2137 + * cases. 2138 + */ 2139 + static __always_inline bool 2140 + arch_atomic64_dec_and_test(atomic64_t *v) 2141 + { 2142 + return arch_atomic64_dec_return(v) == 0; 2143 + } 2144 + #define arch_atomic64_dec_and_test arch_atomic64_dec_and_test 2145 + #endif 2146 + 2147 + #ifndef arch_atomic64_inc_and_test 2148 + /** 2149 + * arch_atomic64_inc_and_test - increment and test 2150 + * @v: pointer of type atomic64_t 2151 + * 2152 + * Atomically increments @v by 1 2153 + * and returns true if the result is zero, or false for all 2154 + * other cases. 2155 + */ 2156 + static __always_inline bool 2157 + arch_atomic64_inc_and_test(atomic64_t *v) 2158 + { 2159 + return arch_atomic64_inc_return(v) == 0; 2160 + } 2161 + #define arch_atomic64_inc_and_test arch_atomic64_inc_and_test 2162 + #endif 2163 + 2164 + #ifndef arch_atomic64_add_negative 2165 + /** 2166 + * arch_atomic64_add_negative - add and test if negative 2167 + * @i: integer value to add 2168 + * @v: pointer of type atomic64_t 2169 + * 2170 + * Atomically adds @i to @v and returns true 2171 + * if the result is negative, or false when 2172 + * result is greater than or equal to zero. 
2173 + */ 2174 + static __always_inline bool 2175 + arch_atomic64_add_negative(s64 i, atomic64_t *v) 2176 + { 2177 + return arch_atomic64_add_return(i, v) < 0; 2178 + } 2179 + #define arch_atomic64_add_negative arch_atomic64_add_negative 2180 + #endif 2181 + 2182 + #ifndef arch_atomic64_fetch_add_unless 2183 + /** 2184 + * arch_atomic64_fetch_add_unless - add unless the number is already a given value 2185 + * @v: pointer of type atomic64_t 2186 + * @a: the amount to add to v... 2187 + * @u: ...unless v is equal to u. 2188 + * 2189 + * Atomically adds @a to @v, so long as @v was not already @u. 2190 + * Returns original value of @v 2191 + */ 2192 + static __always_inline s64 2193 + arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) 2194 + { 2195 + s64 c = arch_atomic64_read(v); 2196 + 2197 + do { 2198 + if (unlikely(c == u)) 2199 + break; 2200 + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); 2201 + 2202 + return c; 2203 + } 2204 + #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless 2205 + #endif 2206 + 2207 + #ifndef arch_atomic64_add_unless 2208 + /** 2209 + * arch_atomic64_add_unless - add unless the number is already a given value 2210 + * @v: pointer of type atomic64_t 2211 + * @a: the amount to add to v... 2212 + * @u: ...unless v is equal to u. 2213 + * 2214 + * Atomically adds @a to @v, if @v was not already @u. 2215 + * Returns true if the addition was done. 2216 + */ 2217 + static __always_inline bool 2218 + arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) 2219 + { 2220 + return arch_atomic64_fetch_add_unless(v, a, u) != u; 2221 + } 2222 + #define arch_atomic64_add_unless arch_atomic64_add_unless 2223 + #endif 2224 + 2225 + #ifndef arch_atomic64_inc_not_zero 2226 + /** 2227 + * arch_atomic64_inc_not_zero - increment unless the number is zero 2228 + * @v: pointer of type atomic64_t 2229 + * 2230 + * Atomically increments @v by 1, if @v is non-zero. 2231 + * Returns true if the increment was done. 
2232 + */ 2233 + static __always_inline bool 2234 + arch_atomic64_inc_not_zero(atomic64_t *v) 2235 + { 2236 + return arch_atomic64_add_unless(v, 1, 0); 2237 + } 2238 + #define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero 2239 + #endif 2240 + 2241 + #ifndef arch_atomic64_inc_unless_negative 2242 + static __always_inline bool 2243 + arch_atomic64_inc_unless_negative(atomic64_t *v) 2244 + { 2245 + s64 c = arch_atomic64_read(v); 2246 + 2247 + do { 2248 + if (unlikely(c < 0)) 2249 + return false; 2250 + } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); 2251 + 2252 + return true; 2253 + } 2254 + #define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative 2255 + #endif 2256 + 2257 + #ifndef arch_atomic64_dec_unless_positive 2258 + static __always_inline bool 2259 + arch_atomic64_dec_unless_positive(atomic64_t *v) 2260 + { 2261 + s64 c = arch_atomic64_read(v); 2262 + 2263 + do { 2264 + if (unlikely(c > 0)) 2265 + return false; 2266 + } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); 2267 + 2268 + return true; 2269 + } 2270 + #define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive 2271 + #endif 2272 + 2273 + #ifndef arch_atomic64_dec_if_positive 2274 + static __always_inline s64 2275 + arch_atomic64_dec_if_positive(atomic64_t *v) 2276 + { 2277 + s64 dec, c = arch_atomic64_read(v); 2278 + 2279 + do { 2280 + dec = c - 1; 2281 + if (unlikely(dec < 0)) 2282 + break; 2283 + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); 2284 + 2285 + return dec; 2286 + } 2287 + #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive 2288 + #endif 2289 + 2290 + #endif /* _LINUX_ATOMIC_FALLBACK_H */ 2291 + // 90cd26cfd69d2250303d654955a0cc12620fb91b
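This closes out the new include/linux/atomic-arch-fallback.h. The composition rule used throughout the generated fallbacks above is uniform: given only an arch-provided _relaxed primitive, the _acquire variant is the relaxed op followed by __atomic_acquire_fence(), the _release variant is __atomic_release_fence() followed by the relaxed op, and the fully ordered form brackets the relaxed op with __atomic_pre_full_fence()/__atomic_post_full_fence() (these hooks fall back to smp_mb__before_atomic()/smp_mb__after_atomic() when an architecture does not override them). A minimal user-space analogue of that rule, written against C11 atomics purely for illustration (none of these names are kernel API, and the seq_cst fences stand in for the kernel's full barriers):

#include <stdatomic.h>
#include <stdint.h>

/* The one primitive an "architecture" supplies: a relaxed RMW. */
static inline int64_t fetch_sub_relaxed(_Atomic int64_t *v, int64_t i)
{
	return atomic_fetch_sub_explicit(v, i, memory_order_relaxed);
}

/* _acquire fallback: relaxed op, then an acquire fence. */
static inline int64_t fetch_sub_acquire(_Atomic int64_t *v, int64_t i)
{
	int64_t ret = fetch_sub_relaxed(v, i);
	atomic_thread_fence(memory_order_acquire);
	return ret;
}

/* _release fallback: a release fence, then the relaxed op. */
static inline int64_t fetch_sub_release(_Atomic int64_t *v, int64_t i)
{
	atomic_thread_fence(memory_order_release);
	return fetch_sub_relaxed(v, i);
}

/* Fully ordered fallback: fences on both sides of the relaxed op. */
static inline int64_t fetch_sub(_Atomic int64_t *v, int64_t i)
{
	atomic_thread_fence(memory_order_seq_cst);
	int64_t ret = fetch_sub_relaxed(v, i);
	atomic_thread_fence(memory_order_seq_cst);
	return ret;
}

The same three-way expansion is emitted for every ordered operation in the header, which is why the file is script-generated; the trailing // comment is a content hash used to detect manual edits to the generated file.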
+171 -175
include/linux/atomic-fallback.h
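In include/linux/atomic-fallback.h below, the generated content is functionally unchanged and the sweep is mechanical: every fallback flips from static inline to static __always_inline, <linux/compiler.h> is added to provide that annotation, and the atomic*_cond_read_*() macros are dropped from the generated file (they are relocated to common, non-generated code shared by both fallback variants). For reference, __always_inline is ordinary inline plus the always_inline function attribute, roughly:

/* Roughly what the kernel's compiler headers define
 * (see include/linux/compiler_attributes.h); illustrative only: */
#define __always_inline inline __attribute__((__always_inline__))

/*
 * Plain "static inline" is only a hint: the compiler is free to emit an
 * out-of-line copy (it routinely does at -O0 or under heavy inlining
 * pressure). The attribute removes that freedom; with GCC, a call that
 * cannot be inlined becomes a hard build error instead.
 */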
··· 6 6 #ifndef _LINUX_ATOMIC_FALLBACK_H 7 7 #define _LINUX_ATOMIC_FALLBACK_H 8 8 9 + #include <linux/compiler.h> 10 + 9 11 #ifndef xchg_relaxed 10 12 #define xchg_relaxed xchg 11 13 #define xchg_acquire xchg ··· 78 76 #endif /* cmpxchg64_relaxed */ 79 77 80 78 #ifndef atomic_read_acquire 81 - static inline int 79 + static __always_inline int 82 80 atomic_read_acquire(const atomic_t *v) 83 81 { 84 82 return smp_load_acquire(&(v)->counter); ··· 87 85 #endif 88 86 89 87 #ifndef atomic_set_release 90 - static inline void 88 + static __always_inline void 91 89 atomic_set_release(atomic_t *v, int i) 92 90 { 93 91 smp_store_release(&(v)->counter, i); ··· 102 100 #else /* atomic_add_return_relaxed */ 103 101 104 102 #ifndef atomic_add_return_acquire 105 - static inline int 103 + static __always_inline int 106 104 atomic_add_return_acquire(int i, atomic_t *v) 107 105 { 108 106 int ret = atomic_add_return_relaxed(i, v); ··· 113 111 #endif 114 112 115 113 #ifndef atomic_add_return_release 116 - static inline int 114 + static __always_inline int 117 115 atomic_add_return_release(int i, atomic_t *v) 118 116 { 119 117 __atomic_release_fence(); ··· 123 121 #endif 124 122 125 123 #ifndef atomic_add_return 126 - static inline int 124 + static __always_inline int 127 125 atomic_add_return(int i, atomic_t *v) 128 126 { 129 127 int ret; ··· 144 142 #else /* atomic_fetch_add_relaxed */ 145 143 146 144 #ifndef atomic_fetch_add_acquire 147 - static inline int 145 + static __always_inline int 148 146 atomic_fetch_add_acquire(int i, atomic_t *v) 149 147 { 150 148 int ret = atomic_fetch_add_relaxed(i, v); ··· 155 153 #endif 156 154 157 155 #ifndef atomic_fetch_add_release 158 - static inline int 156 + static __always_inline int 159 157 atomic_fetch_add_release(int i, atomic_t *v) 160 158 { 161 159 __atomic_release_fence(); ··· 165 163 #endif 166 164 167 165 #ifndef atomic_fetch_add 168 - static inline int 166 + static __always_inline int 169 167 atomic_fetch_add(int i, atomic_t *v) 170 168 { 171 169 int ret; ··· 186 184 #else /* atomic_sub_return_relaxed */ 187 185 188 186 #ifndef atomic_sub_return_acquire 189 - static inline int 187 + static __always_inline int 190 188 atomic_sub_return_acquire(int i, atomic_t *v) 191 189 { 192 190 int ret = atomic_sub_return_relaxed(i, v); ··· 197 195 #endif 198 196 199 197 #ifndef atomic_sub_return_release 200 - static inline int 198 + static __always_inline int 201 199 atomic_sub_return_release(int i, atomic_t *v) 202 200 { 203 201 __atomic_release_fence(); ··· 207 205 #endif 208 206 209 207 #ifndef atomic_sub_return 210 - static inline int 208 + static __always_inline int 211 209 atomic_sub_return(int i, atomic_t *v) 212 210 { 213 211 int ret; ··· 228 226 #else /* atomic_fetch_sub_relaxed */ 229 227 230 228 #ifndef atomic_fetch_sub_acquire 231 - static inline int 229 + static __always_inline int 232 230 atomic_fetch_sub_acquire(int i, atomic_t *v) 233 231 { 234 232 int ret = atomic_fetch_sub_relaxed(i, v); ··· 239 237 #endif 240 238 241 239 #ifndef atomic_fetch_sub_release 242 - static inline int 240 + static __always_inline int 243 241 atomic_fetch_sub_release(int i, atomic_t *v) 244 242 { 245 243 __atomic_release_fence(); ··· 249 247 #endif 250 248 251 249 #ifndef atomic_fetch_sub 252 - static inline int 250 + static __always_inline int 253 251 atomic_fetch_sub(int i, atomic_t *v) 254 252 { 255 253 int ret; ··· 264 262 #endif /* atomic_fetch_sub_relaxed */ 265 263 266 264 #ifndef atomic_inc 267 - static inline void 265 + static __always_inline void 268 266 
atomic_inc(atomic_t *v) 269 267 { 270 268 atomic_add(1, v); ··· 280 278 #endif /* atomic_inc_return */ 281 279 282 280 #ifndef atomic_inc_return 283 - static inline int 281 + static __always_inline int 284 282 atomic_inc_return(atomic_t *v) 285 283 { 286 284 return atomic_add_return(1, v); ··· 289 287 #endif 290 288 291 289 #ifndef atomic_inc_return_acquire 292 - static inline int 290 + static __always_inline int 293 291 atomic_inc_return_acquire(atomic_t *v) 294 292 { 295 293 return atomic_add_return_acquire(1, v); ··· 298 296 #endif 299 297 300 298 #ifndef atomic_inc_return_release 301 - static inline int 299 + static __always_inline int 302 300 atomic_inc_return_release(atomic_t *v) 303 301 { 304 302 return atomic_add_return_release(1, v); ··· 307 305 #endif 308 306 309 307 #ifndef atomic_inc_return_relaxed 310 - static inline int 308 + static __always_inline int 311 309 atomic_inc_return_relaxed(atomic_t *v) 312 310 { 313 311 return atomic_add_return_relaxed(1, v); ··· 318 316 #else /* atomic_inc_return_relaxed */ 319 317 320 318 #ifndef atomic_inc_return_acquire 321 - static inline int 319 + static __always_inline int 322 320 atomic_inc_return_acquire(atomic_t *v) 323 321 { 324 322 int ret = atomic_inc_return_relaxed(v); ··· 329 327 #endif 330 328 331 329 #ifndef atomic_inc_return_release 332 - static inline int 330 + static __always_inline int 333 331 atomic_inc_return_release(atomic_t *v) 334 332 { 335 333 __atomic_release_fence(); ··· 339 337 #endif 340 338 341 339 #ifndef atomic_inc_return 342 - static inline int 340 + static __always_inline int 343 341 atomic_inc_return(atomic_t *v) 344 342 { 345 343 int ret; ··· 361 359 #endif /* atomic_fetch_inc */ 362 360 363 361 #ifndef atomic_fetch_inc 364 - static inline int 362 + static __always_inline int 365 363 atomic_fetch_inc(atomic_t *v) 366 364 { 367 365 return atomic_fetch_add(1, v); ··· 370 368 #endif 371 369 372 370 #ifndef atomic_fetch_inc_acquire 373 - static inline int 371 + static __always_inline int 374 372 atomic_fetch_inc_acquire(atomic_t *v) 375 373 { 376 374 return atomic_fetch_add_acquire(1, v); ··· 379 377 #endif 380 378 381 379 #ifndef atomic_fetch_inc_release 382 - static inline int 380 + static __always_inline int 383 381 atomic_fetch_inc_release(atomic_t *v) 384 382 { 385 383 return atomic_fetch_add_release(1, v); ··· 388 386 #endif 389 387 390 388 #ifndef atomic_fetch_inc_relaxed 391 - static inline int 389 + static __always_inline int 392 390 atomic_fetch_inc_relaxed(atomic_t *v) 393 391 { 394 392 return atomic_fetch_add_relaxed(1, v); ··· 399 397 #else /* atomic_fetch_inc_relaxed */ 400 398 401 399 #ifndef atomic_fetch_inc_acquire 402 - static inline int 400 + static __always_inline int 403 401 atomic_fetch_inc_acquire(atomic_t *v) 404 402 { 405 403 int ret = atomic_fetch_inc_relaxed(v); ··· 410 408 #endif 411 409 412 410 #ifndef atomic_fetch_inc_release 413 - static inline int 411 + static __always_inline int 414 412 atomic_fetch_inc_release(atomic_t *v) 415 413 { 416 414 __atomic_release_fence(); ··· 420 418 #endif 421 419 422 420 #ifndef atomic_fetch_inc 423 - static inline int 421 + static __always_inline int 424 422 atomic_fetch_inc(atomic_t *v) 425 423 { 426 424 int ret; ··· 435 433 #endif /* atomic_fetch_inc_relaxed */ 436 434 437 435 #ifndef atomic_dec 438 - static inline void 436 + static __always_inline void 439 437 atomic_dec(atomic_t *v) 440 438 { 441 439 atomic_sub(1, v); ··· 451 449 #endif /* atomic_dec_return */ 452 450 453 451 #ifndef atomic_dec_return 454 - static inline int 452 + static 
__always_inline int 455 453 atomic_dec_return(atomic_t *v) 456 454 { 457 455 return atomic_sub_return(1, v); ··· 460 458 #endif 461 459 462 460 #ifndef atomic_dec_return_acquire 463 - static inline int 461 + static __always_inline int 464 462 atomic_dec_return_acquire(atomic_t *v) 465 463 { 466 464 return atomic_sub_return_acquire(1, v); ··· 469 467 #endif 470 468 471 469 #ifndef atomic_dec_return_release 472 - static inline int 470 + static __always_inline int 473 471 atomic_dec_return_release(atomic_t *v) 474 472 { 475 473 return atomic_sub_return_release(1, v); ··· 478 476 #endif 479 477 480 478 #ifndef atomic_dec_return_relaxed 481 - static inline int 479 + static __always_inline int 482 480 atomic_dec_return_relaxed(atomic_t *v) 483 481 { 484 482 return atomic_sub_return_relaxed(1, v); ··· 489 487 #else /* atomic_dec_return_relaxed */ 490 488 491 489 #ifndef atomic_dec_return_acquire 492 - static inline int 490 + static __always_inline int 493 491 atomic_dec_return_acquire(atomic_t *v) 494 492 { 495 493 int ret = atomic_dec_return_relaxed(v); ··· 500 498 #endif 501 499 502 500 #ifndef atomic_dec_return_release 503 - static inline int 501 + static __always_inline int 504 502 atomic_dec_return_release(atomic_t *v) 505 503 { 506 504 __atomic_release_fence(); ··· 510 508 #endif 511 509 512 510 #ifndef atomic_dec_return 513 - static inline int 511 + static __always_inline int 514 512 atomic_dec_return(atomic_t *v) 515 513 { 516 514 int ret; ··· 532 530 #endif /* atomic_fetch_dec */ 533 531 534 532 #ifndef atomic_fetch_dec 535 - static inline int 533 + static __always_inline int 536 534 atomic_fetch_dec(atomic_t *v) 537 535 { 538 536 return atomic_fetch_sub(1, v); ··· 541 539 #endif 542 540 543 541 #ifndef atomic_fetch_dec_acquire 544 - static inline int 542 + static __always_inline int 545 543 atomic_fetch_dec_acquire(atomic_t *v) 546 544 { 547 545 return atomic_fetch_sub_acquire(1, v); ··· 550 548 #endif 551 549 552 550 #ifndef atomic_fetch_dec_release 553 - static inline int 551 + static __always_inline int 554 552 atomic_fetch_dec_release(atomic_t *v) 555 553 { 556 554 return atomic_fetch_sub_release(1, v); ··· 559 557 #endif 560 558 561 559 #ifndef atomic_fetch_dec_relaxed 562 - static inline int 560 + static __always_inline int 563 561 atomic_fetch_dec_relaxed(atomic_t *v) 564 562 { 565 563 return atomic_fetch_sub_relaxed(1, v); ··· 570 568 #else /* atomic_fetch_dec_relaxed */ 571 569 572 570 #ifndef atomic_fetch_dec_acquire 573 - static inline int 571 + static __always_inline int 574 572 atomic_fetch_dec_acquire(atomic_t *v) 575 573 { 576 574 int ret = atomic_fetch_dec_relaxed(v); ··· 581 579 #endif 582 580 583 581 #ifndef atomic_fetch_dec_release 584 - static inline int 582 + static __always_inline int 585 583 atomic_fetch_dec_release(atomic_t *v) 586 584 { 587 585 __atomic_release_fence(); ··· 591 589 #endif 592 590 593 591 #ifndef atomic_fetch_dec 594 - static inline int 592 + static __always_inline int 595 593 atomic_fetch_dec(atomic_t *v) 596 594 { 597 595 int ret; ··· 612 610 #else /* atomic_fetch_and_relaxed */ 613 611 614 612 #ifndef atomic_fetch_and_acquire 615 - static inline int 613 + static __always_inline int 616 614 atomic_fetch_and_acquire(int i, atomic_t *v) 617 615 { 618 616 int ret = atomic_fetch_and_relaxed(i, v); ··· 623 621 #endif 624 622 625 623 #ifndef atomic_fetch_and_release 626 - static inline int 624 + static __always_inline int 627 625 atomic_fetch_and_release(int i, atomic_t *v) 628 626 { 629 627 __atomic_release_fence(); ··· 633 631 #endif 634 632 635 
633 #ifndef atomic_fetch_and 636 - static inline int 634 + static __always_inline int 637 635 atomic_fetch_and(int i, atomic_t *v) 638 636 { 639 637 int ret; ··· 648 646 #endif /* atomic_fetch_and_relaxed */ 649 647 650 648 #ifndef atomic_andnot 651 - static inline void 649 + static __always_inline void 652 650 atomic_andnot(int i, atomic_t *v) 653 651 { 654 652 atomic_and(~i, v); ··· 664 662 #endif /* atomic_fetch_andnot */ 665 663 666 664 #ifndef atomic_fetch_andnot 667 - static inline int 665 + static __always_inline int 668 666 atomic_fetch_andnot(int i, atomic_t *v) 669 667 { 670 668 return atomic_fetch_and(~i, v); ··· 673 671 #endif 674 672 675 673 #ifndef atomic_fetch_andnot_acquire 676 - static inline int 674 + static __always_inline int 677 675 atomic_fetch_andnot_acquire(int i, atomic_t *v) 678 676 { 679 677 return atomic_fetch_and_acquire(~i, v); ··· 682 680 #endif 683 681 684 682 #ifndef atomic_fetch_andnot_release 685 - static inline int 683 + static __always_inline int 686 684 atomic_fetch_andnot_release(int i, atomic_t *v) 687 685 { 688 686 return atomic_fetch_and_release(~i, v); ··· 691 689 #endif 692 690 693 691 #ifndef atomic_fetch_andnot_relaxed 694 - static inline int 692 + static __always_inline int 695 693 atomic_fetch_andnot_relaxed(int i, atomic_t *v) 696 694 { 697 695 return atomic_fetch_and_relaxed(~i, v); ··· 702 700 #else /* atomic_fetch_andnot_relaxed */ 703 701 704 702 #ifndef atomic_fetch_andnot_acquire 705 - static inline int 703 + static __always_inline int 706 704 atomic_fetch_andnot_acquire(int i, atomic_t *v) 707 705 { 708 706 int ret = atomic_fetch_andnot_relaxed(i, v); ··· 713 711 #endif 714 712 715 713 #ifndef atomic_fetch_andnot_release 716 - static inline int 714 + static __always_inline int 717 715 atomic_fetch_andnot_release(int i, atomic_t *v) 718 716 { 719 717 __atomic_release_fence(); ··· 723 721 #endif 724 722 725 723 #ifndef atomic_fetch_andnot 726 - static inline int 724 + static __always_inline int 727 725 atomic_fetch_andnot(int i, atomic_t *v) 728 726 { 729 727 int ret; ··· 744 742 #else /* atomic_fetch_or_relaxed */ 745 743 746 744 #ifndef atomic_fetch_or_acquire 747 - static inline int 745 + static __always_inline int 748 746 atomic_fetch_or_acquire(int i, atomic_t *v) 749 747 { 750 748 int ret = atomic_fetch_or_relaxed(i, v); ··· 755 753 #endif 756 754 757 755 #ifndef atomic_fetch_or_release 758 - static inline int 756 + static __always_inline int 759 757 atomic_fetch_or_release(int i, atomic_t *v) 760 758 { 761 759 __atomic_release_fence(); ··· 765 763 #endif 766 764 767 765 #ifndef atomic_fetch_or 768 - static inline int 766 + static __always_inline int 769 767 atomic_fetch_or(int i, atomic_t *v) 770 768 { 771 769 int ret; ··· 786 784 #else /* atomic_fetch_xor_relaxed */ 787 785 788 786 #ifndef atomic_fetch_xor_acquire 789 - static inline int 787 + static __always_inline int 790 788 atomic_fetch_xor_acquire(int i, atomic_t *v) 791 789 { 792 790 int ret = atomic_fetch_xor_relaxed(i, v); ··· 797 795 #endif 798 796 799 797 #ifndef atomic_fetch_xor_release 800 - static inline int 798 + static __always_inline int 801 799 atomic_fetch_xor_release(int i, atomic_t *v) 802 800 { 803 801 __atomic_release_fence(); ··· 807 805 #endif 808 806 809 807 #ifndef atomic_fetch_xor 810 - static inline int 808 + static __always_inline int 811 809 atomic_fetch_xor(int i, atomic_t *v) 812 810 { 813 811 int ret; ··· 828 826 #else /* atomic_xchg_relaxed */ 829 827 830 828 #ifndef atomic_xchg_acquire 831 - static inline int 829 + static __always_inline int 
832 830 atomic_xchg_acquire(atomic_t *v, int i) 833 831 { 834 832 int ret = atomic_xchg_relaxed(v, i); ··· 839 837 #endif 840 838 841 839 #ifndef atomic_xchg_release 842 - static inline int 840 + static __always_inline int 843 841 atomic_xchg_release(atomic_t *v, int i) 844 842 { 845 843 __atomic_release_fence(); ··· 849 847 #endif 850 848 851 849 #ifndef atomic_xchg 852 - static inline int 850 + static __always_inline int 853 851 atomic_xchg(atomic_t *v, int i) 854 852 { 855 853 int ret; ··· 870 868 #else /* atomic_cmpxchg_relaxed */ 871 869 872 870 #ifndef atomic_cmpxchg_acquire 873 - static inline int 871 + static __always_inline int 874 872 atomic_cmpxchg_acquire(atomic_t *v, int old, int new) 875 873 { 876 874 int ret = atomic_cmpxchg_relaxed(v, old, new); ··· 881 879 #endif 882 880 883 881 #ifndef atomic_cmpxchg_release 884 - static inline int 882 + static __always_inline int 885 883 atomic_cmpxchg_release(atomic_t *v, int old, int new) 886 884 { 887 885 __atomic_release_fence(); ··· 891 889 #endif 892 890 893 891 #ifndef atomic_cmpxchg 894 - static inline int 892 + static __always_inline int 895 893 atomic_cmpxchg(atomic_t *v, int old, int new) 896 894 { 897 895 int ret; ··· 913 911 #endif /* atomic_try_cmpxchg */ 914 912 915 913 #ifndef atomic_try_cmpxchg 916 - static inline bool 914 + static __always_inline bool 917 915 atomic_try_cmpxchg(atomic_t *v, int *old, int new) 918 916 { 919 917 int r, o = *old; ··· 926 924 #endif 927 925 928 926 #ifndef atomic_try_cmpxchg_acquire 929 - static inline bool 927 + static __always_inline bool 930 928 atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) 931 929 { 932 930 int r, o = *old; ··· 939 937 #endif 940 938 941 939 #ifndef atomic_try_cmpxchg_release 942 - static inline bool 940 + static __always_inline bool 943 941 atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) 944 942 { 945 943 int r, o = *old; ··· 952 950 #endif 953 951 954 952 #ifndef atomic_try_cmpxchg_relaxed 955 - static inline bool 953 + static __always_inline bool 956 954 atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) 957 955 { 958 956 int r, o = *old; ··· 967 965 #else /* atomic_try_cmpxchg_relaxed */ 968 966 969 967 #ifndef atomic_try_cmpxchg_acquire 970 - static inline bool 968 + static __always_inline bool 971 969 atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) 972 970 { 973 971 bool ret = atomic_try_cmpxchg_relaxed(v, old, new); ··· 978 976 #endif 979 977 980 978 #ifndef atomic_try_cmpxchg_release 981 - static inline bool 979 + static __always_inline bool 982 980 atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) 983 981 { 984 982 __atomic_release_fence(); ··· 988 986 #endif 989 987 990 988 #ifndef atomic_try_cmpxchg 991 - static inline bool 989 + static __always_inline bool 992 990 atomic_try_cmpxchg(atomic_t *v, int *old, int new) 993 991 { 994 992 bool ret; ··· 1012 1010 * true if the result is zero, or false for all 1013 1011 * other cases. 1014 1012 */ 1015 - static inline bool 1013 + static __always_inline bool 1016 1014 atomic_sub_and_test(int i, atomic_t *v) 1017 1015 { 1018 1016 return atomic_sub_return(i, v) == 0; ··· 1029 1027 * returns true if the result is 0, or false for all other 1030 1028 * cases. 1031 1029 */ 1032 - static inline bool 1030 + static __always_inline bool 1033 1031 atomic_dec_and_test(atomic_t *v) 1034 1032 { 1035 1033 return atomic_dec_return(v) == 0; ··· 1046 1044 * and returns true if the result is zero, or false for all 1047 1045 * other cases. 
1048 1046 */ 1049 - static inline bool 1047 + static __always_inline bool 1050 1048 atomic_inc_and_test(atomic_t *v) 1051 1049 { 1052 1050 return atomic_inc_return(v) == 0; ··· 1064 1062 * if the result is negative, or false when 1065 1063 * result is greater than or equal to zero. 1066 1064 */ 1067 - static inline bool 1065 + static __always_inline bool 1068 1066 atomic_add_negative(int i, atomic_t *v) 1069 1067 { 1070 1068 return atomic_add_return(i, v) < 0; ··· 1082 1080 * Atomically adds @a to @v, so long as @v was not already @u. 1083 1081 * Returns original value of @v 1084 1082 */ 1085 - static inline int 1083 + static __always_inline int 1086 1084 atomic_fetch_add_unless(atomic_t *v, int a, int u) 1087 1085 { 1088 1086 int c = atomic_read(v); ··· 1107 1105 * Atomically adds @a to @v, if @v was not already @u. 1108 1106 * Returns true if the addition was done. 1109 1107 */ 1110 - static inline bool 1108 + static __always_inline bool 1111 1109 atomic_add_unless(atomic_t *v, int a, int u) 1112 1110 { 1113 1111 return atomic_fetch_add_unless(v, a, u) != u; ··· 1123 1121 * Atomically increments @v by 1, if @v is non-zero. 1124 1122 * Returns true if the increment was done. 1125 1123 */ 1126 - static inline bool 1124 + static __always_inline bool 1127 1125 atomic_inc_not_zero(atomic_t *v) 1128 1126 { 1129 1127 return atomic_add_unless(v, 1, 0); ··· 1132 1130 #endif 1133 1131 1134 1132 #ifndef atomic_inc_unless_negative 1135 - static inline bool 1133 + static __always_inline bool 1136 1134 atomic_inc_unless_negative(atomic_t *v) 1137 1135 { 1138 1136 int c = atomic_read(v); ··· 1148 1146 #endif 1149 1147 1150 1148 #ifndef atomic_dec_unless_positive 1151 - static inline bool 1149 + static __always_inline bool 1152 1150 atomic_dec_unless_positive(atomic_t *v) 1153 1151 { 1154 1152 int c = atomic_read(v); ··· 1164 1162 #endif 1165 1163 1166 1164 #ifndef atomic_dec_if_positive 1167 - static inline int 1165 + static __always_inline int 1168 1166 atomic_dec_if_positive(atomic_t *v) 1169 1167 { 1170 1168 int dec, c = atomic_read(v); ··· 1180 1178 #define atomic_dec_if_positive atomic_dec_if_positive 1181 1179 #endif 1182 1180 1183 - #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) 1184 - #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) 1185 - 1186 1181 #ifdef CONFIG_GENERIC_ATOMIC64 1187 1182 #include <asm-generic/atomic64.h> 1188 1183 #endif 1189 1184 1190 1185 #ifndef atomic64_read_acquire 1191 - static inline s64 1186 + static __always_inline s64 1192 1187 atomic64_read_acquire(const atomic64_t *v) 1193 1188 { 1194 1189 return smp_load_acquire(&(v)->counter); ··· 1194 1195 #endif 1195 1196 1196 1197 #ifndef atomic64_set_release 1197 - static inline void 1198 + static __always_inline void 1198 1199 atomic64_set_release(atomic64_t *v, s64 i) 1199 1200 { 1200 1201 smp_store_release(&(v)->counter, i); ··· 1209 1210 #else /* atomic64_add_return_relaxed */ 1210 1211 1211 1212 #ifndef atomic64_add_return_acquire 1212 - static inline s64 1213 + static __always_inline s64 1213 1214 atomic64_add_return_acquire(s64 i, atomic64_t *v) 1214 1215 { 1215 1216 s64 ret = atomic64_add_return_relaxed(i, v); ··· 1220 1221 #endif 1221 1222 1222 1223 #ifndef atomic64_add_return_release 1223 - static inline s64 1224 + static __always_inline s64 1224 1225 atomic64_add_return_release(s64 i, atomic64_t *v) 1225 1226 { 1226 1227 __atomic_release_fence(); ··· 1230 1231 #endif 1231 1232 1232 1233 #ifndef atomic64_add_return 1233 - static inline s64 1234 + 
static __always_inline s64 1234 1235 atomic64_add_return(s64 i, atomic64_t *v) 1235 1236 { 1236 1237 s64 ret; ··· 1251 1252 #else /* atomic64_fetch_add_relaxed */ 1252 1253 1253 1254 #ifndef atomic64_fetch_add_acquire 1254 - static inline s64 1255 + static __always_inline s64 1255 1256 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) 1256 1257 { 1257 1258 s64 ret = atomic64_fetch_add_relaxed(i, v); ··· 1262 1263 #endif 1263 1264 1264 1265 #ifndef atomic64_fetch_add_release 1265 - static inline s64 1266 + static __always_inline s64 1266 1267 atomic64_fetch_add_release(s64 i, atomic64_t *v) 1267 1268 { 1268 1269 __atomic_release_fence(); ··· 1272 1273 #endif 1273 1274 1274 1275 #ifndef atomic64_fetch_add 1275 - static inline s64 1276 + static __always_inline s64 1276 1277 atomic64_fetch_add(s64 i, atomic64_t *v) 1277 1278 { 1278 1279 s64 ret; ··· 1293 1294 #else /* atomic64_sub_return_relaxed */ 1294 1295 1295 1296 #ifndef atomic64_sub_return_acquire 1296 - static inline s64 1297 + static __always_inline s64 1297 1298 atomic64_sub_return_acquire(s64 i, atomic64_t *v) 1298 1299 { 1299 1300 s64 ret = atomic64_sub_return_relaxed(i, v); ··· 1304 1305 #endif 1305 1306 1306 1307 #ifndef atomic64_sub_return_release 1307 - static inline s64 1308 + static __always_inline s64 1308 1309 atomic64_sub_return_release(s64 i, atomic64_t *v) 1309 1310 { 1310 1311 __atomic_release_fence(); ··· 1314 1315 #endif 1315 1316 1316 1317 #ifndef atomic64_sub_return 1317 - static inline s64 1318 + static __always_inline s64 1318 1319 atomic64_sub_return(s64 i, atomic64_t *v) 1319 1320 { 1320 1321 s64 ret; ··· 1335 1336 #else /* atomic64_fetch_sub_relaxed */ 1336 1337 1337 1338 #ifndef atomic64_fetch_sub_acquire 1338 - static inline s64 1339 + static __always_inline s64 1339 1340 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) 1340 1341 { 1341 1342 s64 ret = atomic64_fetch_sub_relaxed(i, v); ··· 1346 1347 #endif 1347 1348 1348 1349 #ifndef atomic64_fetch_sub_release 1349 - static inline s64 1350 + static __always_inline s64 1350 1351 atomic64_fetch_sub_release(s64 i, atomic64_t *v) 1351 1352 { 1352 1353 __atomic_release_fence(); ··· 1356 1357 #endif 1357 1358 1358 1359 #ifndef atomic64_fetch_sub 1359 - static inline s64 1360 + static __always_inline s64 1360 1361 atomic64_fetch_sub(s64 i, atomic64_t *v) 1361 1362 { 1362 1363 s64 ret; ··· 1371 1372 #endif /* atomic64_fetch_sub_relaxed */ 1372 1373 1373 1374 #ifndef atomic64_inc 1374 - static inline void 1375 + static __always_inline void 1375 1376 atomic64_inc(atomic64_t *v) 1376 1377 { 1377 1378 atomic64_add(1, v); ··· 1387 1388 #endif /* atomic64_inc_return */ 1388 1389 1389 1390 #ifndef atomic64_inc_return 1390 - static inline s64 1391 + static __always_inline s64 1391 1392 atomic64_inc_return(atomic64_t *v) 1392 1393 { 1393 1394 return atomic64_add_return(1, v); ··· 1396 1397 #endif 1397 1398 1398 1399 #ifndef atomic64_inc_return_acquire 1399 - static inline s64 1400 + static __always_inline s64 1400 1401 atomic64_inc_return_acquire(atomic64_t *v) 1401 1402 { 1402 1403 return atomic64_add_return_acquire(1, v); ··· 1405 1406 #endif 1406 1407 1407 1408 #ifndef atomic64_inc_return_release 1408 - static inline s64 1409 + static __always_inline s64 1409 1410 atomic64_inc_return_release(atomic64_t *v) 1410 1411 { 1411 1412 return atomic64_add_return_release(1, v); ··· 1414 1415 #endif 1415 1416 1416 1417 #ifndef atomic64_inc_return_relaxed 1417 - static inline s64 1418 + static __always_inline s64 1418 1419 atomic64_inc_return_relaxed(atomic64_t *v) 1419 1420 { 1420 1421 
return atomic64_add_return_relaxed(1, v); ··· 1425 1426 #else /* atomic64_inc_return_relaxed */ 1426 1427 1427 1428 #ifndef atomic64_inc_return_acquire 1428 - static inline s64 1429 + static __always_inline s64 1429 1430 atomic64_inc_return_acquire(atomic64_t *v) 1430 1431 { 1431 1432 s64 ret = atomic64_inc_return_relaxed(v); ··· 1436 1437 #endif 1437 1438 1438 1439 #ifndef atomic64_inc_return_release 1439 - static inline s64 1440 + static __always_inline s64 1440 1441 atomic64_inc_return_release(atomic64_t *v) 1441 1442 { 1442 1443 __atomic_release_fence(); ··· 1446 1447 #endif 1447 1448 1448 1449 #ifndef atomic64_inc_return 1449 - static inline s64 1450 + static __always_inline s64 1450 1451 atomic64_inc_return(atomic64_t *v) 1451 1452 { 1452 1453 s64 ret; ··· 1468 1469 #endif /* atomic64_fetch_inc */ 1469 1470 1470 1471 #ifndef atomic64_fetch_inc 1471 - static inline s64 1472 + static __always_inline s64 1472 1473 atomic64_fetch_inc(atomic64_t *v) 1473 1474 { 1474 1475 return atomic64_fetch_add(1, v); ··· 1477 1478 #endif 1478 1479 1479 1480 #ifndef atomic64_fetch_inc_acquire 1480 - static inline s64 1481 + static __always_inline s64 1481 1482 atomic64_fetch_inc_acquire(atomic64_t *v) 1482 1483 { 1483 1484 return atomic64_fetch_add_acquire(1, v); ··· 1486 1487 #endif 1487 1488 1488 1489 #ifndef atomic64_fetch_inc_release 1489 - static inline s64 1490 + static __always_inline s64 1490 1491 atomic64_fetch_inc_release(atomic64_t *v) 1491 1492 { 1492 1493 return atomic64_fetch_add_release(1, v); ··· 1495 1496 #endif 1496 1497 1497 1498 #ifndef atomic64_fetch_inc_relaxed 1498 - static inline s64 1499 + static __always_inline s64 1499 1500 atomic64_fetch_inc_relaxed(atomic64_t *v) 1500 1501 { 1501 1502 return atomic64_fetch_add_relaxed(1, v); ··· 1506 1507 #else /* atomic64_fetch_inc_relaxed */ 1507 1508 1508 1509 #ifndef atomic64_fetch_inc_acquire 1509 - static inline s64 1510 + static __always_inline s64 1510 1511 atomic64_fetch_inc_acquire(atomic64_t *v) 1511 1512 { 1512 1513 s64 ret = atomic64_fetch_inc_relaxed(v); ··· 1517 1518 #endif 1518 1519 1519 1520 #ifndef atomic64_fetch_inc_release 1520 - static inline s64 1521 + static __always_inline s64 1521 1522 atomic64_fetch_inc_release(atomic64_t *v) 1522 1523 { 1523 1524 __atomic_release_fence(); ··· 1527 1528 #endif 1528 1529 1529 1530 #ifndef atomic64_fetch_inc 1530 - static inline s64 1531 + static __always_inline s64 1531 1532 atomic64_fetch_inc(atomic64_t *v) 1532 1533 { 1533 1534 s64 ret; ··· 1542 1543 #endif /* atomic64_fetch_inc_relaxed */ 1543 1544 1544 1545 #ifndef atomic64_dec 1545 - static inline void 1546 + static __always_inline void 1546 1547 atomic64_dec(atomic64_t *v) 1547 1548 { 1548 1549 atomic64_sub(1, v); ··· 1558 1559 #endif /* atomic64_dec_return */ 1559 1560 1560 1561 #ifndef atomic64_dec_return 1561 - static inline s64 1562 + static __always_inline s64 1562 1563 atomic64_dec_return(atomic64_t *v) 1563 1564 { 1564 1565 return atomic64_sub_return(1, v); ··· 1567 1568 #endif 1568 1569 1569 1570 #ifndef atomic64_dec_return_acquire 1570 - static inline s64 1571 + static __always_inline s64 1571 1572 atomic64_dec_return_acquire(atomic64_t *v) 1572 1573 { 1573 1574 return atomic64_sub_return_acquire(1, v); ··· 1576 1577 #endif 1577 1578 1578 1579 #ifndef atomic64_dec_return_release 1579 - static inline s64 1580 + static __always_inline s64 1580 1581 atomic64_dec_return_release(atomic64_t *v) 1581 1582 { 1582 1583 return atomic64_sub_return_release(1, v); ··· 1585 1586 #endif 1586 1587 1587 1588 #ifndef 
atomic64_dec_return_relaxed 1588 - static inline s64 1589 + static __always_inline s64 1589 1590 atomic64_dec_return_relaxed(atomic64_t *v) 1590 1591 { 1591 1592 return atomic64_sub_return_relaxed(1, v); ··· 1596 1597 #else /* atomic64_dec_return_relaxed */ 1597 1598 1598 1599 #ifndef atomic64_dec_return_acquire 1599 - static inline s64 1600 + static __always_inline s64 1600 1601 atomic64_dec_return_acquire(atomic64_t *v) 1601 1602 { 1602 1603 s64 ret = atomic64_dec_return_relaxed(v); ··· 1607 1608 #endif 1608 1609 1609 1610 #ifndef atomic64_dec_return_release 1610 - static inline s64 1611 + static __always_inline s64 1611 1612 atomic64_dec_return_release(atomic64_t *v) 1612 1613 { 1613 1614 __atomic_release_fence(); ··· 1617 1618 #endif 1618 1619 1619 1620 #ifndef atomic64_dec_return 1620 - static inline s64 1621 + static __always_inline s64 1621 1622 atomic64_dec_return(atomic64_t *v) 1622 1623 { 1623 1624 s64 ret; ··· 1639 1640 #endif /* atomic64_fetch_dec */ 1640 1641 1641 1642 #ifndef atomic64_fetch_dec 1642 - static inline s64 1643 + static __always_inline s64 1643 1644 atomic64_fetch_dec(atomic64_t *v) 1644 1645 { 1645 1646 return atomic64_fetch_sub(1, v); ··· 1648 1649 #endif 1649 1650 1650 1651 #ifndef atomic64_fetch_dec_acquire 1651 - static inline s64 1652 + static __always_inline s64 1652 1653 atomic64_fetch_dec_acquire(atomic64_t *v) 1653 1654 { 1654 1655 return atomic64_fetch_sub_acquire(1, v); ··· 1657 1658 #endif 1658 1659 1659 1660 #ifndef atomic64_fetch_dec_release 1660 - static inline s64 1661 + static __always_inline s64 1661 1662 atomic64_fetch_dec_release(atomic64_t *v) 1662 1663 { 1663 1664 return atomic64_fetch_sub_release(1, v); ··· 1666 1667 #endif 1667 1668 1668 1669 #ifndef atomic64_fetch_dec_relaxed 1669 - static inline s64 1670 + static __always_inline s64 1670 1671 atomic64_fetch_dec_relaxed(atomic64_t *v) 1671 1672 { 1672 1673 return atomic64_fetch_sub_relaxed(1, v); ··· 1677 1678 #else /* atomic64_fetch_dec_relaxed */ 1678 1679 1679 1680 #ifndef atomic64_fetch_dec_acquire 1680 - static inline s64 1681 + static __always_inline s64 1681 1682 atomic64_fetch_dec_acquire(atomic64_t *v) 1682 1683 { 1683 1684 s64 ret = atomic64_fetch_dec_relaxed(v); ··· 1688 1689 #endif 1689 1690 1690 1691 #ifndef atomic64_fetch_dec_release 1691 - static inline s64 1692 + static __always_inline s64 1692 1693 atomic64_fetch_dec_release(atomic64_t *v) 1693 1694 { 1694 1695 __atomic_release_fence(); ··· 1698 1699 #endif 1699 1700 1700 1701 #ifndef atomic64_fetch_dec 1701 - static inline s64 1702 + static __always_inline s64 1702 1703 atomic64_fetch_dec(atomic64_t *v) 1703 1704 { 1704 1705 s64 ret; ··· 1719 1720 #else /* atomic64_fetch_and_relaxed */ 1720 1721 1721 1722 #ifndef atomic64_fetch_and_acquire 1722 - static inline s64 1723 + static __always_inline s64 1723 1724 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) 1724 1725 { 1725 1726 s64 ret = atomic64_fetch_and_relaxed(i, v); ··· 1730 1731 #endif 1731 1732 1732 1733 #ifndef atomic64_fetch_and_release 1733 - static inline s64 1734 + static __always_inline s64 1734 1735 atomic64_fetch_and_release(s64 i, atomic64_t *v) 1735 1736 { 1736 1737 __atomic_release_fence(); ··· 1740 1741 #endif 1741 1742 1742 1743 #ifndef atomic64_fetch_and 1743 - static inline s64 1744 + static __always_inline s64 1744 1745 atomic64_fetch_and(s64 i, atomic64_t *v) 1745 1746 { 1746 1747 s64 ret; ··· 1755 1756 #endif /* atomic64_fetch_and_relaxed */ 1756 1757 1757 1758 #ifndef atomic64_andnot 1758 - static inline void 1759 + static __always_inline void 
1759 1760 atomic64_andnot(s64 i, atomic64_t *v) 1760 1761 { 1761 1762 atomic64_and(~i, v); ··· 1771 1772 #endif /* atomic64_fetch_andnot */ 1772 1773 1773 1774 #ifndef atomic64_fetch_andnot 1774 - static inline s64 1775 + static __always_inline s64 1775 1776 atomic64_fetch_andnot(s64 i, atomic64_t *v) 1776 1777 { 1777 1778 return atomic64_fetch_and(~i, v); ··· 1780 1781 #endif 1781 1782 1782 1783 #ifndef atomic64_fetch_andnot_acquire 1783 - static inline s64 1784 + static __always_inline s64 1784 1785 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) 1785 1786 { 1786 1787 return atomic64_fetch_and_acquire(~i, v); ··· 1789 1790 #endif 1790 1791 1791 1792 #ifndef atomic64_fetch_andnot_release 1792 - static inline s64 1793 + static __always_inline s64 1793 1794 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) 1794 1795 { 1795 1796 return atomic64_fetch_and_release(~i, v); ··· 1798 1799 #endif 1799 1800 1800 1801 #ifndef atomic64_fetch_andnot_relaxed 1801 - static inline s64 1802 + static __always_inline s64 1802 1803 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) 1803 1804 { 1804 1805 return atomic64_fetch_and_relaxed(~i, v); ··· 1809 1810 #else /* atomic64_fetch_andnot_relaxed */ 1810 1811 1811 1812 #ifndef atomic64_fetch_andnot_acquire 1812 - static inline s64 1813 + static __always_inline s64 1813 1814 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) 1814 1815 { 1815 1816 s64 ret = atomic64_fetch_andnot_relaxed(i, v); ··· 1820 1821 #endif 1821 1822 1822 1823 #ifndef atomic64_fetch_andnot_release 1823 - static inline s64 1824 + static __always_inline s64 1824 1825 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) 1825 1826 { 1826 1827 __atomic_release_fence(); ··· 1830 1831 #endif 1831 1832 1832 1833 #ifndef atomic64_fetch_andnot 1833 - static inline s64 1834 + static __always_inline s64 1834 1835 atomic64_fetch_andnot(s64 i, atomic64_t *v) 1835 1836 { 1836 1837 s64 ret; ··· 1851 1852 #else /* atomic64_fetch_or_relaxed */ 1852 1853 1853 1854 #ifndef atomic64_fetch_or_acquire 1854 - static inline s64 1855 + static __always_inline s64 1855 1856 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) 1856 1857 { 1857 1858 s64 ret = atomic64_fetch_or_relaxed(i, v); ··· 1862 1863 #endif 1863 1864 1864 1865 #ifndef atomic64_fetch_or_release 1865 - static inline s64 1866 + static __always_inline s64 1866 1867 atomic64_fetch_or_release(s64 i, atomic64_t *v) 1867 1868 { 1868 1869 __atomic_release_fence(); ··· 1872 1873 #endif 1873 1874 1874 1875 #ifndef atomic64_fetch_or 1875 - static inline s64 1876 + static __always_inline s64 1876 1877 atomic64_fetch_or(s64 i, atomic64_t *v) 1877 1878 { 1878 1879 s64 ret; ··· 1893 1894 #else /* atomic64_fetch_xor_relaxed */ 1894 1895 1895 1896 #ifndef atomic64_fetch_xor_acquire 1896 - static inline s64 1897 + static __always_inline s64 1897 1898 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) 1898 1899 { 1899 1900 s64 ret = atomic64_fetch_xor_relaxed(i, v); ··· 1904 1905 #endif 1905 1906 1906 1907 #ifndef atomic64_fetch_xor_release 1907 - static inline s64 1908 + static __always_inline s64 1908 1909 atomic64_fetch_xor_release(s64 i, atomic64_t *v) 1909 1910 { 1910 1911 __atomic_release_fence(); ··· 1914 1915 #endif 1915 1916 1916 1917 #ifndef atomic64_fetch_xor 1917 - static inline s64 1918 + static __always_inline s64 1918 1919 atomic64_fetch_xor(s64 i, atomic64_t *v) 1919 1920 { 1920 1921 s64 ret; ··· 1935 1936 #else /* atomic64_xchg_relaxed */ 1936 1937 1937 1938 #ifndef atomic64_xchg_acquire 1938 - static inline s64 1939 + static __always_inline s64 
1939 1940 atomic64_xchg_acquire(atomic64_t *v, s64 i) 1940 1941 { 1941 1942 s64 ret = atomic64_xchg_relaxed(v, i); ··· 1946 1947 #endif 1947 1948 1948 1949 #ifndef atomic64_xchg_release 1949 - static inline s64 1950 + static __always_inline s64 1950 1951 atomic64_xchg_release(atomic64_t *v, s64 i) 1951 1952 { 1952 1953 __atomic_release_fence(); ··· 1956 1957 #endif 1957 1958 1958 1959 #ifndef atomic64_xchg 1959 - static inline s64 1960 + static __always_inline s64 1960 1961 atomic64_xchg(atomic64_t *v, s64 i) 1961 1962 { 1962 1963 s64 ret; ··· 1977 1978 #else /* atomic64_cmpxchg_relaxed */ 1978 1979 1979 1980 #ifndef atomic64_cmpxchg_acquire 1980 - static inline s64 1981 + static __always_inline s64 1981 1982 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) 1982 1983 { 1983 1984 s64 ret = atomic64_cmpxchg_relaxed(v, old, new); ··· 1988 1989 #endif 1989 1990 1990 1991 #ifndef atomic64_cmpxchg_release 1991 - static inline s64 1992 + static __always_inline s64 1992 1993 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) 1993 1994 { 1994 1995 __atomic_release_fence(); ··· 1998 1999 #endif 1999 2000 2000 2001 #ifndef atomic64_cmpxchg 2001 - static inline s64 2002 + static __always_inline s64 2002 2003 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) 2003 2004 { 2004 2005 s64 ret; ··· 2020 2021 #endif /* atomic64_try_cmpxchg */ 2021 2022 2022 2023 #ifndef atomic64_try_cmpxchg 2023 - static inline bool 2024 + static __always_inline bool 2024 2025 atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) 2025 2026 { 2026 2027 s64 r, o = *old; ··· 2033 2034 #endif 2034 2035 2035 2036 #ifndef atomic64_try_cmpxchg_acquire 2036 - static inline bool 2037 + static __always_inline bool 2037 2038 atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) 2038 2039 { 2039 2040 s64 r, o = *old; ··· 2046 2047 #endif 2047 2048 2048 2049 #ifndef atomic64_try_cmpxchg_release 2049 - static inline bool 2050 + static __always_inline bool 2050 2051 atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) 2051 2052 { 2052 2053 s64 r, o = *old; ··· 2059 2060 #endif 2060 2061 2061 2062 #ifndef atomic64_try_cmpxchg_relaxed 2062 - static inline bool 2063 + static __always_inline bool 2063 2064 atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) 2064 2065 { 2065 2066 s64 r, o = *old; ··· 2074 2075 #else /* atomic64_try_cmpxchg_relaxed */ 2075 2076 2076 2077 #ifndef atomic64_try_cmpxchg_acquire 2077 - static inline bool 2078 + static __always_inline bool 2078 2079 atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) 2079 2080 { 2080 2081 bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); ··· 2085 2086 #endif 2086 2087 2087 2088 #ifndef atomic64_try_cmpxchg_release 2088 - static inline bool 2089 + static __always_inline bool 2089 2090 atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) 2090 2091 { 2091 2092 __atomic_release_fence(); ··· 2095 2096 #endif 2096 2097 2097 2098 #ifndef atomic64_try_cmpxchg 2098 - static inline bool 2099 + static __always_inline bool 2099 2100 atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) 2100 2101 { 2101 2102 bool ret; ··· 2119 2120 * true if the result is zero, or false for all 2120 2121 * other cases. 2121 2122 */ 2122 - static inline bool 2123 + static __always_inline bool 2123 2124 atomic64_sub_and_test(s64 i, atomic64_t *v) 2124 2125 { 2125 2126 return atomic64_sub_return(i, v) == 0; ··· 2136 2137 * returns true if the result is 0, or false for all other 2137 2138 * cases. 
2138 2139 */ 2139 - static inline bool 2140 + static __always_inline bool 2140 2141 atomic64_dec_and_test(atomic64_t *v) 2141 2142 { 2142 2143 return atomic64_dec_return(v) == 0; ··· 2153 2154 * and returns true if the result is zero, or false for all 2154 2155 * other cases. 2155 2156 */ 2156 - static inline bool 2157 + static __always_inline bool 2157 2158 atomic64_inc_and_test(atomic64_t *v) 2158 2159 { 2159 2160 return atomic64_inc_return(v) == 0; ··· 2171 2172 * if the result is negative, or false when 2172 2173 * result is greater than or equal to zero. 2173 2174 */ 2174 - static inline bool 2175 + static __always_inline bool 2175 2176 atomic64_add_negative(s64 i, atomic64_t *v) 2176 2177 { 2177 2178 return atomic64_add_return(i, v) < 0; ··· 2189 2190 * Atomically adds @a to @v, so long as @v was not already @u. 2190 2191 * Returns original value of @v 2191 2192 */ 2192 - static inline s64 2193 + static __always_inline s64 2193 2194 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) 2194 2195 { 2195 2196 s64 c = atomic64_read(v); ··· 2214 2215 * Atomically adds @a to @v, if @v was not already @u. 2215 2216 * Returns true if the addition was done. 2216 2217 */ 2217 - static inline bool 2218 + static __always_inline bool 2218 2219 atomic64_add_unless(atomic64_t *v, s64 a, s64 u) 2219 2220 { 2220 2221 return atomic64_fetch_add_unless(v, a, u) != u; ··· 2230 2231 * Atomically increments @v by 1, if @v is non-zero. 2231 2232 * Returns true if the increment was done. 2232 2233 */ 2233 - static inline bool 2234 + static __always_inline bool 2234 2235 atomic64_inc_not_zero(atomic64_t *v) 2235 2236 { 2236 2237 return atomic64_add_unless(v, 1, 0); ··· 2239 2240 #endif 2240 2241 2241 2242 #ifndef atomic64_inc_unless_negative 2242 - static inline bool 2243 + static __always_inline bool 2243 2244 atomic64_inc_unless_negative(atomic64_t *v) 2244 2245 { 2245 2246 s64 c = atomic64_read(v); ··· 2255 2256 #endif 2256 2257 2257 2258 #ifndef atomic64_dec_unless_positive 2258 - static inline bool 2259 + static __always_inline bool 2259 2260 atomic64_dec_unless_positive(atomic64_t *v) 2260 2261 { 2261 2262 s64 c = atomic64_read(v); ··· 2271 2272 #endif 2272 2273 2273 2274 #ifndef atomic64_dec_if_positive 2274 - static inline s64 2275 + static __always_inline s64 2275 2276 atomic64_dec_if_positive(atomic64_t *v) 2276 2277 { 2277 2278 s64 dec, c = atomic64_read(v); ··· 2287 2288 #define atomic64_dec_if_positive atomic64_dec_if_positive 2288 2289 #endif 2289 2290 2290 - #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) 2291 - #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) 2292 - 2293 2291 #endif /* _LINUX_ATOMIC_FALLBACK_H */ 2294 - // 25de4a2804d70f57e994fe3b419148658bb5378a 2292 + // 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
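Two things happen in the generated header above. First, every fallback goes from "static inline" to "static __always_inline": plain inline is only a hint, and once the compiler uninlines one of these helpers the out-of-line copy becomes a real function that instrumentation can attach to, which is exactly what the fragile entry/text-poke paths in the merge message cannot tolerate. Second, the cond_read macros move out of the generated file (they reappear in include/linux/atomic.h below). The ordering fallbacks themselves follow three patterns built on the relaxed variant; a condensed sketch, assuming the architecture supplies only atomic64_fetch_add_relaxed() (names are illustrative, not copied from any tree):

#include <linux/atomic.h>

static __always_inline s64
sketch_fetch_add_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();	/* acquire: fence after the RMW */
	return ret;
}

static __always_inline s64
sketch_fetch_add_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();	/* release: fence before the RMW */
	return atomic64_fetch_add_relaxed(i, v);
}

static __always_inline s64
sketch_fetch_add(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();	/* fully ordered: fence both sides */
	ret = atomic64_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}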
+11
include/linux/atomic.h
··· 25 25 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. 26 26 */ 27 27 28 + #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) 29 + #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) 30 + 31 + #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) 32 + #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) 33 + 28 34 /* 29 35 * The idea here is to build acquire/release variants by adding explicit 30 36 * barriers on top of the relaxed variant. In the case where the relaxed ··· 77 71 __ret; \ 78 72 }) 79 73 74 + #ifdef ARCH_ATOMIC 75 + #include <linux/atomic-arch-fallback.h> 76 + #include <asm-generic/atomic-instrumented.h> 77 + #else 80 78 #include <linux/atomic-fallback.h> 79 + #endif 81 80 82 81 #include <asm-generic/atomic-long.h> 83 82
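This hunk is the flip itself: an architecture that defines ARCH_ATOMIC (arm64 and x86 in this merge) now gets the uninstrumented arch_atomic_*() fallbacks from the new linux/atomic-arch-fallback.h, and asm-generic/atomic-instrumented.h then builds the instrumented atomic_*() API on top in generic code; every other architecture keeps the old single fallback header. The cond_read helpers are hoisted here so they exist once instead of in each generated header. Each instrumented wrapper has roughly the following shape (a simplified sketch; the exact instrumentation hook name has changed across kernel versions, so treat it as an assumption):

#include <linux/atomic.h>
#include <linux/instrumented.h>

static __always_inline int
sketch_atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_write(v, sizeof(*v));	/* sanitizer hook, in generic code */
	return arch_atomic_add_return(i, v);	/* uninstrumented arch/fallback op */
}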
+3 -3
scripts/atomic/fallbacks/acquire
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_${pfx}${name}${sfx}_acquire(${params}) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params}) 4 4 { 5 - ${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args}); 5 + ${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args}); 6 6 __atomic_acquire_fence(); 7 7 return ret; 8 8 }
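Each template now takes an ${arch} prefix alongside the existing ${atomic}, ${pfx}, ${name} and ${sfx} fragments, so one template serves both generated headers. For illustration (hand-expanded, not generator output), the acquire template above becomes, for arch_ + atomic + fetch_add:

static __always_inline int
arch_atomic_fetch_add_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}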
+4 -4
scripts/atomic/fallbacks/add_negative
··· 1 1 cat <<EOF 2 2 /** 3 - * ${atomic}_add_negative - add and test if negative 3 + * ${arch}${atomic}_add_negative - add and test if negative 4 4 * @i: integer value to add 5 5 * @v: pointer of type ${atomic}_t 6 6 * ··· 8 8 * if the result is negative, or false when 9 9 * result is greater than or equal to zero. 10 10 */ 11 - static inline bool 12 - ${atomic}_add_negative(${int} i, ${atomic}_t *v) 11 + static __always_inline bool 12 + ${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v) 13 13 { 14 - return ${atomic}_add_return(i, v) < 0; 14 + return ${arch}${atomic}_add_return(i, v) < 0; 15 15 } 16 16 EOF
+4 -4
scripts/atomic/fallbacks/add_unless
··· 1 1 cat << EOF 2 2 /** 3 - * ${atomic}_add_unless - add unless the number is already a given value 3 + * ${arch}${atomic}_add_unless - add unless the number is already a given value 4 4 * @v: pointer of type ${atomic}_t 5 5 * @a: the amount to add to v... 6 6 * @u: ...unless v is equal to u. ··· 8 8 * Atomically adds @a to @v, if @v was not already @u. 9 9 * Returns true if the addition was done. 10 10 */ 11 - static inline bool 12 - ${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u) 11 + static __always_inline bool 12 + ${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u) 13 13 { 14 - return ${atomic}_fetch_add_unless(v, a, u) != u; 14 + return ${arch}${atomic}_fetch_add_unless(v, a, u) != u; 15 15 } 16 16 EOF
+3 -3
scripts/atomic/fallbacks/andnot
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v) 4 4 { 5 - ${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v); 5 + ${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v); 6 6 } 7 7 EOF
+3 -3
scripts/atomic/fallbacks/dec
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v) 4 4 { 5 - ${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v); 5 + ${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v); 6 6 } 7 7 EOF
+4 -4
scripts/atomic/fallbacks/dec_and_test
··· 1 1 cat <<EOF 2 2 /** 3 - * ${atomic}_dec_and_test - decrement and test 3 + * ${arch}${atomic}_dec_and_test - decrement and test 4 4 * @v: pointer of type ${atomic}_t 5 5 * 6 6 * Atomically decrements @v by 1 and 7 7 * returns true if the result is 0, or false for all other 8 8 * cases. 9 9 */ 10 - static inline bool 11 - ${atomic}_dec_and_test(${atomic}_t *v) 10 + static __always_inline bool 11 + ${arch}${atomic}_dec_and_test(${atomic}_t *v) 12 12 { 13 - return ${atomic}_dec_return(v) == 0; 13 + return ${arch}${atomic}_dec_return(v) == 0; 14 14 } 15 15 EOF
+4 -4
scripts/atomic/fallbacks/dec_if_positive
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_dec_if_positive(${atomic}_t *v) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_dec_if_positive(${atomic}_t *v) 4 4 { 5 - ${int} dec, c = ${atomic}_read(v); 5 + ${int} dec, c = ${arch}${atomic}_read(v); 6 6 7 7 do { 8 8 dec = c - 1; 9 9 if (unlikely(dec < 0)) 10 10 break; 11 - } while (!${atomic}_try_cmpxchg(v, &c, dec)); 11 + } while (!${arch}${atomic}_try_cmpxchg(v, &c, dec)); 12 12 13 13 return dec; 14 14 }
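Note the contract here: the decrement is only stored while the value is positive, and the (possibly unstored) new value is returned, so a negative return means nothing was written. A hypothetical usage sketch (the pool example is illustrative, not kernel code):

/* Take one unit from a pool without ever underflowing it. */
static bool take_unit(atomic64_t *pool)
{
	return atomic64_dec_if_positive(pool) >= 0;
}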
+4 -4
scripts/atomic/fallbacks/dec_unless_positive
··· 1 1 cat <<EOF 2 - static inline bool 3 - ${atomic}_dec_unless_positive(${atomic}_t *v) 2 + static __always_inline bool 3 + ${arch}${atomic}_dec_unless_positive(${atomic}_t *v) 4 4 { 5 - ${int} c = ${atomic}_read(v); 5 + ${int} c = ${arch}${atomic}_read(v); 6 6 7 7 do { 8 8 if (unlikely(c > 0)) 9 9 return false; 10 - } while (!${atomic}_try_cmpxchg(v, &c, c - 1)); 10 + } while (!${arch}${atomic}_try_cmpxchg(v, &c, c - 1)); 11 11 12 12 return true; 13 13 }
+3 -3
scripts/atomic/fallbacks/fence
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_${pfx}${name}${sfx}(${params}) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_${pfx}${name}${sfx}(${params}) 4 4 { 5 5 ${ret} ret; 6 6 __atomic_pre_full_fence(); 7 - ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args}); 7 + ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args}); 8 8 __atomic_post_full_fence(); 9 9 return ret; 10 10 }
+5 -5
scripts/atomic/fallbacks/fetch_add_unless
··· 1 1 cat << EOF 2 2 /** 3 - * ${atomic}_fetch_add_unless - add unless the number is already a given value 3 + * ${arch}${atomic}_fetch_add_unless - add unless the number is already a given value 4 4 * @v: pointer of type ${atomic}_t 5 5 * @a: the amount to add to v... 6 6 * @u: ...unless v is equal to u. ··· 8 8 * Atomically adds @a to @v, so long as @v was not already @u. 9 9 * Returns original value of @v 10 10 */ 11 - static inline ${int} 12 - ${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u) 11 + static __always_inline ${int} 12 + ${arch}${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u) 13 13 { 14 - ${int} c = ${atomic}_read(v); 14 + ${int} c = ${arch}${atomic}_read(v); 15 15 16 16 do { 17 17 if (unlikely(c == u)) 18 18 break; 19 - } while (!${atomic}_try_cmpxchg(v, &c, c + a)); 19 + } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + a)); 20 20 21 21 return c; 22 22 }
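The loop above is the canonical try_cmpxchg pattern: one explicit read, and every failed try_cmpxchg refreshes c with the value it observed, so no reread is needed. Because the old value is returned, callers can tell whether the add happened. A hypothetical usage sketch (the saturating counter and LIMIT are illustrative, and it assumes increments of 1 starting below the limit):

#define LIMIT 1000

/* Count events, but stop at LIMIT instead of wrapping. */
static void count_event(atomic_t *nr)
{
	atomic_fetch_add_unless(nr, 1, LIMIT);	/* no-op once *nr == LIMIT */
}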
+3 -3
scripts/atomic/fallbacks/inc
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v) 4 4 { 5 - ${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v); 5 + ${retstmt}${arch}${atomic}_${pfx}add${sfx}${order}(1, v); 6 6 } 7 7 EOF
+4 -4
scripts/atomic/fallbacks/inc_and_test
··· 1 1 cat <<EOF 2 2 /** 3 - * ${atomic}_inc_and_test - increment and test 3 + * ${arch}${atomic}_inc_and_test - increment and test 4 4 * @v: pointer of type ${atomic}_t 5 5 * 6 6 * Atomically increments @v by 1 7 7 * and returns true if the result is zero, or false for all 8 8 * other cases. 9 9 */ 10 - static inline bool 11 - ${atomic}_inc_and_test(${atomic}_t *v) 10 + static __always_inline bool 11 + ${arch}${atomic}_inc_and_test(${atomic}_t *v) 12 12 { 13 - return ${atomic}_inc_return(v) == 0; 13 + return ${arch}${atomic}_inc_return(v) == 0; 14 14 } 15 15 EOF
+4 -4
scripts/atomic/fallbacks/inc_not_zero
··· 1 1 cat <<EOF 2 2 /** 3 - * ${atomic}_inc_not_zero - increment unless the number is zero 3 + * ${arch}${atomic}_inc_not_zero - increment unless the number is zero 4 4 * @v: pointer of type ${atomic}_t 5 5 * 6 6 * Atomically increments @v by 1, if @v is non-zero. 7 7 * Returns true if the increment was done. 8 8 */ 9 - static inline bool 10 - ${atomic}_inc_not_zero(${atomic}_t *v) 9 + static __always_inline bool 10 + ${arch}${atomic}_inc_not_zero(${atomic}_t *v) 11 11 { 12 - return ${atomic}_add_unless(v, 1, 0); 12 + return ${arch}${atomic}_add_unless(v, 1, 0); 13 13 } 14 14 EOF
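These templates layer onto each other: inc_not_zero is add_unless(v, 1, 0), add_unless tests fetch_add_unless, and fetch_add_unless loops on try_cmpxchg. Hand-expanding the plain atomic_t case makes the net effect clear (sketch following the templates above):

static __always_inline bool
sketch_inc_not_zero(atomic_t *v)
{
	/* increment only while non-zero: "take a reference unless already dead" */
	return atomic_fetch_add_unless(v, 1, 0) != 0;
}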
+4 -4
scripts/atomic/fallbacks/inc_unless_negative
··· 1 1 cat <<EOF 2 - static inline bool 3 - ${atomic}_inc_unless_negative(${atomic}_t *v) 2 + static __always_inline bool 3 + ${arch}${atomic}_inc_unless_negative(${atomic}_t *v) 4 4 { 5 - ${int} c = ${atomic}_read(v); 5 + ${int} c = ${arch}${atomic}_read(v); 6 6 7 7 do { 8 8 if (unlikely(c < 0)) 9 9 return false; 10 - } while (!${atomic}_try_cmpxchg(v, &c, c + 1)); 10 + } while (!${arch}${atomic}_try_cmpxchg(v, &c, c + 1)); 11 11 12 12 return true; 13 13 }
+2 -2
scripts/atomic/fallbacks/read_acquire
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_read_acquire(const ${atomic}_t *v) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_read_acquire(const ${atomic}_t *v) 4 4 { 5 5 return smp_load_acquire(&(v)->counter); 6 6 }
+3 -3
scripts/atomic/fallbacks/release
··· 1 1 cat <<EOF 2 - static inline ${ret} 3 - ${atomic}_${pfx}${name}${sfx}_release(${params}) 2 + static __always_inline ${ret} 3 + ${arch}${atomic}_${pfx}${name}${sfx}_release(${params}) 4 4 { 5 5 __atomic_release_fence(); 6 - ${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args}); 6 + ${retstmt}${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args}); 7 7 } 8 8 EOF
+2 -2
scripts/atomic/fallbacks/set_release
··· 1 1 cat <<EOF 2 - static inline void 3 - ${atomic}_set_release(${atomic}_t *v, ${int} i) 2 + static __always_inline void 3 + ${arch}${atomic}_set_release(${atomic}_t *v, ${int} i) 4 4 { 5 5 smp_store_release(&(v)->counter, i); 6 6 }
+4 -4
scripts/atomic/fallbacks/sub_and_test
··· 1 1 cat <<EOF 2 2 /** 3 - * ${atomic}_sub_and_test - subtract value from variable and test result 3 + * ${arch}${atomic}_sub_and_test - subtract value from variable and test result 4 4 * @i: integer value to subtract 5 5 * @v: pointer of type ${atomic}_t 6 6 * ··· 8 8 * true if the result is zero, or false for all 9 9 * other cases. 10 10 */ 11 - static inline bool 12 - ${atomic}_sub_and_test(${int} i, ${atomic}_t *v) 11 + static __always_inline bool 12 + ${arch}${atomic}_sub_and_test(${int} i, ${atomic}_t *v) 13 13 { 14 - return ${atomic}_sub_return(i, v) == 0; 14 + return ${arch}${atomic}_sub_return(i, v) == 0; 15 15 } 16 16 EOF
+3 -3
scripts/atomic/fallbacks/try_cmpxchg
··· 1 1 cat <<EOF 2 - static inline bool 3 - ${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new) 2 + static __always_inline bool 3 + ${arch}${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new) 4 4 { 5 5 ${int} r, o = *old; 6 - r = ${atomic}_cmpxchg${order}(v, o, new); 6 + r = ${arch}${atomic}_cmpxchg${order}(v, o, new); 7 7 if (unlikely(r != o)) 8 8 *old = r; 9 9 return likely(r == o);
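This fallback also shows why try_cmpxchg is the preferred loop primitive: plain cmpxchg returns the value it observed and the caller must compare and reload by hand, while try_cmpxchg returns success and writes the observed value back through *old on failure. Compare the two retry idioms (a sketch using the real atomic_t APIs):

static void inc_with_cmpxchg(atomic_t *v)
{
	int old = atomic_read(v);

	for (;;) {
		int seen = atomic_cmpxchg(v, old, old + 1);
		if (seen == old)
			break;
		old = seen;			/* reload by hand */
	}
}

static void inc_with_try_cmpxchg(atomic_t *v)
{
	int old = atomic_read(v);

	while (!atomic_try_cmpxchg(v, &old, old + 1))
		;				/* failure already refreshed 'old' */
}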
+15 -16
scripts/atomic/gen-atomic-fallback.sh
··· 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 4 ATOMICDIR=$(dirname $0) 5 + ARCH=$2 5 6 6 7 . ${ATOMICDIR}/atomic-tbl.sh 7 8 8 - #gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...) 9 + #gen_template_fallback(template, meta, pfx, name, sfx, order, arch, atomic, int, args...) 9 10 gen_template_fallback() 10 11 { 11 12 local template="$1"; shift ··· 15 14 local name="$1"; shift 16 15 local sfx="$1"; shift 17 16 local order="$1"; shift 17 + local arch="$1"; shift 18 18 local atomic="$1"; shift 19 19 local int="$1"; shift 20 20 21 - local atomicname="${atomic}_${pfx}${name}${sfx}${order}" 21 + local atomicname="${arch}${atomic}_${pfx}${name}${sfx}${order}" 22 22 23 23 local ret="$(gen_ret_type "${meta}" "${int}")" 24 24 local retstmt="$(gen_ret_stmt "${meta}")" ··· 34 32 fi 35 33 } 36 34 37 - #gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...) 35 + #gen_proto_fallback(meta, pfx, name, sfx, order, arch, atomic, int, args...) 38 36 gen_proto_fallback() 39 37 { 40 38 local meta="$1"; shift ··· 58 56 EOF 59 57 } 60 58 61 - #gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...) 59 + #gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...) 62 60 gen_proto_order_variants() 63 61 { 64 62 local meta="$1"; shift 65 63 local pfx="$1"; shift 66 64 local name="$1"; shift 67 65 local sfx="$1"; shift 68 - local atomic="$1" 66 + local arch="$1" 67 + local atomic="$2" 69 68 70 - local basename="${atomic}_${pfx}${name}${sfx}" 69 + local basename="${arch}${atomic}_${pfx}${name}${sfx}" 71 70 72 71 local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")" 73 72 ··· 97 94 gen_basic_fallbacks "${basename}" 98 95 99 96 if [ ! -z "${template}" ]; then 100 - printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n" 97 + printf "#endif /* ${arch}${atomic}_${pfx}${name}${sfx} */\n\n" 101 98 gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@" 102 99 gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@" 103 100 gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@" ··· 152 149 #ifndef _LINUX_ATOMIC_FALLBACK_H 153 150 #define _LINUX_ATOMIC_FALLBACK_H 154 151 152 + #include <linux/compiler.h> 153 + 155 154 EOF 156 155 157 - for xchg in "xchg" "cmpxchg" "cmpxchg64"; do 156 + for xchg in "${ARCH}xchg" "${ARCH}cmpxchg" "${ARCH}cmpxchg64"; do 158 157 gen_xchg_fallbacks "${xchg}" 159 158 done 160 159 161 160 grep '^[a-z]' "$1" | while read name meta args; do 162 - gen_proto "${meta}" "${name}" "atomic" "int" ${args} 161 + gen_proto "${meta}" "${name}" "${ARCH}" "atomic" "int" ${args} 163 162 done 164 163 165 164 cat <<EOF 166 - #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) 167 - #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) 168 - 169 165 #ifdef CONFIG_GENERIC_ATOMIC64 170 166 #include <asm-generic/atomic64.h> 171 167 #endif ··· 172 170 EOF 173 171 174 172 grep '^[a-z]' "$1" | while read name meta args; do 175 - gen_proto "${meta}" "${name}" "atomic64" "s64" ${args} 173 + gen_proto "${meta}" "${name}" "${ARCH}" "atomic64" "s64" ${args} 176 174 done 177 175 178 176 cat <<EOF 179 - #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) 180 - #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) 181 - 182 177 #endif /* _LINUX_ATOMIC_FALLBACK_H */ 183 178 EOF
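With the prefix taken from $2, gen-atomics.sh (below) runs this script twice: once with arch_ to emit linux/atomic-arch-fallback.h and once with no prefix for the legacy linux/atomic-fallback.h; the sha1 it appends afterwards is the trailing // comment seen at the end of the regenerated header above, which lets a tree detect a stale generated file. The two runs differ only in the identifier prefix, e.g. the inc template expands to (hand-expanded illustration):

/* ARCH=arch_ run -> linux/atomic-arch-fallback.h */
static __always_inline int
arch_atomic_inc_return(atomic_t *v)
{
	return arch_atomic_add_return(1, v);
}

/* unprefixed run -> linux/atomic-fallback.h */
static __always_inline int
atomic_inc_return(atomic_t *v)
{
	return atomic_add_return(1, v);
}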
+3 -2
scripts/atomic/gen-atomics.sh
··· 10 10 cat <<EOF | 11 11 gen-atomic-instrumented.sh asm-generic/atomic-instrumented.h 12 12 gen-atomic-long.sh asm-generic/atomic-long.h 13 + gen-atomic-fallback.sh linux/atomic-arch-fallback.h arch_ 13 14 gen-atomic-fallback.sh linux/atomic-fallback.h 14 15 EOF 15 - while read script header; do 16 - /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} > ${LINUXDIR}/include/${header} 16 + while read script header args; do 17 + /bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header} 17 18 HASH="$(sha1sum ${LINUXDIR}/include/${header})" 18 19 HASH="${HASH%% *}" 19 20 printf "// %s\n" "${HASH}" >> ${LINUXDIR}/include/${header}