Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking, ARM, atomics: Define our SMP atomics in terms of _relaxed() operations

By defining our SMP atomics in terms of relaxed operations, we gain
a small reduction in code size and have acquire/release/fence variants
generated automatically by the core code.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1438880084-18856-9-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Will Deacon and committed by Ingo Molnar.
0ca326de cd074aea

+24 -60
+16 -21
arch/arm/include/asm/atomic.h
··· 57 57 } \ 58 58 59 59 #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 60 - static inline int atomic_##op##_return(int i, atomic_t *v) \ 60 + static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ 61 61 { \ 62 62 unsigned long tmp; \ 63 63 int result; \ 64 64 \ 65 - smp_mb(); \ 66 65 prefetchw(&v->counter); \ 67 66 \ 68 67 __asm__ __volatile__("@ atomic_" #op "_return\n" \ ··· 74 75 : "r" (&v->counter), "Ir" (i) \ 75 76 : "cc"); \ 76 77 \ 77 - smp_mb(); \ 78 - \ 79 78 return result; \ 80 79 } 81 80 82 - static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) 81 + #define atomic_add_return_relaxed atomic_add_return_relaxed 82 + #define atomic_sub_return_relaxed atomic_sub_return_relaxed 83 + 84 + static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) 83 85 { 84 86 int oldval; 85 87 unsigned long res; 86 88 87 - smp_mb(); 88 89 prefetchw(&ptr->counter); 89 90 90 91 do { ··· 98 99 : "cc"); 99 100 } while (res); 100 101 101 - smp_mb(); 102 - 103 102 return oldval; 104 103 } 104 + #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed 105 105 106 106 static inline int __atomic_add_unless(atomic_t *v, int a, int u) 107 107 { ··· 295 297 } \ 296 298 297 299 #define ATOMIC64_OP_RETURN(op, op1, op2) \ 298 - static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ 300 + static inline long long \ 301 + atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \ 299 302 { \ 300 303 long long result; \ 301 304 unsigned long tmp; \ 302 305 \ 303 - smp_mb(); \ 304 306 prefetchw(&v->counter); \ 305 307 \ 306 308 __asm__ __volatile__("@ atomic64_" #op "_return\n" \ ··· 314 316 : "r" (&v->counter), "r" (i) \ 315 317 : "cc"); \ 316 318 \ 317 - smp_mb(); \ 318 - \ 319 319 return result; \ 320 320 } 321 321 ··· 323 327 324 328 ATOMIC64_OPS(add, adds, adc) 325 329 ATOMIC64_OPS(sub, subs, sbc) 330 + 331 + #define atomic64_add_return_relaxed atomic64_add_return_relaxed 332 + #define atomic64_sub_return_relaxed 
atomic64_sub_return_relaxed 326 333 327 334 #define atomic64_andnot atomic64_andnot 328 335 ··· 338 339 #undef ATOMIC64_OP_RETURN 339 340 #undef ATOMIC64_OP 340 341 341 - static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, 342 - long long new) 342 + static inline long long 343 + atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new) 343 344 { 344 345 long long oldval; 345 346 unsigned long res; 346 347 347 - smp_mb(); 348 348 prefetchw(&ptr->counter); 349 349 350 350 do { ··· 358 360 : "cc"); 359 361 } while (res); 360 362 361 - smp_mb(); 362 - 363 363 return oldval; 364 364 } 365 + #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed 365 366 366 - static inline long long atomic64_xchg(atomic64_t *ptr, long long new) 367 + static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new) 367 368 { 368 369 long long result; 369 370 unsigned long tmp; 370 371 371 - smp_mb(); 372 372 prefetchw(&ptr->counter); 373 373 374 374 __asm__ __volatile__("@ atomic64_xchg\n" ··· 378 382 : "r" (&ptr->counter), "r" (new) 379 383 : "cc"); 380 384 381 - smp_mb(); 382 - 383 385 return result; 384 386 } 387 + #define atomic64_xchg_relaxed atomic64_xchg_relaxed 385 388 386 389 static inline long long atomic64_dec_if_positive(atomic64_t *v) 387 390 {
+8 -39
arch/arm/include/asm/cmpxchg.h
··· 35 35 unsigned int tmp; 36 36 #endif 37 37 38 - smp_mb(); 39 38 prefetchw((const void *)ptr); 40 39 41 40 switch (size) { ··· 97 98 __bad_xchg(ptr, size), ret = 0; 98 99 break; 99 100 } 100 - smp_mb(); 101 101 102 102 return ret; 103 103 } 104 104 105 - #define xchg(ptr, x) ({ \ 105 + #define xchg_relaxed(ptr, x) ({ \ 106 106 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ 107 107 sizeof(*(ptr))); \ 108 108 }) ··· 114 116 #ifdef CONFIG_SMP 115 117 #error "SMP is not supported on this platform" 116 118 #endif 119 + 120 + #define xchg xchg_relaxed 117 121 118 122 /* 119 123 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make ··· 194 194 return oldval; 195 195 } 196 196 197 - static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, 198 - unsigned long new, int size) 199 - { 200 - unsigned long ret; 201 - 202 - smp_mb(); 203 - ret = __cmpxchg(ptr, old, new, size); 204 - smp_mb(); 205 - 206 - return ret; 207 - } 208 - 209 - #define cmpxchg(ptr,o,n) ({ \ 210 - (__typeof__(*(ptr)))__cmpxchg_mb((ptr), \ 211 - (unsigned long)(o), \ 212 - (unsigned long)(n), \ 213 - sizeof(*(ptr))); \ 197 + #define cmpxchg_relaxed(ptr,o,n) ({ \ 198 + (__typeof__(*(ptr)))__cmpxchg((ptr), \ 199 + (unsigned long)(o), \ 200 + (unsigned long)(n), \ 201 + sizeof(*(ptr))); \ 214 202 }) 215 203 216 204 static inline unsigned long __cmpxchg_local(volatile void *ptr, ··· 260 272 }) 261 273 262 274 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) 263 - 264 - static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr, 265 - unsigned long long old, 266 - unsigned long long new) 267 - { 268 - unsigned long long ret; 269 - 270 - smp_mb(); 271 - ret = __cmpxchg64(ptr, old, new); 272 - smp_mb(); 273 - 274 - return ret; 275 - } 276 - 277 - #define cmpxchg64(ptr, o, n) ({ \ 278 - (__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ 279 - (unsigned long long)(o), \ 280 - (unsigned long long)(n)); \ 281 - }) 282 275 283 276 
#endif /* __LINUX_ARM_ARCH__ >= 6 */ 284 277