Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: atomics: tidy up common atomic{,64}_* macros

The common (i.e. identical for ll/sc and lse) atomic macros in atomic.h
are needlessly different for atomic_t and atomic64_t.

This patch tidies up the definitions to make them consistent across the
two atomic types and factors out common code such as the add_unless
implementation based on cmpxchg.

Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

+38 -57
arch/arm64/include/asm/atomic.h
··· 26 26 #include <asm/barrier.h> 27 27 #include <asm/lse.h> 28 28 29 - #define ATOMIC_INIT(i) { (i) } 30 - 31 29 #ifdef __KERNEL__ 32 30 33 31 #define __ARM64_IN_ATOMIC_IMPL ··· 40 42 41 43 #include <asm/cmpxchg.h> 42 44 43 - /* 44 - * On ARM, ordinary assignment (str instruction) doesn't clear the local 45 - * strex/ldrex monitor on some implementations. The reason we can use it for 46 - * atomic_set() is the clrex or dummy strex done on every exception return. 47 - */ 48 - #define atomic_read(v) ACCESS_ONCE((v)->counter) 49 - #define atomic_set(v,i) (((v)->counter) = (i)) 45 + #define ___atomic_add_unless(v, a, u, sfx) \ 46 + ({ \ 47 + typeof((v)->counter) c, old; \ 48 + \ 49 + c = atomic##sfx##_read(v); \ 50 + while (c != (u) && \ 51 + (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \ 52 + c = old; \ 53 + c; \ 54 + }) 50 55 51 - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 56 + #define ATOMIC_INIT(i) { (i) } 52 57 53 - static inline int __atomic_add_unless(atomic_t *v, int a, int u) 54 - { 55 - int c, old; 58 + #define atomic_read(v) READ_ONCE((v)->counter) 59 + #define atomic_set(v, i) (((v)->counter) = (i)) 60 + #define atomic_xchg(v, new) xchg(&((v)->counter), (new)) 56 61 57 - c = atomic_read(v); 58 - while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) 59 - c = old; 60 - return c; 61 - } 62 - 63 - #define atomic_inc(v) atomic_add(1, v) 64 - #define atomic_dec(v) atomic_sub(1, v) 65 - 66 - #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) 67 - #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) 68 - #define atomic_inc_return(v) (atomic_add_return(1, v)) 69 - #define atomic_dec_return(v) (atomic_sub_return(1, v)) 70 - #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 71 - 72 - #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) 73 - 74 - #define atomic_andnot atomic_andnot 62 + #define atomic_inc(v) atomic_add(1, (v)) 63 + #define atomic_dec(v) atomic_sub(1, (v)) 64 + #define 
atomic_inc_return(v) atomic_add_return(1, (v)) 65 + #define atomic_dec_return(v) atomic_sub_return(1, (v)) 66 + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 67 + #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) 68 + #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) 69 + #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) 70 + #define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) 71 + #define atomic_andnot atomic_andnot 75 72 76 73 /* 77 74 * 64-bit atomic operations. 78 75 */ 79 - #define ATOMIC64_INIT(i) { (i) } 76 + #define ATOMIC64_INIT ATOMIC_INIT 77 + #define atomic64_read atomic_read 78 + #define atomic64_set atomic_set 79 + #define atomic64_xchg atomic_xchg 80 80 81 - #define atomic64_read(v) ACCESS_ONCE((v)->counter) 82 - #define atomic64_set(v,i) (((v)->counter) = (i)) 83 - 84 - #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 85 - 86 - static inline int atomic64_add_unless(atomic64_t *v, long a, long u) 87 - { 88 - long c, old; 89 - 90 - c = atomic64_read(v); 91 - while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c) 92 - c = old; 93 - 94 - return c != u; 95 - } 96 - 97 - #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 98 - #define atomic64_inc(v) atomic64_add(1LL, (v)) 99 - #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) 81 + #define atomic64_inc(v) atomic64_add(1, (v)) 82 + #define atomic64_dec(v) atomic64_sub(1, (v)) 83 + #define atomic64_inc_return(v) atomic64_add_return(1, (v)) 84 + #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) 100 85 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) 101 - #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) 102 - #define atomic64_dec(v) atomic64_sub(1LL, (v)) 103 - #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) 104 - #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 105 - #define atomic64_inc_not_zero(v) 
atomic64_add_unless((v), 1LL, 0LL) 86 + #define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) 87 + #define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0) 88 + #define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0) 89 + #define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u) 90 + #define atomic64_andnot atomic64_andnot 106 91 107 - #define atomic64_andnot atomic64_andnot 92 + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 108 93 109 94 #endif 110 95 #endif