Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

locking/atomic, arch/alpha: Convert to _relaxed atomics

Generic code will construct {,_acquire,_release} versions by adding the
required smp_mb__{before,after}_atomic() calls.
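
For reference, the generic layer builds the ordered variants from the
_relaxed primitive roughly as in the sketch below, modeled on the
__atomic_op_{acquire,release,fence}() wrappers in include/linux/atomic.h
of this era (illustrative, not verbatim):

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* e.g., given only atomic_add_return_relaxed() from the arch: */
#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif

On Alpha, smp_mb__{before,after}_atomic() fall back to full smp_mb(), so
the fully ordered atomic_add_return() ends up equivalent to the pre-patch
code, while callers of the new _relaxed variants avoid both barriers.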

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra and committed by Ingo Molnar
fe14d2f1 e37837fb

+24 -12
arch/alpha/include/asm/atomic.h
···
 	} \
 
 #define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
-	smp_mb(); \
 	__asm__ __volatile__( \
 	"1:	ldl_l %0,%1\n" \
 	"	" #asm_op " %0,%3,%2\n" \
···
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_mb(); \
 	return result; \
 }
 
 #define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
-	smp_mb(); \
 	__asm__ __volatile__( \
 	"1:	ldl_l %2,%1\n" \
 	"	" #asm_op " %2,%3,%0\n" \
···
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_mb(); \
 	return result; \
 }
···
 	} \
 
 #define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 { \
 	long temp, result; \
-	smp_mb(); \
 	__asm__ __volatile__( \
 	"1:	ldq_l %0,%1\n" \
 	"	" #asm_op " %0,%3,%2\n" \
···
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_mb(); \
 	return result; \
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 { \
 	long temp, result; \
-	smp_mb(); \
 	__asm__ __volatile__( \
 	"1:	ldq_l %2,%1\n" \
 	"	" #asm_op " %2,%3,%0\n" \
···
 	".previous" \
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
 	:"Ir" (i), "m" (v->counter) : "memory"); \
-	smp_mb(); \
 	return result; \
 }
···
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
 
 #define atomic_andnot atomic_andnot
 #define atomic64_andnot atomic64_andnot
···
 ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, bis)
 ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
 #undef ATOMIC64_FETCH_OP
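
The self-referential defines added above (e.g. "#define
atomic_add_return_relaxed atomic_add_return_relaxed") are the convention
by which an architecture tells the generic layer, which probes with
#ifndef, that it supplies its own _relaxed implementation and needs no
fallback.

After the patch, ATOMIC_OP_RETURN(add, addl) expands to roughly the
following hand-expanded sketch; the diff view elides the middle of the
LL/SC sequence, which is assumed unchanged from the existing file:

static inline int atomic_add_return_relaxed(int i, atomic_t *v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked v->counter */
	"	addl %0,%3,%2\n"	/* result = old + i (return value) */
	"	addl %0,%3,%0\n"	/* temp = old + i (value to store) */
	"	stl_c %0,%1\n"		/* store-conditional */
	"	beq %0,2f\n"		/* lost the reservation: retry */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

Note there is no longer an smp_mb() on either side of the LL/SC loop;
ordering, when required, is supplied by the generic wrappers.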