Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic, arch/arm64: Implement atomic{,64}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}() for LSE instructions

Implement FETCH-OP atomic primitives, these are very similar to the
existing OP-RETURN primitives we already have, except they return the
value of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).

This patch implements the LSE variants.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steve Capper <steve.capper@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1461344493-8262-2-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Will Deacon; committed by Ingo Molnar.
2efe95fe 6822a84d

+172 lines added, 0 deleted
arch/arm64/include/asm/atomic_lse.h
··· 46 46 47 47 #undef ATOMIC_OP 48 48 49 + #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ 50 + static inline int atomic_fetch_##op##name(int i, atomic_t *v) \ 51 + { \ 52 + register int w0 asm ("w0") = i; \ 53 + register atomic_t *x1 asm ("x1") = v; \ 54 + \ 55 + asm volatile(ARM64_LSE_ATOMIC_INSN( \ 56 + /* LL/SC */ \ 57 + __LL_SC_ATOMIC(fetch_##op##name), \ 58 + /* LSE atomics */ \ 59 + " " #asm_op #mb " %w[i], %w[i], %[v]") \ 60 + : [i] "+r" (w0), [v] "+Q" (v->counter) \ 61 + : "r" (x1) \ 62 + : __LL_SC_CLOBBERS, ##cl); \ 63 + \ 64 + return w0; \ 65 + } 66 + 67 + #define ATOMIC_FETCH_OPS(op, asm_op) \ 68 + ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \ 69 + ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \ 70 + ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \ 71 + ATOMIC_FETCH_OP( , al, op, asm_op, "memory") 72 + 73 + ATOMIC_FETCH_OPS(andnot, ldclr) 74 + ATOMIC_FETCH_OPS(or, ldset) 75 + ATOMIC_FETCH_OPS(xor, ldeor) 76 + ATOMIC_FETCH_OPS(add, ldadd) 77 + 78 + #undef ATOMIC_FETCH_OP 79 + #undef ATOMIC_FETCH_OPS 80 + 49 81 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ 50 82 static inline int atomic_add_return##name(int i, atomic_t *v) \ 51 83 { \ ··· 121 89 : "r" (x1) 122 90 : __LL_SC_CLOBBERS); 123 91 } 92 + 93 + #define ATOMIC_FETCH_OP_AND(name, mb, cl...) 
\ 94 + static inline int atomic_fetch_and##name(int i, atomic_t *v) \ 95 + { \ 96 + register int w0 asm ("w0") = i; \ 97 + register atomic_t *x1 asm ("x1") = v; \ 98 + \ 99 + asm volatile(ARM64_LSE_ATOMIC_INSN( \ 100 + /* LL/SC */ \ 101 + " nop\n" \ 102 + __LL_SC_ATOMIC(fetch_and##name), \ 103 + /* LSE atomics */ \ 104 + " mvn %w[i], %w[i]\n" \ 105 + " ldclr" #mb " %w[i], %w[i], %[v]") \ 106 + : [i] "+r" (w0), [v] "+Q" (v->counter) \ 107 + : "r" (x1) \ 108 + : __LL_SC_CLOBBERS, ##cl); \ 109 + \ 110 + return w0; \ 111 + } 112 + 113 + ATOMIC_FETCH_OP_AND(_relaxed, ) 114 + ATOMIC_FETCH_OP_AND(_acquire, a, "memory") 115 + ATOMIC_FETCH_OP_AND(_release, l, "memory") 116 + ATOMIC_FETCH_OP_AND( , al, "memory") 117 + 118 + #undef ATOMIC_FETCH_OP_AND 124 119 125 120 static inline void atomic_sub(int i, atomic_t *v) 126 121 { ··· 194 135 ATOMIC_OP_SUB_RETURN( , al, "memory") 195 136 196 137 #undef ATOMIC_OP_SUB_RETURN 138 + 139 + #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \ 140 + static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ 141 + { \ 142 + register int w0 asm ("w0") = i; \ 143 + register atomic_t *x1 asm ("x1") = v; \ 144 + \ 145 + asm volatile(ARM64_LSE_ATOMIC_INSN( \ 146 + /* LL/SC */ \ 147 + " nop\n" \ 148 + __LL_SC_ATOMIC(fetch_sub##name), \ 149 + /* LSE atomics */ \ 150 + " neg %w[i], %w[i]\n" \ 151 + " ldadd" #mb " %w[i], %w[i], %[v]") \ 152 + : [i] "+r" (w0), [v] "+Q" (v->counter) \ 153 + : "r" (x1) \ 154 + : __LL_SC_CLOBBERS, ##cl); \ 155 + \ 156 + return w0; \ 157 + } 158 + 159 + ATOMIC_FETCH_OP_SUB(_relaxed, ) 160 + ATOMIC_FETCH_OP_SUB(_acquire, a, "memory") 161 + ATOMIC_FETCH_OP_SUB(_release, l, "memory") 162 + ATOMIC_FETCH_OP_SUB( , al, "memory") 163 + 164 + #undef ATOMIC_FETCH_OP_SUB 197 165 #undef __LL_SC_ATOMIC 198 166 199 167 #define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) ··· 243 157 ATOMIC64_OP(add, stadd) 244 158 245 159 #undef ATOMIC64_OP 160 + 161 + #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) 
\ 162 + static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \ 163 + { \ 164 + register long x0 asm ("x0") = i; \ 165 + register atomic64_t *x1 asm ("x1") = v; \ 166 + \ 167 + asm volatile(ARM64_LSE_ATOMIC_INSN( \ 168 + /* LL/SC */ \ 169 + __LL_SC_ATOMIC64(fetch_##op##name), \ 170 + /* LSE atomics */ \ 171 + " " #asm_op #mb " %[i], %[i], %[v]") \ 172 + : [i] "+r" (x0), [v] "+Q" (v->counter) \ 173 + : "r" (x1) \ 174 + : __LL_SC_CLOBBERS, ##cl); \ 175 + \ 176 + return x0; \ 177 + } 178 + 179 + #define ATOMIC64_FETCH_OPS(op, asm_op) \ 180 + ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \ 181 + ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \ 182 + ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \ 183 + ATOMIC64_FETCH_OP( , al, op, asm_op, "memory") 184 + 185 + ATOMIC64_FETCH_OPS(andnot, ldclr) 186 + ATOMIC64_FETCH_OPS(or, ldset) 187 + ATOMIC64_FETCH_OPS(xor, ldeor) 188 + ATOMIC64_FETCH_OPS(add, ldadd) 189 + 190 + #undef ATOMIC64_FETCH_OP 191 + #undef ATOMIC64_FETCH_OPS 246 192 247 193 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ 248 194 static inline long atomic64_add_return##name(long i, atomic64_t *v) \ ··· 319 201 : "r" (x1) 320 202 : __LL_SC_CLOBBERS); 321 203 } 204 + 205 + #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) 
\ 206 + static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ 207 + { \ 208 + register long x0 asm ("w0") = i; \ 209 + register atomic64_t *x1 asm ("x1") = v; \ 210 + \ 211 + asm volatile(ARM64_LSE_ATOMIC_INSN( \ 212 + /* LL/SC */ \ 213 + " nop\n" \ 214 + __LL_SC_ATOMIC64(fetch_and##name), \ 215 + /* LSE atomics */ \ 216 + " mvn %[i], %[i]\n" \ 217 + " ldclr" #mb " %[i], %[i], %[v]") \ 218 + : [i] "+r" (x0), [v] "+Q" (v->counter) \ 219 + : "r" (x1) \ 220 + : __LL_SC_CLOBBERS, ##cl); \ 221 + \ 222 + return x0; \ 223 + } 224 + 225 + ATOMIC64_FETCH_OP_AND(_relaxed, ) 226 + ATOMIC64_FETCH_OP_AND(_acquire, a, "memory") 227 + ATOMIC64_FETCH_OP_AND(_release, l, "memory") 228 + ATOMIC64_FETCH_OP_AND( , al, "memory") 229 + 230 + #undef ATOMIC64_FETCH_OP_AND 322 231 323 232 static inline void atomic64_sub(long i, atomic64_t *v) 324 233 { ··· 392 247 ATOMIC64_OP_SUB_RETURN( , al, "memory") 393 248 394 249 #undef ATOMIC64_OP_SUB_RETURN 250 + 251 + #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ 252 + static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ 253 + { \ 254 + register long x0 asm ("w0") = i; \ 255 + register atomic64_t *x1 asm ("x1") = v; \ 256 + \ 257 + asm volatile(ARM64_LSE_ATOMIC_INSN( \ 258 + /* LL/SC */ \ 259 + " nop\n" \ 260 + __LL_SC_ATOMIC64(fetch_sub##name), \ 261 + /* LSE atomics */ \ 262 + " neg %[i], %[i]\n" \ 263 + " ldadd" #mb " %[i], %[i], %[v]") \ 264 + : [i] "+r" (x0), [v] "+Q" (v->counter) \ 265 + : "r" (x1) \ 266 + : __LL_SC_CLOBBERS, ##cl); \ 267 + \ 268 + return x0; \ 269 + } 270 + 271 + ATOMIC64_FETCH_OP_SUB(_relaxed, ) 272 + ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory") 273 + ATOMIC64_FETCH_OP_SUB(_release, l, "memory") 274 + ATOMIC64_FETCH_OP_SUB( , al, "memory") 275 + 276 + #undef ATOMIC64_FETCH_OP_SUB 395 277 396 278 static inline long atomic64_dec_if_positive(atomic64_t *v) 397 279 {