Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.6 269 lines 8.1 kB view raw
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value). This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

/*
 * ATOMIC_OP(op, asm_op) - emit "void atomic_<op>(int i, atomic_t *v)".
 *
 * Classic LL/SC retry loop: load-exclusive the counter into %w0, apply
 * <asm_op> with the operand %w3 (constraint "Ir": immediate or register),
 * store-exclusive, and branch back to 1: if the exclusive store failed
 * (%w1 non-zero).  No barrier, no "memory" clobber: the void ops are
 * fully relaxed.  The prfm prefetch primes the line for store-exclusive.
 */
#define ATOMIC_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic_##op);

/*
 * ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) - emit
 * "int atomic_<op>_return<name>(int i, atomic_t *v)".
 *
 * Template parameters select the ordering variant:
 *   name - function suffix (_relaxed/_acquire/_release, or empty for
 *          the fully-ordered form)
 *   mb   - trailing barrier ("dmb ish" for the fully-ordered form)
 *   acq  - 'a' to turn ldxr into load-acquire exclusive (ldaxr)
 *   rel  - 'l' to turn stxr into store-release exclusive (stlxr)
 *   cl   - clobber list ("memory" for every variant with ordering
 *          semantics, empty for _relaxed)
 *
 * Returns the new (post-op) value of the counter.
 */
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE int							\
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%w0, %2\n"					\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	st" #rel "xr	%w1, %w0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic_##op##_return##name);

/* Instantiate the void op plus the fully-ordered (dmb ish) _return form. */
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)

/* As ATOMIC_OPS, plus the _relaxed/_acquire/_release _return variants. */
#define ATOMIC_OPS_RLX(...)						\
	ATOMIC_OPS(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS_RLX(add, add)
ATOMIC_OPS_RLX(sub, sub)

/* Bitwise ops only exist in the void form: no _return variants. */
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)

#undef ATOMIC_OPS_RLX
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * 64-bit counterparts of the macros above.  Identical structure; the
 * only difference is that the value operands use full x registers
 * (%0/%3 instead of %w0/%w3) to operate on the 64-bit counter.
 */
#define ATOMIC64_OP(op, asm_op)						\
__LL_SC_INLINE void							\
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b"						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i));							\
}									\
__LL_SC_EXPORT(atomic64_##op);

/* 64-bit _return template; parameters as for ATOMIC_OP_RETURN above. */
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
{									\
	long result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%0, %2\n"					\
"	" #asm_op "	%0, %0, %3\n"					\
"	st" #rel "xr	%w1, %0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: "Ir" (i)							\
	: cl);								\
									\
	return result;							\
}									\
__LL_SC_EXPORT(atomic64_##op##_return##name);

/* Instantiate the void op plus the fully-ordered (dmb ish) _return form. */
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)

/* As ATOMIC64_OPS, plus the _relaxed/_acquire/_release _return variants. */
#define ATOMIC64_OPS_RLX(...)						\
	ATOMIC64_OPS(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS_RLX(add, add)
ATOMIC64_OPS_RLX(sub, sub)

/* Bitwise ops only exist in the void form: no _return variants. */
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)

#undef ATOMIC64_OPS_RLX
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_dec_if_positive - decrement *v only if the result stays >= 0.
 *
 * Returns the new value on success.  If the decrement would go negative,
 * the store-exclusive (and the dmb) are skipped via the b.lt to 2: and
 * the negative would-be result is returned unstored, so callers test the
 * sign of the return value.  subs sets the flags, hence the "cc" clobber;
 * full ordering (stlxr + dmb ish) applies only on the successful path.
 */
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);

/*
 * __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) - emit
 * "unsigned long __cmpxchg_case_<name>(ptr, old, new)".
 *
 *   w   - 'w' for 32-bit-and-narrower accesses, empty for 64-bit
 *         (selects w vs x value registers)
 *   sz  - size suffix on the exclusives: 'b' (byte), 'h' (halfword),
 *         empty (word/doubleword)
 *   name/mb/acq/rel/cl - ordering variant, as for ATOMIC_OP_RETURN
 *
 * The comparison uses eor + cbnz rather than cmp + b.ne, so the flags
 * are not clobbered.  On mismatch we branch to 2: without storing and
 * return the loaded value; on success the barrier runs and "old" is
 * moved into the return register (oldval already equals old on this
 * path, so the result is the caller's expected value either way).
 * "Lr" lets "old" be a logical immediate where possible.
 */
#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)			\
__LL_SC_INLINE unsigned long						\
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
				     unsigned long old,			\
				     unsigned long new))		\
{									\
	unsigned long tmp, oldval;					\
									\
	asm volatile(							\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
	"2:"								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [old] "Lr" (old), [new] "r" (new)				\
	: cl);								\
									\
	return oldval;							\
}									\
__LL_SC_EXPORT(__cmpxchg_case_##name);

/* All size (1/2/4/8) x ordering (relaxed/acq/rel/mb) combinations. */
__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")

#undef __CMPXCHG_CASE

/*
 * __CMPXCHG_DBL(name, mb, rel, cl) - double-word (paired 64-bit)
 * compare-and-exchange using ldxp/stxp.
 *
 * Both halves are compared via eor and combined with orr; if either
 * differs we branch to 2: without storing.  Returns 0 on success and
 * non-zero on mismatch (the orr of the two eor results).  Note %w0 is
 * reused as the stxp status register after the comparison is done
 * with it.
 */
#define __CMPXCHG_DBL(name, mb, rel, cl)				\
__LL_SC_INLINE long							\
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr))		\
{									\
	unsigned long tmp, ret;						\
									\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:"								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
									\
	return ret;							\
}									\
__LL_SC_EXPORT(__cmpxchg_double##name);

/* Relaxed and fully-ordered (stlxp + dmb ish) variants. */
__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */