/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough to act as an
 * acquire barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

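/*
 * Illustrative sketch only (not part of this header): the generic atomic
 * fallback layer combines the _relaxed primitives defined below with the
 * fences above, roughly along these lines:
 *
 *	static __always_inline int
 *	arch_atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 *
 * so the acquire/release/fully-ordered variants of these operations do
 * not need their own hand-written assembly.
 */
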
static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)	\
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op "%I3" suffix " %1,%0,%3\n"				\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)

ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")

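/*
 * Illustrative, approximate expansion of the macros above:
 * ATOMIC_OPS(add, add, "c", I, "xer") generates arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed(),
 * with the inner asm reading roughly:
 *
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add%I2c %0,%0,%2\n"
 *	"	stwcx.	%0,0,%3 \n"
 *	"	bne-	1b\n"
 *	 : "=&r" (t), "+m" (v->counter)
 *	 : "rI" (a), "r" (&v->counter)
 *	 : "cc", "xer"
 *
 * The "%I2" modifier lets the compiler emit addic when 'a' is a 16-bit
 * immediate; addc/addic update XER[CA], hence the extra "xer" clobber.
 */
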
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
	ATOMIC_OP(op, asm_op, suffix, sign)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)

ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define arch_atomic_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))

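/*
 * Illustrative only (not part of this header): a typical caller-side
 * compare-and-swap retry loop built on the wrappers above:
 *
 *	int old = arch_atomic_read(v), prev;
 *
 *	while ((prev = arch_atomic_cmpxchg(v, old, old * 2)) != old)
 *		old = prev;
 *
 * (doubling the counter is just a stand-in for any pure function of
 * the old value).
 */
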
/*
 * Don't want to override the generic atomic_try_cmpxchg_acquire, because
 * we add a lock hint to the lwarx, which may not be wanted for the
 * _acquire case (and is not used by the other _acquire variants so it
 * would be a surprise).
 */
static __always_inline bool
arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2,%5	# atomic_try_cmpxchg_acquire		\n"
"	cmpw	0,%0,%3							\n"
"	bne-	2f							\n"
"	stwcx.	%4,0,%2							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"2:									\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}

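/*
 * Illustrative caller-side sketch (lock structure and constant names
 * are hypothetical): a spinlock fast path can use this to grab the
 * lock word with the lock hint and acquire ordering:
 *
 *	int old = 0;
 *
 *	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &old, LOCKED_VAL)))
 *		return;			// uncontended: lock taken
 *	slow_path(lock, old);		// 'old' holds the value observed
 */
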
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add%I2c	%0,%0,%2 \n"
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	sub%I2c	%0,%0,%2 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "rI" (a), "r" (u)
	: "cc", "memory", "xer");

	return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

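/*
 * Illustrative only: the generic atomic_add_unless()/atomic_inc_not_zero()
 * helpers are derived from this primitive, roughly:
 *
 *	static inline bool atomic_inc_not_zero(atomic_t *v)
 *	{
 *		return arch_atomic_fetch_add_unless(v, 1, 0) != 0;
 *	}
 */
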
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive

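/*
 * Illustrative only (hypothetical counter name): a trylock-style
 * consumer can use this to take a token without letting the count
 * go negative:
 *
 *	if (arch_atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;		// no tokens left, counter unchanged
 */
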
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)	\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op "	%0,%2,%0\n"					\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op "	%0,%2,%0\n"					\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op "	%1,%3,%0\n"					\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

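/*
 * Note on operand order: with asm_op "subf" the template expands to
 * "subf %0,%2,%0", i.e. t = t - a, because subf rD,rA,rB computes
 * rB - rA; that is why the 64-bit sub maps to "subf" rather than "sub".
 */
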
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#define arch_atomic64_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic64_cmpxchg_acquire(v, o, n) \
	arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
#define arch_atomic64_xchg_relaxed(v, new) \
	arch_xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))

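/*
 * Illustrative only (hypothetical object/field names): the usual
 * "take a reference only while the object is still live" pattern:
 *
 *	if (!arch_atomic64_inc_not_zero(&obj->refcount))
 *		return NULL;		// already zero: object being freed
 */
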
#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */