/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }
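
/*
 * Definition and initialization, e.g. (the variable name is purely
 * illustrative):
 *
 *	static atomic64_t event_count = ATOMIC64_INIT(0);
 */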

/*
 * Read an atomic64_t non-atomically.
 *
 * This is intended to be used in cases where a subsequent atomic operation
 * will handle the torn value, and can be used to prime the first iteration
 * of unconditional try_cmpxchg() loops, e.g.:
 *
 *   s64 val = arch_atomic64_read_nonatomic(v);
 *   do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 *
 * This is NOT safe to use where the value is not always checked by a
 * subsequent atomic operation, such as in conditional try_cmpxchg() loops
 * that can break before the atomic operation, e.g.:
 *
 *   s64 val = arch_atomic64_read_nonatomic(v);
 *   do {
 *       if (condition(val))
 *           break;
 *   } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 */
static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
{
	/* See comment in arch_atomic_read(). */
	return __READ_ONCE(v->counter);
}

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif
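
/*
 * The atomic64_*() helpers declared via these macros are implemented out
 * of line in assembly (see arch/x86/lib/atomic64_cx8_32.S and
 * atomic64_386_32.S) and use an ad-hoc, register-based calling convention
 * rather than the normal C ABI: the atomic64_t pointer is typically passed
 * in %esi ("S") and 64-bit values live in %edx:%eax (the "A" constraint)
 * or are split across other register pairs, as visible in the asm
 * constraints below. The variadic prototype keeps the compiler from making
 * ABI assumptions about the arguments.
 */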

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %c[func]" \
		     : ALT_OUTPUT_SP(out) \
		     : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
			   ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif
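
/*
 * When the build baseline guarantees CMPXCHG8B (CONFIG_X86_CMPXCHG64), the
 * _cx8 variant is called directly. Otherwise alternative_call() patches
 * the call site at boot: CPUs with X86_FEATURE_CX8 get the cmpxchg8b-based
 * _cx8 routine, older ones fall back to the _386 routine. In
 * __alternative_atomic64(f, g, ...), 'f' names the _386 fallback and 'g'
 * the _cx8 variant, which is how arch_atomic64_inc() below can pair
 * inc_386 with inc_return_cx8.
 */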

#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
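
/*
 * A conditional-update sketch (the 'limit' bound is hypothetical). Note
 * that, per the comment above arch_atomic64_read_nonatomic(), a loop that
 * can break before the atomic operation must prime 'old' with the atomic
 * arch_atomic64_read():
 *
 *	s64 old = arch_atomic64_read(v);
 *	do {
 *		if (old >= limit)
 *			break;
 *	} while (!arch_atomic64_try_cmpxchg(v, &old, old + 1));
 */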

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
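
/*
 * The _return variants return the new value, e.g. (purely illustrative,
 * reusing the hypothetical event_count from above):
 *
 *	if (arch_atomic64_add_return(1, &event_count) == 1)
 *		; // first increment observed
 */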

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
}

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

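/*
 * Atomically add @a to @v, unless @v was already @u; returns non-zero if
 * the addition was performed.
 */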
static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless

static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

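/*
 * Atomically decrement @v unless the result would be negative; returns the
 * old value minus one whether or not the decrement happened.
 */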
static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64
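
/*
 * The remaining operations are built from unconditional try_cmpxchg()
 * loops. The initial read may be torn, which is safe here because every
 * loop iteration re-checks the value atomically; see the comment above
 * arch_atomic64_read_nonatomic().
 */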

static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

	return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

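/*
 * fetch_sub reuses the fetch_add loop with a negated operand, the usual
 * two's complement identity for s64.
 */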
#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */