/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

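/*
 * atomic_read() - an aligned 32-bit load is atomic on s390; the
 * inline asm forces a single L instruction so the compiler cannot
 * tear the access or cache a stale value.
 */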
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}
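/*
 * atomic_set() - likewise, a single aligned ST is an atomic store.
 */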
static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

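/*
 * __atomic_add_barrier() (from asm/atomic_ops.h) returns the value
 * the counter held *before* the add, so adding i once more yields the
 * new value that atomic_add_return() must report.
 */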
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

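/*
 * atomic_fetch_add() wants the old value, which is exactly what the
 * barrier-qualified primitive already returns.
 */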
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

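/*
 * On z196 and newer, compile-time constants that fit a signed 8-bit
 * immediate (-128..127) let __atomic_add_const() use an interlocked
 * add-immediate instruction (presumably ASI) instead of a
 * load-and-add, hence the (i > -129) && (i < 128) range check below.
 */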
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

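/*
 * Derived operations, all expressed in terms of the add primitives
 * above. A hypothetical caller (illustrative only, not part of this
 * file) might look like:
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);
 *	if (atomic_dec_and_test(&refcnt))
 *		release_object();
 */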
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

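/*
 * ATOMIC_OPS() generates, for each of and/or/xor, a void atomic_<op>()
 * without barrier semantics and an atomic_fetch_<op>() that returns
 * the old value with full barrier semantics.
 */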
#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

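/*
 * Exchange operations are delegated to the generic xchg()/cmpxchg()
 * machinery from asm/cmpxchg.h.
 */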
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

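/*
 * __atomic_add_unless() - add a to v unless v == u. The cmpxchg loop
 * retries until either the add succeeds or the excluded value u is
 * observed; the old value is returned in either case.
 */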
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

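/*
 * 64-bit counterparts. The structure mirrors the 32-bit code above;
 * only the instructions change to their 64-bit forms (LG/STG instead
 * of L/ST).
 */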
#define ATOMIC64_INIT(i)  { (i) }

static inline long atomic64_read(const atomic64_t *v)
{
	long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, &v->counter) + i;
}

static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, &v->counter);
}

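/*
 * As in atomic_add(), small constant operands can use an interlocked
 * add-immediate instruction (presumably AGSI for the 64-bit case) on
 * z196 and newer machines.
 */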
static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic64_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic64_add(i, &v->counter);
}

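/* Exchange, again via the generic xchg()/cmpxchg() machinery. */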
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return __atomic64_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	__atomic64_##op(i, &v->counter);				\
}									\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, &v->counter);		\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

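/*
 * atomic64_add_unless() - add i to v unless v == u. Note that unlike
 * __atomic_add_unless() above, this returns a boolean: non-zero if
 * the add was performed, zero if the excluded value u was found.
 */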
static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

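/*
 * atomic64_dec_if_positive() - decrement only if the result would be
 * non-negative. Returns the decremented value; if the counter was
 * already <= 0 the returned value is negative and v is unchanged.
 */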
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

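/* Derived 64-bit operations, mirroring the 32-bit set above. */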
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */