Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1#ifndef _S390_RWSEM_H
2#define _S390_RWSEM_H
3
4/*
5 * S390 version
6 * Copyright IBM Corp. 2002
7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
10 */
11
12/*
13 *
14 * The MSW of the count is the negated number of active writers and waiting
15 * lockers, and the LSW is the total number of active locks
16 *
17 * The lock count is initialized to 0 (no active and no waiting lockers).
18 *
19 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
20 * uncontended lock. This can be determined because XADD returns the old value.
21 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe readers) waiting (in which case it goes to
23 * sleep).
24 *
25 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
26 * be extended to 65534 by manually checking the whole MSW rather than relying
27 * on the S flag.
28 *
29 * The value of ACTIVE_BIAS supports up to 65535 active processes.
30 *
31 * This should be totally fair - if anything is waiting, a process that wants a
32 * lock will go to the back of the queue. When the currently active lock is
33 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
35 * front, then they'll all be woken up, but no other readers will be.
36 */
37
38#ifndef _LINUX_RWSEM_H
39#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
40#endif
41
42#ifndef CONFIG_64BIT
43#define RWSEM_UNLOCKED_VALUE 0x00000000
44#define RWSEM_ACTIVE_BIAS 0x00000001
45#define RWSEM_ACTIVE_MASK 0x0000ffff
46#define RWSEM_WAITING_BIAS (-0x00010000)
47#else /* CONFIG_64BIT */
48#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
49#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
50#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
51#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
52#endif /* CONFIG_64BIT */
53#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
54#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
55
56/*
57 * lock for reading
58 */
59static inline void __down_read(struct rw_semaphore *sem)
60{
61 signed long old, new;
62
63 asm volatile(
64#ifndef CONFIG_64BIT
65 " l %0,%2\n"
66 "0: lr %1,%0\n"
67 " ahi %1,%4\n"
68 " cs %0,%1,%2\n"
69 " jl 0b"
70#else /* CONFIG_64BIT */
71 " lg %0,%2\n"
72 "0: lgr %1,%0\n"
73 " aghi %1,%4\n"
74 " csg %0,%1,%2\n"
75 " jl 0b"
76#endif /* CONFIG_64BIT */
77 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
78 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
79 : "cc", "memory");
80 if (old < 0)
81 rwsem_down_read_failed(sem);
82}
83
84/*
85 * trylock for reading -- returns 1 if successful, 0 if contention
86 */
87static inline int __down_read_trylock(struct rw_semaphore *sem)
88{
89 signed long old, new;
90
91 asm volatile(
92#ifndef CONFIG_64BIT
93 " l %0,%2\n"
94 "0: ltr %1,%0\n"
95 " jm 1f\n"
96 " ahi %1,%4\n"
97 " cs %0,%1,%2\n"
98 " jl 0b\n"
99 "1:"
100#else /* CONFIG_64BIT */
101 " lg %0,%2\n"
102 "0: ltgr %1,%0\n"
103 " jm 1f\n"
104 " aghi %1,%4\n"
105 " csg %0,%1,%2\n"
106 " jl 0b\n"
107 "1:"
108#endif /* CONFIG_64BIT */
109 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
110 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
111 : "cc", "memory");
112 return old >= 0 ? 1 : 0;
113}
114
115/*
116 * lock for writing
117 */
118static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
119{
120 signed long old, new, tmp;
121
122 tmp = RWSEM_ACTIVE_WRITE_BIAS;
123 asm volatile(
124#ifndef CONFIG_64BIT
125 " l %0,%2\n"
126 "0: lr %1,%0\n"
127 " a %1,%4\n"
128 " cs %0,%1,%2\n"
129 " jl 0b"
130#else /* CONFIG_64BIT */
131 " lg %0,%2\n"
132 "0: lgr %1,%0\n"
133 " ag %1,%4\n"
134 " csg %0,%1,%2\n"
135 " jl 0b"
136#endif /* CONFIG_64BIT */
137 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
138 : "Q" (sem->count), "m" (tmp)
139 : "cc", "memory");
140 if (old != 0)
141 rwsem_down_write_failed(sem);
142}
143
144static inline void __down_write(struct rw_semaphore *sem)
145{
146 __down_write_nested(sem, 0);
147}
148
149/*
150 * trylock for writing -- returns 1 if successful, 0 if contention
151 */
152static inline int __down_write_trylock(struct rw_semaphore *sem)
153{
154 signed long old;
155
156 asm volatile(
157#ifndef CONFIG_64BIT
158 " l %0,%1\n"
159 "0: ltr %0,%0\n"
160 " jnz 1f\n"
161 " cs %0,%3,%1\n"
162 " jl 0b\n"
163#else /* CONFIG_64BIT */
164 " lg %0,%1\n"
165 "0: ltgr %0,%0\n"
166 " jnz 1f\n"
167 " csg %0,%3,%1\n"
168 " jl 0b\n"
169#endif /* CONFIG_64BIT */
170 "1:"
171 : "=&d" (old), "=Q" (sem->count)
172 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
173 : "cc", "memory");
174 return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
175}
176
177/*
178 * unlock after reading
179 */
180static inline void __up_read(struct rw_semaphore *sem)
181{
182 signed long old, new;
183
184 asm volatile(
185#ifndef CONFIG_64BIT
186 " l %0,%2\n"
187 "0: lr %1,%0\n"
188 " ahi %1,%4\n"
189 " cs %0,%1,%2\n"
190 " jl 0b"
191#else /* CONFIG_64BIT */
192 " lg %0,%2\n"
193 "0: lgr %1,%0\n"
194 " aghi %1,%4\n"
195 " csg %0,%1,%2\n"
196 " jl 0b"
197#endif /* CONFIG_64BIT */
198 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
199 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
200 : "cc", "memory");
201 if (new < 0)
202 if ((new & RWSEM_ACTIVE_MASK) == 0)
203 rwsem_wake(sem);
204}
205
206/*
207 * unlock after writing
208 */
209static inline void __up_write(struct rw_semaphore *sem)
210{
211 signed long old, new, tmp;
212
213 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
214 asm volatile(
215#ifndef CONFIG_64BIT
216 " l %0,%2\n"
217 "0: lr %1,%0\n"
218 " a %1,%4\n"
219 " cs %0,%1,%2\n"
220 " jl 0b"
221#else /* CONFIG_64BIT */
222 " lg %0,%2\n"
223 "0: lgr %1,%0\n"
224 " ag %1,%4\n"
225 " csg %0,%1,%2\n"
226 " jl 0b"
227#endif /* CONFIG_64BIT */
228 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
229 : "Q" (sem->count), "m" (tmp)
230 : "cc", "memory");
231 if (new < 0)
232 if ((new & RWSEM_ACTIVE_MASK) == 0)
233 rwsem_wake(sem);
234}
235
236/*
237 * downgrade write lock to read lock
238 */
239static inline void __downgrade_write(struct rw_semaphore *sem)
240{
241 signed long old, new, tmp;
242
243 tmp = -RWSEM_WAITING_BIAS;
244 asm volatile(
245#ifndef CONFIG_64BIT
246 " l %0,%2\n"
247 "0: lr %1,%0\n"
248 " a %1,%4\n"
249 " cs %0,%1,%2\n"
250 " jl 0b"
251#else /* CONFIG_64BIT */
252 " lg %0,%2\n"
253 "0: lgr %1,%0\n"
254 " ag %1,%4\n"
255 " csg %0,%1,%2\n"
256 " jl 0b"
257#endif /* CONFIG_64BIT */
258 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
259 : "Q" (sem->count), "m" (tmp)
260 : "cc", "memory");
261 if (new > 1)
262 rwsem_downgrade_wake(sem);
263}
264
265/*
266 * implement atomic add functionality
267 */
268static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
269{
270 signed long old, new;
271
272 asm volatile(
273#ifndef CONFIG_64BIT
274 " l %0,%2\n"
275 "0: lr %1,%0\n"
276 " ar %1,%4\n"
277 " cs %0,%1,%2\n"
278 " jl 0b"
279#else /* CONFIG_64BIT */
280 " lg %0,%2\n"
281 "0: lgr %1,%0\n"
282 " agr %1,%4\n"
283 " csg %0,%1,%2\n"
284 " jl 0b"
285#endif /* CONFIG_64BIT */
286 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
287 : "Q" (sem->count), "d" (delta)
288 : "cc", "memory");
289}
290
291/*
292 * implement exchange and add functionality
293 */
294static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
295{
296 signed long old, new;
297
298 asm volatile(
299#ifndef CONFIG_64BIT
300 " l %0,%2\n"
301 "0: lr %1,%0\n"
302 " ar %1,%4\n"
303 " cs %0,%1,%2\n"
304 " jl 0b"
305#else /* CONFIG_64BIT */
306 " lg %0,%2\n"
307 "0: lgr %1,%0\n"
308 " agr %1,%4\n"
309 " csg %0,%1,%2\n"
310 " jl 0b"
311#endif /* CONFIG_64BIT */
312 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
313 : "Q" (sem->count), "d" (delta)
314 : "cc", "memory");
315 return new;
316}
317
318#endif /* _S390_RWSEM_H */