Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * S390 version
 * Copyright IBM Corp. 1999
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

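/*
 * Lock word value a cpu stores into a lock it holds: the per-cpu
 * spinlock_lockval kept in the lowcore, which is set up as ~cpu (see
 * arch_spin_lockval() below).  0 always means "unlocked"; any other
 * value identifies the owning cpu.
 */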
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

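/*
 * Retry count used by the out-of-line wait/trylock loops before they
 * consider giving up the cpu; defined and made tunable in
 * arch/s390/lib/spinlock.c.
 */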
extern int spin_retry;

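/*
 * Atomically replace *lock with new if it still contains old; returns
 * non-zero on success.  __sync_bool_compare_and_swap() is the gcc atomic
 * builtin and acts as a full memory barrier; on s390 it is normally
 * emitted as a single compare-and-swap (cs) instruction.
 */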
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness guarantees; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

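/*
 * Contended cases are handled out of line (see arch/s390/lib/spinlock.c).
 * arch_lock_relax() takes the holder's lock value and, when running under
 * a hypervisor, may direct a yield at the owning cpu instead of burning
 * cycles on this one.
 */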
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

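/*
 * The lock value is ~cpu rather than the cpu number itself so that the
 * value for cpu 0 is still non-zero, i.e. distinguishable from an
 * unlocked lock.
 */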
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

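/*
 * Fast-path attempt: re-read the lock (barrier() stops the compiler from
 * reusing a stale value) and only issue the compare-and-swap when the
 * lock looks free, so a lock that is visibly held is not hammered with
 * compare-and-swap operations.
 */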
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

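/*
 * Release: the barrier (a serializing bcr, see __ASM_BARRIER in
 * asm/barrier.h) orders all accesses made inside the critical section
 * before the plain store of 0 that frees the lock.
 */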
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

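/*
 * Poll until the lock is observed free, relaxing (and possibly yielding
 * towards the current holder) between samples.  This does not acquire
 * the lock.
 */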
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

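/*
 * Lock word layout: bit 31 set (0x80000000) means a writer holds the
 * lock, otherwise the word counts the current readers; 0 means the lock
 * is free.
 */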
/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

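/*
 * Single-shot fast paths: a reader bumps the count with a
 * compare-and-swap as long as no writer bit is set (old >= 0); a writer
 * only succeeds when the lock word is completely free.
 */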
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

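/*
 * With the z196 interlocked-access facility the rwlock fast paths can use
 * the atomic fetch-and-op instructions LOAD AND OR (lao), LOAD AND AND
 * (lan) and LOAD AND ADD (laa) instead of compare-and-swap loops.
 */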
#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

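/*
 * __RAW_LOCK does the atomic operation first and serializes afterwards
 * (acquire ordering); __RAW_UNLOCK serializes first and then does the
 * atomic operation (release ordering).  "bcr 14,0" is the fast-BCR
 * serialization barrier.  Both return the previous lock value.
 */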
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		"bcr	14,0\n"				\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

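/*
 * Reader fast path: unconditionally add 1; if the previous value had the
 * writer bit set (negative when viewed as an int), fall back to
 * _raw_read_lock_wait().  Writer fast path: OR in the writer bit and pass
 * the previous value to _raw_write_lock_wait() so the slow path knows
 * what was already held.
 */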
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

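/*
 * Without the interlocked-access facility the fast paths fall back to
 * compare-and-swap.  Note that arch_read_unlock() must loop: the retry
 * covers concurrent readers racing to update the same count.
 */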
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		__ASM_BARRIER
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

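/*
 * rw->owner caches the lock value of the cpu holding the lock for
 * writing (readers leave it alone), so the relax helpers can ask
 * arch_lock_relax() to yield towards that cpu while spinning.
 */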
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */