/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * A 32-bit int is used for the lock word, as a full 64-bit word is
 * not necessary.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
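/*
 * Illustration (not part of the interface): with the encoding above, a
 * 64-bit lock held by CPU 5 contains the token 0x80000005, while an
 * unlocked lock contains 0. On 32-bit the token is simply 1.
 */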

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	/* lwarx with EH=1 hints that this reservation is for a lock acquisition */
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,1\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}
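/*
 * Illustrative use only (callers normally go through the generic spinlock
 * API rather than these arch hooks):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&lock);
 *	}
 */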

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor. Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held. Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif
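/*
 * A rough sketch of what the SPLPAR yield path does (the real
 * implementation lives outside this header; the details below are an
 * approximation, using the yield helpers from asm/paravirt.h):
 *
 *	void splpar_spin_yield(arch_spinlock_t *lock)
 *	{
 *		unsigned int lock_value, holder_cpu, yield_count;
 *
 *		lock_value = lock->slock;
 *		if (lock_value == 0)
 *			return;			// became free meanwhile
 *		holder_cpu = lock_value & 0xffff;	// low bits: holder's CPU id
 *		yield_count = yield_count_of(holder_cpu);
 *		if ((yield_count & 1) == 0)
 *			return;			// holder is running; keep spinning
 *		if (lock->slock != lock_value)
 *			return;			// lock changed hands
 *		yield_to_preempted(holder_cpu, yield_count);
 *	}
 */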

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

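/*
 * While waiting, HMT_low() drops this hardware thread's SMT priority so
 * sibling threads on the core get more issue slots; HMT_medium() restores
 * normal priority before we try the lock again.
 */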
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

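/*
 * Variant used when the caller has already disabled interrupts: while the
 * lock is contended we briefly restore the caller's saved flags (possibly
 * re-enabling interrupts) so pending interrupts can be serviced, then
 * disable them again before retrying.
 */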
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
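/*
 * Note that unlock only needs a release barrier (to order the critical
 * section before the store) followed by a plain store of 0: no atomic
 * sequence is required because only the lock holder writes here.
 */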

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
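/*
 * Lock word encoding: 0 means unlocked, a positive value is the number of
 * readers currently holding the lock, and a negative value (WRLOCK_TOKEN)
 * means it is write-locked.
 */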

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 * On 64-bit, lwarx zero-extends the loaded word, so the (negative) write
 * token would look positive without the explicit sign extension below.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1,1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
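/*
 * Worked example: with two readers holding the lock (lock == 2), a third
 * reader's trylock stores 3 and returns 3 (> 0, success); with a writer
 * holding it (lock < 0), the increment yields a value <= 0, the ble-
 * branch skips the store, and the attempt fails.
 */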

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,1\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
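/*
 * read_unlock must decrement the reader count atomically (hence the
 * lwarx/stwcx. loop) because other readers may take or release the lock
 * concurrently; write_unlock below can use a plain store of 0, as the
 * single writer is the only possible owner.
 */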

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

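/*
 * The relax hooks are called by generic lock code while busy-waiting;
 * mapping them to the yield helpers lets a waiter give up its timeslice
 * on a shared-processor system instead of spinning uselessly.
 */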
#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */