/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used, since a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
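/*
 * Illustration (not part of the original header): with the 64-bit token,
 * a lock held by CPU 5 reads as 0x80000005, so a non-zero slock value both
 * marks the lock as taken and identifies the holding virtual CPU; the
 * 32-bit token is simply 1 and carries no owner information.
 */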

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
/*
 * The low bit of the lppaca yield_count is set while the virtual processor
 * is preempted, so an odd value means it is not currently running on a
 * physical processor.
 */
static inline bool vcpu_is_preempted(int cpu)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
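/*
 * Roughly what the larx/stcx. sequence above does, in C-like pseudocode
 * (an illustrative sketch, not an alternative implementation):
 *
 *	old = lock->slock;		// lwarx: load with reservation
 *	if (old == 0)
 *		lock->slock = token;	// stwcx.: succeeds only if the
 *					// reservation still holds, else retry
 *	// acquire barrier, then return old (0 means we now own the lock)
 */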

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif
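/*
 * For reference, an approximate sketch of what __spin_yield() does on
 * SPLPAR (the real implementation lives in arch/powerpc/lib/locks.c;
 * this is a from-memory outline, not a copy of it):
 *
 *	holder_cpu = lock->slock & 0xffff;	// low bits identify the holder
 *	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
 *	if (yield_count & 1)			// holder is preempted
 *		confer our timeslice to holder_cpu via the H_CONFER hcall;
 */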

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

/*
 * Like arch_spin_lock(), but while spinning we temporarily restore the
 * interrupt state the caller saved in @flags, so interrupts are not left
 * disabled for the whole wait.
 */
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
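/*
 * Minimal usage sketch (illustrative only; real kernel code normally goes
 * through the generic spin_lock()/spin_unlock() wrappers rather than the
 * arch_* primitives):
 *
 *	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&demo_lock);
 *	// ... critical section ...
 *	arch_spin_unlock(&demo_lock);
 */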

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
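/*
 * Illustrative sketch of that pattern using the generic rwlock wrappers
 * (not part of this header; names are examples only):
 *
 *	static DEFINE_RWLOCK(demo_rwlock);
 *
 *	// reader, may run in interrupt context
 *	read_lock(&demo_rwlock);
 *	// ...
 *	read_unlock(&demo_rwlock);
 *
 *	// writer, process context: must block interrupts on this CPU
 *	write_lock_irqsave(&demo_rwlock, flags);
 *	// ...
 *	write_unlock_irqrestore(&demo_rwlock, flags);
 */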

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
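/*
 * Encoding of the rwlock word, restating what the code below relies on:
 * 0 means unlocked, a positive value is the number of readers holding the
 * lock, and a negative value (WRLOCK_TOKEN) means it is write-locked.
 */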

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}
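/*
 * Minimal usage sketch for the arch-level rwlock primitives (illustrative
 * only; normal kernel code uses the generic rwlock API instead):
 *
 *	static arch_rwlock_t demo_rw = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&demo_rw);	// many readers may hold this at once
 *	// ... read-side critical section ...
 *	arch_read_unlock(&demo_rw);
 *
 *	arch_write_lock(&demo_rw);	// exclusive
 *	// ... write-side critical section ...
 *	arch_write_unlock(&demo_rw);
 */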

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */