Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1#ifndef __ASM_SPINLOCK_H
2#define __ASM_SPINLOCK_H
3#ifdef __KERNEL__
4
5/*
6 * Simple spin lock operations.
7 *
8 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
9 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
10 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
11 * Rework to support virtual processors
12 *
13 * Type of int is used as a full 64b word is not necessary.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 * (the type definitions are in asm/spinlock_types.h)
21 */
22#include <linux/irqflags.h>
23#ifdef CONFIG_PPC64
24#include <asm/paca.h>
25#include <asm/hvcall.h>
26#endif
27#include <asm/synch.h>
28#include <asm/ppc-opcode.h>
29#include <asm/asm-405.h>
30
31#ifdef CONFIG_PPC64
32/* use 0x800000yy when locked, where yy == CPU number */
33#ifdef __BIG_ENDIAN__
34#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
35#else
36#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
37#endif
38#else
39#define LOCK_TOKEN 1
40#endif
41
42#ifdef CONFIG_PPC_PSERIES
43#define vcpu_is_preempted vcpu_is_preempted
44static inline bool vcpu_is_preempted(int cpu)
45{
46 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
47 return false;
48 return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
49}
50#endif
51
52static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
53{
54 return lock.slock == 0;
55}
56
57static inline int arch_spin_is_locked(arch_spinlock_t *lock)
58{
59 smp_mb();
60 return !arch_spin_value_unlocked(*lock);
61}
62
63/*
64 * This returns the old value in the lock, so we succeeded
65 * in getting the lock if the return value is 0.
66 */
67static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
68{
69 unsigned long tmp, token;
70
71 token = LOCK_TOKEN;
72 __asm__ __volatile__(
73"1: " PPC_LWARX(%0,0,%2,1) "\n\
74 cmpwi 0,%0,0\n\
75 bne- 2f\n\
76 stwcx. %1,0,%2\n\
77 bne- 1b\n"
78 PPC_ACQUIRE_BARRIER
79"2:"
80 : "=&r" (tmp)
81 : "r" (token), "r" (&lock->slock)
82 : "cr0", "memory");
83
84 return tmp;
85}
86
87static inline int arch_spin_trylock(arch_spinlock_t *lock)
88{
89 return __arch_spin_trylock(lock) == 0;
90}
91
92/*
93 * On a system with shared processors (that is, where a physical
94 * processor is multiplexed between several virtual processors),
95 * there is no point spinning on a lock if the holder of the lock
96 * isn't currently scheduled on a physical processor. Instead
97 * we detect this situation and ask the hypervisor to give the
98 * rest of our timeslice to the lock holder.
99 *
100 * So that we can tell which virtual processor is holding a lock,
101 * we put 0x80000000 | smp_processor_id() in the lock when it is
102 * held. Conveniently, we have a word in the paca that holds this
103 * value.
104 */
105
106#if defined(CONFIG_PPC_SPLPAR)
107/* We only yield to the hypervisor if we are in shared processor mode */
108#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
109extern void __spin_yield(arch_spinlock_t *lock);
110extern void __rw_yield(arch_rwlock_t *lock);
111#else /* SPLPAR */
112#define __spin_yield(x) barrier()
113#define __rw_yield(x) barrier()
114#define SHARED_PROCESSOR 0
115#endif
116
117static inline void arch_spin_lock(arch_spinlock_t *lock)
118{
119 while (1) {
120 if (likely(__arch_spin_trylock(lock) == 0))
121 break;
122 do {
123 HMT_low();
124 if (SHARED_PROCESSOR)
125 __spin_yield(lock);
126 } while (unlikely(lock->slock != 0));
127 HMT_medium();
128 }
129}
130
131static inline
132void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
133{
134 unsigned long flags_dis;
135
136 while (1) {
137 if (likely(__arch_spin_trylock(lock) == 0))
138 break;
139 local_save_flags(flags_dis);
140 local_irq_restore(flags);
141 do {
142 HMT_low();
143 if (SHARED_PROCESSOR)
144 __spin_yield(lock);
145 } while (unlikely(lock->slock != 0));
146 HMT_medium();
147 local_irq_restore(flags_dis);
148 }
149}
150#define arch_spin_lock_flags arch_spin_lock_flags
151
152static inline void arch_spin_unlock(arch_spinlock_t *lock)
153{
154 __asm__ __volatile__("# arch_spin_unlock\n\t"
155 PPC_RELEASE_BARRIER: : :"memory");
156 lock->slock = 0;
157}
158
159/*
160 * Read-write spinlocks, allowing multiple readers
161 * but only one writer.
162 *
163 * NOTE! it is quite common to have readers in interrupts
164 * but no interrupt writers. For those circumstances we
165 * can "mix" irq-safe locks - any writer needs to get a
166 * irq-safe write-lock, but readers can get non-irqsafe
167 * read-locks.
168 */
169
170#ifdef CONFIG_PPC64
171#define __DO_SIGN_EXTEND "extsw %0,%0\n"
172#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
173#else
174#define __DO_SIGN_EXTEND
175#define WRLOCK_TOKEN (-1)
176#endif
177
178/*
179 * This returns the old value in the lock + 1,
180 * so we got a read lock if the return value is > 0.
181 */
182static inline long __arch_read_trylock(arch_rwlock_t *rw)
183{
184 long tmp;
185
186 __asm__ __volatile__(
187"1: " PPC_LWARX(%0,0,%1,1) "\n"
188 __DO_SIGN_EXTEND
189" addic. %0,%0,1\n\
190 ble- 2f\n"
191 PPC405_ERR77(0,%1)
192" stwcx. %0,0,%1\n\
193 bne- 1b\n"
194 PPC_ACQUIRE_BARRIER
195"2:" : "=&r" (tmp)
196 : "r" (&rw->lock)
197 : "cr0", "xer", "memory");
198
199 return tmp;
200}
201
202/*
203 * This returns the old value in the lock,
204 * so we got the write lock if the return value is 0.
205 */
206static inline long __arch_write_trylock(arch_rwlock_t *rw)
207{
208 long tmp, token;
209
210 token = WRLOCK_TOKEN;
211 __asm__ __volatile__(
212"1: " PPC_LWARX(%0,0,%2,1) "\n\
213 cmpwi 0,%0,0\n\
214 bne- 2f\n"
215 PPC405_ERR77(0,%1)
216" stwcx. %1,0,%2\n\
217 bne- 1b\n"
218 PPC_ACQUIRE_BARRIER
219"2:" : "=&r" (tmp)
220 : "r" (token), "r" (&rw->lock)
221 : "cr0", "memory");
222
223 return tmp;
224}
225
226static inline void arch_read_lock(arch_rwlock_t *rw)
227{
228 while (1) {
229 if (likely(__arch_read_trylock(rw) > 0))
230 break;
231 do {
232 HMT_low();
233 if (SHARED_PROCESSOR)
234 __rw_yield(rw);
235 } while (unlikely(rw->lock < 0));
236 HMT_medium();
237 }
238}
239
240static inline void arch_write_lock(arch_rwlock_t *rw)
241{
242 while (1) {
243 if (likely(__arch_write_trylock(rw) == 0))
244 break;
245 do {
246 HMT_low();
247 if (SHARED_PROCESSOR)
248 __rw_yield(rw);
249 } while (unlikely(rw->lock != 0));
250 HMT_medium();
251 }
252}
253
254static inline int arch_read_trylock(arch_rwlock_t *rw)
255{
256 return __arch_read_trylock(rw) > 0;
257}
258
259static inline int arch_write_trylock(arch_rwlock_t *rw)
260{
261 return __arch_write_trylock(rw) == 0;
262}
263
264static inline void arch_read_unlock(arch_rwlock_t *rw)
265{
266 long tmp;
267
268 __asm__ __volatile__(
269 "# read_unlock\n\t"
270 PPC_RELEASE_BARRIER
271"1: lwarx %0,0,%1\n\
272 addic %0,%0,-1\n"
273 PPC405_ERR77(0,%1)
274" stwcx. %0,0,%1\n\
275 bne- 1b"
276 : "=&r"(tmp)
277 : "r"(&rw->lock)
278 : "cr0", "xer", "memory");
279}
280
281static inline void arch_write_unlock(arch_rwlock_t *rw)
282{
283 __asm__ __volatile__("# write_unlock\n\t"
284 PPC_RELEASE_BARRIER: : :"memory");
285 rw->lock = 0;
286}
287
288#define arch_spin_relax(lock) __spin_yield(lock)
289#define arch_read_relax(lock) __rw_yield(lock)
290#define arch_write_relax(lock) __rw_yield(lock)
291
292/* See include/linux/spinlock.h */
293#define smp_mb__after_spinlock() smp_mb()
294
295#endif /* __KERNEL__ */
296#endif /* __ASM_SPINLOCK_H */