#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 * An int is used for the lock word; a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __spin_trylock(lock) == 0;
}
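
/*
 * Illustrative (hypothetical) caller, showing the convention that a
 * zero return from __spin_trylock() means the lock was taken; kernel
 * code normally reaches these through the generic spin_trylock()
 * wrappers rather than calling them directly:
 *
 *	if (__raw_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		__raw_spin_unlock(&my_lock);
 *	}
 */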

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
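/* For example, while CPU 3 holds a lock, the lock word reads 0x80000003. */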

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR	(get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

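/*
 * Spin until the lock is acquired.  While waiting, lower the SMT
 * thread priority (HMT_low) and, on shared-processor systems, ask
 * the hypervisor to give our timeslice to the lock holder.
 */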
static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
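/*
 * Like __raw_spin_lock(), but restore the caller's interrupt state
 * from 'flags' (which may have interrupts enabled) while busy-waiting,
 * so interrupts are not kept disabled for the whole spin; they are
 * disabled again before each retry of the trylock.
 */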
static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
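/*
 * Release the lock with a plain store; the barrier emitted by
 * LWSYNC_ON_SMP on SMP kernels orders the accesses in the critical
 * section before the store that clears the lock word.
 */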
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}
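/* Spin until the lock is observed free, without trying to acquire it. */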
#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
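/*
 * The lock word encodes the state: 0 means unlocked, a positive
 * value is the number of readers holding the lock, and a negative
 * value (WRLOCK_TOKEN) means it is write-locked.
 */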

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
199" stwcx. %1,0,%2\n\
200 bne- 1b\n\
201 isync\n\
2022:" : "=&r" (tmp)
203 : "r" (token), "r" (&rw->lock)
204 : "cr0", "memory");
205
206 return tmp;
207}
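/*
 * The rwlock slow paths mirror __raw_spin_lock(): spin at low SMT
 * priority, yielding to the hypervisor on shared-processor systems,
 * and retry the trylock once the lock looks free.
 */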
static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}
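
/*
 * Illustrative (hypothetical) caller for the wrappers above; kernel
 * code normally uses the generic read_trylock()/write_trylock():
 *
 *	if (__raw_write_trylock(&my_rwlock)) {
 *		... exclusive access ...
 *		__raw_write_unlock(&my_rwlock);
 *	}
 */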
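/*
 * Atomically decrement the reader count; the LWSYNC_ON_SMP barrier
 * orders the critical section before the release on SMP.
 */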
static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */