#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

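/*
 * For illustration only -- nothing below is defined in this header.
 * LOCK_SECTION_START()/LOCK_SECTION_END are string fragments meant to be
 * pasted into inline assembly so that a rarely-taken contention slowpath
 * is emitted into a separate subsection, keeping the fastpath compact.
 * A hypothetical sketch:
 *
 *      asm volatile("1: ... fastpath attempt, jump to 2f on failure ...\n"
 *                   LOCK_SECTION_START("")
 *                   "2: ... spin/backoff, then retry at 1b ...\n"
 *                   LOCK_SECTION_END
 *                   : "+m" (lock->raw_lock));
 */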
/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
# define raw_spin_lock_init(lock)                               \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init((lock), #lock, &__key);            \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

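/*
 * Usage sketch, for illustration only (the identifiers below are made
 * up): a raw spinlock embedded in a dynamically set-up object is
 * initialized with raw_spin_lock_init() before first use; on
 * CONFIG_DEBUG_SPINLOCK builds this also registers a lock class keyed
 * off the stringified lock name.
 *
 *      struct my_dev {
 *              raw_spinlock_t lock;
 *      };
 *
 *      static void my_dev_setup(struct my_dev *d)
 *      {
 *              raw_spin_lock_init(&d->lock);
 *      }
 */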
#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name, this doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE issued before the critical
 * section cannot be reordered with a LOAD inside the section.
 * spin_lock() itself is a one-way barrier: that LOAD cannot escape out
 * of the region. So the default implementation simply ensures that a
 * STORE cannot move into the critical section; smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()       smp_wmb()
#endif
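/*
 * A hypothetical pairing for the barrier above (names made up): a writer
 * that publishes a flag and then takes a lock can use
 * smp_mb__before_spinlock() so that LOADs inside the critical section
 * cannot be reordered before the STORE to the flag:
 *
 *      cond = 1;
 *      smp_mb__before_spinlock();
 *      raw_spin_lock(&wait_lock);
 *      ... LOADs in here are ordered after the STORE to cond ...
 *      raw_spin_unlock(&wait_lock);
 */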

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)

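/*
 * Caller-side sketch (illustrative, identifiers made up): wait for any
 * current holder to release the lock without acquiring it. The lock may
 * be re-taken immediately afterwards, so the caller needs its own
 * argument for why proceeding without holding it is safe:
 *
 *      raw_spin_unlock_wait(&obj->lock);
 *      ... the lock was observed unlocked at least once ...
 */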
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set; the
 * various methods are defined as NOPs when they are not required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
# define raw_spin_lock_nested(lock, subclass)           _raw_spin_lock(lock)
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif
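/*
 * Illustration (hypothetical, made-up names): when two locks of the same
 * lock class must be held at once, the second acquisition is annotated
 * with a subclass so lockdep does not report a false self-deadlock.
 * SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>:
 *
 *      raw_spin_lock(&src->lock);
 *      raw_spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *      ... move data from src to dst ...
 *      raw_spin_unlock(&dst->lock);
 *      raw_spin_unlock(&src->lock);
 */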

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave(lock);                   \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)
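/*
 * Usage sketch (lock name invented): flags must be a plain unsigned long
 * passed by name, never by pointer; the typecheck() in the macros above
 * rejects anything else at compile time.
 *
 *      unsigned long flags;
 *
 *      raw_spin_lock_irqsave(&my_lock, flags);
 *      ... critical section, hardirqs disabled on this CPU ...
 *      raw_spin_unlock_irqrestore(&my_lock, flags);
 */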

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
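/*
 * Illustration (made-up identifiers): the trylock variants evaluate to 1
 * on success and 0 on failure, and the failure path has already restored
 * the interrupt state, so they can be used directly as a condition:
 *
 *      if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *              ... got the lock, irqs saved and disabled ...
 *              raw_spin_unlock_irqrestore(&my_lock, flags);
 *      }
 */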

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#define spin_lock_init(_lock)                           \
do {                                                    \
        spinlock_check(_lock);                          \
        raw_spin_lock_init(&(_lock)->rlock);            \
} while (0)
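/*
 * For illustration only (struct and function names invented):
 * spinlock_check() generates no code of its own; its job is to make
 * spin_lock_init() fail to compile if it is handed a raw_spinlock_t
 * instead of a spinlock_t.
 *
 *      struct foo {
 *              spinlock_t lock;
 *      };
 *
 *      static void foo_init(struct foo *f)
 *      {
 *              spin_lock_init(&f->lock);
 *      }
 */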

static inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static inline void spin_unlock_wait(spinlock_t *lock)
{
        raw_spin_unlock_wait(&lock->rlock);
}

static inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

static inline int spin_can_lock(spinlock_t *lock)
{
        return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
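/*
 * Typical use, for illustration (object and lock names made up): drop a
 * reference and, only if it was the last one, take the list lock and
 * tear the object down. The lock is held only when true is returned:
 *
 *      if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *              list_del(&obj->node);
 *              spin_unlock(&obj_list_lock);
 *              kfree(obj);
 *      }
 */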

#endif /* __LINUX_SPINLOCK_H */