/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
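
/*
 * Usage example (illustrative, not part of this header): lockdep_copy_map()
 * is useful when the object holding a lockdep_map may be freed while lockdep
 * still needs the map, e.g. a work item that frees itself. Copying the map
 * onto the stack first keeps the annotation valid. This sketch is modeled on
 * the workqueue code; "work" and "fn" are placeholders:
 *
 *	struct lockdep_map lockdep_map;
 *
 *	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 *	lock_map_acquire(&lockdep_map);
 *	fn(work);	(this call may free "work")
 *	lock_map_release(&lockdep_map);
 */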

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
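
/*
 * Illustrative sketch of the parent-pointer trick above: lock_list objects
 * are word-aligned, so bit 0 of ->parent is free to serve as the BFS
 * "accessed" mark. These helper names are hypothetical; the real helpers
 * live in kernel/locking/lockdep.c:
 *
 *	static inline void bfs_mark_accessed(struct lock_list *lock)
 *	{
 *		lock->parent = (void *)((unsigned long)lock->parent | 1UL);
 *	}
 *
 *	static inline bool bfs_accessed(struct lock_list *lock)
 *	{
 *		return (unsigned long)lock->parent & 1UL;
 *	}
 *
 *	static inline struct lock_list *bfs_parent(struct lock_list *lock)
 *	{
 *		return (void *)((unsigned long)lock->parent & ~1UL);
 *	}
 */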

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with INITIAL_CHAIN_KEY), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:1;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};
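
/*
 * Illustrative sketch of how the chain key evolves as locks are taken; the
 * real mixing function lives in kernel/locking/lockdep.c, this only shows
 * the shape of the computation:
 *
 *	chain_key = INITIAL_CHAIN_KEY;
 *	for each lock acquired, in order:
 *		hlock->prev_chain_key = chain_key;
 *		chain_key = mix(chain_key, hlock->class_idx);
 *
 * A hit on the resulting 64-bit key in the chain cache lets the validator
 * skip the full dependency-graph walk for a chain it has already validated.
 */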

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
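
/*
 * Usage example (illustrative): lockdep_off()/lockdep_on() bracket code
 * whose locking must be invisible to the validator, e.g. debugging
 * facilities that could otherwise recurse into lockdep. Calls must be
 * strictly paired on the same task:
 *
 *	lockdep_off();
 *	... locking here is ignored by the validator ...
 *	lockdep_on();
 */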

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
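
/*
 * Usage example (illustrative): all instances initialized by one init
 * function share a class, which can be too broad. Re-keying the special
 * instance splits the class so that nesting it inside a normal instance
 * is not flagged; "foo" is a placeholder:
 *
 *	static struct lock_class_key foo_special_key;
 *
 *	spin_lock_init(&foo->lock);
 *	if (foo->is_special)
 *		lockdep_set_class(&foo->lock, &foo_special_key);
 */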

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
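
/*
 * Usage example (illustrative): a hand-rolled locking primitive telling
 * lockdep about its events directly, mirroring what the spin_acquire()
 * style wrappers below do; "mylock" is a placeholder type with its own
 * dep_map, and the do_the_actual_*() calls stand in for the real
 * implementation:
 *
 *	void mylock_lock(struct mylock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		do_the_actual_locking(l);
 *	}
 *
 *	void mylock_unlock(struct mylock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		do_the_actual_unlocking(l);
 *	}
 */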

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
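
/*
 * Usage example (illustrative): pinning catches code that drops and retakes
 * a lock it was required to keep held across a call (the scheduler pins the
 * runqueue lock this way). Shown via the lockdep_*() wrappers defined below;
 * "lock" is a placeholder object with a dep_map:
 *
 *	struct pin_cookie cookie;
 *
 *	cookie = lockdep_pin_lock(lock);
 *	... call code that must not release the lock ...
 *	lockdep_unpin_lock(lock, cookie);
 */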

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)
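
/*
 * Usage example (illustrative): enforcing a function's locking contract;
 * the check vanishes when lockdep is disabled. "foo" is a placeholder:
 *
 *	static void foo_update_state(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->state++;
 *	}
 */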

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l, c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l, c)	lock_unpin_lock(&(l)->dep_map, (c))

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined; callers should
 * #ifdef the call sites themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
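
/*
 * Usage example (illustrative): a standalone map for a "pseudo-lock",
 * i.e. a dependency not backed by a real lock object. The map's own
 * address serves as the key; "foo_flush" is a placeholder:
 *
 *	static struct lockdep_map foo_flush_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo_flush", &foo_flush_map);
 */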

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
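
/*
 * Usage example (illustrative sketch, modeled on __raw_spin_lock() in
 * include/linux/spinlock_api_smp.h): a lock implementation wraps its
 * contended slowpath so that, with CONFIG_LOCK_STAT, contention and
 * acquisition events are recorded, and without it the macro compiles
 * down to a plain do_raw_spin_lock():
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 */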

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
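
/*
 * Usage example (illustrative): taking two locks of the same class in a
 * fixed order. Without the subclass annotation the second acquire would be
 * reported as a deadlock of the class with itself; "a" and "b" are
 * placeholder objects of the same type:
 *
 *	if (a > b)
 *		swap(a, b);	(impose a stable locking order)
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 */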

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
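
/*
 * Usage example (illustrative): annotating a dependency with no real lock,
 * e.g. "flushing work W must not happen under a lock that W itself takes".
 * Acquiring and immediately releasing the map on both sides makes an
 * inverted ordering visible to the validator; this is the pattern the
 * workqueue flush code uses:
 *
 *	lock_map_acquire(&work->lockdep_map);
 *	lock_map_release(&work->lockdep_map);
 */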

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
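
/*
 * Usage example (illustrative): a function that takes a lock only on a
 * rare path can still expose the dependency on every call, so the
 * validator sees a would-be deadlock even if the rare path never runs;
 * "foo" is a placeholder:
 *
 *	void foo_poke(struct foo *foo)
 *	{
 *		might_lock(&foo->mutex);
 *		if (unlikely(foo->needs_slowpath)) {
 *			mutex_lock(&foo->mutex);
 *			...
 *			mutex_unlock(&foo->mutex);
 *		}
 *	}
 */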

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled &&				\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */