/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
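
/*
 * A minimal usage sketch ('obj' and 'callback' are hypothetical): an object
 * whose callback may free the object itself can copy its lockdep_map to the
 * stack first, so the annotation can still be released after the object is
 * gone:
 *
 *	struct lockdep_map map;
 *
 *	lockdep_copy_map(&map, &obj->lockdep_map);
 *	lock_map_acquire(&map);
 *	obj->callback(obj);		// may free 'obj'
 *	lock_map_release(&map);
 */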
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
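
/*
 * A minimal usage sketch: code that must not be tracked by the validator
 * (for instance because lockdep's own state may be inconsistent at that
 * point) can bracket the region for the current task:
 *
 *	lockdep_off();
 *	... lock operations here are not tracked ...
 *	lockdep_on();
 *
 * The calls nest, so every lockdep_off() must be paired with a lockdep_on().
 */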
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
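
/*
 * A minimal usage sketch (the 'part' object and key name are hypothetical):
 * when two locks of the same lock type are used in different roles and may
 * legitimately nest, a static key splits the class so the nesting is not
 * flagged as recursive locking of a single class:
 *
 *	static struct lock_class_key part_lock_key;
 *
 *	spin_lock_init(&part->lock);
 *	lockdep_set_class(&part->lock, &part_lock_key);
 *
 * lockdep_set_subclass() is the lighter-weight alternative when only a fixed
 * number of nesting levels within the same class is needed.
 */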
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {					\
		WARN_ON(debug_locks &&					\
			lockdep_is_held(l) == LOCK_STATE_NOT_HELD);	\
	} while (0)

#define lockdep_assert_not_held(l)	do {				\
		WARN_ON(debug_locks &&					\
			lockdep_is_held(l) == LOCK_STATE_HELD);		\
	} while (0)

#define lockdep_assert_held_write(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_none_held_once()	do {				\
		WARN_ON_ONCE(debug_locks && current->lockdep_depth);	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
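
/*
 * A minimal usage sketch (the 'foo' type is hypothetical): a helper that
 * relies on its caller holding a lock can document and runtime-check that
 * requirement:
 *
 *	static void foo_update_stats(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->nr_updates++;
 *	}
 *
 * Unlike spin_is_locked()-style checks, this verifies that the current
 * context holds the lock, and it compiles away without CONFIG_LOCKDEP.
 */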
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
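
/*
 * A minimal usage sketch (map and key names are hypothetical): a "virtual"
 * lock that exists only for annotation purposes can be declared statically
 * and then wrapped around the region it models:
 *
 *	static struct lock_class_key hypothetical_key;
 *	static struct lockdep_map hypothetical_map =
 *		STATIC_LOCKDEP_MAP_INIT("hypothetical_map", &hypothetical_key);
 *
 *	lock_map_acquire(&hypothetical_map);
 *	... the annotated section ...
 *	lock_map_release(&hypothetical_map);
 */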
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
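
/*
 * A minimal usage sketch: when two locks of the same class are taken in a
 * well-defined order (for example the two halves of a hypothetical
 * double_lock() helper), the second acquisition can be annotated as a
 * deliberate one-level nesting:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *
 * Without the annotation, lockdep would report the second acquisition of the
 * same class as a potential self-deadlock.
 */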
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)
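
/*
 * A minimal usage sketch (the per-CPU counter is hypothetical): a function
 * that may only run with interrupts disabled can state that assumption
 * explicitly instead of relying on a comment:
 *
 *	static void frobnicate_this_cpu(void)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		__this_cpu_inc(frob_count);
 *	}
 *
 * Without CONFIG_PROVE_LOCKING these asserts compile to nothing.
 */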
#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */