/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the node in the lock_chain hash list; chains whose keys
 *               collide are linked here
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int sync:1;
	unsigned int references:11;				/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */
#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */
extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
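/*
 * Example: re-keying a lock after init. A minimal sketch, not taken from
 * in-tree code ("bdev_part_key" and the structure members are hypothetical):
 * a driver that nests two locks of the same default class can split one of
 * them into its own class so the nesting does not look self-recursive:
 *
 *	static struct lock_class_key bdev_part_key;
 *
 *	static void bdev_init_partition_lock(struct bdev *part)
 *	{
 *		spin_lock_init(&part->lock);
 *		// spin_lock_init() assigned the default class above; re-key
 *		// partition locks so they are distinct from whole-disk locks
 *		lockdep_set_class(&part->lock, &bdev_part_key);
 *	}
 */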
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);
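/*
 * Example: teaching lockdep about a custom synchronization construct by
 * calling lock_acquire()/lock_release() directly. Illustrative sketch only;
 * "my_hwlock" and the hw_do_*() helpers are hypothetical, and hw->dep_map
 * must have been set up with lockdep_init_map() first. Ordinary lock types
 * instead use the *_acquire()/*_release() wrappers further down this file:
 *
 *	void my_hwlock_lock(struct my_hwlock *hw)
 *	{
 *		// subclass=0, trylock=0, read=0 (exclusive), check=1 (full)
 *		lock_acquire(&hw->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		hw_do_lock(hw);
 *	}
 *
 *	void my_hwlock_unlock(struct my_hwlock *hw)
 *	{
 *		lock_release(&hw->dep_map, _RET_IP_);
 *		hw_do_unlock(hw);
 *	}
 */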
/* lock_is_held_type() return values: */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)	\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l, c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l, c)	lock_unpin_lock(&(l)->dep_map, (c))
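/*
 * Example: documenting and enforcing a locking rule with the assertions
 * above. A minimal sketch ("struct foo" and its members are hypothetical):
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);	// callers must hold f->lock
 *		f->nr_updates++;
 *	}
 *
 * With lockdep enabled this WARNs if a caller forgets to take f->lock;
 * with lockdep disabled it becomes a no-op.
 */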
/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
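/*
 * Example: temporarily overriding the wait-type context around a code
 * region. A sketch only (the surrounding function is hypothetical); per the
 * comment above, the override map is acquired with the _try variant so it
 * does not itself enter the dependency chains:
 *
 *	void wait_for_thing(void)
 *	{
 *		DEFINE_WAIT_OVERRIDE_MAP(wait_map, LD_WAIT_SLEEP);
 *
 *		lock_map_acquire_try(&wait_map);
 *		do_the_wait();		// wait-type checks relaxed here
 *		lock_map_release(&wait_map);
 *	}
 */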
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined; the caller should
 * #ifdef the call sites instead.
 */

# define lockdep_reset()			do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)		({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)

#endif /* !CONFIG_LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}
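/*
 * Example: annotating a pseudo-lock (an execution context that is not
 * backed by a real lock) with a statically initialized map. Sketch only;
 * "my_ctx_key", "my_ctx_map" and my_ctx_run() are hypothetical:
 *
 *	static struct lock_class_key my_ctx_key;
 *	static struct lockdep_map my_ctx_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_ctx", &my_ctx_key);
 *
 *	void my_ctx_run(void (*fn)(void))
 *	{
 *		lock_map_acquire(&my_ctx_map);	// see lock_map_*() below
 *		fn();
 *		lock_map_release(&my_ctx_map);
 *	}
 *
 * This is essentially the mechanism workqueues use to catch "flush a work
 * item from within that work item" style deadlocks.
 */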
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map, ip)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
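/*
 * Example: how a lock implementation typically feeds contention statistics
 * through LOCK_CONTENDED. A sketch in the style of the in-tree lock
 * primitives (the my_mutex/__my_mutex_* names are illustrative; the struct
 * needs a dep_map member):
 *
 *	void my_mutex_lock(struct my_mutex *lock)
 *	{
 *		mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, __my_mutex_trylock, __my_mutex_lock_slow);
 *	}
 *
 * The trylock fast path is attempted first; only if it fails is the lock
 * marked contended before blocking, and lock_acquired() then records the
 * wait time once the lock is finally taken.
 */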
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
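/*
 * Example: taking two locks of the same class, e.g. in a transfer function.
 * A minimal sketch ("my_move" and struct foo are hypothetical;
 * mutex_lock_nested() is the standard subclass-aware API):
 *
 *	void my_move(struct foo *src, struct foo *dst)
 *	{
 *		mutex_lock(&src->lock);
 *		mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *		// ... move state from src to dst ...
 *		mutex_unlock(&dst->lock);
 *		mutex_unlock(&src->lock);
 *	}
 *
 * Without the subclass annotation lockdep would report a possible recursive
 * deadlock on the shared class. Note the annotation only silences the
 * report; callers must still impose a stable locking order between the two
 * instances (e.g. by address) to be actually deadlock-free.
 */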
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
#define lock_map_sync(l)			lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock)			do { } while (0)
# define might_lock_read(lock)			do { } while (0)
# define might_lock_nested(lock, subclass)	do { } while (0)

# define lockdep_assert_irqs_enabled()		do { } while (0)
# define lockdep_assert_irqs_disabled()		do { } while (0)
# define lockdep_assert_in_irq()		do { } while (0)

# define lockdep_assert_preemption_enabled()	do { } while (0)
# define lockdep_assert_preemption_disabled()	do { } while (0)
# define lockdep_assert_in_softirq()		do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */