/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#define MAX_LOCKDEP_SUBCLASSES		8UL

#include <linux/types.h>

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+2*4)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires one rq->lock at subclass 0 and the other at
 * single depth, and both are highly contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;
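
/*
 * Illustrative sketch (not part of this header): the register/unregister
 * discipline for a dynamically allocated key, as described above.
 * "struct foo", foo_alloc() and foo_free() are hypothetical; the
 * lockdep calls are the real API declared further down.
 */
#if 0
struct foo {
	spinlock_t		lock;
	struct lock_class_key	key;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	lockdep_register_key(&f->key);		/* register before first use */
	spin_lock_init(&f->lock);
	lockdep_set_class(&f->lock, &f->key);	/* tie the lock to this key */
	return f;
}

static void foo_free(struct foo *f)
{
	lockdep_unregister_key(&f->key);	/* unregister before freeing */
	kfree(f);
}
#endif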

struct lock_trace {
	unsigned int		nr_entries;
	unsigned int		offset;
};

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct lock_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int				name_version;
	const char			*name;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
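
/*
 * Illustrative sketch (not part of this header): how a lock type embeds
 * a lockdep_map so the lockdep_* macros below can reach it via
 * "->dep_map". "struct mylock" is hypothetical; CONFIG_DEBUG_LOCK_ALLOC
 * is the usual guard in real lock types.
 */
#if 0
struct mylock {
	arch_spinlock_t		raw_lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* instance -> class mapping */
#endif
};
#endif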

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	struct lock_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate whether the lock has been accessed
	 * in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in the lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};
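
/*
 * Conceptual sketch (not part of this header): how the chain key grows
 * step by step as locks are taken, and what prev_chain_key records. The
 * loop shape and the helper name iterate_chain_key() follow
 * kernel/locking/lockdep.c; treat the details as illustrative.
 */
#if 0
static void recompute_chain_key(struct task_struct *curr)
{
	u64 chain_key = INITIAL_CHAIN_KEY;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		hlock->prev_chain_key = chain_key;	/* hash before this lock */
		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
	}
	curr->curr_chain_key = chain_key;
}
#endif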

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

extern void lockdep_off(void);
extern void lockdep_on(void);

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks would make the validator get the scope of
 * dependencies wrong: either too broad (the class needs a split) or too
 * narrow (the class suffers from a false split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
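
/*
 * Illustrative sketch (not part of this header): giving a subset of lock
 * instances their own class to fix a too-broad class, per the comment
 * above. The filesystem names are hypothetical; the pattern mirrors real
 * users of lockdep_set_class().
 */
#if 0
static struct lock_class_key myfs_dir_ilock_key;	/* hypothetical */

static void myfs_init_inode(struct inode *inode)
{
	/* directory inodes nest differently than regular file inodes */
	if (S_ISDIR(inode->i_mode))
		lockdep_set_class(&inode->i_rwsem, &myfs_dir_ilock_key);
}
#endif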

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 * 0: exclusive (write) acquire
 * 1: read-acquire (no recursion allowed)
 * 2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 * 0: simple checks (freeing, held-at-exit-time, etc.)
 * 1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

struct pin_cookie { unsigned int val; };

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {			\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
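
/*
 * Illustrative sketch (not part of this header): documenting a locking
 * precondition with lockdep_assert_held(). "struct foo" and
 * foo_update_locked() are hypothetical.
 */
#if 0
static void foo_update_locked(struct foo *f)
{
	lockdep_assert_held(&f->lock);	/* callers must hold f->lock */
	f->counter++;
}
#endif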

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller should
 * rather wrap the call in an #ifdef themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

struct pin_cookie { };

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
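
/*
 * Illustrative sketch (not part of this header): the trylock-then-slowpath
 * shape LOCK_CONTENDED() expects, roughly how rwsem's down_read() uses it.
 * Treat the exact helper names as an assumption, not a quote.
 */
#if 0
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
	/* only counts as contended if the trylock fast path fails */
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
#endif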

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)

#define lockdep_assert_irqs_enabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirqs_enabled,			\
			  "IRQs not enabled as expected\n");		\
	} while (0)

#define lockdep_assert_irqs_disabled()	do {				\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  current->hardirqs_enabled,			\
			  "IRQs not disabled as expected\n");		\
	} while (0)

#define lockdep_assert_in_irq() do {					\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  !current->hardirq_context,			\
			  "Not in hardirq as expected\n");		\
	} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
#endif
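
/*
 * Illustrative sketch (not part of this header): taking two locks of the
 * same class with SINGLE_DEPTH_NESTING so lockdep does not flag the
 * second acquire as a self-deadlock. "parent" and "child" are
 * hypothetical objects whose locks share a class.
 */
#if 0
mutex_lock(&parent->lock);
mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
/* ... */
mutex_unlock(&child->lock);
mutex_unlock(&parent->lock);
#endif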

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */