/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth
 * subclass are cached in lockdep_map. This optimization mainly
 * targets rq->lock: double_rq_lock() acquires that highly contended
 * lock with single-depth nesting as well as with subclass 0.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};
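
/*
 * Illustrative sketch (the "my_dev" names are hypothetical, not part
 * of this API): because a class is identified by the address of a
 * static lock_class_key, a subsystem can re-key its dynamically
 * allocated locks into one class of its own by pointing them at a
 * shared static key via lockdep_set_class(), defined further below:
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		spin_lock_init(&dev->lock);
 *		lockdep_set_class(&dev->lock, &my_dev_lock_key);
 *	}
 */
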
#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX this doesn't work well with lockdep_set_class_and_subclass(),
	 * since that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
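
/*
 * Illustrative use of lockdep_copy_map() (a sketch with a hypothetical
 * "item"; compare process_one_work() in kernel/workqueue.c, which does
 * this for work items): when the object embedding the map may be freed
 * by the very function it is passed to, annotate against a stack copy
 * so lockdep never dereferences the possibly-freed original:
 *
 *	struct lockdep_map map;
 *
 *	lockdep_copy_map(&map, &item->lockdep_map);
 *	lock_map_acquire(&map);
 *	item->func(item);
 *	lock_map_release(&map);
 */
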
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to mark whether the lock has been visited by BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class(lock, &__lockdep_no_validate__)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
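
/*
 * Illustrative class-split sketch (hypothetical "parent"/"child"
 * objects): when two instances of one lock type are always nested in
 * a fixed order, giving each level its own key and name keeps the
 * validator from reporting a false self-deadlock:
 *
 *	static struct lock_class_key parent_class, child_class;
 *
 *	lockdep_set_class_and_name(&parent->lock, &parent_class, "parent");
 *	lockdep_set_class_and_name(&child->lock, &child_class, "child");
 *
 * The "too narrow" case is the converse: point the falsely split
 * instances at one shared key.
 */
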
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
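
/*
 * Illustrative use of lockdep_assert_held() (hypothetical "my_dev"
 * helper): internal functions that rely on the caller holding a lock
 * can document and runtime-check that requirement:
 *
 *	static void my_dev_update(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		...
 *	}
 *
 * Unlike spin_is_locked()-style checks, this verifies that the
 * current task holds the lock, not merely that some task does.
 */
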
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)	do { (void)(l); } while (0)

#define lockdep_recursing(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)	do {} while (0)
#define lock_acquired(lockdep_map, ip)	do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
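
/*
 * Illustrative use of LOCK_CONTENDED() (a sketch with a hypothetical
 * lock type; the real users are the in-tree lock implementations):
 * the acquire path trylocks first, and only a failed fast path is
 * recorded as contention before falling back to the slow path:
 *
 *	static inline void __my_lock(struct my_lock *l)
 *	{
 *		LOCK_CONTENDED(l, my_trylock, my_lock_slowpath);
 *	}
 *
 * With CONFIG_LOCK_STAT=n this compiles down to a plain
 * my_lock_slowpath(l).
 */
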
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_PROVE_LOCKING
 #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 2, n, i)
 #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 2, n, i)
 #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 2, n, i)
#else
 #define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
 #define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
 #define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)
#endif

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */