/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it at both subclass 0 and single depth, so
 * caching both avoids repeated class lookups.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;
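
/*
 * Illustrative sketch (not part of the original header), assuming a
 * hypothetical 'struct my_dev' with an embedded spinlock: dynamically
 * allocated locks of one kind can share a class by declaring a static
 * key (which lives at a unique .data address) and pointing each
 * instance at it via lockdep_set_class(), defined further below:
 *
 *	static struct lock_class_key my_dev_lock_key;
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		spin_lock_init(&dev->lock);
 *		lockdep_set_class(&dev->lock, &my_dev_lock_key);
 *	}
 */
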
#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	unsigned long			contending_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
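
/*
 * Illustrative sketch (not part of the original header): a home-grown
 * locking primitive would embed a lockdep_map next to its low-level
 * state and feed it to the lock_acquire()/lock_release() hooks declared
 * below. 'struct my_lock' is a hypothetical example type:
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 */
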
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};
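
/*
 * Conceptual sketch (not part of the original header) of how the chain
 * key evolves; the real mixing helper lives in kernel/lockdep.c:
 *
 *	chain_key(0)   = 0;
 *	chain_key(n+1) = hash_mix(chain_key(n), class_idx of lock n+1);
 *
 * Each acquired class index is folded into the running 64-bit hash, so
 * every distinct sequence of held locks yields, with high probability,
 * a distinct chain_key that can be looked up in the chain cache.
 */
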
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class(lock, &__lockdep_no_validate__)
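
/*
 * Illustrative sketch (not part of the original header): a statically
 * initialized lockdep_map can annotate a code region that behaves like
 * a lock even though no real lock object exists. 'my_region_key' and
 * 'my_region_map' are hypothetical names:
 *
 *	static struct lock_class_key my_region_key;
 *	static struct lockdep_map my_region_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_region", &my_region_key);
 *
 * The map can then be wrapped around the region with the
 * lock_map_acquire()/lock_map_release() helpers defined further below.
 */
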
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
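
/*
 * Illustrative sketch (not part of the original header): using
 * lockdep_assert_held() to document and runtime-check a locking rule.
 * 'struct my_dev' with an embedded spinlock 'lock' is hypothetical:
 *
 *	static void my_dev_update_stats(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->stats.updates++;	(state protected by dev->lock)
 *	}
 */
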
#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should #ifdef the call instead.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
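
/*
 * Illustrative sketch (not part of the original header): a lock
 * implementation splits its acquire into a trylock fastpath and a
 * blocking slowpath, and LOCK_CONTENDED() wires the contention events
 * in between. 'do_my_trylock' (returns non-zero on success) and
 * 'do_my_lock_slow' are hypothetical; the lock object must carry a
 * dep_map, as in the 'struct my_lock' sketch above:
 *
 *	void my_lock(struct my_lock *l)
 *	{
 *		LOCK_CONTENDED(l, do_my_trylock, do_my_lock_slow);
 *	}
 *
 * i.e. lock_contended() fires only if the fastpath fails, and
 * lock_acquired() fires once the lock is finally taken.
 */
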
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_acquire_read(l)		do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */