/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES 8UL

enum lockdep_wait_type {
	LD_WAIT_INV = 0, /* not checked, catch all */

	LD_WAIT_FREE, /* wait free, rcu etc.. */
	LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX, /* must be last */
};

enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0, /* normal, catch all */
	LD_LOCK_PERCPU, /* percpu */
	LD_LOCK_WAIT_OVERRIDE, /* annotation */
	LD_LOCK_MAX,
};

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally, we report on USED and USED_READ.
 */
#define XXX_LOCK_USAGE_STATES 2
#define LOCK_TRACE_STATES (XXX_LOCK_USAGE_STATES*4 + 2)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and a single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires that highly contended lock with a single-depth
 * subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES 2

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node hash_entry;
		struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;

#define LOCKSTAT_POINTS 4

struct lockdep_map;
typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
			   const struct lockdep_map *b);
typedef void (*lock_print_fn)(const struct lockdep_map *map);

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	lock_cmp_fn cmp_fn;
	lock_print_fn print_fn;

	unsigned int subclass;
	unsigned int dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long usage_mask;
	const struct lock_trace *usage_traces[LOCK_TRACE_STATES];

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int name_version;
	const char *name;

	u8 wait_type_inner;
	u8 wait_type_outer;
	u8 lock_type;
	/* u8 hole; */

#ifdef CONFIG_LOCK_STAT
	unsigned long contention_point[LOCKSTAT_POINTS];
	unsigned long contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64 min;
	s64 max;
	s64 total;
	unsigned long nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long contention_point[LOCKSTAT_POINTS];
	unsigned long contending_point[LOCKSTAT_POINTS];
	struct lock_time read_waittime;
	struct lock_time write_waittime;
	struct lock_time read_holdtime;
	struct lock_time write_holdtime;
	unsigned long bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key *key;
	struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char *name;
	u8 wait_type_outer; /* can be taken in this context */
	u8 wait_type_inner; /* represents this context */
	u8 lock_type;
	/* u8 hole; */
#ifdef CONFIG_LOCK_STAT
	int cpu;
	unsigned long ip;
#endif
};

struct pin_cookie { unsigned int val; };

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */
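
The key-lifetime rule described above (a dynamically allocated lock key must be registered before it is used and unregistered before its memory is freed) translates into roughly the following consumer pattern. This is a minimal sketch, not part of this header: struct my_obj and its helpers are hypothetical names invented for illustration, while lockdep_register_key(), lockdep_unregister_key() and lockdep_set_class() are the existing helpers declared in <linux/lockdep.h>.

/* Hypothetical consumer of a dynamically allocated lock_class_key. */
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t lock;
	struct lock_class_key key;	/* key lives inside dynamically allocated memory */
};

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* Register the key before any lock operation can use it. */
	lockdep_register_key(&obj->key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void my_obj_free(struct my_obj *obj)
{
	/* Unregister before the memory holding the key is freed. */
	lockdep_unregister_key(&obj->key);
	kfree(obj);
}

When CONFIG_LOCKDEP is disabled, struct lock_class_key is empty (see the !CONFIG_LOCKDEP branch above) and the register/unregister/set_class calls compile away, so the same pattern costs nothing in production builds.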