Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/lockdep: Add support for dynamic keys

A shortcoming of the current lockdep implementation is that it requires
lock keys to be allocated statically. That forces all instances of lock
objects that occur in a given data structure to share a lock key. Since
lock dependency analysis groups lock objects per key, sharing lock keys
can cause false positive lockdep reports. Make it possible to avoid
such false positive reports by allowing lock keys to be allocated
dynamically. Require that dynamically allocated lock keys are
registered before use by calling lockdep_register_key(). Complain about
attempts to register the same lock key pointer twice without calling
lockdep_unregister_key() between successive registration calls.

The purpose of the new lock_keys_hash[] data structure that keeps
track of all dynamic keys is twofold:

- Verify whether the lockdep_register_key() and lockdep_unregister_key()
functions are used correctly.

- Prevent lockdep_init_map() from complaining when it encounters a dynamically
allocated key.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-19-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Bart Van Assche and committed by
Ingo Molnar
108c1485 4bf50862

+131 -11
+18 -3
include/linux/lockdep.h
··· 46 46 #define NR_LOCKDEP_CACHING_CLASSES 2 47 47 48 48 /* 49 - * Lock-classes are keyed via unique addresses, by embedding the 50 - * lockclass-key into the kernel (or module) .data section. (For 51 - * static locks we use the lock address itself as the key.) 49 + * A lockdep key is associated with each lock object. For static locks we use 50 + * the lock address itself as the key. Dynamically allocated lock objects can 51 + * have a statically or dynamically allocated key. Dynamically allocated lock 52 + * keys must be registered before being used and must be unregistered before 53 + * the key memory is freed. 52 54 */ 53 55 struct lockdep_subclass_key { 54 56 char __one_byte; 55 57 } __attribute__ ((__packed__)); 56 58 59 + /* hash_entry is used to keep track of dynamically allocated keys. */ 57 60 struct lock_class_key { 61 + struct hlist_node hash_entry; 58 62 struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; 59 63 }; 60 64 ··· 277 273 extern void lockdep_off(void); 278 274 extern void lockdep_on(void); 279 275 276 + extern void lockdep_register_key(struct lock_class_key *key); 277 + extern void lockdep_unregister_key(struct lock_class_key *key); 278 + 280 279 /* 281 280 * These methods are used by specific locking variants (spinlocks, 282 281 * rwlocks, mutexes and rwsems) to pass init/acquire/release events ··· 440 433 * The class key takes no space if lockdep is disabled: 441 434 */ 442 435 struct lock_class_key { }; 436 + 437 + static inline void lockdep_register_key(struct lock_class_key *key) 438 + { 439 + } 440 + 441 + static inline void lockdep_unregister_key(struct lock_class_key *key) 442 + { 443 + } 443 444 444 445 /* 445 446 * The lockdep_map takes no space if lockdep is disabled:
+113 -8
kernel/locking/lockdep.c
··· 143 143 * nr_lock_classes is the number of elements of lock_classes[] that is 144 144 * in use. 145 145 */ 146 + #define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) 147 + #define KEYHASH_SIZE (1UL << KEYHASH_BITS) 148 + static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; 146 149 unsigned long nr_lock_classes; 147 150 #ifndef CONFIG_DEBUG_LOCKDEP 148 151 static ··· 644 641 * Is this the address of a static object: 645 642 */ 646 643 #ifdef __KERNEL__ 647 - static int static_obj(void *obj) 644 + static int static_obj(const void *obj) 648 645 { 649 646 unsigned long start = (unsigned long) &_stext, 650 647 end = (unsigned long) &_end, ··· 978 975 } 979 976 } 980 977 978 + static inline struct hlist_head *keyhashentry(const struct lock_class_key *key) 979 + { 980 + unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS); 981 + 982 + return lock_keys_hash + hash; 983 + } 984 + 985 + /* Register a dynamically allocated key. */ 986 + void lockdep_register_key(struct lock_class_key *key) 987 + { 988 + struct hlist_head *hash_head; 989 + struct lock_class_key *k; 990 + unsigned long flags; 991 + 992 + if (WARN_ON_ONCE(static_obj(key))) 993 + return; 994 + hash_head = keyhashentry(key); 995 + 996 + raw_local_irq_save(flags); 997 + if (!graph_lock()) 998 + goto restore_irqs; 999 + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 1000 + if (WARN_ON_ONCE(k == key)) 1001 + goto out_unlock; 1002 + } 1003 + hlist_add_head_rcu(&key->hash_entry, hash_head); 1004 + out_unlock: 1005 + graph_unlock(); 1006 + restore_irqs: 1007 + raw_local_irq_restore(flags); 1008 + } 1009 + EXPORT_SYMBOL_GPL(lockdep_register_key); 1010 + 1011 + /* Check whether a key has been registered as a dynamic key. 
*/ 1012 + static bool is_dynamic_key(const struct lock_class_key *key) 1013 + { 1014 + struct hlist_head *hash_head; 1015 + struct lock_class_key *k; 1016 + bool found = false; 1017 + 1018 + if (WARN_ON_ONCE(static_obj(key))) 1019 + return false; 1020 + 1021 + /* 1022 + * If lock debugging is disabled lock_keys_hash[] may contain 1023 + * pointers to memory that has already been freed. Avoid triggering 1024 + * a use-after-free in that case by returning early. 1025 + */ 1026 + if (!debug_locks) 1027 + return true; 1028 + 1029 + hash_head = keyhashentry(key); 1030 + 1031 + rcu_read_lock(); 1032 + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 1033 + if (k == key) { 1034 + found = true; 1035 + break; 1036 + } 1037 + } 1038 + rcu_read_unlock(); 1039 + 1040 + return found; 1041 + } 1042 + 981 1043 /* 982 1044 * Register a lock's class in the hash-table, if the class is not present 983 1045 * yet. Otherwise we look it up. We cache the result in the lock object ··· 1064 996 if (!lock->key) { 1065 997 if (!assign_lock_key(lock)) 1066 998 return NULL; 1067 - } else if (!static_obj(lock->key)) { 999 + } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { 1068 1000 return NULL; 1069 1001 } 1070 1002 ··· 3446 3378 if (DEBUG_LOCKS_WARN_ON(!key)) 3447 3379 return; 3448 3380 /* 3449 - * Sanity check, the lock-class key must be persistent: 3381 + * Sanity check, the lock-class key must either have been allocated 3382 + * statically or must have been registered as a dynamic key. 3450 3383 */ 3451 - if (!static_obj(key)) { 3452 - printk("BUG: key %px not in .data!\n", key); 3453 - /* 3454 - * What it says above ^^^^^, I suggest you read it. 
3455 - */ 3384 + if (!static_obj(key) && !is_dynamic_key(key)) { 3385 + if (debug_locks) 3386 + printk(KERN_ERR "BUG: key %px has not been registered!\n", key); 3456 3387 DEBUG_LOCKS_WARN_ON(1); 3457 3388 return; 3458 3389 } ··· 4861 4794 else 4862 4795 lockdep_reset_lock_reg(lock); 4863 4796 } 4797 + 4798 + /* Unregister a dynamically allocated key. */ 4799 + void lockdep_unregister_key(struct lock_class_key *key) 4800 + { 4801 + struct hlist_head *hash_head = keyhashentry(key); 4802 + struct lock_class_key *k; 4803 + struct pending_free *pf; 4804 + unsigned long flags; 4805 + bool found = false; 4806 + 4807 + might_sleep(); 4808 + 4809 + if (WARN_ON_ONCE(static_obj(key))) 4810 + return; 4811 + 4812 + raw_local_irq_save(flags); 4813 + if (!graph_lock()) 4814 + goto out_irq; 4815 + 4816 + pf = get_pending_free(); 4817 + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 4818 + if (k == key) { 4819 + hlist_del_rcu(&k->hash_entry); 4820 + found = true; 4821 + break; 4822 + } 4823 + } 4824 + WARN_ON_ONCE(!found); 4825 + __lockdep_free_key_range(pf, key, 1); 4826 + call_rcu_zapped(pf); 4827 + graph_unlock(); 4828 + out_irq: 4829 + raw_local_irq_restore(flags); 4830 + 4831 + /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ 4832 + synchronize_rcu(); 4833 + } 4834 + EXPORT_SYMBOL_GPL(lockdep_unregister_key); 4864 4835 4865 4836 void __init lockdep_init(void) 4866 4837 {