Repository: Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

locking/lockdep: Convert hash tables to hlists

Mike said:

: CONFIG_UBSAN_ALIGNMENT breaks x86-64 kernel with lockdep enabled, i.e.
: kernel with CONFIG_UBSAN_ALIGNMENT=y fails to load without even any error
: message.
:
: The problem is that ubsan callbacks use spinlocks and might be called
: before lockdep is initialized. Particularly this line in the
: reserve_ebda_region function causes problem:
:
: lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
:
: If i put lockdep_init() before reserve_ebda_region call in
: x86_64_start_reservations kernel loads well.

Fix this ordering issue permanently: change lockdep so that it uses hlists
for the hash tables. Unlike a list_head, an hlist_head is in its
initialized state when it is all-zeroes, so lockdep is ready for operation
immediately upon boot - lockdep_init() need not have run.

The patch will also save some memory.

Probably lockdep_init() and lockdep_initialized can be done away with now.

Suggested-by: Mike Krinkin <krinkin.m.u@gmail.com>
Reported-by: Mike Krinkin <krinkin.m.u@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: mm-commits@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Andrew Morton and committed by Ingo Molnar.
Commit hashes: a63f38cc 3aa6b46c

Total changes: +21 -25

include/linux/lockdep.h (+2 -2)
@@ -66,7 +66,7 @@
 	/*
 	 * class-hash:
 	 */
-	struct list_head	hash_entry;
+	struct hlist_node	hash_entry;

 	/*
 	 * global list of all lock-classes:
@@ -199,7 +199,7 @@
 	u8	irq_context;
 	u8	depth;
 	u16	base;
-	struct list_head	entry;
+	struct hlist_node	entry;
 	u64	chain_key;
 };
kernel/locking/lockdep.c (+19 -23)
@@ -292,7 +292,7 @@
 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)	(classhash_table + __classhashfn((key)))

-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];

 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@
 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];

 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;

 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return NULL;

-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
 	struct lock_class *class;

 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
 	}
@@ -805,7 +805,7 @@
 	 * We use RCU's safe list-add method to make
 	 * parallel walking of the hash-list safe:
 	 */
-	list_add_tail_rcu(&class->hash_entry, hash_head);
+	hlist_add_head_rcu(&class->hash_entry, hash_head);
 	/*
 	 * Add it to the global list of classes:
 	 */
@@ -2021,7 +2021,7 @@
 		     u64 chain_key)
 {
 	struct lock_class *class = hlock_class(hlock);
-	struct list_head *hash_head = chainhashentry(chain_key);
+	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr;
 	int i, j;
@@ -2037,7 +2037,7 @@
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2061,7 +2061,7 @@
 	/*
 	 * We have to walk the chain again locked - to avoid duplicates:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	hlist_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 			graph_unlock();
 			goto cache_hit;
@@ -2095,7 +2095,7 @@
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
-	list_add_tail_rcu(&chain->entry, hash_head);
+	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();

@@ -3879,7 +3879,7 @@
 	nr_process_chains = 0;
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
 	raw_local_irq_restore(flags);
 }

@@ -3898,7 +3898,7 @@
 	/*
 	 * Unhash the class and remove it from the all_lock_classes list:
 	 */
-	list_del_rcu(&class->hash_entry);
+	hlist_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);

 	RCU_INIT_POINTER(class->key, NULL);
@@ -3921,7 +3921,7 @@
 void lockdep_free_key_range(void *start, unsigned long size)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i;
 	int locked;
@@ -3934,9 +3934,7 @@
 	 */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3964,7 +3966,7 @@
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
 	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
 	unsigned long flags;
 	int i, j;
 	int locked;
@@ -3989,9 +3991,7 @@
 	locked = graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;

 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4031,10 @@
 		return;

 	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_LIST_HEAD(classhash_table + i);
+		INIT_HLIST_HEAD(classhash_table + i);

 	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);

 	lockdep_initialized = 1;
 }