Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kernel/locking/lockdep.c: convert hash tables to hlists

Mike said:

: CONFIG_UBSAN_ALIGNMENT breaks x86-64 kernel with lockdep enabled, i.e.,
: kernel with CONFIG_UBSAN_ALIGNMENT fails to load without even any error
: message.
:
: The problem is that ubsan callbacks use spinlocks and might be called
: before lockdep is initialized. Particularly this line in the
: reserve_ebda_region function causes a problem:
:
: lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
:
: If I put lockdep_init() before the reserve_ebda_region call in
: x86_64_start_reservations kernel loads well.

Fix this ordering issue permanently: change lockdep so that it uses
hlists for the hash tables. Unlike a list_head, an hlist_head is in its
initialized state when it is all-zeroes, so lockdep is ready for
operation immediately upon boot - lockdep_init() need not have run.

The patch will also save some memory.

lockdep_init() and lockdep_initialized can be done away with now - a 4.6
patch has been prepared to do this.

Reported-by: Mike Krinkin <krinkin.m.u@gmail.com>
Suggested-by: Mike Krinkin <krinkin.m.u@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Andrew Morton and committed by
Linus Torvalds
4a389810 6b75d149

+21 -25
+2 -2
include/linux/lockdep.h
··· 66 66 /* 67 67 * class-hash: 68 68 */ 69 - struct list_head hash_entry; 69 + struct hlist_node hash_entry; 70 70 71 71 /* 72 72 * global list of all lock-classes: ··· 199 199 u8 irq_context; 200 200 u8 depth; 201 201 u16 base; 202 - struct list_head entry; 202 + struct hlist_node entry; 203 203 u64 chain_key; 204 204 }; 205 205
+19 -23
kernel/locking/lockdep.c
··· 292 292 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) 293 293 #define classhashentry(key) (classhash_table + __classhashfn((key))) 294 294 295 - static struct list_head classhash_table[CLASSHASH_SIZE]; 295 + static struct hlist_head classhash_table[CLASSHASH_SIZE]; 296 296 297 297 /* 298 298 * We put the lock dependency chains into a hash-table as well, to cache ··· 303 303 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) 304 304 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) 305 305 306 - static struct list_head chainhash_table[CHAINHASH_SIZE]; 306 + static struct hlist_head chainhash_table[CHAINHASH_SIZE]; 307 307 308 308 /* 309 309 * The hash key of the lock dependency chains is a hash itself too: ··· 666 666 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) 667 667 { 668 668 struct lockdep_subclass_key *key; 669 - struct list_head *hash_head; 669 + struct hlist_head *hash_head; 670 670 struct lock_class *class; 671 671 672 672 #ifdef CONFIG_DEBUG_LOCKDEP ··· 719 719 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 720 720 return NULL; 721 721 722 - list_for_each_entry_rcu(class, hash_head, hash_entry) { 722 + hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 723 723 if (class->key == key) { 724 724 /* 725 725 * Huh! same key, different name? 
Did someone trample ··· 742 742 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) 743 743 { 744 744 struct lockdep_subclass_key *key; 745 - struct list_head *hash_head; 745 + struct hlist_head *hash_head; 746 746 struct lock_class *class; 747 747 748 748 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); ··· 774 774 * We have to do the hash-walk again, to avoid races 775 775 * with another CPU: 776 776 */ 777 - list_for_each_entry_rcu(class, hash_head, hash_entry) { 777 + hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 778 778 if (class->key == key) 779 779 goto out_unlock_set; 780 780 } ··· 805 805 * We use RCU's safe list-add method to make 806 806 * parallel walking of the hash-list safe: 807 807 */ 808 - list_add_tail_rcu(&class->hash_entry, hash_head); 808 + hlist_add_head_rcu(&class->hash_entry, hash_head); 809 809 /* 810 810 * Add it to the global list of classes: 811 811 */ ··· 2017 2017 u64 chain_key) 2018 2018 { 2019 2019 struct lock_class *class = hlock_class(hlock); 2020 - struct list_head *hash_head = chainhashentry(chain_key); 2020 + struct hlist_head *hash_head = chainhashentry(chain_key); 2021 2021 struct lock_chain *chain; 2022 2022 struct held_lock *hlock_curr; 2023 2023 int i, j; ··· 2033 2033 * We can walk it lock-free, because entries only get added 2034 2034 * to the hash: 2035 2035 */ 2036 - list_for_each_entry_rcu(chain, hash_head, entry) { 2036 + hlist_for_each_entry_rcu(chain, hash_head, entry) { 2037 2037 if (chain->chain_key == chain_key) { 2038 2038 cache_hit: 2039 2039 debug_atomic_inc(chain_lookup_hits); ··· 2057 2057 /* 2058 2058 * We have to walk the chain again locked - to avoid duplicates: 2059 2059 */ 2060 - list_for_each_entry(chain, hash_head, entry) { 2060 + hlist_for_each_entry(chain, hash_head, entry) { 2061 2061 if (chain->chain_key == chain_key) { 2062 2062 graph_unlock(); 2063 2063 goto cache_hit; ··· 2091 2091 } 2092 2092 chain_hlocks[chain->base + j] = class - lock_classes; 2093 2093 } 2094 - 
list_add_tail_rcu(&chain->entry, hash_head); 2094 + hlist_add_head_rcu(&chain->entry, hash_head); 2095 2095 debug_atomic_inc(chain_lookup_misses); 2096 2096 inc_chains(); 2097 2097 ··· 3875 3875 nr_process_chains = 0; 3876 3876 debug_locks = 1; 3877 3877 for (i = 0; i < CHAINHASH_SIZE; i++) 3878 - INIT_LIST_HEAD(chainhash_table + i); 3878 + INIT_HLIST_HEAD(chainhash_table + i); 3879 3879 raw_local_irq_restore(flags); 3880 3880 } 3881 3881 ··· 3894 3894 /* 3895 3895 * Unhash the class and remove it from the all_lock_classes list: 3896 3896 */ 3897 - list_del_rcu(&class->hash_entry); 3897 + hlist_del_rcu(&class->hash_entry); 3898 3898 list_del_rcu(&class->lock_entry); 3899 3899 3900 3900 RCU_INIT_POINTER(class->key, NULL); ··· 3917 3917 void lockdep_free_key_range(void *start, unsigned long size) 3918 3918 { 3919 3919 struct lock_class *class; 3920 - struct list_head *head; 3920 + struct hlist_head *head; 3921 3921 unsigned long flags; 3922 3922 int i; 3923 3923 int locked; ··· 3930 3930 */ 3931 3931 for (i = 0; i < CLASSHASH_SIZE; i++) { 3932 3932 head = classhash_table + i; 3933 - if (list_empty(head)) 3934 - continue; 3935 - list_for_each_entry_rcu(class, head, hash_entry) { 3933 + hlist_for_each_entry_rcu(class, head, hash_entry) { 3936 3934 if (within(class->key, start, size)) 3937 3935 zap_class(class); 3938 3936 else if (within(class->name, start, size)) ··· 3960 3962 void lockdep_reset_lock(struct lockdep_map *lock) 3961 3963 { 3962 3964 struct lock_class *class; 3963 - struct list_head *head; 3965 + struct hlist_head *head; 3964 3966 unsigned long flags; 3965 3967 int i, j; 3966 3968 int locked; ··· 3985 3987 locked = graph_lock(); 3986 3988 for (i = 0; i < CLASSHASH_SIZE; i++) { 3987 3989 head = classhash_table + i; 3988 - if (list_empty(head)) 3989 - continue; 3990 - list_for_each_entry_rcu(class, head, hash_entry) { 3990 + hlist_for_each_entry_rcu(class, head, hash_entry) { 3991 3991 int match = 0; 3992 3992 3993 3993 for (j = 0; j < 
NR_LOCKDEP_CACHING_CLASSES; j++) ··· 4023 4027 return; 4024 4028 4025 4029 for (i = 0; i < CLASSHASH_SIZE; i++) 4026 - INIT_LIST_HEAD(classhash_table + i); 4030 + INIT_HLIST_HEAD(classhash_table + i); 4027 4031 4028 4032 for (i = 0; i < CHAINHASH_SIZE; i++) 4029 - INIT_LIST_HEAD(chainhash_table + i); 4033 + INIT_HLIST_HEAD(chainhash_table + i); 4030 4034 4031 4035 lockdep_initialized = 1; 4032 4036 }