Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/lockdep: Eliminate lockdep_init()

Lockdep is initialized at compile time now. Get rid of lockdep_init().

Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Krinkin <krinkin.m.u@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: mm-commits@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Andrey Ryabinin and committed by Ingo Molnar.
06bea3db a63f38cc

-98
-2
arch/c6x/kernel/setup.c
··· 281 281 */ 282 282 set_ist(_vectors_start); 283 283 284 - lockdep_init(); 285 - 286 284 /* 287 285 * dtb is passed in from bootloader. 288 286 * fdt is linked in blob.
-2
arch/microblaze/kernel/setup.c
··· 130 130 memset(__bss_start, 0, __bss_stop-__bss_start); 131 131 memset(_ssbss, 0, _esbss-_ssbss); 132 132 133 - lockdep_init(); 134 - 135 133 /* initialize device tree for usage in early_printk */ 136 134 early_init_devtree(_fdt_start); 137 135
-2
arch/powerpc/kernel/setup_32.c
··· 114 114 115 115 notrace void __init machine_init(u64 dt_ptr) 116 116 { 117 - lockdep_init(); 118 - 119 117 /* Enable early debugging if any specified (see udbg.h) */ 120 118 udbg_early_init(); 121 119
-3
arch/powerpc/kernel/setup_64.c
··· 255 255 setup_paca(&boot_paca); 256 256 fixup_boot_paca(); 257 257 258 - /* Initialize lockdep early or else spinlocks will blow */ 259 - lockdep_init(); 260 - 261 258 /* -------- printk is now safe to use ------- */ 262 259 263 260 /* Enable early debugging if any specified (see udbg.h) */
-1
arch/s390/kernel/early.c
··· 448 448 rescue_initrd(); 449 449 clear_bss_section(); 450 450 init_kernel_storage_key(); 451 - lockdep_init(); 452 451 lockdep_off(); 453 452 setup_lowcore_early(); 454 453 setup_facility_list();
-8
arch/sparc/kernel/head_64.S
··· 696 696 call __bzero 697 697 sub %o1, %o0, %o1 698 698 699 - #ifdef CONFIG_LOCKDEP 700 - /* We have this call this super early, as even prom_init can grab 701 - * spinlocks and thus call into the lockdep code. 702 - */ 703 - call lockdep_init 704 - nop 705 - #endif 706 - 707 699 call prom_init 708 700 mov %l7, %o0 ! OpenPROM cif handler 709 701
-6
arch/x86/lguest/boot.c
··· 1520 1520 */ 1521 1521 reserve_top_address(lguest_data.reserve_mem); 1522 1522 1523 - /* 1524 - * If we don't initialize the lock dependency checker now, it crashes 1525 - * atomic_notifier_chain_register, then paravirt_disable_iospace. 1526 - */ 1527 - lockdep_init(); 1528 - 1529 1523 /* Hook in our special panic hypercall code. */ 1530 1524 atomic_notifier_chain_register(&panic_notifier_list, &paniced); 1531 1525
-2
include/linux/lockdep.h
··· 261 261 /* 262 262 * Initialization, self-test and debugging-output methods: 263 263 */ 264 - extern void lockdep_init(void); 265 264 extern void lockdep_info(void); 266 265 extern void lockdep_reset(void); 267 266 extern void lockdep_reset_lock(struct lockdep_map *lock); ··· 391 392 # define lockdep_set_current_reclaim_state(g) do { } while (0) 392 393 # define lockdep_clear_current_reclaim_state() do { } while (0) 393 394 # define lockdep_trace_alloc(g) do { } while (0) 394 - # define lockdep_init() do { } while (0) 395 395 # define lockdep_info() do { } while (0) 396 396 # define lockdep_init_map(lock, name, key, sub) \ 397 397 do { (void)(name); (void)(key); } while (0)
-5
init/main.c
··· 499 499 char *command_line; 500 500 char *after_dashes; 501 501 502 - /* 503 - * Need to run as early as possible, to initialize the 504 - * lockdep hash: 505 - */ 506 - lockdep_init(); 507 502 set_task_stack_end_magic(&init_task); 508 503 smp_setup_processor_id(); 509 504 debug_objects_early_init();
-59
kernel/locking/lockdep.c
··· 123 123 return ret; 124 124 } 125 125 126 - static int lockdep_initialized; 127 - 128 126 unsigned long nr_list_entries; 129 127 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 130 128 ··· 432 434 433 435 #ifdef CONFIG_DEBUG_LOCKDEP 434 436 /* 435 - * We cannot printk in early bootup code. Not even early_printk() 436 - * might work. So we mark any initialization errors and printk 437 - * about it later on, in lockdep_info(). 438 - */ 439 - static int lockdep_init_error; 440 - static const char *lock_init_error; 441 - static unsigned long lockdep_init_trace_data[20]; 442 - static struct stack_trace lockdep_init_trace = { 443 - .max_entries = ARRAY_SIZE(lockdep_init_trace_data), 444 - .entries = lockdep_init_trace_data, 445 - }; 446 - 447 - /* 448 437 * Various lockdep statistics: 449 438 */ 450 439 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); ··· 653 668 struct lockdep_subclass_key *key; 654 669 struct hlist_head *hash_head; 655 670 struct lock_class *class; 656 - 657 - #ifdef CONFIG_DEBUG_LOCKDEP 658 - /* 659 - * If the architecture calls into lockdep before initializing 660 - * the hashes then we'll warn about it later. 
(we cannot printk 661 - * right now) 662 - */ 663 - if (unlikely(!lockdep_initialized)) { 664 - lockdep_init(); 665 - lockdep_init_error = 1; 666 - lock_init_error = lock->name; 667 - save_stack_trace(&lockdep_init_trace); 668 - } 669 - #endif 670 671 671 672 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { 672 673 debug_locks_off(); ··· 3984 4013 raw_local_irq_restore(flags); 3985 4014 } 3986 4015 3987 - void lockdep_init(void) 3988 - { 3989 - int i; 3990 - 3991 - /* 3992 - * Some architectures have their own start_kernel() 3993 - * code which calls lockdep_init(), while we also 3994 - * call lockdep_init() from the start_kernel() itself, 3995 - * and we want to initialize the hashes only once: 3996 - */ 3997 - if (lockdep_initialized) 3998 - return; 3999 - 4000 - for (i = 0; i < CLASSHASH_SIZE; i++) 4001 - INIT_HLIST_HEAD(classhash_table + i); 4002 - 4003 - for (i = 0; i < CHAINHASH_SIZE; i++) 4004 - INIT_HLIST_HEAD(chainhash_table + i); 4005 - 4006 - lockdep_initialized = 1; 4007 - } 4008 - 4009 4016 void __init lockdep_info(void) 4010 4017 { 4011 4018 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); ··· 4010 4061 4011 4062 printk(" per task-struct memory footprint: %lu bytes\n", 4012 4063 sizeof(struct held_lock) * MAX_LOCK_DEPTH); 4013 - 4014 - #ifdef CONFIG_DEBUG_LOCKDEP 4015 - if (lockdep_init_error) { 4016 - printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error); 4017 - printk("Call stack leading to lockdep invocation was:\n"); 4018 - print_stack_trace(&lockdep_init_trace, 0); 4019 - } 4020 - #endif 4021 4064 } 4022 4065 4023 4066 static void
-5
tools/lib/lockdep/common.c
··· 11 11 bool debug_locks = true; 12 12 bool debug_locks_silent; 13 13 14 - __attribute__((constructor)) static void liblockdep_init(void) 15 - { 16 - lockdep_init(); 17 - } 18 - 19 14 __attribute__((destructor)) static void liblockdep_exit(void) 20 15 { 21 16 debug_check_no_locks_held();
-1
tools/lib/lockdep/include/liblockdep/common.h
··· 44 44 void lock_release(struct lockdep_map *lock, int nested, 45 45 unsigned long ip); 46 46 extern void debug_check_no_locks_freed(const void *from, unsigned long len); 47 - extern void lockdep_init(void); 48 47 49 48 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ 50 49 { .name = (_name), .key = (void *)(_key), }
-2
tools/lib/lockdep/preload.c
··· 439 439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock"); 440 440 #endif 441 441 442 - lockdep_init(); 443 - 444 442 __init_state = done; 445 443 }