Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] lockdep: locking init debugging improvement

Locking init improvement:

- introduce __SPIN_LOCK_UNLOCKED (and the matching __RW_LOCK_UNLOCKED and
__SEQLOCK_UNLOCKED) and use it for static initializations, so that the name
string of each lock can be passed in for use by the lock debugging code

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar, committed by Linus Torvalds
e4d91918 9cebb552

+37 -26
+3 -3
drivers/char/random.c
··· 416 416 .poolinfo = &poolinfo_table[0], 417 417 .name = "input", 418 418 .limit = 1, 419 - .lock = SPIN_LOCK_UNLOCKED, 419 + .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock), 420 420 .pool = input_pool_data 421 421 }; 422 422 ··· 425 425 .name = "blocking", 426 426 .limit = 1, 427 427 .pull = &input_pool, 428 - .lock = SPIN_LOCK_UNLOCKED, 428 + .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock), 429 429 .pool = blocking_pool_data 430 430 }; 431 431 ··· 433 433 .poolinfo = &poolinfo_table[1], 434 434 .name = "nonblocking", 435 435 .pull = &input_pool, 436 - .lock = SPIN_LOCK_UNLOCKED, 436 + .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock), 437 437 .pool = nonblocking_pool_data 438 438 }; 439 439
+1 -1
fs/dcache.c
··· 38 38 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); 39 39 40 40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); 41 - static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; 41 + static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); 42 42 43 43 EXPORT_SYMBOL(dcache_lock); 44 44
+1 -1
include/linux/idr.h
··· 66 66 .id_free = NULL, \ 67 67 .layers = 0, \ 68 68 .id_free_cnt = 0, \ 69 - .lock = SPIN_LOCK_UNLOCKED, \ 69 + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 70 70 } 71 71 #define DEFINE_IDR(name) struct idr name = IDR_INIT(name) 72 72
+5 -5
include/linux/init_task.h
··· 21 21 .count = ATOMIC_INIT(1), \ 22 22 .fdt = &init_files.fdtab, \ 23 23 .fdtab = INIT_FDTABLE, \ 24 - .file_lock = SPIN_LOCK_UNLOCKED, \ 24 + .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), \ 25 25 .next_fd = 0, \ 26 26 .close_on_exec_init = { { 0, } }, \ 27 27 .open_fds_init = { { 0, } }, \ ··· 36 36 .user_id = 0, \ 37 37 .next = NULL, \ 38 38 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ 39 - .ctx_lock = SPIN_LOCK_UNLOCKED, \ 39 + .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ 40 40 .reqs_active = 0U, \ 41 41 .max_reqs = ~0U, \ 42 42 } ··· 48 48 .mm_users = ATOMIC_INIT(2), \ 49 49 .mm_count = ATOMIC_INIT(1), \ 50 50 .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ 51 - .page_table_lock = SPIN_LOCK_UNLOCKED, \ 51 + .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \ 52 52 .mmlist = LIST_HEAD_INIT(name.mmlist), \ 53 53 .cpu_vm_mask = CPU_MASK_ALL, \ 54 54 } ··· 69 69 #define INIT_SIGHAND(sighand) { \ 70 70 .count = ATOMIC_INIT(1), \ 71 71 .action = { { { .sa_handler = NULL, } }, }, \ 72 - .siglock = SPIN_LOCK_UNLOCKED, \ 72 + .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \ 73 73 } 74 74 75 75 extern struct group_info init_groups; ··· 119 119 .list = LIST_HEAD_INIT(tsk.pending.list), \ 120 120 .signal = {{0}}}, \ 121 121 .blocked = {{0}}, \ 122 - .alloc_lock = SPIN_LOCK_UNLOCKED, \ 122 + .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ 123 123 .journal_info = NULL, \ 124 124 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 125 125 .fs_excl = ATOMIC_INIT(0), \
+1 -1
include/linux/notifier.h
··· 65 65 } while (0) 66 66 67 67 #define ATOMIC_NOTIFIER_INIT(name) { \ 68 - .lock = SPIN_LOCK_UNLOCKED, \ 68 + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 69 69 .head = NULL } 70 70 #define BLOCKING_NOTIFIER_INIT(name) { \ 71 71 .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
+10 -2
include/linux/seqlock.h
··· 38 38 * These macros triggered gcc-3.x compile-time problems. We think these are 39 39 * OK now. Be cautious. 40 40 */ 41 - #define SEQLOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED } 42 - #define seqlock_init(x) do { *(x) = (seqlock_t) SEQLOCK_UNLOCKED; } while (0) 41 + #define __SEQLOCK_UNLOCKED(lockname) \ 42 + { 0, __SPIN_LOCK_UNLOCKED(lockname) } 43 43 44 + #define SEQLOCK_UNLOCKED \ 45 + __SEQLOCK_UNLOCKED(old_style_seqlock_init) 46 + 47 + #define seqlock_init(x) \ 48 + do { *(x) = (seqlock_t) __SEQLOCK_UNLOCKED(x); } while (0) 49 + 50 + #define DEFINE_SEQLOCK(x) \ 51 + seqlock_t x = __SEQLOCK_UNLOCKED(x) 44 52 45 53 /* Lock out other writers and update the count. 46 54 * Acts like a normal spin_lock/unlock.
+9 -6
include/linux/spinlock_types.h
··· 44 44 #define SPINLOCK_OWNER_INIT ((void *)-1L) 45 45 46 46 #ifdef CONFIG_DEBUG_SPINLOCK 47 - # define SPIN_LOCK_UNLOCKED \ 47 + # define __SPIN_LOCK_UNLOCKED(lockname) \ 48 48 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 49 49 .magic = SPINLOCK_MAGIC, \ 50 50 .owner = SPINLOCK_OWNER_INIT, \ 51 51 .owner_cpu = -1 } 52 - #define RW_LOCK_UNLOCKED \ 52 + #define __RW_LOCK_UNLOCKED(lockname) \ 53 53 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 54 54 .magic = RWLOCK_MAGIC, \ 55 55 .owner = SPINLOCK_OWNER_INIT, \ 56 56 .owner_cpu = -1 } 57 57 #else 58 - # define SPIN_LOCK_UNLOCKED \ 58 + # define __SPIN_LOCK_UNLOCKED(lockname) \ 59 59 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED } 60 - #define RW_LOCK_UNLOCKED \ 60 + #define __RW_LOCK_UNLOCKED(lockname) \ 61 61 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED } 62 62 #endif 63 63 64 - #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED 65 - #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED 64 + #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) 65 + #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) 66 + 67 + #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) 68 + #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 66 69 67 70 #endif /* __LINUX_SPINLOCK_TYPES_H */
+1 -1
include/linux/wait.h
··· 68 68 wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) 69 69 70 70 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ 71 - .lock = SPIN_LOCK_UNLOCKED, \ 71 + .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 72 72 .task_list = { &(name).task_list, &(name).task_list } } 73 73 74 74 #define DECLARE_WAIT_QUEUE_HEAD(name) \
+2 -2
kernel/rcupdate.c
··· 53 53 static struct rcu_ctrlblk rcu_ctrlblk = { 54 54 .cur = -300, 55 55 .completed = -300, 56 - .lock = SPIN_LOCK_UNLOCKED, 56 + .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 57 57 .cpumask = CPU_MASK_NONE, 58 58 }; 59 59 static struct rcu_ctrlblk rcu_bh_ctrlblk = { 60 60 .cur = -300, 61 61 .completed = -300, 62 - .lock = SPIN_LOCK_UNLOCKED, 62 + .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 63 63 .cpumask = CPU_MASK_NONE, 64 64 }; 65 65
+1 -1
kernel/timer.c
··· 1208 1208 * playing with xtime and avenrun. 1209 1209 */ 1210 1210 #ifndef ARCH_HAVE_XTIME_LOCK 1211 - seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; 1211 + __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); 1212 1212 1213 1213 EXPORT_SYMBOL(xtime_lock); 1214 1214 #endif
+1 -1
mm/swap_state.c
··· 38 38 39 39 struct address_space swapper_space = { 40 40 .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), 41 - .tree_lock = RW_LOCK_UNLOCKED, 41 + .tree_lock = __RW_LOCK_UNLOCKED(swapper_space.tree_lock), 42 42 .a_ops = &swap_aops, 43 43 .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), 44 44 .backing_dev_info = &swap_backing_dev_info,
+1 -1
net/ipv4/tcp_ipv4.c
··· 90 90 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); 91 91 92 92 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = { 93 - .lhash_lock = RW_LOCK_UNLOCKED, 93 + .lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock), 94 94 .lhash_users = ATOMIC_INIT(0), 95 95 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), 96 96 };
+1 -1
net/ipv4/tcp_minisocks.c
··· 40 40 struct inet_timewait_death_row tcp_death_row = { 41 41 .sysctl_max_tw_buckets = NR_FILE * 2, 42 42 .period = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS, 43 - .death_lock = SPIN_LOCK_UNLOCKED, 43 + .death_lock = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock), 44 44 .hashinfo = &tcp_hashinfo, 45 45 .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, 46 46 (unsigned long)&tcp_death_row),