Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: fix build if CONFIG_PROVE_LOCKING not defined
  lockdep: use WARN() in kernel/lockdep.c
  lockdep: spin_lock_nest_lock(), checkpatch fixes
  lockdep: build fix

+21 -13
+2 -4
kernel/lockdep.c
··· 1759 1759 hlock = curr->held_locks + i; 1760 1760 if (chain_key != hlock->prev_chain_key) { 1761 1761 debug_locks_off(); 1762 - printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", 1762 + WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 1763 1763 curr->lockdep_depth, i, 1764 1764 (unsigned long long)chain_key, 1765 1765 (unsigned long long)hlock->prev_chain_key); 1766 - WARN_ON(1); 1767 1766 return; 1768 1767 } 1769 1768 id = hlock->class_idx - 1; ··· 1777 1778 } 1778 1779 if (chain_key != curr->curr_chain_key) { 1779 1780 debug_locks_off(); 1780 - printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", 1781 + WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 1781 1782 curr->lockdep_depth, i, 1782 1783 (unsigned long long)chain_key, 1783 1784 (unsigned long long)curr->curr_chain_key); 1784 - WARN_ON(1); 1785 1785 } 1786 1786 #endif 1787 1787 }
+13
kernel/lockdep_internals.h
··· 50 50 extern unsigned int max_lockdep_depth; 51 51 extern unsigned int max_recursion_depth; 52 52 53 + #ifdef CONFIG_PROVE_LOCKING 53 54 extern unsigned long lockdep_count_forward_deps(struct lock_class *); 54 55 extern unsigned long lockdep_count_backward_deps(struct lock_class *); 56 + #else 57 + static inline unsigned long 58 + lockdep_count_forward_deps(struct lock_class *class) 59 + { 60 + return 0; 61 + } 62 + static inline unsigned long 63 + lockdep_count_backward_deps(struct lock_class *class) 64 + { 65 + return 0; 66 + } 67 + #endif 55 68 56 69 #ifdef CONFIG_DEBUG_LOCKDEP 57 70 /*
+6 -6
kernel/lockdep_proc.c
··· 82 82 83 83 static int l_show(struct seq_file *m, void *v) 84 84 { 85 - unsigned long nr_forward_deps, nr_backward_deps; 86 85 struct lock_class *class = v; 87 86 struct lock_list *entry; 88 87 char c1, c2, c3, c4; ··· 95 96 #ifdef CONFIG_DEBUG_LOCKDEP 96 97 seq_printf(m, " OPS:%8ld", class->ops); 97 98 #endif 98 - nr_forward_deps = lockdep_count_forward_deps(class); 99 - seq_printf(m, " FD:%5ld", nr_forward_deps); 100 - 101 - nr_backward_deps = lockdep_count_backward_deps(class); 102 - seq_printf(m, " BD:%5ld", nr_backward_deps); 99 + #ifdef CONFIG_PROVE_LOCKING 100 + seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class)); 101 + seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class)); 102 + #endif 103 103 104 104 get_usage_chars(class, &c1, &c2, &c3, &c4); 105 105 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); ··· 323 325 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 324 326 nr_hardirq_read_unsafe++; 325 327 328 + #ifdef CONFIG_PROVE_LOCKING 326 329 sum_forward_deps += lockdep_count_forward_deps(class); 330 + #endif 327 331 } 328 332 #ifdef CONFIG_DEBUG_LOCKDEP 329 333 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
-3
kernel/spinlock.c
··· 290 290 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 291 291 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 292 292 } 293 - 294 293 EXPORT_SYMBOL(_spin_lock_nested); 295 294 296 295 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) ··· 311 312 #endif 312 313 return flags; 313 314 } 314 - 315 315 EXPORT_SYMBOL(_spin_lock_irqsave_nested); 316 316 317 317 void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, ··· 320 322 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); 321 323 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 322 324 } 323 - 324 325 EXPORT_SYMBOL(_spin_lock_nest_lock); 325 326 326 327 #endif