
[PATCH] lockdep: prove spinlock rwlock locking correctness

Use the lock validator framework to prove spinlock and rwlock locking
correctness.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar, committed by Linus Torvalds
Commit 8a25d5de (parent 4ea2176d)

13 files changed: +217 -34
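
As an illustration (not part of the patch): the classic bug class the
validator catches is inconsistent lock ordering between two code paths. With
CONFIG_PROVE_LOCKING enabled, the first path below teaches lockdep the
dependency a -> b; the first time the second path runs, the b -> a ordering
is reported as a potential AB-BA deadlock, without the deadlock ever having
to actually trigger. The locks and functions are hypothetical:

    static DEFINE_SPINLOCK(a);
    static DEFINE_SPINLOCK(b);

    void path_one(void)
    {
            spin_lock(&a);
            spin_lock(&b);          /* records the dependency a -> b */
            spin_unlock(&b);
            spin_unlock(&a);
    }

    void path_two(void)
    {
            spin_lock(&b);
            spin_lock(&a);          /* b -> a: lockdep reports the inversion */
            spin_unlock(&a);
            spin_unlock(&b);
    }
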
+7
include/asm-i386/spinlock.h
···
         "=m" (lock->slock) : : "memory");
 }
 
+/*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ */
+#ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
         alternative_smp(
···
         __raw_spin_lock_string_up,
         "=m" (lock->slock) : "r" (flags) : "memory");
 }
+#endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
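
Why compile __raw_spin_lock_flags() out: on i386 the flags-aware variant may
briefly re-enable interrupts while it spins on a contended lock. Roughly, the
assembly loop behaves like this C sketch (try_acquire() and is_locked() are
placeholder names, 0x200 is the IF bit in EFLAGS):

    while (!try_acquire(lock)) {
            if (flags & 0x200)
                    local_irq_enable();     /* irqs back on while we wait */
            while (is_locked(lock))
                    cpu_relax();
            local_irq_disable();            /* off again before retrying */
    }

Lockdep records the interrupt state at acquire time, so an acquire that
toggles interrupts halfway through would look inconsistent; under
CONFIG_PROVE_LOCKING the generic code falls back to plain _raw_spin_lock()
instead (see the kernel/spinlock.c change below).
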
+46 -17
include/linux/spinlock.h
···
 /*
  * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
  */
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 # include <asm/spinlock.h>
 #else
 # include <linux/spinlock_up.h>
 #endif
 
-#define spin_lock_init(lock)    do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#define rwlock_init(lock)       do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+  extern void __spin_lock_init(spinlock_t *lock, const char *name,
+                               struct lock_class_key *key);
+# define spin_lock_init(lock) \
+do { \
+        static struct lock_class_key __key; \
+ \
+        __spin_lock_init((lock), #lock, &__key); \
+} while (0)
+
+#else
+# define spin_lock_init(lock) \
+        do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+  extern void __rwlock_init(rwlock_t *lock, const char *name,
+                            struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+        static struct lock_class_key __key; \
+ \
+        __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+        do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
 
 #define spin_is_locked(lock)    __raw_spin_is_locked(&(lock)->raw_lock)
···
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
-
 extern void _raw_read_lock(rwlock_t *lock);
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
···
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
 #else
-# define _raw_spin_unlock(lock)         __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_spin_trylock(lock)        __raw_spin_trylock(&(lock)->raw_lock)
 # define _raw_spin_lock(lock)           __raw_spin_lock(&(lock)->raw_lock)
 # define _raw_spin_lock_flags(lock, flags) \
                 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock)        __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock)         __raw_spin_unlock(&(lock)->raw_lock)
 # define _raw_read_lock(rwlock)         __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)        __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)       __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)      __raw_write_unlock(&(rwlock)->raw_lock)
 # define _raw_read_trylock(rwlock)      __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)       __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)        __raw_write_lock(&(rwlock)->raw_lock)
 # define _raw_write_trylock(rwlock)     __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)      __raw_write_unlock(&(rwlock)->raw_lock)
 #endif
 
 #define read_can_lock(rwlock)           __raw_read_can_lock(&(rwlock)->raw_lock)
···
 #define write_trylock(lock)             __cond_lock(_write_trylock(lock))
 
 #define spin_lock(lock)                 _spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+#else
+# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+#endif
+
 #define write_lock(lock)                _write_lock(lock)
 #define read_lock(lock)                 _read_lock(lock)
···
 /*
  * We inline the unlock functions in the nondebug case:
  */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+        !defined(CONFIG_SMP)
 # define spin_unlock(lock)              _spin_unlock(lock)
 # define read_unlock(lock)              _read_unlock(lock)
 # define write_unlock(lock)             _write_unlock(lock)
-#else
-# define spin_unlock(lock)              __raw_spin_unlock(&(lock)->raw_lock)
-# define read_unlock(lock)              __raw_read_unlock(&(lock)->raw_lock)
-# define write_unlock(lock)             __raw_write_unlock(&(lock)->raw_lock)
-#endif
-
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
 # define spin_unlock_irq(lock)          _spin_unlock_irq(lock)
 # define read_unlock_irq(lock)          _read_unlock_irq(lock)
 # define write_unlock_irq(lock)         _write_unlock_irq(lock)
 #else
+# define spin_unlock(lock)              __raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock)              __raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock)             __raw_write_unlock(&(lock)->raw_lock)
 # define spin_unlock_irq(lock) \
         do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
 # define read_unlock_irq(lock) \
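
The static struct lock_class_key inside the new spin_lock_init() is what
gives every initialization site its own lock class: the key's address
identifies the class, so all locks initialized by the same line of code are
validated as one class. A sketch of the effect, with a hypothetical struct:

    struct foo {
            spinlock_t lock;
    };

    void init_foo(struct foo *f)
    {
            /*
             * Every foo initialized here shares the one static __key
             * baked into this call site, hence one lockdep class -
             * ordering rules learned on any instance apply to all.
             */
            spin_lock_init(&f->lock);
    }

spin_lock_nested() is the matching escape hatch for legitimately nesting two
locks of the same class; a usage example follows the kernel/spinlock.c diff
below.
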
+2
include/linux/spinlock_api_smp.h
···
 #define assert_spin_locked(x)   BUG_ON(!spin_is_locked(x))
 
 void __lockfunc _spin_lock(spinlock_t *lock)            __acquires(spinlock_t);
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+                                                        __acquires(spinlock_t);
 void __lockfunc _read_lock(rwlock_t *lock)              __acquires(rwlock_t);
 void __lockfunc _write_lock(rwlock_t *lock)             __acquires(rwlock_t);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)         __acquires(spinlock_t);
+1
include/linux/spinlock_api_up.h
···
   do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
 
 #define _spin_lock(lock)                        __LOCK(lock)
+#define _spin_lock_nested(lock, subclass)       __LOCK(lock)
 #define _read_lock(lock)                        __LOCK(lock)
 #define _write_lock(lock)                       __LOCK(lock)
 #define _spin_lock_bh(lock)                     __LOCK_BH(lock)
+28 -4
include/linux/spinlock_types.h
···
  * Released under the General Public License (GPL).
  */
 
+#include <linux/lockdep.h>
+
 #if defined(CONFIG_SMP)
 # include <asm/spinlock_types.h>
 #else
···
         unsigned int magic, owner_cpu;
         void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map dep_map;
+#endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC          0xdead4ead
···
         unsigned int magic, owner_cpu;
         void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map dep_map;
+#endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC            0xdeaf1eed
 
 #define SPINLOCK_OWNER_INIT     ((void *)-1L)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname)    .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)      .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname) \
         (spinlock_t) {  .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
                         .magic = SPINLOCK_MAGIC, \
                         .owner = SPINLOCK_OWNER_INIT, \
-                        .owner_cpu = -1 }
+                        .owner_cpu = -1, \
+                        SPIN_DEP_MAP_INIT(lockname) }
 #define __RW_LOCK_UNLOCKED(lockname) \
         (rwlock_t) {    .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
                         .magic = RWLOCK_MAGIC, \
                         .owner = SPINLOCK_OWNER_INIT, \
-                        .owner_cpu = -1 }
+                        .owner_cpu = -1, \
+                        RW_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-        (spinlock_t) {  .raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+        (spinlock_t) {  .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+                        SPIN_DEP_MAP_INIT(lockname) }
 #define __RW_LOCK_UNLOCKED(lockname) \
-        (rwlock_t) {    .raw_lock = __RAW_RW_LOCK_UNLOCKED }
+        (rwlock_t) {    .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+                        RW_DEP_MAP_INIT(lockname) }
 #endif
 
 #define SPIN_LOCK_UNLOCKED      __SPIN_LOCK_UNLOCKED(old_style_spin_init)
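
Note the name threaded through the initializers: a lock set up with
__SPIN_LOCK_UNLOCKED(name) gets its own named class, while the legacy
SPIN_LOCK_UNLOCKED initializer maps every user onto the single
old_style_spin_init class, so unrelated locks end up validated together. A
sketch (assuming DEFINE_SPINLOCK(x) expands to __SPIN_LOCK_UNLOCKED(x) in
this era):

    static DEFINE_SPINLOCK(my_lock);        /* own class: "my_lock" */
    static spinlock_t old_lock = SPIN_LOCK_UNLOCKED;
                                            /* class "old_style_spin_init",
                                               shared with all legacy users */
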
+8 -1
include/linux/spinlock_types_up.h
···
  * Released under the General Public License (GPL).
  */
 
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_DEBUG_SPINLOCK) || \
+        defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 typedef struct {
         volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map dep_map;
+#endif
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
···
 typedef struct {
         /* no debug version on UP */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        struct lockdep_map dep_map;
+#endif
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED { }
-1
include/linux/spinlock_up.h
···
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-
 #define __raw_spin_is_locked(x)         ((x)->slock == 0)
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
+1
kernel/Makefile
···
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
+10
kernel/sched.c
···
         /* this is a valid case when another task releases the spinlock */
         rq->lock.owner = current;
 #endif
+        /*
+         * If we are tracking spinlock dependencies then we have to
+         * fix up the runqueue lock - which gets 'carried over' from
+         * prev into current:
+         */
+        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
         spin_unlock_irq(&rq->lock);
 }
 
···
                 WARN_ON(rq->prev_mm);
                 rq->prev_mm = oldmm;
         }
+        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
         /* Here we just switch the register state and the stack. */
         switch_to(prev, next, prev);
···
          * no need to preempt or enable interrupts:
          */
         __release(rq->lock);
+        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
         _raw_spin_unlock(&rq->lock);
         preempt_enable_no_resched();
 
···
                 spin_lock(lock);
         }
         if (need_resched() && __resched_legal()) {
+                spin_release(&lock->dep_map, 1, _THIS_IP_);
                 _raw_spin_unlock(lock);
                 preempt_enable_no_resched();
                 __cond_resched();
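
Taken together, the sched.c hunks hand the runqueue lock across the context
switch: the outgoing task took rq->lock, but the incoming task is the one
that unlocks it, so each side's lockdep bookkeeping is fixed up by hand. A
rough call outline (simplified from the hunks above, not compilable code):

    /* prev's side, in context_switch(), just before the stack switch: */
    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);  /* rq->lock leaves
                                                       prev's held locks */
    switch_to(prev, next, prev);                    /* now running as next */

    /* next's side, on the way out of the switch: */
    spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); /* rq->lock joins
                                                         next's held locks */
    spin_unlock_irq(&rq->lock);                     /* real unlock matches */
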
+70 -9
kernel/spinlock.c
···
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 #include <linux/module.h>
 
 /*
···
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
         preempt_disable();
-        if (_raw_spin_trylock(lock))
+        if (_raw_spin_trylock(lock)) {
+                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                 return 1;
+        }
 
         preempt_enable();
         return 0;
···
 int __lockfunc _read_trylock(rwlock_t *lock)
 {
         preempt_disable();
-        if (_raw_read_trylock(lock))
+        if (_raw_read_trylock(lock)) {
+                rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
                 return 1;
+        }
 
         preempt_enable();
         return 0;
···
 int __lockfunc _write_trylock(rwlock_t *lock)
 {
         preempt_disable();
-        if (_raw_write_trylock(lock))
+        if (_raw_write_trylock(lock)) {
+                rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                 return 1;
+        }
 
         preempt_enable();
         return 0;
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
+        defined(CONFIG_PROVE_LOCKING)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
         preempt_disable();
+        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);
···
 
         local_irq_save(flags);
         preempt_disable();
+        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+        /*
+         * On lockdep we dont want the hand-coded irq-enable of
+         * _raw_spin_lock_flags() code, because lockdep assumes
+         * that interrupts are not re-enabled during lock-acquire:
+         */
+#ifdef CONFIG_PROVE_LOCKING
+        _raw_spin_lock(lock);
+#else
         _raw_spin_lock_flags(lock, &flags);
+#endif
         return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
···
 {
         local_irq_disable();
         preempt_disable();
+        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
···
 {
         local_bh_disable();
         preempt_disable();
+        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
···
 
         local_irq_save(flags);
         preempt_disable();
+        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_read_lock(lock);
         return flags;
 }
···
 {
         local_irq_disable();
         preempt_disable();
+        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
···
 {
         local_bh_disable();
         preempt_disable();
+        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
···
 
         local_irq_save(flags);
         preempt_disable();
+        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_write_lock(lock);
         return flags;
 }
···
 {
         local_irq_disable();
         preempt_disable();
+        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
···
 {
         local_bh_disable();
         preempt_disable();
+        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
···
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
         preempt_disable();
+        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_spin_lock(lock);
 }
 
···
 void __lockfunc _write_lock(rwlock_t *lock)
 {
         preempt_disable();
+        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
         _raw_write_lock(lock);
 }
 
···
 
 #endif /* CONFIG_PREEMPT */
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+{
+        preempt_disable();
+        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+        _raw_spin_lock(lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nested);
+
+#endif
+
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
+        spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
         preempt_enable();
 }
···
 
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_write_unlock(lock);
         preempt_enable();
 }
···
 
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_read_unlock(lock);
         preempt_enable();
 }
···
 
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
+        spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
         local_irq_restore(flags);
         preempt_enable();
···
 
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
+        spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
         local_irq_enable();
         preempt_enable();
···
 
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
+        spin_release(&lock->dep_map, 1, _RET_IP_);
         _raw_spin_unlock(lock);
         preempt_enable_no_resched();
-        local_bh_enable();
+        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
 
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_read_unlock(lock);
         local_irq_restore(flags);
         preempt_enable();
···
 
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_read_unlock(lock);
         local_irq_enable();
         preempt_enable();
···
 
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_read_unlock(lock);
         preempt_enable_no_resched();
-        local_bh_enable();
+        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_read_unlock_bh);
 
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_write_unlock(lock);
         local_irq_restore(flags);
         preempt_enable();
···
 
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_write_unlock(lock);
         local_irq_enable();
         preempt_enable();
···
 
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
+        rwlock_release(&lock->dep_map, 1, _RET_IP_);
         _raw_write_unlock(lock);
         preempt_enable_no_resched();
-        local_bh_enable();
+        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_write_unlock_bh);
···
 {
         local_bh_disable();
         preempt_disable();
-        if (_raw_spin_trylock(lock))
+        if (_raw_spin_trylock(lock)) {
+                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                 return 1;
+        }
 
         preempt_enable_no_resched();
-        local_bh_enable();
+        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
         return 0;
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
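
The pattern above is uniform: the lockdep acquire hook runs after preemption
is disabled but before the low-level lock is taken (so a violation can be
reported while the lock is still free), trylocks pass trylock=1 and only on
success, and the release hook runs before the low-level unlock. The new
_spin_lock_nested() exists for callers that legitimately nest two locks of
the same class. A hedged sketch with hypothetical locks (subclass 1 marks
the inner lock as a distinct subclass):

    void lock_pair(spinlock_t *l1, spinlock_t *l2)
    {
            if (l1 > l2) {                  /* stable order by address */
                    spinlock_t *tmp = l1;
                    l1 = l2;
                    l2 = tmp;
            }
            spin_lock(l1);
            spin_lock_nested(l2, 1);        /* same class, subclass 1 -
                                               not a recursive deadlock */
            /* ... both locks held ... */
            spin_unlock(l2);
            spin_unlock(l1);
    }
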
+6 -1
lib/kernel_lock.c
···
 
 static inline void __unlock_kernel(void)
 {
-        spin_unlock(&kernel_flag);
+        /*
+         * the BKL is not covered by lockdep, so we open-code the
+         * unlocking sequence (and thus avoid the dep-chain ops):
+         */
+        _raw_spin_unlock(&kernel_flag);
+        preempt_enable();
 }
 
 /*
+36
lib/spinlock_debug.c
···
 #include <linux/delay.h>
 #include <linux/module.h>
 
+void __spin_lock_init(spinlock_t *lock, const char *name,
+                      struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        /*
+         * Make sure we are not reinitializing a held lock:
+         */
+        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+        lockdep_init_map(&lock->dep_map, name, key);
+#endif
+        lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+        lock->magic = SPINLOCK_MAGIC;
+        lock->owner = SPINLOCK_OWNER_INIT;
+        lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+                   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+        /*
+         * Make sure we are not reinitializing a held lock:
+         */
+        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+        lockdep_init_map(&lock->dep_map, name, key);
+#endif
+        lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+        lock->magic = RWLOCK_MAGIC;
+        lock->owner = SPINLOCK_OWNER_INIT;
+        lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
+
 static void spin_bug(spinlock_t *lock, const char *msg)
 {
         struct task_struct *owner = NULL;
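
With these out-of-line initializers in place, the spin_lock_init() macro
from include/linux/spinlock.h (above) expands under CONFIG_DEBUG_SPINLOCK
to roughly:

    /* spin_lock_init(&s->lock) becomes: */
    do {
            static struct lock_class_key __key;    /* one key per call site */

            __spin_lock_init((&s->lock), "&s->lock", &__key);
    } while (0)

So the class name is the stringified argument and the class identity is the
call site's static key, which is why locks living in dynamically allocated
memory still get a stable class from the code location that initialized them.
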
+2 -1
net/ipv4/route.c
···
 struct rt_hash_bucket {
         struct rtable   *chain;
 };
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+        defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.