Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] lockdep: prove mutex locking correctness

Use the lock validator framework to prove mutex locking correctness.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar; committed by Linus Torvalds.
ef5d4707 8a25d5de

+63 -12
+7 -1
include/linux/mutex-debug.h
··· 2 2 #define __LINUX_MUTEX_DEBUG_H 3 3 4 4 #include <linux/linkage.h> 5 + #include <linux/lockdep.h> 5 6 6 7 /* 7 8 * Mutexes - debugging helpers: ··· 11 10 #define __DEBUG_MUTEX_INITIALIZER(lockname) \ 12 11 , .magic = &lockname 13 12 14 - #define mutex_init(sem) __mutex_init(sem, __FILE__":"#sem) 13 + #define mutex_init(mutex) \ 14 + do { \ 15 + static struct lock_class_key __key; \ 16 + \ 17 + __mutex_init((mutex), #mutex, &__key); \ 18 + } while (0) 15 19 16 20 extern void FASTCALL(mutex_destroy(struct mutex *lock)); 17 21
+28 -3
include/linux/mutex.h
··· 13 13 #include <linux/list.h> 14 14 #include <linux/spinlock_types.h> 15 15 #include <linux/linkage.h> 16 + #include <linux/lockdep.h> 16 17 17 18 #include <asm/atomic.h> 18 19 ··· 54 53 const char *name; 55 54 void *magic; 56 55 #endif 56 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 57 + struct lockdep_map dep_map; 58 + #endif 57 59 }; 58 60 59 61 /* ··· 76 72 # include <linux/mutex-debug.h> 77 73 #else 78 74 # define __DEBUG_MUTEX_INITIALIZER(lockname) 79 - # define mutex_init(mutex) __mutex_init(mutex, NULL) 75 + # define mutex_init(mutex) \ 76 + do { \ 77 + static struct lock_class_key __key; \ 78 + \ 79 + __mutex_init((mutex), #mutex, &__key); \ 80 + } while (0) 80 81 # define mutex_destroy(mutex) do { } while (0) 82 + #endif 83 + 84 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 85 + # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ 86 + , .dep_map = { .name = #lockname } 87 + #else 88 + # define __DEP_MAP_MUTEX_INITIALIZER(lockname) 81 89 #endif 82 90 83 91 #define __MUTEX_INITIALIZER(lockname) \ 84 92 { .count = ATOMIC_INIT(1) \ 85 93 , .wait_lock = SPIN_LOCK_UNLOCKED \ 86 94 , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ 87 - __DEBUG_MUTEX_INITIALIZER(lockname) } 95 + __DEBUG_MUTEX_INITIALIZER(lockname) \ 96 + __DEP_MAP_MUTEX_INITIALIZER(lockname) } 88 97 89 98 #define DEFINE_MUTEX(mutexname) \ 90 99 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) 91 100 92 - extern void fastcall __mutex_init(struct mutex *lock, const char *name); 101 + extern void __mutex_init(struct mutex *lock, const char *name, 102 + struct lock_class_key *key); 93 103 94 104 /*** 95 105 * mutex_is_locked - is the mutex locked ··· 122 104 */ 123 105 extern void fastcall mutex_lock(struct mutex *lock); 124 106 extern int fastcall mutex_lock_interruptible(struct mutex *lock); 107 + 108 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 109 + extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 110 + #else 111 + # define mutex_lock_nested(lock, subclass) mutex_lock(lock) 112 + #endif 113 + 125 
114 /* 126 115 * NOTE: mutex_trylock() follows the spin_trylock() convention, 127 116 * not the down_trylock() convention!
+5 -1
kernel/mutex-debug.c
··· 83 83 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); 84 84 } 85 85 86 - void debug_mutex_init(struct mutex *lock, const char *name) 86 + void debug_mutex_init(struct mutex *lock, const char *name, 87 + struct lock_class_key *key) 87 88 { 89 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 88 90 /* 89 91 * Make sure we are not reinitializing a held lock: 90 92 */ 91 93 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 94 + lockdep_init_map(&lock->dep_map, name, key); 95 + #endif 92 96 lock->owner = NULL; 93 97 lock->magic = lock; 94 98 }
+22 -6
kernel/mutex.c
··· 39 39 * 40 40 * It is not allowed to initialize an already locked mutex. 41 41 */ 42 - __always_inline void fastcall __mutex_init(struct mutex *lock, const char *name) 42 + void 43 + __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) 43 44 { 44 45 atomic_set(&lock->count, 1); 45 46 spin_lock_init(&lock->wait_lock); 46 47 INIT_LIST_HEAD(&lock->wait_list); 47 48 48 - debug_mutex_init(lock, name); 49 + debug_mutex_init(lock, name, key); 49 50 } 50 51 51 52 EXPORT_SYMBOL(__mutex_init); ··· 132 131 spin_lock_mutex(&lock->wait_lock, flags); 133 132 134 133 debug_mutex_lock_common(lock, &waiter); 134 + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 135 135 debug_mutex_add_waiter(lock, &waiter, task->thread_info); 136 136 137 137 /* add waiting tasks to the end of the waitqueue (FIFO): */ ··· 160 158 if (unlikely(state == TASK_INTERRUPTIBLE && 161 159 signal_pending(task))) { 162 160 mutex_remove_waiter(lock, &waiter, task->thread_info); 161 + mutex_release(&lock->dep_map, 1, _RET_IP_); 163 162 spin_unlock_mutex(&lock->wait_lock, flags); 164 163 165 164 debug_mutex_free_waiter(&waiter); ··· 197 194 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0); 198 195 } 199 196 197 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 198 + void __sched 199 + mutex_lock_nested(struct mutex *lock, unsigned int subclass) 200 + { 201 + might_sleep(); 202 + __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass); 203 + } 204 + 205 + EXPORT_SYMBOL_GPL(mutex_lock_nested); 206 + #endif 207 + 200 208 /* 201 209 * Release the lock, slowpath: 202 210 */ 203 211 static fastcall inline void 204 - __mutex_unlock_common_slowpath(atomic_t *lock_count) 212 + __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) 205 213 { 206 214 struct mutex *lock = container_of(lock_count, struct mutex, count); 207 215 unsigned long flags; 208 216 209 217 spin_lock_mutex(&lock->wait_lock, flags); 218 + mutex_release(&lock->dep_map, nested, _RET_IP_); 210 219 debug_mutex_unlock(lock); 
211 220 212 221 /* ··· 251 236 static fastcall noinline void 252 237 __mutex_unlock_slowpath(atomic_t *lock_count) 253 238 { 254 - __mutex_unlock_common_slowpath(lock_count); 239 + __mutex_unlock_common_slowpath(lock_count, 1); 255 240 } 256 241 257 242 /* ··· 302 287 spin_lock_mutex(&lock->wait_lock, flags); 303 288 304 289 prev = atomic_xchg(&lock->count, -1); 305 - if (likely(prev == 1)) 290 + if (likely(prev == 1)) { 306 291 debug_mutex_set_owner(lock, current_thread_info()); 307 - 292 + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); 293 + } 308 294 /* Set it back to 0 if there are no waiters: */ 309 295 if (likely(list_empty(&lock->wait_list))) 310 296 atomic_set(&lock->count, 0);
+1 -1
kernel/mutex.h
··· 22 22 #define debug_mutex_free_waiter(waiter) do { } while (0) 23 23 #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) 24 24 #define debug_mutex_unlock(lock) do { } while (0) 25 - #define debug_mutex_init(lock, name) do { } while (0) 25 + #define debug_mutex_init(lock, name, key) do { } while (0) 26 26 27 27 static inline void 28 28 debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)