[PATCH] fix/simplify mutex debugging code

Let's switch mutex_debug_check_no_locks_freed() to take (addr, len) as
arguments instead, since all its callers were just calculating the 'to'
address for themselves anyway... (and sometimes doing so badly).
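
To illustrate the change, here is a before/after view of a typical call
site (lifted from the mm/page_alloc.c hunk below). The old form had to
manufacture an end pointer, and doing that via page_address(page + n) is
only meaningful if the page just past the freed block is itself a valid
lowmem page; passing a length sidesteps that entirely:

	/* before: caller computes the 'to' pointer itself */
	mutex_debug_check_no_locks_freed(page_address(page),
					 page_address(page + (1 << order)));

	/* after: caller passes a length; the helper derives 'to' */
	mutex_debug_check_no_locks_freed(page_address(page),
					 PAGE_SIZE << order);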

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by David Woodhouse and committed by Linus Torvalds (a4fc7ab1, a8b9ee73)

 arch/i386/mm/pageattr.c     |    2 +-
 include/linux/mm.h          |    2 +-
 include/linux/mutex-debug.h |    2 +-
 include/linux/mutex.h       |    2 +-
 kernel/mutex-debug.c        |    5 +++--
 mm/page_alloc.c             |    2 +-
 mm/slab.c                   |    2 +-
 7 files changed, 9 insertions(+), 8 deletions(-)
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -224,7 +224,7 @@
 		return;
 	if (!enable)
 		mutex_debug_check_no_locks_freed(page_address(page),
-						 page_address(page+numpages));
+						 numpages * PAGE_SIZE);
 
 	/* the return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time.
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1027,7 +1027,7 @@
 {
 	if (!PageHighMem(page) && !enable)
 		mutex_debug_check_no_locks_freed(page_address(page),
-						 page_address(page + numpages));
+						 numpages * PAGE_SIZE);
 }
 #endif
 
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -18,6 +18,6 @@
 extern void mutex_debug_show_all_locks(void);
 extern void mutex_debug_show_held_locks(struct task_struct *filter);
 extern void mutex_debug_check_no_locks_held(struct task_struct *task);
-extern void mutex_debug_check_no_locks_freed(const void *from, const void *to);
+extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
 
 #endif
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -79,7 +79,7 @@
 # define mutex_debug_show_all_locks()			do { } while (0)
 # define mutex_debug_show_held_locks(p)			do { } while (0)
 # define mutex_debug_check_no_locks_held(task)		do { } while (0)
-# define mutex_debug_check_no_locks_freed(from, to)	do { } while (0)
+# define mutex_debug_check_no_locks_freed(from, len)	do { } while (0)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -333,9 +333,10 @@
  * is destroyed or reinitialized - this code checks whether there is
  * any held lock in the memory range of <from> to <to>:
  */
-void mutex_debug_check_no_locks_freed(const void *from, const void *to)
+void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
 {
 	struct list_head *curr, *next;
+	const void *to = from + len;
 	unsigned long flags;
 	struct mutex *lock;
 	void *lock_addr;
@@ -438,7 +437,7 @@
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
-	mutex_debug_check_no_locks_freed((void *)lock, (void *)(lock + 1));
+	mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->owner = NULL;
 	INIT_LIST_HEAD(&lock->held_list);
 	lock->name = name;
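
For reference, a minimal sketch of what the check now does internally
under the new (from, len) signature. The diff above only shows the
declarations, so this fills in the shape of the loop; the list name
'held_locks' and the reporting details are illustrative stand-ins, not
the kernel's actual code:

	void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
	{
		const void *to = from + len;	/* end of the freed range */
		struct mutex *lock;

		/* Sketch: walk the held-lock list (hypothetical 'held_locks')
		 * and complain about any mutex living inside [from, to). */
		list_for_each_entry(lock, &held_locks, held_list) {
			const void *lock_addr = lock;

			if (lock_addr >= from && lock_addr < to)
				printk("BUG: freeing memory %p-%p with held lock %p!\n",
				       from, to, lock_addr);
		}
	}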
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -417,7 +417,7 @@
 	arch_free_page(page, order);
 	if (!PageHighMem(page))
 		mutex_debug_check_no_locks_freed(page_address(page),
-						 page_address(page+(1<<order)));
+						 PAGE_SIZE<<order);
 
 #ifndef CONFIG_MMU
 	for (i = 1 ; i < (1 << order) ; ++i)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3071,7 +3071,7 @@
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = page_get_cache(virt_to_page(objp));
-	mutex_debug_check_no_locks_freed(objp, objp+obj_reallen(c));
+	mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }