[PATCH] fix i386 mutex fastpath on FRAME_POINTER && !DEBUG_MUTEXES

Call the mutex slowpath more conservatively - e.g. FRAME_POINTERS can
change the calling convention, in which case a direct branch to the
slowpath becomes illegal. Bug found by Hugh Dickins.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar and committed by Linus Torvalds
(commits 73165b88, 042c904c)

Total: +14 -11

include/asm-i386/mutex.h: +14 -2
··· (include/asm-i386/mutex.h, before)
 28 							\
 29 	__asm__ __volatile__(				\
 30 	LOCK	"   decl (%%eax)	\n"		\
 31-		"   js "#fail_fn"	\n"		\
 32 							\
 33 		:"=a" (dummy)				\
 34 		: "a" (count)				\
···
 84 							\
 85 	__asm__ __volatile__(				\
 86 	LOCK	"   incl (%%eax)	\n"		\
 87-		"   jle "#fail_fn"	\n"		\
 88 							\
 89 		:"=a" (dummy)				\
 90 		: "a" (count)				\
··· (include/asm-i386/mutex.h, after)
 28 							\
 29 	__asm__ __volatile__(				\
 30 	LOCK	"   decl (%%eax)	\n"		\
 31+		"   js 2f		\n"		\
 32+		"1:			\n"		\
 33+							\
 34+		LOCK_SECTION_START("")			\
 35+		"2: call "#fail_fn"	\n"		\
 36+		"   jmp 1b		\n"		\
 37+		LOCK_SECTION_END			\
 38 							\
 39 		:"=a" (dummy)				\
 40 		: "a" (count)				\
···
 78 							\
 79 	__asm__ __volatile__(				\
 80 	LOCK	"   incl (%%eax)	\n"		\
 81+		"   jle 2f		\n"		\
 82+		"1:			\n"		\
 83+							\
 84+		LOCK_SECTION_START("")			\
 85+		"2: call "#fail_fn"	\n"		\
 86+		"   jmp 1b		\n"		\
 87+		LOCK_SECTION_END			\
 88 							\
 89 		:"=a" (dummy)				\
 90 		: "a" (count)				\
kernel/mutex.c: -9
··· (kernel/mutex.c, before)
 84 	/*
 85 	 * The locking fastpath is the 1->0 transition from
 86 	 * 'unlocked' into 'locked' state.
 87-	 *
 88-	 * NOTE: if asm/mutex.h is included, then some architectures
 89-	 * rely on mutex_lock() having _no other code_ here but this
 90-	 * fastpath. That allows the assembly fastpath to do
 91-	 * tail-merging optimizations. (If you want to put testcode
 92-	 * here, do it under #ifndef CONFIG_MUTEX_DEBUG.)
 93 	 */
 94 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 95 }
···
109 	/*
110 	 * The unlocking fastpath is the 0->1 transition from 'locked'
111 	 * into 'unlocked' state:
112-	 *
113-	 * NOTE: no other code must be here - see mutex_lock() .
114 	 */
115 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
116 }
···
253  */
254 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
255 {
256-	/* NOTE: no other code must be here - see mutex_lock() */
257 	return __mutex_fastpath_lock_retval
258 		(&lock->count, __mutex_lock_interruptible_slowpath);
259 }
··· (kernel/mutex.c, after)
 84 	/*
 85 	 * The locking fastpath is the 1->0 transition from
 86 	 * 'unlocked' into 'locked' state.
 87 	 */
 88 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 89 }
···
115 	/*
116 	 * The unlocking fastpath is the 0->1 transition from 'locked'
117 	 * into 'unlocked' state:
118 	 */
119 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
120 }
···
261  */
262 int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
263 {
264 	return __mutex_fastpath_lock_retval
265 		(&lock->count, __mutex_lock_interruptible_slowpath);
266 }