Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not

This will allow me to call functions that have multiple
arguments if the fastpath fails. This is required to support
ticket mutexes, because they need to be able to pass an extra
argument to the fail function.

Originally I duplicated the functions by adding
__mutex_fastpath_lock_retval_arg. This ended up being just a
duplication of the existing function, so having a way to test
whether the fastpath succeeded turned out to be the better approach.

This also cleaned up the reservation mutex patch somewhat, by
making it possible to call atomic_set instead of atomic_xchg, and
by making it easier to detect whether the wrong unlock function was
previously used.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: robclark@gmail.com
Cc: rostedt@goodmis.org
Cc: daniel@ffwll.ch
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Maarten Lankhorst and committed by
Ingo Molnar
a41b56ef 1e876e3b

+41 -59
+4 -6
arch/ia64/include/asm/mutex.h
··· 29 29 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 30 30 * from 1 to a 0 value 31 31 * @count: pointer of type atomic_t 32 - * @fail_fn: function to call if the original value was not 1 33 32 * 34 - * Change the count from 1 to a value lower than 1, and call <fail_fn> if 35 - * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, 36 - * or anything the slow path function returns. 33 + * Change the count from 1 to a value lower than 1. This function returns 0 34 + * if the fastpath succeeds, or -1 otherwise. 37 35 */ 38 36 static inline int 39 - __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 37 + __mutex_fastpath_lock_retval(atomic_t *count) 40 38 { 41 39 if (unlikely(ia64_fetchadd4_acq(count, -1) != 1)) 42 - return fail_fn(count); 40 + return -1; 43 41 return 0; 44 42 } 45 43
+4 -6
arch/powerpc/include/asm/mutex.h
··· 82 82 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 83 83 * from 1 to a 0 value 84 84 * @count: pointer of type atomic_t 85 - * @fail_fn: function to call if the original value was not 1 86 85 * 87 - * Change the count from 1 to a value lower than 1, and call <fail_fn> if 88 - * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, 89 - * or anything the slow path function returns. 86 + * Change the count from 1 to a value lower than 1. This function returns 0 87 + * if the fastpath succeeds, or -1 otherwise. 90 88 */ 91 89 static inline int 92 - __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 90 + __mutex_fastpath_lock_retval(atomic_t *count) 93 91 { 94 92 if (unlikely(__mutex_dec_return_lock(count) < 0)) 95 - return fail_fn(count); 93 + return -1; 96 94 return 0; 97 95 } 98 96
+2 -2
arch/sh/include/asm/mutex-llsc.h
··· 37 37 } 38 38 39 39 static inline int 40 - __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 40 + __mutex_fastpath_lock_retval(atomic_t *count) 41 41 { 42 42 int __done, __res; 43 43 ··· 51 51 : "t"); 52 52 53 53 if (unlikely(!__done || __res != 0)) 54 - __res = fail_fn(count); 54 + __res = -1; 55 55 56 56 return __res; 57 57 }
+4 -7
arch/x86/include/asm/mutex_32.h
··· 42 42 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 43 43 * from 1 to a 0 value 44 44 * @count: pointer of type atomic_t 45 - * @fail_fn: function to call if the original value was not 1 46 45 * 47 - * Change the count from 1 to a value lower than 1, and call <fail_fn> if it 48 - * wasn't 1 originally. This function returns 0 if the fastpath succeeds, 49 - * or anything the slow path function returns 46 + * Change the count from 1 to a value lower than 1. This function returns 0 47 + * if the fastpath succeeds, or -1 otherwise. 50 48 */ 51 - static inline int __mutex_fastpath_lock_retval(atomic_t *count, 52 - int (*fail_fn)(atomic_t *)) 49 + static inline int __mutex_fastpath_lock_retval(atomic_t *count) 53 50 { 54 51 if (unlikely(atomic_dec_return(count) < 0)) 55 - return fail_fn(count); 52 + return -1; 56 53 else 57 54 return 0; 58 55 }
+4 -7
arch/x86/include/asm/mutex_64.h
··· 37 37 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 38 38 * from 1 to a 0 value 39 39 * @count: pointer of type atomic_t 40 - * @fail_fn: function to call if the original value was not 1 41 40 * 42 - * Change the count from 1 to a value lower than 1, and call <fail_fn> if 43 - * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, 44 - * or anything the slow path function returns 41 + * Change the count from 1 to a value lower than 1. This function returns 0 42 + * if the fastpath succeeds, or -1 otherwise. 45 43 */ 46 - static inline int __mutex_fastpath_lock_retval(atomic_t *count, 47 - int (*fail_fn)(atomic_t *)) 44 + static inline int __mutex_fastpath_lock_retval(atomic_t *count) 48 45 { 49 46 if (unlikely(atomic_dec_return(count) < 0)) 50 - return fail_fn(count); 47 + return -1; 51 48 else 52 49 return 0; 53 50 }
+4 -6
include/asm-generic/mutex-dec.h
··· 28 28 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 29 29 * from 1 to a 0 value 30 30 * @count: pointer of type atomic_t 31 - * @fail_fn: function to call if the original value was not 1 32 31 * 33 - * Change the count from 1 to a value lower than 1, and call <fail_fn> if 34 - * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, 35 - * or anything the slow path function returns. 32 + * Change the count from 1 to a value lower than 1. This function returns 0 33 + * if the fastpath succeeds, or -1 otherwise. 36 34 */ 37 35 static inline int 38 - __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 36 + __mutex_fastpath_lock_retval(atomic_t *count) 39 37 { 40 38 if (unlikely(atomic_dec_return(count) < 0)) 41 - return fail_fn(count); 39 + return -1; 42 40 return 0; 43 41 } 44 42
+1 -1
include/asm-generic/mutex-null.h
··· 11 11 #define _ASM_GENERIC_MUTEX_NULL_H 12 12 13 13 #define __mutex_fastpath_lock(count, fail_fn) fail_fn(count) 14 - #define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count) 14 + #define __mutex_fastpath_lock_retval(count) (-1) 15 15 #define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count) 16 16 #define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count) 17 17 #define __mutex_slowpath_needs_to_unlock() 1
+4 -6
include/asm-generic/mutex-xchg.h
··· 39 39 * __mutex_fastpath_lock_retval - try to take the lock by moving the count 40 40 * from 1 to a 0 value 41 41 * @count: pointer of type atomic_t 42 - * @fail_fn: function to call if the original value was not 1 43 42 * 44 - * Change the count from 1 to a value lower than 1, and call <fail_fn> if it 45 - * wasn't 1 originally. This function returns 0 if the fastpath succeeds, 46 - * or anything the slow path function returns 43 + * Change the count from 1 to a value lower than 1. This function returns 0 44 + * if the fastpath succeeds, or -1 otherwise. 47 45 */ 48 46 static inline int 49 - __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) 47 + __mutex_fastpath_lock_retval(atomic_t *count) 50 48 { 51 49 if (unlikely(atomic_xchg(count, 0) != 1)) 52 50 if (likely(atomic_xchg(count, -1) != 1)) 53 - return fail_fn(count); 51 + return -1; 54 52 return 0; 55 53 } 56 54
+14 -18
kernel/mutex.c
··· 494 494 * mutex_lock_interruptible() and mutex_trylock(). 495 495 */ 496 496 static noinline int __sched 497 - __mutex_lock_killable_slowpath(atomic_t *lock_count); 497 + __mutex_lock_killable_slowpath(struct mutex *lock); 498 498 499 499 static noinline int __sched 500 - __mutex_lock_interruptible_slowpath(atomic_t *lock_count); 500 + __mutex_lock_interruptible_slowpath(struct mutex *lock); 501 501 502 502 /** 503 503 * mutex_lock_interruptible - acquire the mutex, interruptible ··· 515 515 int ret; 516 516 517 517 might_sleep(); 518 - ret = __mutex_fastpath_lock_retval 519 - (&lock->count, __mutex_lock_interruptible_slowpath); 520 - if (!ret) 518 + ret = __mutex_fastpath_lock_retval(&lock->count); 519 + if (likely(!ret)) { 521 520 mutex_set_owner(lock); 522 - 523 - return ret; 521 + return 0; 522 + } else 523 + return __mutex_lock_interruptible_slowpath(lock); 524 524 } 525 525 526 526 EXPORT_SYMBOL(mutex_lock_interruptible); ··· 530 530 int ret; 531 531 532 532 might_sleep(); 533 - ret = __mutex_fastpath_lock_retval 534 - (&lock->count, __mutex_lock_killable_slowpath); 535 - if (!ret) 533 + ret = __mutex_fastpath_lock_retval(&lock->count); 534 + if (likely(!ret)) { 536 535 mutex_set_owner(lock); 537 - 538 - return ret; 536 + return 0; 537 + } else 538 + return __mutex_lock_killable_slowpath(lock); 539 539 } 540 540 EXPORT_SYMBOL(mutex_lock_killable); 541 541 ··· 548 548 } 549 549 550 550 static noinline int __sched 551 - __mutex_lock_killable_slowpath(atomic_t *lock_count) 551 + __mutex_lock_killable_slowpath(struct mutex *lock) 552 552 { 553 - struct mutex *lock = container_of(lock_count, struct mutex, count); 554 - 555 553 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); 556 554 } 557 555 558 556 static noinline int __sched 559 - __mutex_lock_interruptible_slowpath(atomic_t *lock_count) 557 + __mutex_lock_interruptible_slowpath(struct mutex *lock) 560 558 { 561 - struct mutex *lock = container_of(lock_count, struct mutex, count); 562 - 
563 559 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); 564 560 } 565 561 #endif