Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] powerpc: merge semaphore.h

powerpc: Merge semaphore.h

Adopted the ppc64 version of semaphore.h. The 32-bit version used
smp_wmb(), but recent updates to atomic.h mean this is no longer required.
The 64-bit version made use of unlikely(), which has been retained in the
combined version.

This patch requires the recent atomic.h patch.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by Becky Bruce; committed by Paul Mackerras
d6a4c847 342e73b3

+11 -122
+11 -24
include/asm-ppc/semaphore.h include/asm-powerpc/semaphore.h
··· 1 - #ifndef _PPC_SEMAPHORE_H 2 - #define _PPC_SEMAPHORE_H 1 + #ifndef _ASM_POWERPC_SEMAPHORE_H 2 + #define _ASM_POWERPC_SEMAPHORE_H 3 3 4 4 /* 5 - * Swiped from asm-sparc/semaphore.h and modified 6 - * -- Cort (cort@cs.nmt.edu) 7 - * 8 - * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h 9 - * -- Ani Joshi (ajoshi@unixbox.com) 10 - * 11 5 * Remove spinlock-based RW semaphores; RW semaphore definitions are 12 6 * now in rwsem.h and we use the generic lib/rwsem.c implementation. 13 7 * Rework semaphores to use atomic_dec_if_positive. ··· 60 66 extern int __down_interruptible(struct semaphore * sem); 61 67 extern void __up(struct semaphore * sem); 62 68 63 - extern inline void down(struct semaphore * sem) 69 + static inline void down(struct semaphore * sem) 64 70 { 65 71 might_sleep(); 66 72 67 73 /* 68 74 * Try to get the semaphore, take the slow path if we fail. 69 75 */ 70 - if (atomic_dec_return(&sem->count) < 0) 76 + if (unlikely(atomic_dec_return(&sem->count) < 0)) 71 77 __down(sem); 72 - smp_wmb(); 73 78 } 74 79 75 - extern inline int down_interruptible(struct semaphore * sem) 80 + static inline int down_interruptible(struct semaphore * sem) 76 81 { 77 82 int ret = 0; 78 83 79 84 might_sleep(); 80 85 81 - if (atomic_dec_return(&sem->count) < 0) 86 + if (unlikely(atomic_dec_return(&sem->count) < 0)) 82 87 ret = __down_interruptible(sem); 83 - smp_wmb(); 84 88 return ret; 85 89 } 86 90 87 - extern inline int down_trylock(struct semaphore * sem) 91 + static inline int down_trylock(struct semaphore * sem) 88 92 { 89 - int ret; 90 - 91 - ret = atomic_dec_if_positive(&sem->count) < 0; 92 - smp_wmb(); 93 - return ret; 93 + return atomic_dec_if_positive(&sem->count) < 0; 94 94 } 95 95 96 - extern inline void up(struct semaphore * sem) 96 + static inline void up(struct semaphore * sem) 97 97 { 98 - smp_wmb(); 99 - if (atomic_inc_return(&sem->count) <= 0) 98 + if (unlikely(atomic_inc_return(&sem->count) <= 0)) 100 99 __up(sem); 101 100 } 102 
101 103 102 #endif /* __KERNEL__ */ 104 103 105 - #endif /* !(_PPC_SEMAPHORE_H) */ 104 + #endif /* _ASM_POWERPC_SEMAPHORE_H */
-98
include/asm-ppc64/semaphore.h
··· 1 - #ifndef _PPC64_SEMAPHORE_H 2 - #define _PPC64_SEMAPHORE_H 3 - 4 - /* 5 - * Remove spinlock-based RW semaphores; RW semaphore definitions are 6 - * now in rwsem.h and we use the generic lib/rwsem.c implementation. 7 - * Rework semaphores to use atomic_dec_if_positive. 8 - * -- Paul Mackerras (paulus@samba.org) 9 - */ 10 - 11 - #ifdef __KERNEL__ 12 - 13 - #include <asm/atomic.h> 14 - #include <asm/system.h> 15 - #include <linux/wait.h> 16 - #include <linux/rwsem.h> 17 - 18 - struct semaphore { 19 - /* 20 - * Note that any negative value of count is equivalent to 0, 21 - * but additionally indicates that some process(es) might be 22 - * sleeping on `wait'. 23 - */ 24 - atomic_t count; 25 - wait_queue_head_t wait; 26 - }; 27 - 28 - #define __SEMAPHORE_INITIALIZER(name, n) \ 29 - { \ 30 - .count = ATOMIC_INIT(n), \ 31 - .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ 32 - } 33 - 34 - #define __MUTEX_INITIALIZER(name) \ 35 - __SEMAPHORE_INITIALIZER(name, 1) 36 - 37 - #define __DECLARE_SEMAPHORE_GENERIC(name, count) \ 38 - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) 39 - 40 - #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) 41 - #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) 42 - 43 - static inline void sema_init (struct semaphore *sem, int val) 44 - { 45 - atomic_set(&sem->count, val); 46 - init_waitqueue_head(&sem->wait); 47 - } 48 - 49 - static inline void init_MUTEX (struct semaphore *sem) 50 - { 51 - sema_init(sem, 1); 52 - } 53 - 54 - static inline void init_MUTEX_LOCKED (struct semaphore *sem) 55 - { 56 - sema_init(sem, 0); 57 - } 58 - 59 - extern void __down(struct semaphore * sem); 60 - extern int __down_interruptible(struct semaphore * sem); 61 - extern void __up(struct semaphore * sem); 62 - 63 - static inline void down(struct semaphore * sem) 64 - { 65 - might_sleep(); 66 - 67 - /* 68 - * Try to get the semaphore, take the slow path if we fail. 
69 - */ 70 - if (unlikely(atomic_dec_return(&sem->count) < 0)) 71 - __down(sem); 72 - } 73 - 74 - static inline int down_interruptible(struct semaphore * sem) 75 - { 76 - int ret = 0; 77 - 78 - might_sleep(); 79 - 80 - if (unlikely(atomic_dec_return(&sem->count) < 0)) 81 - ret = __down_interruptible(sem); 82 - return ret; 83 - } 84 - 85 - static inline int down_trylock(struct semaphore * sem) 86 - { 87 - return atomic_dec_if_positive(&sem->count) < 0; 88 - } 89 - 90 - static inline void up(struct semaphore * sem) 91 - { 92 - if (unlikely(atomic_inc_return(&sem->count) <= 0)) 93 - __up(sem); 94 - } 95 - 96 - #endif /* __KERNEL__ */ 97 - 98 - #endif /* !(_PPC64_SEMAPHORE_H) */