Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] powerpc: Merge asm-ppc*/rwsem.h

Merge asm-ppc*/rwsem.h into include/asm-powerpc.
Removed smp_*mb() memory barriers from the ppc32 code
as they are now buried in the atomic_*() functions as
suggested by Paul, implemented by Arnd, and pushed out
by Becky.

This patch depends on Becky's atomic.h merge patch.

Signed-off-by: Jon Loeliger <jdl@freescale.com>
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Jon Loeliger and committed by
Paul Mackerras
342e73b3 feaf7cf1

+14 -190
+14 -23
include/asm-ppc/rwsem.h include/asm-powerpc/rwsem.h
··· 1 + #ifndef _ASM_POWERPC_RWSEM_H 2 + #define _ASM_POWERPC_RWSEM_H 3 + 4 + #ifdef __KERNEL__ 5 + 1 6 /* 2 - * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff 7 + * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff 3 8 * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h 4 9 * by Paul Mackerras <paulus@samba.org>. 5 10 */ 6 11 7 - #ifndef _PPC_RWSEM_H 8 - #define _PPC_RWSEM_H 9 - 10 - #ifdef __KERNEL__ 11 12 #include <linux/list.h> 12 13 #include <linux/spinlock.h> 13 14 #include <asm/atomic.h> ··· 19 18 */ 20 19 struct rw_semaphore { 21 20 /* XXX this should be able to be an atomic_t -- paulus */ 22 - signed long count; 21 + signed int count; 23 22 #define RWSEM_UNLOCKED_VALUE 0x00000000 24 23 #define RWSEM_ACTIVE_BIAS 0x00000001 25 24 #define RWSEM_ACTIVE_MASK 0x0000ffff ··· 70 69 */ 71 70 static inline void __down_read(struct rw_semaphore *sem) 72 71 { 73 - if (atomic_inc_return((atomic_t *)(&sem->count)) > 0) 74 - smp_wmb(); 75 - else 72 + if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) 76 73 rwsem_down_read_failed(sem); 77 74 } 78 75 ··· 81 82 while ((tmp = sem->count) >= 0) { 82 83 if (tmp == cmpxchg(&sem->count, tmp, 83 84 tmp + RWSEM_ACTIVE_READ_BIAS)) { 84 - smp_wmb(); 85 85 return 1; 86 86 } 87 87 } ··· 96 98 97 99 tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, 98 100 (atomic_t *)(&sem->count)); 99 - if (tmp == RWSEM_ACTIVE_WRITE_BIAS) 100 - smp_wmb(); 101 - else 101 + if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) 102 102 rwsem_down_write_failed(sem); 103 103 } 104 104 ··· 106 110 107 111 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, 108 112 RWSEM_ACTIVE_WRITE_BIAS); 109 - smp_wmb(); 110 113 return tmp == RWSEM_UNLOCKED_VALUE; 111 114 } 112 115 ··· 116 121 { 117 122 int tmp; 118 123 119 - smp_wmb(); 120 124 tmp = atomic_dec_return((atomic_t *)(&sem->count)); 121 - if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) 125 + if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) 122 126 
rwsem_wake(sem); 123 127 } 124 128 ··· 126 132 */ 127 133 static inline void __up_write(struct rw_semaphore *sem) 128 134 { 129 - smp_wmb(); 130 - if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 131 - (atomic_t *)(&sem->count)) < 0) 135 + if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 136 + (atomic_t *)(&sem->count)) < 0)) 132 137 rwsem_wake(sem); 133 138 } 134 139 ··· 146 153 { 147 154 int tmp; 148 155 149 - smp_wmb(); 150 156 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); 151 157 if (tmp < 0) 152 158 rwsem_downgrade_wake(sem); ··· 156 164 */ 157 165 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) 158 166 { 159 - smp_mb(); 160 167 return atomic_add_return(delta, (atomic_t *)(&sem->count)); 161 168 } 162 169 163 - #endif /* __KERNEL__ */ 164 - #endif /* _PPC_RWSEM_XADD_H */ 170 + #endif /* __KERNEL__ */ 171 + #endif /* _ASM_POWERPC_RWSEM_H */
-167
include/asm-ppc64/rwsem.h
··· 1 - /* 2 - * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff 3 - * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h 4 - * by Paul Mackerras <paulus@samba.org>. 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the License, or (at your option) any later version. 10 - */ 11 - 12 - #ifndef _PPC64_RWSEM_H 13 - #define _PPC64_RWSEM_H 14 - 15 - #ifdef __KERNEL__ 16 - #include <linux/list.h> 17 - #include <linux/spinlock.h> 18 - #include <asm/atomic.h> 19 - #include <asm/system.h> 20 - 21 - /* 22 - * the semaphore definition 23 - */ 24 - struct rw_semaphore { 25 - /* XXX this should be able to be an atomic_t -- paulus */ 26 - signed int count; 27 - #define RWSEM_UNLOCKED_VALUE 0x00000000 28 - #define RWSEM_ACTIVE_BIAS 0x00000001 29 - #define RWSEM_ACTIVE_MASK 0x0000ffff 30 - #define RWSEM_WAITING_BIAS (-0x00010000) 31 - #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 32 - #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 33 - spinlock_t wait_lock; 34 - struct list_head wait_list; 35 - #if RWSEM_DEBUG 36 - int debug; 37 - #endif 38 - }; 39 - 40 - /* 41 - * initialisation 42 - */ 43 - #if RWSEM_DEBUG 44 - #define __RWSEM_DEBUG_INIT , 0 45 - #else 46 - #define __RWSEM_DEBUG_INIT /* */ 47 - #endif 48 - 49 - #define __RWSEM_INITIALIZER(name) \ 50 - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 51 - LIST_HEAD_INIT((name).wait_list) \ 52 - __RWSEM_DEBUG_INIT } 53 - 54 - #define DECLARE_RWSEM(name) \ 55 - struct rw_semaphore name = __RWSEM_INITIALIZER(name) 56 - 57 - extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); 58 - extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); 59 - extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); 60 - extern struct rw_semaphore *rwsem_downgrade_wake(struct 
rw_semaphore *sem); 61 - 62 - static inline void init_rwsem(struct rw_semaphore *sem) 63 - { 64 - sem->count = RWSEM_UNLOCKED_VALUE; 65 - spin_lock_init(&sem->wait_lock); 66 - INIT_LIST_HEAD(&sem->wait_list); 67 - #if RWSEM_DEBUG 68 - sem->debug = 0; 69 - #endif 70 - } 71 - 72 - /* 73 - * lock for reading 74 - */ 75 - static inline void __down_read(struct rw_semaphore *sem) 76 - { 77 - if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0)) 78 - rwsem_down_read_failed(sem); 79 - } 80 - 81 - static inline int __down_read_trylock(struct rw_semaphore *sem) 82 - { 83 - int tmp; 84 - 85 - while ((tmp = sem->count) >= 0) { 86 - if (tmp == cmpxchg(&sem->count, tmp, 87 - tmp + RWSEM_ACTIVE_READ_BIAS)) { 88 - return 1; 89 - } 90 - } 91 - return 0; 92 - } 93 - 94 - /* 95 - * lock for writing 96 - */ 97 - static inline void __down_write(struct rw_semaphore *sem) 98 - { 99 - int tmp; 100 - 101 - tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, 102 - (atomic_t *)(&sem->count)); 103 - if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS)) 104 - rwsem_down_write_failed(sem); 105 - } 106 - 107 - static inline int __down_write_trylock(struct rw_semaphore *sem) 108 - { 109 - int tmp; 110 - 111 - tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, 112 - RWSEM_ACTIVE_WRITE_BIAS); 113 - return tmp == RWSEM_UNLOCKED_VALUE; 114 - } 115 - 116 - /* 117 - * unlock after reading 118 - */ 119 - static inline void __up_read(struct rw_semaphore *sem) 120 - { 121 - int tmp; 122 - 123 - tmp = atomic_dec_return((atomic_t *)(&sem->count)); 124 - if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)) 125 - rwsem_wake(sem); 126 - } 127 - 128 - /* 129 - * unlock after writing 130 - */ 131 - static inline void __up_write(struct rw_semaphore *sem) 132 - { 133 - if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 134 - (atomic_t *)(&sem->count)) < 0)) 135 - rwsem_wake(sem); 136 - } 137 - 138 - /* 139 - * implement atomic add functionality 140 - */ 141 - static inline void rwsem_atomic_add(int delta, 
struct rw_semaphore *sem) 142 - { 143 - atomic_add(delta, (atomic_t *)(&sem->count)); 144 - } 145 - 146 - /* 147 - * downgrade write lock to read lock 148 - */ 149 - static inline void __downgrade_write(struct rw_semaphore *sem) 150 - { 151 - int tmp; 152 - 153 - tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); 154 - if (tmp < 0) 155 - rwsem_downgrade_wake(sem); 156 - } 157 - 158 - /* 159 - * implement exchange and add functionality 160 - */ 161 - static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) 162 - { 163 - return atomic_add_return(delta, (atomic_t *)(&sem->count)); 164 - } 165 - 166 - #endif /* __KERNEL__ */ 167 - #endif /* _PPC_RWSEM_XADD_H */