Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/rwsem: Drop explicit memory barriers

sh and xtensa seem to be the only architectures which use explicit
memory barriers for rw_semaphore operations even though they are not
really needed because the full memory barrier is always implied
by atomic_{inc,dec,add,sub}_return() resp. cmpxchg(). Remove them.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: Jason Low <jason.low2@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1460041951-22347-3-git-send-email-mhocko@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Michal Hocko and committed by
Ingo Molnar
2e927c64 f8e04d85

+4 -24
+2 -12
arch/sh/include/asm/rwsem.h
··· 24 24 */ 25 25 static inline void __down_read(struct rw_semaphore *sem) 26 26 { 27 - if (atomic_inc_return((atomic_t *)(&sem->count)) > 0) 28 - smp_wmb(); 29 - else 27 + if (atomic_inc_return((atomic_t *)(&sem->count)) <= 0) 30 28 rwsem_down_read_failed(sem); 31 29 } 32 30 ··· 35 37 while ((tmp = sem->count) >= 0) { 36 38 if (tmp == cmpxchg(&sem->count, tmp, 37 39 tmp + RWSEM_ACTIVE_READ_BIAS)) { 38 - smp_wmb(); 39 40 return 1; 40 41 } 41 42 } ··· 50 53 51 54 tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, 52 55 (atomic_t *)(&sem->count)); 53 - if (tmp == RWSEM_ACTIVE_WRITE_BIAS) 54 - smp_wmb(); 55 - else 56 + if (tmp != RWSEM_ACTIVE_WRITE_BIAS) 56 57 rwsem_down_write_failed(sem); 57 58 } 58 59 ··· 60 65 61 66 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, 62 67 RWSEM_ACTIVE_WRITE_BIAS); 63 - smp_wmb(); 64 68 return tmp == RWSEM_UNLOCKED_VALUE; 65 69 } 66 70 ··· 70 76 { 71 77 int tmp; 72 78 73 - smp_wmb(); 74 79 tmp = atomic_dec_return((atomic_t *)(&sem->count)); 75 80 if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) 76 81 rwsem_wake(sem); ··· 80 87 */ 81 88 static inline void __up_write(struct rw_semaphore *sem) 82 89 { 83 - smp_wmb(); 84 90 if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 85 91 (atomic_t *)(&sem->count)) < 0) 86 92 rwsem_wake(sem); ··· 100 108 { 101 109 int tmp; 102 110 103 - smp_wmb(); 104 111 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); 105 112 if (tmp < 0) 106 113 rwsem_downgrade_wake(sem); ··· 110 119 */ 111 120 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) 112 121 { 113 - smp_mb(); 114 122 return atomic_add_return(delta, (atomic_t *)(&sem->count)); 115 123 } 116 124
+2 -12
arch/xtensa/include/asm/rwsem.h
··· 29 29 */ 30 30 static inline void __down_read(struct rw_semaphore *sem) 31 31 { 32 - if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0) 33 - smp_wmb(); 34 - else 32 + if (atomic_add_return(1,(atomic_t *)(&sem->count)) <= 0) 35 33 rwsem_down_read_failed(sem); 36 34 } 37 35 ··· 40 42 while ((tmp = sem->count) >= 0) { 41 43 if (tmp == cmpxchg(&sem->count, tmp, 42 44 tmp + RWSEM_ACTIVE_READ_BIAS)) { 43 - smp_wmb(); 44 45 return 1; 45 46 } 46 47 } ··· 55 58 56 59 tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS, 57 60 (atomic_t *)(&sem->count)); 58 - if (tmp == RWSEM_ACTIVE_WRITE_BIAS) 59 - smp_wmb(); 60 - else 61 + if (tmp != RWSEM_ACTIVE_WRITE_BIAS) 61 62 rwsem_down_write_failed(sem); 62 63 } 63 64 ··· 65 70 66 71 tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, 67 72 RWSEM_ACTIVE_WRITE_BIAS); 68 - smp_wmb(); 69 73 return tmp == RWSEM_UNLOCKED_VALUE; 70 74 } 71 75 ··· 75 81 { 76 82 int tmp; 77 83 78 - smp_wmb(); 79 84 tmp = atomic_sub_return(1,(atomic_t *)(&sem->count)); 80 85 if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0) 81 86 rwsem_wake(sem); ··· 85 92 */ 86 93 static inline void __up_write(struct rw_semaphore *sem) 87 94 { 88 - smp_wmb(); 89 95 if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS, 90 96 (atomic_t *)(&sem->count)) < 0) 91 97 rwsem_wake(sem); ··· 105 113 { 106 114 int tmp; 107 115 108 - smp_wmb(); 109 116 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); 110 117 if (tmp < 0) 111 118 rwsem_downgrade_wake(sem); ··· 115 124 */ 116 125 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) 117 126 { 118 - smp_mb(); 119 127 return atomic_add_return(delta, (atomic_t *)(&sem->count)); 120 128 } 121 129