Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/rwlock: use directed yield for write-locked rwlocks

Add an owner field to the arch_rwlock_t to be able to pass the timeslice
of a virtual CPU with diagnose 0x9c to the lock owner in case the rwlock
is write-locked. The undirected yield that was previously used when the rwlock
is to be acquired for writing while it is read-locked is removed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

+54 -32
-2
arch/s390/include/asm/smp.h
··· 29 29 extern int smp_store_status(int cpu); 30 30 extern int smp_vcpu_scheduled(int cpu); 31 31 extern void smp_yield_cpu(int cpu); 32 - extern void smp_yield(void); 33 32 extern void smp_cpu_set_polarization(int cpu, int val); 34 33 extern int smp_cpu_get_polarization(int cpu); 35 34 extern void smp_fill_possible_mask(void); ··· 49 50 static inline int smp_store_status(int cpu) { return 0; } 50 51 static inline int smp_vcpu_scheduled(int cpu) { return 1; } 51 52 static inline void smp_yield_cpu(int cpu) { } 52 - static inline void smp_yield(void) { } 53 53 static inline void smp_fill_possible_mask(void) { } 54 54 55 55 #endif /* CONFIG_SMP */
+23 -5
arch/s390/include/asm/spinlock.h
··· 37 37 * (the type definitions are in asm/spinlock_types.h) 38 38 */ 39 39 40 + void arch_lock_relax(unsigned int cpu); 41 + 40 42 void arch_spin_lock_wait(arch_spinlock_t *); 41 43 int arch_spin_trylock_retry(arch_spinlock_t *); 42 - void arch_spin_relax(arch_spinlock_t *); 43 44 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 45 + 46 + static inline void arch_spin_relax(arch_spinlock_t *lock) 47 + { 48 + arch_lock_relax(lock->lock); 49 + } 44 50 45 51 static inline u32 arch_spin_lockval(int cpu) 46 52 { ··· 176 170 { 177 171 if (!arch_write_trylock_once(rw)) 178 172 _raw_write_lock_wait(rw); 173 + rw->owner = SPINLOCK_LOCKVAL; 179 174 } 180 175 181 176 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) 182 177 { 183 178 if (!arch_write_trylock_once(rw)) 184 179 _raw_write_lock_wait_flags(rw, flags); 180 + rw->owner = SPINLOCK_LOCKVAL; 185 181 } 186 182 187 183 static inline void arch_write_unlock(arch_rwlock_t *rw) 188 184 { 189 185 typecheck(unsigned int, rw->lock); 186 + 187 + rw->owner = 0; 190 188 asm volatile( 191 189 __ASM_BARRIER 192 190 "st %1,%0\n" ··· 208 198 209 199 static inline int arch_write_trylock(arch_rwlock_t *rw) 210 200 { 211 - if (!arch_write_trylock_once(rw)) 212 - return _raw_write_trylock_retry(rw); 201 + if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw)) 202 + return 0; 203 + rw->owner = SPINLOCK_LOCKVAL; 213 204 return 1; 214 205 } 215 206 216 - #define arch_read_relax(lock) cpu_relax() 217 - #define arch_write_relax(lock) cpu_relax() 207 + static inline void arch_read_relax(arch_rwlock_t *rw) 208 + { 209 + arch_lock_relax(rw->owner); 210 + } 211 + 212 + static inline void arch_write_relax(arch_rwlock_t *rw) 213 + { 214 + arch_lock_relax(rw->owner); 215 + } 218 216 219 217 #endif /* __ASM_SPINLOCK_H */
+1
arch/s390/include/asm/spinlock_types.h
··· 13 13 14 14 typedef struct { 15 15 unsigned int lock; 16 + unsigned int owner; 16 17 } arch_rwlock_t; 17 18 18 19 #define __ARCH_RW_LOCK_UNLOCKED { 0 }
-6
arch/s390/kernel/smp.c
··· 333 333 return pcpu_running(pcpu_devices + cpu); 334 334 } 335 335 336 - void smp_yield(void) 337 - { 338 - if (MACHINE_HAS_DIAG44) 339 - asm volatile("diag 0,0,0x44"); 340 - } 341 - 342 336 void smp_yield_cpu(int cpu) 343 337 { 344 338 if (MACHINE_HAS_DIAG9C)
+30 -19
arch/s390/lib/spinlock.c
··· 98 98 } 99 99 EXPORT_SYMBOL(arch_spin_lock_wait_flags); 100 100 101 - void arch_spin_relax(arch_spinlock_t *lp) 102 - { 103 - unsigned int cpu = lp->lock; 104 - if (cpu != 0) { 105 - if (MACHINE_IS_VM || MACHINE_IS_KVM || 106 - !smp_vcpu_scheduled(~cpu)) 107 - smp_yield_cpu(~cpu); 108 - } 109 - } 110 - EXPORT_SYMBOL(arch_spin_relax); 111 - 112 101 int arch_spin_trylock_retry(arch_spinlock_t *lp) 113 102 { 114 103 int count; ··· 111 122 112 123 void _raw_read_lock_wait(arch_rwlock_t *rw) 113 124 { 114 - unsigned int old; 125 + unsigned int owner, old; 115 126 int count = spin_retry; 116 127 128 + owner = 0; 117 129 while (1) { 118 130 if (count-- <= 0) { 119 - smp_yield(); 131 + if (owner && !smp_vcpu_scheduled(~owner)) 132 + smp_yield_cpu(~owner); 120 133 count = spin_retry; 121 134 } 122 135 old = ACCESS_ONCE(rw->lock); 136 + owner = ACCESS_ONCE(rw->owner); 123 137 if ((int) old < 0) 124 138 continue; 125 139 if (_raw_compare_and_swap(&rw->lock, old, old + 1)) ··· 133 141 134 142 void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) 135 143 { 136 - unsigned int old; 144 + unsigned int owner, old; 137 145 int count = spin_retry; 138 146 139 147 local_irq_restore(flags); 148 + owner = 0; 140 149 while (1) { 141 150 if (count-- <= 0) { 142 - smp_yield(); 151 + if (owner && !smp_vcpu_scheduled(~owner)) 152 + smp_yield_cpu(~owner); 143 153 count = spin_retry; 144 154 } 145 155 old = ACCESS_ONCE(rw->lock); 156 + owner = ACCESS_ONCE(rw->owner); 146 157 if ((int) old < 0) 147 158 continue; 148 159 local_irq_disable(); ··· 174 179 175 180 void _raw_write_lock_wait(arch_rwlock_t *rw) 176 181 { 177 - unsigned int old; 182 + unsigned int owner, old; 178 183 int count = spin_retry; 179 184 185 + owner = 0; 180 186 while (1) { 181 187 if (count-- <= 0) { 182 - smp_yield(); 188 + if (owner && !smp_vcpu_scheduled(~owner)) 189 + smp_yield_cpu(~owner); 183 190 count = spin_retry; 184 191 } 185 192 old = ACCESS_ONCE(rw->lock); 193 + owner = 
ACCESS_ONCE(rw->owner); 186 194 if (old) 187 195 continue; 188 196 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) ··· 196 198 197 199 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) 198 200 { 199 - unsigned int old; 201 + unsigned int owner, old; 200 202 int count = spin_retry; 201 203 202 204 local_irq_restore(flags); 205 + owner = 0; 203 206 while (1) { 204 207 if (count-- <= 0) { 205 - smp_yield(); 208 + if (owner && !smp_vcpu_scheduled(~owner)) 209 + smp_yield_cpu(~owner); 206 210 count = spin_retry; 207 211 } 208 212 old = ACCESS_ONCE(rw->lock); 213 + owner = ACCESS_ONCE(rw->owner); 209 214 if (old) 210 215 continue; 211 216 local_irq_disable(); ··· 234 233 return 0; 235 234 } 236 235 EXPORT_SYMBOL(_raw_write_trylock_retry); 236 + 237 + void arch_lock_relax(unsigned int cpu) 238 + { 239 + if (!cpu) 240 + return; 241 + if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu)) 242 + return; 243 + smp_yield_cpu(~cpu); 244 + } 245 + EXPORT_SYMBOL(arch_lock_relax);