Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] do local_irq_restore while spinning in spin_lock_irqsave.

In s390's spin_lock_irqsave, interrupts remain disabled while
spinning. On other architectures such as x86 and powerpc, interrupts are
re-enabled while spinning if they were not already disabled before
spin_lock_irqsave was called.

The following patch re-enables interrupts through local_irq_restore
while spinning for a lock acquisition.
This can improve system responsiveness.

[heiko.carstens@de.ibm.com: removed saving of pc]

Signed-off-by: Hisashi Hifumi <hifumi.hisashi@oss.ntt.co.jp>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Hisashi Hifumi and committed by
Martin Schwidefsky
894cdde2 dab5209c

+35 -1
+23
arch/s390/lib/spinlock.c
··· 59 59 } 60 60 EXPORT_SYMBOL(_raw_spin_lock_wait); 61 61 62 + void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) 63 + { 64 + int count = spin_retry; 65 + unsigned int cpu = ~smp_processor_id(); 66 + 67 + local_irq_restore(flags); 68 + while (1) { 69 + if (count-- <= 0) { 70 + unsigned int owner = lp->owner_cpu; 71 + if (owner != 0) 72 + _raw_yield_cpu(~owner); 73 + count = spin_retry; 74 + } 75 + if (__raw_spin_is_locked(lp)) 76 + continue; 77 + local_irq_disable(); 78 + if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 79 + return; 80 + local_irq_restore(flags); 81 + } 82 + } 83 + EXPORT_SYMBOL(_raw_spin_lock_wait_flags); 84 + 62 85 int _raw_spin_trylock_retry(raw_spinlock_t *lp) 63 86 { 64 87 unsigned int cpu = ~smp_processor_id();
+12 -1
include/asm-s390/spinlock.h
··· 53 53 */ 54 54 55 55 #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) 56 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 57 56 #define __raw_spin_unlock_wait(lock) \ 58 57 do { while (__raw_spin_is_locked(lock)) \ 59 58 _raw_spin_relax(lock); } while (0) 60 59 61 60 extern void _raw_spin_lock_wait(raw_spinlock_t *); 61 + extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); 62 62 extern int _raw_spin_trylock_retry(raw_spinlock_t *); 63 63 extern void _raw_spin_relax(raw_spinlock_t *lock); 64 64 ··· 70 70 if (likely(old == 0)) 71 71 return; 72 72 _raw_spin_lock_wait(lp); 73 + } 74 + 75 + static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, 76 + unsigned long flags) 77 + { 78 + int old; 79 + 80 + old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 81 + if (likely(old == 0)) 82 + return; 83 + _raw_spin_lock_wait_flags(lp, flags); 73 84 } 74 85 75 86 static inline int __raw_spin_trylock(raw_spinlock_t *lp)