Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

parisc: Switch to more fine grained lws locks

Increase the number of lws locks to 256 entries (instead of 16) and
choose lock entry based on bits 3-11 (instead of 4-7) of the relevant
address. With this change we achieve more fine-grained locking in
futex syscalls and thus reduce the number of possible stalls.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>

authored by

John David Anglin and committed by
Helge Deller
53a42b63 2a7d4eed

+7 -7
+2 -2
arch/parisc/include/asm/futex.h
@@ -16,7 +16,7 @@
 _futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
 {
 	extern u32 lws_lock_start[];
-	long index = ((long)uaddr & 0xf0) >> 2;
+	long index = ((long)uaddr & 0x3f8) >> 1;
 	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
 	local_irq_save(*flags);
 	arch_spin_lock(s);
@@ -26,7 +26,7 @@
 _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
 {
 	extern u32 lws_lock_start[];
-	long index = ((long)uaddr & 0xf0) >> 2;
+	long index = ((long)uaddr & 0x3f8) >> 1;
 	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
 	arch_spin_unlock(s);
 	local_irq_restore(*flags);
+5 -5
arch/parisc/kernel/syscall.S
@@ -571,8 +571,8 @@
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
 
-	/* Extract four bits from r26 and hash lock (Bits 4-7) */
-	extru	%r26, 27, 4, %r20
+	/* Extract eight bits from r26 and hash lock (Bits 3-11) */
+	extru	%r26, 28, 8, %r20
 
 	/* Find lock to use, the hash is either one of 0 to
 	   15, multiplied by 16 (keep it 16-byte aligned)
@@ -761,8 +761,8 @@
 	ldil	L%lws_lock_start, %r20
 	ldo	R%lws_lock_start(%r20), %r28
 
-	/* Extract four bits from r26 and hash lock (Bits 4-7) */
-	extru	%r26, 27, 4, %r20
+	/* Extract eight bits from r26 and hash lock (Bits 3-11) */
+	extru	%r26, 28, 8, %r20
 
 	/* Find lock to use, the hash is either one of 0 to
 	   15, multiplied by 16 (keep it 16-byte aligned)
@@ -950,7 +950,7 @@
 	.align	L1_CACHE_BYTES
 ENTRY(lws_lock_start)
 	/* lws locks */
-	.rept	16
+	.rept	256
 	/* Keep locks aligned at 16-bytes */
 	.word	1
 	.word	0
··· 571 571 ldil L%lws_lock_start, %r20 572 572 ldo R%lws_lock_start(%r20), %r28 573 573 574 - /* Extract four bits from r26 and hash lock (Bits 4-7) */ 575 - extru %r26, 27, 4, %r20 574 + /* Extract eight bits from r26 and hash lock (Bits 3-11) */ 575 + extru %r26, 28, 8, %r20 576 576 577 577 /* Find lock to use, the hash is either one of 0 to 578 578 15, multiplied by 16 (keep it 16-byte aligned) ··· 761 761 ldil L%lws_lock_start, %r20 762 762 ldo R%lws_lock_start(%r20), %r28 763 763 764 - /* Extract four bits from r26 and hash lock (Bits 4-7) */ 765 - extru %r26, 27, 4, %r20 764 + /* Extract eight bits from r26 and hash lock (Bits 3-11) */ 765 + extru %r26, 28, 8, %r20 766 766 767 767 /* Find lock to use, the hash is either one of 0 to 768 768 15, multiplied by 16 (keep it 16-byte aligned) ··· 950 950 .align L1_CACHE_BYTES 951 951 ENTRY(lws_lock_start) 952 952 /* lws locks */ 953 - .rept 16 953 + .rept 256 954 954 /* Keep locks aligned at 16-bytes */ 955 955 .word 1 956 956 .word 0