Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS

Valentin Rothberg reported that we use CONFIG_QUEUED_SPINLOCKS
in arch/x86/kernel/paravirt_patch_32.c, while the symbol is
called CONFIG_QUEUED_SPINLOCK (note the extra 'S'), so the
guarded code there was silently compiled out.

But the typo was natural: the proper English term for such
a generic object is 'queued spinlocks', so rename this symbol
and its relatives to the plural form.
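
The failure mode is worth spelling out: the preprocessor treats
#ifdef on an undefined name as plain false, so a misspelled CONFIG_
guard compiles its code away without any diagnostic. A minimal
stand-alone sketch of that behaviour (a hypothetical example file,
not kernel code):

    /* guard.c -- build and run: cc -o guard guard.c && ./guard */
    #include <stdio.h>

    /* What Kconfig actually defined before this rename: */
    #define CONFIG_QUEUED_SPINLOCK 1

    /* Typo'd guard (extra 'S'): the name is never defined, so the
     * preprocessor silently takes the #else branch. */
    #ifdef CONFIG_QUEUED_SPINLOCKS
    static const char *msg = "queued-spinlock code enabled";
    #else
    static const char *msg = "guard never matched; code compiled out";
    #endif

    int main(void)
    {
            puts(msg);      /* always prints the fallback message */
            return 0;
    }

Note that 'cc -Wundef' would not catch this either: it warns only
about undefined identifiers evaluated in #if expressions, not in
#ifdef/#ifndef tests.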

Reported-by: Valentin Rothberg <valentinrothberg@gmail.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

11 files changed, 32 insertions(+), 32 deletions(-)

arch/x86/Kconfig | +2 -2

@@ -127,7 +127,7 @@
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
 	select ARCH_USE_BUILTIN_BSWAP
-	select ARCH_USE_QUEUED_SPINLOCK
+	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_USE_QUEUE_RWLOCK
 	select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
 	select OLD_SIGACTION if X86_32
@@ -667,7 +667,7 @@
 config PARAVIRT_SPINLOCKS
 	bool "Paravirtualization layer for spinlocks"
 	depends on PARAVIRT && SMP
-	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCK
+	select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
 	---help---
 	  Paravirtualized spinlocks allow a pvops backend to replace the
 	  spinlock implementation with something virtualization-friendly

arch/x86/include/asm/paravirt.h | +3 -3

@@ -712,7 +712,7 @@
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
 							u32 val)
@@ -735,7 +735,7 @@
 	PVOP_VCALL1(pv_lock_ops.kick, cpu);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
 							__ticket_t ticket)
@@ -749,7 +749,7 @@
 	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 

arch/x86/include/asm/paravirt_types.h | +3 -3

@@ -336,16 +336,16 @@
 struct qspinlock;
 
 struct pv_lock_ops {
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
 	struct paravirt_callee_save queued_spin_unlock;
 
 	void (*wait)(u8 *ptr, u8 val);
 	void (*kick)(int cpu);
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	struct paravirt_callee_save lock_spinning;
 	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 };
 
 /* This contains all the paravirt structures: we get a convenient

arch/x86/include/asm/spinlock.h | +2 -2

@@ -42,7 +42,7 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm/qspinlock.h>
 #else
 
@@ -200,7 +200,7 @@
 		cpu_relax();
 	}
 }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Read-write spinlocks, allowing multiple readers

arch/x86/include/asm/spinlock_types.h | +2 -2

@@ -23,7 +23,7 @@
 
 #define TICKET_SHIFT (sizeof(__ticket_t) * 8)
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm-generic/qspinlock_types.h>
 #else
 typedef struct arch_spinlock {
@@ -36,7 +36,7 @@
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #include <asm-generic/qrwlock_types.h>
 

arch/x86/kernel/kvm.c | +5 -5

@@ -585,7 +585,7 @@
 }
 
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 #include <asm/qspinlock.h>
 
@@ -615,7 +615,7 @@
 	local_irq_restore(flags);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 
 enum kvm_contention_stat {
 	TAKEN_SLOW,
@@ -850,7 +850,7 @@
 	}
 }
 
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -863,13 +863,13 @@
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 	pv_lock_ops.wait = kvm_wait;
 	pv_lock_ops.kick = kvm_kick_cpu;
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
 	pv_lock_ops.unlock_kick = kvm_unlock_kick;
 #endif

arch/x86/kernel/paravirt-spinlocks.c | +4 -4

@@ -8,7 +8,7 @@
 
 #include <asm/paravirt.h>
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
 	native_queued_spin_unlock(lock);
@@ -25,15 +25,15 @@
 
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 	.lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
 	.unlock_kick = paravirt_nop,
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);

arch/x86/kernel/paravirt_patch_64.c | +2 -2

@@ -21,7 +21,7 @@
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
-#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
 #endif
 
@@ -65,7 +65,7 @@
 		PATCH_SITE(pv_cpu_ops, clts);
 		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
-#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
 			if (pv_is_native_spin_unlock()) {
 				start = start_pv_lock_ops_queued_spin_unlock;

arch/x86/xen/spinlock.c | +5 -5

@@ -21,7 +21,7 @@
 static DEFINE_PER_CPU(char *, irq_name);
 static bool xen_pvspin = true;
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 #include <asm/qspinlock.h>
 
@@ -65,7 +65,7 @@
 	xen_poll_irq(irq);
 }
 
-#else /* CONFIG_QUEUED_SPINLOCK */
+#else /* CONFIG_QUEUED_SPINLOCKS */
 
 enum xen_contention_stat {
 	TAKEN_SLOW,
@@ -264,7 +264,7 @@
 		}
 	}
 }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -328,7 +328,7 @@
 		return;
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 	__pv_init_lock_hash();
 	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
@@ -366,7 +366,7 @@
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
 
 static struct dentry *d_spin_debug;
 

kernel/Kconfig.locks | +3 -3

@@ -235,11 +235,11 @@
 	def_bool y
 	depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
 
-config ARCH_USE_QUEUED_SPINLOCK
+config ARCH_USE_QUEUED_SPINLOCKS
 	bool
 
-config QUEUED_SPINLOCK
-	def_bool y if ARCH_USE_QUEUED_SPINLOCK
+config QUEUED_SPINLOCKS
+	def_bool y if ARCH_USE_QUEUED_SPINLOCKS
 	depends on SMP
 
 config ARCH_USE_QUEUE_RWLOCK

kernel/locking/Makefile | +1 -1

@@ -17,7 +17,7 @@
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
-obj-$(CONFIG_QUEUED_SPINLOCK) += qspinlock.o
+obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o