Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/pseries: Move some PAPR paravirt functions to their own file

These functions will be used by the queued spinlock implementation,
and may be useful elsewhere too, so move them out of spinlock.h.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200724131423.1362108-2-npiggin@gmail.com

authored by

Nicholas Piggin and committed by
Michael Ellerman
20d444d0 dbce4562

+66 -29
+59
arch/powerpc/include/asm/paravirt.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PAPR paravirtualisation helpers: query a vCPU's hypervisor yield count
 * and confer our timeslice to a preempted lock holder.  Split out of
 * asm/spinlock.h so the queued-spinlock code (and others) can use them.
 */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
/* Static key set when running as a shared-processor LPAR guest. */
DECLARE_STATIC_KEY_FALSE(shared_processor);

/*
 * True when physical CPUs are shared between partitions, i.e. when a
 * vCPU can be preempted by the hypervisor.  static_branch keeps this
 * free on dedicated-processor systems.
 */
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	/* READ_ONCE: the hypervisor updates yield_count concurrently. */
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Confer our remaining timeslice to @cpu via the H_CONFER hcall.
 * @yield_count must be the value previously read with yield_count_of();
 * the hypervisor uses it to detect that the target has since run
 * (presumably making the confer a no-op in that case — PAPR-defined).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
#else
/* Non-SPLPAR stubs: vCPUs are never preempted by a hypervisor. */
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

/*
 * Deliberately undefined: calling yield_to_preempted() on a
 * non-SPLPAR build is a bug, caught at link time.
 */
extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}
#endif

#define vcpu_is_preempted vcpu_is_preempted
/*
 * Is @cpu's vCPU currently preempted by the hypervisor?  Only possible
 * on shared-processor systems; there, an odd yield count means the
 * vCPU is not running (see yield_count_of()).
 */
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

#endif /* _ASM_POWERPC_PARAVIRT_H */
+1 -23
arch/powerpc/include/asm/spinlock.h
··· 15 15 * 16 16 * (the type definitions are in asm/spinlock_types.h) 17 17 */ 18 - #include <linux/jump_label.h> 19 18 #include <linux/irqflags.h> 19 + #include <asm/paravirt.h> 20 20 #ifdef CONFIG_PPC64 21 21 #include <asm/paca.h> 22 - #include <asm/hvcall.h> 23 22 #endif 24 23 #include <asm/synch.h> 25 24 #include <asm/ppc-opcode.h> ··· 32 33 #endif 33 34 #else 34 35 #define LOCK_TOKEN 1 35 - #endif 36 - 37 - #ifdef CONFIG_PPC_PSERIES 38 - DECLARE_STATIC_KEY_FALSE(shared_processor); 39 - 40 - #define vcpu_is_preempted vcpu_is_preempted 41 - static inline bool vcpu_is_preempted(int cpu) 42 - { 43 - if (!static_branch_unlikely(&shared_processor)) 44 - return false; 45 - return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1); 46 - } 47 36 #endif 48 37 49 38 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) ··· 96 109 static inline void splpar_spin_yield(arch_spinlock_t *lock) {}; 97 110 static inline void splpar_rw_yield(arch_rwlock_t *lock) {}; 98 111 #endif 99 - 100 - static inline bool is_shared_processor(void) 101 - { 102 - #ifdef CONFIG_PPC_SPLPAR 103 - return static_branch_unlikely(&shared_processor); 104 - #else 105 - return false; 106 - #endif 107 - } 108 112 109 113 static inline void spin_yield(arch_spinlock_t *lock) 110 114 {
+6 -6
arch/powerpc/lib/locks.c
··· 27 27 return; 28 28 holder_cpu = lock_value & 0xffff; 29 29 BUG_ON(holder_cpu >= NR_CPUS); 30 - yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); 30 + 31 + yield_count = yield_count_of(holder_cpu); 31 32 if ((yield_count & 1) == 0) 32 33 return; /* virtual cpu is currently running */ 33 34 rmb(); 34 35 if (lock->slock != lock_value) 35 36 return; /* something has changed */ 36 - plpar_hcall_norets(H_CONFER, 37 - get_hard_smp_processor_id(holder_cpu), yield_count); 37 + yield_to_preempted(holder_cpu, yield_count); 38 38 } 39 39 EXPORT_SYMBOL_GPL(splpar_spin_yield); 40 40 ··· 53 53 return; /* no write lock at present */ 54 54 holder_cpu = lock_value & 0xffff; 55 55 BUG_ON(holder_cpu >= NR_CPUS); 56 - yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count); 56 + 57 + yield_count = yield_count_of(holder_cpu); 57 58 if ((yield_count & 1) == 0) 58 59 return; /* virtual cpu is currently running */ 59 60 rmb(); 60 61 if (rw->lock != lock_value) 61 62 return; /* something has changed */ 62 - plpar_hcall_norets(H_CONFER, 63 - get_hard_smp_processor_id(holder_cpu), yield_count); 63 + yield_to_preempted(holder_cpu, yield_count); 64 64 } 65 65 #endif