Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Introduce low level MSR accessor

kvmppc_get_msr() and kvmppc_set_msr_fast() serve as accessors for the
MSR. However because the MSR is kept in the shared regs they include a
conditional check for kvmppc_shared_big_endian() and endian conversion.

Within the Book3S HV specific code there are direct reads and writes of
shregs::msr. In preparation for Nested APIv2 these accesses need to be
replaced with accessor functions so it is possible to extend their
behavior. However, using the kvmppc_get_msr() and kvmppc_set_msr_fast()
functions is undesirable because it would introduce a conditional branch
and endian conversion that is not currently present.

kvmppc_set_msr_hv() already exists, it is used for the
kvmppc_ops::set_msr callback.

Introduce low level accessors __kvmppc_{s,g}et_msr_hv() that simply
get and set shregs::msr. These will be extended for Nested APIv2 support.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-8-jniethe5@gmail.com

authored by

Jordan Niethe and committed by
Michael Ellerman
6de2e837 ebc88ea7

+33 -21
+3 -2
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 28 28 #include <asm/pte-walk.h> 29 29 30 30 #include "book3s.h" 31 + #include "book3s_hv.h" 31 32 #include "trace_hv.h" 32 33 33 34 //#define DEBUG_RESIZE_HPT 1 ··· 348 347 unsigned long v, orig_v, gr; 349 348 __be64 *hptep; 350 349 long int index; 351 - int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); 350 + int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR); 352 351 353 352 if (kvm_is_radix(vcpu->kvm)) 354 353 return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite); ··· 386 385 387 386 /* Get PP bits and key for permission check */ 388 387 pp = gr & (HPTE_R_PP0 | HPTE_R_PP); 389 - key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; 388 + key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; 390 389 key &= slb_v; 391 390 392 391 /* Calculate permissions */
+17 -17
arch/powerpc/kvm/book3s_hv.c
··· 1374 1374 */ 1375 1375 static void kvmppc_cede(struct kvm_vcpu *vcpu) 1376 1376 { 1377 - vcpu->arch.shregs.msr |= MSR_EE; 1377 + __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE); 1378 1378 vcpu->arch.ceded = 1; 1379 1379 smp_mb(); 1380 1380 if (vcpu->arch.prodded) { ··· 1589 1589 * That can happen due to a bug, or due to a machine check 1590 1590 * occurring at just the wrong time. 1591 1591 */ 1592 - if (vcpu->arch.shregs.msr & MSR_HV) { 1592 + if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) { 1593 1593 printk(KERN_EMERG "KVM trap in HV mode!\n"); 1594 1594 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1595 1595 vcpu->arch.trap, kvmppc_get_pc(vcpu), ··· 1640 1640 * so that it knows that the machine check occurred. 1641 1641 */ 1642 1642 if (!vcpu->kvm->arch.fwnmi_enabled) { 1643 - ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) | 1643 + ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) | 1644 1644 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 1645 1645 kvmppc_core_queue_machine_check(vcpu, flags); 1646 1646 r = RESUME_GUEST; ··· 1670 1670 * as a result of a hypervisor emulation interrupt 1671 1671 * (e40) getting turned into a 700 by BML RTAS. 1672 1672 */ 1673 - flags = (vcpu->arch.shregs.msr & 0x1f0000ull) | 1673 + flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) | 1674 1674 (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 1675 1675 kvmppc_core_queue_program(vcpu, flags); 1676 1676 r = RESUME_GUEST; ··· 1680 1680 { 1681 1681 int i; 1682 1682 1683 - if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { 1683 + if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 1684 1684 /* 1685 1685 * Guest userspace executed sc 1. 
This can only be 1686 1686 * reached by the P9 path because the old path ··· 1758 1758 break; 1759 1759 } 1760 1760 1761 - if (!(vcpu->arch.shregs.msr & MSR_DR)) 1761 + if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR)) 1762 1762 vsid = vcpu->kvm->arch.vrma_slb_v; 1763 1763 else 1764 1764 vsid = vcpu->arch.fault_gpa; ··· 1782 1782 long err; 1783 1783 1784 1784 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); 1785 - vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & 1785 + vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) & 1786 1786 DSISR_SRR1_MATCH_64S; 1787 1787 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { 1788 1788 /* ··· 1791 1791 * hash fault handling below is v3 only (it uses ASDR 1792 1792 * via fault_gpa). 1793 1793 */ 1794 - if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) 1794 + if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE) 1795 1795 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; 1796 1796 r = RESUME_PAGE_FAULT; 1797 1797 break; ··· 1805 1805 break; 1806 1806 } 1807 1807 1808 - if (!(vcpu->arch.shregs.msr & MSR_IR)) 1808 + if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR)) 1809 1809 vsid = vcpu->kvm->arch.vrma_slb_v; 1810 1810 else 1811 1811 vsid = vcpu->arch.fault_gpa; ··· 1895 1895 kvmppc_dump_regs(vcpu); 1896 1896 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1897 1897 vcpu->arch.trap, kvmppc_get_pc(vcpu), 1898 - vcpu->arch.shregs.msr); 1898 + __kvmppc_get_msr_hv(vcpu)); 1899 1899 run->hw.hardware_exit_reason = vcpu->arch.trap; 1900 1900 r = RESUME_HOST; 1901 1901 break; ··· 1919 1919 * That can happen due to a bug, or due to a machine check 1920 1920 * occurring at just the wrong time. 
1921 1921 */ 1922 - if (vcpu->arch.shregs.msr & MSR_HV) { 1922 + if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) { 1923 1923 pr_emerg("KVM trap in HV mode while nested!\n"); 1924 1924 pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", 1925 1925 vcpu->arch.trap, kvmppc_get_pc(vcpu), 1926 - vcpu->arch.shregs.msr); 1926 + __kvmppc_get_msr_hv(vcpu)); 1927 1927 kvmppc_dump_regs(vcpu); 1928 1928 return RESUME_HOST; 1929 1929 } ··· 1980 1980 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); 1981 1981 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & 1982 1982 DSISR_SRR1_MATCH_64S; 1983 - if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) 1983 + if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE) 1984 1984 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; 1985 1985 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1986 1986 r = kvmhv_nested_page_fault(vcpu); ··· 2940 2940 spin_lock_init(&vcpu->arch.vpa_update_lock); 2941 2941 spin_lock_init(&vcpu->arch.tbacct_lock); 2942 2942 vcpu->arch.busy_preempt = TB_NIL; 2943 - vcpu->arch.shregs.msr = MSR_ME; 2943 + __kvmppc_set_msr_hv(vcpu, MSR_ME); 2944 2944 vcpu->arch.intr_msr = MSR_SF | MSR_ME; 2945 2945 2946 2946 /* ··· 4188 4188 __this_cpu_write(cpu_in_guest, NULL); 4189 4189 4190 4190 if (trap == BOOK3S_INTERRUPT_SYSCALL && 4191 - !(vcpu->arch.shregs.msr & MSR_PR)) { 4191 + !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 4192 4192 unsigned long req = kvmppc_get_gpr(vcpu, 3); 4193 4193 4194 4194 /* ··· 4667 4667 4668 4668 if (!nested) { 4669 4669 kvmppc_core_prepare_to_enter(vcpu); 4670 - if (vcpu->arch.shregs.msr & MSR_EE) { 4670 + if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) { 4671 4671 if (xive_interrupt_pending(vcpu)) 4672 4672 kvmppc_inject_interrupt_hv(vcpu, 4673 4673 BOOK3S_INTERRUPT_EXTERNAL, 0); ··· 4880 4880 if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { 4881 4881 accumulate_time(vcpu, &vcpu->arch.hcall); 4882 4882 4883 - if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { 4883 + if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) { 4884 4884 /* 4885 4885 * These 
should have been caught reflected 4886 4886 * into the guest by now. Final sanity check:
+10
arch/powerpc/kvm/book3s_hv.h
··· 51 51 #define end_timing(vcpu) do {} while (0) 52 52 #endif 53 53 54 + static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val) 55 + { 56 + vcpu->arch.shregs.msr = val; 57 + } 58 + 59 + static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu) 60 + { 61 + return vcpu->arch.shregs.msr; 62 + } 63 + 54 64 #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \ 55 65 static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \ 56 66 { \
+3 -2
arch/powerpc/kvm/book3s_hv_builtin.c
··· 32 32 33 33 #include "book3s_xics.h" 34 34 #include "book3s_xive.h" 35 + #include "book3s_hv.h" 35 36 36 37 /* 37 38 * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206) ··· 515 514 */ 516 515 if ((msr & MSR_TS_MASK) == MSR_TS_MASK) 517 516 msr &= ~MSR_TS_MASK; 518 - vcpu->arch.shregs.msr = msr; 517 + __kvmppc_set_msr_hv(vcpu, msr); 519 518 kvmppc_end_cede(vcpu); 520 519 } 521 520 EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv); ··· 553 552 kvmppc_set_srr0(vcpu, pc); 554 553 kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); 555 554 kvmppc_set_pc(vcpu, new_pc); 556 - vcpu->arch.shregs.msr = new_msr; 555 + __kvmppc_set_msr_hv(vcpu, new_msr); 557 556 } 558 557 559 558 void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)