Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Split hyp/debug-sr.c to VHE/nVHE

debug-sr.c contains KVM's code for context-switching debug registers, with some
code shared between VHE/nVHE. These common routines are moved to a header file,
VHE-specific code is moved to vhe/debug-sr.c and nVHE-specific code to
nvhe/debug-sr.c.

Functions are slightly refactored to move code hidden behind `has_vhe()` checks
to the corresponding .c files.

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200625131420.71444-11-dbrazdil@google.com

Authored by David Brazdil and committed by Marc Zyngier.
Commit IDs: d400c5b2 09cf57eb

+118 -74
-5
arch/arm64/kernel/image-vars.h
··· 66 66 /* Symbols defined in aarch32.c (not yet compiled with nVHE build rules). */ 67 67 KVM_NVHE_ALIAS(kvm_skip_instr32); 68 68 69 - /* Symbols defined in debug-sr.c (not yet compiled with nVHE build rules). */ 70 - KVM_NVHE_ALIAS(__debug_switch_to_guest); 71 - KVM_NVHE_ALIAS(__debug_switch_to_host); 72 - KVM_NVHE_ALIAS(__kvm_get_mdcr_el2); 73 - 74 69 /* Symbols defined in entry.S (not yet compiled with nVHE build rules). */ 75 70 KVM_NVHE_ALIAS(__guest_enter); 76 71 KVM_NVHE_ALIAS(__guest_exit);
+1 -1
arch/arm64/kvm/hyp/Makefile
··· 14 14 obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o 15 15 16 16 hyp-y := vgic-v3-sr.o timer-sr.o aarch32.o vgic-v2-cpuif-proxy.o sysreg-sr.o \ 17 - debug-sr.o entry.o fpsimd.o 17 + entry.o fpsimd.o 18 18 19 19 # KVM code is run at a different exception code with a different map, so 20 20 # compiler instrumentation that inserts callbacks or checks into the code may
+12 -66
arch/arm64/kvm/hyp/debug-sr.c arch/arm64/kvm/hyp/include/hyp/debug-sr.h
··· 4 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 5 */ 6 6 7 + #ifndef __ARM64_KVM_HYP_DEBUG_SR_H__ 8 + #define __ARM64_KVM_HYP_DEBUG_SR_H__ 9 + 7 10 #include <linux/compiler.h> 8 11 #include <linux/kvm_host.h> 9 12 ··· 88 85 default: write_debug(ptr[0], reg, 0); \ 89 86 } 90 87 91 - static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1) 92 - { 93 - u64 reg; 94 - 95 - /* Clear pmscr in case of early return */ 96 - *pmscr_el1 = 0; 97 - 98 - /* SPE present on this CPU? */ 99 - if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), 100 - ID_AA64DFR0_PMSVER_SHIFT)) 101 - return; 102 - 103 - /* Yes; is it owned by EL3? */ 104 - reg = read_sysreg_s(SYS_PMBIDR_EL1); 105 - if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) 106 - return; 107 - 108 - /* No; is the host actually using the thing? */ 109 - reg = read_sysreg_s(SYS_PMBLIMITR_EL1); 110 - if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) 111 - return; 112 - 113 - /* Yes; save the control register and disable data generation */ 114 - *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1); 115 - write_sysreg_s(0, SYS_PMSCR_EL1); 116 - isb(); 117 - 118 - /* Now drain all buffered data to memory */ 119 - psb_csync(); 120 - dsb(nsh); 121 - } 122 - 123 - static void __hyp_text __debug_restore_spe_nvhe(u64 pmscr_el1) 124 - { 125 - if (!pmscr_el1) 126 - return; 127 - 128 - /* The host page table is installed, but not yet synchronised */ 129 - isb(); 130 - 131 - /* Re-enable data generation */ 132 - write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1); 133 - } 134 - 135 - static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu, 136 - struct kvm_guest_debug_arch *dbg, 137 - struct kvm_cpu_context *ctxt) 88 + static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu, 89 + struct kvm_guest_debug_arch *dbg, 90 + struct kvm_cpu_context *ctxt) 138 91 { 139 92 u64 aa64dfr0; 140 93 int brps, wrps; ··· 107 148 ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1); 108 149 } 109 150 110 - static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, 111 - struct kvm_guest_debug_arch *dbg, 112 - struct kvm_cpu_context *ctxt) 151 + static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu, 152 + struct kvm_guest_debug_arch *dbg, 153 + struct kvm_cpu_context *ctxt) 113 154 { 114 155 u64 aa64dfr0; 115 156 int brps, wrps; ··· 127 168 write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1); 128 169 } 129 170 130 - void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) 171 + static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vcpu) 131 172 { 132 173 struct kvm_cpu_context *host_ctxt; 133 174 struct kvm_cpu_context *guest_ctxt; 134 175 struct kvm_guest_debug_arch *host_dbg; 135 176 struct kvm_guest_debug_arch *guest_dbg; 136 - 137 - /* 138 - * Non-VHE: Disable and flush SPE data generation 139 - * VHE: The vcpu can run, but it can't hide. 140 - */ 141 - if (!has_vhe()) 142 - __debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1); 143 177 144 178 if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) 145 179 return; ··· 146 194 __debug_restore_state(vcpu, guest_dbg, guest_ctxt); 147 195 } 148 196 149 - void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) 197 + static inline void __hyp_text __debug_switch_to_host_common(struct kvm_vcpu *vcpu) 150 198 { 151 199 struct kvm_cpu_context *host_ctxt; 152 200 struct kvm_cpu_context *guest_ctxt; 153 201 struct kvm_guest_debug_arch *host_dbg; 154 202 struct kvm_guest_debug_arch *guest_dbg; 155 - 156 - if (!has_vhe()) 157 - __debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1); 158 203 159 204 if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) 160 205 return; ··· 167 218 vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY; 168 219 } 169 220 170 - u32 __hyp_text __kvm_get_mdcr_el2(void) 171 - { 172 - return read_sysreg(mdcr_el2); 173 - } 221 + #endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */
+1 -1
arch/arm64/kvm/hyp/nvhe/Makefile
··· 6 6 asflags-y := -D__KVM_NVHE_HYPERVISOR__ 7 7 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ 8 8 9 - obj-y := switch.o tlb.o hyp-init.o ../hyp-entry.o 9 + obj-y := debug-sr.o switch.o tlb.o hyp-init.o ../hyp-entry.o 10 10 11 11 obj-y := $(patsubst %.o,%.hyp.o,$(obj-y)) 12 12 extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
+77
arch/arm64/kvm/hyp/nvhe/debug-sr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2015 - ARM Ltd 4 + * Author: Marc Zyngier <marc.zyngier@arm.com> 5 + */ 6 + 7 + #include <hyp/debug-sr.h> 8 + 9 + #include <linux/compiler.h> 10 + #include <linux/kvm_host.h> 11 + 12 + #include <asm/debug-monitors.h> 13 + #include <asm/kvm_asm.h> 14 + #include <asm/kvm_hyp.h> 15 + #include <asm/kvm_mmu.h> 16 + 17 + static void __hyp_text __debug_save_spe(u64 *pmscr_el1) 18 + { 19 + u64 reg; 20 + 21 + /* Clear pmscr in case of early return */ 22 + *pmscr_el1 = 0; 23 + 24 + /* SPE present on this CPU? */ 25 + if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1), 26 + ID_AA64DFR0_PMSVER_SHIFT)) 27 + return; 28 + 29 + /* Yes; is it owned by EL3? */ 30 + reg = read_sysreg_s(SYS_PMBIDR_EL1); 31 + if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) 32 + return; 33 + 34 + /* No; is the host actually using the thing? */ 35 + reg = read_sysreg_s(SYS_PMBLIMITR_EL1); 36 + if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT))) 37 + return; 38 + 39 + /* Yes; save the control register and disable data generation */ 40 + *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1); 41 + write_sysreg_s(0, SYS_PMSCR_EL1); 42 + isb(); 43 + 44 + /* Now drain all buffered data to memory */ 45 + psb_csync(); 46 + dsb(nsh); 47 + } 48 + 49 + static void __hyp_text __debug_restore_spe(u64 pmscr_el1) 50 + { 51 + if (!pmscr_el1) 52 + return; 53 + 54 + /* The host page table is installed, but not yet synchronised */ 55 + isb(); 56 + 57 + /* Re-enable data generation */ 58 + write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1); 59 + } 60 + 61 + void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) 62 + { 63 + /* Disable and flush SPE data generation */ 64 + __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1); 65 + __debug_switch_to_guest_common(vcpu); 66 + } 67 + 68 + void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) 69 + { 70 + __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1); 71 + __debug_switch_to_host_common(vcpu); 72 + } 73 + 74 + u32 __hyp_text __kvm_get_mdcr_el2(void) 75 + { 76 + return read_sysreg(mdcr_el2); 77 + }
+1 -1
arch/arm64/kvm/hyp/vhe/Makefile
··· 6 6 asflags-y := -D__KVM_VHE_HYPERVISOR__ 7 7 ccflags-y := -D__KVM_VHE_HYPERVISOR__ 8 8 9 - obj-y := switch.o tlb.o ../hyp-entry.o 9 + obj-y := debug-sr.o switch.o tlb.o ../hyp-entry.o 10 10 11 11 # KVM code is run at a different exception code with a different map, so 12 12 # compiler instrumentation that inserts callbacks or checks into the code may
+26
arch/arm64/kvm/hyp/vhe/debug-sr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2015 - ARM Ltd 4 + * Author: Marc Zyngier <marc.zyngier@arm.com> 5 + */ 6 + 7 + #include <hyp/debug-sr.h> 8 + 9 + #include <linux/kvm_host.h> 10 + 11 + #include <asm/kvm_hyp.h> 12 + 13 + void __debug_switch_to_guest(struct kvm_vcpu *vcpu) 14 + { 15 + __debug_switch_to_guest_common(vcpu); 16 + } 17 + 18 + void __debug_switch_to_host(struct kvm_vcpu *vcpu) 19 + { 20 + __debug_switch_to_host_common(vcpu); 21 + } 22 + 23 + u32 __kvm_get_mdcr_el2(void) 24 + { 25 + return read_sysreg(mdcr_el2); 26 + }