Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Pass pmu events to hyp via vcpu

Instead of the host accessing hyp data directly, pass the pmu
events of the current cpu to hyp via the vcpu.

This adds 64 bits (in two fields) to the vcpu that need to be
synced before every vcpu run in nvhe and protected modes.
However, it isolates the hypervisor from the host, which allows
us to use pmu in protected mode in a subsequent patch.

No visible side effects in behavior intended.

Signed-off-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220510095710.148178-4-tabba@google.com

Authored by Fuad Tabba and committed by Marc Zyngier.
84d751a0 e987a4c6

+32 -28
+1 -6
arch/arm64/include/asm/kvm_host.h
··· 254 254 struct kvm_vcpu *__hyp_running_vcpu; 255 255 }; 256 256 257 - struct kvm_pmu_events { 258 - u32 events_host; 259 - u32 events_guest; 260 - }; 261 - 262 257 struct kvm_host_data { 263 258 struct kvm_cpu_context host_ctxt; 264 - struct kvm_pmu_events pmu_events; 265 259 }; 266 260 267 261 struct kvm_host_psci_config { ··· 790 796 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); 791 797 void kvm_clr_pmu_events(u32 clr); 792 798 799 + struct kvm_pmu_events *kvm_get_pmu_events(void); 793 800 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); 794 801 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); 795 802 #else
+15
arch/arm64/kvm/arm.c
··· 751 751 return ret; 752 752 } 753 753 754 + /* 755 + * Updates the vcpu's view of the pmu events for this cpu. 756 + * Must be called before every vcpu run after disabling interrupts, to ensure 757 + * that an interrupt cannot fire and update the structure. 758 + */ 759 + static void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) 760 + { 761 + if (has_vhe() || !kvm_vcpu_has_pmu(vcpu)) 762 + return; 763 + 764 + vcpu->arch.pmu.events = *kvm_get_pmu_events(); 765 + } 766 + 754 767 /** 755 768 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code 756 769 * @vcpu: The VCPU pointer ··· 827 814 local_irq_disable(); 828 815 829 816 kvm_vgic_flush_hwstate(vcpu); 817 + 818 + kvm_pmu_update_vcpu_events(vcpu); 830 819 831 820 /* 832 821 * Ensure we set mode to IN_GUEST_MODE after we disable
+6 -14
arch/arm64/kvm/hyp/nvhe/switch.c
··· 123 123 /** 124 124 * Disable host events, enable guest events 125 125 */ 126 - static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt) 126 + static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu) 127 127 { 128 - struct kvm_host_data *host; 129 - struct kvm_pmu_events *pmu; 130 - 131 - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); 132 - pmu = &host->pmu_events; 128 + struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events; 133 129 134 130 if (pmu->events_host) 135 131 write_sysreg(pmu->events_host, pmcntenclr_el0); ··· 139 143 /** 140 144 * Disable guest events, enable host events 141 145 */ 142 - static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) 146 + static void __pmu_switch_to_host(struct kvm_vcpu *vcpu) 143 147 { 144 - struct kvm_host_data *host; 145 - struct kvm_pmu_events *pmu; 146 - 147 - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); 148 - pmu = &host->pmu_events; 148 + struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events; 149 149 150 150 if (pmu->events_guest) 151 151 write_sysreg(pmu->events_guest, pmcntenclr_el0); ··· 266 274 host_ctxt->__hyp_running_vcpu = vcpu; 267 275 guest_ctxt = &vcpu->arch.ctxt; 268 276 269 - pmu_switch_needed = __pmu_switch_to_guest(host_ctxt); 277 + pmu_switch_needed = __pmu_switch_to_guest(vcpu); 270 278 271 279 __sysreg_save_state_nvhe(host_ctxt); 272 280 /* ··· 328 336 __debug_restore_host_buffers_nvhe(vcpu); 329 337 330 338 if (pmu_switch_needed) 331 - __pmu_switch_to_host(host_ctxt); 339 + __pmu_switch_to_host(vcpu); 332 340 333 341 /* Returning to host will clear PSR.I, remask PMR if needed */ 334 342 if (system_uses_irq_prio_masking())
+4 -8
arch/arm64/kvm/pmu.c
··· 5 5 */ 6 6 #include <linux/kvm_host.h> 7 7 #include <linux/perf_event.h> 8 - #include <asm/kvm_hyp.h> 8 + 9 + static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events); 9 10 10 11 /* 11 12 * Given the perf event attributes and system type, determine ··· 26 25 return (attr->exclude_host != attr->exclude_guest); 27 26 } 28 27 29 - static struct kvm_pmu_events *kvm_get_pmu_events(void) 28 + struct kvm_pmu_events *kvm_get_pmu_events(void) 30 29 { 31 - struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); 32 - 33 - if (!ctx) 34 - return NULL; 35 - 36 - return &ctx->pmu_events; 30 + return this_cpu_ptr(&kvm_pmu_events); 37 31 } 38 32 39 33 /*
+6
include/kvm/arm_pmu.h
··· 20 20 struct perf_event *perf_event; 21 21 }; 22 22 23 + struct kvm_pmu_events { 24 + u32 events_host; 25 + u32 events_guest; 26 + }; 27 + 23 28 struct kvm_pmu { 24 29 struct irq_work overflow_work; 30 + struct kvm_pmu_events events; 25 31 struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; 26 32 DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS); 27 33 int irq_num;