/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u64 events_host;
	u64 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

bool kvm_supports_guest_pmuv3(void);
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)				\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && system_supports_pmuv3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
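
/*
 * Illustrative only: a minimal sketch of the expected call pattern in the
 * vcpu run loop, assuming interrupts are disabled around guest entry. The
 * kvm_arm_vcpu_enter_exit() name below stands in for the real guest-entry
 * path and is used here purely as an example.
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	ret = kvm_arm_vcpu_enter_exit(vcpu);
 *	local_irq_enable();
 */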

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};

static inline bool kvm_supports_guest_pmuv3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
						  u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}

#endif

#endif