Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v6.4-rc1 173 lines 5.4 kB view raw
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * KVM virtual PMU (PMUv3) definitions: per-vcpu PMU state, the
 * emulation entry points, and no-op stubs for kernels built without
 * CONFIG_HW_PERF_EVENTS.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

/* The cycle counter occupies the last counter slot. */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_HW_PERF_EVENTS

/* One emulated counter, backed by a host perf event when active. */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

/*
 * Per-CPU PMU event state, split between host and guest; copied into
 * vcpu->arch.pmu.events by kvm_pmu_update_vcpu_events() below.
 */
struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

/* Per-vcpu PMU state. */
struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	int irq_num;		/* overflow interrupt; valid once >= VGIC_NR_SGIS */
	bool created;
	bool irq_level;
};

/* List node tying an arm_pmu instance into a list (list head elsewhere). */
struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

/* Static-branch test: true when a host PMU usable by KVM was found. */
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)

/*
 * Evaluates as true when emulating PMUv3p5, and false otherwise.
 */
#define kvm_pmu_is_3p5(vcpu)						\
	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)

u8 kvm_arm_pmu_get_pmuver_limit(void);

#else
/*
 * CONFIG_HW_PERF_EVENTS is not set: PMU support is compiled out.
 * The attribute accessors fail with -ENXIO, predicates report "no PMU",
 * and everything else is a no-op so callers need no #ifdefs.
 */
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
#define kvm_pmu_is_3p5(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}

#endif

#endif