/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u64 events_host;
	u64 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)					\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
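
/*
 * Illustrative call-site sketch, based only on the comment above: the
 * snapshot is taken on the vcpu entry path with interrupts disabled, before
 * switching to the guest. The surrounding code below is an assumption for
 * illustration, not a declaration provided by this header:
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	... enter the guest, return, then ...
 *	local_irq_enable();
 */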

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}

#endif

#endif