Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define KVM_ARMV8_PMU_MAX_COUNTERS	32

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* host event backing this counter, NULL when inactive */
};

struct kvm_pmu_events {
	u64 events_host;	/* counters that must count while running the host */
	u64 events_guest;	/* counters that must count while running the guest */
};
25
26struct kvm_pmu {
27 struct irq_work overflow_work;
28 struct kvm_pmu_events events;
29 struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
30 int irq_num;
31 bool created;
32 bool irq_level;
33};
34
35struct arm_pmu_entry {
36 struct list_head entry;
37 struct arm_pmu *arm_pmu;
38};
39

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}
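
/*
 * Illustrative sketch, not part of the upstream header: kvm_arm_pmu_available
 * is a static key, so kvm_arm_support_pmu_v3() compiles down to a patched
 * branch and is cheap to test on every hook. A hypothetical caller might
 * guard its PMU work like this:
 *
 *	static void hypothetical_pmu_hook(struct kvm_vcpu *vcpu)
 *	{
 *		if (!kvm_arm_support_pmu_v3())
 *			return;
 *		kvm_pmu_flush_hwstate(vcpu);
 *	}
 */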

/*
 * The overflow interrupt only counts as configured once userspace has picked
 * a valid PPI/SPI; numbers below VGIC_NR_SGIS are SGIs and mean "unset".
 */
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
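/*
 * Illustrative sketch, not part of the upstream header: the irq number is
 * normally programmed from userspace through the vcpu device attribute API
 * (KVM_SET_DEVICE_ATTR) before the PMU is enabled. With irq holding a PPI
 * number such as 23:
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PMU_V3_CTRL,
 *		.attr	= KVM_ARM_VCPU_PMU_V3_IRQ,
 *		.addr	= (u64)&irq,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */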
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)					\
	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_arm_support_pmu_v3())		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)
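
/*
 * Illustrative sketch, not part of the upstream header: per the comment
 * above, the snapshot has to be taken with interrupts disabled, right before
 * entering the guest. The expected shape of the call site (surrounding names
 * hypothetical):
 *
 *	local_irq_disable();
 *	kvm_pmu_update_vcpu_events(vcpu);
 *	ret = enter_guest_hyp(vcpu);
 *	local_irq_enable();
 */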

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
/*
 * Stubs used when PMU emulation is compiled out
 * (!CONFIG_HW_PERF_EVENTS || !CONFIG_KVM).
 */
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
	return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
	return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
	return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return false;
}

static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}

#endif /* CONFIG_HW_PERF_EVENTS && CONFIG_KVM */

#endif /* __ASM_ARM_KVM_PMU_H */