Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Wrapper for getting pmu_events

Eases migrating away from using hyp data and simplifies the code.

No functional change intended.

Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220510095710.148178-2-tabba@google.com

Authored by Fuad Tabba; committed by Marc Zyngier
3cb8a091 b2d229d4

+26 -16
arch/arm64/kvm/pmu.c
··· 25 25 return (attr->exclude_host != attr->exclude_guest); 26 26 } 27 27 28 + static struct kvm_pmu_events *kvm_get_pmu_events(void) 29 + { 30 + struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); 31 + 32 + if (!ctx) 33 + return NULL; 34 + 35 + return &ctx->pmu_events; 36 + } 37 + 28 38 /* 29 39 * Add events to track that we may want to switch at guest entry/exit 30 40 * time. 31 41 */ 32 42 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) 33 43 { 34 - struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); 44 + struct kvm_pmu_events *pmu = kvm_get_pmu_events(); 35 45 36 - if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr)) 46 + if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr)) 37 47 return; 38 48 39 49 if (!attr->exclude_host) 40 - ctx->pmu_events.events_host |= set; 50 + pmu->events_host |= set; 41 51 if (!attr->exclude_guest) 42 - ctx->pmu_events.events_guest |= set; 52 + pmu->events_guest |= set; 43 53 } 44 54 45 55 /* ··· 57 47 */ 58 48 void kvm_clr_pmu_events(u32 clr) 59 49 { 60 - struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data); 50 + struct kvm_pmu_events *pmu = kvm_get_pmu_events(); 61 51 62 - if (!kvm_arm_support_pmu_v3() || !ctx) 52 + if (!kvm_arm_support_pmu_v3() || !pmu) 63 53 return; 64 54 65 - ctx->pmu_events.events_host &= ~clr; 66 - ctx->pmu_events.events_guest &= ~clr; 55 + pmu->events_host &= ~clr; 56 + pmu->events_guest &= ~clr; 67 57 } 68 58 69 59 #define PMEVTYPER_READ_CASE(idx) \ ··· 179 169 */ 180 170 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) 181 171 { 182 - struct kvm_host_data *host; 172 + struct kvm_pmu_events *pmu; 183 173 u32 events_guest, events_host; 184 174 185 175 if (!kvm_arm_support_pmu_v3() || !has_vhe()) 186 176 return; 187 177 188 178 preempt_disable(); 189 - host = this_cpu_ptr_hyp_sym(kvm_host_data); 190 - events_guest = host->pmu_events.events_guest; 191 - events_host = host->pmu_events.events_host; 179 + pmu = kvm_get_pmu_events(); 180 + events_guest = pmu->events_guest; 181 + events_host = pmu->events_host; 192 182 193 183 kvm_vcpu_pmu_enable_el0(events_guest); 194 184 kvm_vcpu_pmu_disable_el0(events_host); ··· 200 190 */ 201 191 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) 202 192 { 203 - struct kvm_host_data *host; 193 + struct kvm_pmu_events *pmu; 204 194 u32 events_guest, events_host; 205 195 206 196 if (!kvm_arm_support_pmu_v3() || !has_vhe()) 207 197 return; 208 198 209 - host = this_cpu_ptr_hyp_sym(kvm_host_data); 210 - events_guest = host->pmu_events.events_guest; 211 - events_host = host->pmu_events.events_host; 199 + pmu = kvm_get_pmu_events(); 200 + events_guest = pmu->events_guest; 201 + events_host = pmu->events_host; 212 202 213 203 kvm_vcpu_pmu_enable_el0(events_host); 214 204 kvm_vcpu_pmu_disable_el0(events_guest);