Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'kvm-arm64/pmu-fixes' into kvmarm/next

* kvm-arm64/pmu-fixes:
: vPMU fixes for 6.15 courtesy of Akihiko Odaki
:
: Various fixes to KVM's vPMU implementation, notably ensuring
: userspace-directed changes to the PMCs are reflected in the backing perf
: events.
KVM: arm64: PMU: Reload when resetting
KVM: arm64: PMU: Reload when user modifies registers
KVM: arm64: PMU: Fix SET_ONE_REG for vPMC regs
KVM: arm64: PMU: Assume PMU presence in pmu-emul.c
KVM: arm64: PMU: Set raw values from user to PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR}

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

+63 -76
+11 -6
arch/arm64/kvm/arm.c
··· 858 858 if (ret) 859 859 return ret; 860 860 861 - ret = kvm_arm_pmu_v3_enable(vcpu); 862 - if (ret) 863 - return ret; 861 + if (kvm_vcpu_has_pmu(vcpu)) { 862 + ret = kvm_arm_pmu_v3_enable(vcpu); 863 + if (ret) 864 + return ret; 865 + } 864 866 865 867 if (is_protected_kvm_enabled()) { 866 868 ret = pkvm_create_hyp_vm(kvm); ··· 1177 1175 */ 1178 1176 preempt_disable(); 1179 1177 1180 - kvm_pmu_flush_hwstate(vcpu); 1178 + if (kvm_vcpu_has_pmu(vcpu)) 1179 + kvm_pmu_flush_hwstate(vcpu); 1181 1180 1182 1181 local_irq_disable(); 1183 1182 ··· 1197 1194 if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) { 1198 1195 vcpu->mode = OUTSIDE_GUEST_MODE; 1199 1196 isb(); /* Ensure work in x_flush_hwstate is committed */ 1200 - kvm_pmu_sync_hwstate(vcpu); 1197 + if (kvm_vcpu_has_pmu(vcpu)) 1198 + kvm_pmu_sync_hwstate(vcpu); 1201 1199 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) 1202 1200 kvm_timer_sync_user(vcpu); 1203 1201 kvm_vgic_sync_hwstate(vcpu); ··· 1228 1224 * that the vgic can properly sample the updated state of the 1229 1225 * interrupt line. 1230 1226 */ 1231 - kvm_pmu_sync_hwstate(vcpu); 1227 + if (kvm_vcpu_has_pmu(vcpu)) 1228 + kvm_pmu_sync_hwstate(vcpu); 1232 1229 1233 1230 /* 1234 1231 * Sync the vgic state before syncing the timer state because
+4 -2
arch/arm64/kvm/emulate-nested.c
··· 2518 2518 vcpu_clear_flag(vcpu, IN_NESTED_ERET); 2519 2519 preempt_enable(); 2520 2520 2521 - kvm_pmu_nested_transition(vcpu); 2521 + if (kvm_vcpu_has_pmu(vcpu)) 2522 + kvm_pmu_nested_transition(vcpu); 2522 2523 } 2523 2524 2524 2525 static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2, ··· 2602 2601 kvm_arch_vcpu_load(vcpu, smp_processor_id()); 2603 2602 preempt_enable(); 2604 2603 2605 - kvm_pmu_nested_transition(vcpu); 2604 + if (kvm_vcpu_has_pmu(vcpu)) 2605 + kvm_pmu_nested_transition(vcpu); 2606 2606 2607 2607 return 1; 2608 2608 }
+14 -42
arch/arm64/kvm/pmu-emul.c
··· 154 154 */ 155 155 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx) 156 156 { 157 - if (!kvm_vcpu_has_pmu(vcpu)) 158 - return 0; 159 - 160 157 return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx)); 161 158 } 162 159 ··· 192 195 */ 193 196 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) 194 197 { 195 - if (!kvm_vcpu_has_pmu(vcpu)) 196 - return; 197 - 198 198 kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false); 199 + } 200 + 201 + /** 202 + * kvm_pmu_set_counter_value_user - set PMU counter value from user 203 + * @vcpu: The vcpu pointer 204 + * @select_idx: The counter index 205 + * @val: The counter value 206 + */ 207 + void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) 208 + { 209 + kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx)); 210 + __vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val; 211 + kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 199 212 } 200 213 201 214 /** ··· 256 249 257 250 for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) 258 251 pmu->pmc[i].idx = i; 259 - } 260 - 261 - /** 262 - * kvm_pmu_vcpu_reset - reset pmu state for cpu 263 - * @vcpu: The vcpu pointer 264 - * 265 - */ 266 - void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) 267 - { 268 - unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu); 269 - int i; 270 - 271 - for_each_set_bit(i, &mask, 32) 272 - kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i)); 273 252 } 274 253 275 254 /** ··· 347 354 { 348 355 int i; 349 356 350 - if (!kvm_vcpu_has_pmu(vcpu) || !val) 357 + if (!val) 351 358 return; 352 359 353 360 for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) { ··· 397 404 { 398 405 struct kvm_pmu *pmu = &vcpu->arch.pmu; 399 406 bool overflow; 400 - 401 - if (!kvm_vcpu_has_pmu(vcpu)) 402 - return; 403 407 404 408 overflow = kvm_pmu_overflow_status(vcpu); 405 409 if (pmu->irq_level == overflow) ··· 593 603 { 594 604 int i; 595 605 596 - if (!kvm_vcpu_has_pmu(vcpu)) 597 - return; 598 - 599 606 /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */ 600 607 if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5)) 601 608 val &= ~ARMV8_PMU_PMCR_LP; ··· 780 793 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx); 781 794 u64 reg; 782 795 783 - if (!kvm_vcpu_has_pmu(vcpu)) 784 - return; 785 - 786 796 reg = counter_index_to_evtreg(pmc->idx); 787 797 __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm); 788 798 ··· 885 901 u64 val, mask = 0; 886 902 int base, i, nr_events; 887 903 888 - if (!kvm_vcpu_has_pmu(vcpu)) 889 - return 0; 890 - 891 904 if (!pmceid1) { 892 905 val = compute_pmceid0(cpu_pmu); 893 906 base = 0; ··· 925 944 926 945 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) 927 946 { 928 - if (!kvm_vcpu_has_pmu(vcpu)) 929 - return 0; 930 - 931 947 if (!vcpu->arch.pmu.created) 932 948 return -EINVAL; 933 949 ··· 946 968 } else if (kvm_arm_pmu_irq_initialized(vcpu)) { 947 969 return -EINVAL; 948 970 } 949 - 950 - /* One-off reload of the PMU on first run */ 951 - kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 952 971 953 972 return 0; 954 973 } ··· 1269 1294 bool reprogrammed = false; 1270 1295 unsigned long mask; 1271 1296 int i; 1272 - 1273 - if (!kvm_vcpu_has_pmu(vcpu)) 1274 - return; 1275 1297 1276 1298 mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); 1277 1299 for_each_set_bit(i, &mask, 32) {
-3
arch/arm64/kvm/reset.c
··· 196 196 vcpu->arch.reset_state.reset = false; 197 197 spin_unlock(&vcpu->arch.mp_state_lock); 198 198 199 - /* Reset PMU outside of the non-preemptible section */ 200 - kvm_pmu_vcpu_reset(vcpu); 201 - 202 199 preempt_disable(); 203 200 loaded = (vcpu->cpu != -1); 204 201 if (loaded)
+31 -21
arch/arm64/kvm/sys_regs.c
··· 967 967 return 0; 968 968 } 969 969 970 + static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, 971 + u64 val) 972 + { 973 + u64 idx; 974 + 975 + if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0) 976 + /* PMCCNTR_EL0 */ 977 + idx = ARMV8_PMU_CYCLE_IDX; 978 + else 979 + /* PMEVCNTRn_EL0 */ 980 + idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 981 + 982 + kvm_pmu_set_counter_value_user(vcpu, idx, val); 983 + return 0; 984 + } 985 + 970 986 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, 971 987 struct sys_reg_params *p, 972 988 const struct sys_reg_desc *r) ··· 1074 1058 1075 1059 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val) 1076 1060 { 1077 - bool set; 1061 + u64 mask = kvm_pmu_accessible_counter_mask(vcpu); 1078 1062 1079 - val &= kvm_pmu_accessible_counter_mask(vcpu); 1080 - 1081 - switch (r->reg) { 1082 - case PMOVSSET_EL0: 1083 - /* CRm[1] being set indicates a SET register, and CLR otherwise */ 1084 - set = r->CRm & 2; 1085 - break; 1086 - default: 1087 - /* Op2[0] being set indicates a SET register, and CLR otherwise */ 1088 - set = r->Op2 & 1; 1089 - break; 1090 - } 1091 - 1092 - if (set) 1093 - __vcpu_sys_reg(vcpu, r->reg) |= val; 1094 - else 1095 - __vcpu_sys_reg(vcpu, r->reg) &= ~val; 1063 + __vcpu_sys_reg(vcpu, r->reg) = val & mask; 1064 + kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 1096 1065 1097 1066 return 0; 1098 1067 } ··· 1237 1236 val |= ARMV8_PMU_PMCR_LC; 1238 1237 1239 1238 __vcpu_sys_reg(vcpu, r->reg) = val; 1239 + kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 1240 + 1240 1241 return 0; 1241 1242 } ··· 1265 1262 #define PMU_PMEVCNTR_EL0(n) \ 1266 1263 { PMU_SYS_REG(PMEVCNTRn_EL0(n)), \ 1267 1264 .reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \ 1265 + .set_user = set_pmu_evcntr, \ 1268 1266 .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), } 1269 1267 1270 1268 /* Macro to expand the PMEVTYPERn_EL0 register */ ··· 1884 1880 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, 1885 1881 const struct sys_reg_desc *rd) 1886 1882 { 1887 - u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit()); 1883 + u8 perfmon; 1888 1884 u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1); 1889 1885 1890 1886 val &= ~ID_DFR0_EL1_PerfMon_MASK; 1891 - if (kvm_vcpu_has_pmu(vcpu)) 1887 + if (kvm_vcpu_has_pmu(vcpu)) { 1888 + perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit()); 1892 1889 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon); 1890 + } 1893 1891 1894 1892 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8); 1895 1893 ··· 3058 3052 .access = access_pmceid, .reset = NULL }, 3059 3053 { PMU_SYS_REG(PMCCNTR_EL0), 3060 3054 .access = access_pmu_evcntr, .reset = reset_unknown, 3061 - .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr}, 3055 + .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr, 3056 + .set_user = set_pmu_evcntr }, 3062 3057 { PMU_SYS_REG(PMXEVTYPER_EL0), 3063 3058 .access = access_pmu_evtyper, .reset = NULL }, 3064 3059 { PMU_SYS_REG(PMXEVCNTR_EL0), ··· 4719 4712 } 4720 4713 4721 4714 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags); 4715 + 4716 + if (kvm_vcpu_has_pmu(vcpu)) 4717 + kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu); 4722 4718 } 4723 4719 4724 4720 /**
+3 -2
include/kvm/arm_pmu.h
··· 41 41 #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) 42 42 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); 43 43 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); 44 + void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); 44 45 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu); 45 46 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu); 46 47 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1); 47 48 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); 48 - void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); 49 49 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu); 50 50 void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val); 51 51 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu); ··· 109 109 } 110 110 static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, 111 111 u64 select_idx, u64 val) {} 112 + static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, 113 + u64 select_idx, u64 val) {} 112 114 static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu) 113 115 { 114 116 return 0; ··· 120 118 return 0; 121 119 } 122 120 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {} 123 - static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {} 124 121 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {} 125 122 static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {} 126 123 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}