Merge tag 'kvmarm-fixes-5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.18, take #2

- Take care of faults occurring between the PARange and
  IPA range by injecting an exception

- Fix S2 faults taken from a host EL0 in protected mode

- Work around an Oops caused by a PMU access from a 32bit
  guest when the PMU hasn't been created. This is a temporary
  bodge until we fix it for good.

+78 -9
+1
arch/arm64/include/asm/kvm_emulate.h
···
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
+8 -8
arch/arm64/kvm/hyp/nvhe/host.S
···
 	invalid_host_el2_vect			// FIQ EL2h
 	invalid_host_el2_vect			// Error EL2h
 
-	host_el1_sync_vect			// Synchronous 64-bit EL1
-	invalid_host_el1_vect			// IRQ 64-bit EL1
-	invalid_host_el1_vect			// FIQ 64-bit EL1
-	invalid_host_el1_vect			// Error 64-bit EL1
+	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// Error 64-bit EL1/EL0
 
-	invalid_host_el1_vect			// Synchronous 32-bit EL1
-	invalid_host_el1_vect			// IRQ 32-bit EL1
-	invalid_host_el1_vect			// FIQ 32-bit EL1
-	invalid_host_el1_vect			// Error 32-bit EL1
+	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)
 
 /*
+28
arch/arm64/kvm/inject_fault.c
···
 	inject_abt64(vcpu, true, addr);
 }
 
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr, esr;
+
+	addr = kvm_vcpu_get_fault_ipa(vcpu);
+	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		kvm_inject_pabt(vcpu, addr);
+	else
+		kvm_inject_dabt(vcpu, addr);
+
+	/*
+	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+	 * Size Fault at level 0, as if exceeding PARange.
+	 *
+	 * Non-LPAE guests will only get the external abort, as there
+	 * is no way to describe the ASF.
+	 */
+	if (vcpu_el1_is_32bit(vcpu) &&
+	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+		return;
+
+	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+	esr &= ~GENMASK_ULL(5, 0);
+	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_undefined - inject an undefined instruction into the guest
  * @vcpu: The vCPU in which to inject the exception
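
The interesting part of kvm_inject_size_fault() is the ESR_EL1 rewrite at the end: clearing the FSC field turns the freshly injected abort into an Address Size fault at level 0. A minimal standalone sketch of just that bit manipulation, using hypothetical DEMO_* constants rather than the kernel's own macros:

#include <stdint.h>

#define DEMO_ESR_FSC_MASK	0x3fULL	/* ESR_ELx.FSC lives in bits [5:0] */
#define DEMO_FSC_ADDRSZ_L0	0x00ULL	/* 0b000000: Address Size fault, level 0 */

/* Rewrite an ESR value so its fault status code reads as an Address Size fault */
static uint64_t demo_mark_size_fault(uint64_t esr)
{
	esr &= ~DEMO_ESR_FSC_MASK;	/* drop the FSC written by the abort injection */
	esr |= DEMO_FSC_ADDRSZ_L0;	/* as if the access had exceeded PARange */
	return esr;
}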
+19
arch/arm64/kvm/mmu.c
···
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
+	if (fault_status == FSC_FAULT) {
+		/* Beyond sanitised PARange (which is the IPA limit) */
+		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+			kvm_inject_size_fault(vcpu);
+			return 1;
+		}
+
+		/* Falls between the IPA range and the PARange? */
+		if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+			if (is_iabt)
+				kvm_inject_pabt(vcpu, fault_ipa);
+			else
+				kvm_inject_dabt(vcpu, fault_ipa);
+			return 1;
+		}
+	}
+
 	/* Synchronous External Abort? */
 	if (kvm_vcpu_abt_issea(vcpu)) {
 		/*
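
The new block in kvm_handle_guest_abort() splits translation faults into three windows: inside the VM's IPA space, between the IPA space and the sanitised PARange, and beyond PARange. A minimal standalone sketch of that classification, assuming illustrative sizes (40-bit guest IPA space, 48-bit PARange); classify_fault() and the enum are hypothetical names, not kernel code:

#include <stdint.h>
#include <stdio.h>

enum fault_action { HANDLE_NORMALLY, INJECT_ABORT, INJECT_SIZE_FAULT };

static enum fault_action classify_fault(uint64_t fault_ipa,
					unsigned int ia_bits,
					unsigned int ipa_limit)
{
	/* Beyond the sanitised PARange: report an Address Size fault */
	if (fault_ipa >= (1ULL << ipa_limit))
		return INJECT_SIZE_FAULT;

	/* Between the VM's IPA space and PARange: reflect an abort back */
	if (fault_ipa >= (1ULL << ia_bits))
		return INJECT_ABORT;

	/* Inside the IPA space: normal stage-2 fault handling */
	return HANDLE_NORMALLY;
}

int main(void)
{
	/* 40-bit guest IPA space, 48-bit PARange: illustrative values only */
	printf("%d\n", classify_fault(1ULL << 41, 40, 48)); /* INJECT_ABORT */
	printf("%d\n", classify_fault(1ULL << 48, 40, 48)); /* INJECT_SIZE_FAULT */
	printf("%d\n", classify_fault(1ULL << 20, 40, 48)); /* HANDLE_NORMALLY */
	return 0;
}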
+22 -1
arch/arm64/kvm/pmu-emul.c
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
 	if (kvm_pmu_pmc_is_chained(pmc) &&
···
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	u64 reg;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
 
 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
-	if (!val)
+	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	int i;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 		return;
 
···
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
 
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
···
 {
 	u64 reg, mask;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	mask = ARMV8_PMU_EVTYPE_MASK;
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
···
 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
 	u64 val, mask = 0;
 	int base, i, nr_events;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
 
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
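
All of the pmu-emul.c hunks apply the same early-out: if the vCPU was created without a PMU, reads of emulated PMU state return 0 and writes are ignored, instead of dereferencing state that was never set up. A minimal sketch of that guard pattern with hypothetical demo_* names, not the kernel API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for kvm_vcpu_has_pmu() and the emulated PMU state */
struct demo_vcpu {
	bool has_pmu;
	uint64_t counters[32];
};

static uint64_t demo_read_counter(struct demo_vcpu *vcpu, unsigned int idx)
{
	/* Without a PMU, reads return 0 instead of touching unset state */
	if (!vcpu->has_pmu)
		return 0;
	return vcpu->counters[idx];
}

static void demo_write_counter(struct demo_vcpu *vcpu, unsigned int idx, uint64_t val)
{
	/* Without a PMU, writes are silently ignored */
	if (!vcpu->has_pmu)
		return;
	vcpu->counters[idx] = val;
}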