Merge tag 'kvmarm-fixes-5.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.18, take #2

- Take care of faults occurring between the PARange and
  the IPA range by injecting an exception

- Fix S2 faults taken from a host EL0 in protected mode

- Work around an Oops caused by a PMU access from a 32-bit
  guest when no PMU has been created. This is a temporary
  bodge until we fix it for good.

5 files changed, 78 insertions(+), 9 deletions(-)

arch/arm64/include/asm/kvm_emulate.h (+1)
···
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

arch/arm64/kvm/hyp/nvhe/host.S (+8 -8)
···
 	invalid_host_el2_vect			// FIQ EL2h
 	invalid_host_el2_vect			// Error EL2h
 
-	host_el1_sync_vect			// Synchronous 64-bit EL1
-	invalid_host_el1_vect			// IRQ 64-bit EL1
-	invalid_host_el1_vect			// FIQ 64-bit EL1
-	invalid_host_el1_vect			// Error 64-bit EL1
+	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
+	invalid_host_el1_vect			// Error 64-bit EL1/EL0
 
-	invalid_host_el1_vect			// Synchronous 32-bit EL1
-	invalid_host_el1_vect			// IRQ 32-bit EL1
-	invalid_host_el1_vect			// FIQ 32-bit EL1
-	invalid_host_el1_vect			// Error 32-bit EL1
+	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
+	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
+	invalid_host_el1_vect			// Error 32-bit EL1/EL0
 SYM_CODE_END(__kvm_hyp_host_vector)
 
 /*
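
The reason the 32-bit vectors matter here: in protected mode the host's EL0 can run AArch32, and an exception taken to EL2 from a lower exception level that was executing AArch32 goes through the "Lower EL using AArch32" quadrant of the vector table rather than the AArch64 one, so its Synchronous slot must also reach host_el1_sync_vect. The stand-alone sketch below (not kernel code; only the 0x400/0x600 base offsets and the 0x80 stride are architectural, everything else is made up) shows how the slot is selected:

/*
 * Stand-alone illustration (not kernel code) of how the EL2 vector slot
 * is chosen.  The base offsets 0x400 ("Lower EL using AArch64") and
 * 0x600 ("Lower EL using AArch32") and the 0x80 stride between entries
 * are architectural; a stage-2 fault taken from a 32-bit host EL0 task
 * therefore lands in the Synchronous entry of the AArch32 quadrant.
 */
#include <stdio.h>

enum exc_kind { EXC_SYNC = 0, EXC_IRQ = 1, EXC_FIQ = 2, EXC_SERROR = 3 };

static unsigned int vector_offset(int lower_el_is_aarch32, enum exc_kind kind)
{
	unsigned int base = lower_el_is_aarch32 ? 0x600 : 0x400;

	return base + 0x80 * kind;
}

int main(void)
{
	printf("Synchronous, 64-bit lower EL: VBAR_EL2 + 0x%x\n",
	       vector_offset(0, EXC_SYNC));
	printf("Synchronous, 32-bit lower EL: VBAR_EL2 + 0x%x\n",
	       vector_offset(1, EXC_SYNC));
	return 0;
}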

arch/arm64/kvm/inject_fault.c (+28)
···
 	inject_abt64(vcpu, true, addr);
 }
 
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr, esr;
+
+	addr = kvm_vcpu_get_fault_ipa(vcpu);
+	addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+	if (kvm_vcpu_trap_is_iabt(vcpu))
+		kvm_inject_pabt(vcpu, addr);
+	else
+		kvm_inject_dabt(vcpu, addr);
+
+	/*
+	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+	 * Size Fault at level 0, as if exceeding PARange.
+	 *
+	 * Non-LPAE guests will only get the external abort, as there
+	 * is no way to describe the ASF.
+	 */
+	if (vcpu_el1_is_32bit(vcpu) &&
+	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+		return;
+
+	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+	esr &= ~GENMASK_ULL(5, 0);
+	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
 /**
  * kvm_inject_undefined - inject an undefined instruction into the guest
  * @vcpu: The vCPU in which to inject the exception
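
The ESR rewrite at the end of kvm_inject_size_fault() relies on bits [5:0] of ESR_EL1 being the fault status code, whose all-zeroes encoding means "Address size fault, level 0". A stand-alone sketch of that bit manipulation (not kernel code; GENMASK_ULL is re-implemented locally and the sample ESR value is purely illustrative):

/*
 * Stand-alone sketch (not kernel code) of the ESR_EL1 manipulation above.
 * Bits [5:0] of ESR_EL1 hold the fault status code; clearing them leaves
 * FSC == 0b000000, which the architecture defines as "Address size fault,
 * level 0".  GENMASK_ULL is re-implemented here (in the kernel it comes
 * from <linux/bits.h>).
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Hypothetical ESR: data abort from a lower EL, DFSC = 0b000101
	 * (translation fault, level 1). */
	uint64_t esr = 0x92000005ULL;

	esr &= ~GENMASK_ULL(5, 0);	/* force FSC to "address size fault, level 0" */

	printf("ESR_EL1 = 0x%llx, FSC = 0x%llx\n",
	       (unsigned long long)esr,
	       (unsigned long long)(esr & GENMASK_ULL(5, 0)));
	return 0;
}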

arch/arm64/kvm/mmu.c (+19)
···
 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
+	if (fault_status == FSC_FAULT) {
+		/* Beyond sanitised PARange (which is the IPA limit) */
+		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+			kvm_inject_size_fault(vcpu);
+			return 1;
+		}
+
+		/* Falls between the IPA range and the PARange? */
+		if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+			fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+			if (is_iabt)
+				kvm_inject_pabt(vcpu, fault_ipa);
+			else
+				kvm_inject_dabt(vcpu, fault_ipa);
+			return 1;
+		}
+	}
+
 	/* Synchronous External Abort? */
 	if (kvm_vcpu_abt_issea(vcpu)) {
 		/*
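
As a worked example of the two new checks, assume a guest configured with a 40-bit IPA space running on hardware whose sanitised PARange is 44 bits (both values hypothetical): a stage-2 translation fault at or above 1 << 44 now gets the size fault, one in [1 << 40, 1 << 44) gets a plain external abort, and anything below 1 << 40 continues down the normal fault handling path. Stand-alone sketch, not kernel code:

/*
 * Stand-alone sketch (hypothetical values, not kernel code) of the range
 * checks added above.
 */
#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))

static const char *classify(uint64_t fault_ipa, unsigned int ia_bits,
			    unsigned int ipa_limit)
{
	if (fault_ipa >= BIT_ULL(ipa_limit))
		return "size fault (beyond PARange)";
	if (fault_ipa >= BIT_ULL(ia_bits))
		return "external abort (between IPA range and PARange)";
	return "normal stage-2 fault handling";
}

int main(void)
{
	unsigned int ia_bits = 40, ipa_limit = 44;	/* hypothetical configuration */
	uint64_t samples[] = { 0x80000000ULL, BIT_ULL(40), BIT_ULL(44) };
	int i;

	for (i = 0; i < 3; i++)
		printf("IPA 0x%012llx -> %s\n",
		       (unsigned long long)samples[i],
		       classify(samples[i], ia_bits, ipa_limit));
	return 0;
}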

arch/arm64/kvm/pmu-emul.c (+22 -1)
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
 	if (kvm_pmu_pmc_is_chained(pmc) &&
···
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
 	u64 reg;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
 
 	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
 	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
 		return;
 
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 
-	if (!val)
+	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
···
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	int i;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
 		return;
 
···
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
 
 	if (val & ARMV8_PMU_PMCR_E) {
 		kvm_pmu_enable_counter_mask(vcpu,
···
 {
 	u64 reg, mask;
 
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
 	mask = ARMV8_PMU_EVTYPE_MASK;
 	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
 	mask |= kvm_pmu_event_mask(vcpu->kvm);
···
 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
 	u64 val, mask = 0;
 	int base, i, nr_events;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return 0;
 
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
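
All of the hunks above add the same guard: every PMU emulation entry point reachable from a guest trap now bails out early when the vCPU was created without a PMU, rather than poking PMU state that was never set up (the Oops mentioned in the merge description). A stripped-down, hypothetical illustration of the pattern, using none of the real KVM types:

/*
 * Hypothetical illustration of the guard-clause pattern added above --
 * none of these types are the real KVM structures.  In KVM proper,
 * kvm_vcpu_has_pmu() reports whether the vCPU was created with the
 * PMUv3 feature.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct fake_pmu {
	uint64_t counters[32];
};

struct fake_vcpu {
	bool has_pmu;
	struct fake_pmu *pmu;	/* NULL when no PMU was created for the VM */
};

static uint64_t pmu_get_counter(struct fake_vcpu *vcpu, unsigned int idx)
{
	if (!vcpu->has_pmu)	/* the guard added throughout pmu-emul.c */
		return 0;

	return vcpu->pmu->counters[idx];
}

int main(void)
{
	struct fake_vcpu vcpu = { .has_pmu = false, .pmu = NULL };

	/* Returns 0 instead of dereferencing the NULL pmu pointer. */
	printf("counter 0 = %llu\n",
	       (unsigned long long)pmu_get_counter(&vcpu, 0));
	return 0;
}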