Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'kvmarm-fixes-6.3-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.3, part #3

- Ensure the guest PMU context is restored before the first KVM_RUN,
fixing an issue where EL0 event counting is broken after vCPU
save/restore

- Actually initialize ID_AA64PFR0_EL1.{CSV2,CSV3} based on the
sanitized, system-wide values for protected VMs

+30 -10
+25 -1
arch/arm64/kvm/arm.c
@@ -1890,9 +1890,33 @@
 	return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+	/*
+	 * Track whether the system isn't affected by spectre/meltdown in the
+	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+	 * Although this is per-CPU, we make it global for simplicity, e.g., not
+	 * to have to worry about vcpu migration.
+	 *
+	 * Unlike for non-protected VMs, userspace cannot override this for
+	 * protected VMs.
+	 */
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+	return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+4 -1
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -33,11 +33,14 @@
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
 	)
 
 /*
-7
arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -85,18 +85,11 @@
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
 	u64 set_mask = 0;
 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
-
-	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
 
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
+1
arch/arm64/kvm/pmu-emul.c
@@ -558,6 +558,7 @@
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
-1
arch/arm64/kvm/sys_regs.c
@@ -794,7 +794,6 @@
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
 		kvm_pmu_handle_pmcr(vcpu, val);
-		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)