Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

smccc/kvm_guest: Enable errata based on implementation CPUs

Retrieve any migration target implementation CPUs using the hypercall
and enable associated errata.

Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Sebastian Ott <sebott@redhat.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20250221140229.12588-6-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

Authored by Shameer Kolothum and committed by Oliver Upton
86edf6bd c8c2647e

+114 -5
+7
arch/arm64/include/asm/cputype.h
··· 276 276 return _model == model && rv >= rv_min && rv <= rv_max; 277 277 } 278 278 279 + struct target_impl_cpu { 280 + u64 midr; 281 + u64 revidr; 282 + u64 aidr; 283 + }; 284 + 285 + bool cpu_errata_set_target_impl(u64 num, void *impl_cpus); 279 286 bool is_midr_in_range_list(struct midr_range const *ranges); 280 287 281 288 static inline u64 __attribute_const__ read_cpuid_mpidr(void)
+1
arch/arm64/include/asm/hypervisor.h
··· 6 6 7 7 void kvm_init_hyp_services(void); 8 8 bool kvm_arm_hyp_service_available(u32 func_id); 9 + void kvm_arm_target_impl_cpu_init(void); 9 10 10 11 #ifdef CONFIG_ARM_PKVM_GUEST 11 12 void pkvm_init_hyp_services(void);
+40 -5
arch/arm64/kernel/cpu_errata.c
··· 14 14 #include <asm/kvm_asm.h> 15 15 #include <asm/smp_plat.h> 16 16 17 + static u64 target_impl_cpu_num; 18 + static struct target_impl_cpu *target_impl_cpus; 19 + 20 + bool cpu_errata_set_target_impl(u64 num, void *impl_cpus) 21 + { 22 + if (target_impl_cpu_num || !num || !impl_cpus) 23 + return false; 24 + 25 + target_impl_cpu_num = num; 26 + target_impl_cpus = impl_cpus; 27 + return true; 28 + } 29 + 17 30 static inline bool is_midr_in_range(struct midr_range const *range) 18 31 { 19 - return midr_is_cpu_model_range(read_cpuid_id(), range->model, 20 - range->rv_min, range->rv_max); 32 + int i; 33 + 34 + if (!target_impl_cpu_num) 35 + return midr_is_cpu_model_range(read_cpuid_id(), range->model, 36 + range->rv_min, range->rv_max); 37 + 38 + for (i = 0; i < target_impl_cpu_num; i++) { 39 + if (midr_is_cpu_model_range(target_impl_cpus[i].midr, 40 + range->model, 41 + range->rv_min, range->rv_max)) 42 + return true; 43 + } 44 + return false; 21 45 } 22 46 23 47 bool is_midr_in_range_list(struct midr_range const *ranges) ··· 71 47 static bool __maybe_unused 72 48 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) 73 49 { 74 - WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 75 - return __is_affected_midr_range(entry, read_cpuid_id(), 76 - read_cpuid(REVIDR_EL1)); 50 + int i; 51 + 52 + if (!target_impl_cpu_num) { 53 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); 54 + return __is_affected_midr_range(entry, read_cpuid_id(), 55 + read_cpuid(REVIDR_EL1)); 56 + } 57 + 58 + for (i = 0; i < target_impl_cpu_num; i++) { 59 + if (__is_affected_midr_range(entry, target_impl_cpus[i].midr, 60 + target_impl_cpus[i].revidr)) 61 + return true; 62 + } 63 + return false; 77 64 } 78 65 79 66 static bool __maybe_unused
+2
arch/arm64/kernel/cpufeature.c
··· 86 86 #include <asm/kvm_host.h> 87 87 #include <asm/mmu_context.h> 88 88 #include <asm/mte.h> 89 + #include <asm/hypervisor.h> 89 90 #include <asm/processor.h> 90 91 #include <asm/smp.h> 91 92 #include <asm/sysreg.h> ··· 3681 3680 3682 3681 static void __init setup_boot_cpu_capabilities(void) 3683 3682 { 3683 + kvm_arm_target_impl_cpu_init(); 3684 3684 /* 3685 3685 * The boot CPU's feature register values have been recorded. Detect 3686 3686 * boot cpucaps and local cpucaps for the boot CPU, then enable and
+64
drivers/firmware/smccc/kvm_guest.c
··· 6 6 #include <linux/bitmap.h> 7 7 #include <linux/cache.h> 8 8 #include <linux/kernel.h> 9 + #include <linux/memblock.h> 9 10 #include <linux/string.h> 11 + 12 + #include <uapi/linux/psci.h> 10 13 11 14 #include <asm/hypervisor.h> 12 15 ··· 54 51 return test_bit(func_id, __kvm_arm_hyp_services); 55 52 } 56 53 EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available); 54 + 55 + void __init kvm_arm_target_impl_cpu_init(void) 56 + { 57 + int i; 58 + u32 ver; 59 + u64 max_cpus; 60 + struct arm_smccc_res res; 61 + struct target_impl_cpu *target; 62 + 63 + if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) || 64 + !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)) 65 + return; 66 + 67 + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID, 68 + 0, &res); 69 + if (res.a0 != SMCCC_RET_SUCCESS) 70 + return; 71 + 72 + /* Version info is in lower 32 bits and is in SMCCC_VERSION format */ 73 + ver = lower_32_bits(res.a1); 74 + if (PSCI_VERSION_MAJOR(ver) != 1) { 75 + pr_warn("Unsupported target CPU implementation version v%d.%d\n", 76 + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); 77 + return; 78 + } 79 + 80 + if (!res.a2) { 81 + pr_warn("No target implementation CPUs specified\n"); 82 + return; 83 + } 84 + 85 + max_cpus = res.a2; 86 + target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target)); 87 + if (!target) { 88 + pr_warn("Not enough memory for struct target_impl_cpu\n"); 89 + return; 90 + } 91 + 92 + for (i = 0; i < max_cpus; i++) { 93 + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID, 94 + i, &res); 95 + if (res.a0 != SMCCC_RET_SUCCESS) { 96 + pr_warn("Discovering target implementation CPUs failed\n"); 97 + goto mem_free; 98 + } 99 + target[i].midr = res.a1; 100 + target[i].revidr = res.a2; 101 + target[i].aidr = res.a3; 102 + } 103 + 104 + if (!cpu_errata_set_target_impl(max_cpus, target)) { 105 + pr_warn("Failed to set target implementation CPUs\n"); 106 + goto mem_free; 107 + } 108 + 109 + pr_info("Number of target implementation CPUs is %llu\n", max_cpus); 110 + return; 111 + 112 + mem_free: 113 + memblock_free(target, sizeof(*target) * max_cpus); 114 + }