Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Unmap 'kvm_arm_hyp_percpu_base' from the host

When pKVM is enabled, the hypervisor at EL2 does not trust the host at
EL1 and must therefore prevent it from having unrestricted access to
internal hypervisor state.

The 'kvm_arm_hyp_percpu_base' array holds the offsets for hypervisor
per-cpu allocations, so move this into the nVHE code where it
cannot be modified by the untrusted host at EL1.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-22-will@kernel.org

authored by

Quentin Perret and committed by
Marc Zyngier
fe41a7f8 f41dff4e

+8 -10
+2 -2
arch/arm64/include/asm/kvm_asm.h
··· 109 109 #define per_cpu_ptr_nvhe_sym(sym, cpu) \ 110 110 ({ \ 111 111 unsigned long base, off; \ 112 - base = kvm_arm_hyp_percpu_base[cpu]; \ 112 + base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; \ 113 113 off = (unsigned long)&CHOOSE_NVHE_SYM(sym) - \ 114 114 (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start); \ 115 115 base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \ ··· 214 214 #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init) 215 215 #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector) 216 216 217 - extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; 217 + extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[]; 218 218 DECLARE_KVM_NVHE_SYM(__per_cpu_start); 219 219 DECLARE_KVM_NVHE_SYM(__per_cpu_end); 220 220
-3
arch/arm64/kernel/image-vars.h
··· 89 89 KVM_NVHE_ALIAS(__start___kvm_ex_table); 90 90 KVM_NVHE_ALIAS(__stop___kvm_ex_table); 91 91 92 - /* Array containing bases of nVHE per-CPU memory regions. */ 93 - KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base); 94 - 95 92 /* PMU available static key */ 96 93 #ifdef CONFIG_HW_PERF_EVENTS 97 94 KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+4 -5
arch/arm64/kvm/arm.c
··· 51 51 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); 52 52 53 53 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); 54 - unsigned long kvm_arm_hyp_percpu_base[NR_CPUS]; 55 54 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); 56 55 57 56 static bool vgic_present; ··· 1856 1857 free_hyp_pgds(); 1857 1858 for_each_possible_cpu(cpu) { 1858 1859 free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); 1859 - free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order()); 1860 + free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); 1860 1861 } 1861 1862 } 1862 1863 1863 1864 static int do_pkvm_init(u32 hyp_va_bits) 1864 1865 { 1865 - void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base); 1866 + void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)); 1866 1867 int ret; 1867 1868 1868 1869 preempt_disable(); ··· 1966 1967 1967 1968 page_addr = page_address(page); 1968 1969 memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size()); 1969 - kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr; 1970 + kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr; 1970 1971 } 1971 1972 1972 1973 /* ··· 2059 2060 } 2060 2061 2061 2062 for_each_possible_cpu(cpu) { 2062 - char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu]; 2063 + char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; 2063 2064 char *percpu_end = percpu_begin + nvhe_percpu_size(); 2064 2065 2065 2066 /* Map Hyp percpu pages */
+2
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
··· 23 23 return hyp_cpu_logical_map[cpu]; 24 24 } 25 25 26 + unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS]; 27 + 26 28 unsigned long __hyp_per_cpu_offset(unsigned int cpu) 27 29 { 28 30 unsigned long *cpu_base_array;