Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: nVHE: Use separate vector for the host

The host is treated differently from the guests when an exception is
taken so introduce a separate vector that is specialized for the host.
This also allows the nVHE specific code to move out of hyp-entry.S and
into nvhe/host.S.

The host is only expected to make HVC calls and anything else is
considered invalid and results in a panic.

Hyp initialization is now passed the vector that is used for the host
and it is swapped for the guest vector during the context switch.

Signed-off-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200915104643.2543892-7-ascull@google.com

Authored by Andrew Scull and committed by Marc Zyngier.
6e3bfbb2 a0e47952

+125 -68
+2
arch/arm64/include/asm/kvm_asm.h
··· 111 111 struct kvm_s2_mmu; 112 112 113 113 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init); 114 + DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector); 114 115 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector); 115 116 #define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init) 117 + #define __kvm_hyp_host_vector CHOOSE_NVHE_SYM(__kvm_hyp_host_vector) 116 118 #define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector) 117 119 118 120 #ifdef CONFIG_KVM_INDIRECT_VECTORS
+1
arch/arm64/kernel/image-vars.h
··· 71 71 /* Global kernel state accessed by nVHE hyp code. */ 72 72 KVM_NVHE_ALIAS(arm64_ssbd_callback_required); 73 73 KVM_NVHE_ALIAS(kvm_host_data); 74 + KVM_NVHE_ALIAS(kvm_hyp_vector); 74 75 KVM_NVHE_ALIAS(kvm_vgic_global_state); 75 76 76 77 /* Kernel constant needed to compute idmap addresses. */
+10 -1
arch/arm64/kvm/arm.c
··· 1277 1277 1278 1278 pgd_ptr = kvm_mmu_get_httbr(); 1279 1279 hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE; 1280 - vector_ptr = __this_cpu_read(kvm_hyp_vector); 1280 + vector_ptr = (unsigned long)kern_hyp_va(kvm_ksym_ref(__kvm_hyp_host_vector)); 1281 1281 1282 1282 /* 1283 1283 * Call initialization code, and switch to the full blown HYP code. ··· 1542 1542 1543 1543 for_each_possible_cpu(cpu) { 1544 1544 struct kvm_host_data *cpu_data; 1545 + unsigned long *vector; 1545 1546 1546 1547 cpu_data = per_cpu_ptr(&kvm_host_data, cpu); 1547 1548 err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP); 1548 1549 1549 1550 if (err) { 1550 1551 kvm_err("Cannot map host CPU state: %d\n", err); 1552 + goto out_err; 1553 + } 1554 + 1555 + vector = per_cpu_ptr(&kvm_hyp_vector, cpu); 1556 + err = create_hyp_mappings(vector, vector + 1, PAGE_HYP); 1557 + 1558 + if (err) { 1559 + kvm_err("Cannot map hyp guest vector address\n"); 1551 1560 goto out_err; 1552 1561 } 1553 1562 }
-66
arch/arm64/kvm/hyp/hyp-entry.S
··· 12 12 #include <asm/cpufeature.h> 13 13 #include <asm/kvm_arm.h> 14 14 #include <asm/kvm_asm.h> 15 - #include <asm/kvm_mmu.h> 16 15 #include <asm/mmu.h> 17 16 18 17 .macro save_caller_saved_regs_vect ··· 40 41 41 42 .text 42 43 43 - .macro do_el2_call 44 - /* 45 - * Shuffle the parameters before calling the function 46 - * pointed to in x0. Assumes parameters in x[1,2,3]. 47 - */ 48 - str lr, [sp, #-16]! 49 - mov lr, x0 50 - mov x0, x1 51 - mov x1, x2 52 - mov x2, x3 53 - blr lr 54 - ldr lr, [sp], #16 55 - .endm 56 - 57 44 el1_sync: // Guest trapped into EL2 58 45 59 46 mrs x0, esr_el2 ··· 48 63 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne 49 64 b.ne el1_trap 50 65 51 - #ifdef __KVM_NVHE_HYPERVISOR__ 52 - mrs x1, vttbr_el2 // If vttbr is valid, the guest 53 - cbnz x1, el1_hvc_guest // called HVC 54 - 55 - /* Here, we're pretty sure the host called HVC. */ 56 - ldp x0, x1, [sp], #16 57 - 58 - /* Check for a stub HVC call */ 59 - cmp x0, #HVC_STUB_HCALL_NR 60 - b.hs 1f 61 - 62 - /* 63 - * Compute the idmap address of __kvm_handle_stub_hvc and 64 - * jump there. Since we use kimage_voffset, do not use the 65 - * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead 66 - * (by loading it from the constant pool). 67 - * 68 - * Preserve x0-x4, which may contain stub parameters. 69 - */ 70 - ldr x5, =__kvm_handle_stub_hvc 71 - ldr_l x6, kimage_voffset 72 - 73 - /* x5 = __pa(x5) */ 74 - sub x5, x5, x6 75 - br x5 76 - 77 - 1: 78 - /* 79 - * Perform the EL2 call 80 - */ 81 - kern_hyp_va x0 82 - do_el2_call 83 - 84 - eret 85 - sb 86 - #endif /* __KVM_NVHE_HYPERVISOR__ */ 87 - 88 - el1_hvc_guest: 89 66 /* 90 67 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. 
91 68 * The workaround has already been applied on the host, ··· 145 198 eret 146 199 sb 147 200 148 - #ifdef __KVM_NVHE_HYPERVISOR__ 149 - SYM_FUNC_START(__hyp_do_panic) 150 - mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ 151 - PSR_MODE_EL1h) 152 - msr spsr_el2, lr 153 - ldr lr, =panic 154 - msr elr_el2, lr 155 - eret 156 - sb 157 - SYM_FUNC_END(__hyp_do_panic) 158 - #endif 159 - 160 201 .macro invalid_vector label, target = hyp_panic 161 202 .align 2 162 203 SYM_CODE_START(\label) ··· 157 222 invalid_vector el2t_irq_invalid 158 223 invalid_vector el2t_fiq_invalid 159 224 invalid_vector el2t_error_invalid 160 - invalid_vector el2h_sync_invalid 161 225 invalid_vector el2h_irq_invalid 162 226 invalid_vector el2h_fiq_invalid 163 227 invalid_vector el1_fiq_invalid
+1 -1
arch/arm64/kvm/hyp/nvhe/Makefile
··· 6 6 asflags-y := -D__KVM_NVHE_HYPERVISOR__ 7 7 ccflags-y := -D__KVM_NVHE_HYPERVISOR__ 8 8 9 - obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o 9 + obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o 10 10 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \ 11 11 ../fpsimd.o ../hyp-entry.o 12 12
+108
arch/arm64/kvm/hyp/nvhe/host.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 - Google Inc 4 + * Author: Andrew Scull <ascull@google.com> 5 + */ 6 + 7 + #include <linux/linkage.h> 8 + 9 + #include <asm/assembler.h> 10 + #include <asm/kvm_asm.h> 11 + #include <asm/kvm_mmu.h> 12 + 13 + .text 14 + 15 + SYM_FUNC_START(__hyp_do_panic) 16 + mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ 17 + PSR_MODE_EL1h) 18 + msr spsr_el2, lr 19 + ldr lr, =panic 20 + msr elr_el2, lr 21 + eret 22 + sb 23 + SYM_FUNC_END(__hyp_do_panic) 24 + 25 + .macro host_el1_sync_vect 26 + .align 7 27 + .L__vect_start\@: 28 + esb 29 + stp x0, x1, [sp, #-16]! 30 + mrs x0, esr_el2 31 + lsr x0, x0, #ESR_ELx_EC_SHIFT 32 + cmp x0, #ESR_ELx_EC_HVC64 33 + ldp x0, x1, [sp], #16 34 + b.ne hyp_panic 35 + 36 + /* Check for a stub HVC call */ 37 + cmp x0, #HVC_STUB_HCALL_NR 38 + b.hs 1f 39 + 40 + /* 41 + * Compute the idmap address of __kvm_handle_stub_hvc and 42 + * jump there. Since we use kimage_voffset, do not use the 43 + * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead 44 + * (by loading it from the constant pool). 45 + * 46 + * Preserve x0-x4, which may contain stub parameters. 47 + */ 48 + ldr x5, =__kvm_handle_stub_hvc 49 + ldr_l x6, kimage_voffset 50 + 51 + /* x5 = __pa(x5) */ 52 + sub x5, x5, x6 53 + br x5 54 + 55 + 1: 56 + /* 57 + * Shuffle the parameters before calling the function 58 + * pointed to in x0. Assumes parameters in x[1,2,3]. 59 + */ 60 + kern_hyp_va x0 61 + str lr, [sp, #-16]! 
62 + mov lr, x0 63 + mov x0, x1 64 + mov x1, x2 65 + mov x2, x3 66 + blr lr 67 + ldr lr, [sp], #16 68 + 69 + eret 70 + sb 71 + .L__vect_end\@: 72 + .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80) 73 + .error "host_el1_sync_vect larger than vector entry" 74 + .endif 75 + .endm 76 + 77 + .macro invalid_host_vect 78 + .align 7 79 + b hyp_panic 80 + .endm 81 + 82 + /* 83 + * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the 84 + * host knows about the EL2 vectors already, and there is no point in hiding 85 + * them. 86 + */ 87 + .align 11 88 + SYM_CODE_START(__kvm_hyp_host_vector) 89 + invalid_host_vect // Synchronous EL2t 90 + invalid_host_vect // IRQ EL2t 91 + invalid_host_vect // FIQ EL2t 92 + invalid_host_vect // Error EL2t 93 + 94 + invalid_host_vect // Synchronous EL2h 95 + invalid_host_vect // IRQ EL2h 96 + invalid_host_vect // FIQ EL2h 97 + invalid_host_vect // Error EL2h 98 + 99 + host_el1_sync_vect // Synchronous 64-bit EL1 100 + invalid_host_vect // IRQ 64-bit EL1 101 + invalid_host_vect // FIQ 64-bit EL1 102 + invalid_host_vect // Error 64-bit EL1 103 + 104 + invalid_host_vect // Synchronous 32-bit EL1 105 + invalid_host_vect // IRQ 32-bit EL1 106 + invalid_host_vect // FIQ 32-bit EL1 107 + invalid_host_vect // Error 32-bit EL1 108 + SYM_CODE_END(__kvm_hyp_host_vector)
+3
arch/arm64/kvm/hyp/nvhe/switch.c
··· 42 42 } 43 43 44 44 write_sysreg(val, cptr_el2); 45 + write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2); 45 46 46 47 if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { 47 48 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; ··· 61 60 62 61 static void __deactivate_traps(struct kvm_vcpu *vcpu) 63 62 { 63 + extern char __kvm_hyp_host_vector[]; 64 64 u64 mdcr_el2; 65 65 66 66 ___deactivate_traps(vcpu); ··· 93 91 write_sysreg(mdcr_el2, mdcr_el2); 94 92 write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2); 95 93 write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); 94 + write_sysreg(__kvm_hyp_host_vector, vbar_el2); 96 95 } 97 96 98 97 static void __load_host_stage2(void)