Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: KVM: Simplify HYP init/teardown

Now that we only have the "merged page tables" case to deal with,
there is a bunch of things we can simplify in the HYP code (both
at init and teardown time).

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

Authored by Marc Zyngier and committed by Christoffer Dall

Commit: 3421e9d8  Parent: 0535a3e2

+26 -92
+3 -9
arch/arm64/include/asm/kvm_host.h
··· 48 48 int __attribute_const__ kvm_target_cpu(void); 49 49 int kvm_reset_vcpu(struct kvm_vcpu *vcpu); 50 50 int kvm_arch_dev_ioctl_check_extension(long ext); 51 - unsigned long kvm_hyp_reset_entry(void); 52 51 void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); 53 52 54 53 struct kvm_arch { ··· 356 357 * Call initialization code, and switch to the full blown 357 358 * HYP code. 358 359 */ 359 - __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, 360 - hyp_stack_ptr, vector_ptr); 360 + __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr); 361 361 } 362 362 363 + void __kvm_hyp_teardown(void); 363 364 static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr, 364 365 phys_addr_t phys_idmap_start) 365 366 { 366 - /* 367 - * Call reset code, and switch back to stub hyp vectors. 368 - * Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation. 369 - */ 370 - __kvm_call_hyp((void *)kvm_hyp_reset_entry(), 371 - boot_pgd_ptr, phys_idmap_start); 367 + kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start); 372 368 } 373 369 374 370 static inline void kvm_arch_hardware_unsetup(void) {}
+8 -53
arch/arm64/kvm/hyp-init.S
··· 53 53 b . 54 54 55 55 /* 56 - * x0: HYP boot pgd 57 - * x1: HYP pgd 58 - * x2: HYP stack 59 - * x3: HYP vectors 56 + * x0: HYP pgd 57 + * x1: HYP stack 58 + * x2: HYP vectors 60 59 */ 61 60 __do_hyp_init: 62 61 ··· 109 110 msr sctlr_el2, x4 110 111 isb 111 112 112 - /* Skip the trampoline dance if we merged the boot and runtime PGDs */ 113 - cmp x0, x1 114 - b.eq merged 115 - 116 - /* MMU is now enabled. Get ready for the trampoline dance */ 117 - ldr x4, =TRAMPOLINE_VA 118 - adr x5, target 119 - bfi x4, x5, #0, #PAGE_SHIFT 120 - br x4 121 - 122 - target: /* We're now in the trampoline code, switch page tables */ 123 - msr ttbr0_el2, x1 124 - isb 125 - 126 - /* Invalidate the old TLBs */ 127 - tlbi alle2 128 - dsb sy 129 - 130 - merged: 131 113 /* Set the stack and new vectors */ 114 + kern_hyp_va x1 115 + mov sp, x1 132 116 kern_hyp_va x2 133 - mov sp, x2 134 - kern_hyp_va x3 135 - msr vbar_el2, x3 117 + msr vbar_el2, x2 136 118 137 119 /* Hello, World! */ 138 120 eret 139 121 ENDPROC(__kvm_hyp_init) 140 122 141 123 /* 142 - * Reset kvm back to the hyp stub. This is the trampoline dance in 143 - * reverse. If kvm used an extended idmap, __extended_idmap_trampoline 144 - * calls this code directly in the idmap. In this case switching to the 145 - * boot tables is a no-op. 146 - * 147 - * x0: HYP boot pgd 148 - * x1: HYP phys_idmap_start 124 + * Reset kvm back to the hyp stub. 149 125 */ 150 126 ENTRY(__kvm_hyp_reset) 151 - /* We're in trampoline code in VA, switch back to boot page tables */ 152 - msr ttbr0_el2, x0 153 - isb 154 - 155 - /* Ensure the PA branch doesn't find a stale tlb entry or stale code. 
*/ 156 - ic iallu 157 - tlbi alle2 158 - dsb sy 159 - isb 160 - 161 - /* Branch into PA space */ 162 - adr x0, 1f 163 - bfi x1, x0, #0, #PAGE_SHIFT 164 - br x1 165 - 166 127 /* We're now in idmap, disable MMU */ 167 - 1: mrs x0, sctlr_el2 128 + mrs x0, sctlr_el2 168 129 ldr x1, =SCTLR_ELx_FLAGS 169 130 bic x0, x0, x1 // Clear SCTL_M and etc 170 131 msr sctlr_el2, x0 171 132 isb 172 - 173 - /* Invalidate the old TLBs */ 174 - tlbi alle2 175 - dsb sy 176 133 177 134 /* Install stub vectors */ 178 135 adr_l x0, __hyp_stub_vectors
-19
arch/arm64/kvm/hyp/entry.S
··· 164 164 165 165 eret 166 166 ENDPROC(__fpsimd_guest_restore) 167 - 168 - /* 169 - * When using the extended idmap, we don't have a trampoline page we can use 170 - * while we switch pages tables during __kvm_hyp_reset. Accessing the idmap 171 - * directly would be ideal, but if we're using the extended idmap then the 172 - * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by 173 - * kvm_call_hyp using kern_hyp_va. 174 - * 175 - * x0: HYP boot pgd 176 - * x1: HYP phys_idmap_start 177 - */ 178 - ENTRY(__extended_idmap_trampoline) 179 - mov x4, x1 180 - adr_l x3, __kvm_hyp_reset 181 - 182 - /* insert __kvm_hyp_reset()s offset into phys_idmap_start */ 183 - bfi x4, x3, #0, #PAGE_SHIFT 184 - br x4 185 - ENDPROC(__extended_idmap_trampoline)
+15
arch/arm64/kvm/hyp/hyp-entry.S
··· 62 62 isb 63 63 ret 64 64 ENDPROC(__vhe_hyp_call) 65 + 66 + /* 67 + * Compute the idmap address of __kvm_hyp_reset based on the idmap 68 + * start passed as a parameter, and jump there. 69 + * 70 + * x0: HYP phys_idmap_start 71 + */ 72 + ENTRY(__kvm_hyp_teardown) 73 + mov x4, x0 74 + adr_l x3, __kvm_hyp_reset 75 + 76 + /* insert __kvm_hyp_reset()s offset into phys_idmap_start */ 77 + bfi x4, x3, #0, #PAGE_SHIFT 78 + br x4 79 + ENDPROC(__kvm_hyp_teardown) 65 80 66 81 el1_sync: // Guest trapped into EL2 67 82 save_x0_to_x3
-11
arch/arm64/kvm/reset.c
··· 132 132 /* Reset timer */ 133 133 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); 134 134 } 135 - 136 - unsigned long kvm_hyp_reset_entry(void) 137 - { 138 - /* 139 - * KVM is running with merged page tables, which don't have the 140 - * trampoline page mapped. We know the idmap is still mapped, 141 - * but can't be called into directly. Use 142 - * __extended_idmap_trampoline to do the call. 143 - */ 144 - return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline); 145 - }