Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Rename the VHE switch to "finalise_el2"

As we are about to perform a lot more in 'mutate_to_vhe' than
we currently do, this function really becomes the point where
we finalise the basic EL2 configuration.

Reflect this into the code by renaming a bunch of things:
- HVC_VHE_RESTART -> HVC_FINALISE_EL2
- switch_to_vhe -> finalise_el2
- mutate_to_vhe -> __finalise_el2

No functional changes.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220630160500.1536744-2-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Marc Zyngier and committed by
Will Deacon
7ddb0c3d 0aaa6853

+22 -22
+6 -5
Documentation/virt/kvm/arm/hyp-abi.rst
··· 60 60 61 61 * :: 62 62 63 - x0 = HVC_VHE_RESTART (arm64 only) 63 + x0 = HVC_FINALISE_EL2 (arm64 only) 64 64 65 - Attempt to upgrade the kernel's exception level from EL1 to EL2 by enabling 66 - the VHE mode. This is conditioned by the CPU supporting VHE, the EL2 MMU 67 - being off, and VHE not being disabled by any other means (command line 68 - option, for example). 65 + Finish configuring EL2 depending on the command-line options, 66 + including an attempt to upgrade the kernel's exception level from 67 + EL1 to EL2 by enabling the VHE mode. This is conditioned by the CPU 68 + supporting VHE, the EL2 MMU being off, and VHE not being disabled by 69 + any other means (command line option, for example). 69 70 70 71 Any other value of r0/x0 triggers a hypervisor-specific handling, 71 72 which is not documented here.
+2 -2
arch/arm64/include/asm/virt.h
··· 36 36 #define HVC_RESET_VECTORS 2 37 37 38 38 /* 39 - * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible 39 + * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible 40 40 */ 41 - #define HVC_VHE_RESTART 3 41 + #define HVC_FINALISE_EL2 3 42 42 43 43 /* Max number of HYP stub hypercalls */ 44 44 #define HVC_STUB_HCALL_NR 4
+3 -3
arch/arm64/kernel/head.S
··· 459 459 mov x0, x22 // pass FDT address in x0 460 460 bl init_feature_override // Parse cpu feature overrides 461 461 mov x0, x20 462 - bl switch_to_vhe // Prefer VHE if possible 462 + bl finalise_el2 // Prefer VHE if possible 463 463 ldp x29, x30, [sp], #16 464 464 bl start_kernel 465 465 ASM_BUG() ··· 542 542 eret 543 543 544 544 __cpu_stick_to_vhe: 545 - mov x0, #HVC_VHE_RESTART 545 + mov x0, #HVC_FINALISE_EL2 546 546 hvc #0 547 547 mov x0, #BOOT_CPU_MODE_EL2 548 548 ret ··· 592 592 * Common entry point for secondary CPUs. 593 593 */ 594 594 mov x20, x0 // preserve boot mode 595 - bl switch_to_vhe 595 + bl finalise_el2 596 596 bl __cpu_secondary_check52bitva 597 597 #if VA_BITS > 48 598 598 ldr_l x0, vabits_actual
+10 -11
arch/arm64/kernel/hyp-stub.S
··· 51 51 msr vbar_el2, x1 52 52 b 9f 53 53 54 - 1: cmp x0, #HVC_VHE_RESTART 55 - b.eq mutate_to_vhe 54 + 1: cmp x0, #HVC_FINALISE_EL2 55 + b.eq __finalise_el2 56 56 57 57 2: cmp x0, #HVC_SOFT_RESTART 58 58 b.ne 3f ··· 73 73 eret 74 74 SYM_CODE_END(elx_sync) 75 75 76 - // nVHE? No way! Give me the real thing! 77 - SYM_CODE_START_LOCAL(mutate_to_vhe) 76 + SYM_CODE_START_LOCAL(__finalise_el2) 77 + // nVHE? No way! Give me the real thing! 78 78 // Sanity check: MMU *must* be off 79 79 mrs x1, sctlr_el2 80 80 tbnz x1, #0, 1f ··· 140 140 msr spsr_el1, x0 141 141 142 142 b enter_vhe 143 - SYM_CODE_END(mutate_to_vhe) 143 + SYM_CODE_END(__finalise_el2) 144 144 145 145 // At the point where we reach enter_vhe(), we run with 146 - // the MMU off (which is enforced by mutate_to_vhe()). 146 + // the MMU off (which is enforced by __finalise_el2()). 147 147 // We thus need to be in the idmap, or everything will 148 148 // explode when enabling the MMU. 149 149 ··· 222 222 SYM_FUNC_END(__hyp_reset_vectors) 223 223 224 224 /* 225 - * Entry point to switch to VHE if deemed capable 225 + * Entry point to finalise EL2 and switch to VHE if deemed capable 226 226 * 227 227 * w0: boot mode, as returned by init_kernel_el() 228 228 */ 229 - SYM_FUNC_START(switch_to_vhe) 229 + SYM_FUNC_START(finalise_el2) 230 230 // Need to have booted at EL2 231 231 cmp w0, #BOOT_CPU_MODE_EL2 232 232 b.ne 1f ··· 236 236 cmp x0, #CurrentEL_EL1 237 237 b.ne 1f 238 238 239 - // Turn the world upside down 240 - mov x0, #HVC_VHE_RESTART 239 + mov x0, #HVC_FINALISE_EL2 241 240 hvc #0 242 241 1: 243 242 ret 244 - SYM_FUNC_END(switch_to_vhe) 243 + SYM_FUNC_END(finalise_el2)
+1 -1
arch/arm64/kernel/sleep.S
··· 100 100 .pushsection ".idmap.text", "awx" 101 101 SYM_CODE_START(cpu_resume) 102 102 bl init_kernel_el 103 - bl switch_to_vhe 103 + bl finalise_el2 104 104 bl __cpu_setup 105 105 /* enable the MMU early - so we can access sleep_save_stash by va */ 106 106 adrp x1, swapper_pg_dir