Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: KVM: Refactor kern_hyp_va to deal with multiple offsets

As we move towards a selectable HYP VA range, it is obvious that
we don't want to test a variable to find out if we need to use
the bottom VA range, the top VA range, or use the address as is
(for VHE).

Instead, we can expand our current helper to generate the right
mask or nop with code patching. We default to using the top VA
space, with alternatives to switch to the bottom one or to nop
out the instructions.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

Authored by Marc Zyngier; committed by Christoffer Dall.
fd81e6bf d53d9bc6

+39 -14
-11
arch/arm64/include/asm/kvm_hyp.h
··· 25 25 26 26 #define __hyp_text __section(.hyp.text) notrace 27 27 28 - static inline unsigned long __kern_hyp_va(unsigned long v) 29 - { 30 - asm volatile(ALTERNATIVE("and %0, %0, %1", 31 - "nop", 32 - ARM64_HAS_VIRT_HOST_EXTN) 33 - : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK)); 34 - return v; 35 - } 36 - 37 - #define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) 38 - 39 28 #define read_sysreg_elx(r,nvh,vh) \ 40 29 ({ \ 41 30 u64 reg; \
+39 -3
arch/arm64/include/asm/kvm_mmu.h
··· 90 90 /* 91 91 * Convert a kernel VA into a HYP VA. 92 92 * reg: VA to be converted. 93 + * 94 + * This generates the following sequences: 95 + * - High mask: 96 + * and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK 97 + * nop 98 + * - Low mask: 99 + * and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK 100 + * and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK 101 + * - VHE: 102 + * nop 103 + * nop 104 + * 105 + * The "low mask" version works because the mask is a strict subset of 106 + * the "high mask", hence performing the first mask for nothing. 107 + * Should be completely invisible on any viable CPU. 93 108 */ 94 109 .macro kern_hyp_va reg 95 - alternative_if_not ARM64_HAS_VIRT_HOST_EXTN 96 - and \reg, \reg, #HYP_PAGE_OFFSET_MASK 110 + alternative_if_not ARM64_HAS_VIRT_HOST_EXTN 111 + and \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK 97 112 alternative_else 98 113 nop 114 + alternative_endif 115 + alternative_if_not ARM64_HYP_OFFSET_LOW 116 + nop 117 + alternative_else 118 + and \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK 99 119 alternative_endif 100 120 .endm 101 121 ··· 127 107 #include <asm/mmu_context.h> 128 108 #include <asm/pgtable.h> 129 109 130 - #define KERN_TO_HYP(kva) ((unsigned long)kva & HYP_PAGE_OFFSET_MASK) 110 + static inline unsigned long __kern_hyp_va(unsigned long v) 111 + { 112 + asm volatile(ALTERNATIVE("and %0, %0, %1", 113 + "nop", 114 + ARM64_HAS_VIRT_HOST_EXTN) 115 + : "+r" (v) 116 + : "i" (HYP_PAGE_OFFSET_HIGH_MASK)); 117 + asm volatile(ALTERNATIVE("nop", 118 + "and %0, %0, %1", 119 + ARM64_HYP_OFFSET_LOW) 120 + : "+r" (v) 121 + : "i" (HYP_PAGE_OFFSET_LOW_MASK)); 122 + return v; 123 + } 124 + 125 + #define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v))) 126 + #define KERN_TO_HYP(v) kern_hyp_va(v) 131 127 132 128 /* 133 129 * We currently only support a 40bit IPA.