Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Save/restore MTE registers

Define the new system registers that MTE introduces and context switch
them. The MTE feature is still hidden from the ID register as it isn't
supported in a VM yet.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210621111716.37157-4-steven.price@arm.com

Authored by Steven Price and committed by Marc Zyngier
(commit e1f358b5; parent ea7fc1bb)

+124 -6
+2 -1
arch/arm64/include/asm/kvm_arm.h
··· 12 12 #include <asm/types.h> 13 13 14 14 /* Hyp Configuration Register (HCR) bits */ 15 - #define HCR_ATA (UL(1) << 56) 15 + #define HCR_ATA_SHIFT 56 16 + #define HCR_ATA (UL(1) << HCR_ATA_SHIFT) 16 17 #define HCR_FWB (UL(1) << 46) 17 18 #define HCR_API (UL(1) << 41) 18 19 #define HCR_APK (UL(1) << 40)
+6
arch/arm64/include/asm/kvm_host.h
··· 209 209 CNTP_CVAL_EL0, 210 210 CNTP_CTL_EL0, 211 211 212 + /* Memory Tagging Extension registers */ 213 + RGSR_EL1, /* Random Allocation Tag Seed Register */ 214 + GCR_EL1, /* Tag Control Register */ 215 + TFSR_EL1, /* Tag Fault Status Register (EL1) */ 216 + TFSRE0_EL1, /* Tag Fault Status Register (EL0) */ 217 + 212 218 /* 32bit specific registers. Keep them at the end of the range */ 213 219 DACR32_EL2, /* Domain Access Control Register */ 214 220 IFSR32_EL2, /* Instruction Fault Status Register */
+66
arch/arm64/include/asm/kvm_mte.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2020-2021 ARM Ltd. 4 + */ 5 + #ifndef __ASM_KVM_MTE_H 6 + #define __ASM_KVM_MTE_H 7 + 8 + #ifdef __ASSEMBLY__ 9 + 10 + #include <asm/sysreg.h> 11 + 12 + #ifdef CONFIG_ARM64_MTE 13 + 14 + .macro mte_switch_to_guest g_ctxt, h_ctxt, reg1 15 + alternative_if_not ARM64_MTE 16 + b .L__skip_switch\@ 17 + alternative_else_nop_endif 18 + mrs \reg1, hcr_el2 19 + tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@ 20 + 21 + mrs_s \reg1, SYS_RGSR_EL1 22 + str \reg1, [\h_ctxt, #CPU_RGSR_EL1] 23 + mrs_s \reg1, SYS_GCR_EL1 24 + str \reg1, [\h_ctxt, #CPU_GCR_EL1] 25 + 26 + ldr \reg1, [\g_ctxt, #CPU_RGSR_EL1] 27 + msr_s SYS_RGSR_EL1, \reg1 28 + ldr \reg1, [\g_ctxt, #CPU_GCR_EL1] 29 + msr_s SYS_GCR_EL1, \reg1 30 + 31 + .L__skip_switch\@: 32 + .endm 33 + 34 + .macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1 35 + alternative_if_not ARM64_MTE 36 + b .L__skip_switch\@ 37 + alternative_else_nop_endif 38 + mrs \reg1, hcr_el2 39 + tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@ 40 + 41 + mrs_s \reg1, SYS_RGSR_EL1 42 + str \reg1, [\g_ctxt, #CPU_RGSR_EL1] 43 + mrs_s \reg1, SYS_GCR_EL1 44 + str \reg1, [\g_ctxt, #CPU_GCR_EL1] 45 + 46 + ldr \reg1, [\h_ctxt, #CPU_RGSR_EL1] 47 + msr_s SYS_RGSR_EL1, \reg1 48 + ldr \reg1, [\h_ctxt, #CPU_GCR_EL1] 49 + msr_s SYS_GCR_EL1, \reg1 50 + 51 + isb 52 + 53 + .L__skip_switch\@: 54 + .endm 55 + 56 + #else /* !CONFIG_ARM64_MTE */ 57 + 58 + .macro mte_switch_to_guest g_ctxt, h_ctxt, reg1 59 + .endm 60 + 61 + .macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1 62 + .endm 63 + 64 + #endif /* CONFIG_ARM64_MTE */ 65 + #endif /* __ASSEMBLY__ */ 66 + #endif /* __ASM_KVM_MTE_H */
+2 -1
arch/arm64/include/asm/sysreg.h
··· 651 651 652 652 #define INIT_SCTLR_EL2_MMU_ON \ 653 653 (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \ 654 - SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | SCTLR_EL2_RES1) 654 + SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \ 655 + SCTLR_ELx_ITFSB | SCTLR_EL2_RES1) 655 656 656 657 #define INIT_SCTLR_EL2_MMU_OFF \ 657 658 (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+2
arch/arm64/kernel/asm-offsets.c
··· 111 111 DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); 112 112 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); 113 113 DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs)); 114 + DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1])); 115 + DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1])); 114 116 DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1])); 115 117 DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1])); 116 118 DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
+7
arch/arm64/kvm/hyp/entry.S
··· 13 13 #include <asm/kvm_arm.h> 14 14 #include <asm/kvm_asm.h> 15 15 #include <asm/kvm_mmu.h> 16 + #include <asm/kvm_mte.h> 16 17 #include <asm/kvm_ptrauth.h> 17 18 18 19 .text ··· 51 50 set_loaded_vcpu x0, x1, x2 52 51 53 52 add x29, x0, #VCPU_CONTEXT 53 + 54 + // mte_switch_to_guest(g_ctxt, h_ctxt, tmp1) 55 + mte_switch_to_guest x29, x1, x2 54 56 55 57 // Macro ptrauth_switch_to_guest format: 56 58 // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3) ··· 145 141 // as it may cause Pointer Authentication key signing mismatch errors 146 142 // when this feature is enabled for kernel code. 147 143 ptrauth_switch_to_hyp x1, x2, x3, x4, x5 144 + 145 + // mte_switch_to_hyp(g_ctxt, h_ctxt, reg1) 146 + mte_switch_to_hyp x1, x2, x3 148 147 149 148 // Restore hyp's sp_el0 150 149 restore_sp_el0 x2, x3
+21
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
··· 14 14 #include <asm/kvm_asm.h> 15 15 #include <asm/kvm_emulate.h> 16 16 #include <asm/kvm_hyp.h> 17 + #include <asm/kvm_mmu.h> 17 18 18 19 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) 19 20 { ··· 25 24 { 26 25 ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0); 27 26 ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0); 27 + } 28 + 29 + static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt) 30 + { 31 + struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; 32 + 33 + if (!vcpu) 34 + vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt); 35 + 36 + return kvm_has_mte(kern_hyp_va(vcpu->kvm)); 28 37 } 29 38 30 39 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) ··· 56 45 ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL); 57 46 ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par(); 58 47 ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1); 48 + 49 + if (ctxt_has_mte(ctxt)) { 50 + ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR); 51 + ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1); 52 + } 59 53 60 54 ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1); 61 55 ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR); ··· 122 106 write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL); 123 107 write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1); 124 108 write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1); 109 + 110 + if (ctxt_has_mte(ctxt)) { 111 + write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR); 112 + write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1); 113 + } 125 114 126 115 if (!has_vhe() && 127 116 cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
+18 -4
arch/arm64/kvm/sys_regs.c
··· 1309 1309 return true; 1310 1310 } 1311 1311 1312 + static unsigned int mte_visibility(const struct kvm_vcpu *vcpu, 1313 + const struct sys_reg_desc *rd) 1314 + { 1315 + return REG_HIDDEN; 1316 + } 1317 + 1318 + #define MTE_REG(name) { \ 1319 + SYS_DESC(SYS_##name), \ 1320 + .access = undef_access, \ 1321 + .reset = reset_unknown, \ 1322 + .reg = name, \ 1323 + .visibility = mte_visibility, \ 1324 + } 1325 + 1312 1326 /* sys_reg_desc initialiser for known cpufeature ID registers */ 1313 1327 #define ID_SANITISED(name) { \ 1314 1328 SYS_DESC(SYS_##name), \ ··· 1491 1477 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 }, 1492 1478 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, 1493 1479 1494 - { SYS_DESC(SYS_RGSR_EL1), undef_access }, 1495 - { SYS_DESC(SYS_GCR_EL1), undef_access }, 1480 + MTE_REG(RGSR_EL1), 1481 + MTE_REG(GCR_EL1), 1496 1482 1497 1483 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, 1498 1484 { SYS_DESC(SYS_TRFCR_EL1), undef_access }, ··· 1519 1505 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, 1520 1506 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, 1521 1507 1522 - { SYS_DESC(SYS_TFSR_EL1), undef_access }, 1523 - { SYS_DESC(SYS_TFSRE0_EL1), undef_access }, 1508 + MTE_REG(TFSR_EL1), 1509 + MTE_REG(TFSRE0_EL1), 1524 1510 1525 1511 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, 1526 1512 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },