/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>

static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}

static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |			/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}

static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_EL1_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_EL1_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_EL1_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_EL1_ZEN;

	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;

extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);

/* Result of translating a guest IPA through the guest's stage-2 tables */
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
	/* Bit 54 is the stage-2 XN bit in the descriptor */
	return !(trans->desc & BIT(54));
}

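/*
 * Illustrative sketch only, not kernel code: a fault handler for a
 * nested guest might combine the walker declared below with the
 * accessors above along these lines ("gipa", "write_fault" and "ret"
 * are hypothetical caller-side locals):
 *
 *	struct kvm_s2_trans trans;
 *
 *	ret = kvm_walk_nested_s2(vcpu, gipa, &trans);
 *	if (ret)
 *		return kvm_inject_s2_fault(vcpu, kvm_s2_trans_esr(&trans));
 *
 *	if (write_fault && !kvm_s2_trans_writable(&trans))
 *		... forward a permission fault to the guest hypervisor ...
 */
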
extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);

static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

/* Adjust alignment for the contiguous bit as per StageOA() */
#define contiguous_bit_shift(d, wi, l)				\
	({							\
		u8 shift = 0;					\
								\
		if ((d) & PTE_CONT) {				\
			switch (BIT((wi)->pgshift)) {		\
			case SZ_4K:				\
				shift = 4;			\
				break;				\
			case SZ_16K:				\
				shift = (l) == 2 ? 5 : 7;	\
				break;				\
			case SZ_64K:				\
				shift = 5;			\
				break;				\
			}					\
		}						\
								\
		shift;						\
	})

/* Convert a {V,}TCR_EL2.PS field encoding to an output size in bits */
static inline unsigned int ps_to_output_size(unsigned int ps)
{
	switch (ps) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5:
	default:
		return 48;
	}
}

#endif /* __ARM64_KVM_NESTED_H */