Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

At tag v5.5 · 351 lines · 8.0 kB · view raw
1/* SPDX-License-Identifier: GPL-2.0-only */ 2/* 3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University 4 * Author: Christoffer Dall <c.dall@virtualopensystems.com> 5 */ 6 7#ifndef __ARM_KVM_EMULATE_H__ 8#define __ARM_KVM_EMULATE_H__ 9 10#include <linux/kvm_host.h> 11#include <asm/kvm_asm.h> 12#include <asm/kvm_mmio.h> 13#include <asm/kvm_arm.h> 14#include <asm/cputype.h> 15 16/* arm64 compatibility macros */ 17#define PSR_AA32_MODE_ABT ABT_MODE 18#define PSR_AA32_MODE_UND UND_MODE 19#define PSR_AA32_T_BIT PSR_T_BIT 20#define PSR_AA32_I_BIT PSR_I_BIT 21#define PSR_AA32_A_BIT PSR_A_BIT 22#define PSR_AA32_E_BIT PSR_E_BIT 23#define PSR_AA32_IT_MASK PSR_IT_MASK 24 25unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); 26 27static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num) 28{ 29 return vcpu_reg(vcpu, reg_num); 30} 31 32unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu); 33 34static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu) 35{ 36 return *__vcpu_spsr(vcpu); 37} 38 39static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v) 40{ 41 *__vcpu_spsr(vcpu) = v; 42} 43 44static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, 45 u8 reg_num) 46{ 47 return *vcpu_reg(vcpu, reg_num); 48} 49 50static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, 51 unsigned long val) 52{ 53 *vcpu_reg(vcpu, reg_num) = val; 54} 55 56bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); 57void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); 58void kvm_inject_undef32(struct kvm_vcpu *vcpu); 59void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); 60void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); 61void kvm_inject_vabt(struct kvm_vcpu *vcpu); 62 63static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu) 64{ 65 kvm_inject_undef32(vcpu); 66} 67 68static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) 69{ 70 kvm_inject_dabt32(vcpu, 
addr); 71} 72 73static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) 74{ 75 kvm_inject_pabt32(vcpu, addr); 76} 77 78static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) 79{ 80 return kvm_condition_valid32(vcpu); 81} 82 83static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) 84{ 85 kvm_skip_instr32(vcpu, is_wide_instr); 86} 87 88static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) 89{ 90 vcpu->arch.hcr = HCR_GUEST_MASK; 91} 92 93static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu) 94{ 95 return (unsigned long *)&vcpu->arch.hcr; 96} 97 98static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu) 99{ 100 vcpu->arch.hcr &= ~HCR_TWE; 101} 102 103static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu) 104{ 105 vcpu->arch.hcr |= HCR_TWE; 106} 107 108static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) 109{ 110 return true; 111} 112 113static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) 114{ 115 return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc; 116} 117 118static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) 119{ 120 return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr; 121} 122 123static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) 124{ 125 *vcpu_cpsr(vcpu) |= PSR_T_BIT; 126} 127 128static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) 129{ 130 unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; 131 return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); 132} 133 134static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) 135{ 136 unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; 137 return cpsr_mode > USR_MODE; 138} 139 140static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) 141{ 142 return vcpu->arch.fault.hsr; 143} 144 145static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) 146{ 147 u32 hsr = kvm_vcpu_get_hsr(vcpu); 148 149 if (hsr & 
HSR_CV) 150 return (hsr & HSR_COND) >> HSR_COND_SHIFT; 151 152 return -1; 153} 154 155static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu) 156{ 157 return vcpu->arch.fault.hxfar; 158} 159 160static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) 161{ 162 return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; 163} 164 165static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) 166{ 167 return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; 168} 169 170static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu) 171{ 172 return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC); 173} 174 175static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu) 176{ 177 return kvm_vcpu_get_hsr(vcpu) & HSR_WNR; 178} 179 180static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu) 181{ 182 return kvm_vcpu_get_hsr(vcpu) & HSR_SSE; 183} 184 185static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) 186{ 187 return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; 188} 189 190static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) 191{ 192 return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; 193} 194 195static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu) 196{ 197 return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM); 198} 199 200/* Get Access Size from a data abort */ 201static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) 202{ 203 switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { 204 case 0: 205 return 1; 206 case 1: 207 return 2; 208 case 2: 209 return 4; 210 default: 211 kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); 212 return -EFAULT; 213 } 214} 215 216/* This one is not specific to Data Abort */ 217static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) 218{ 219 return kvm_vcpu_get_hsr(vcpu) & HSR_IL; 220} 221 222static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) 223{ 224 return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; 225} 226 227static inline 
bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) 228{ 229 return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; 230} 231 232static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) 233{ 234 return kvm_vcpu_get_hsr(vcpu) & HSR_FSC; 235} 236 237static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu) 238{ 239 return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE; 240} 241 242static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu) 243{ 244 switch (kvm_vcpu_trap_get_fault(vcpu)) { 245 case FSC_SEA: 246 case FSC_SEA_TTW0: 247 case FSC_SEA_TTW1: 248 case FSC_SEA_TTW2: 249 case FSC_SEA_TTW3: 250 case FSC_SECC: 251 case FSC_SECC_TTW0: 252 case FSC_SECC_TTW1: 253 case FSC_SECC_TTW2: 254 case FSC_SECC_TTW3: 255 return true; 256 default: 257 return false; 258 } 259} 260 261static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) 262{ 263 if (kvm_vcpu_trap_is_iabt(vcpu)) 264 return false; 265 266 return kvm_vcpu_dabt_iswrite(vcpu); 267} 268 269static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) 270{ 271 return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; 272} 273 274static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) 275{ 276 return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK; 277} 278 279static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu) 280{ 281 return false; 282} 283 284static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu, 285 bool flag) 286{ 287} 288 289static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) 290{ 291 *vcpu_cpsr(vcpu) |= PSR_E_BIT; 292} 293 294static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) 295{ 296 return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT); 297} 298 299static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, 300 unsigned long data, 301 unsigned int len) 302{ 303 if (kvm_vcpu_is_be(vcpu)) { 304 switch (len) { 305 case 1: 306 return data & 0xff; 307 case 2: 308 return be16_to_cpu(data & 0xffff); 309 default: 310 return 
be32_to_cpu(data); 311 } 312 } else { 313 switch (len) { 314 case 1: 315 return data & 0xff; 316 case 2: 317 return le16_to_cpu(data & 0xffff); 318 default: 319 return le32_to_cpu(data); 320 } 321 } 322} 323 324static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, 325 unsigned long data, 326 unsigned int len) 327{ 328 if (kvm_vcpu_is_be(vcpu)) { 329 switch (len) { 330 case 1: 331 return data & 0xff; 332 case 2: 333 return cpu_to_be16(data & 0xffff); 334 default: 335 return cpu_to_be32(data); 336 } 337 } else { 338 switch (len) { 339 case 1: 340 return data & 0xff; 341 case 2: 342 return cpu_to_le16(data & 0xffff); 343 default: 344 return cpu_to_le32(data); 345 } 346 } 347} 348 349static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} 350 351#endif /* __ARM_KVM_EMULATE_H__ */