Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: ARM: Emulation framework and CP15 emulation

Adds an important new function in the main KVM/ARM code called
handle_exit(), which is called from kvm_arch_vcpu_ioctl_run() on returns
from guest execution. This function examines the Hyp Syndrome Register
(HSR), which contains information telling KVM what caused the exit from
the guest.

Some of the reasons for an exit are CP15 accesses, which are
not allowed from the guest, and this commit handles these exits by
emulating the intended operation in software and skipping the guest
instruction.

Minor notes about the coproc register reset:
1) We reserve a value of 0 as an invalid cp15 offset, to catch bugs in our
table, at the cost of 4 bytes per vcpu.

2) Added comments on the table indicating how we handle each register, for
simplicity of understanding.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>

+1160 -4
+11
arch/arm/include/asm/kvm_arm.h
··· 70 70 HCR_SWIO | HCR_TIDCP) 71 71 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) 72 72 73 + /* System Control Register (SCTLR) bits */ 74 + #define SCTLR_TE (1 << 30) 75 + #define SCTLR_EE (1 << 25) 76 + #define SCTLR_V (1 << 13) 77 + 73 78 /* Hyp System Control Register (HSCTLR) bits */ 74 79 #define HSCTLR_TE (1 << 30) 75 80 #define HSCTLR_EE (1 << 25) ··· 176 171 #define HSR_FSC (0x3f) 177 172 #define HSR_FSC_TYPE (0x3c) 178 173 #define HSR_WNR (1 << 6) 174 + #define HSR_CV_SHIFT (24) 175 + #define HSR_CV (1U << HSR_CV_SHIFT) 176 + #define HSR_COND_SHIFT (20) 177 + #define HSR_COND (0xfU << HSR_COND_SHIFT) 179 178 180 179 #define FSC_FAULT (0x04) 181 180 #define FSC_PERM (0x0c) ··· 205 196 #define HSR_EC_IABT_HYP (0x21) 206 197 #define HSR_EC_DABT (0x24) 207 198 #define HSR_EC_DABT_HYP (0x25) 199 + 200 + #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) 208 201 209 202 #endif /* __ARM_KVM_ARM_H__ */
+14
arch/arm/include/asm/kvm_coproc.h
··· 21 21 22 22 void kvm_reset_coprocs(struct kvm_vcpu *vcpu); 23 23 24 + struct kvm_coproc_target_table { 25 + unsigned target; 26 + const struct coproc_reg *table; 27 + size_t num; 28 + }; 29 + void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); 30 + 31 + int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); 32 + int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 33 + int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 34 + int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 35 + int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 36 + int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 37 + void kvm_coproc_table_init(void); 24 38 #endif /* __ARM_KVM_COPROC_H__ */
+6
arch/arm/include/asm/kvm_emulate.h
··· 25 25 u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); 26 26 u32 *vcpu_spsr(struct kvm_vcpu *vcpu); 27 27 28 + int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); 29 + void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); 30 + void kvm_inject_undefined(struct kvm_vcpu *vcpu); 31 + void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); 32 + void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); 33 + 28 34 static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) 29 35 { 30 36 return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+4
arch/arm/include/asm/kvm_host.h
··· 94 94 * Anything that is not used directly from assembly code goes 95 95 * here. 96 96 */ 97 + /* dcache set/way operation pending */ 98 + int last_pcpu; 99 + cpumask_t require_dcache_flush; 100 + 97 101 /* Interrupt related fields */ 98 102 u32 irq_lines; /* IRQ and FIQ levels */ 99 103
+1 -1
arch/arm/kvm/Makefile
··· 18 18 19 19 obj-y += kvm-arm.o init.o interrupts.o 20 20 obj-y += arm.o guest.o mmu.o emulate.o reset.o 21 - obj-y += coproc.o 21 + obj-y += coproc.o coproc_a15.o
+166 -3
arch/arm/kvm/arm.c
··· 36 36 #include <asm/mman.h> 37 37 #include <asm/cputype.h> 38 38 #include <asm/tlbflush.h> 39 + #include <asm/cacheflush.h> 39 40 #include <asm/virt.h> 40 41 #include <asm/kvm_arm.h> 41 42 #include <asm/kvm_asm.h> 42 43 #include <asm/kvm_mmu.h> 43 44 #include <asm/kvm_emulate.h> 45 + #include <asm/kvm_coproc.h> 46 + #include <asm/opcodes.h> 44 47 45 48 #ifdef REQUIRES_VIRT 46 49 __asm__(".arch_extension virt"); ··· 297 294 { 298 295 vcpu->cpu = cpu; 299 296 vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); 297 + 298 + /* 299 + * Check whether this vcpu requires the cache to be flushed on 300 + * this physical CPU. This is a consequence of doing dcache 301 + * operations by set/way on this vcpu. We do it here to be in 302 + * a non-preemptible section. 303 + */ 304 + if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) 305 + flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ 300 306 } 301 307 302 308 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) ··· 331 319 return -EINVAL; 332 320 } 333 321 322 + /** 323 + * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled 324 + * @v: The VCPU pointer 325 + * 326 + * If the guest CPU is not waiting for interrupts or an interrupt line is 327 + * asserted, the CPU is by definition runnable. 
328 + */ 334 329 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 335 330 { 336 - return 0; 331 + return !!v->arch.irq_lines; 337 332 } 338 333 339 334 /* Just ensure a guest exit from a particular CPU */ ··· 430 411 spin_unlock(&kvm_vmid_lock); 431 412 } 432 413 414 + static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) 415 + { 416 + /* SVC called from Hyp mode should never get here */ 417 + kvm_debug("SVC called from Hyp mode shouldn't go here\n"); 418 + BUG(); 419 + return -EINVAL; /* Squash warning */ 420 + } 421 + 422 + static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) 423 + { 424 + trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), 425 + vcpu->arch.hsr & HSR_HVC_IMM_MASK); 426 + 427 + kvm_inject_undefined(vcpu); 428 + return 1; 429 + } 430 + 431 + static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) 432 + { 433 + /* We don't support SMC; don't do that. */ 434 + kvm_debug("smc: at %08x", *vcpu_pc(vcpu)); 435 + kvm_inject_undefined(vcpu); 436 + return 1; 437 + } 438 + 439 + static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) 440 + { 441 + /* The hypervisor should never cause aborts */ 442 + kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", 443 + vcpu->arch.hxfar, vcpu->arch.hsr); 444 + return -EFAULT; 445 + } 446 + 447 + static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) 448 + { 449 + /* This is either an error in the ws. 
code or an external abort */ 450 + kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", 451 + vcpu->arch.hxfar, vcpu->arch.hsr); 452 + return -EFAULT; 453 + } 454 + 455 + typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); 456 + static exit_handle_fn arm_exit_handlers[] = { 457 + [HSR_EC_WFI] = kvm_handle_wfi, 458 + [HSR_EC_CP15_32] = kvm_handle_cp15_32, 459 + [HSR_EC_CP15_64] = kvm_handle_cp15_64, 460 + [HSR_EC_CP14_MR] = kvm_handle_cp14_access, 461 + [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, 462 + [HSR_EC_CP14_64] = kvm_handle_cp14_access, 463 + [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, 464 + [HSR_EC_CP10_ID] = kvm_handle_cp10_id, 465 + [HSR_EC_SVC_HYP] = handle_svc_hyp, 466 + [HSR_EC_HVC] = handle_hvc, 467 + [HSR_EC_SMC] = handle_smc, 468 + [HSR_EC_IABT] = kvm_handle_guest_abort, 469 + [HSR_EC_IABT_HYP] = handle_pabt_hyp, 470 + [HSR_EC_DABT] = kvm_handle_guest_abort, 471 + [HSR_EC_DABT_HYP] = handle_dabt_hyp, 472 + }; 473 + 474 + /* 475 + * A conditional instruction is allowed to trap, even though it 476 + * wouldn't be executed. So let's re-implement the hardware, in 477 + * software! 478 + */ 479 + static bool kvm_condition_valid(struct kvm_vcpu *vcpu) 480 + { 481 + unsigned long cpsr, cond, insn; 482 + 483 + /* 484 + * Exception Code 0 can only happen if we set HCR.TGE to 1, to 485 + * catch undefined instructions, and then we won't get past 486 + * the arm_exit_handlers test anyway. 487 + */ 488 + BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); 489 + 490 + /* Top two bits non-zero? Unconditional. */ 491 + if (vcpu->arch.hsr >> 30) 492 + return true; 493 + 494 + cpsr = *vcpu_cpsr(vcpu); 495 + 496 + /* Is condition field valid? */ 497 + if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) 498 + cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; 499 + else { 500 + /* This can happen in Thumb mode: examine IT state. 
*/ 501 + unsigned long it; 502 + 503 + it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); 504 + 505 + /* it == 0 => unconditional. */ 506 + if (it == 0) 507 + return true; 508 + 509 + /* The cond for this insn works out as the top 4 bits. */ 510 + cond = (it >> 4); 511 + } 512 + 513 + /* Shift makes it look like an ARM-mode instruction */ 514 + insn = cond << 28; 515 + return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; 516 + } 517 + 433 518 /* 434 519 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on 435 520 * proper exit to QEMU. ··· 541 418 static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, 542 419 int exception_index) 543 420 { 544 - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 545 - return 0; 421 + unsigned long hsr_ec; 422 + 423 + switch (exception_index) { 424 + case ARM_EXCEPTION_IRQ: 425 + return 1; 426 + case ARM_EXCEPTION_UNDEFINED: 427 + kvm_err("Undefined exception in Hyp mode at: %#08x\n", 428 + vcpu->arch.hyp_pc); 429 + BUG(); 430 + panic("KVM: Hypervisor undefined exception!\n"); 431 + case ARM_EXCEPTION_DATA_ABORT: 432 + case ARM_EXCEPTION_PREF_ABORT: 433 + case ARM_EXCEPTION_HVC: 434 + hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; 435 + 436 + if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) 437 + || !arm_exit_handlers[hsr_ec]) { 438 + kvm_err("Unkown exception class: %#08lx, " 439 + "hsr: %#08x\n", hsr_ec, 440 + (unsigned int)vcpu->arch.hsr); 441 + BUG(); 442 + } 443 + 444 + /* 445 + * See ARM ARM B1.14.1: "Hyp traps on instructions 446 + * that fail their condition code check" 447 + */ 448 + if (!kvm_condition_valid(vcpu)) { 449 + bool is_wide = vcpu->arch.hsr & HSR_IL; 450 + kvm_skip_instr(vcpu, is_wide); 451 + return 1; 452 + } 453 + 454 + return arm_exit_handlers[hsr_ec](vcpu, run); 455 + default: 456 + kvm_pr_unimpl("Unsupported exception type: %d", 457 + exception_index); 458 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 459 + return 0; 460 + } 546 461 } 547 462 548 463 static int 
kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ··· 654 493 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); 655 494 656 495 vcpu->mode = OUTSIDE_GUEST_MODE; 496 + vcpu->arch.last_pcpu = smp_processor_id(); 657 497 kvm_guest_exit(); 658 498 trace_kvm_exit(*vcpu_pc(vcpu)); 659 499 /* ··· 963 801 if (err) 964 802 goto out_err; 965 803 804 + kvm_coproc_table_init(); 966 805 return 0; 967 806 out_err: 968 807 return err;
+360
arch/arm/kvm/coproc.c
··· 16 16 * along with this program; if not, write to the Free Software 17 17 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 18 */ 19 + #include <linux/mm.h> 19 20 #include <linux/kvm_host.h> 21 + #include <asm/kvm_arm.h> 22 + #include <asm/kvm_host.h> 23 + #include <asm/kvm_emulate.h> 24 + #include <asm/kvm_coproc.h> 25 + #include <asm/cacheflush.h> 26 + #include <asm/cputype.h> 27 + #include <trace/events/kvm.h> 20 28 29 + #include "trace.h" 30 + #include "coproc.h" 31 + 32 + 33 + /****************************************************************************** 34 + * Co-processor emulation 35 + *****************************************************************************/ 36 + 37 + int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) 38 + { 39 + kvm_inject_undefined(vcpu); 40 + return 1; 41 + } 42 + 43 + int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 44 + { 45 + /* 46 + * We can get here, if the host has been built without VFPv3 support, 47 + * but the guest attempted a floating point operation. 
48 + */ 49 + kvm_inject_undefined(vcpu); 50 + return 1; 51 + } 52 + 53 + int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) 54 + { 55 + kvm_inject_undefined(vcpu); 56 + return 1; 57 + } 58 + 59 + int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 60 + { 61 + kvm_inject_undefined(vcpu); 62 + return 1; 63 + } 64 + 65 + /* See note at ARM ARM B1.14.4 */ 66 + static bool access_dcsw(struct kvm_vcpu *vcpu, 67 + const struct coproc_params *p, 68 + const struct coproc_reg *r) 69 + { 70 + u32 val; 71 + int cpu; 72 + 73 + cpu = get_cpu(); 74 + 75 + if (!p->is_write) 76 + return read_from_write_only(vcpu, p); 77 + 78 + cpumask_setall(&vcpu->arch.require_dcache_flush); 79 + cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); 80 + 81 + /* If we were already preempted, take the long way around */ 82 + if (cpu != vcpu->arch.last_pcpu) { 83 + flush_cache_all(); 84 + goto done; 85 + } 86 + 87 + val = *vcpu_reg(vcpu, p->Rt1); 88 + 89 + switch (p->CRm) { 90 + case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ 91 + case 14: /* DCCISW */ 92 + asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); 93 + break; 94 + 95 + case 10: /* DCCSW */ 96 + asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); 97 + break; 98 + } 99 + 100 + done: 101 + put_cpu(); 102 + 103 + return true; 104 + } 105 + 106 + /* 107 + * We could trap ID_DFR0 and tell the guest we don't support performance 108 + * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was 109 + * NAKed, so it will read the PMCR anyway. 110 + * 111 + * Therefore we tell the guest we have 0 counters. Unfortunately, we 112 + * must always support PMCCNTR (the cycle counter): we just RAZ/WI for 113 + * all PM registers, which doesn't crash the guest kernel at least. 
114 + */ 115 + static bool pm_fake(struct kvm_vcpu *vcpu, 116 + const struct coproc_params *p, 117 + const struct coproc_reg *r) 118 + { 119 + if (p->is_write) 120 + return ignore_write(vcpu, p); 121 + else 122 + return read_zero(vcpu, p); 123 + } 124 + 125 + #define access_pmcr pm_fake 126 + #define access_pmcntenset pm_fake 127 + #define access_pmcntenclr pm_fake 128 + #define access_pmovsr pm_fake 129 + #define access_pmselr pm_fake 130 + #define access_pmceid0 pm_fake 131 + #define access_pmceid1 pm_fake 132 + #define access_pmccntr pm_fake 133 + #define access_pmxevtyper pm_fake 134 + #define access_pmxevcntr pm_fake 135 + #define access_pmuserenr pm_fake 136 + #define access_pmintenset pm_fake 137 + #define access_pmintenclr pm_fake 138 + 139 + /* Architected CP15 registers. 140 + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 141 + */ 142 + static const struct coproc_reg cp15_regs[] = { 143 + /* CSSELR: swapped by interrupt.S. */ 144 + { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, 145 + NULL, reset_unknown, c0_CSSELR }, 146 + 147 + /* TTBR0/TTBR1: swapped by interrupt.S. */ 148 + { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, 149 + { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, 150 + 151 + /* TTBCR: swapped by interrupt.S. */ 152 + { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, 153 + NULL, reset_val, c2_TTBCR, 0x00000000 }, 154 + 155 + /* DACR: swapped by interrupt.S. */ 156 + { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, 157 + NULL, reset_unknown, c3_DACR }, 158 + 159 + /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ 160 + { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, 161 + NULL, reset_unknown, c5_DFSR }, 162 + { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, 163 + NULL, reset_unknown, c5_IFSR }, 164 + { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, 165 + NULL, reset_unknown, c5_ADFSR }, 166 + { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, 167 + NULL, reset_unknown, c5_AIFSR }, 168 + 169 + /* DFAR/IFAR: swapped by interrupt.S. 
*/ 170 + { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, 171 + NULL, reset_unknown, c6_DFAR }, 172 + { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, 173 + NULL, reset_unknown, c6_IFAR }, 174 + /* 175 + * DC{C,I,CI}SW operations: 176 + */ 177 + { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, 178 + { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, 179 + { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, 180 + /* 181 + * Dummy performance monitor implementation. 182 + */ 183 + { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, 184 + { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, 185 + { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, 186 + { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, 187 + { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, 188 + { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, 189 + { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, 190 + { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, 191 + { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, 192 + { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, 193 + { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, 194 + { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, 195 + { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, 196 + 197 + /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ 198 + { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, 199 + NULL, reset_unknown, c10_PRRR}, 200 + { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, 201 + NULL, reset_unknown, c10_NMRR}, 202 + 203 + /* VBAR: swapped by interrupt.S. */ 204 + { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, 205 + NULL, reset_val, c12_VBAR, 0x00000000 }, 206 + 207 + /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. 
*/ 208 + { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, 209 + NULL, reset_val, c13_CID, 0x00000000 }, 210 + { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, 211 + NULL, reset_unknown, c13_TID_URW }, 212 + { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, 213 + NULL, reset_unknown, c13_TID_URO }, 214 + { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, 215 + NULL, reset_unknown, c13_TID_PRIV }, 216 + }; 217 + 218 + /* Target specific emulation tables */ 219 + static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; 220 + 221 + void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) 222 + { 223 + target_tables[table->target] = table; 224 + } 225 + 226 + /* Get specific register table for this target. */ 227 + static const struct coproc_reg *get_target_table(unsigned target, size_t *num) 228 + { 229 + struct kvm_coproc_target_table *table; 230 + 231 + table = target_tables[target]; 232 + *num = table->num; 233 + return table->table; 234 + } 235 + 236 + static const struct coproc_reg *find_reg(const struct coproc_params *params, 237 + const struct coproc_reg table[], 238 + unsigned int num) 239 + { 240 + unsigned int i; 241 + 242 + for (i = 0; i < num; i++) { 243 + const struct coproc_reg *r = &table[i]; 244 + 245 + if (params->is_64bit != r->is_64) 246 + continue; 247 + if (params->CRn != r->CRn) 248 + continue; 249 + if (params->CRm != r->CRm) 250 + continue; 251 + if (params->Op1 != r->Op1) 252 + continue; 253 + if (params->Op2 != r->Op2) 254 + continue; 255 + 256 + return r; 257 + } 258 + return NULL; 259 + } 260 + 261 + static int emulate_cp15(struct kvm_vcpu *vcpu, 262 + const struct coproc_params *params) 263 + { 264 + size_t num; 265 + const struct coproc_reg *table, *r; 266 + 267 + trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, 268 + params->CRm, params->Op2, params->is_write); 269 + 270 + table = get_target_table(vcpu->arch.target, &num); 271 + 272 + /* Search target-specific then generic table. 
*/ 273 + r = find_reg(params, table, num); 274 + if (!r) 275 + r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); 276 + 277 + if (likely(r)) { 278 + /* If we don't have an accessor, we should never get here! */ 279 + BUG_ON(!r->access); 280 + 281 + if (likely(r->access(vcpu, params, r))) { 282 + /* Skip instruction, since it was emulated */ 283 + kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); 284 + return 1; 285 + } 286 + /* If access function fails, it should complain. */ 287 + } else { 288 + kvm_err("Unsupported guest CP15 access at: %08x\n", 289 + *vcpu_pc(vcpu)); 290 + print_cp_instr(params); 291 + } 292 + kvm_inject_undefined(vcpu); 293 + return 1; 294 + } 295 + 296 + /** 297 + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 298 + * @vcpu: The VCPU pointer 299 + * @run: The kvm_run struct 300 + */ 301 + int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 302 + { 303 + struct coproc_params params; 304 + 305 + params.CRm = (vcpu->arch.hsr >> 1) & 0xf; 306 + params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; 307 + params.is_write = ((vcpu->arch.hsr & 1) == 0); 308 + params.is_64bit = true; 309 + 310 + params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; 311 + params.Op2 = 0; 312 + params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; 313 + params.CRn = 0; 314 + 315 + return emulate_cp15(vcpu, &params); 316 + } 317 + 318 + static void reset_coproc_regs(struct kvm_vcpu *vcpu, 319 + const struct coproc_reg *table, size_t num) 320 + { 321 + unsigned long i; 322 + 323 + for (i = 0; i < num; i++) 324 + if (table[i].reset) 325 + table[i].reset(vcpu, &table[i]); 326 + } 327 + 328 + /** 329 + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access 330 + * @vcpu: The VCPU pointer 331 + * @run: The kvm_run struct 332 + */ 333 + int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 334 + { 335 + struct coproc_params params; 336 + 337 + params.CRm = (vcpu->arch.hsr >> 1) & 0xf; 338 + params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; 339 + 
params.is_write = ((vcpu->arch.hsr & 1) == 0); 340 + params.is_64bit = false; 341 + 342 + params.CRn = (vcpu->arch.hsr >> 10) & 0xf; 343 + params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; 344 + params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; 345 + params.Rt2 = 0; 346 + 347 + return emulate_cp15(vcpu, &params); 348 + } 349 + 350 + void kvm_coproc_table_init(void) 351 + { 352 + unsigned int i; 353 + 354 + /* Make sure tables are unique and in order. */ 355 + for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) 356 + BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); 357 + } 358 + 359 + /** 360 + * kvm_reset_coprocs - sets cp15 registers to reset value 361 + * @vcpu: The VCPU pointer 362 + * 363 + * This function finds the right table above and sets the registers on the 364 + * virtual CPU struct to their architecturally defined reset values. 365 + */ 21 366 void kvm_reset_coprocs(struct kvm_vcpu *vcpu) 22 367 { 368 + size_t num; 369 + const struct coproc_reg *table; 370 + 371 + /* Catch someone adding a register without putting in reset entry. */ 372 + memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); 373 + 374 + /* Generic chip reset first (so target could override). */ 375 + reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); 376 + 377 + table = get_target_table(vcpu->arch.target, &num); 378 + reset_coproc_regs(vcpu, table, num); 379 + 380 + for (num = 1; num < NR_CP15_REGS; num++) 381 + if (vcpu->arch.cp15[num] == 0x42424242) 382 + panic("Didn't reset vcpu->arch.cp15[%zi]", num); 23 383 }
+153
arch/arm/kvm/coproc.h
··· 1 + /* 2 + * Copyright (C) 2012 - Virtual Open Systems and Columbia University 3 + * Authors: Christoffer Dall <c.dall@virtualopensystems.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License, version 2, as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 17 + */ 18 + 19 + #ifndef __ARM_KVM_COPROC_LOCAL_H__ 20 + #define __ARM_KVM_COPROC_LOCAL_H__ 21 + 22 + struct coproc_params { 23 + unsigned long CRn; 24 + unsigned long CRm; 25 + unsigned long Op1; 26 + unsigned long Op2; 27 + unsigned long Rt1; 28 + unsigned long Rt2; 29 + bool is_64bit; 30 + bool is_write; 31 + }; 32 + 33 + struct coproc_reg { 34 + /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ 35 + unsigned long CRn; 36 + unsigned long CRm; 37 + unsigned long Op1; 38 + unsigned long Op2; 39 + 40 + bool is_64; 41 + 42 + /* Trapped access from guest, if non-NULL. */ 43 + bool (*access)(struct kvm_vcpu *, 44 + const struct coproc_params *, 45 + const struct coproc_reg *); 46 + 47 + /* Initialization for vcpu. */ 48 + void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); 49 + 50 + /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ 51 + unsigned long reg; 52 + 53 + /* Value (usually reset value) */ 54 + u64 val; 55 + }; 56 + 57 + static inline void print_cp_instr(const struct coproc_params *p) 58 + { 59 + /* Look, we even formatted it for you to paste into the table! 
*/ 60 + if (p->is_64bit) { 61 + kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n", 62 + p->CRm, p->Op1, p->is_write ? "write" : "read"); 63 + } else { 64 + kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," 65 + " func_%s },\n", 66 + p->CRn, p->CRm, p->Op1, p->Op2, 67 + p->is_write ? "write" : "read"); 68 + } 69 + } 70 + 71 + static inline bool ignore_write(struct kvm_vcpu *vcpu, 72 + const struct coproc_params *p) 73 + { 74 + return true; 75 + } 76 + 77 + static inline bool read_zero(struct kvm_vcpu *vcpu, 78 + const struct coproc_params *p) 79 + { 80 + *vcpu_reg(vcpu, p->Rt1) = 0; 81 + return true; 82 + } 83 + 84 + static inline bool write_to_read_only(struct kvm_vcpu *vcpu, 85 + const struct coproc_params *params) 86 + { 87 + kvm_debug("CP15 write to read-only register at: %08x\n", 88 + *vcpu_pc(vcpu)); 89 + print_cp_instr(params); 90 + return false; 91 + } 92 + 93 + static inline bool read_from_write_only(struct kvm_vcpu *vcpu, 94 + const struct coproc_params *params) 95 + { 96 + kvm_debug("CP15 read to write-only register at: %08x\n", 97 + *vcpu_pc(vcpu)); 98 + print_cp_instr(params); 99 + return false; 100 + } 101 + 102 + /* Reset functions */ 103 + static inline void reset_unknown(struct kvm_vcpu *vcpu, 104 + const struct coproc_reg *r) 105 + { 106 + BUG_ON(!r->reg); 107 + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); 108 + vcpu->arch.cp15[r->reg] = 0xdecafbad; 109 + } 110 + 111 + static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) 112 + { 113 + BUG_ON(!r->reg); 114 + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); 115 + vcpu->arch.cp15[r->reg] = r->val; 116 + } 117 + 118 + static inline void reset_unknown64(struct kvm_vcpu *vcpu, 119 + const struct coproc_reg *r) 120 + { 121 + BUG_ON(!r->reg); 122 + BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); 123 + 124 + vcpu->arch.cp15[r->reg] = 0xdecafbad; 125 + vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; 126 + } 127 + 128 + static inline int cmp_reg(const struct 
coproc_reg *i1, 129 + const struct coproc_reg *i2) 130 + { 131 + BUG_ON(i1 == i2); 132 + if (!i1) 133 + return 1; 134 + else if (!i2) 135 + return -1; 136 + if (i1->CRn != i2->CRn) 137 + return i1->CRn - i2->CRn; 138 + if (i1->CRm != i2->CRm) 139 + return i1->CRm - i2->CRm; 140 + if (i1->Op1 != i2->Op1) 141 + return i1->Op1 - i2->Op1; 142 + return i1->Op2 - i2->Op2; 143 + } 144 + 145 + 146 + #define CRn(_x) .CRn = _x 147 + #define CRm(_x) .CRm = _x 148 + #define Op1(_x) .Op1 = _x 149 + #define Op2(_x) .Op2 = _x 150 + #define is64 .is_64 = true 151 + #define is32 .is_64 = false 152 + 153 + #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
+162
arch/arm/kvm/coproc_a15.c
··· 1 + /* 2 + * Copyright (C) 2012 - Virtual Open Systems and Columbia University 3 + * Authors: Rusty Russell <rusty@rustcorp.au> 4 + * Christoffer Dall <c.dall@virtualopensystems.com> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License, version 2, as 8 + * published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program; if not, write to the Free Software 17 + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 + */ 19 + #include <linux/kvm_host.h> 20 + #include <asm/cputype.h> 21 + #include <asm/kvm_arm.h> 22 + #include <asm/kvm_host.h> 23 + #include <asm/kvm_emulate.h> 24 + #include <asm/kvm_coproc.h> 25 + #include <linux/init.h> 26 + 27 + static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) 28 + { 29 + /* 30 + * Compute guest MPIDR: 31 + * (Even if we present only one VCPU to the guest on an SMP 32 + * host we don't set the U bit in the MPIDR, or vice versa, as 33 + * revealing the underlying hardware properties is likely to 34 + * be the best choice). 35 + */ 36 + vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK) 37 + | (vcpu->vcpu_id & MPIDR_LEVEL_MASK); 38 + } 39 + 40 + #include "coproc.h" 41 + 42 + /* A15 TRM 4.3.28: RO WI */ 43 + static bool access_actlr(struct kvm_vcpu *vcpu, 44 + const struct coproc_params *p, 45 + const struct coproc_reg *r) 46 + { 47 + if (p->is_write) 48 + return ignore_write(vcpu, p); 49 + 50 + *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; 51 + return true; 52 + } 53 + 54 + /* A15 TRM 4.3.60: R/O. 
*/ 55 + static bool access_cbar(struct kvm_vcpu *vcpu, 56 + const struct coproc_params *p, 57 + const struct coproc_reg *r) 58 + { 59 + if (p->is_write) 60 + return write_to_read_only(vcpu, p); 61 + return read_zero(vcpu, p); 62 + } 63 + 64 + /* A15 TRM 4.3.48: R/O WI. */ 65 + static bool access_l2ctlr(struct kvm_vcpu *vcpu, 66 + const struct coproc_params *p, 67 + const struct coproc_reg *r) 68 + { 69 + if (p->is_write) 70 + return ignore_write(vcpu, p); 71 + 72 + *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; 73 + return true; 74 + } 75 + 76 + static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) 77 + { 78 + u32 l2ctlr, ncores; 79 + 80 + asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); 81 + l2ctlr &= ~(3 << 24); 82 + ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; 83 + l2ctlr |= (ncores & 3) << 24; 84 + 85 + vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; 86 + } 87 + 88 + static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) 89 + { 90 + u32 actlr; 91 + 92 + /* ACTLR contains SMP bit: make sure you create all cpus first! */ 93 + asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); 94 + /* Make the SMP bit consistent with the guest configuration */ 95 + if (atomic_read(&vcpu->kvm->online_vcpus) > 1) 96 + actlr |= 1U << 6; 97 + else 98 + actlr &= ~(1U << 6); 99 + 100 + vcpu->arch.cp15[c1_ACTLR] = actlr; 101 + } 102 + 103 + /* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ 104 + static bool access_l2ectlr(struct kvm_vcpu *vcpu, 105 + const struct coproc_params *p, 106 + const struct coproc_reg *r) 107 + { 108 + if (p->is_write) 109 + return ignore_write(vcpu, p); 110 + 111 + *vcpu_reg(vcpu, p->Rt1) = 0; 112 + return true; 113 + } 114 + 115 + /* 116 + * A15-specific CP15 registers. 117 + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 118 + */ 119 + static const struct coproc_reg a15_regs[] = { 120 + /* MPIDR: we use VMPIDR for guest access. 
*/ 121 + { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, 122 + NULL, reset_mpidr, c0_MPIDR }, 123 + 124 + /* SCTLR: swapped by interrupt.S. */ 125 + { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, 126 + NULL, reset_val, c1_SCTLR, 0x00C50078 }, 127 + /* ACTLR: trapped by HCR.TAC bit. */ 128 + { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, 129 + access_actlr, reset_actlr, c1_ACTLR }, 130 + /* CPACR: swapped by interrupt.S. */ 131 + { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, 132 + NULL, reset_val, c1_CPACR, 0x00000000 }, 133 + 134 + /* 135 + * L2CTLR access (guest wants to know #CPUs). 136 + */ 137 + { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, 138 + access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, 139 + { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, 140 + 141 + /* The Configuration Base Address Register. */ 142 + { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, 143 + }; 144 + 145 + static struct kvm_coproc_target_table a15_target_table = { 146 + .target = KVM_ARM_TARGET_CORTEX_A15, 147 + .table = a15_regs, 148 + .num = ARRAY_SIZE(a15_regs), 149 + }; 150 + 151 + static int __init coproc_a15_init(void) 152 + { 153 + unsigned int i; 154 + 155 + for (i = 1; i < ARRAY_SIZE(a15_regs); i++) 156 + BUG_ON(cmp_reg(&a15_regs[i-1], 157 + &a15_regs[i]) >= 0); 158 + 159 + kvm_register_target_coproc_table(&a15_target_table); 160 + return 0; 161 + } 162 + late_initcall(coproc_a15_init);
+218
arch/arm/kvm/emulate.c
··· 16 16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 17 17 */ 18 18 19 + #include <linux/mm.h> 20 + #include <linux/kvm_host.h> 21 + #include <asm/kvm_arm.h> 19 22 #include <asm/kvm_emulate.h> 23 + #include <trace/events/kvm.h> 24 + 25 + #include "trace.h" 20 26 21 27 #define VCPU_NR_MODES 6 22 28 #define VCPU_REG_OFFSET_USR 0 ··· 158 152 default: 159 153 BUG(); 160 154 } 155 + } 156 + 157 + /** 158 + * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest 159 + * @vcpu: the vcpu pointer 160 + * @run: the kvm_run structure pointer 161 + * 162 + * Simply sets the wait_for_interrupts flag on the vcpu structure, which will 163 + * halt execution of world-switches and schedule other host processes until 164 + * there is an incoming IRQ or FIQ to the VM. 165 + */ 166 + int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) 167 + { 168 + trace_kvm_wfi(*vcpu_pc(vcpu)); 169 + kvm_vcpu_block(vcpu); 170 + return 1; 171 + } 172 + 173 + /** 174 + * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block 175 + * @vcpu: The VCPU pointer 176 + * 177 + * When exceptions occur while instructions are executed in Thumb IF-THEN 178 + * blocks, the ITSTATE field of the CPSR is not advanved (updated), so we have 179 + * to do this little bit of work manually. 
The fields map like this: 180 + * 181 + * IT[7:0] -> CPSR[26:25],CPSR[15:10] 182 + */ 183 + static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) 184 + { 185 + unsigned long itbits, cond; 186 + unsigned long cpsr = *vcpu_cpsr(vcpu); 187 + bool is_arm = !(cpsr & PSR_T_BIT); 188 + 189 + BUG_ON(is_arm && (cpsr & PSR_IT_MASK)); 190 + 191 + if (!(cpsr & PSR_IT_MASK)) 192 + return; 193 + 194 + cond = (cpsr & 0xe000) >> 13; 195 + itbits = (cpsr & 0x1c00) >> (10 - 2); 196 + itbits |= (cpsr & (0x3 << 25)) >> 25; 197 + 198 + /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */ 199 + if ((itbits & 0x7) == 0) 200 + itbits = cond = 0; 201 + else 202 + itbits = (itbits << 1) & 0x1f; 203 + 204 + cpsr &= ~PSR_IT_MASK; 205 + cpsr |= cond << 13; 206 + cpsr |= (itbits & 0x1c) << (10 - 2); 207 + cpsr |= (itbits & 0x3) << 25; 208 + *vcpu_cpsr(vcpu) = cpsr; 209 + } 210 + 211 + /** 212 + * kvm_skip_instr - skip a trapped instruction and proceed to the next 213 + * @vcpu: The vcpu pointer 214 + */ 215 + void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) 216 + { 217 + bool is_thumb; 218 + 219 + is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT); 220 + if (is_thumb && !is_wide_instr) 221 + *vcpu_pc(vcpu) += 2; 222 + else 223 + *vcpu_pc(vcpu) += 4; 224 + kvm_adjust_itstate(vcpu); 225 + } 226 + 227 + 228 + /****************************************************************************** 229 + * Inject exceptions into the guest 230 + */ 231 + 232 + static u32 exc_vector_base(struct kvm_vcpu *vcpu) 233 + { 234 + u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; 235 + u32 vbar = vcpu->arch.cp15[c12_VBAR]; 236 + 237 + if (sctlr & SCTLR_V) 238 + return 0xffff0000; 239 + else /* always have security exceptions */ 240 + return vbar; 241 + } 242 + 243 + /** 244 + * kvm_inject_undefined - inject an undefined exception into the guest 245 + * @vcpu: The VCPU to receive the undefined exception 246 + * 247 + * It is assumed that this code is called from the VCPU thread and that the 248 + * VCPU therefore is 
not currently executing guest code. 249 + * 250 + * Modelled after TakeUndefInstrException() pseudocode. 251 + */ 252 + void kvm_inject_undefined(struct kvm_vcpu *vcpu) 253 + { 254 + u32 new_lr_value; 255 + u32 new_spsr_value; 256 + u32 cpsr = *vcpu_cpsr(vcpu); 257 + u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; 258 + bool is_thumb = (cpsr & PSR_T_BIT); 259 + u32 vect_offset = 4; 260 + u32 return_offset = (is_thumb) ? 2 : 4; 261 + 262 + new_spsr_value = cpsr; 263 + new_lr_value = *vcpu_pc(vcpu) - return_offset; 264 + 265 + *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; 266 + *vcpu_cpsr(vcpu) |= PSR_I_BIT; 267 + *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); 268 + 269 + if (sctlr & SCTLR_TE) 270 + *vcpu_cpsr(vcpu) |= PSR_T_BIT; 271 + if (sctlr & SCTLR_EE) 272 + *vcpu_cpsr(vcpu) |= PSR_E_BIT; 273 + 274 + /* Note: These now point to UND banked copies */ 275 + *vcpu_spsr(vcpu) = cpsr; 276 + *vcpu_reg(vcpu, 14) = new_lr_value; 277 + 278 + /* Branch to exception vector */ 279 + *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; 280 + } 281 + 282 + /* 283 + * Modelled after TakeDataAbortException() and TakePrefetchAbortException 284 + * pseudocode. 285 + */ 286 + static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) 287 + { 288 + u32 new_lr_value; 289 + u32 new_spsr_value; 290 + u32 cpsr = *vcpu_cpsr(vcpu); 291 + u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; 292 + bool is_thumb = (cpsr & PSR_T_BIT); 293 + u32 vect_offset; 294 + u32 return_offset = (is_thumb) ? 
4 : 0; 295 + bool is_lpae; 296 + 297 + new_spsr_value = cpsr; 298 + new_lr_value = *vcpu_pc(vcpu) + return_offset; 299 + 300 + *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; 301 + *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; 302 + *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); 303 + 304 + if (sctlr & SCTLR_TE) 305 + *vcpu_cpsr(vcpu) |= PSR_T_BIT; 306 + if (sctlr & SCTLR_EE) 307 + *vcpu_cpsr(vcpu) |= PSR_E_BIT; 308 + 309 + /* Note: These now point to ABT banked copies */ 310 + *vcpu_spsr(vcpu) = cpsr; 311 + *vcpu_reg(vcpu, 14) = new_lr_value; 312 + 313 + if (is_pabt) 314 + vect_offset = 12; 315 + else 316 + vect_offset = 16; 317 + 318 + /* Branch to exception vector */ 319 + *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; 320 + 321 + if (is_pabt) { 322 + /* Set DFAR and DFSR */ 323 + vcpu->arch.cp15[c6_IFAR] = addr; 324 + is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); 325 + /* Always give debug fault for now - should give guest a clue */ 326 + if (is_lpae) 327 + vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; 328 + else 329 + vcpu->arch.cp15[c5_IFSR] = 2; 330 + } else { /* !iabt */ 331 + /* Set DFAR and DFSR */ 332 + vcpu->arch.cp15[c6_DFAR] = addr; 333 + is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); 334 + /* Always give debug fault for now - should give guest a clue */ 335 + if (is_lpae) 336 + vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; 337 + else 338 + vcpu->arch.cp15[c5_DFSR] = 2; 339 + } 340 + 341 + } 342 + 343 + /** 344 + * kvm_inject_dabt - inject a data abort into the guest 345 + * @vcpu: The VCPU to receive the undefined exception 346 + * @addr: The address to report in the DFAR 347 + * 348 + * It is assumed that this code is called from the VCPU thread and that the 349 + * VCPU therefore is not currently executing guest code. 
350 + */ 351 + void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) 352 + { 353 + inject_abt(vcpu, false, addr); 354 + } 355 + 356 + /** 357 + * kvm_inject_pabt - inject a prefetch abort into the guest 358 + * @vcpu: The VCPU to receive the undefined exception 359 + * @addr: The address to report in the DFAR 360 + * 361 + * It is assumed that this code is called from the VCPU thread and that the 362 + * VCPU therefore is not currently executing guest code. 363 + */ 364 + void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) 365 + { 366 + inject_abt(vcpu, true, addr); 161 367 }
+65
arch/arm/kvm/trace.h
··· 64 64 __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) 65 65 ); 66 66 67 + /* Architecturally implementation defined CP15 register access */ 68 + TRACE_EVENT(kvm_emulate_cp15_imp, 69 + TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, 70 + unsigned long CRm, unsigned long Op2, bool is_write), 71 + TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), 72 + 73 + TP_STRUCT__entry( 74 + __field( unsigned int, Op1 ) 75 + __field( unsigned int, Rt1 ) 76 + __field( unsigned int, CRn ) 77 + __field( unsigned int, CRm ) 78 + __field( unsigned int, Op2 ) 79 + __field( bool, is_write ) 80 + ), 81 + 82 + TP_fast_assign( 83 + __entry->is_write = is_write; 84 + __entry->Op1 = Op1; 85 + __entry->Rt1 = Rt1; 86 + __entry->CRn = CRn; 87 + __entry->CRm = CRm; 88 + __entry->Op2 = Op2; 89 + ), 90 + 91 + TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", 92 + (__entry->is_write) ? "mcr" : "mrc", 93 + __entry->Op1, __entry->Rt1, __entry->CRn, 94 + __entry->CRm, __entry->Op2) 95 + ); 96 + 97 + TRACE_EVENT(kvm_wfi, 98 + TP_PROTO(unsigned long vcpu_pc), 99 + TP_ARGS(vcpu_pc), 100 + 101 + TP_STRUCT__entry( 102 + __field( unsigned long, vcpu_pc ) 103 + ), 104 + 105 + TP_fast_assign( 106 + __entry->vcpu_pc = vcpu_pc; 107 + ), 108 + 109 + TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) 110 + ); 111 + 67 112 TRACE_EVENT(kvm_unmap_hva, 68 113 TP_PROTO(unsigned long hva), 69 114 TP_ARGS(hva), ··· 155 110 ), 156 111 157 112 TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) 113 + ); 114 + 115 + TRACE_EVENT(kvm_hvc, 116 + TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), 117 + TP_ARGS(vcpu_pc, r0, imm), 118 + 119 + TP_STRUCT__entry( 120 + __field( unsigned long, vcpu_pc ) 121 + __field( unsigned long, r0 ) 122 + __field( unsigned long, imm ) 123 + ), 124 + 125 + TP_fast_assign( 126 + __entry->vcpu_pc = vcpu_pc; 127 + __entry->r0 = r0; 128 + __entry->imm = imm; 129 + ), 130 + 131 + TP_printk("HVC at 0x%08lx 
(r0: 0x%08lx, imm: 0x%lx", 132 + __entry->vcpu_pc, __entry->r0, __entry->imm) 158 133 ); 159 134 160 135 #endif /* _TRACE_KVM_H */