Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: ARM: World-switch implementation

Provides complete world-switch implementation to switch to other guests
running in non-secure modes. Includes Hyp exception handlers that
capture necessary exception information and store the information on
the VCPU and KVM structures.

The following Hyp-ABI is also documented in the code:

Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
Switching to Hyp mode is done through a simple HVC #0 instruction. The
exception vector code will check that the HVC comes from VMID==0 and if
so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
- r0 contains a pointer to a HYP function
- r1, r2, and r3 contain arguments to the above function.
- The HYP function will be called with its arguments in r0, r1 and r2.
On HYP function return, we return directly to SVC.

A call to a function executing in Hyp mode is performed like the following:

<svc code>
ldr r0, =BSYM(my_hyp_fn)
ldr r1, =my_param
hvc #0 ; Call my_hyp_fn(my_param) from HYP mode
<svc code>

Otherwise, the world-switch is pretty straight-forward. All state that
can be modified by the guest is first backed up on the Hyp stack and the
VCPU values are loaded onto the hardware. State, which is not loaded, but
theoretically modifiable by the guest is protected through the
virtualization features to generate a trap and cause software emulation.
Upon guest return, all state is restored from hardware onto the VCPU
struct and the original state is restored from the Hyp-stack onto the
hardware.

SMP support using the VMPIDR calculated on the basis of the host MPIDR
and overriding the low bits with KVM vcpu_id contributed by Marc Zyngier.

Reuse of VMIDs has been implemented by Antonios Motakis and adapted from
a separate patch into the appropriate patches introducing the
functionality. Note that the VMIDs are stored per VM as required by the ARM
architecture reference manual.

To support VFP/NEON we trap those instructions using the HCPTR. When
we trap, we switch the FPU. After a guest exit, the VFP state is
returned to the host. When disabling access to floating point
instructions, we also mask FPEXC_EN in order to avoid the guest
receiving Undefined instruction exceptions before we have a chance to
switch back the floating point state. We are reusing vfp_hard_struct,
so we depend on VFPv3 being enabled in the host kernel, if not, we still
trap cp10 and cp11 in order to inject an undefined instruction exception
whenever the guest tries to use VFP/NEON. VFP/NEON developed by
Antonios Motakis and Rusty Russell.

Aborts that are permission faults, and not stage-1 page table walk, do
not report the faulting address in the HPFAR. We have to resolve the
IPA, and store it just like the HPFAR register on the VCPU struct. If
the IPA cannot be resolved, it means another CPU is playing with the
page tables, and we simply restart the guest. This quirk was fixed by
Marc Zyngier.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Antonios Motakis <a.motakis@virtualopensystems.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>

+1122 -4
+51
arch/arm/include/asm/kvm_arm.h
··· 98 98 #define TTBCR_T0SZ 3 99 99 #define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) 100 100 101 + /* Hyp System Trap Register */ 102 + #define HSTR_T(x) (1 << x) 103 + #define HSTR_TTEE (1 << 16) 104 + #define HSTR_TJDBX (1 << 17) 105 + 106 + /* Hyp Coprocessor Trap Register */ 107 + #define HCPTR_TCP(x) (1 << x) 108 + #define HCPTR_TCP_MASK (0x3fff) 109 + #define HCPTR_TASE (1 << 15) 110 + #define HCPTR_TTA (1 << 20) 111 + #define HCPTR_TCPAC (1 << 31) 112 + 101 113 /* Hyp Debug Configuration Register bits */ 102 114 #define HDCR_TDRA (1 << 11) 103 115 #define HDCR_TDOSA (1 << 10) ··· 156 144 #else 157 145 #define VTTBR_X (5 - KVM_T0SZ) 158 146 #endif 147 + #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) 148 + #define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) 149 + #define VTTBR_VMID_SHIFT (48LLU) 150 + #define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) 159 151 152 + /* Hyp Syndrome Register (HSR) bits */ 153 + #define HSR_EC_SHIFT (26) 154 + #define HSR_EC (0x3fU << HSR_EC_SHIFT) 155 + #define HSR_IL (1U << 25) 156 + #define HSR_ISS (HSR_IL - 1) 157 + #define HSR_ISV_SHIFT (24) 158 + #define HSR_ISV (1U << HSR_ISV_SHIFT) 159 + #define HSR_FSC (0x3f) 160 + #define HSR_FSC_TYPE (0x3c) 161 + #define HSR_WNR (1 << 6) 162 + 163 + #define FSC_FAULT (0x04) 164 + #define FSC_PERM (0x0c) 165 + 166 + /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ 167 + #define HPFAR_MASK (~0xf) 168 + 169 + #define HSR_EC_UNKNOWN (0x00) 170 + #define HSR_EC_WFI (0x01) 171 + #define HSR_EC_CP15_32 (0x03) 172 + #define HSR_EC_CP15_64 (0x04) 173 + #define HSR_EC_CP14_MR (0x05) 174 + #define HSR_EC_CP14_LS (0x06) 175 + #define HSR_EC_CP_0_13 (0x07) 176 + #define HSR_EC_CP10_ID (0x08) 177 + #define HSR_EC_JAZELLE (0x09) 178 + #define HSR_EC_BXJ (0x0A) 179 + #define HSR_EC_CP14_64 (0x0C) 180 + #define HSR_EC_SVC_HYP (0x11) 181 + #define HSR_EC_HVC (0x12) 182 + #define HSR_EC_SMC (0x13) 183 + #define HSR_EC_IABT (0x20) 184 + #define 
HSR_EC_IABT_HYP (0x21) 185 + #define HSR_EC_DABT (0x24) 186 + #define HSR_EC_DABT_HYP (0x25) 160 187 161 188 #endif /* __ARM_KVM_ARM_H__ */
+13
arch/arm/include/asm/kvm_host.h
··· 21 21 22 22 #include <asm/kvm.h> 23 23 #include <asm/kvm_asm.h> 24 + #include <asm/fpstate.h> 24 25 25 26 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS 26 27 #define KVM_MEMORY_SLOTS 32 ··· 86 85 u32 hxfar; /* Hyp Data/Inst Fault Address Register */ 87 86 u32 hpfar; /* Hyp IPA Fault Address Register */ 88 87 88 + /* Floating point registers (VFP and Advanced SIMD/NEON) */ 89 + struct vfp_hard_struct vfp_guest; 90 + struct vfp_hard_struct *vfp_host; 91 + 92 + /* 93 + * Anything that is not used directly from assembly code goes 94 + * here. 95 + */ 89 96 /* Interrupt related fields */ 90 97 u32 irq_lines; /* IRQ and FIQ levels */ 91 98 ··· 102 93 103 94 /* Cache some mmu pages needed inside spinlock regions */ 104 95 struct kvm_mmu_memory_cache mmu_page_cache; 96 + 97 + /* Detect first run of a vcpu */ 98 + bool has_run_once; 105 99 }; 106 100 107 101 struct kvm_vm_stat { ··· 124 112 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); 125 113 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); 126 114 u64 kvm_call_hyp(void *hypfn, ...); 115 + void force_vm_exit(const cpumask_t *mask); 127 116 128 117 #define KVM_ARCH_WANT_MMU_NOTIFIER 129 118 struct kvm;
+25
arch/arm/kernel/asm-offsets.c
··· 13 13 #include <linux/sched.h> 14 14 #include <linux/mm.h> 15 15 #include <linux/dma-mapping.h> 16 + #ifdef CONFIG_KVM_ARM_HOST 17 + #include <linux/kvm_host.h> 18 + #endif 16 19 #include <asm/cacheflush.h> 17 20 #include <asm/glue-df.h> 18 21 #include <asm/glue-pf.h> ··· 149 146 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); 150 147 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); 151 148 DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); 149 + #ifdef CONFIG_KVM_ARM_HOST 150 + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 151 + DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); 152 + DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); 153 + DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); 154 + DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); 155 + DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); 156 + DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); 157 + DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); 158 + DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); 159 + DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); 160 + DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); 161 + DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); 162 + DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); 163 + DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); 164 + DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); 165 + DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); 166 + DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); 167 + DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); 168 + DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); 169 + DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); 170 + #endif 152 171 return 0; 153 172 }
+199 -1
arch/arm/kvm/arm.c
··· 40 40 #include <asm/kvm_arm.h> 41 41 #include <asm/kvm_asm.h> 42 42 #include <asm/kvm_mmu.h> 43 + #include <asm/kvm_emulate.h> 43 44 44 45 #ifdef REQUIRES_VIRT 45 46 __asm__(".arch_extension virt"); ··· 50 49 static struct vfp_hard_struct __percpu *kvm_host_vfp_state; 51 50 static unsigned long hyp_default_vectors; 52 51 52 + /* The VMID used in the VTTBR */ 53 + static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); 54 + static u8 kvm_next_vmid; 55 + static DEFINE_SPINLOCK(kvm_vmid_lock); 53 56 54 57 int kvm_arch_hardware_enable(void *garbage) 55 58 { ··· 281 276 282 277 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 283 278 { 279 + /* Force users to call KVM_ARM_VCPU_INIT */ 280 + vcpu->arch.target = -1; 284 281 return 0; 285 282 } 286 283 ··· 293 286 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 294 287 { 295 288 vcpu->cpu = cpu; 289 + vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); 296 290 } 297 291 298 292 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) ··· 324 316 return 0; 325 317 } 326 318 319 + /* Just ensure a guest exit from a particular CPU */ 320 + static void exit_vm_noop(void *info) 321 + { 322 + } 323 + 324 + void force_vm_exit(const cpumask_t *mask) 325 + { 326 + smp_call_function_many(mask, exit_vm_noop, NULL, true); 327 + } 328 + 329 + /** 330 + * need_new_vmid_gen - check that the VMID is still valid 331 + * @kvm: The VM's VMID to checkt 332 + * 333 + * return true if there is a new generation of VMIDs being used 334 + * 335 + * The hardware supports only 256 values with the value zero reserved for the 336 + * host, so we check if an assigned value belongs to a previous generation, 337 + * which which requires us to assign a new value. If we're the first to use a 338 + * VMID for the new generation, we must flush necessary caches and TLBs on all 339 + * CPUs. 
340 + */ 341 + static bool need_new_vmid_gen(struct kvm *kvm) 342 + { 343 + return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); 344 + } 345 + 346 + /** 347 + * update_vttbr - Update the VTTBR with a valid VMID before the guest runs 348 + * @kvm The guest that we are about to run 349 + * 350 + * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the 351 + * VM has a valid VMID, otherwise assigns a new one and flushes corresponding 352 + * caches and TLBs. 353 + */ 354 + static void update_vttbr(struct kvm *kvm) 355 + { 356 + phys_addr_t pgd_phys; 357 + u64 vmid; 358 + 359 + if (!need_new_vmid_gen(kvm)) 360 + return; 361 + 362 + spin_lock(&kvm_vmid_lock); 363 + 364 + /* 365 + * We need to re-check the vmid_gen here to ensure that if another vcpu 366 + * already allocated a valid vmid for this vm, then this vcpu should 367 + * use the same vmid. 368 + */ 369 + if (!need_new_vmid_gen(kvm)) { 370 + spin_unlock(&kvm_vmid_lock); 371 + return; 372 + } 373 + 374 + /* First user of a new VMID generation? */ 375 + if (unlikely(kvm_next_vmid == 0)) { 376 + atomic64_inc(&kvm_vmid_gen); 377 + kvm_next_vmid = 1; 378 + 379 + /* 380 + * On SMP we know no other CPUs can use this CPU's or each 381 + * other's VMID after force_vm_exit returns since the 382 + * kvm_vmid_lock blocks them from reentry to the guest. 383 + */ 384 + force_vm_exit(cpu_all_mask); 385 + /* 386 + * Now broadcast TLB + ICACHE invalidation over the inner 387 + * shareable domain to make sure all data structures are 388 + * clean. 
389 + */ 390 + kvm_call_hyp(__kvm_flush_vm_context); 391 + } 392 + 393 + kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); 394 + kvm->arch.vmid = kvm_next_vmid; 395 + kvm_next_vmid++; 396 + 397 + /* update vttbr to be used with the new vmid */ 398 + pgd_phys = virt_to_phys(kvm->arch.pgd); 399 + vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; 400 + kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; 401 + kvm->arch.vttbr |= vmid; 402 + 403 + spin_unlock(&kvm_vmid_lock); 404 + } 405 + 406 + /* 407 + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on 408 + * proper exit to QEMU. 409 + */ 410 + static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, 411 + int exception_index) 412 + { 413 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 414 + return 0; 415 + } 416 + 417 + static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) 418 + { 419 + if (likely(vcpu->arch.has_run_once)) 420 + return 0; 421 + 422 + vcpu->arch.has_run_once = true; 423 + return 0; 424 + } 425 + 426 + /** 427 + * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code 428 + * @vcpu: The VCPU pointer 429 + * @run: The kvm_run structure pointer used for userspace state exchange 430 + * 431 + * This function is called through the VCPU_RUN ioctl called from user space. It 432 + * will execute VM code in a loop until the time slice for the process is used 433 + * or some emulation is needed from user space in which case the function will 434 + * return with return value 0 and with the kvm_run structure filled in with the 435 + * required data for the requested emulation. 
436 + */ 327 437 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 328 438 { 329 - return -EINVAL; 439 + int ret; 440 + sigset_t sigsaved; 441 + 442 + /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ 443 + if (unlikely(vcpu->arch.target < 0)) 444 + return -ENOEXEC; 445 + 446 + ret = kvm_vcpu_first_run_init(vcpu); 447 + if (ret) 448 + return ret; 449 + 450 + if (vcpu->sigset_active) 451 + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 452 + 453 + ret = 1; 454 + run->exit_reason = KVM_EXIT_UNKNOWN; 455 + while (ret > 0) { 456 + /* 457 + * Check conditions before entering the guest 458 + */ 459 + cond_resched(); 460 + 461 + update_vttbr(vcpu->kvm); 462 + 463 + local_irq_disable(); 464 + 465 + /* 466 + * Re-check atomic conditions 467 + */ 468 + if (signal_pending(current)) { 469 + ret = -EINTR; 470 + run->exit_reason = KVM_EXIT_INTR; 471 + } 472 + 473 + if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { 474 + local_irq_enable(); 475 + continue; 476 + } 477 + 478 + /************************************************************** 479 + * Enter the guest 480 + */ 481 + trace_kvm_entry(*vcpu_pc(vcpu)); 482 + kvm_guest_enter(); 483 + vcpu->mode = IN_GUEST_MODE; 484 + 485 + ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); 486 + 487 + vcpu->mode = OUTSIDE_GUEST_MODE; 488 + kvm_guest_exit(); 489 + trace_kvm_exit(*vcpu_pc(vcpu)); 490 + /* 491 + * We may have taken a host interrupt in HYP mode (ie 492 + * while executing the guest). This interrupt is still 493 + * pending, as we haven't serviced it yet! 494 + * 495 + * We're now back in SVC mode, with interrupts 496 + * disabled. Enabling the interrupts now will have 497 + * the effect of taking the interrupt again, in SVC 498 + * mode this time. 
499 + */ 500 + local_irq_enable(); 501 + 502 + /* 503 + * Back from guest 504 + *************************************************************/ 505 + 506 + ret = handle_exit(vcpu, run, ret); 507 + } 508 + 509 + if (vcpu->sigset_active) 510 + sigprocmask(SIG_SETMASK, &sigsaved, NULL); 511 + return ret; 330 512 } 331 513 332 514 static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
+393 -3
arch/arm/kvm/interrupts.S
··· 20 20 #include <linux/const.h> 21 21 #include <asm/unified.h> 22 22 #include <asm/page.h> 23 + #include <asm/ptrace.h> 23 24 #include <asm/asm-offsets.h> 24 25 #include <asm/kvm_asm.h> 25 26 #include <asm/kvm_arm.h> 27 + #include <asm/vfpmacros.h> 28 + #include "interrupts_head.S" 26 29 27 30 .text 28 31 ··· 34 31 35 32 /******************************************************************** 36 33 * Flush per-VMID TLBs 34 + * 35 + * void __kvm_tlb_flush_vmid(struct kvm *kvm); 36 + * 37 + * We rely on the hardware to broadcast the TLB invalidation to all CPUs 38 + * inside the inner-shareable domain (which is the case for all v7 39 + * implementations). If we come across a non-IS SMP implementation, we'll 40 + * have to use an IPI based mechanism. Until then, we stick to the simple 41 + * hardware assisted version. 37 42 */ 38 43 ENTRY(__kvm_tlb_flush_vmid) 44 + push {r2, r3} 45 + 46 + add r0, r0, #KVM_VTTBR 47 + ldrd r2, r3, [r0] 48 + mcrr p15, 6, r2, r3, c2 @ Write VTTBR 49 + isb 50 + mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) 51 + dsb 52 + isb 53 + mov r2, #0 54 + mov r3, #0 55 + mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 56 + isb @ Not necessary if followed by eret 57 + 58 + pop {r2, r3} 39 59 bx lr 40 60 ENDPROC(__kvm_tlb_flush_vmid) 41 61 42 62 /******************************************************************** 43 - * Flush TLBs and instruction caches of current CPU for all VMIDs 63 + * Flush TLBs and instruction caches of all CPUs inside the inner-shareable 64 + * domain, for all VMIDs 65 + * 66 + * void __kvm_flush_vm_context(void); 44 67 */ 45 68 ENTRY(__kvm_flush_vm_context) 69 + mov r0, #0 @ rn parameter for c15 flushes is SBZ 70 + 71 + /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ 72 + mcr p15, 4, r0, c8, c3, 4 73 + /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ 74 + mcr p15, 0, r0, c7, c1, 0 75 + dsb 76 + isb @ Not necessary if followed by eret 77 + 46 78 bx lr 47 79 ENDPROC(__kvm_flush_vm_context) 48 80 81 + 
49 82 /******************************************************************** 50 83 * Hypervisor world-switch code 84 + * 85 + * 86 + * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) 51 87 */ 52 88 ENTRY(__kvm_vcpu_run) 53 - bx lr 89 + @ Save the vcpu pointer 90 + mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR 91 + 92 + save_host_regs 93 + 94 + @ Store hardware CP15 state and load guest state 95 + read_cp15_state store_to_vcpu = 0 96 + write_cp15_state read_from_vcpu = 1 97 + 98 + @ If the host kernel has not been configured with VFPv3 support, 99 + @ then it is safer if we deny guests from using it as well. 100 + #ifdef CONFIG_VFPv3 101 + @ Set FPEXC_EN so the guest doesn't trap floating point instructions 102 + VFPFMRX r2, FPEXC @ VMRS 103 + push {r2} 104 + orr r2, r2, #FPEXC_EN 105 + VFPFMXR FPEXC, r2 @ VMSR 106 + #endif 107 + 108 + @ Configure Hyp-role 109 + configure_hyp_role vmentry 110 + 111 + @ Trap coprocessor CRx accesses 112 + set_hstr vmentry 113 + set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) 114 + set_hdcr vmentry 115 + 116 + @ Write configured ID register into MIDR alias 117 + ldr r1, [vcpu, #VCPU_MIDR] 118 + mcr p15, 4, r1, c0, c0, 0 119 + 120 + @ Write guest view of MPIDR into VMPIDR 121 + ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] 122 + mcr p15, 4, r1, c0, c0, 5 123 + 124 + @ Set up guest memory translation 125 + ldr r1, [vcpu, #VCPU_KVM] 126 + add r1, r1, #KVM_VTTBR 127 + ldrd r2, r3, [r1] 128 + mcrr p15, 6, r2, r3, c2 @ Write VTTBR 129 + 130 + @ We're all done, just restore the GPRs and go to the guest 131 + restore_guest_regs 132 + clrex @ Clear exclusive monitor 133 + eret 134 + 135 + __kvm_vcpu_return: 136 + /* 137 + * return convention: 138 + * guest r0, r1, r2 saved on the stack 139 + * r0: vcpu pointer 140 + * r1: exception code 141 + */ 142 + save_guest_regs 143 + 144 + @ Set VMID == 0 145 + mov r2, #0 146 + mov r3, #0 147 + mcrr p15, 6, r2, r3, c2 @ Write VTTBR 148 + 149 + @ Don't trap coprocessor accesses for host kernel 150 + set_hstr 
vmexit 151 + set_hdcr vmexit 152 + set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) 153 + 154 + #ifdef CONFIG_VFPv3 155 + @ Save floating point registers we if let guest use them. 156 + tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) 157 + bne after_vfp_restore 158 + 159 + @ Switch VFP/NEON hardware state to the host's 160 + add r7, vcpu, #VCPU_VFP_GUEST 161 + store_vfp_state r7 162 + add r7, vcpu, #VCPU_VFP_HOST 163 + ldr r7, [r7] 164 + restore_vfp_state r7 165 + 166 + after_vfp_restore: 167 + @ Restore FPEXC_EN which we clobbered on entry 168 + pop {r2} 169 + VFPFMXR FPEXC, r2 170 + #endif 171 + 172 + @ Reset Hyp-role 173 + configure_hyp_role vmexit 174 + 175 + @ Let host read hardware MIDR 176 + mrc p15, 0, r2, c0, c0, 0 177 + mcr p15, 4, r2, c0, c0, 0 178 + 179 + @ Back to hardware MPIDR 180 + mrc p15, 0, r2, c0, c0, 5 181 + mcr p15, 4, r2, c0, c0, 5 182 + 183 + @ Store guest CP15 state and restore host state 184 + read_cp15_state store_to_vcpu = 1 185 + write_cp15_state read_from_vcpu = 0 186 + 187 + restore_host_regs 188 + clrex @ Clear exclusive monitor 189 + mov r0, r1 @ Return the return code 190 + mov r1, #0 @ Clear upper bits in return value 191 + bx lr @ return to IOCTL 54 192 55 193 /******************************************************************** 56 194 * Call function in Hyp mode ··· 221 77 222 78 /******************************************************************** 223 79 * Hypervisor exception vector and handlers 80 + * 81 + * 82 + * The KVM/ARM Hypervisor ABI is defined as follows: 83 + * 84 + * Entry to Hyp mode from the host kernel will happen _only_ when an HVC 85 + * instruction is issued since all traps are disabled when running the host 86 + * kernel as per the Hyp-mode initialization at boot time. 87 + * 88 + * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc 89 + * below) when the HVC instruction is called from SVC mode (i.e. 
a guest or the 90 + * host kernel) and they cause a trap to the vector page + offset 0xc when HVC 91 + * instructions are called from within Hyp-mode. 92 + * 93 + * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): 94 + * Switching to Hyp mode is done through a simple HVC #0 instruction. The 95 + * exception vector code will check that the HVC comes from VMID==0 and if 96 + * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. 97 + * - r0 contains a pointer to a HYP function 98 + * - r1, r2, and r3 contain arguments to the above function. 99 + * - The HYP function will be called with its arguments in r0, r1 and r2. 100 + * On HYP function return, we return directly to SVC. 101 + * 102 + * Note that the above is used to execute code in Hyp-mode from a host-kernel 103 + * point of view, and is a different concept from performing a world-switch and 104 + * executing guest code SVC mode (with a VMID != 0). 224 105 */ 106 + 107 + /* Handle undef, svc, pabt, or dabt by crashing with a user notice */ 108 + .macro bad_exception exception_code, panic_str 109 + push {r0-r2} 110 + mrrc p15, 6, r0, r1, c2 @ Read VTTBR 111 + lsr r1, r1, #16 112 + ands r1, r1, #0xff 113 + beq 99f 114 + 115 + load_vcpu @ Load VCPU pointer 116 + .if \exception_code == ARM_EXCEPTION_DATA_ABORT 117 + mrc p15, 4, r2, c5, c2, 0 @ HSR 118 + mrc p15, 4, r1, c6, c0, 0 @ HDFAR 119 + str r2, [vcpu, #VCPU_HSR] 120 + str r1, [vcpu, #VCPU_HxFAR] 121 + .endif 122 + .if \exception_code == ARM_EXCEPTION_PREF_ABORT 123 + mrc p15, 4, r2, c5, c2, 0 @ HSR 124 + mrc p15, 4, r1, c6, c0, 2 @ HIFAR 125 + str r2, [vcpu, #VCPU_HSR] 126 + str r1, [vcpu, #VCPU_HxFAR] 127 + .endif 128 + mov r1, #\exception_code 129 + b __kvm_vcpu_return 130 + 131 + @ We were in the host already. Let's craft a panic-ing return to SVC. 
132 + 99: mrs r2, cpsr 133 + bic r2, r2, #MODE_MASK 134 + orr r2, r2, #SVC_MODE 135 + THUMB( orr r2, r2, #PSR_T_BIT ) 136 + msr spsr_cxsf, r2 137 + mrs r1, ELR_hyp 138 + ldr r2, =BSYM(panic) 139 + msr ELR_hyp, r2 140 + ldr r0, =\panic_str 141 + eret 142 + .endm 143 + 144 + .text 225 145 226 146 .align 5 227 147 __kvm_hyp_vector: 228 148 .globl __kvm_hyp_vector 229 - nop 149 + 150 + @ Hyp-mode exception vector 151 + W(b) hyp_reset 152 + W(b) hyp_undef 153 + W(b) hyp_svc 154 + W(b) hyp_pabt 155 + W(b) hyp_dabt 156 + W(b) hyp_hvc 157 + W(b) hyp_irq 158 + W(b) hyp_fiq 159 + 160 + .align 161 + hyp_reset: 162 + b hyp_reset 163 + 164 + .align 165 + hyp_undef: 166 + bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str 167 + 168 + .align 169 + hyp_svc: 170 + bad_exception ARM_EXCEPTION_HVC, svc_die_str 171 + 172 + .align 173 + hyp_pabt: 174 + bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str 175 + 176 + .align 177 + hyp_dabt: 178 + bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str 179 + 180 + .align 181 + hyp_hvc: 182 + /* 183 + * Getting here is either becuase of a trap from a guest or from calling 184 + * HVC from the host kernel, which means "switch to Hyp mode". 185 + */ 186 + push {r0, r1, r2} 187 + 188 + @ Check syndrome register 189 + mrc p15, 4, r1, c5, c2, 0 @ HSR 190 + lsr r0, r1, #HSR_EC_SHIFT 191 + #ifdef CONFIG_VFPv3 192 + cmp r0, #HSR_EC_CP_0_13 193 + beq switch_to_guest_vfp 194 + #endif 195 + cmp r0, #HSR_EC_HVC 196 + bne guest_trap @ Not HVC instr. 
197 + 198 + /* 199 + * Let's check if the HVC came from VMID 0 and allow simple 200 + * switch to Hyp mode 201 + */ 202 + mrrc p15, 6, r0, r2, c2 203 + lsr r2, r2, #16 204 + and r2, r2, #0xff 205 + cmp r2, #0 206 + bne guest_trap @ Guest called HVC 207 + 208 + host_switch_to_hyp: 209 + pop {r0, r1, r2} 210 + 211 + push {lr} 212 + mrs lr, SPSR 213 + push {lr} 214 + 215 + mov lr, r0 216 + mov r0, r1 217 + mov r1, r2 218 + mov r2, r3 219 + 220 + THUMB( orr lr, #1) 221 + blx lr @ Call the HYP function 222 + 223 + pop {lr} 224 + msr SPSR_csxf, lr 225 + pop {lr} 226 + eret 227 + 228 + guest_trap: 229 + load_vcpu @ Load VCPU pointer to r0 230 + str r1, [vcpu, #VCPU_HSR] 231 + 232 + @ Check if we need the fault information 233 + lsr r1, r1, #HSR_EC_SHIFT 234 + cmp r1, #HSR_EC_IABT 235 + mrceq p15, 4, r2, c6, c0, 2 @ HIFAR 236 + beq 2f 237 + cmp r1, #HSR_EC_DABT 238 + bne 1f 239 + mrc p15, 4, r2, c6, c0, 0 @ HDFAR 240 + 241 + 2: str r2, [vcpu, #VCPU_HxFAR] 242 + 243 + /* 244 + * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: 245 + * 246 + * Abort on the stage 2 translation for a memory access from a 247 + * Non-secure PL1 or PL0 mode: 248 + * 249 + * For any Access flag fault or Translation fault, and also for any 250 + * Permission fault on the stage 2 translation of a memory access 251 + * made as part of a translation table walk for a stage 1 translation, 252 + * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR 253 + * is UNKNOWN. 
254 + */ 255 + 256 + /* Check for permission fault, and S1PTW */ 257 + mrc p15, 4, r1, c5, c2, 0 @ HSR 258 + and r0, r1, #HSR_FSC_TYPE 259 + cmp r0, #FSC_PERM 260 + tsteq r1, #(1 << 7) @ S1PTW 261 + mrcne p15, 4, r2, c6, c0, 4 @ HPFAR 262 + bne 3f 263 + 264 + /* Resolve IPA using the xFAR */ 265 + mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR 266 + isb 267 + mrrc p15, 0, r0, r1, c7 @ PAR 268 + tst r0, #1 269 + bne 4f @ Failed translation 270 + ubfx r2, r0, #12, #20 271 + lsl r2, r2, #4 272 + orr r2, r2, r1, lsl #24 273 + 274 + 3: load_vcpu @ Load VCPU pointer to r0 275 + str r2, [r0, #VCPU_HPFAR] 276 + 277 + 1: mov r1, #ARM_EXCEPTION_HVC 278 + b __kvm_vcpu_return 279 + 280 + 4: pop {r0, r1, r2} @ Failed translation, return to guest 281 + eret 282 + 283 + /* 284 + * If VFPv3 support is not available, then we will not switch the VFP 285 + * registers; however cp10 and cp11 accesses will still trap and fallback 286 + * to the regular coprocessor emulation code, which currently will 287 + * inject an undefined exception to the guest. 288 + */ 289 + #ifdef CONFIG_VFPv3 290 + switch_to_guest_vfp: 291 + load_vcpu @ Load VCPU pointer to r0 292 + push {r3-r7} 293 + 294 + @ NEON/VFP used. Turn on VFP access. 
295 + set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) 296 + 297 + @ Switch VFP/NEON hardware state to the guest's 298 + add r7, r0, #VCPU_VFP_HOST 299 + ldr r7, [r7] 300 + store_vfp_state r7 301 + add r7, r0, #VCPU_VFP_GUEST 302 + restore_vfp_state r7 303 + 304 + pop {r3-r7} 305 + pop {r0-r2} 306 + eret 307 + #endif 308 + 309 + .align 310 + hyp_irq: 311 + push {r0, r1, r2} 312 + mov r1, #ARM_EXCEPTION_IRQ 313 + load_vcpu @ Load VCPU pointer to r0 314 + b __kvm_vcpu_return 315 + 316 + .align 317 + hyp_fiq: 318 + b hyp_fiq 319 + 320 + .ltorg 230 321 231 322 __kvm_hyp_code_end: 232 323 .globl __kvm_hyp_code_end 324 + 325 + .section ".rodata" 326 + 327 + und_die_str: 328 + .ascii "unexpected undefined exception in Hyp mode at: %#08x" 329 + pabt_die_str: 330 + .ascii "unexpected prefetch abort in Hyp mode at: %#08x" 331 + dabt_die_str: 332 + .ascii "unexpected data abort in Hyp mode at: %#08x" 333 + svc_die_str: 334 + .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x"
+441
arch/arm/kvm/interrupts_head.S
··· 1 + #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) 2 + #define VCPU_USR_SP (VCPU_USR_REG(13)) 3 + #define VCPU_USR_LR (VCPU_USR_REG(14)) 4 + #define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) 5 + 6 + /* 7 + * Many of these macros need to access the VCPU structure, which is always 8 + * held in r0. These macros should never clobber r1, as it is used to hold the 9 + * exception code on the return path (except of course the macro that switches 10 + * all the registers before the final jump to the VM). 11 + */ 12 + vcpu .req r0 @ vcpu pointer always in r0 13 + 14 + /* Clobbers {r2-r6} */ 15 + .macro store_vfp_state vfp_base 16 + @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions 17 + VFPFMRX r2, FPEXC 18 + @ Make sure VFP is enabled so we can touch the registers. 19 + orr r6, r2, #FPEXC_EN 20 + VFPFMXR FPEXC, r6 21 + 22 + VFPFMRX r3, FPSCR 23 + tst r2, #FPEXC_EX @ Check for VFP Subarchitecture 24 + beq 1f 25 + @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so 26 + @ we only need to save them if FPEXC_EX is set. 
	/*
	 * NOTE(review): tail of the VFP save macro — its .macro header is
	 * above this chunk.  The visible part reads FPINST/FPINST2 (the
	 * FPINST2 read is conditional on FPEXC_FP2V), clears FPEXC_EX, and
	 * then dumps the VFP register bank plus FPEXC/FPSCR/FPINST/FPINST2
	 * to \vfp_base.
	 */
	VFPFMRX	r4, FPINST
	tst	r2, #FPEXC_FP2V
	VFPFMRX	r5, FPINST2, ne		@ vmrsne
	bic	r6, r2, #FPEXC_EX	@ FPEXC_EX disable
	VFPFMXR	FPEXC, r6
1:
	VFPFSTMIA	\vfp_base, r6	@ Save VFP registers
	stm	\vfp_base, {r2-r5}	@ Save FPEXC, FPSCR, FPINST, FPINST2
.endm

/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
.macro restore_vfp_state vfp_base
	VFPFLDMIA	\vfp_base, r6	@ Load VFP registers
	ldm	\vfp_base, {r2-r5}	@ Load FPEXC, FPSCR, FPINST, FPINST2

	VFPFMXR	FPSCR, r3
	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
	beq	1f
	VFPFMXR	FPINST, r4
	tst	r2, #FPEXC_FP2V
	VFPFMXR	FPINST2, r5, ne
1:
	VFPFMXR	FPEXC, r2		@ FPEXC (last, in case !EN)
.endm

/*
 * Mode indices used as arguments to the *_regs_mode macros below.
 * These are simply for the macros to work - values don't have meaning.
 */
.equ usr, 0
.equ svc, 1
.equ abt, 2
.equ und, 3
.equ irq, 4
.equ fiq, 5

/* Push SP, LR and SPSR of the given banked mode onto the Hyp stack. */
.macro push_host_regs_mode mode
	mrs	r2, SP_\mode
	mrs	r3, LR_\mode
	mrs	r4, SPSR_\mode
	push	{r2, r3, r4}
.endm

/*
 * Store all host persistent registers on the stack.
 * Clobbers all registers, in all modes, except r0 and r1.
 */
.macro save_host_regs
	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
	mrs	r2, ELR_hyp
	push	{r2}

	/* usr regs */
	push	{r4-r12}		@ r0-r3 are always clobbered
	mrs	r2, SP_usr
	mov	r3, lr
	push	{r2, r3}

	push_host_regs_mode svc
	push_host_regs_mode abt
	push_host_regs_mode und
	push_host_regs_mode irq

	/* fiq regs (FIQ also banks r8-r12) */
	mrs	r2, r8_fiq
	mrs	r3, r9_fiq
	mrs	r4, r10_fiq
	mrs	r5, r11_fiq
	mrs	r6, r12_fiq
	mrs	r7, SP_fiq
	mrs	r8, LR_fiq
	mrs	r9, SPSR_fiq
	push	{r2-r9}
.endm

/* Pop SP, LR and SPSR of the given banked mode from the Hyp stack. */
.macro pop_host_regs_mode mode
	pop	{r2, r3, r4}
	msr	SP_\mode, r2
	msr	LR_\mode, r3
	msr	SPSR_\mode, r4
.endm

/*
 * Restore all host registers from the stack.
 * Pops must mirror save_host_regs exactly, in reverse order.
 * Clobbers all registers, in all modes, except r0 and r1.
 */
.macro restore_host_regs
	pop	{r2-r9}
	msr	r8_fiq, r2
	msr	r9_fiq, r3
	msr	r10_fiq, r4
	msr	r11_fiq, r5
	msr	r12_fiq, r6
	msr	SP_fiq, r7
	msr	LR_fiq, r8
	msr	SPSR_fiq, r9

	pop_host_regs_mode irq
	pop_host_regs_mode und
	pop_host_regs_mode abt
	pop_host_regs_mode svc

	pop	{r2, r3}
	msr	SP_usr, r2
	mov	lr, r3
	pop	{r4-r12}

	pop	{r2}
	msr	ELR_hyp, r2
.endm

/*
 * Restore SP, LR and SPSR for a given mode. offset is the offset of
 * this mode's registers from the VCPU base.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r1, r2, r3, r4.
 */
.macro restore_guest_regs_mode mode, offset
	add	r1, vcpu, \offset
	ldm	r1, {r2, r3, r4}	@ in-memory order: SP, LR, SPSR
	msr	SP_\mode, r2
	msr	LR_\mode, r3
	msr	SPSR_\mode, r4
.endm

/*
 * Restore all guest registers from the vcpu struct.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers *all* registers.
 */
.macro restore_guest_regs
	restore_guest_regs_mode svc, #VCPU_SVC_REGS
	restore_guest_regs_mode abt, #VCPU_ABT_REGS
	restore_guest_regs_mode und, #VCPU_UND_REGS
	restore_guest_regs_mode irq, #VCPU_IRQ_REGS

	add	r1, vcpu, #VCPU_FIQ_REGS
	ldm	r1, {r2-r9}
	msr	r8_fiq, r2
	msr	r9_fiq, r3
	msr	r10_fiq, r4
	msr	r11_fiq, r5
	msr	r12_fiq, r6
	msr	SP_fiq, r7
	msr	LR_fiq, r8
	msr	SPSR_fiq, r9

	@ Load return state
	ldr	r2, [vcpu, #VCPU_PC]
	ldr	r3, [vcpu, #VCPU_CPSR]
	msr	ELR_hyp, r2
	msr	SPSR_cxsf, r3

	@ Load user registers
	ldr	r2, [vcpu, #VCPU_USR_SP]
	ldr	r3, [vcpu, #VCPU_USR_LR]
	msr	SP_usr, r2
	mov	lr, r3
	@ r0-r12 come last: this clobbers vcpu itself
	add	vcpu, vcpu, #(VCPU_USR_REGS)
	ldm	vcpu, {r0-r12}
.endm

/*
 * Save SP, LR and SPSR for a given mode. offset is the offset of
 * this mode's registers from the VCPU base.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2, r3, r4, r5.
 */
.macro save_guest_regs_mode mode, offset
	add	r2, vcpu, \offset
	mrs	r3, SP_\mode
	mrs	r4, LR_\mode
	mrs	r5, SPSR_\mode
	stm	r2, {r3, r4, r5}	@ in-memory order: SP, LR, SPSR
.endm

/*
 * Save all guest registers to the vcpu struct
 * Expects guest's r0, r1, r2 on the stack.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2, r3, r4, r5.
 */
.macro save_guest_regs
	@ Store usr registers: r3-r12 first, then r0-r2 from the stack
	add	r2, vcpu, #VCPU_USR_REG(3)
	stm	r2, {r3-r12}
	add	r2, vcpu, #VCPU_USR_REG(0)
	pop	{r3, r4, r5}		@ r0, r1, r2
	stm	r2, {r3, r4, r5}
	mrs	r2, SP_usr
	mov	r3, lr
	str	r2, [vcpu, #VCPU_USR_SP]
	str	r3, [vcpu, #VCPU_USR_LR]

	@ Store return state (guest PC/CPSR captured in ELR_hyp/SPSR_hyp)
	mrs	r2, ELR_hyp
	mrs	r3, spsr
	str	r2, [vcpu, #VCPU_PC]
	str	r3, [vcpu, #VCPU_CPSR]

	@ Store other guest registers
	save_guest_regs_mode svc, #VCPU_SVC_REGS
	save_guest_regs_mode abt, #VCPU_ABT_REGS
	save_guest_regs_mode und, #VCPU_UND_REGS
	save_guest_regs_mode irq, #VCPU_IRQ_REGS
.endm

/* Reads cp15 registers from hardware and stores them in memory
 * @store_to_vcpu: If 0, registers are written in-order to the stack,
 * 		   otherwise to the VCPU struct pointed to by vcpup
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2 - r12
 */
.macro read_cp15_state store_to_vcpu
	mrc	p15, 0, r2, c1, c0, 0	@ SCTLR
	mrc	p15, 0, r3, c1, c0, 2	@ CPACR
	mrc	p15, 0, r4, c2, c0, 2	@ TTBCR
	mrc	p15, 0, r5, c3, c0, 0	@ DACR
	mrrc	p15, 0, r6, r7, c2	@ TTBR 0 (64-bit read)
	mrrc	p15, 1, r8, r9, c2	@ TTBR 1 (64-bit read)
	mrc	p15, 0, r10, c10, c2, 0	@ PRRR
	mrc	p15, 0, r11, c10, c2, 1	@ NMRR
	mrc	p15, 2, r12, c0, c0, 0	@ CSSELR

	.if \store_to_vcpu == 0
	push	{r2-r12}		@ Push CP15 registers
	.else
	str	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	str	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	str	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	str	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add	r2, vcpu, #CP15_OFFSET(c2_TTBR0)
	strd	r6, r7, [r2]
	add	r2, vcpu, #CP15_OFFSET(c2_TTBR1)
	strd	r8, r9, [r2]
	str	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	str	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	str	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	mrc	p15, 0, r2, c13, c0, 1	@ CID
	mrc	p15, 0, r3, c13, c0, 2	@ TID_URW
	mrc	p15, 0, r4, c13, c0, 3	@ TID_URO
	mrc	p15, 0, r5, c13, c0, 4	@ TID_PRIV
	mrc	p15, 0, r6, c5, c0, 0	@ DFSR
	mrc	p15, 0, r7, c5, c0, 1	@ IFSR
	mrc	p15, 0, r8, c5, c1, 0	@ ADFSR
	mrc	p15, 0, r9, c5, c1, 1	@ AIFSR
	mrc	p15, 0, r10, c6, c0, 0	@ DFAR
	mrc	p15, 0, r11, c6, c0, 2	@ IFAR
	mrc	p15, 0, r12, c12, c0, 0	@ VBAR

	.if \store_to_vcpu == 0
	push	{r2-r12}		@ Push CP15 registers
	.else
	str	r2, [vcpu, #CP15_OFFSET(c13_CID)]
	str	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	str	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	str	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	str	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	str	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	str	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	str	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	str	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif
.endm

/*
 * Reads cp15 registers from memory and writes them to hardware
 * @read_from_vcpu: If 0, registers are read in-order from the stack,
 * 		    otherwise from the VCPU struct pointed to by vcpup
 *
 * Note: stack pops must mirror the push order in read_cp15_state, so
 * the c13/c5/c6/c12 group (pushed last) is written first here.
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro write_cp15_state read_from_vcpu
	.if \read_from_vcpu == 0
	pop	{r2-r12}
	.else
	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	ldr	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif

	mcr	p15, 0, r2, c13, c0, 1	@ CID
	mcr	p15, 0, r3, c13, c0, 2	@ TID_URW
	mcr	p15, 0, r4, c13, c0, 3	@ TID_URO
	mcr	p15, 0, r5, c13, c0, 4	@ TID_PRIV
	mcr	p15, 0, r6, c5, c0, 0	@ DFSR
	mcr	p15, 0, r7, c5, c0, 1	@ IFSR
	mcr	p15, 0, r8, c5, c1, 0	@ ADFSR
	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR
	mcr	p15, 0, r10, c6, c0, 0	@ DFAR
	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
	mcr	p15, 0, r12, c12, c0, 0	@ VBAR

	.if \read_from_vcpu == 0
	pop	{r2-r12}
	.else
	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add	r12, vcpu, #CP15_OFFSET(c2_TTBR0)
	ldrd	r6, r7, [r12]
	add	r12, vcpu, #CP15_OFFSET(c2_TTBR1)
	ldrd	r8, r9, [r12]
	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR
	mcr	p15, 0, r3, c1, c0, 2	@ CPACR
	mcr	p15, 0, r4, c2, c0, 2	@ TTBCR
	mcr	p15, 0, r5, c3, c0, 0	@ DACR
	mcrr	p15, 0, r6, r7, c2	@ TTBR 0 (64-bit write)
	mcrr	p15, 1, r8, r9, c2	@ TTBR 1 (64-bit write)
	mcr	p15, 0, r10, c10, c2, 0	@ PRRR
	mcr	p15, 0, r11, c10, c2, 1	@ NMRR
	mcr	p15, 2, r12, c0, c0, 0	@ CSSELR
.endm

/*
 * Save the VGIC CPU state into memory
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * NOTE(review): intentionally empty here — VGIC support is introduced
 * by a later change; this is a placeholder hook in the switch path.
 */
.macro save_vgic_state
.endm

/*
 * Restore the VGIC CPU state from memory
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * NOTE(review): intentionally empty placeholder, see save_vgic_state.
 */
.macro restore_vgic_state
.endm

/* Direction arguments for the set_h* / configure_hyp_role macros. */
.equ vmentry,	0
.equ vmexit,	1

/* Configures the HSTR (Hyp System Trap Register) on entry/return
 * (hardware reset value is 0) */
.macro set_hstr operation
	mrc	p15, 4, r2, c1, c1, 3
	ldr	r3, =HSTR_T(15)
	.if \operation == vmentry
	orr	r2, r2, r3		@ Trap CR{15}
	.else
	bic	r2, r2, r3		@ Don't trap any CRx accesses
	.endif
	mcr	p15, 4, r2, c1, c1, 3
.endm

/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
 * (hardware reset value is 0). Keep previous value in r2. */
.macro set_hcptr operation, mask
	mrc	p15, 4, r2, c1, c1, 2
	ldr	r3, =\mask
	.if \operation == vmentry
	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask
	.else
	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
	.endif
	mcr	p15, 4, r3, c1, c1, 2
.endm

/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
 * (hardware reset value is 0) */
.macro set_hdcr operation
	mrc	p15, 4, r2, c1, c1, 1
	ldr	r3, =(HDCR_TPM|HDCR_TPMCR)
	.if \operation == vmentry
	orr	r2, r2, r3		@ Trap some perfmon accesses
	.else
	bic	r2, r2, r3		@ Don't trap any perfmon accesses
	.endif
	mcr	p15, 4, r2, c1, c1, 1
.endm

/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
.macro configure_hyp_role operation
	mrc	p15, 4, r2, c1, c1, 0	@ HCR
	bic	r2, r2, #HCR_VIRT_EXCP_MASK
	ldr	r3, =HCR_GUEST_MASK
	.if \operation == vmentry
	orr	r2, r2, r3
	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
	orr	r2, r2, r3		@ merge vcpu's virtual interrupt lines
	.else
	bic	r2, r2, r3
	.endif
	mcr	p15, 4, r2, c1, c1, 0
.endm

/* Fetch the vcpu pointer (stashed in HTPIDR) into the vcpu reg. */
.macro load_vcpu
	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
.endm