Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v3.9-rc5
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
#include <asm/opcodes.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
	BUG_ON(preemptible());
	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
}

/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
	BUG_ON(preemptible());
	return __get_cpu_var(kvm_arm_running_vcpu);
}
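/*
 * Note: the two helpers above rely on __get_cpu_var(), which is only
 * meaningful while preemption is disabled, hence the
 * BUG_ON(preemptible()) checks; callers must be in a non-preemptible
 * section, e.g. between kvm_arch_vcpu_load() and kvm_arch_vcpu_put().
 */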
/**
 * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
{
	return &kvm_arm_running_vcpu;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   bool user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   bool user_alloc)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
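/*
 * vcpu creation below allocates the vcpu, runs the generic KVM vcpu
 * init, and then maps the vcpu structure into Hyp mode so the
 * world-switch code can reach it; each failure path unwinds exactly
 * the steps that have already succeeded, in reverse order.
 */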
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	if (implementor != ARM_CPU_IMP_ARM)
		return -EINVAL;

	switch (part_number) {
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;

	/* Set up VGIC */
	ret = kvm_vgic_vcpu_init(vcpu);
	if (ret)
		return ret;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);

	/*
	 * Check whether this vcpu requires the cache to be flushed on
	 * this physical CPU. This is a consequence of doing dcache
	 * operations by set/way on this vcpu. We do it here to be in
	 * a non-preemptible section.
	 */
	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

	kvm_arm_set_running_vcpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arm_set_running_vcpu(NULL);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts, or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm:	The VM's VMID to check
 *
 * Returns true if there is a new generation of VMIDs being used.
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a VMID
 * for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}
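/*
 * Worked example of the VMID scheme implemented below: VMIDs are handed
 * out 1..255 within a generation. When kvm_next_vmid wraps to 0, the
 * generation counter (kvm_vmid_gen) is bumped, every running guest is
 * kicked out, and the TLBs and icaches are flushed; each VM then
 * re-enters update_vttbr(), sees its stale vmid_gen, and picks up a
 * fresh VMID. The VMID lands in the upper bits of the VTTBR next to
 * the stage-2 pgd base (bits [55:48] on ARMv7 LPAE, per
 * VTTBR_VMID_SHIFT).
 */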
/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm:	The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
	kvm->arch.vttbr |= vmid;

	spin_unlock(&kvm_vmid_lock);
}

static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* SVC called from Hyp mode should never get here */
	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
	BUG();
	return -EINVAL; /* Squash warning */
}

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);

	if (kvm_psci_call(vcpu))
		return 1;

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_psci_call(vcpu))
		return 1;

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* The hypervisor should never cause aborts */
	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
		vcpu->arch.hxfar, vcpu->arch.hsr);
	return -EFAULT;
}

static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* This is either an error in the world-switch code or an external abort */
	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
		vcpu->arch.hxfar, vcpu->arch.hsr);
	return -EFAULT;
}

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static exit_handle_fn arm_exit_handlers[] = {
	[HSR_EC_WFI]		= kvm_handle_wfi,
	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
	[HSR_EC_HVC]		= handle_hvc,
	[HSR_EC_SMC]		= handle_smc,
	[HSR_EC_IABT]		= kvm_handle_guest_abort,
	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
	[HSR_EC_DABT]		= kvm_handle_guest_abort,
	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
};
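/*
 * The table above is indexed by the exception class (EC) field of the
 * HSR; handle_exit() below BUG()s on any class that has no entry,
 * since a trap that was never requested indicates a hypervisor problem
 * rather than a guest error.
 */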
/*
 * A conditional instruction is allowed to trap, even though it
 * wouldn't be executed.  So let's re-implement the hardware, in
 * software!
 */
static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr, cond, insn;

	/*
	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
	 * catch undefined instructions, and then we won't get past
	 * the arm_exit_handlers test anyway.
	 */
	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);

	/* Top two bits non-zero?  Unconditional. */
	if (vcpu->arch.hsr >> 30)
		return true;

	cpsr = *vcpu_cpsr(vcpu);

	/* Is condition field valid? */
	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
	else {
		/* This can happen in Thumb mode: examine IT state. */
		unsigned long it;

		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);

		/* it == 0 => unconditional. */
		if (it == 0)
			return true;

		/* The cond for this insn works out as the top 4 bits. */
		cond = (it >> 4);
	}

	/* Shift makes it look like an ARM-mode instruction */
	insn = cond << 28;
	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index)
{
	unsigned long hsr_ec;

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_UNDEFINED:
		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
			vcpu->arch.hyp_pc);
		BUG();
		panic("KVM: Hypervisor undefined exception!\n");
	case ARM_EXCEPTION_DATA_ABORT:
	case ARM_EXCEPTION_PREF_ABORT:
	case ARM_EXCEPTION_HVC:
		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;

		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
		    || !arm_exit_handlers[hsr_ec]) {
			kvm_err("Unknown exception class: %#08lx, "
				"hsr: %#08x\n", hsr_ec,
				(unsigned int)vcpu->arch.hsr);
			BUG();
		}

		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			bool is_wide = vcpu->arch.hsr & HSR_IL;
			kvm_skip_instr(vcpu, is_wide);
			return 1;
		}

		return arm_exit_handlers[hsr_ec](vcpu, run);
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;

	/*
	 * Initialize the VGIC before running a vcpu the first time on
	 * this VM.
	 */
	if (irqchip_in_kernel(vcpu->kvm) &&
	    unlikely(!vgic_initialized(vcpu->kvm))) {
		int ret = kvm_vgic_init(vcpu->kvm);
		if (ret)
			return ret;
	}

	/*
	 * Handle the "start in power-off" case by calling into the
	 * PSCI code.
	 */
	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
		kvm_psci_call(vcpu);
	}

	return 0;
}

static void vcpu_pause(struct kvm_vcpu *vcpu)
{
	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

	wait_event_interruptible(*wq, !vcpu->arch.pause);
}
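/*
 * For reference, a minimal sketch of the userspace side of the run
 * loop that drives kvm_arch_vcpu_ioctl_run() below (illustrative only;
 * fd setup via KVM_CREATE_VM/KVM_CREATE_VCPU and sizing the mmap with
 * KVM_GET_VCPU_MMAP_SIZE are omitted):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;			// -EINTR, -ENOEXEC, ...
 *		if (run->exit_reason == KVM_EXIT_MMIO)
 *			emulate_mmio(run);	// hypothetical helper
 *	}
 */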
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the KVM_RUN ioctl from user space. It
 * will execute VM code in a loop until the time slice for the process is
 * used up or some emulation is needed from user space, in which case the
 * function returns 0 with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
	if (unlikely(vcpu->arch.target < 0))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		if (vcpu->arch.pause)
			vcpu_pause(vcpu);

		kvm_vgic_flush_hwstate(vcpu);
		kvm_timer_flush_hwstate(vcpu);

		local_irq_disable();

		/*
		 * Re-check atomic conditions
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
			local_irq_enable();
			kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			continue;
		}

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		kvm_guest_enter();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->arch.last_pcpu = smp_processor_id();
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled.  Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * Back from guest
		 *************************************************************/

		kvm_timer_sync_hwstate(vcpu);
		kvm_vgic_sync_hwstate(vcpu);

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}
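/*
 * vcpu_interrupt_line() below models the core's IRQ and FIQ input
 * lines as two bits in vcpu->arch.irq_lines, positioned so they can be
 * copied straight into the HCR: setting HCR.VI or HCR.VF makes the
 * hardware present a virtual IRQ or FIQ to the guest on the next world
 * switch.
 */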
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}

/*
 * The irq field of the kvm_irq_level argument packs three subfields,
 * extracted below: an irq type (CPU, PPI or SPI), the index of the
 * target vcpu for per-vcpu (CPU/PPI) interrupts, and the interrupt
 * number itself.
 */
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS ||
		    irq_num > KVM_ARM_IRQ_GIC_MAX)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
	}

	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
	default:
		return -ENODEV;
	}
}
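/*
 * VM-scoped ioctls. Only two are arch-specific on ARM at this point:
 * KVM_CREATE_IRQCHIP instantiates the in-kernel VGIC (when one is
 * present), and KVM_ARM_SET_DEVICE_ADDR tells KVM where userspace has
 * placed a device, currently just the VGIC V2 (see
 * kvm_vm_ioctl_set_device_addr() above).
 */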
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		if (vgic_present)
			return kvm_vgic_create(kvm);
		else
			return -ENXIO;
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	default:
		return -EINVAL;
	}
}

static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long pgd_ptr;
	unsigned long pgd_low, pgd_high;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors((unsigned long)vector);

	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
	pgd_high = (pgd_ptr >> 32ULL);
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	/*
	 * Call initialization code, and switch to the full blown HYP
	 * code. The init code doesn't need to preserve these registers as
	 * r1-r3 and r12 are caller-save scratch registers according to the
	 * AAPCS.
	 * Note that we slightly misuse the prototype by casting pgd_low to
	 * a void *.
	 */
	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}
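/*
 * Boot-time Hyp setup, in order: allocate the Hyp page tables and the
 * identity mapping used during init, grab a stack page per CPU, run
 * cpu_init_hyp_mode() on every online CPU through its physical init
 * address, then build the Hyp mappings for the world-switch code, the
 * stacks and the host VFP state, and finally bring up the VGIC and
 * timer Hyp support.
 */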
/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Unmap the identity mapping
	 */
	kvm_clear_hyp_idmap();

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		struct vfp_hard_struct *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);

		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	if (err)
		goto out_free_vfp;

#ifdef CONFIG_KVM_ARM_VGIC
	vgic_present = true;
#endif

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init();
	if (err)
		goto out_free_vfp;	/* free the VFP state allocated above */

	kvm_info("Hyp mode initialized successfully\n");
	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pmds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_target_cpu() < 0) {
		kvm_err("Target CPU not supported!\n");
		return -ENODEV;
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	kvm_coproc_table_init();
	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);