Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-arm-for-3.14' of git://git.linaro.org/people/christoffer.dall/linux-kvm-arm into kvm-queue

+41 -30
+1 -1
Documentation/virtual/kvm/api.txt
··· 2327 2327 Capability: basic 2328 2328 Architectures: arm, arm64 2329 2329 Type: vcpu ioctl 2330 - Parameters: struct struct kvm_vcpu_init (in) 2330 + Parameters: struct kvm_vcpu_init (in) 2331 2331 Returns: 0 on success; -1 on error 2332 2332 Errors: 2333 2333  EINVAL:    the target is unknown, or the combination of features is invalid.
+1
arch/arm/include/asm/kvm_mmu.h
··· 140 140 } 141 141 142 142 #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l)) 143 + #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) 143 144 144 145 #endif /* !__ASSEMBLY__ */ 145 146
+19 -11
arch/arm/kvm/arm.c
··· 489 489 return ret; 490 490 } 491 491 492 - /* 493 - * Handle the "start in power-off" case by calling into the 494 - * PSCI code. 495 - */ 496 - if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) { 497 - *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF; 498 - kvm_psci_call(vcpu); 499 - } 500 - 501 492 return 0; 502 493 } 503 494 ··· 702 711 return -EINVAL; 703 712 } 704 713 714 + static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, 715 + struct kvm_vcpu_init *init) 716 + { 717 + int ret; 718 + 719 + ret = kvm_vcpu_set_target(vcpu, init); 720 + if (ret) 721 + return ret; 722 + 723 + /* 724 + * Handle the "start in power-off" case by marking the VCPU as paused. 725 + */ 726 + if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) 727 + vcpu->arch.pause = true; 728 + 729 + return 0; 730 + } 731 + 705 732 long kvm_arch_vcpu_ioctl(struct file *filp, 706 733 unsigned int ioctl, unsigned long arg) 707 734 { ··· 733 724 if (copy_from_user(&init, argp, sizeof(init))) 734 725 return -EFAULT; 735 726 736 - return kvm_vcpu_set_target(vcpu, &init); 737 - 727 + return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); 738 728 } 739 729 case KVM_SET_ONE_REG: 740 730 case KVM_GET_ONE_REG: {
-2
arch/arm/kvm/handle_exit.c
··· 26 26 27 27 #include "trace.h" 28 28 29 - #include "trace.h" 30 - 31 29 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); 32 30 33 31 static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
+13 -11
arch/arm/kvm/mmu.c
··· 667 667 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; 668 668 } else { 669 669 /* 670 - * Pages belonging to VMAs not aligned to the PMD mapping 671 - * granularity cannot be mapped using block descriptors even 672 - * if the pages belong to a THP for the process, because the 673 - * stage-2 block descriptor will cover more than a single THP 674 - * and we lose atomicity for unmapping, updates, and splits 675 - * of the THP or other pages in the stage-2 block range. 670 + * Pages belonging to memslots that don't have the same 671 + * alignment for userspace and IPA cannot be mapped using 672 + * block descriptors even if the pages belong to a THP for 673 + * the process, because the stage-2 block descriptor will 674 + * cover more than a single THP and we lose atomicity for 675 + * unmapping, updates, and splits of the THP or other pages 676 + * in the stage-2 block range. 676 677 */ 677 - if (vma->vm_start & ~PMD_MASK) 678 + if ((memslot->userspace_addr & ~PMD_MASK) != 679 + ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK)) 678 680 force_pte = true; 679 681 } 680 682 up_read(&current->mm->mmap_sem); ··· 918 916 { 919 917 int err; 920 918 921 - hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start); 922 - hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end); 923 - hyp_idmap_vector = virt_to_phys(__kvm_hyp_init); 919 + hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start); 920 + hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); 921 + hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); 924 922 925 923 if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { 926 924 /* ··· 947 945 */ 948 946 kvm_flush_dcache_to_poc(init_bounce_page, len); 949 947 950 - phys_base = virt_to_phys(init_bounce_page); 948 + phys_base = kvm_virt_to_phys(init_bounce_page); 951 949 hyp_idmap_vector += phys_base - hyp_idmap_start; 952 950 hyp_idmap_start = phys_base; 953 951 hyp_idmap_end = phys_base + len;
+6 -5
arch/arm/kvm/psci.c
··· 54 54 } 55 55 } 56 56 57 - if (!vcpu) 57 + /* 58 + * Make sure the caller requested a valid CPU and that the CPU is 59 + * turned off. 60 + */ 61 + if (!vcpu || !vcpu->arch.pause) 58 62 return KVM_PSCI_RET_INVAL; 59 63 60 64 target_pc = *vcpu_reg(source_vcpu, 2); 61 - 62 - wq = kvm_arch_vcpu_wq(vcpu); 63 - if (!waitqueue_active(wq)) 64 - return KVM_PSCI_RET_INVAL; 65 65 66 66 kvm_reset_vcpu(vcpu); 67 67 ··· 79 79 vcpu->arch.pause = false; 80 80 smp_mb(); /* Make sure the above is visible */ 81 81 82 + wq = kvm_arch_vcpu_wq(vcpu); 82 83 wake_up_interruptible(wq); 83 84 84 85 return KVM_PSCI_RET_SUCCESS;
+1
arch/arm64/include/asm/kvm_mmu.h
··· 136 136 } 137 137 138 138 #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) 139 + #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) 139 140 140 141 #endif /* __ASSEMBLY__ */ 141 142 #endif /* __ARM64_KVM_MMU_H__ */