Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
"Bug fixes for system management mode emulation.

The first two patches fix SMM emulation on Nehalem processors. The
others fix some cases that became apparent as work progressed on the
firmware side"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: fix RSM into 64-bit protected mode
KVM: x86: fix previous commit for 32-bit
KVM: x86: fix SMI to halted VCPU
KVM: x86: clean up kvm_arch_vcpu_runnable
KVM: x86: map/unmap private slots in __x86_set_memory_region
KVM: x86: build kvm_userspace_memory_region in x86_set_memory_region

+90 -87
+2 -4
arch/x86/include/asm/kvm_host.h
··· 1226 1226 1227 1227 int kvm_is_in_guest(void); 1228 1228 1229 - int __x86_set_memory_region(struct kvm *kvm, 1230 - const struct kvm_userspace_memory_region *mem); 1231 - int x86_set_memory_region(struct kvm *kvm, 1232 - const struct kvm_userspace_memory_region *mem); 1229 + int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); 1230 + int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); 1233 1231 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); 1234 1232 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); 1235 1233
+7 -3
arch/x86/kvm/emulate.c
··· 2418 2418 u64 val, cr0, cr4; 2419 2419 u32 base3; 2420 2420 u16 selector; 2421 - int i; 2421 + int i, r; 2422 2422 2423 2423 for (i = 0; i < 16; i++) 2424 2424 *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); ··· 2460 2460 dt.address = GET_SMSTATE(u64, smbase, 0x7e68); 2461 2461 ctxt->ops->set_gdt(ctxt, &dt); 2462 2462 2463 + r = rsm_enter_protected_mode(ctxt, cr0, cr4); 2464 + if (r != X86EMUL_CONTINUE) 2465 + return r; 2466 + 2463 2467 for (i = 0; i < 6; i++) { 2464 - int r = rsm_load_seg_64(ctxt, smbase, i); 2468 + r = rsm_load_seg_64(ctxt, smbase, i); 2465 2469 if (r != X86EMUL_CONTINUE) 2466 2470 return r; 2467 2471 } 2468 2472 2469 - return rsm_enter_protected_mode(ctxt, cr0, cr4); 2473 + return X86EMUL_CONTINUE; 2470 2474 } 2471 2475 2472 2476 static int em_rsm(struct x86_emulate_ctxt *ctxt)
+6 -20
arch/x86/kvm/vmx.c
··· 4105 4105 static int alloc_apic_access_page(struct kvm *kvm) 4106 4106 { 4107 4107 struct page *page; 4108 - struct kvm_userspace_memory_region kvm_userspace_mem; 4109 4108 int r = 0; 4110 4109 4111 4110 mutex_lock(&kvm->slots_lock); 4112 4111 if (kvm->arch.apic_access_page_done) 4113 4112 goto out; 4114 - kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; 4115 - kvm_userspace_mem.flags = 0; 4116 - kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE; 4117 - kvm_userspace_mem.memory_size = PAGE_SIZE; 4118 - r = __x86_set_memory_region(kvm, &kvm_userspace_mem); 4113 + r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 4114 + APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); 4119 4115 if (r) 4120 4116 goto out; 4121 4117 ··· 4136 4140 { 4137 4141 /* Called with kvm->slots_lock held. */ 4138 4142 4139 - struct kvm_userspace_memory_region kvm_userspace_mem; 4140 4143 int r = 0; 4141 4144 4142 4145 BUG_ON(kvm->arch.ept_identity_pagetable_done); 4143 4146 4144 - kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; 4145 - kvm_userspace_mem.flags = 0; 4146 - kvm_userspace_mem.guest_phys_addr = 4147 - kvm->arch.ept_identity_map_addr; 4148 - kvm_userspace_mem.memory_size = PAGE_SIZE; 4149 - r = __x86_set_memory_region(kvm, &kvm_userspace_mem); 4147 + r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 4148 + kvm->arch.ept_identity_map_addr, PAGE_SIZE); 4150 4149 4151 4150 return r; 4152 4151 } ··· 4940 4949 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) 4941 4950 { 4942 4951 int ret; 4943 - struct kvm_userspace_memory_region tss_mem = { 4944 - .slot = TSS_PRIVATE_MEMSLOT, 4945 - .guest_phys_addr = addr, 4946 - .memory_size = PAGE_SIZE * 3, 4947 - .flags = 0, 4948 - }; 4949 4952 4950 - ret = x86_set_memory_region(kvm, &tss_mem); 4953 + ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, 4954 + PAGE_SIZE * 3); 4951 4955 if (ret) 4952 4956 return ret; 4953 4957 kvm->arch.tss_addr = addr;
+75 -60
arch/x86/kvm/x86.c
··· 6453 6453 return 1; 6454 6454 } 6455 6455 6456 + static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) 6457 + { 6458 + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 6459 + !vcpu->arch.apf.halted); 6460 + } 6461 + 6456 6462 static int vcpu_run(struct kvm_vcpu *vcpu) 6457 6463 { 6458 6464 int r; ··· 6467 6461 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6468 6462 6469 6463 for (;;) { 6470 - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 6471 - !vcpu->arch.apf.halted) 6464 + if (kvm_vcpu_running(vcpu)) 6472 6465 r = vcpu_enter_guest(vcpu); 6473 6466 else 6474 6467 r = vcpu_block(kvm, vcpu); ··· 7479 7474 kvm_free_pit(kvm); 7480 7475 } 7481 7476 7482 - int __x86_set_memory_region(struct kvm *kvm, 7483 - const struct kvm_userspace_memory_region *mem) 7477 + int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) 7484 7478 { 7485 7479 int i, r; 7480 + unsigned long hva; 7481 + struct kvm_memslots *slots = kvm_memslots(kvm); 7482 + struct kvm_memory_slot *slot, old; 7486 7483 7487 7484 /* Called with kvm->slots_lock held. */ 7488 - BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM); 7485 + if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) 7486 + return -EINVAL; 7489 7487 7488 + slot = id_to_memslot(slots, id); 7489 + if (size) { 7490 + if (WARN_ON(slot->npages)) 7491 + return -EEXIST; 7492 + 7493 + /* 7494 + * MAP_SHARED to prevent internal slot pages from being moved 7495 + * by fork()/COW. 
7496 + */ 7497 + hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, 7498 + MAP_SHARED | MAP_ANONYMOUS, 0); 7499 + if (IS_ERR((void *)hva)) 7500 + return PTR_ERR((void *)hva); 7501 + } else { 7502 + if (!slot->npages) 7503 + return 0; 7504 + 7505 + hva = 0; 7506 + } 7507 + 7508 + old = *slot; 7490 7509 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { 7491 - struct kvm_userspace_memory_region m = *mem; 7510 + struct kvm_userspace_memory_region m; 7492 7511 7493 - m.slot |= i << 16; 7512 + m.slot = id | (i << 16); 7513 + m.flags = 0; 7514 + m.guest_phys_addr = gpa; 7515 + m.userspace_addr = hva; 7516 + m.memory_size = size; 7494 7517 r = __kvm_set_memory_region(kvm, &m); 7495 7518 if (r < 0) 7496 7519 return r; 7520 + } 7521 + 7522 + if (!size) { 7523 + r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); 7524 + WARN_ON(r < 0); 7497 7525 } 7498 7526 7499 7527 return 0; 7500 7528 } 7501 7529 EXPORT_SYMBOL_GPL(__x86_set_memory_region); 7502 7530 7503 - int x86_set_memory_region(struct kvm *kvm, 7504 - const struct kvm_userspace_memory_region *mem) 7531 + int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) 7505 7532 { 7506 7533 int r; 7507 7534 7508 7535 mutex_lock(&kvm->slots_lock); 7509 - r = __x86_set_memory_region(kvm, mem); 7536 + r = __x86_set_memory_region(kvm, id, gpa, size); 7510 7537 mutex_unlock(&kvm->slots_lock); 7511 7538 7512 7539 return r; ··· 7553 7516 * unless the memory map has changed due to process exit 7554 7517 * or fd copying. 
7555 7518 */ 7556 - struct kvm_userspace_memory_region mem; 7557 - memset(&mem, 0, sizeof(mem)); 7558 - mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; 7559 - x86_set_memory_region(kvm, &mem); 7560 - 7561 - mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; 7562 - x86_set_memory_region(kvm, &mem); 7563 - 7564 - mem.slot = TSS_PRIVATE_MEMSLOT; 7565 - x86_set_memory_region(kvm, &mem); 7519 + x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0); 7520 + x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0); 7521 + x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 7566 7522 } 7567 7523 kvm_iommu_unmap_guest(kvm); 7568 7524 kfree(kvm->arch.vpic); ··· 7658 7628 const struct kvm_userspace_memory_region *mem, 7659 7629 enum kvm_mr_change change) 7660 7630 { 7661 - /* 7662 - * Only private memory slots need to be mapped here since 7663 - * KVM_SET_MEMORY_REGION ioctl is no longer supported. 7664 - */ 7665 - if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { 7666 - unsigned long userspace_addr; 7667 - 7668 - /* 7669 - * MAP_SHARED to prevent internal slot pages from being moved 7670 - * by fork()/COW. 
7671 - */ 7672 - userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, 7673 - PROT_READ | PROT_WRITE, 7674 - MAP_SHARED | MAP_ANONYMOUS, 0); 7675 - 7676 - if (IS_ERR((void *)userspace_addr)) 7677 - return PTR_ERR((void *)userspace_addr); 7678 - 7679 - memslot->userspace_addr = userspace_addr; 7680 - } 7681 - 7682 7631 return 0; 7683 7632 } 7684 7633 ··· 7719 7710 { 7720 7711 int nr_mmu_pages = 0; 7721 7712 7722 - if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) { 7723 - int ret; 7724 - 7725 - ret = vm_munmap(old->userspace_addr, 7726 - old->npages * PAGE_SIZE); 7727 - if (ret < 0) 7728 - printk(KERN_WARNING 7729 - "kvm_vm_ioctl_set_memory_region: " 7730 - "failed to munmap memory\n"); 7731 - } 7732 - 7733 7713 if (!kvm->arch.n_requested_mmu_pages) 7734 7714 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 7735 7715 ··· 7767 7769 kvm_mmu_invalidate_zap_all_pages(kvm); 7768 7770 } 7769 7771 7772 + static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 7773 + { 7774 + if (!list_empty_careful(&vcpu->async_pf.done)) 7775 + return true; 7776 + 7777 + if (kvm_apic_has_events(vcpu)) 7778 + return true; 7779 + 7780 + if (vcpu->arch.pv.pv_unhalted) 7781 + return true; 7782 + 7783 + if (atomic_read(&vcpu->arch.nmi_queued)) 7784 + return true; 7785 + 7786 + if (test_bit(KVM_REQ_SMI, &vcpu->requests)) 7787 + return true; 7788 + 7789 + if (kvm_arch_interrupt_allowed(vcpu) && 7790 + kvm_cpu_has_interrupt(vcpu)) 7791 + return true; 7792 + 7793 + return false; 7794 + } 7795 + 7770 7796 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 7771 7797 { 7772 7798 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) 7773 7799 kvm_x86_ops->check_nested_events(vcpu, false); 7774 7800 7775 - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && 7776 - !vcpu->arch.apf.halted) 7777 - || !list_empty_careful(&vcpu->async_pf.done) 7778 - || kvm_apic_has_events(vcpu) 7779 - || vcpu->arch.pv.pv_unhalted 7780 - || atomic_read(&vcpu->arch.nmi_queued) || 7781 - 
(kvm_arch_interrupt_allowed(vcpu) && 7782 - kvm_cpu_has_interrupt(vcpu)); 7801 + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); 7783 7802 } 7784 7803 7785 7804 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)