
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (134 commits)
KVM: ia64: Add intel iommu support for guests.
KVM: ia64: add directed mmio range support for kvm guests
KVM: ia64: Make pmt table be able to hold physical mmio entries.
KVM: Move irqchip_in_kernel() from ioapic.h to irq.h
KVM: Separate irq ack notification out of arch/x86/kvm/irq.c
KVM: Change is_mmio_pfn to kvm_is_mmio_pfn, and make it common for all archs
KVM: Move device assignment logic to common code
KVM: Device Assignment: Move vtd.c from arch/x86/kvm/ to virt/kvm/
KVM: VMX: enable invlpg exiting if EPT is disabled
KVM: x86: Silence various LAPIC-related host kernel messages
KVM: Device Assignment: Map mmio pages into VT-d page table
KVM: PIC: enhance IPI avoidance
KVM: MMU: add "oos_shadow" parameter to disable oos
KVM: MMU: speed up mmu_unsync_walk
KVM: MMU: out of sync shadow core
KVM: MMU: mmu_convert_notrap helper
KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
KVM: MMU: mmu_parent_walk
KVM: x86: trap invlpg
KVM: MMU: sync roots on mmu reload
...

+3450 -1250
+8 -1
MAINTAINERS
··· 2448 2448 2449 2449 KERNEL VIRTUAL MACHINE (KVM) 2450 2450 P: Avi Kivity 2451 - M: avi@qumranet.com 2451 + M: avi@redhat.com 2452 + L: kvm@vger.kernel.org 2453 + W: http://kvm.qumranet.com 2454 + S: Supported 2455 + 2456 + KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V 2457 + P: Joerg Roedel 2458 + M: joerg.roedel@amd.com 2452 2459 L: kvm@vger.kernel.org 2453 2460 W: http://kvm.qumranet.com 2454 2461 S: Supported
+5 -1
arch/ia64/include/asm/kvm_host.h
··· 132 132 #define GPFN_IOSAPIC (4UL << 60) /* IOSAPIC base */ 133 133 #define GPFN_LEGACY_IO (5UL << 60) /* Legacy I/O base */ 134 134 #define GPFN_GFW (6UL << 60) /* Guest Firmware */ 135 - #define GPFN_HIGH_MMIO (7UL << 60) /* High MMIO range */ 135 + #define GPFN_PHYS_MMIO (7UL << 60) /* Directed MMIO Range */ 136 136 137 137 #define GPFN_IO_MASK (7UL << 60) /* Guest pfn is I/O type */ 138 138 #define GPFN_INV_MASK (1UL << 63) /* Guest pfn is invalid */ ··· 413 413 struct kvm_ioapic *vioapic; 414 414 struct kvm_vm_stat stat; 415 415 struct kvm_sal_data rdv_sal_data; 416 + 417 + struct list_head assigned_dev_head; 418 + struct dmar_domain *intel_iommu_domain; 419 + struct hlist_head irq_ack_notifier_list; 416 420 }; 417 421 418 422 union cpuid3_t {
+2
arch/ia64/kvm/Kconfig
··· 46 46 config KVM_TRACE 47 47 bool 48 48 49 + source drivers/virtio/Kconfig 50 + 49 51 endif # VIRTUALIZATION
+5 -1
arch/ia64/kvm/Makefile
··· 44 44 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ 45 45 46 46 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ 47 - coalesced_mmio.o) 47 + coalesced_mmio.o irq_comm.o) 48 + 49 + ifeq ($(CONFIG_DMAR),y) 50 + common-objs += $(addprefix ../../../virt/kvm/, vtd.o) 51 + endif 48 52 49 53 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o 50 54 obj-$(CONFIG_KVM) += kvm.o
+31
arch/ia64/kvm/irq.h
··· 1 + /* 2 + * irq.h: In-kernel interrupt controller related definitions 3 + * Copyright (c) 2008, Intel Corporation. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 16 + * Place - Suite 330, Boston, MA 02111-1307 USA. 17 + * 18 + * Authors: 19 + * Xiantao Zhang <xiantao.zhang@intel.com> 20 + * 21 + */ 22 + 23 + #ifndef __IRQ_H 24 + #define __IRQ_H 25 + 26 + static inline int irqchip_in_kernel(struct kvm *kvm) 27 + { 28 + return 1; 29 + } 30 + 31 + #endif
+58 -8
arch/ia64/kvm/kvm-ia64.c
··· 31 31 #include <linux/bitops.h> 32 32 #include <linux/hrtimer.h> 33 33 #include <linux/uaccess.h> 34 + #include <linux/intel-iommu.h> 34 35 35 36 #include <asm/pgtable.h> 36 37 #include <asm/gcc_intrin.h> ··· 46 45 #include "iodev.h" 47 46 #include "ioapic.h" 48 47 #include "lapic.h" 48 + #include "irq.h" 49 49 50 50 static unsigned long kvm_vmm_base; 51 51 static unsigned long kvm_vsa_base; ··· 181 179 switch (ext) { 182 180 case KVM_CAP_IRQCHIP: 183 181 case KVM_CAP_USER_MEMORY: 182 + case KVM_CAP_MP_STATE: 184 183 185 184 r = 1; 186 185 break; 187 186 case KVM_CAP_COALESCED_MMIO: 188 187 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 188 + break; 189 + case KVM_CAP_IOMMU: 190 + r = intel_iommu_found(); 189 191 break; 190 192 default: 191 193 r = 0; ··· 777 771 */ 778 772 kvm_build_io_pmt(kvm); 779 773 774 + INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 780 775 } 781 776 782 777 struct kvm *kvm_arch_create_vm(void) ··· 1341 1334 1342 1335 void kvm_arch_destroy_vm(struct kvm *kvm) 1343 1336 { 1337 + kvm_iommu_unmap_guest(kvm); 1338 + #ifdef KVM_CAP_DEVICE_ASSIGNMENT 1339 + kvm_free_all_assigned_devices(kvm); 1340 + #endif 1344 1341 kfree(kvm->arch.vioapic); 1345 1342 kvm_release_vm_pages(kvm); 1346 1343 kvm_free_physmem(kvm); ··· 1446 1435 int user_alloc) 1447 1436 { 1448 1437 unsigned long i; 1449 - struct page *page; 1438 + unsigned long pfn; 1450 1439 int npages = mem->memory_size >> PAGE_SHIFT; 1451 1440 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; 1452 1441 unsigned long base_gfn = memslot->base_gfn; 1453 1442 1454 1443 for (i = 0; i < npages; i++) { 1455 - page = gfn_to_page(kvm, base_gfn + i); 1456 - kvm_set_pmt_entry(kvm, base_gfn + i, 1457 - page_to_pfn(page) << PAGE_SHIFT, 1458 - _PAGE_AR_RWX|_PAGE_MA_WB); 1459 - memslot->rmap[i] = (unsigned long)page; 1444 + pfn = gfn_to_pfn(kvm, base_gfn + i); 1445 + if (!kvm_is_mmio_pfn(pfn)) { 1446 + kvm_set_pmt_entry(kvm, base_gfn + i, 1447 + pfn << PAGE_SHIFT, 1448 + _PAGE_AR_RWX | _PAGE_MA_WB); 1449 + memslot->rmap[i] = (unsigned long)pfn_to_page(pfn); 1450 + } else { 1451 + kvm_set_pmt_entry(kvm, base_gfn + i, 1452 + GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT), 1453 + _PAGE_MA_UC); 1454 + memslot->rmap[i] = 0; 1455 + } 1460 1456 } 1461 1457 1462 1458 return 0; ··· 1807 1789 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 1808 1790 struct kvm_mp_state *mp_state) 1809 1791 { 1810 - return -EINVAL; 1792 + vcpu_load(vcpu); 1793 + mp_state->mp_state = vcpu->arch.mp_state; 1794 + vcpu_put(vcpu); 1795 + return 0; 1796 + } 1797 + 1798 + static int vcpu_reset(struct kvm_vcpu *vcpu) 1799 + { 1800 + int r; 1801 + long psr; 1802 + local_irq_save(psr); 1803 + r = kvm_insert_vmm_mapping(vcpu); 1804 + if (r) 1805 + goto fail; 1806 + 1807 + vcpu->arch.launched = 0; 1808 + kvm_arch_vcpu_uninit(vcpu); 1809 + r = kvm_arch_vcpu_init(vcpu); 1810 + if (r) 1811 + goto fail; 1812 + 1813 + kvm_purge_vmm_mapping(vcpu); 1814 + r = 0; 1815 + fail: 1816 + local_irq_restore(psr); 1817 + return r; 1811 1818 } 1812 1819 1813 1820 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 1814 1821 struct kvm_mp_state *mp_state) 1815 1822 { 1816 - return -EINVAL; 1823 + int r = 0; 1824 + 1825 + vcpu_load(vcpu); 1826 + vcpu->arch.mp_state = mp_state->mp_state; 1827 + if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED) 1828 + r = vcpu_reset(vcpu); 1829 + vcpu_put(vcpu); 1830 + return r; 1817 1831 }
+8 -17
arch/ia64/kvm/kvm_minstate.h
··· 50 50 51 51 #define PAL_VSA_SYNC_READ \ 52 52 /* begin to call pal vps sync_read */ \ 53 + {.mii; \ 53 54 add r25 = VMM_VPD_BASE_OFFSET, r21; \ 54 - adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21; /* entry point */ \ 55 - ;; \ 56 - ld8 r25 = [r25]; /* read vpd base */ \ 57 - ld8 r20 = [r20]; \ 58 - ;; \ 59 - add r20 = PAL_VPS_SYNC_READ,r20; \ 60 - ;; \ 61 - { .mii; \ 62 55 nop 0x0; \ 63 - mov r24 = ip; \ 64 - mov b0 = r20; \ 56 + mov r24=ip; \ 57 + ;; \ 58 + } \ 59 + {.mmb \ 60 + add r24=0x20, r24; \ 61 + ld8 r25 = [r25]; /* read vpd base */ \ 62 + br.cond.sptk kvm_vps_sync_read; /*call the service*/ \ 65 63 ;; \ 66 64 }; \ 67 - { .mmb; \ 68 - add r24 = 0x20, r24; \ 69 - nop 0x0; \ 70 - br.cond.sptk b0; /* call the service */ \ 71 - ;; \ 72 - }; 73 - 74 65 75 66 76 67 #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
+160 -21
arch/ia64/kvm/optvfault.S
··· 1 1 /* 2 - * arch/ia64/vmx/optvfault.S 2 + * arch/ia64/kvm/optvfault.S 3 3 * optimize virtualization fault handler 4 4 * 5 5 * Copyright (C) 2006 Intel Co 6 6 * Xuefei Xu (Anthony Xu) <anthony.xu@intel.com> 7 + * Copyright (C) 2008 Intel Co 8 + * Add the support for Tukwila processors. 9 + * Xiantao Zhang <xiantao.zhang@intel.com> 7 10 */ 8 11 9 12 #include <asm/asmmacro.h> ··· 22 19 #define ACCE_SSM 23 20 #define ACCE_MOV_TO_PSR 24 21 #define ACCE_THASH 22 + 23 + #define VMX_VPS_SYNC_READ \ 24 + add r16=VMM_VPD_BASE_OFFSET,r21; \ 25 + mov r17 = b0; \ 26 + mov r18 = r24; \ 27 + mov r19 = r25; \ 28 + mov r20 = r31; \ 29 + ;; \ 30 + {.mii; \ 31 + ld8 r16 = [r16]; \ 32 + nop 0x0; \ 33 + mov r24 = ip; \ 34 + ;; \ 35 + }; \ 36 + {.mmb; \ 37 + add r24=0x20, r24; \ 38 + mov r25 =r16; \ 39 + br.sptk.many kvm_vps_sync_read; \ 40 + }; \ 41 + mov b0 = r17; \ 42 + mov r24 = r18; \ 43 + mov r25 = r19; \ 44 + mov r31 = r20 45 + 46 + ENTRY(kvm_vps_entry) 47 + adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21 48 + ;; 49 + ld8 r29 = [r29] 50 + ;; 51 + add r29 = r29, r30 52 + ;; 53 + mov b0 = r29 54 + br.sptk.many b0 55 + END(kvm_vps_entry) 56 + 57 + /* 58 + * Inputs: 59 + * r24 : return address 60 + * r25 : vpd 61 + * r29 : scratch 62 + * 63 + */ 64 + GLOBAL_ENTRY(kvm_vps_sync_read) 65 + movl r30 = PAL_VPS_SYNC_READ 66 + ;; 67 + br.sptk.many kvm_vps_entry 68 + END(kvm_vps_sync_read) 69 + 70 + /* 71 + * Inputs: 72 + * r24 : return address 73 + * r25 : vpd 74 + * r29 : scratch 75 + * 76 + */ 77 + GLOBAL_ENTRY(kvm_vps_sync_write) 78 + movl r30 = PAL_VPS_SYNC_WRITE 79 + ;; 80 + br.sptk.many kvm_vps_entry 81 + END(kvm_vps_sync_write) 82 + 83 + /* 84 + * Inputs: 85 + * r23 : pr 86 + * r24 : guest b0 87 + * r25 : vpd 88 + * 89 + */ 90 + GLOBAL_ENTRY(kvm_vps_resume_normal) 91 + movl r30 = PAL_VPS_RESUME_NORMAL 92 + ;; 93 + mov pr=r23,-2 94 + br.sptk.many kvm_vps_entry 95 + END(kvm_vps_resume_normal) 96 + 97 + /* 98 + * Inputs: 99 + * r23 : pr 100 + * r24 : guest b0 101 + * r25 : vpd 102 + * r17 : isr 103 + */ 104 + GLOBAL_ENTRY(kvm_vps_resume_handler) 105 + movl r30 = PAL_VPS_RESUME_HANDLER 106 + ;; 107 + ld8 r27=[r25] 108 + shr r17=r17,IA64_ISR_IR_BIT 109 + ;; 110 + dep r27=r17,r27,63,1 // bit 63 of r27 indicate whether enable CFLE 111 + mov pr=r23,-2 112 + br.sptk.many kvm_vps_entry 113 + END(kvm_vps_resume_handler) 25 114 26 115 //mov r1=ar3 27 116 GLOBAL_ENTRY(kvm_asm_mov_from_ar) ··· 252 157 #ifndef ACCE_RSM 253 158 br.many kvm_virtualization_fault_back 254 159 #endif 255 - add r16=VMM_VPD_BASE_OFFSET,r21 160 + VMX_VPS_SYNC_READ 161 + ;; 256 162 extr.u r26=r25,6,21 257 163 extr.u r27=r25,31,2 258 164 ;; 259 - ld8 r16=[r16] 260 165 extr.u r28=r25,36,1 261 166 dep r26=r27,r26,21,2 262 167 ;; ··· 291 196 tbit.nz p6,p0=r23,0 292 197 ;; 293 198 tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT 294 - (p6) br.dptk kvm_resume_to_guest 199 + (p6) br.dptk kvm_resume_to_guest_with_sync 295 200 ;; 296 201 add r26=VMM_VCPU_META_RR0_OFFSET,r21 297 202 add r27=VMM_VCPU_META_RR0_OFFSET+8,r21 ··· 307 212 mov rr[r28]=r27 308 213 ;; 309 214 srlz.d 310 - br.many kvm_resume_to_guest 215 + br.many kvm_resume_to_guest_with_sync 311 216 END(kvm_asm_rsm) 312 217 313 218 ··· 316 221 #ifndef ACCE_SSM 317 222 br.many kvm_virtualization_fault_back 318 223 #endif 319 - add r16=VMM_VPD_BASE_OFFSET,r21 224 + VMX_VPS_SYNC_READ 225 + ;; 320 226 extr.u r26=r25,6,21 321 227 extr.u r27=r25,31,2 322 228 ;; 323 - ld8 r16=[r16] 324 229 extr.u r28=r25,36,1 325 230 dep r26=r27,r26,21,2 326 231 ;; //r26 is imm24 ··· 366 271 tbit.nz p6,p0=r29,IA64_PSR_I_BIT 367 272 ;; 368 
273 tbit.z.or p6,p0=r19,IA64_PSR_I_BIT 369 - (p6) br.dptk kvm_resume_to_guest 274 + (p6) br.dptk kvm_resume_to_guest_with_sync 370 275 ;; 371 276 add r29=VPD_VTPR_START_OFFSET,r16 372 277 add r30=VPD_VHPI_START_OFFSET,r16 ··· 381 286 ;; 382 287 cmp.gt p6,p0=r30,r17 383 288 (p6) br.dpnt.few kvm_asm_dispatch_vexirq 384 - br.many kvm_resume_to_guest 289 + br.many kvm_resume_to_guest_with_sync 385 290 END(kvm_asm_ssm) 386 291 387 292 ··· 390 295 #ifndef ACCE_MOV_TO_PSR 391 296 br.many kvm_virtualization_fault_back 392 297 #endif 393 - add r16=VMM_VPD_BASE_OFFSET,r21 394 - extr.u r26=r25,13,7 //r2 298 + VMX_VPS_SYNC_READ 395 299 ;; 396 - ld8 r16=[r16] 300 + extr.u r26=r25,13,7 //r2 397 301 addl r20=@gprel(asm_mov_from_reg),gp 398 302 ;; 399 303 adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20 ··· 468 374 ;; 469 375 tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT 470 376 tbit.z.or p6,p0=r30,IA64_PSR_I_BIT 471 - (p6) br.dpnt.few kvm_resume_to_guest 377 + (p6) br.dpnt.few kvm_resume_to_guest_with_sync 472 378 ;; 473 379 add r29=VPD_VTPR_START_OFFSET,r16 474 380 add r30=VPD_VHPI_START_OFFSET,r16 ··· 483 389 ;; 484 390 cmp.gt p6,p0=r30,r17 485 391 (p6) br.dpnt.few kvm_asm_dispatch_vexirq 486 - br.many kvm_resume_to_guest 392 + br.many kvm_resume_to_guest_with_sync 487 393 END(kvm_asm_mov_to_psr) 488 394 489 395 490 396 ENTRY(kvm_asm_dispatch_vexirq) 491 397 //increment iip 398 + mov r17 = b0 399 + mov r18 = r31 400 + {.mii 401 + add r25=VMM_VPD_BASE_OFFSET,r21 402 + nop 0x0 403 + mov r24 = ip 404 + ;; 405 + } 406 + {.mmb 407 + add r24 = 0x20, r24 408 + ld8 r25 = [r25] 409 + br.sptk.many kvm_vps_sync_write 410 + } 411 + mov b0 =r17 492 412 mov r16=cr.ipsr 413 + mov r31 = r18 414 + mov r19 = 37 493 415 ;; 494 416 extr.u r17=r16,IA64_PSR_RI_BIT,2 495 417 tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1 ··· 545 435 ;; 546 436 kvm_asm_thash_back1: 547 437 shr.u r23=r19,61 // get RR number 548 - adds r25=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr 438 + adds r28=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr 549 439 adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta 550 440 ;; 551 - shladd r27=r23,3,r25 // get vcpu->arch.vrr[r23]'s addr 441 + shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr 552 442 ld8 r17=[r16] // get PTA 553 443 mov r26=1 554 444 ;; 555 - extr.u r29=r17,2,6 // get pta.size 556 - ld8 r25=[r27] // get vcpu->arch.vrr[r23]'s value 445 + extr.u r29=r17,2,6 // get pta.size 446 + ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value 557 447 ;; 558 - extr.u r25=r25,2,6 // get rr.ps 448 + mov b0=r24 449 + //Fallback to C if pta.vf is set 450 + tbit.nz p6,p0=r17, 8 451 + ;; 452 + (p6) mov r24=EVENT_THASH 453 + (p6) br.cond.dpnt.many kvm_virtualization_fault_back 454 + extr.u r28=r28,2,6 // get rr.ps 559 455 shl r22=r26,r29 // 1UL << pta.size 560 456 ;; 561 - shr.u r23=r19,r25 // vaddr >> rr.ps 457 + shr.u r23=r19,r28 // vaddr >> rr.ps 562 458 adds r26=3,r29 // pta.size + 3 563 459 shl r27=r17,3 // pta << 3 564 460 ;; 565 461 shl r23=r23,3 // (vaddr >> rr.ps) << 3 566 - shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3) 462 + shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3) 567 463 movl r16=7<<61 568 464 ;; 569 465 adds r22=-1,r22 // (1UL << pta.size) - 1 ··· 840 724 * r31: pr 841 725 * r24: b0 842 726 */ 727 + ENTRY(kvm_resume_to_guest_with_sync) 728 + adds r19=VMM_VPD_BASE_OFFSET,r21 729 + mov r16 = r31 730 + mov r17 = r24 731 + ;; 732 + {.mii 733 + ld8 r25 =[r19] 734 + nop 0x0 735 + mov r24 = ip 736 + ;; 737 + } 738 + {.mmb 739 + add r24 =0x20, r24 740 + nop 0x0 741 + br.sptk.many kvm_vps_sync_write 
742 + } 743 + 744 + mov r31 = r16 745 + mov r24 =r17 746 + ;; 747 + br.sptk.many kvm_resume_to_guest 748 + END(kvm_resume_to_guest_with_sync) 749 + 843 750 ENTRY(kvm_resume_to_guest) 844 751 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 845 752 ;;
+2 -2
arch/ia64/kvm/process.c
··· 962 962 void vmm_transition(struct kvm_vcpu *vcpu) 963 963 { 964 964 ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd, 965 - 0, 0, 0, 0, 0, 0); 965 + 1, 0, 0, 0, 0, 0); 966 966 vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host); 967 967 ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd, 968 - 0, 0, 0, 0, 0, 0); 968 + 1, 0, 0, 0, 0, 0); 969 969 kvm_do_resume_op(vcpu); 970 970 }
+13 -13
arch/ia64/kvm/vcpu.h
··· 313 313 trp->rid = rid; 314 314 } 315 315 316 - extern u64 kvm_lookup_mpa(u64 gpfn); 317 - extern u64 kvm_gpa_to_mpa(u64 gpa); 316 + extern u64 kvm_get_mpt_entry(u64 gpfn); 318 317 319 - /* Return I/O type if trye */ 320 - #define __gpfn_is_io(gpfn) \ 321 - ({ \ 322 - u64 pte, ret = 0; \ 323 - pte = kvm_lookup_mpa(gpfn); \ 324 - if (!(pte & GPFN_INV_MASK)) \ 325 - ret = pte & GPFN_IO_MASK; \ 326 - ret; \ 327 - }) 328 - 318 + /* Return I/ */ 319 + static inline u64 __gpfn_is_io(u64 gpfn) 320 + { 321 + u64 pte; 322 + pte = kvm_get_mpt_entry(gpfn); 323 + if (!(pte & GPFN_INV_MASK)) { 324 + pte = pte & GPFN_IO_MASK; 325 + if (pte != GPFN_PHYS_MMIO) 326 + return pte; 327 + } 328 + return 0; 329 + } 329 330 #endif 330 - 331 331 #define IA64_NO_FAULT 0 332 332 #define IA64_FAULT 1 333 333
+6 -33
arch/ia64/kvm/vmm_ivt.S
··· 1261 1261 adds r19=VMM_VPD_VPSR_OFFSET,r18 1262 1262 ;; 1263 1263 ld8 r19=[r19] //vpsr 1264 - adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21 1265 - ;; 1266 - ld8 r20=[r20] 1267 - ;; 1268 - //vsa_sync_write_start 1269 1264 mov r25=r18 1270 1265 adds r16= VMM_VCPU_GP_OFFSET,r21 1271 1266 ;; ··· 1269 1274 ;; 1270 1275 add r24=r24,r16 1271 1276 ;; 1272 - add r16=PAL_VPS_SYNC_WRITE,r20 1273 - ;; 1274 - mov b0=r16 1275 - br.cond.sptk b0 // call the service 1277 + br.sptk.many kvm_vps_sync_write // call the service 1276 1278 ;; 1277 1279 END(ia64_leave_hypervisor) 1278 1280 // fall through ··· 1280 1288 * r17:cr.isr 1281 1289 * r18:vpd 1282 1290 * r19:vpsr 1283 - * r20:__vsa_base 1284 1291 * r22:b0 1285 1292 * r23:predicate 1286 1293 */ 1287 1294 mov r24=r22 1288 1295 mov r25=r18 1289 1296 tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic 1297 + (p1) br.cond.sptk.few kvm_vps_resume_normal 1298 + (p2) br.cond.sptk.many kvm_vps_resume_handler 1290 1299 ;; 1291 - (p1) add r29=PAL_VPS_RESUME_NORMAL,r20 1292 - (p1) br.sptk.many ia64_vmm_entry_out 1293 - ;; 1294 - tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT //p1=cr.isr.ir 1295 - ;; 1296 - (p1) add r29=PAL_VPS_RESUME_NORMAL,r20 1297 - (p2) add r29=PAL_VPS_RESUME_HANDLER,r20 1298 - (p2) ld8 r26=[r25] 1299 - ;; 1300 - ia64_vmm_entry_out: 1301 - mov pr=r23,-2 1302 - mov b0=r29 1303 - ;; 1304 - br.cond.sptk b0 // call pal service 1305 1300 END(ia64_vmm_entry) 1306 1301 1307 1302 ··· 1355 1376 //set up ipsr, iip, vpd.vpsr, dcr 1356 1377 // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 1357 1378 // For DCR: all bits 0 1379 + bsw.0 1380 + ;; 1381 + mov r21 =r13 1358 1382 adds r14=-VMM_PT_REGS_SIZE, r12 1359 1383 ;; 1360 1384 movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 ··· 1368 1386 rsm psr.ic | psr.i 1369 1387 ;; 1370 1388 srlz.i 1371 - ;; 1372 - bsw.0 1373 - ;; 1374 - mov r21 =r13 1375 - ;; 1376 - bsw.1 1377 1389 ;; 1378 1390 mov ar.rsc = 0 1379 1391 ;; ··· 1382 1406 ld8 r1 = [r20] 1383 1407 ;; 1384 1408 mov cr.iip=r4 1385 - ;; 1386 1409 adds r16=VMM_VPD_BASE_OFFSET,r13 1387 - adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13 1388 1410 ;; 1389 1411 ld8 r18=[r16] 1390 - ld8 r20=[r20] 1391 1412 ;; 1392 1413 adds r19=VMM_VPD_VPSR_OFFSET,r18 1393 1414 ;;
+17 -6
arch/ia64/kvm/vtlb.c
··· 390 390 391 391 u64 translate_phy_pte(u64 *pte, u64 itir, u64 va) 392 392 { 393 - u64 ps, ps_mask, paddr, maddr; 393 + u64 ps, ps_mask, paddr, maddr, io_mask; 394 394 union pte_flags phy_pte; 395 395 396 396 ps = itir_ps(itir); ··· 398 398 phy_pte.val = *pte; 399 399 paddr = *pte; 400 400 paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask); 401 - maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT); 402 - if (maddr & GPFN_IO_MASK) { 401 + maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT); 402 + io_mask = maddr & GPFN_IO_MASK; 403 + if (io_mask && (io_mask != GPFN_PHYS_MMIO)) { 403 404 *pte |= VTLB_PTE_IO; 404 405 return -1; 405 406 } ··· 419 418 u64 ifa, int type) 420 419 { 421 420 u64 ps; 422 - u64 phy_pte; 421 + u64 phy_pte, io_mask, index; 423 422 union ia64_rr vrr, mrr; 424 423 int ret = 0; 425 424 ··· 427 426 vrr.val = vcpu_get_rr(v, ifa); 428 427 mrr.val = ia64_get_rr(ifa); 429 428 429 + index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT; 430 + io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK; 430 431 phy_pte = translate_phy_pte(&pte, itir, ifa); 431 432 432 433 /* Ensure WB attribute if pte is related to a normal mem page, 433 434 * which is required by vga acceleration since qemu maps shared 434 435 * vram buffer with WB. 435 436 */ 436 - if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) { 437 + if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) && 438 + io_mask != GPFN_PHYS_MMIO) { 437 439 pte &= ~_PAGE_MA_MASK; 438 440 phy_pte &= ~_PAGE_MA_MASK; 439 441 } ··· 570 566 } 571 567 } 572 568 573 - u64 kvm_lookup_mpa(u64 gpfn) 569 + u64 kvm_get_mpt_entry(u64 gpfn) 574 570 { 575 571 u64 *base = (u64 *) KVM_P2M_BASE; 576 572 return *(base + gpfn); 573 + } 574 + 575 + u64 kvm_lookup_mpa(u64 gpfn) 576 + { 577 + u64 maddr; 578 + maddr = kvm_get_mpt_entry(gpfn); 579 + return maddr&_PAGE_PPN_MASK; 577 580 } 578 581 579 582 u64 kvm_gpa_to_mpa(u64 gpa)
+12 -2
arch/powerpc/include/asm/kvm_host.h
··· 81 81 struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; 82 82 /* Pages which are referenced in the shadow TLB. */ 83 83 struct page *shadow_pages[PPC44x_TLB_SIZE]; 84 - /* Copy of the host's TLB. */ 85 - struct tlbe host_tlb[PPC44x_TLB_SIZE]; 84 + 85 + /* Track which TLB entries we've modified in the current exit. */ 86 + u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; 86 87 87 88 u32 host_stack; 88 89 u32 host_pid; 90 + u32 host_dbcr0; 91 + u32 host_dbcr1; 92 + u32 host_dbcr2; 93 + u32 host_iac[4]; 94 + u32 host_msr; 89 95 90 96 u64 fpr[32]; 91 97 u32 gpr[32]; ··· 129 123 u32 ivor[16]; 130 124 u32 ivpr; 131 125 u32 pir; 126 + 127 + u32 shadow_pid; 132 128 u32 pid; 129 + u32 swap_pid; 130 + 133 131 u32 pvr; 134 132 u32 ccr0; 135 133 u32 ccr1;
+12
arch/powerpc/include/asm/kvm_ppc.h
··· 64 64 extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 65 65 gva_t eend, u32 asid); 66 66 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); 67 + extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); 68 + 69 + /* XXX Book E specific */ 70 + extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); 67 71 68 72 extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); 69 73 ··· 94 90 95 91 if (vcpu->arch.msr & MSR_WE) 96 92 kvm_vcpu_block(vcpu); 93 + } 94 + 95 + static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) 96 + { 97 + if (vcpu->arch.pid != new_pid) { 98 + vcpu->arch.pid = new_pid; 99 + vcpu->arch.swap_pid = 1; 100 + } 97 101 } 98 102 99 103 #endif /* __POWERPC_KVM_PPC_H__ */
+2 -2
arch/powerpc/kernel/asm-offsets.c
··· 359 359 360 360 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 361 361 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 362 - DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb)); 363 362 DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb)); 363 + DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod)); 364 364 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 365 365 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); 366 366 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); ··· 372 372 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 373 373 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); 374 374 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 375 - DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid)); 375 + DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 376 376 377 377 DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); 378 378 DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+36 -17
arch/powerpc/kvm/44x_tlb.c
··· 19 19 20 20 #include <linux/types.h> 21 21 #include <linux/string.h> 22 + #include <linux/kvm.h> 22 23 #include <linux/kvm_host.h> 23 24 #include <linux/highmem.h> 24 25 #include <asm/mmu-44x.h> ··· 110 109 return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); 111 110 } 112 111 113 - /* Must be called with mmap_sem locked for writing. */ 114 112 static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, 115 113 unsigned int index) 116 114 { ··· 122 122 else 123 123 kvm_release_page_clean(page); 124 124 } 125 + } 126 + 127 + void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i) 128 + { 129 + vcpu->arch.shadow_tlb_mod[i] = 1; 125 130 } 126 131 127 132 /* Caller must ensure that the specified guest TLB entry is safe to insert into ··· 147 142 stlbe = &vcpu->arch.shadow_tlb[victim]; 148 143 149 144 /* Get reference to new page. */ 150 - down_read(&current->mm->mmap_sem); 151 145 new_page = gfn_to_page(vcpu->kvm, gfn); 152 146 if (is_error_page(new_page)) { 153 147 printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); 154 148 kvm_release_page_clean(new_page); 155 - up_read(&current->mm->mmap_sem); 156 149 return; 157 150 } 158 151 hpaddr = page_to_phys(new_page); 159 152 160 153 /* Drop reference to old page. */ 161 154 kvmppc_44x_shadow_release(vcpu, victim); 162 - up_read(&current->mm->mmap_sem); 163 155 164 156 vcpu->arch.shadow_pages[victim] = new_page; 165 157 ··· 166 164 167 165 /* XXX what about AS? */ 168 166 169 - stlbe->tid = asid & 0xff; 167 + stlbe->tid = !(asid & 0xff); 170 168 171 169 /* Force TS=1 for all guest mappings. */ 172 170 /* For now we hardcode 4KB mappings, but it will be important to 173 171 * use host large pages in the future. */ 174 172 stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS 175 173 | PPC44x_TLB_4K; 176 - 177 174 stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); 178 175 stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags, 179 176 vcpu->arch.msr & MSR_PR); 177 + kvmppc_tlbe_set_modified(vcpu, victim); 178 + 179 + KVMTRACE_5D(STLB_WRITE, vcpu, victim, 180 + stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2, 181 + handler); 180 182 } 181 183 182 184 void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 183 185 gva_t eend, u32 asid) 184 186 { 185 - unsigned int pid = asid & 0xff; 187 + unsigned int pid = !(asid & 0xff); 186 188 int i; 187 189 188 190 /* XXX Replace loop with fancy data structures. */ 189 - down_write(&current->mm->mmap_sem); 190 191 for (i = 0; i <= tlb_44x_hwater; i++) { 191 192 struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; 192 193 unsigned int tid; ··· 209 204 210 205 kvmppc_44x_shadow_release(vcpu, i); 211 206 stlbe->word0 = 0; 207 + kvmppc_tlbe_set_modified(vcpu, i); 208 + KVMTRACE_5D(STLB_INVAL, vcpu, i, 209 + stlbe->tid, stlbe->word0, stlbe->word1, 210 + stlbe->word2, handler); 212 211 } 213 - up_write(&current->mm->mmap_sem); 214 212 } 215 213 216 - /* Invalidate all mappings, so that when they fault back in they will get the 217 - * proper permission bits. */ 214 + /* Invalidate all mappings on the privilege switch after PID has been changed. 215 + * The guest always runs with PID=1, so we must clear the entire TLB when 216 + * switching address spaces. */ 218 217 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) 219 218 { 220 219 int i; 221 220 222 - /* XXX Replace loop with fancy data structures. 
*/ 223 - down_write(&current->mm->mmap_sem); 224 - for (i = 0; i <= tlb_44x_hwater; i++) { 225 - kvmppc_44x_shadow_release(vcpu, i); 226 - vcpu->arch.shadow_tlb[i].word0 = 0; 221 + if (vcpu->arch.swap_pid) { 222 + /* XXX Replace loop with fancy data structures. */ 223 + for (i = 0; i <= tlb_44x_hwater; i++) { 224 + struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; 225 + 226 + /* Future optimization: clear only userspace mappings. */ 227 + kvmppc_44x_shadow_release(vcpu, i); 228 + stlbe->word0 = 0; 229 + kvmppc_tlbe_set_modified(vcpu, i); 230 + KVMTRACE_5D(STLB_INVAL, vcpu, i, 231 + stlbe->tid, stlbe->word0, stlbe->word1, 232 + stlbe->word2, handler); 233 + } 234 + vcpu->arch.swap_pid = 0; 227 235 } 228 - up_write(&current->mm->mmap_sem); 236 + 237 + vcpu->arch.shadow_pid = !usermode; 229 238 }
+11
arch/powerpc/kvm/Kconfig
··· 37 37 Provides host support for KVM on Book E PowerPC processors. Currently 38 38 this works on 440 processors only. 39 39 40 + config KVM_TRACE 41 + bool "KVM trace support" 42 + depends on KVM && MARKERS && SYSFS 43 + select RELAY 44 + select DEBUG_FS 45 + default n 46 + ---help--- 47 + This option allows reading a trace of kvm-related events through 48 + relayfs. Note the ABI is not considered stable and will be 49 + modified in future updates. 50 + 40 51 source drivers/virtio/Kconfig 41 52 42 53 endif # VIRTUALIZATION
+4 -2
arch/powerpc/kvm/Makefile
··· 4 4 5 5 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm 6 6 7 - common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 7 + common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 8 8 9 - kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o 9 + common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) 10 + 11 + kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o 10 12 obj-$(CONFIG_KVM) += kvm.o 11 13 12 14 AFLAGS_booke_interrupts.o := -I$(obj)
+17
arch/powerpc/kvm/booke_guest.c
··· 410 410 break; 411 411 } 412 412 413 + case BOOKE_INTERRUPT_DEBUG: { 414 + u32 dbsr; 415 + 416 + vcpu->arch.pc = mfspr(SPRN_CSRR0); 417 + 418 + /* clear IAC events in DBSR register */ 419 + dbsr = mfspr(SPRN_DBSR); 420 + dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4; 421 + mtspr(SPRN_DBSR, dbsr); 422 + 423 + run->exit_reason = KVM_EXIT_DEBUG; 424 + r = RESUME_HOST; 425 + break; 426 + } 427 + 413 428 default: 414 429 printk(KERN_EMERG "exit_nr %d\n", exit_nr); 415 430 BUG(); ··· 485 470 vcpu->arch.pc = 0; 486 471 vcpu->arch.msr = 0; 487 472 vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ 473 + 474 + vcpu->arch.shadow_pid = 1; 488 475 489 476 /* Eye-catching number so we know if the guest takes an interrupt 490 477 * before it's programmed its own IVPR. */
+49 -34
arch/powerpc/kvm/booke_interrupts.S
··· 42 42 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ 43 43 44 44 #define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \ 45 - (1<<BOOKE_INTERRUPT_DTLB_MISS)) 45 + (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ 46 + (1<<BOOKE_INTERRUPT_DEBUG)) 46 47 47 48 #define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ 48 49 (1<<BOOKE_INTERRUPT_DTLB_MISS)) ··· 332 331 333 332 mfspr r3, SPRN_PID 334 333 stw r3, VCPU_HOST_PID(r4) 335 - lwz r3, VCPU_PID(r4) 334 + lwz r3, VCPU_SHADOW_PID(r4) 336 335 mtspr SPRN_PID, r3 337 336 338 - /* Prevent all TLB updates. */ 337 + /* Prevent all asynchronous TLB updates. */ 339 338 mfmsr r5 340 339 lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h 341 340 ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l 342 341 andc r6, r5, r6 343 342 mtmsr r6 344 343 345 - /* Save the host's non-pinned TLB mappings, and load the guest mappings 346 - * over them. Leave the host's "pinned" kernel mappings in place. */ 347 - /* XXX optimization: use generation count to avoid swapping unmodified 348 - * entries. */ 344 + /* Load the guest mappings, leaving the host's "pinned" kernel mappings 345 + * in place. */ 349 346 mfspr r10, SPRN_MMUCR /* Save host MMUCR. */ 350 - lis r8, tlb_44x_hwater@ha 351 - lwz r8, tlb_44x_hwater@l(r8) 352 - addi r3, r4, VCPU_HOST_TLB - 4 353 - addi r9, r4, VCPU_SHADOW_TLB - 4 347 + li r5, PPC44x_TLB_SIZE 348 + lis r5, tlb_44x_hwater@ha 349 + lwz r5, tlb_44x_hwater@l(r5) 350 + mtctr r5 351 + addi r9, r4, VCPU_SHADOW_TLB 352 + addi r5, r4, VCPU_SHADOW_MOD 353 + li r3, 0 354 + 1: 355 + lbzx r7, r3, r5 356 + cmpwi r7, 0 357 + beq 3f 358 + 359 + /* Load guest entry. */ 360 + mulli r11, r3, TLBE_BYTES 361 + add r11, r11, r9 362 + lwz r7, 0(r11) 363 + mtspr SPRN_MMUCR, r7 364 + lwz r7, 4(r11) 365 + tlbwe r7, r3, PPC44x_TLB_PAGEID 366 + lwz r7, 8(r11) 367 + tlbwe r7, r3, PPC44x_TLB_XLAT 368 + lwz r7, 12(r11) 369 + tlbwe r7, r3, PPC44x_TLB_ATTRIB 370 + 3: 371 + addi r3, r3, 1 /* Increment index. */ 372 + bdnz 1b 373 + 374 + mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */ 375 + 376 + /* Clear bitmap of modified TLB entries */ 377 + li r5, PPC44x_TLB_SIZE>>2 378 + mtctr r5 379 + addi r5, r4, VCPU_SHADOW_MOD - 4 354 380 li r6, 0 355 381 1: 356 - /* Save host entry. */ 357 - tlbre r7, r6, PPC44x_TLB_PAGEID 358 - mfspr r5, SPRN_MMUCR 359 - stwu r5, 4(r3) 360 - stwu r7, 4(r3) 361 - tlbre r7, r6, PPC44x_TLB_XLAT 362 - stwu r7, 4(r3) 363 - tlbre r7, r6, PPC44x_TLB_ATTRIB 364 - stwu r7, 4(r3) 365 - /* Load guest entry. */ 366 - lwzu r7, 4(r9) 367 - mtspr SPRN_MMUCR, r7 368 - lwzu r7, 4(r9) 369 - tlbwe r7, r6, PPC44x_TLB_PAGEID 370 - lwzu r7, 4(r9) 371 - tlbwe r7, r6, PPC44x_TLB_XLAT 372 - lwzu r7, 4(r9) 373 - tlbwe r7, r6, PPC44x_TLB_ATTRIB 374 - /* Increment index. */ 375 - addi r6, r6, 1 376 - cmpw r6, r8 377 - blt 1b 378 - mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */ 382 + stwu r6, 4(r5) 383 + bdnz 1b 379 384 380 385 iccci 0, 0 /* XXX hack */ 381 386 ··· 438 431 oris r3, r3, KVMPPC_MSR_MASK@h 439 432 ori r3, r3, KVMPPC_MSR_MASK@l 440 433 mtsrr1 r3 434 + 435 + /* Clear any debug events which occurred since we disabled MSR[DE]. 436 + * XXX This gives us a 3-instruction window in which a breakpoint 437 + * intended for guest context could fire in the host instead. */ 438 + lis r3, 0xffff 439 + ori r3, r3, 0xffff 440 + mtspr SPRN_DBSR, r3 441 + 441 442 lwz r3, VCPU_GPR(r3)(r4) 442 443 lwz r4, VCPU_GPR(r4)(r4) 443 444 rfi
+7 -1
arch/powerpc/kvm/emulate.c
··· 170 170 kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags); 171 171 } 172 172 173 + KVMTRACE_5D(GTLB_WRITE, vcpu, index, 174 + tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2, 175 + handler); 176 + 173 177 return EMULATE_DONE; 174 178 } 175 179 ··· 508 504 case SPRN_MMUCR: 509 505 vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; 510 506 case SPRN_PID: 511 - vcpu->arch.pid = vcpu->arch.gpr[rs]; break; 507 + kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; 512 508 case SPRN_CCR0: 513 509 vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; 514 510 case SPRN_CCR1: ··· 768 764 emulated = EMULATE_FAIL; 769 765 break; 770 766 } 767 + 768 + KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit); 771 769 772 770 if (advance) 773 771 vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+98 -1
arch/powerpc/kvm/powerpc.c
··· 27 27 #include <asm/cputable.h> 28 28 #include <asm/uaccess.h> 29 29 #include <asm/kvm_ppc.h> 30 + #include <asm/tlbflush.h> 30 31 31 32 32 33 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) ··· 240 239 { 241 240 } 242 241 242 + /* Note: clearing MSR[DE] just means that the debug interrupt will not be 243 + * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits. 244 + * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt 245 + * will be delivered as an "imprecise debug event" (which is indicated by 246 + * DBSR[IDE]. 247 + */ 248 + static void kvmppc_disable_debug_interrupts(void) 249 + { 250 + mtmsr(mfmsr() & ~MSR_DE); 251 + } 252 + 253 + static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu) 254 + { 255 + kvmppc_disable_debug_interrupts(); 256 + 257 + mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]); 258 + mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]); 259 + mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]); 260 + mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]); 261 + mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1); 262 + mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2); 263 + mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0); 264 + mtmsr(vcpu->arch.host_msr); 265 + } 266 + 267 + static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu) 268 + { 269 + struct kvm_guest_debug *dbg = &vcpu->guest_debug; 270 + u32 dbcr0 = 0; 271 + 272 + vcpu->arch.host_msr = mfmsr(); 273 + kvmppc_disable_debug_interrupts(); 274 + 275 + /* Save host debug register state. */ 276 + vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1); 277 + vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2); 278 + vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3); 279 + vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4); 280 + vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0); 281 + vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1); 282 + vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2); 283 + 284 + /* set registers up for guest */ 285 + 286 + if (dbg->bp[0]) { 287 + mtspr(SPRN_IAC1, dbg->bp[0]); 288 + dbcr0 |= DBCR0_IAC1 | DBCR0_IDM; 289 + } 290 + if (dbg->bp[1]) { 291 + mtspr(SPRN_IAC2, dbg->bp[1]); 292 + dbcr0 |= DBCR0_IAC2 | DBCR0_IDM; 293 + } 294 + if (dbg->bp[2]) { 295 + mtspr(SPRN_IAC3, dbg->bp[2]); 296 + dbcr0 |= DBCR0_IAC3 | DBCR0_IDM; 297 + } 298 + if (dbg->bp[3]) { 299 + mtspr(SPRN_IAC4, dbg->bp[3]); 300 + dbcr0 |= DBCR0_IAC4 | DBCR0_IDM; 301 + } 302 + 303 + mtspr(SPRN_DBCR0, dbcr0); 304 + mtspr(SPRN_DBCR1, 0); 305 + mtspr(SPRN_DBCR2, 0); 306 + } 307 + 243 308 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 244 309 { 310 + int i; 311 + 312 + if (vcpu->guest_debug.enabled) 313 + kvmppc_load_guest_debug_registers(vcpu); 314 + 315 + /* Mark every guest entry in the shadow TLB entry modified, so that they 316 + * will all be reloaded on the next vcpu run (instead of being 317 + * demand-faulted). */ 318 + for (i = 0; i <= tlb_44x_hwater; i++) 319 + kvmppc_tlbe_set_modified(vcpu, i); 245 320 } 246 321 247 322 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 248 323 { 324 + if (vcpu->guest_debug.enabled) 325 + kvmppc_restore_host_debug_state(vcpu); 326 + 327 + /* Don't leave guest TLB entries resident when being de-scheduled. */ 328 + /* XXX It would be nice to differentiate between heavyweight exit and 329 + * sched_out here, since we could avoid the TLB flush for heavyweight 330 + * exits. 
*/ 331 + _tlbia(); 249 332 } 250 333 251 334 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, 252 335 struct kvm_debug_guest *dbg) 253 336 { 254 - return -ENOTSUPP; 337 + int i; 338 + 339 + vcpu->guest_debug.enabled = dbg->enabled; 340 + if (vcpu->guest_debug.enabled) { 341 + for (i=0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) { 342 + if (dbg->breakpoints[i].enabled) 343 + vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address; 344 + else 345 + vcpu->guest_debug.bp[i] = 0; 346 + } 347 + } 348 + 349 + return 0; 255 350 } 256 351 257 352 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
+5 -2
arch/s390/Kconfig
··· 565 565 Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this. 566 566 567 567 config S390_GUEST 568 - bool "s390 guest support (EXPERIMENTAL)" 568 + bool "s390 guest support for KVM (EXPERIMENTAL)" 569 569 depends on 64BIT && EXPERIMENTAL 570 570 select VIRTIO 571 571 select VIRTIO_RING 572 572 select VIRTIO_CONSOLE 573 573 help 574 - Select this option if you want to run the kernel under s390 linux 574 + Select this option if you want to run the kernel as a guest under 575 + the KVM hypervisor. This will add detection for KVM as well as a 576 + virtio transport. If KVM is detected, the virtio console will be 577 + the default console. 575 578 endmenu 576 579 577 580 source "net/Kconfig"
+2 -2
arch/s390/kvm/priv.c
··· 157 157 int rc; 158 158 159 159 vcpu->stat.instruction_stfl++; 160 - facility_list &= ~(1UL<<24); /* no stfle */ 161 - facility_list &= ~(1UL<<23); /* no large pages */ 160 + /* only pass the facility bits, which we can handle */ 161 + facility_list &= 0xfe00fff3; 162 162 163 163 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), 164 164 &facility_list, sizeof(facility_list));
+30
arch/x86/kernel/kvmclock.c
··· 78 78 return ret; 79 79 } 80 80 81 + /* 82 + * If we don't do that, there is the possibility that the guest 83 + * will calibrate under heavy load - thus, getting a lower lpj - 84 + * and execute the delays themselves without load. This is wrong, 85 + * because no delay loop can finish beforehand. 86 + * Any heuristics is subject to fail, because ultimately, a large 87 + * poll of guests can be running and trouble each other. So we preset 88 + * lpj here 89 + */ 90 + static unsigned long kvm_get_tsc_khz(void) 91 + { 92 + return preset_lpj; 93 + } 94 + 95 + static void kvm_get_preset_lpj(void) 96 + { 97 + struct pvclock_vcpu_time_info *src; 98 + unsigned long khz; 99 + u64 lpj; 100 + 101 + src = &per_cpu(hv_clock, 0); 102 + khz = pvclock_tsc_khz(src); 103 + 104 + lpj = ((u64)khz * 1000); 105 + do_div(lpj, HZ); 106 + preset_lpj = lpj; 107 + } 108 + 81 109 static struct clocksource kvm_clock = { 82 110 .name = "kvm-clock", 83 111 .read = kvm_clock_read, ··· 181 153 pv_time_ops.get_wallclock = kvm_get_wallclock; 182 154 pv_time_ops.set_wallclock = kvm_set_wallclock; 183 155 pv_time_ops.sched_clock = kvm_clock_read; 156 + pv_time_ops.get_tsc_khz = kvm_get_tsc_khz; 184 157 #ifdef CONFIG_X86_LOCAL_APIC 185 158 pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock; 186 159 #endif ··· 192 163 #ifdef CONFIG_KEXEC 193 164 machine_ops.crash_shutdown = kvm_crash_shutdown; 194 165 #endif 166 + kvm_get_preset_lpj(); 195 167 clocksource_register(&kvm_clock); 196 168 } 197 169 }
+12
arch/x86/kernel/pvclock.c
··· 97 97 return dst->version; 98 98 } 99 99 100 + unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) 101 + { 102 + u64 pv_tsc_khz = 1000000ULL << 32; 103 + 104 + do_div(pv_tsc_khz, src->tsc_to_system_mul); 105 + if (src->tsc_shift < 0) 106 + pv_tsc_khz <<= -src->tsc_shift; 107 + else 108 + pv_tsc_khz >>= src->tsc_shift; 109 + return pv_tsc_khz; 110 + } 111 + 100 112 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) 101 113 { 102 114 struct pvclock_shadow_time shadow;
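The pvclock_tsc_khz() helper added above recovers the guest's TSC frequency by inverting the fixed-point scale pvclock uses to turn TSC deltas into nanoseconds (ns = (scaled_delta * tsc_to_system_mul) >> 32, where scaled_delta is the TSC delta shifted by tsc_shift). Below is a minimal userspace sketch of the same arithmetic, with a made-up mul/shift pair describing a 2 GHz TSC; it is an illustration only, not code from this merge.

#include <stdint.h>
#include <stdio.h>

/* Same math as pvclock_tsc_khz(), in plain C instead of do_div(). */
static uint64_t tsc_khz_from_pvclock(uint32_t tsc_to_system_mul, int8_t tsc_shift)
{
        uint64_t khz = 1000000ULL << 32;

        khz /= tsc_to_system_mul;       /* undo the 32.32 fixed-point multiplier */
        if (tsc_shift < 0)
                khz <<= -tsc_shift;     /* TSC was pre-divided before scaling */
        else
                khz >>= tsc_shift;
        return khz;
}

int main(void)
{
        /* 0.5 ns per cycle encoded as mul = 2^31, shift = 0 -> 2000000 kHz */
        printf("%llu kHz\n",
               (unsigned long long)tsc_khz_from_pvclock(1U << 31, 0));
        return 0;
}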
+4 -1
arch/x86/kvm/Makefile
··· 3 3 # 4 4 5 5 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ 6 - coalesced_mmio.o) 6 + coalesced_mmio.o irq_comm.o) 7 7 ifeq ($(CONFIG_KVM_TRACE),y) 8 8 common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o) 9 + endif 10 + ifeq ($(CONFIG_DMAR),y) 11 + common-objs += $(addprefix ../../../virt/kvm/, vtd.o) 9 12 endif 10 13 11 14 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
+39 -42
arch/x86/kvm/i8254.c
··· 200 200 201 201 if (!atomic_inc_and_test(&pt->pending)) 202 202 set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests); 203 - if (vcpu0 && waitqueue_active(&vcpu0->wq)) { 204 - vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE; 203 + 204 + if (vcpu0 && waitqueue_active(&vcpu0->wq)) 205 205 wake_up_interruptible(&vcpu0->wq); 206 - } 207 206 208 207 pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period); 209 208 pt->scheduled = ktime_to_ns(pt->timer.expires); 209 + if (pt->period) 210 + ps->channels[0].count_load_time = pt->timer.expires; 210 211 211 212 return (pt->period == 0 ? 0 : 1); 212 213 } ··· 216 215 { 217 216 struct kvm_pit *pit = vcpu->kvm->arch.vpit; 218 217 219 - if (pit && vcpu->vcpu_id == 0 && pit->pit_state.inject_pending) 218 + if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack) 220 219 return atomic_read(&pit->pit_state.pit_timer.pending); 221 - 222 220 return 0; 221 + } 222 + 223 + static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian) 224 + { 225 + struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state, 226 + irq_ack_notifier); 227 + spin_lock(&ps->inject_lock); 228 + if (atomic_dec_return(&ps->pit_timer.pending) < 0) 229 + atomic_inc(&ps->pit_timer.pending); 230 + ps->irq_ack = 1; 231 + spin_unlock(&ps->inject_lock); 223 232 } 224 233 225 234 static enum hrtimer_restart pit_timer_fn(struct hrtimer *data) ··· 266 255 hrtimer_cancel(&pt->timer); 267 256 } 268 257 269 - static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period) 258 + static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period) 270 259 { 260 + struct kvm_kpit_timer *pt = &ps->pit_timer; 271 261 s64 interval; 272 262 273 263 interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ); ··· 280 268 pt->period = (is_period == 0) ? 
0 : interval; 281 269 pt->timer.function = pit_timer_fn; 282 270 atomic_set(&pt->pending, 0); 271 + ps->irq_ack = 1; 283 272 284 273 hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval), 285 274 HRTIMER_MODE_ABS); ··· 315 302 case 1: 316 303 /* FIXME: enhance mode 4 precision */ 317 304 case 4: 318 - create_pit_timer(&ps->pit_timer, val, 0); 305 + create_pit_timer(ps, val, 0); 319 306 break; 320 307 case 2: 321 308 case 3: 322 - create_pit_timer(&ps->pit_timer, val, 1); 309 + create_pit_timer(ps, val, 1); 323 310 break; 324 311 default: 325 312 destroy_pit_timer(&ps->pit_timer); ··· 533 520 mutex_unlock(&pit->pit_state.lock); 534 521 535 522 atomic_set(&pit->pit_state.pit_timer.pending, 0); 536 - pit->pit_state.inject_pending = 1; 523 + pit->pit_state.irq_ack = 1; 537 524 } 538 525 539 526 struct kvm_pit *kvm_create_pit(struct kvm *kvm) ··· 547 534 548 535 mutex_init(&pit->pit_state.lock); 549 536 mutex_lock(&pit->pit_state.lock); 537 + spin_lock_init(&pit->pit_state.inject_lock); 550 538 551 539 /* Initialize PIO device */ 552 540 pit->dev.read = pit_ioport_read; ··· 569 555 pit_state->pit = pit; 570 556 hrtimer_init(&pit_state->pit_timer.timer, 571 557 CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 558 + pit_state->irq_ack_notifier.gsi = 0; 559 + pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq; 560 + kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); 572 561 mutex_unlock(&pit->pit_state.lock); 573 562 574 563 kvm_pit_reset(pit); ··· 595 578 static void __inject_pit_timer_intr(struct kvm *kvm) 596 579 { 597 580 mutex_lock(&kvm->lock); 598 - kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1); 599 - kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0); 600 - kvm_pic_set_irq(pic_irqchip(kvm), 0, 1); 601 - kvm_pic_set_irq(pic_irqchip(kvm), 0, 0); 581 + kvm_set_irq(kvm, 0, 1); 582 + kvm_set_irq(kvm, 0, 0); 602 583 mutex_unlock(&kvm->lock); 603 584 } 604 585 ··· 607 592 struct kvm_kpit_state *ps; 608 593 609 594 if (vcpu && pit) { 595 + int inject = 0; 610 596 ps = &pit->pit_state; 611 597 612 - /* Try to inject pending interrupts when: 613 - * 1. Pending exists 614 - * 2. Last interrupt was accepted or waited for too long time*/ 615 - if (atomic_read(&ps->pit_timer.pending) && 616 - (ps->inject_pending || 617 - (jiffies - ps->last_injected_time 618 - >= KVM_MAX_PIT_INTR_INTERVAL))) { 619 - ps->inject_pending = 0; 598 + /* Try to inject pending interrupts when 599 + * last one has been acked. 600 + */ 601 + spin_lock(&ps->inject_lock); 602 + if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) { 603 + ps->irq_ack = 0; 604 + inject = 1; 605 + } 606 + spin_unlock(&ps->inject_lock); 607 + if (inject) 620 608 __inject_pit_timer_intr(kvm); 621 - ps->last_injected_time = jiffies; 622 - } 623 - } 624 - } 625 - 626 - void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec) 627 - { 628 - struct kvm_arch *arch = &vcpu->kvm->arch; 629 - struct kvm_kpit_state *ps; 630 - 631 - if (vcpu && arch->vpit) { 632 - ps = &arch->vpit->pit_state; 633 - if (atomic_read(&ps->pit_timer.pending) && 634 - (((arch->vpic->pics[0].imr & 1) == 0 && 635 - arch->vpic->pics[0].irq_base == vec) || 636 - (arch->vioapic->redirtbl[0].fields.vector == vec && 637 - arch->vioapic->redirtbl[0].fields.mask != 1))) { 638 - ps->inject_pending = 1; 639 - atomic_dec(&ps->pit_timer.pending); 640 - ps->channels[0].count_load_time = ktime_get(); 641 - } 642 609 } 643 610 }
+3 -4
arch/x86/kvm/i8254.h
··· 8 8 int irq; 9 9 s64 period; /* unit: ns */ 10 10 s64 scheduled; 11 - ktime_t last_update; 12 11 atomic_t pending; 13 12 }; 14 13 ··· 33 34 u32 speaker_data_on; 34 35 struct mutex lock; 35 36 struct kvm_pit *pit; 36 - bool inject_pending; /* if inject pending interrupts */ 37 - unsigned long last_injected_time; 37 + spinlock_t inject_lock; 38 + unsigned long irq_ack; 39 + struct kvm_irq_ack_notifier irq_ack_notifier; 38 40 }; 39 41 40 42 struct kvm_pit { ··· 54 54 #define KVM_PIT_CHANNEL_MASK 0x3 55 55 56 56 void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu); 57 - void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec); 58 57 void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val); 59 58 struct kvm_pit *kvm_create_pit(struct kvm *kvm); 60 59 void kvm_free_pit(struct kvm *kvm);
+44 -9
arch/x86/kvm/i8259.c
··· 30 30 31 31 #include <linux/kvm_host.h> 32 32 33 + static void pic_clear_isr(struct kvm_kpic_state *s, int irq) 34 + { 35 + s->isr &= ~(1 << irq); 36 + s->isr_ack |= (1 << irq); 37 + } 38 + 39 + void kvm_pic_clear_isr_ack(struct kvm *kvm) 40 + { 41 + struct kvm_pic *s = pic_irqchip(kvm); 42 + s->pics[0].isr_ack = 0xff; 43 + s->pics[1].isr_ack = 0xff; 44 + } 45 + 33 46 /* 34 47 * set irq level. If an edge is detected, then the IRR is set to 1 35 48 */ ··· 154 141 */ 155 142 static inline void pic_intack(struct kvm_kpic_state *s, int irq) 156 143 { 144 + s->isr |= 1 << irq; 157 145 if (s->auto_eoi) { 158 146 if (s->rotate_on_auto_eoi) 159 147 s->priority_add = (irq + 1) & 7; 160 - } else 161 - s->isr |= (1 << irq); 148 + pic_clear_isr(s, irq); 149 + } 162 150 /* 163 151 * We don't clear a level sensitive interrupt here 164 152 */ ··· 167 153 s->irr &= ~(1 << irq); 168 154 } 169 155 170 - int kvm_pic_read_irq(struct kvm_pic *s) 156 + int kvm_pic_read_irq(struct kvm *kvm) 171 157 { 172 158 int irq, irq2, intno; 159 + struct kvm_pic *s = pic_irqchip(kvm); 173 160 174 161 irq = pic_get_irq(&s->pics[0]); 175 162 if (irq >= 0) { ··· 196 181 intno = s->pics[0].irq_base + irq; 197 182 } 198 183 pic_update_irq(s); 184 + kvm_notify_acked_irq(kvm, irq); 199 185 200 186 return intno; 201 187 } 202 188 203 189 void kvm_pic_reset(struct kvm_kpic_state *s) 204 190 { 191 + int irq, irqbase; 192 + struct kvm *kvm = s->pics_state->irq_request_opaque; 193 + struct kvm_vcpu *vcpu0 = kvm->vcpus[0]; 194 + 195 + if (s == &s->pics_state->pics[0]) 196 + irqbase = 0; 197 + else 198 + irqbase = 8; 199 + 200 + for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { 201 + if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) 202 + if (s->irr & (1 << irq) || s->isr & (1 << irq)) 203 + kvm_notify_acked_irq(kvm, irq+irqbase); 204 + } 205 205 s->last_irr = 0; 206 206 s->irr = 0; 207 207 s->imr = 0; 208 208 s->isr = 0; 209 + s->isr_ack = 0xff; 209 210 s->priority_add = 0; 210 211 s->irq_base = 0; 211 212 s->read_reg_select = 0; ··· 274 243 priority = get_priority(s, s->isr); 275 244 if (priority != 8) { 276 245 irq = (priority + s->priority_add) & 7; 277 - s->isr &= ~(1 << irq); 246 + pic_clear_isr(s, irq); 278 247 if (cmd == 5) 279 248 s->priority_add = (irq + 1) & 7; 280 249 pic_update_irq(s->pics_state); ··· 282 251 break; 283 252 case 3: 284 253 irq = val & 7; 285 - s->isr &= ~(1 << irq); 254 + pic_clear_isr(s, irq); 286 255 pic_update_irq(s->pics_state); 287 256 break; 288 257 case 6: ··· 291 260 break; 292 261 case 7: 293 262 irq = val & 7; 294 - s->isr &= ~(1 << irq); 295 263 s->priority_add = (irq + 1) & 7; 264 + pic_clear_isr(s, irq); 296 265 pic_update_irq(s->pics_state); 297 266 break; 298 267 default: ··· 334 303 s->pics_state->pics[0].irr &= ~(1 << 2); 335 304 } 336 305 s->irr &= ~(1 << ret); 337 - s->isr &= ~(1 << ret); 306 + pic_clear_isr(s, ret); 338 307 if (addr1 >> 7 || ret != 2) 339 308 pic_update_irq(s->pics_state); 340 309 } else { ··· 453 422 { 454 423 struct kvm *kvm = opaque; 455 424 struct kvm_vcpu *vcpu = kvm->vcpus[0]; 425 + struct kvm_pic *s = pic_irqchip(kvm); 426 + int irq = pic_get_irq(&s->pics[0]); 456 427 457 - pic_irqchip(kvm)->output = level; 458 - if (vcpu) 428 + s->output = level; 429 + if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) { 430 + s->pics[0].isr_ack &= ~(1 << irq); 459 431 kvm_vcpu_kick(vcpu); 432 + } 460 433 } 461 434 462 435 struct kvm_pic *kvm_create_pic(struct kvm *kvm)
+1 -2
arch/x86/kvm/irq.c
··· 72 72 if (kvm_apic_accept_pic_intr(v)) { 73 73 s = pic_irqchip(v->kvm); 74 74 s->output = 0; /* PIC */ 75 - vector = kvm_pic_read_irq(s); 75 + vector = kvm_pic_read_irq(v->kvm); 76 76 } 77 77 } 78 78 return vector; ··· 90 90 void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec) 91 91 { 92 92 kvm_apic_timer_intr_post(vcpu, vec); 93 - kvm_pit_timer_intr_post(vcpu, vec); 94 93 /* TODO: PIT, RTC etc. */ 95 94 } 96 95 EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
+4 -2
arch/x86/kvm/irq.h
··· 42 42 u8 irr; /* interrupt request register */ 43 43 u8 imr; /* interrupt mask register */ 44 44 u8 isr; /* interrupt service register */ 45 + u8 isr_ack; /* interrupt ack detection */ 45 46 u8 priority_add; /* highest irq priority */ 46 47 u8 irq_base; 47 48 u8 read_reg_select; ··· 64 63 void *irq_request_opaque; 65 64 int output; /* intr from master PIC */ 66 65 struct kvm_io_device dev; 66 + void (*ack_notifier)(void *opaque, int irq); 67 67 }; 68 68 69 69 struct kvm_pic *kvm_create_pic(struct kvm *kvm); 70 - void kvm_pic_set_irq(void *opaque, int irq, int level); 71 - int kvm_pic_read_irq(struct kvm_pic *s); 70 + int kvm_pic_read_irq(struct kvm *kvm); 72 71 void kvm_pic_update_irq(struct kvm_pic *s); 72 + void kvm_pic_clear_isr_ack(struct kvm *kvm); 73 73 74 74 static inline struct kvm_pic *pic_irqchip(struct kvm *kvm) 75 75 {
+32
arch/x86/kvm/kvm_cache_regs.h
··· 1 + #ifndef ASM_KVM_CACHE_REGS_H 2 + #define ASM_KVM_CACHE_REGS_H 3 + 4 + static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, 5 + enum kvm_reg reg) 6 + { 7 + if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail)) 8 + kvm_x86_ops->cache_reg(vcpu, reg); 9 + 10 + return vcpu->arch.regs[reg]; 11 + } 12 + 13 + static inline void kvm_register_write(struct kvm_vcpu *vcpu, 14 + enum kvm_reg reg, 15 + unsigned long val) 16 + { 17 + vcpu->arch.regs[reg] = val; 18 + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); 19 + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); 20 + } 21 + 22 + static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu) 23 + { 24 + return kvm_register_read(vcpu, VCPU_REGS_RIP); 25 + } 26 + 27 + static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val) 28 + { 29 + kvm_register_write(vcpu, VCPU_REGS_RIP, val); 30 + } 31 + 32 + #endif
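kvm_cache_regs.h above replaces the old cache_regs()/decache_regs() pattern on x86: a guest register is pulled out of the VMCS/VMCB (via kvm_x86_ops->cache_reg()) only the first time it is read after an exit, and only registers marked dirty are written back before the next entry. A rough usage sketch in kernel style, assuming a vcpu that has just exited; the +4 instruction length is purely illustrative and not taken from the patch.

/* Sketch only: advance the guest RIP using the lazy register cache. */
static void skip_insn_example(struct kvm_vcpu *vcpu)
{
        /* First read faults RIP in through kvm_x86_ops->cache_reg(). */
        unsigned long rip = kvm_rip_read(vcpu);

        /* The write marks RIP dirty so it is flushed back on the next entry. */
        kvm_rip_write(vcpu, rip + 4);
}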
+18 -25
arch/x86/kvm/lapic.c
··· 32 32 #include <asm/current.h> 33 33 #include <asm/apicdef.h> 34 34 #include <asm/atomic.h> 35 + #include "kvm_cache_regs.h" 35 36 #include "irq.h" 36 37 37 38 #define PRId64 "d" ··· 339 338 } else 340 339 apic_clear_vector(vector, apic->regs + APIC_TMR); 341 340 342 - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) 343 - kvm_vcpu_kick(vcpu); 344 - else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) { 345 - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 346 - if (waitqueue_active(&vcpu->wq)) 347 - wake_up_interruptible(&vcpu->wq); 348 - } 341 + kvm_vcpu_kick(vcpu); 349 342 350 343 result = (orig_irr == 0); 351 344 break; ··· 365 370 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 366 371 kvm_vcpu_kick(vcpu); 367 372 } else { 368 - printk(KERN_DEBUG 369 - "Ignoring de-assert INIT to vcpu %d\n", 370 - vcpu->vcpu_id); 373 + apic_debug("Ignoring de-assert INIT to vcpu %d\n", 374 + vcpu->vcpu_id); 371 375 } 372 - 373 376 break; 374 377 375 378 case APIC_DM_STARTUP: 376 - printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n", 377 - vcpu->vcpu_id, vector); 379 + apic_debug("SIPI to vcpu %d vector 0x%02x\n", 380 + vcpu->vcpu_id, vector); 378 381 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 379 382 vcpu->arch.sipi_vector = vector; 380 383 vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED; 381 - if (waitqueue_active(&vcpu->wq)) 382 - wake_up_interruptible(&vcpu->wq); 384 + kvm_vcpu_kick(vcpu); 383 385 } 384 386 break; 385 387 ··· 430 438 static void apic_set_eoi(struct kvm_lapic *apic) 431 439 { 432 440 int vector = apic_find_highest_isr(apic); 433 - 441 + int trigger_mode; 434 442 /* 435 443 * Not every write EOI will has corresponding ISR, 436 444 * one example is when Kernel check timer on setup_IO_APIC ··· 442 450 apic_update_ppr(apic); 443 451 444 452 if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR)) 445 - kvm_ioapic_update_eoi(apic->vcpu->kvm, vector); 453 + trigger_mode = IOAPIC_LEVEL_TRIG; 454 + else 455 + trigger_mode = IOAPIC_EDGE_TRIG; 456 + kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); 446 457 } 447 458 448 459 static void apic_send_ipi(struct kvm_lapic *apic) ··· 553 558 struct kvm_run *run = vcpu->run; 554 559 555 560 set_bit(KVM_REQ_REPORT_TPR_ACCESS, &vcpu->requests); 556 - kvm_x86_ops->cache_regs(vcpu); 557 - run->tpr_access.rip = vcpu->arch.rip; 561 + run->tpr_access.rip = kvm_rip_read(vcpu); 558 562 run->tpr_access.is_write = write; 559 563 } 560 564 ··· 677 683 * Refer SDM 8.4.1 678 684 */ 679 685 if (len != 4 || alignment) { 680 - if (printk_ratelimit()) 681 - printk(KERN_ERR "apic write: bad size=%d %lx\n", 682 - len, (long)address); 686 + /* Don't shout loud, $infamous_os would cause only noise. */ 687 + apic_debug("apic write: bad size=%d %lx\n", 688 + len, (long)address); 683 689 return; 684 690 } 685 691 ··· 941 947 942 948 if(!atomic_inc_and_test(&apic->timer.pending)) 943 949 set_bit(KVM_REQ_PENDING_TIMER, &apic->vcpu->requests); 944 - if (waitqueue_active(q)) { 945 - apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 950 + if (waitqueue_active(q)) 946 951 wake_up_interruptible(q); 947 - } 952 + 948 953 if (apic_lvtt_period(apic)) { 949 954 result = 1; 950 955 apic->timer.dev.expires = ktime_add_ns(
+543 -147
arch/x86/kvm/mmu.c
··· 70 70 module_param(dbg, bool, 0644); 71 71 #endif 72 72 73 + static int oos_shadow = 1; 74 + module_param(oos_shadow, bool, 0644); 75 + 73 76 #ifndef MMU_DEBUG 74 77 #define ASSERT(x) do { } while (0) 75 78 #else ··· 138 135 #define ACC_USER_MASK PT_USER_MASK 139 136 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) 140 137 141 - struct kvm_pv_mmu_op_buffer { 142 - void *ptr; 143 - unsigned len; 144 - unsigned processed; 145 - char buf[512] __aligned(sizeof(long)); 146 - }; 138 + #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 147 139 148 140 struct kvm_rmap_desc { 149 141 u64 *shadow_ptes[RMAP_EXT]; 150 142 struct kvm_rmap_desc *more; 151 143 }; 144 + 145 + struct kvm_shadow_walk { 146 + int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu, 147 + u64 addr, u64 *spte, int level); 148 + }; 149 + 150 + struct kvm_unsync_walk { 151 + int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk); 152 + }; 153 + 154 + typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); 152 155 153 156 static struct kmem_cache *pte_chain_cache; 154 157 static struct kmem_cache *rmap_desc_cache; ··· 414 405 { 415 406 struct vm_area_struct *vma; 416 407 unsigned long addr; 408 + int ret = 0; 417 409 418 410 addr = gfn_to_hva(kvm, gfn); 419 411 if (kvm_is_error_hva(addr)) 420 - return 0; 412 + return ret; 421 413 414 + down_read(&current->mm->mmap_sem); 422 415 vma = find_vma(current->mm, addr); 423 416 if (vma && is_vm_hugetlb_page(vma)) 424 - return 1; 417 + ret = 1; 418 + up_read(&current->mm->mmap_sem); 425 419 426 - return 0; 420 + return ret; 427 421 } 428 422 429 423 static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn) ··· 661 649 662 650 if (write_protected) 663 651 kvm_flush_remote_tlbs(kvm); 664 - 665 - account_shadowed(kvm, gfn); 666 652 } 667 653 668 654 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) ··· 869 859 BUG(); 870 860 } 871 861 862 + 863 + static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 864 + mmu_parent_walk_fn fn) 865 + { 866 + struct kvm_pte_chain *pte_chain; 867 + struct hlist_node *node; 868 + struct kvm_mmu_page *parent_sp; 869 + int i; 870 + 871 + if (!sp->multimapped && sp->parent_pte) { 872 + parent_sp = page_header(__pa(sp->parent_pte)); 873 + fn(vcpu, parent_sp); 874 + mmu_parent_walk(vcpu, parent_sp, fn); 875 + return; 876 + } 877 + hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) 878 + for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { 879 + if (!pte_chain->parent_ptes[i]) 880 + break; 881 + parent_sp = page_header(__pa(pte_chain->parent_ptes[i])); 882 + fn(vcpu, parent_sp); 883 + mmu_parent_walk(vcpu, parent_sp, fn); 884 + } 885 + } 886 + 887 + static void kvm_mmu_update_unsync_bitmap(u64 *spte) 888 + { 889 + unsigned int index; 890 + struct kvm_mmu_page *sp = page_header(__pa(spte)); 891 + 892 + index = spte - sp->spt; 893 + __set_bit(index, sp->unsync_child_bitmap); 894 + sp->unsync_children = 1; 895 + } 896 + 897 + static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) 898 + { 899 + struct kvm_pte_chain *pte_chain; 900 + struct hlist_node *node; 901 + int i; 902 + 903 + if (!sp->parent_pte) 904 + return; 905 + 906 + if (!sp->multimapped) { 907 + kvm_mmu_update_unsync_bitmap(sp->parent_pte); 908 + return; 909 + } 910 + 911 + hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) 912 + for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) { 913 + if (!pte_chain->parent_ptes[i]) 914 + break; 915 + 
kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]); 916 + } 917 + } 918 + 919 + static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 920 + { 921 + sp->unsync_children = 1; 922 + kvm_mmu_update_parents_unsync(sp); 923 + return 1; 924 + } 925 + 926 + static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu, 927 + struct kvm_mmu_page *sp) 928 + { 929 + mmu_parent_walk(vcpu, sp, unsync_walk_fn); 930 + kvm_mmu_update_parents_unsync(sp); 931 + } 932 + 872 933 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu, 873 934 struct kvm_mmu_page *sp) 874 935 { ··· 947 866 948 867 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 949 868 sp->spt[i] = shadow_trap_nonpresent_pte; 869 + } 870 + 871 + static int nonpaging_sync_page(struct kvm_vcpu *vcpu, 872 + struct kvm_mmu_page *sp) 873 + { 874 + return 1; 875 + } 876 + 877 + static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 878 + { 879 + } 880 + 881 + #define for_each_unsync_children(bitmap, idx) \ 882 + for (idx = find_first_bit(bitmap, 512); \ 883 + idx < 512; \ 884 + idx = find_next_bit(bitmap, 512, idx+1)) 885 + 886 + static int mmu_unsync_walk(struct kvm_mmu_page *sp, 887 + struct kvm_unsync_walk *walker) 888 + { 889 + int i, ret; 890 + 891 + if (!sp->unsync_children) 892 + return 0; 893 + 894 + for_each_unsync_children(sp->unsync_child_bitmap, i) { 895 + u64 ent = sp->spt[i]; 896 + 897 + if (is_shadow_present_pte(ent)) { 898 + struct kvm_mmu_page *child; 899 + child = page_header(ent & PT64_BASE_ADDR_MASK); 900 + 901 + if (child->unsync_children) { 902 + ret = mmu_unsync_walk(child, walker); 903 + if (ret) 904 + return ret; 905 + __clear_bit(i, sp->unsync_child_bitmap); 906 + } 907 + 908 + if (child->unsync) { 909 + ret = walker->entry(child, walker); 910 + __clear_bit(i, sp->unsync_child_bitmap); 911 + if (ret) 912 + return ret; 913 + } 914 + } 915 + } 916 + 917 + if (find_first_bit(sp->unsync_child_bitmap, 512) == 512) 918 + sp->unsync_children = 0; 919 + 920 + return 0; 950 921 } 951 922 952 923 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) ··· 1021 888 return NULL; 1022 889 } 1023 890 891 + static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) 892 + { 893 + WARN_ON(!sp->unsync); 894 + sp->unsync = 0; 895 + --kvm->stat.mmu_unsync; 896 + } 897 + 898 + static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp); 899 + 900 + static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 901 + { 902 + if (sp->role.glevels != vcpu->arch.mmu.root_level) { 903 + kvm_mmu_zap_page(vcpu->kvm, sp); 904 + return 1; 905 + } 906 + 907 + rmap_write_protect(vcpu->kvm, sp->gfn); 908 + if (vcpu->arch.mmu.sync_page(vcpu, sp)) { 909 + kvm_mmu_zap_page(vcpu->kvm, sp); 910 + return 1; 911 + } 912 + 913 + kvm_mmu_flush_tlb(vcpu); 914 + kvm_unlink_unsync_page(vcpu->kvm, sp); 915 + return 0; 916 + } 917 + 918 + struct sync_walker { 919 + struct kvm_vcpu *vcpu; 920 + struct kvm_unsync_walk walker; 921 + }; 922 + 923 + static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk) 924 + { 925 + struct sync_walker *sync_walk = container_of(walk, struct sync_walker, 926 + walker); 927 + struct kvm_vcpu *vcpu = sync_walk->vcpu; 928 + 929 + kvm_sync_page(vcpu, sp); 930 + return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)); 931 + } 932 + 933 + static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 934 + { 935 + struct sync_walker walker = { 936 + .walker = { .entry = mmu_sync_fn, }, 937 + .vcpu = vcpu, 938 + }; 939 + 
940 + while (mmu_unsync_walk(sp, &walker.walker)) 941 + cond_resched_lock(&vcpu->kvm->mmu_lock); 942 + } 943 + 1024 944 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, 1025 945 gfn_t gfn, 1026 946 gva_t gaddr, ··· 1087 901 unsigned quadrant; 1088 902 struct hlist_head *bucket; 1089 903 struct kvm_mmu_page *sp; 1090 - struct hlist_node *node; 904 + struct hlist_node *node, *tmp; 1091 905 1092 906 role.word = 0; 1093 907 role.glevels = vcpu->arch.mmu.root_level; ··· 1103 917 gfn, role.word); 1104 918 index = kvm_page_table_hashfn(gfn); 1105 919 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 1106 - hlist_for_each_entry(sp, node, bucket, hash_link) 1107 - if (sp->gfn == gfn && sp->role.word == role.word) { 920 + hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link) 921 + if (sp->gfn == gfn) { 922 + if (sp->unsync) 923 + if (kvm_sync_page(vcpu, sp)) 924 + continue; 925 + 926 + if (sp->role.word != role.word) 927 + continue; 928 + 1108 929 mmu_page_add_parent_pte(vcpu, sp, parent_pte); 930 + if (sp->unsync_children) { 931 + set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); 932 + kvm_mmu_mark_parents_unsync(vcpu, sp); 933 + } 1109 934 pgprintk("%s: found\n", __func__); 1110 935 return sp; 1111 936 } ··· 1128 931 sp->gfn = gfn; 1129 932 sp->role = role; 1130 933 hlist_add_head(&sp->hash_link, bucket); 1131 - if (!metaphysical) 934 + if (!metaphysical) { 1132 935 rmap_write_protect(vcpu->kvm, gfn); 936 + account_shadowed(vcpu->kvm, gfn); 937 + } 1133 938 if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) 1134 939 vcpu->arch.mmu.prefetch_page(vcpu, sp); 1135 940 else 1136 941 nonpaging_prefetch_page(vcpu, sp); 1137 942 return sp; 943 + } 944 + 945 + static int walk_shadow(struct kvm_shadow_walk *walker, 946 + struct kvm_vcpu *vcpu, u64 addr) 947 + { 948 + hpa_t shadow_addr; 949 + int level; 950 + int r; 951 + u64 *sptep; 952 + unsigned index; 953 + 954 + shadow_addr = vcpu->arch.mmu.root_hpa; 955 + level = vcpu->arch.mmu.shadow_root_level; 956 + if (level == PT32E_ROOT_LEVEL) { 957 + shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; 958 + shadow_addr &= PT64_BASE_ADDR_MASK; 959 + --level; 960 + } 961 + 962 + while (level >= PT_PAGE_TABLE_LEVEL) { 963 + index = SHADOW_PT_INDEX(addr, level); 964 + sptep = ((u64 *)__va(shadow_addr)) + index; 965 + r = walker->entry(walker, vcpu, addr, sptep, level); 966 + if (r) 967 + return r; 968 + shadow_addr = *sptep & PT64_BASE_ADDR_MASK; 969 + --level; 970 + } 971 + return 0; 1138 972 } 1139 973 1140 974 static void kvm_mmu_page_unlink_children(struct kvm *kvm, ··· 1183 955 rmap_remove(kvm, &pt[i]); 1184 956 pt[i] = shadow_trap_nonpresent_pte; 1185 957 } 1186 - kvm_flush_remote_tlbs(kvm); 1187 958 return; 1188 959 } 1189 960 ··· 1201 974 } 1202 975 pt[i] = shadow_trap_nonpresent_pte; 1203 976 } 1204 - kvm_flush_remote_tlbs(kvm); 1205 977 } 1206 978 1207 979 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) ··· 1217 991 kvm->vcpus[i]->arch.last_pte_updated = NULL; 1218 992 } 1219 993 1220 - static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) 994 + static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) 1221 995 { 1222 996 u64 *parent_pte; 1223 997 1224 - ++kvm->stat.mmu_shadow_zapped; 1225 998 while (sp->multimapped || sp->parent_pte) { 1226 999 if (!sp->multimapped) 1227 1000 parent_pte = sp->parent_pte; ··· 1235 1010 kvm_mmu_put_page(sp, parent_pte); 1236 1011 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte); 1237 1012 } 1013 + } 1014 + 1015 + struct 
zap_walker { 1016 + struct kvm_unsync_walk walker; 1017 + struct kvm *kvm; 1018 + int zapped; 1019 + }; 1020 + 1021 + static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk) 1022 + { 1023 + struct zap_walker *zap_walk = container_of(walk, struct zap_walker, 1024 + walker); 1025 + kvm_mmu_zap_page(zap_walk->kvm, sp); 1026 + zap_walk->zapped = 1; 1027 + return 0; 1028 + } 1029 + 1030 + static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp) 1031 + { 1032 + struct zap_walker walker = { 1033 + .walker = { .entry = mmu_zap_fn, }, 1034 + .kvm = kvm, 1035 + .zapped = 0, 1036 + }; 1037 + 1038 + if (sp->role.level == PT_PAGE_TABLE_LEVEL) 1039 + return 0; 1040 + mmu_unsync_walk(sp, &walker.walker); 1041 + return walker.zapped; 1042 + } 1043 + 1044 + static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) 1045 + { 1046 + int ret; 1047 + ++kvm->stat.mmu_shadow_zapped; 1048 + ret = mmu_zap_unsync_children(kvm, sp); 1238 1049 kvm_mmu_page_unlink_children(kvm, sp); 1050 + kvm_mmu_unlink_parents(kvm, sp); 1051 + kvm_flush_remote_tlbs(kvm); 1052 + if (!sp->role.invalid && !sp->role.metaphysical) 1053 + unaccount_shadowed(kvm, sp->gfn); 1054 + if (sp->unsync) 1055 + kvm_unlink_unsync_page(kvm, sp); 1239 1056 if (!sp->root_count) { 1240 - if (!sp->role.metaphysical && !sp->role.invalid) 1241 - unaccount_shadowed(kvm, sp->gfn); 1242 1057 hlist_del(&sp->hash_link); 1243 1058 kvm_mmu_free_page(kvm, sp); 1244 1059 } else { 1245 - int invalid = sp->role.invalid; 1246 - list_move(&sp->link, &kvm->arch.active_mmu_pages); 1247 1060 sp->role.invalid = 1; 1061 + list_move(&sp->link, &kvm->arch.active_mmu_pages); 1248 1062 kvm_reload_remote_mmus(kvm); 1249 - if (!sp->role.metaphysical && !invalid) 1250 - unaccount_shadowed(kvm, sp->gfn); 1251 1063 } 1252 1064 kvm_mmu_reset_last_pte_updated(kvm); 1065 + return ret; 1253 1066 } 1254 1067 1255 1068 /* ··· 1340 1077 if (sp->gfn == gfn && !sp->role.metaphysical) { 1341 1078 pgprintk("%s: gfn %lx role %x\n", __func__, gfn, 1342 1079 sp->role.word); 1343 - kvm_mmu_zap_page(kvm, sp); 1344 1080 r = 1; 1081 + if (kvm_mmu_zap_page(kvm, sp)) 1082 + n = bucket->first; 1345 1083 } 1346 1084 return r; 1347 1085 } ··· 1365 1101 __set_bit(slot, &sp->slot_bitmap); 1366 1102 } 1367 1103 1104 + static void mmu_convert_notrap(struct kvm_mmu_page *sp) 1105 + { 1106 + int i; 1107 + u64 *pt = sp->spt; 1108 + 1109 + if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte) 1110 + return; 1111 + 1112 + for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { 1113 + if (pt[i] == shadow_notrap_nonpresent_pte) 1114 + set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte); 1115 + } 1116 + } 1117 + 1368 1118 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) 1369 1119 { 1370 1120 struct page *page; ··· 1388 1110 if (gpa == UNMAPPED_GVA) 1389 1111 return NULL; 1390 1112 1391 - down_read(&current->mm->mmap_sem); 1392 1113 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); 1393 - up_read(&current->mm->mmap_sem); 1394 1114 1395 1115 return page; 1116 + } 1117 + 1118 + static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 1119 + { 1120 + unsigned index; 1121 + struct hlist_head *bucket; 1122 + struct kvm_mmu_page *s; 1123 + struct hlist_node *node, *n; 1124 + 1125 + index = kvm_page_table_hashfn(sp->gfn); 1126 + bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 1127 + /* don't unsync if pagetable is shadowed with multiple roles */ 1128 + hlist_for_each_entry_safe(s, node, n, bucket, hash_link) { 1129 + if (s->gfn != sp->gfn || 
s->role.metaphysical) 1130 + continue; 1131 + if (s->role.word != sp->role.word) 1132 + return 1; 1133 + } 1134 + kvm_mmu_mark_parents_unsync(vcpu, sp); 1135 + ++vcpu->kvm->stat.mmu_unsync; 1136 + sp->unsync = 1; 1137 + mmu_convert_notrap(sp); 1138 + return 0; 1139 + } 1140 + 1141 + static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, 1142 + bool can_unsync) 1143 + { 1144 + struct kvm_mmu_page *shadow; 1145 + 1146 + shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn); 1147 + if (shadow) { 1148 + if (shadow->role.level != PT_PAGE_TABLE_LEVEL) 1149 + return 1; 1150 + if (shadow->unsync) 1151 + return 0; 1152 + if (can_unsync && oos_shadow) 1153 + return kvm_unsync_page(vcpu, shadow); 1154 + return 1; 1155 + } 1156 + return 0; 1157 + } 1158 + 1159 + static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, 1160 + unsigned pte_access, int user_fault, 1161 + int write_fault, int dirty, int largepage, 1162 + gfn_t gfn, pfn_t pfn, bool speculative, 1163 + bool can_unsync) 1164 + { 1165 + u64 spte; 1166 + int ret = 0; 1167 + /* 1168 + * We don't set the accessed bit, since we sometimes want to see 1169 + * whether the guest actually used the pte (in order to detect 1170 + * demand paging). 1171 + */ 1172 + spte = shadow_base_present_pte | shadow_dirty_mask; 1173 + if (!speculative) 1174 + spte |= shadow_accessed_mask; 1175 + if (!dirty) 1176 + pte_access &= ~ACC_WRITE_MASK; 1177 + if (pte_access & ACC_EXEC_MASK) 1178 + spte |= shadow_x_mask; 1179 + else 1180 + spte |= shadow_nx_mask; 1181 + if (pte_access & ACC_USER_MASK) 1182 + spte |= shadow_user_mask; 1183 + if (largepage) 1184 + spte |= PT_PAGE_SIZE_MASK; 1185 + 1186 + spte |= (u64)pfn << PAGE_SHIFT; 1187 + 1188 + if ((pte_access & ACC_WRITE_MASK) 1189 + || (write_fault && !is_write_protection(vcpu) && !user_fault)) { 1190 + 1191 + if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) { 1192 + ret = 1; 1193 + spte = shadow_trap_nonpresent_pte; 1194 + goto set_pte; 1195 + } 1196 + 1197 + spte |= PT_WRITABLE_MASK; 1198 + 1199 + if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { 1200 + pgprintk("%s: found shadow page for %lx, marking ro\n", 1201 + __func__, gfn); 1202 + ret = 1; 1203 + pte_access &= ~ACC_WRITE_MASK; 1204 + if (is_writeble_pte(spte)) 1205 + spte &= ~PT_WRITABLE_MASK; 1206 + } 1207 + } 1208 + 1209 + if (pte_access & ACC_WRITE_MASK) 1210 + mark_page_dirty(vcpu->kvm, gfn); 1211 + 1212 + set_pte: 1213 + set_shadow_pte(shadow_pte, spte); 1214 + return ret; 1396 1215 } 1397 1216 1398 1217 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, ··· 1498 1123 int *ptwrite, int largepage, gfn_t gfn, 1499 1124 pfn_t pfn, bool speculative) 1500 1125 { 1501 - u64 spte; 1502 1126 int was_rmapped = 0; 1503 1127 int was_writeble = is_writeble_pte(*shadow_pte); 1504 1128 ··· 1528 1154 was_rmapped = 1; 1529 1155 } 1530 1156 } 1531 - 1532 - /* 1533 - * We don't set the accessed bit, since we sometimes want to see 1534 - * whether the guest actually used the pte (in order to detect 1535 - * demand paging). 
1536 - */ 1537 - spte = shadow_base_present_pte | shadow_dirty_mask; 1538 - if (!speculative) 1539 - pte_access |= PT_ACCESSED_MASK; 1540 - if (!dirty) 1541 - pte_access &= ~ACC_WRITE_MASK; 1542 - if (pte_access & ACC_EXEC_MASK) 1543 - spte |= shadow_x_mask; 1544 - else 1545 - spte |= shadow_nx_mask; 1546 - if (pte_access & ACC_USER_MASK) 1547 - spte |= shadow_user_mask; 1548 - if (largepage) 1549 - spte |= PT_PAGE_SIZE_MASK; 1550 - 1551 - spte |= (u64)pfn << PAGE_SHIFT; 1552 - 1553 - if ((pte_access & ACC_WRITE_MASK) 1554 - || (write_fault && !is_write_protection(vcpu) && !user_fault)) { 1555 - struct kvm_mmu_page *shadow; 1556 - 1557 - spte |= PT_WRITABLE_MASK; 1558 - 1559 - shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn); 1560 - if (shadow || 1561 - (largepage && has_wrprotected_page(vcpu->kvm, gfn))) { 1562 - pgprintk("%s: found shadow page for %lx, marking ro\n", 1563 - __func__, gfn); 1564 - pte_access &= ~ACC_WRITE_MASK; 1565 - if (is_writeble_pte(spte)) { 1566 - spte &= ~PT_WRITABLE_MASK; 1567 - kvm_x86_ops->tlb_flush(vcpu); 1568 - } 1569 - if (write_fault) 1570 - *ptwrite = 1; 1571 - } 1157 + if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, 1158 + dirty, largepage, gfn, pfn, speculative, true)) { 1159 + if (write_fault) 1160 + *ptwrite = 1; 1161 + kvm_x86_ops->tlb_flush(vcpu); 1572 1162 } 1573 1163 1574 - if (pte_access & ACC_WRITE_MASK) 1575 - mark_page_dirty(vcpu->kvm, gfn); 1576 - 1577 - pgprintk("%s: setting spte %llx\n", __func__, spte); 1164 + pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte); 1578 1165 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n", 1579 - (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB", 1580 - (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte); 1581 - set_shadow_pte(shadow_pte, spte); 1582 - if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK) 1583 - && (spte & PT_PRESENT_MASK)) 1166 + is_large_pte(*shadow_pte)? 
"2MB" : "4kB", 1167 + is_present_pte(*shadow_pte)?"RW":"R", gfn, 1168 + *shadow_pte, shadow_pte); 1169 + if (!was_rmapped && is_large_pte(*shadow_pte)) 1584 1170 ++vcpu->kvm->stat.lpages; 1585 1171 1586 1172 page_header_update_slot(vcpu->kvm, shadow_pte, gfn); ··· 1564 1230 { 1565 1231 } 1566 1232 1567 - static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, 1568 - int largepage, gfn_t gfn, pfn_t pfn, 1569 - int level) 1233 + struct direct_shadow_walk { 1234 + struct kvm_shadow_walk walker; 1235 + pfn_t pfn; 1236 + int write; 1237 + int largepage; 1238 + int pt_write; 1239 + }; 1240 + 1241 + static int direct_map_entry(struct kvm_shadow_walk *_walk, 1242 + struct kvm_vcpu *vcpu, 1243 + u64 addr, u64 *sptep, int level) 1570 1244 { 1571 - hpa_t table_addr = vcpu->arch.mmu.root_hpa; 1572 - int pt_write = 0; 1245 + struct direct_shadow_walk *walk = 1246 + container_of(_walk, struct direct_shadow_walk, walker); 1247 + struct kvm_mmu_page *sp; 1248 + gfn_t pseudo_gfn; 1249 + gfn_t gfn = addr >> PAGE_SHIFT; 1573 1250 1574 - for (; ; level--) { 1575 - u32 index = PT64_INDEX(v, level); 1576 - u64 *table; 1577 - 1578 - ASSERT(VALID_PAGE(table_addr)); 1579 - table = __va(table_addr); 1580 - 1581 - if (level == 1) { 1582 - mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, 1583 - 0, write, 1, &pt_write, 0, gfn, pfn, false); 1584 - return pt_write; 1585 - } 1586 - 1587 - if (largepage && level == 2) { 1588 - mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL, 1589 - 0, write, 1, &pt_write, 1, gfn, pfn, false); 1590 - return pt_write; 1591 - } 1592 - 1593 - if (table[index] == shadow_trap_nonpresent_pte) { 1594 - struct kvm_mmu_page *new_table; 1595 - gfn_t pseudo_gfn; 1596 - 1597 - pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK) 1598 - >> PAGE_SHIFT; 1599 - new_table = kvm_mmu_get_page(vcpu, pseudo_gfn, 1600 - v, level - 1, 1601 - 1, ACC_ALL, &table[index]); 1602 - if (!new_table) { 1603 - pgprintk("nonpaging_map: ENOMEM\n"); 1604 - kvm_release_pfn_clean(pfn); 1605 - return -ENOMEM; 1606 - } 1607 - 1608 - set_shadow_pte(&table[index], 1609 - __pa(new_table->spt) 1610 - | PT_PRESENT_MASK | PT_WRITABLE_MASK 1611 - | shadow_user_mask | shadow_x_mask); 1612 - } 1613 - table_addr = table[index] & PT64_BASE_ADDR_MASK; 1251 + if (level == PT_PAGE_TABLE_LEVEL 1252 + || (walk->largepage && level == PT_DIRECTORY_LEVEL)) { 1253 + mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, 1254 + 0, walk->write, 1, &walk->pt_write, 1255 + walk->largepage, gfn, walk->pfn, false); 1256 + ++vcpu->stat.pf_fixed; 1257 + return 1; 1614 1258 } 1259 + 1260 + if (*sptep == shadow_trap_nonpresent_pte) { 1261 + pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT; 1262 + sp = kvm_mmu_get_page(vcpu, pseudo_gfn, (gva_t)addr, level - 1, 1263 + 1, ACC_ALL, sptep); 1264 + if (!sp) { 1265 + pgprintk("nonpaging_map: ENOMEM\n"); 1266 + kvm_release_pfn_clean(walk->pfn); 1267 + return -ENOMEM; 1268 + } 1269 + 1270 + set_shadow_pte(sptep, 1271 + __pa(sp->spt) 1272 + | PT_PRESENT_MASK | PT_WRITABLE_MASK 1273 + | shadow_user_mask | shadow_x_mask); 1274 + } 1275 + return 0; 1276 + } 1277 + 1278 + static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, 1279 + int largepage, gfn_t gfn, pfn_t pfn) 1280 + { 1281 + int r; 1282 + struct direct_shadow_walk walker = { 1283 + .walker = { .entry = direct_map_entry, }, 1284 + .pfn = pfn, 1285 + .largepage = largepage, 1286 + .write = write, 1287 + .pt_write = 0, 1288 + }; 1289 + 1290 + r = walk_shadow(&walker.walker, vcpu, gfn << PAGE_SHIFT); 1291 + if (r < 0) 1292 + return r; 1293 + 
return walker.pt_write; 1615 1294 } 1616 1295 1617 1296 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) ··· 1634 1287 pfn_t pfn; 1635 1288 unsigned long mmu_seq; 1636 1289 1637 - down_read(&current->mm->mmap_sem); 1638 1290 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { 1639 1291 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 1640 1292 largepage = 1; 1641 1293 } 1642 1294 1643 1295 mmu_seq = vcpu->kvm->mmu_notifier_seq; 1644 - /* implicit mb(), we'll read before PT lock is unlocked */ 1296 + smp_rmb(); 1645 1297 pfn = gfn_to_pfn(vcpu->kvm, gfn); 1646 - up_read(&current->mm->mmap_sem); 1647 1298 1648 1299 /* mmio */ 1649 1300 if (is_error_pfn(pfn)) { ··· 1653 1308 if (mmu_notifier_retry(vcpu, mmu_seq)) 1654 1309 goto out_unlock; 1655 1310 kvm_mmu_free_some_pages(vcpu); 1656 - r = __direct_map(vcpu, v, write, largepage, gfn, pfn, 1657 - PT32E_ROOT_LEVEL); 1311 + r = __direct_map(vcpu, v, write, largepage, gfn, pfn); 1658 1312 spin_unlock(&vcpu->kvm->mmu_lock); 1659 1313 1660 1314 ··· 1749 1405 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); 1750 1406 } 1751 1407 1408 + static void mmu_sync_roots(struct kvm_vcpu *vcpu) 1409 + { 1410 + int i; 1411 + struct kvm_mmu_page *sp; 1412 + 1413 + if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) 1414 + return; 1415 + if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { 1416 + hpa_t root = vcpu->arch.mmu.root_hpa; 1417 + sp = page_header(root); 1418 + mmu_sync_children(vcpu, sp); 1419 + return; 1420 + } 1421 + for (i = 0; i < 4; ++i) { 1422 + hpa_t root = vcpu->arch.mmu.pae_root[i]; 1423 + 1424 + if (root) { 1425 + root &= PT64_BASE_ADDR_MASK; 1426 + sp = page_header(root); 1427 + mmu_sync_children(vcpu, sp); 1428 + } 1429 + } 1430 + } 1431 + 1432 + void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) 1433 + { 1434 + spin_lock(&vcpu->kvm->mmu_lock); 1435 + mmu_sync_roots(vcpu); 1436 + spin_unlock(&vcpu->kvm->mmu_lock); 1437 + } 1438 + 1752 1439 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) 1753 1440 { 1754 1441 return vaddr; ··· 1821 1446 if (r) 1822 1447 return r; 1823 1448 1824 - down_read(&current->mm->mmap_sem); 1825 1449 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { 1826 1450 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 1827 1451 largepage = 1; 1828 1452 } 1829 1453 mmu_seq = vcpu->kvm->mmu_notifier_seq; 1830 - /* implicit mb(), we'll read before PT lock is unlocked */ 1454 + smp_rmb(); 1831 1455 pfn = gfn_to_pfn(vcpu->kvm, gfn); 1832 - up_read(&current->mm->mmap_sem); 1833 1456 if (is_error_pfn(pfn)) { 1834 1457 kvm_release_pfn_clean(pfn); 1835 1458 return 1; ··· 1837 1464 goto out_unlock; 1838 1465 kvm_mmu_free_some_pages(vcpu); 1839 1466 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK, 1840 - largepage, gfn, pfn, kvm_x86_ops->get_tdp_level()); 1467 + largepage, gfn, pfn); 1841 1468 spin_unlock(&vcpu->kvm->mmu_lock); 1842 1469 1843 1470 return r; ··· 1862 1489 context->gva_to_gpa = nonpaging_gva_to_gpa; 1863 1490 context->free = nonpaging_free; 1864 1491 context->prefetch_page = nonpaging_prefetch_page; 1492 + context->sync_page = nonpaging_sync_page; 1493 + context->invlpg = nonpaging_invlpg; 1865 1494 context->root_level = 0; 1866 1495 context->shadow_root_level = PT32E_ROOT_LEVEL; 1867 1496 context->root_hpa = INVALID_PAGE; ··· 1911 1536 context->page_fault = paging64_page_fault; 1912 1537 context->gva_to_gpa = paging64_gva_to_gpa; 1913 1538 context->prefetch_page = paging64_prefetch_page; 1539 + context->sync_page = paging64_sync_page; 1540 + context->invlpg = 
paging64_invlpg; 1914 1541 context->free = paging_free; 1915 1542 context->root_level = level; 1916 1543 context->shadow_root_level = level; ··· 1934 1557 context->gva_to_gpa = paging32_gva_to_gpa; 1935 1558 context->free = paging_free; 1936 1559 context->prefetch_page = paging32_prefetch_page; 1560 + context->sync_page = paging32_sync_page; 1561 + context->invlpg = paging32_invlpg; 1937 1562 context->root_level = PT32_ROOT_LEVEL; 1938 1563 context->shadow_root_level = PT32E_ROOT_LEVEL; 1939 1564 context->root_hpa = INVALID_PAGE; ··· 1955 1576 context->page_fault = tdp_page_fault; 1956 1577 context->free = nonpaging_free; 1957 1578 context->prefetch_page = nonpaging_prefetch_page; 1579 + context->sync_page = nonpaging_sync_page; 1580 + context->invlpg = nonpaging_invlpg; 1958 1581 context->shadow_root_level = kvm_x86_ops->get_tdp_level(); 1959 1582 context->root_hpa = INVALID_PAGE; 1960 1583 ··· 2028 1647 spin_lock(&vcpu->kvm->mmu_lock); 2029 1648 kvm_mmu_free_some_pages(vcpu); 2030 1649 mmu_alloc_roots(vcpu); 1650 + mmu_sync_roots(vcpu); 2031 1651 spin_unlock(&vcpu->kvm->mmu_lock); 2032 1652 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa); 2033 1653 kvm_mmu_flush_tlb(vcpu); ··· 2149 1767 return; 2150 1768 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; 2151 1769 2152 - down_read(&current->mm->mmap_sem); 2153 1770 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) { 2154 1771 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 2155 1772 vcpu->arch.update_pte.largepage = 1; 2156 1773 } 2157 1774 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; 2158 - /* implicit mb(), we'll read before PT lock is unlocked */ 1775 + smp_rmb(); 2159 1776 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2160 - up_read(&current->mm->mmap_sem); 2161 1777 2162 1778 if (is_error_pfn(pfn)) { 2163 1779 kvm_release_pfn_clean(pfn); ··· 2217 1837 index = kvm_page_table_hashfn(gfn); 2218 1838 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 2219 1839 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { 2220 - if (sp->gfn != gfn || sp->role.metaphysical) 1840 + if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid) 2221 1841 continue; 2222 1842 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 
4 : 8; 2223 1843 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); ··· 2235 1855 */ 2236 1856 pgprintk("misaligned: gpa %llx bytes %d role %x\n", 2237 1857 gpa, bytes, sp->role.word); 2238 - kvm_mmu_zap_page(vcpu->kvm, sp); 1858 + if (kvm_mmu_zap_page(vcpu->kvm, sp)) 1859 + n = bucket->first; 2239 1860 ++vcpu->kvm->stat.mmu_flooded; 2240 1861 continue; 2241 1862 } ··· 2350 1969 } 2351 1970 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); 2352 1971 1972 + void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 1973 + { 1974 + spin_lock(&vcpu->kvm->mmu_lock); 1975 + vcpu->arch.mmu.invlpg(vcpu, gva); 1976 + spin_unlock(&vcpu->kvm->mmu_lock); 1977 + kvm_mmu_flush_tlb(vcpu); 1978 + ++vcpu->stat.invlpg; 1979 + } 1980 + EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); 1981 + 2353 1982 void kvm_enable_tdp(void) 2354 1983 { 2355 1984 tdp_enabled = true; ··· 2446 2055 { 2447 2056 struct kvm_mmu_page *sp; 2448 2057 2058 + spin_lock(&kvm->mmu_lock); 2449 2059 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { 2450 2060 int i; 2451 2061 u64 *pt; ··· 2460 2068 if (pt[i] & PT_WRITABLE_MASK) 2461 2069 pt[i] &= ~PT_WRITABLE_MASK; 2462 2070 } 2071 + kvm_flush_remote_tlbs(kvm); 2072 + spin_unlock(&kvm->mmu_lock); 2463 2073 } 2464 2074 2465 2075 void kvm_mmu_zap_all(struct kvm *kvm) ··· 2470 2076 2471 2077 spin_lock(&kvm->mmu_lock); 2472 2078 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) 2473 - kvm_mmu_zap_page(kvm, sp); 2079 + if (kvm_mmu_zap_page(kvm, sp)) 2080 + node = container_of(kvm->arch.active_mmu_pages.next, 2081 + struct kvm_mmu_page, link); 2474 2082 spin_unlock(&kvm->mmu_lock); 2475 2083 2476 2084 kvm_flush_remote_tlbs(kvm); ··· 2687 2291 gpa_t addr, unsigned long *ret) 2688 2292 { 2689 2293 int r; 2690 - struct kvm_pv_mmu_op_buffer buffer; 2294 + struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer; 2691 2295 2692 - buffer.ptr = buffer.buf; 2693 - buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf); 2694 - buffer.processed = 0; 2296 + buffer->ptr = buffer->buf; 2297 + buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf); 2298 + buffer->processed = 0; 2695 2299 2696 - r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len); 2300 + r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len); 2697 2301 if (r) 2698 2302 goto out; 2699 2303 2700 - while (buffer.len) { 2701 - r = kvm_pv_mmu_op_one(vcpu, &buffer); 2304 + while (buffer->len) { 2305 + r = kvm_pv_mmu_op_one(vcpu, buffer); 2702 2306 if (r < 0) 2703 2307 goto out; 2704 2308 if (r == 0) ··· 2707 2311 2708 2312 r = 1; 2709 2313 out: 2710 - *ret = buffer.processed; 2314 + *ret = buffer->processed; 2711 2315 return r; 2712 2316 } 2713 2317
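The mmu.c rework above introduces three pieces worth calling out: a generic shadow page-table walker (struct kvm_shadow_walk plus walk_shadow(), used by __direct_map() here and by the paging_tmpl.h fetch/invlpg paths below), parent-pointer walking for marking ancestors unsync (mmu_parent_walk), and out-of-sync shadow pages (kvm_unsync_page()/kvm_sync_page(), gated by the new oos_shadow module parameter). The walker is a callback-in-struct pattern; a hypothetical extra walker written against that interface, which would have to live in mmu.c since walk_shadow() is static, could look like this (it is not part of the patch):

	/* Hypothetical walker: counts present sptes along one translation.
	 * walk_shadow() visits one spte per paging level and stops early if
	 * the ->entry callback returns non-zero, so returning 0 here walks
	 * all the way down to the page-table level.
	 */
	struct count_walker {
		struct kvm_shadow_walk walker;
		int present;
	};

	static int count_entry(struct kvm_shadow_walk *_w, struct kvm_vcpu *vcpu,
			       u64 addr, u64 *sptep, int level)
	{
		struct count_walker *w = container_of(_w, struct count_walker, walker);

		if (is_shadow_present_pte(*sptep))
			w->present++;
		return 0;
	}

	static int count_present_sptes(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct count_walker w = {
			.walker = { .entry = count_entry, },
			.present = 0,
		};

		walk_shadow(&w.walker, vcpu, addr);
		return w.present;
	}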
+169 -82
arch/x86/kvm/paging_tmpl.h
··· 25 25 #if PTTYPE == 64 26 26 #define pt_element_t u64 27 27 #define guest_walker guest_walker64 28 + #define shadow_walker shadow_walker64 28 29 #define FNAME(name) paging##64_##name 29 30 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK 30 31 #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK 31 32 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) 32 - #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 33 33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) 34 34 #define PT_LEVEL_BITS PT64_LEVEL_BITS 35 35 #ifdef CONFIG_X86_64 ··· 42 42 #elif PTTYPE == 32 43 43 #define pt_element_t u32 44 44 #define guest_walker guest_walker32 45 + #define shadow_walker shadow_walker32 45 46 #define FNAME(name) paging##32_##name 46 47 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK 47 48 #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK 48 49 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) 49 - #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 50 50 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) 51 51 #define PT_LEVEL_BITS PT32_LEVEL_BITS 52 52 #define PT_MAX_FULL_LEVELS 2 ··· 73 73 u32 error_code; 74 74 }; 75 75 76 + struct shadow_walker { 77 + struct kvm_shadow_walk walker; 78 + struct guest_walker *guest_walker; 79 + int user_fault; 80 + int write_fault; 81 + int largepage; 82 + int *ptwrite; 83 + pfn_t pfn; 84 + u64 *sptep; 85 + }; 86 + 76 87 static gfn_t gpte_to_gfn(pt_element_t gpte) 77 88 { 78 89 return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT; ··· 102 91 pt_element_t *table; 103 92 struct page *page; 104 93 105 - down_read(&current->mm->mmap_sem); 106 94 page = gfn_to_page(kvm, table_gfn); 107 - up_read(&current->mm->mmap_sem); 108 95 109 96 table = kmap_atomic(page, KM_USER0); 110 - 111 97 ret = CMPXCHG(&table[index], orig_pte, new_pte); 112 - 113 98 kunmap_atomic(table, KM_USER0); 114 99 115 100 kvm_release_page_dirty(page); ··· 281 274 /* 282 275 * Fetch a shadow pte for a specific level in the paging hierarchy. 
283 276 */ 277 + static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw, 278 + struct kvm_vcpu *vcpu, u64 addr, 279 + u64 *sptep, int level) 280 + { 281 + struct shadow_walker *sw = 282 + container_of(_sw, struct shadow_walker, walker); 283 + struct guest_walker *gw = sw->guest_walker; 284 + unsigned access = gw->pt_access; 285 + struct kvm_mmu_page *shadow_page; 286 + u64 spte; 287 + int metaphysical; 288 + gfn_t table_gfn; 289 + int r; 290 + pt_element_t curr_pte; 291 + 292 + if (level == PT_PAGE_TABLE_LEVEL 293 + || (sw->largepage && level == PT_DIRECTORY_LEVEL)) { 294 + mmu_set_spte(vcpu, sptep, access, gw->pte_access & access, 295 + sw->user_fault, sw->write_fault, 296 + gw->ptes[gw->level-1] & PT_DIRTY_MASK, 297 + sw->ptwrite, sw->largepage, gw->gfn, sw->pfn, 298 + false); 299 + sw->sptep = sptep; 300 + return 1; 301 + } 302 + 303 + if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) 304 + return 0; 305 + 306 + if (is_large_pte(*sptep)) { 307 + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); 308 + kvm_flush_remote_tlbs(vcpu->kvm); 309 + rmap_remove(vcpu->kvm, sptep); 310 + } 311 + 312 + if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) { 313 + metaphysical = 1; 314 + if (!is_dirty_pte(gw->ptes[level - 1])) 315 + access &= ~ACC_WRITE_MASK; 316 + table_gfn = gpte_to_gfn(gw->ptes[level - 1]); 317 + } else { 318 + metaphysical = 0; 319 + table_gfn = gw->table_gfn[level - 2]; 320 + } 321 + shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1, 322 + metaphysical, access, sptep); 323 + if (!metaphysical) { 324 + r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2], 325 + &curr_pte, sizeof(curr_pte)); 326 + if (r || curr_pte != gw->ptes[level - 2]) { 327 + kvm_release_pfn_clean(sw->pfn); 328 + sw->sptep = NULL; 329 + return 1; 330 + } 331 + } 332 + 333 + spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK 334 + | PT_WRITABLE_MASK | PT_USER_MASK; 335 + *sptep = spte; 336 + return 0; 337 + } 338 + 284 339 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, 285 - struct guest_walker *walker, 340 + struct guest_walker *guest_walker, 286 341 int user_fault, int write_fault, int largepage, 287 342 int *ptwrite, pfn_t pfn) 288 343 { 289 - hpa_t shadow_addr; 290 - int level; 291 - u64 *shadow_ent; 292 - unsigned access = walker->pt_access; 344 + struct shadow_walker walker = { 345 + .walker = { .entry = FNAME(shadow_walk_entry), }, 346 + .guest_walker = guest_walker, 347 + .user_fault = user_fault, 348 + .write_fault = write_fault, 349 + .largepage = largepage, 350 + .ptwrite = ptwrite, 351 + .pfn = pfn, 352 + }; 293 353 294 - if (!is_present_pte(walker->ptes[walker->level - 1])) 354 + if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1])) 295 355 return NULL; 296 356 297 - shadow_addr = vcpu->arch.mmu.root_hpa; 298 - level = vcpu->arch.mmu.shadow_root_level; 299 - if (level == PT32E_ROOT_LEVEL) { 300 - shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; 301 - shadow_addr &= PT64_BASE_ADDR_MASK; 302 - --level; 303 - } 357 + walk_shadow(&walker.walker, vcpu, addr); 304 358 305 - for (; ; level--) { 306 - u32 index = SHADOW_PT_INDEX(addr, level); 307 - struct kvm_mmu_page *shadow_page; 308 - u64 shadow_pte; 309 - int metaphysical; 310 - gfn_t table_gfn; 311 - 312 - shadow_ent = ((u64 *)__va(shadow_addr)) + index; 313 - if (level == PT_PAGE_TABLE_LEVEL) 314 - break; 315 - 316 - if (largepage && level == PT_DIRECTORY_LEVEL) 317 - break; 318 - 319 - if (is_shadow_present_pte(*shadow_ent) 320 - && 
!is_large_pte(*shadow_ent)) { 321 - shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK; 322 - continue; 323 - } 324 - 325 - if (is_large_pte(*shadow_ent)) 326 - rmap_remove(vcpu->kvm, shadow_ent); 327 - 328 - if (level - 1 == PT_PAGE_TABLE_LEVEL 329 - && walker->level == PT_DIRECTORY_LEVEL) { 330 - metaphysical = 1; 331 - if (!is_dirty_pte(walker->ptes[level - 1])) 332 - access &= ~ACC_WRITE_MASK; 333 - table_gfn = gpte_to_gfn(walker->ptes[level - 1]); 334 - } else { 335 - metaphysical = 0; 336 - table_gfn = walker->table_gfn[level - 2]; 337 - } 338 - shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, 339 - metaphysical, access, 340 - shadow_ent); 341 - if (!metaphysical) { 342 - int r; 343 - pt_element_t curr_pte; 344 - r = kvm_read_guest_atomic(vcpu->kvm, 345 - walker->pte_gpa[level - 2], 346 - &curr_pte, sizeof(curr_pte)); 347 - if (r || curr_pte != walker->ptes[level - 2]) { 348 - kvm_release_pfn_clean(pfn); 349 - return NULL; 350 - } 351 - } 352 - shadow_addr = __pa(shadow_page->spt); 353 - shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK 354 - | PT_WRITABLE_MASK | PT_USER_MASK; 355 - set_shadow_pte(shadow_ent, shadow_pte); 356 - } 357 - 358 - mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access, 359 - user_fault, write_fault, 360 - walker->ptes[walker->level-1] & PT_DIRTY_MASK, 361 - ptwrite, largepage, walker->gfn, pfn, false); 362 - 363 - return shadow_ent; 359 + return walker.sptep; 364 360 } 365 361 366 362 /* ··· 417 407 return 0; 418 408 } 419 409 420 - down_read(&current->mm->mmap_sem); 421 410 if (walker.level == PT_DIRECTORY_LEVEL) { 422 411 gfn_t large_gfn; 423 412 large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1); ··· 426 417 } 427 418 } 428 419 mmu_seq = vcpu->kvm->mmu_notifier_seq; 429 - /* implicit mb(), we'll read before PT lock is unlocked */ 420 + smp_rmb(); 430 421 pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); 431 - up_read(&current->mm->mmap_sem); 432 422 433 423 /* mmio */ 434 424 if (is_error_pfn(pfn)) { ··· 459 451 spin_unlock(&vcpu->kvm->mmu_lock); 460 452 kvm_release_pfn_clean(pfn); 461 453 return 0; 454 + } 455 + 456 + static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, 457 + struct kvm_vcpu *vcpu, u64 addr, 458 + u64 *sptep, int level) 459 + { 460 + 461 + if (level == PT_PAGE_TABLE_LEVEL) { 462 + if (is_shadow_present_pte(*sptep)) 463 + rmap_remove(vcpu->kvm, sptep); 464 + set_shadow_pte(sptep, shadow_trap_nonpresent_pte); 465 + return 1; 466 + } 467 + if (!is_shadow_present_pte(*sptep)) 468 + return 1; 469 + return 0; 470 + } 471 + 472 + static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) 473 + { 474 + struct shadow_walker walker = { 475 + .walker = { .entry = FNAME(shadow_invlpg_entry), }, 476 + }; 477 + 478 + walk_shadow(&walker.walker, vcpu, gva); 462 479 } 463 480 464 481 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) ··· 532 499 } 533 500 } 534 501 502 + /* 503 + * Using the cached information from sp->gfns is safe because: 504 + * - The spte has a reference to the struct page, so the pfn for a given gfn 505 + * can't change unless all sptes pointing to it are nuked first. 506 + * - Alias changes zap the entire shadow cache. 
507 + */ 508 + static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 509 + { 510 + int i, offset, nr_present; 511 + 512 + offset = nr_present = 0; 513 + 514 + if (PTTYPE == 32) 515 + offset = sp->role.quadrant << PT64_LEVEL_BITS; 516 + 517 + for (i = 0; i < PT64_ENT_PER_PAGE; i++) { 518 + unsigned pte_access; 519 + pt_element_t gpte; 520 + gpa_t pte_gpa; 521 + gfn_t gfn = sp->gfns[i]; 522 + 523 + if (!is_shadow_present_pte(sp->spt[i])) 524 + continue; 525 + 526 + pte_gpa = gfn_to_gpa(sp->gfn); 527 + pte_gpa += (i+offset) * sizeof(pt_element_t); 528 + 529 + if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, 530 + sizeof(pt_element_t))) 531 + return -EINVAL; 532 + 533 + if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) || 534 + !(gpte & PT_ACCESSED_MASK)) { 535 + u64 nonpresent; 536 + 537 + rmap_remove(vcpu->kvm, &sp->spt[i]); 538 + if (is_present_pte(gpte)) 539 + nonpresent = shadow_trap_nonpresent_pte; 540 + else 541 + nonpresent = shadow_notrap_nonpresent_pte; 542 + set_shadow_pte(&sp->spt[i], nonpresent); 543 + continue; 544 + } 545 + 546 + nr_present++; 547 + pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); 548 + set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, 549 + is_dirty_pte(gpte), 0, gfn, 550 + spte_to_pfn(sp->spt[i]), true, false); 551 + } 552 + 553 + return !nr_present; 554 + } 555 + 535 556 #undef pt_element_t 536 557 #undef guest_walker 558 + #undef shadow_walker 537 559 #undef FNAME 538 560 #undef PT_BASE_ADDR_MASK 539 561 #undef PT_INDEX 540 - #undef SHADOW_PT_INDEX 541 562 #undef PT_LEVEL_MASK 542 563 #undef PT_DIR_BASE_ADDR_MASK 543 564 #undef PT_LEVEL_BITS
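paging_tmpl.h is converted to the same walker pattern above (shadow_walker with FNAME(shadow_walk_entry) replacing the open-coded fetch loop), gains FNAME(invlpg) for the new invlpg intercept, and gains FNAME(sync_page), which re-reads the guest page table backing an unsync shadow page and repairs its sptes through set_spte(). Tying this to the mmu.c side: a write-protect decision can now leave a shadow page out of sync instead (kvm_unsync_page), and kvm_mmu_sync_roots()/mmu_sync_children() bring it back through this sync_page hook on the next mmu reload. A hypothetical helper, not in the patch, showing how the new sp->unsync flag would be consulted:

	/* Hypothetical: mmu_need_write_protect() in the mmu.c hunk does
	 * essentially this lookup before deciding whether a guest page
	 * table can be left writable and merely marked out of sync.
	 */
	static bool gfn_has_unsync_shadow(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		struct kvm_mmu_page *sp = kvm_mmu_lookup_page(vcpu->kvm, gfn);

		return sp && sp->unsync;
	}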
+63 -93
arch/x86/kvm/svm.c
··· 18 18 #include "kvm_svm.h" 19 19 #include "irq.h" 20 20 #include "mmu.h" 21 + #include "kvm_cache_regs.h" 21 22 22 23 #include <linux/module.h> 23 24 #include <linux/kernel.h> ··· 36 35 #define IOPM_ALLOC_ORDER 2 37 36 #define MSRPM_ALLOC_ORDER 1 38 37 39 - #define DB_VECTOR 1 40 - #define UD_VECTOR 6 41 - #define GP_VECTOR 13 42 - 43 38 #define DR7_GD_MASK (1 << 13) 44 39 #define DR6_BD_MASK (1 << 13) 45 40 ··· 44 47 45 48 #define SVM_FEATURE_NPT (1 << 0) 46 49 #define SVM_FEATURE_LBRV (1 << 1) 47 - #define SVM_DEATURE_SVML (1 << 2) 50 + #define SVM_FEATURE_SVML (1 << 2) 48 51 49 52 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) 50 53 ··· 233 236 printk(KERN_DEBUG "%s: NOP\n", __func__); 234 237 return; 235 238 } 236 - if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE) 237 - printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n", 238 - __func__, 239 - svm->vmcb->save.rip, 240 - svm->next_rip); 239 + if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) 240 + printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n", 241 + __func__, kvm_rip_read(vcpu), svm->next_rip); 241 242 242 - vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip; 243 + kvm_rip_write(vcpu, svm->next_rip); 243 244 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; 244 245 245 246 vcpu->arch.interrupt_window_open = 1; ··· 525 530 (1ULL << INTERCEPT_CPUID) | 526 531 (1ULL << INTERCEPT_INVD) | 527 532 (1ULL << INTERCEPT_HLT) | 533 + (1ULL << INTERCEPT_INVLPG) | 528 534 (1ULL << INTERCEPT_INVLPGA) | 529 535 (1ULL << INTERCEPT_IOIO_PROT) | 530 536 (1ULL << INTERCEPT_MSR_PROT) | ··· 577 581 save->dr7 = 0x400; 578 582 save->rflags = 2; 579 583 save->rip = 0x0000fff0; 584 + svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; 580 585 581 586 /* 582 587 * cr0 val on cpu init should be 0x60000010, we enable cpu ··· 590 593 if (npt_enabled) { 591 594 /* Setup VMCB for Nested Paging */ 592 595 control->nested_ctl = 1; 593 - control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH); 596 + control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) | 597 + (1ULL << INTERCEPT_INVLPG)); 594 598 control->intercept_exceptions &= ~(1 << PF_VECTOR); 595 599 control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK| 596 600 INTERCEPT_CR3_MASK); ··· 613 615 init_vmcb(svm); 614 616 615 617 if (vcpu->vcpu_id != 0) { 616 - svm->vmcb->save.rip = 0; 618 + kvm_rip_write(vcpu, 0); 617 619 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12; 618 620 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8; 619 621 } 622 + vcpu->arch.regs_avail = ~0; 623 + vcpu->arch.regs_dirty = ~0; 620 624 621 625 return 0; 622 626 } ··· 719 719 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); 720 720 721 721 rdtscll(vcpu->arch.host_tsc); 722 - } 723 - 724 - static void svm_cache_regs(struct kvm_vcpu *vcpu) 725 - { 726 - struct vcpu_svm *svm = to_svm(vcpu); 727 - 728 - vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 729 - vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 730 - vcpu->arch.rip = svm->vmcb->save.rip; 731 - } 732 - 733 - static void svm_decache_regs(struct kvm_vcpu *vcpu) 734 - { 735 - struct vcpu_svm *svm = to_svm(vcpu); 736 - svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 737 - svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 738 - svm->vmcb->save.rip = vcpu->arch.rip; 739 722 } 740 723 741 724 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) ··· 1023 1040 if (npt_enabled) 1024 1041 svm_flush_tlb(&svm->vcpu); 1025 1042 1026 - if (event_injection) 1043 + if (!npt_enabled && event_injection) 1027 1044 
kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); 1028 1045 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); 1029 1046 } ··· 1122 1139 1123 1140 static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1124 1141 { 1125 - svm->next_rip = svm->vmcb->save.rip + 1; 1142 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; 1126 1143 skip_emulated_instruction(&svm->vcpu); 1127 1144 return kvm_emulate_halt(&svm->vcpu); 1128 1145 } 1129 1146 1130 1147 static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1131 1148 { 1132 - svm->next_rip = svm->vmcb->save.rip + 3; 1149 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1133 1150 skip_emulated_instruction(&svm->vcpu); 1134 1151 kvm_emulate_hypercall(&svm->vcpu); 1135 1152 return 1; ··· 1161 1178 1162 1179 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1163 1180 { 1164 - svm->next_rip = svm->vmcb->save.rip + 2; 1181 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; 1165 1182 kvm_emulate_cpuid(&svm->vcpu); 1183 + return 1; 1184 + } 1185 + 1186 + static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1187 + { 1188 + if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE) 1189 + pr_unimpl(&svm->vcpu, "%s: failed\n", __func__); 1166 1190 return 1; 1167 1191 } 1168 1192 ··· 1263 1273 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data, 1264 1274 (u32)(data >> 32), handler); 1265 1275 1266 - svm->vmcb->save.rax = data & 0xffffffff; 1276 + svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff; 1267 1277 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32; 1268 - svm->next_rip = svm->vmcb->save.rip + 2; 1278 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; 1269 1279 skip_emulated_instruction(&svm->vcpu); 1270 1280 } 1271 1281 return 1; ··· 1349 1359 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1350 1360 { 1351 1361 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; 1352 - u64 data = (svm->vmcb->save.rax & -1u) 1362 + u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) 1353 1363 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); 1354 1364 1355 1365 KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32), 1356 1366 handler); 1357 1367 1358 - svm->next_rip = svm->vmcb->save.rip + 2; 1368 + svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; 1359 1369 if (svm_set_msr(&svm->vcpu, ecx, data)) 1360 1370 kvm_inject_gp(&svm->vcpu, 0); 1361 1371 else ··· 1426 1436 [SVM_EXIT_CPUID] = cpuid_interception, 1427 1437 [SVM_EXIT_INVD] = emulate_on_interception, 1428 1438 [SVM_EXIT_HLT] = halt_interception, 1429 - [SVM_EXIT_INVLPG] = emulate_on_interception, 1439 + [SVM_EXIT_INVLPG] = invlpg_interception, 1430 1440 [SVM_EXIT_INVLPGA] = invalid_op_interception, 1431 1441 [SVM_EXIT_IOIO] = io_interception, 1432 1442 [SVM_EXIT_MSR] = msr_interception, ··· 1528 1538 1529 1539 KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler); 1530 1540 1541 + ++svm->vcpu.stat.irq_injections; 1531 1542 control = &svm->vmcb->control; 1532 1543 control->int_vector = irq; 1533 1544 control->int_ctl &= ~V_INTR_PRIO_MASK; ··· 1707 1716 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; 1708 1717 } 1709 1718 1719 + #ifdef CONFIG_X86_64 1720 + #define R "r" 1721 + #else 1722 + #define R "e" 1723 + #endif 1724 + 1710 1725 static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1711 1726 { 1712 1727 struct vcpu_svm *svm = to_svm(vcpu); 1713 1728 u16 fs_selector; 1714 1729 u16 gs_selector; 1715 1730 u16 ldt_selector; 1731 + 1732 + 
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 1733 + svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 1734 + svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 1716 1735 1717 1736 pre_svm_run(svm); 1718 1737 ··· 1751 1750 local_irq_enable(); 1752 1751 1753 1752 asm volatile ( 1753 + "push %%"R"bp; \n\t" 1754 + "mov %c[rbx](%[svm]), %%"R"bx \n\t" 1755 + "mov %c[rcx](%[svm]), %%"R"cx \n\t" 1756 + "mov %c[rdx](%[svm]), %%"R"dx \n\t" 1757 + "mov %c[rsi](%[svm]), %%"R"si \n\t" 1758 + "mov %c[rdi](%[svm]), %%"R"di \n\t" 1759 + "mov %c[rbp](%[svm]), %%"R"bp \n\t" 1754 1760 #ifdef CONFIG_X86_64 1755 - "push %%rbp; \n\t" 1756 - #else 1757 - "push %%ebp; \n\t" 1758 - #endif 1759 - 1760 - #ifdef CONFIG_X86_64 1761 - "mov %c[rbx](%[svm]), %%rbx \n\t" 1762 - "mov %c[rcx](%[svm]), %%rcx \n\t" 1763 - "mov %c[rdx](%[svm]), %%rdx \n\t" 1764 - "mov %c[rsi](%[svm]), %%rsi \n\t" 1765 - "mov %c[rdi](%[svm]), %%rdi \n\t" 1766 - "mov %c[rbp](%[svm]), %%rbp \n\t" 1767 1761 "mov %c[r8](%[svm]), %%r8 \n\t" 1768 1762 "mov %c[r9](%[svm]), %%r9 \n\t" 1769 1763 "mov %c[r10](%[svm]), %%r10 \n\t" ··· 1767 1771 "mov %c[r13](%[svm]), %%r13 \n\t" 1768 1772 "mov %c[r14](%[svm]), %%r14 \n\t" 1769 1773 "mov %c[r15](%[svm]), %%r15 \n\t" 1770 - #else 1771 - "mov %c[rbx](%[svm]), %%ebx \n\t" 1772 - "mov %c[rcx](%[svm]), %%ecx \n\t" 1773 - "mov %c[rdx](%[svm]), %%edx \n\t" 1774 - "mov %c[rsi](%[svm]), %%esi \n\t" 1775 - "mov %c[rdi](%[svm]), %%edi \n\t" 1776 - "mov %c[rbp](%[svm]), %%ebp \n\t" 1777 1774 #endif 1778 1775 1779 - #ifdef CONFIG_X86_64 1780 1776 /* Enter guest mode */ 1781 - "push %%rax \n\t" 1782 - "mov %c[vmcb](%[svm]), %%rax \n\t" 1777 + "push %%"R"ax \n\t" 1778 + "mov %c[vmcb](%[svm]), %%"R"ax \n\t" 1783 1779 __ex(SVM_VMLOAD) "\n\t" 1784 1780 __ex(SVM_VMRUN) "\n\t" 1785 1781 __ex(SVM_VMSAVE) "\n\t" 1786 - "pop %%rax \n\t" 1787 - #else 1788 - /* Enter guest mode */ 1789 - "push %%eax \n\t" 1790 - "mov %c[vmcb](%[svm]), %%eax \n\t" 1791 - __ex(SVM_VMLOAD) "\n\t" 1792 - __ex(SVM_VMRUN) "\n\t" 1793 - __ex(SVM_VMSAVE) "\n\t" 1794 - "pop %%eax \n\t" 1795 - #endif 1782 + "pop %%"R"ax \n\t" 1796 1783 1797 1784 /* Save guest registers, load host registers */ 1785 + "mov %%"R"bx, %c[rbx](%[svm]) \n\t" 1786 + "mov %%"R"cx, %c[rcx](%[svm]) \n\t" 1787 + "mov %%"R"dx, %c[rdx](%[svm]) \n\t" 1788 + "mov %%"R"si, %c[rsi](%[svm]) \n\t" 1789 + "mov %%"R"di, %c[rdi](%[svm]) \n\t" 1790 + "mov %%"R"bp, %c[rbp](%[svm]) \n\t" 1798 1791 #ifdef CONFIG_X86_64 1799 - "mov %%rbx, %c[rbx](%[svm]) \n\t" 1800 - "mov %%rcx, %c[rcx](%[svm]) \n\t" 1801 - "mov %%rdx, %c[rdx](%[svm]) \n\t" 1802 - "mov %%rsi, %c[rsi](%[svm]) \n\t" 1803 - "mov %%rdi, %c[rdi](%[svm]) \n\t" 1804 - "mov %%rbp, %c[rbp](%[svm]) \n\t" 1805 1792 "mov %%r8, %c[r8](%[svm]) \n\t" 1806 1793 "mov %%r9, %c[r9](%[svm]) \n\t" 1807 1794 "mov %%r10, %c[r10](%[svm]) \n\t" ··· 1793 1814 "mov %%r13, %c[r13](%[svm]) \n\t" 1794 1815 "mov %%r14, %c[r14](%[svm]) \n\t" 1795 1816 "mov %%r15, %c[r15](%[svm]) \n\t" 1796 - 1797 - "pop %%rbp; \n\t" 1798 - #else 1799 - "mov %%ebx, %c[rbx](%[svm]) \n\t" 1800 - "mov %%ecx, %c[rcx](%[svm]) \n\t" 1801 - "mov %%edx, %c[rdx](%[svm]) \n\t" 1802 - "mov %%esi, %c[rsi](%[svm]) \n\t" 1803 - "mov %%edi, %c[rdi](%[svm]) \n\t" 1804 - "mov %%ebp, %c[rbp](%[svm]) \n\t" 1805 - 1806 - "pop %%ebp; \n\t" 1807 1817 #endif 1818 + "pop %%"R"bp" 1808 1819 : 1809 1820 : [svm]"a"(svm), 1810 1821 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), ··· 1815 1846 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) 1816 1847 #endif 1817 1848 : "cc", 
"memory" 1849 + , R"bx", R"cx", R"dx", R"si", R"di" 1818 1850 #ifdef CONFIG_X86_64 1819 - , "rbx", "rcx", "rdx", "rsi", "rdi" 1820 1851 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15" 1821 - #else 1822 - , "ebx", "ecx", "edx" , "esi", "edi" 1823 1852 #endif 1824 1853 ); 1825 1854 ··· 1825 1858 load_db_regs(svm->host_db_regs); 1826 1859 1827 1860 vcpu->arch.cr2 = svm->vmcb->save.cr2; 1861 + vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; 1862 + vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 1863 + vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 1828 1864 1829 1865 write_dr6(svm->host_dr6); 1830 1866 write_dr7(svm->host_dr7); ··· 1848 1878 1849 1879 svm->next_rip = 0; 1850 1880 } 1881 + 1882 + #undef R 1851 1883 1852 1884 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) 1853 1885 { ··· 1949 1977 .set_gdt = svm_set_gdt, 1950 1978 .get_dr = svm_get_dr, 1951 1979 .set_dr = svm_set_dr, 1952 - .cache_regs = svm_cache_regs, 1953 - .decache_regs = svm_decache_regs, 1954 1980 .get_rflags = svm_get_rflags, 1955 1981 .set_rflags = svm_set_rflags, 1956 1982
+491 -227
arch/x86/kvm/vmx.c
··· 26 26 #include <linux/highmem.h> 27 27 #include <linux/sched.h> 28 28 #include <linux/moduleparam.h> 29 + #include "kvm_cache_regs.h" 30 + #include "x86.h" 29 31 30 32 #include <asm/io.h> 31 33 #include <asm/desc.h> ··· 49 47 static int enable_ept = 1; 50 48 module_param(enable_ept, bool, 0); 51 49 50 + static int emulate_invalid_guest_state = 0; 51 + module_param(emulate_invalid_guest_state, bool, 0); 52 + 52 53 struct vmcs { 53 54 u32 revision_id; 54 55 u32 abort; ··· 61 56 struct vcpu_vmx { 62 57 struct kvm_vcpu vcpu; 63 58 struct list_head local_vcpus_link; 59 + unsigned long host_rsp; 64 60 int launched; 65 61 u8 fail; 66 62 u32 idt_vectoring_info; ··· 89 83 } irq; 90 84 } rmode; 91 85 int vpid; 86 + bool emulation_required; 92 87 }; 93 88 94 89 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) ··· 475 468 if (!vcpu->fpu_active) 476 469 eb |= 1u << NM_VECTOR; 477 470 if (vcpu->guest_debug.enabled) 478 - eb |= 1u << 1; 471 + eb |= 1u << DB_VECTOR; 479 472 if (vcpu->arch.rmode.active) 480 473 eb = ~0; 481 474 if (vm_need_ept()) ··· 722 715 unsigned long rip; 723 716 u32 interruptibility; 724 717 725 - rip = vmcs_readl(GUEST_RIP); 718 + rip = kvm_rip_read(vcpu); 726 719 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); 727 - vmcs_writel(GUEST_RIP, rip); 720 + kvm_rip_write(vcpu, rip); 728 721 729 722 /* 730 723 * We emulated an instruction, so temporary interrupt blocking ··· 740 733 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, 741 734 bool has_error_code, u32 error_code) 742 735 { 736 + struct vcpu_vmx *vmx = to_vmx(vcpu); 737 + 738 + if (has_error_code) 739 + vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 740 + 741 + if (vcpu->arch.rmode.active) { 742 + vmx->rmode.irq.pending = true; 743 + vmx->rmode.irq.vector = nr; 744 + vmx->rmode.irq.rip = kvm_rip_read(vcpu); 745 + if (nr == BP_VECTOR) 746 + vmx->rmode.irq.rip++; 747 + vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 748 + nr | INTR_TYPE_SOFT_INTR 749 + | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) 750 + | INTR_INFO_VALID_MASK); 751 + vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); 752 + kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); 753 + return; 754 + } 755 + 743 756 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 744 757 nr | INTR_TYPE_EXCEPTION 745 758 | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0) 746 759 | INTR_INFO_VALID_MASK); 747 - if (has_error_code) 748 - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 749 760 } 750 761 751 762 static bool vmx_exception_injected(struct kvm_vcpu *vcpu) 752 763 { 753 - struct vcpu_vmx *vmx = to_vmx(vcpu); 754 - 755 - return !(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); 764 + return false; 756 765 } 757 766 758 767 /* ··· 970 947 return ret; 971 948 } 972 949 973 - /* 974 - * Sync the rsp and rip registers into the vcpu structure. This allows 975 - * registers to be accessed by indexing vcpu->arch.regs. 976 - */ 977 - static void vcpu_load_rsp_rip(struct kvm_vcpu *vcpu) 950 + static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) 978 951 { 979 - vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 980 - vcpu->arch.rip = vmcs_readl(GUEST_RIP); 981 - } 982 - 983 - /* 984 - * Syncs rsp and rip back into the vmcs. Should be called after possible 985 - * modification. 
986 - */ 987 - static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu) 988 - { 989 - vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 990 - vmcs_writel(GUEST_RIP, vcpu->arch.rip); 952 + __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); 953 + switch (reg) { 954 + case VCPU_REGS_RSP: 955 + vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); 956 + break; 957 + case VCPU_REGS_RIP: 958 + vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 959 + break; 960 + default: 961 + break; 962 + } 991 963 } 992 964 993 965 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) ··· 1025 1007 1026 1008 static int vmx_get_irq(struct kvm_vcpu *vcpu) 1027 1009 { 1028 - struct vcpu_vmx *vmx = to_vmx(vcpu); 1029 - u32 idtv_info_field; 1030 - 1031 - idtv_info_field = vmx->idt_vectoring_info; 1032 - if (idtv_info_field & INTR_INFO_VALID_MASK) { 1033 - if (is_external_interrupt(idtv_info_field)) 1034 - return idtv_info_field & VECTORING_INFO_VECTOR_MASK; 1035 - else 1036 - printk(KERN_DEBUG "pending exception: not handled yet\n"); 1037 - } 1038 - return -1; 1010 + if (!vcpu->arch.interrupt.pending) 1011 + return -1; 1012 + return vcpu->arch.interrupt.nr; 1039 1013 } 1040 1014 1041 1015 static __init int cpu_has_kvm_support(void) ··· 1041 1031 u64 msr; 1042 1032 1043 1033 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); 1044 - return (msr & (MSR_IA32_FEATURE_CONTROL_LOCKED | 1045 - MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) 1046 - == MSR_IA32_FEATURE_CONTROL_LOCKED; 1034 + return (msr & (FEATURE_CONTROL_LOCKED | 1035 + FEATURE_CONTROL_VMXON_ENABLED)) 1036 + == FEATURE_CONTROL_LOCKED; 1047 1037 /* locked but not enabled */ 1048 1038 } 1049 1039 ··· 1055 1045 1056 1046 INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); 1057 1047 rdmsrl(MSR_IA32_FEATURE_CONTROL, old); 1058 - if ((old & (MSR_IA32_FEATURE_CONTROL_LOCKED | 1059 - MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) 1060 - != (MSR_IA32_FEATURE_CONTROL_LOCKED | 1061 - MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED)) 1048 + if ((old & (FEATURE_CONTROL_LOCKED | 1049 + FEATURE_CONTROL_VMXON_ENABLED)) 1050 + != (FEATURE_CONTROL_LOCKED | 1051 + FEATURE_CONTROL_VMXON_ENABLED)) 1062 1052 /* enable and lock */ 1063 1053 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | 1064 - MSR_IA32_FEATURE_CONTROL_LOCKED | 1065 - MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED); 1054 + FEATURE_CONTROL_LOCKED | 1055 + FEATURE_CONTROL_VMXON_ENABLED); 1066 1056 write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ 1067 1057 asm volatile (ASM_VMX_VMXON_RAX 1068 1058 : : "a"(&phys_addr), "m"(phys_addr) ··· 1130 1120 CPU_BASED_CR3_STORE_EXITING | 1131 1121 CPU_BASED_USE_IO_BITMAPS | 1132 1122 CPU_BASED_MOV_DR_EXITING | 1133 - CPU_BASED_USE_TSC_OFFSETING; 1123 + CPU_BASED_USE_TSC_OFFSETING | 1124 + CPU_BASED_INVLPG_EXITING; 1134 1125 opt = CPU_BASED_TPR_SHADOW | 1135 1126 CPU_BASED_USE_MSR_BITMAPS | 1136 1127 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; ··· 1160 1149 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; 1161 1150 #endif 1162 1151 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 1163 - /* CR3 accesses don't need to cause VM Exits when EPT enabled */ 1152 + /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 1153 + enabled */ 1164 1154 min &= ~(CPU_BASED_CR3_LOAD_EXITING | 1165 - CPU_BASED_CR3_STORE_EXITING); 1155 + CPU_BASED_CR3_STORE_EXITING | 1156 + CPU_BASED_INVLPG_EXITING); 1166 1157 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, 1167 1158 &_cpu_based_exec_control) < 0) 1168 1159 return -EIO; ··· 1301 1288 static void 
enter_pmode(struct kvm_vcpu *vcpu) 1302 1289 { 1303 1290 unsigned long flags; 1291 + struct vcpu_vmx *vmx = to_vmx(vcpu); 1304 1292 1293 + vmx->emulation_required = 1; 1305 1294 vcpu->arch.rmode.active = 0; 1306 1295 1307 1296 vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); ··· 1319 1304 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); 1320 1305 1321 1306 update_exception_bitmap(vcpu); 1307 + 1308 + if (emulate_invalid_guest_state) 1309 + return; 1322 1310 1323 1311 fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); 1324 1312 fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); ··· 1363 1345 static void enter_rmode(struct kvm_vcpu *vcpu) 1364 1346 { 1365 1347 unsigned long flags; 1348 + struct vcpu_vmx *vmx = to_vmx(vcpu); 1366 1349 1350 + vmx->emulation_required = 1; 1367 1351 vcpu->arch.rmode.active = 1; 1368 1352 1369 1353 vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); ··· 1387 1367 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); 1388 1368 update_exception_bitmap(vcpu); 1389 1369 1370 + if (emulate_invalid_guest_state) 1371 + goto continue_rmode; 1372 + 1390 1373 vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); 1391 1374 vmcs_write32(GUEST_SS_LIMIT, 0xffff); 1392 1375 vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); ··· 1405 1382 fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); 1406 1383 fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); 1407 1384 1385 + continue_rmode: 1408 1386 kvm_mmu_reset_context(vcpu); 1409 1387 init_rmode(vcpu->kvm); 1410 1388 } ··· 1739 1715 vmcs_writel(GUEST_GDTR_BASE, dt->base); 1740 1716 } 1741 1717 1718 + static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) 1719 + { 1720 + struct kvm_segment var; 1721 + u32 ar; 1722 + 1723 + vmx_get_segment(vcpu, &var, seg); 1724 + ar = vmx_segment_access_rights(&var); 1725 + 1726 + if (var.base != (var.selector << 4)) 1727 + return false; 1728 + if (var.limit != 0xffff) 1729 + return false; 1730 + if (ar != 0xf3) 1731 + return false; 1732 + 1733 + return true; 1734 + } 1735 + 1736 + static bool code_segment_valid(struct kvm_vcpu *vcpu) 1737 + { 1738 + struct kvm_segment cs; 1739 + unsigned int cs_rpl; 1740 + 1741 + vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 1742 + cs_rpl = cs.selector & SELECTOR_RPL_MASK; 1743 + 1744 + if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) 1745 + return false; 1746 + if (!cs.s) 1747 + return false; 1748 + if (!(~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK))) { 1749 + if (cs.dpl > cs_rpl) 1750 + return false; 1751 + } else if (cs.type & AR_TYPE_CODE_MASK) { 1752 + if (cs.dpl != cs_rpl) 1753 + return false; 1754 + } 1755 + if (!cs.present) 1756 + return false; 1757 + 1758 + /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */ 1759 + return true; 1760 + } 1761 + 1762 + static bool stack_segment_valid(struct kvm_vcpu *vcpu) 1763 + { 1764 + struct kvm_segment ss; 1765 + unsigned int ss_rpl; 1766 + 1767 + vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 1768 + ss_rpl = ss.selector & SELECTOR_RPL_MASK; 1769 + 1770 + if ((ss.type != 3) || (ss.type != 7)) 1771 + return false; 1772 + if (!ss.s) 1773 + return false; 1774 + if (ss.dpl != ss_rpl) /* DPL != RPL */ 1775 + return false; 1776 + if (!ss.present) 1777 + return false; 1778 + 1779 + return true; 1780 + } 1781 + 1782 + static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) 1783 + { 1784 + struct kvm_segment var; 1785 + unsigned int rpl; 1786 + 1787 + vmx_get_segment(vcpu, &var, seg); 1788 + rpl = var.selector & SELECTOR_RPL_MASK; 1789 + 
1790 + if (!var.s) 1791 + return false; 1792 + if (!var.present) 1793 + return false; 1794 + if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { 1795 + if (var.dpl < rpl) /* DPL < RPL */ 1796 + return false; 1797 + } 1798 + 1799 + /* TODO: Add other members to kvm_segment_field to allow checking for other access 1800 + * rights flags 1801 + */ 1802 + return true; 1803 + } 1804 + 1805 + static bool tr_valid(struct kvm_vcpu *vcpu) 1806 + { 1807 + struct kvm_segment tr; 1808 + 1809 + vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); 1810 + 1811 + if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ 1812 + return false; 1813 + if ((tr.type != 3) || (tr.type != 11)) /* TODO: Check if guest is in IA32e mode */ 1814 + return false; 1815 + if (!tr.present) 1816 + return false; 1817 + 1818 + return true; 1819 + } 1820 + 1821 + static bool ldtr_valid(struct kvm_vcpu *vcpu) 1822 + { 1823 + struct kvm_segment ldtr; 1824 + 1825 + vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); 1826 + 1827 + if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ 1828 + return false; 1829 + if (ldtr.type != 2) 1830 + return false; 1831 + if (!ldtr.present) 1832 + return false; 1833 + 1834 + return true; 1835 + } 1836 + 1837 + static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) 1838 + { 1839 + struct kvm_segment cs, ss; 1840 + 1841 + vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); 1842 + vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); 1843 + 1844 + return ((cs.selector & SELECTOR_RPL_MASK) == 1845 + (ss.selector & SELECTOR_RPL_MASK)); 1846 + } 1847 + 1848 + /* 1849 + * Check if guest state is valid. Returns true if valid, false if 1850 + * not. 1851 + * We assume that registers are always usable 1852 + */ 1853 + static bool guest_state_valid(struct kvm_vcpu *vcpu) 1854 + { 1855 + /* real mode guest state checks */ 1856 + if (!(vcpu->arch.cr0 & X86_CR0_PE)) { 1857 + if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) 1858 + return false; 1859 + if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) 1860 + return false; 1861 + if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) 1862 + return false; 1863 + if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) 1864 + return false; 1865 + if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) 1866 + return false; 1867 + if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) 1868 + return false; 1869 + } else { 1870 + /* protected mode guest state checks */ 1871 + if (!cs_ss_rpl_check(vcpu)) 1872 + return false; 1873 + if (!code_segment_valid(vcpu)) 1874 + return false; 1875 + if (!stack_segment_valid(vcpu)) 1876 + return false; 1877 + if (!data_segment_valid(vcpu, VCPU_SREG_DS)) 1878 + return false; 1879 + if (!data_segment_valid(vcpu, VCPU_SREG_ES)) 1880 + return false; 1881 + if (!data_segment_valid(vcpu, VCPU_SREG_FS)) 1882 + return false; 1883 + if (!data_segment_valid(vcpu, VCPU_SREG_GS)) 1884 + return false; 1885 + if (!tr_valid(vcpu)) 1886 + return false; 1887 + if (!ldtr_valid(vcpu)) 1888 + return false; 1889 + } 1890 + /* TODO: 1891 + * - Add checks on RIP 1892 + * - Add checks on RFLAGS 1893 + */ 1894 + 1895 + return true; 1896 + } 1897 + 1742 1898 static int init_rmode_tss(struct kvm *kvm) 1743 1899 { 1744 1900 gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; ··· 1930 1726 if (r < 0) 1931 1727 goto out; 1932 1728 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 1933 - r = kvm_write_guest_page(kvm, fn++, &data, 0x66, sizeof(u16)); 1729 + r = kvm_write_guest_page(kvm, fn++, &data, 1730 + TSS_IOPB_BASE_OFFSET, sizeof(u16)); 1934 1731 if (r < 0) 1935 1732 goto out; 1936 1733 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); ··· 1994 1789 
vmcs_write16(sf->selector, 0); 1995 1790 vmcs_writel(sf->base, 0); 1996 1791 vmcs_write32(sf->limit, 0xffff); 1997 - vmcs_write32(sf->ar_bytes, 0x93); 1792 + vmcs_write32(sf->ar_bytes, 0xf3); 1998 1793 } 1999 1794 2000 1795 static int alloc_apic_access_page(struct kvm *kvm) ··· 2013 1808 if (r) 2014 1809 goto out; 2015 1810 2016 - down_read(&current->mm->mmap_sem); 2017 1811 kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); 2018 - up_read(&current->mm->mmap_sem); 2019 1812 out: 2020 1813 up_write(&kvm->slots_lock); 2021 1814 return r; ··· 2035 1832 if (r) 2036 1833 goto out; 2037 1834 2038 - down_read(&current->mm->mmap_sem); 2039 1835 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, 2040 1836 VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT); 2041 - up_read(&current->mm->mmap_sem); 2042 1837 out: 2043 1838 up_write(&kvm->slots_lock); 2044 1839 return r; ··· 2118 1917 } 2119 1918 if (!vm_need_ept()) 2120 1919 exec_control |= CPU_BASED_CR3_STORE_EXITING | 2121 - CPU_BASED_CR3_LOAD_EXITING; 1920 + CPU_BASED_CR3_LOAD_EXITING | 1921 + CPU_BASED_INVLPG_EXITING; 2122 1922 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); 2123 1923 2124 1924 if (cpu_has_secondary_exec_ctrls()) { ··· 2221 2019 u64 msr; 2222 2020 int ret; 2223 2021 2022 + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); 2224 2023 down_read(&vcpu->kvm->slots_lock); 2225 2024 if (!init_rmode(vmx->vcpu.kvm)) { 2226 2025 ret = -ENOMEM; ··· 2239 2036 2240 2037 fx_init(&vmx->vcpu); 2241 2038 2039 + seg_setup(VCPU_SREG_CS); 2242 2040 /* 2243 2041 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode 2244 2042 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. ··· 2251 2047 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); 2252 2048 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); 2253 2049 } 2254 - vmcs_write32(GUEST_CS_LIMIT, 0xffff); 2255 - vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); 2256 2050 2257 2051 seg_setup(VCPU_SREG_DS); 2258 2052 seg_setup(VCPU_SREG_ES); ··· 2274 2072 2275 2073 vmcs_writel(GUEST_RFLAGS, 0x02); 2276 2074 if (vmx->vcpu.vcpu_id == 0) 2277 - vmcs_writel(GUEST_RIP, 0xfff0); 2075 + kvm_rip_write(vcpu, 0xfff0); 2278 2076 else 2279 - vmcs_writel(GUEST_RIP, 0); 2280 - vmcs_writel(GUEST_RSP, 0); 2077 + kvm_rip_write(vcpu, 0); 2078 + kvm_register_write(vcpu, VCPU_REGS_RSP, 0); 2281 2079 2282 2080 /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ 2283 2081 vmcs_writel(GUEST_DR7, 0x400); ··· 2327 2125 2328 2126 ret = 0; 2329 2127 2128 + /* HACK: Don't enable emulation on guest boot/reset */ 2129 + vmx->emulation_required = 0; 2130 + 2330 2131 out: 2331 2132 up_read(&vcpu->kvm->slots_lock); 2332 2133 return ret; ··· 2341 2136 2342 2137 KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler); 2343 2138 2139 + ++vcpu->stat.irq_injections; 2344 2140 if (vcpu->arch.rmode.active) { 2345 2141 vmx->rmode.irq.pending = true; 2346 2142 vmx->rmode.irq.vector = irq; 2347 - vmx->rmode.irq.rip = vmcs_readl(GUEST_RIP); 2143 + vmx->rmode.irq.rip = kvm_rip_read(vcpu); 2348 2144 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2349 2145 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); 2350 2146 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); 2351 - vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip - 1); 2147 + kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); 2352 2148 return; 2353 2149 } 2354 2150 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, ··· 2360 2154 { 2361 2155 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2362 2156 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); 2363 - 
vcpu->arch.nmi_pending = 0; 2364 2157 } 2365 2158 2366 2159 static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) ··· 2371 2166 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]); 2372 2167 if (!vcpu->arch.irq_pending[word_index]) 2373 2168 clear_bit(word_index, &vcpu->arch.irq_summary); 2374 - vmx_inject_irq(vcpu, irq); 2169 + kvm_queue_interrupt(vcpu, irq); 2375 2170 } 2376 2171 2377 2172 ··· 2385 2180 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); 2386 2181 2387 2182 if (vcpu->arch.interrupt_window_open && 2388 - vcpu->arch.irq_summary && 2389 - !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK)) 2390 - /* 2391 - * If interrupts enabled, and not blocked by sti or mov ss. Good. 2392 - */ 2183 + vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) 2393 2184 kvm_do_inject_irq(vcpu); 2185 + 2186 + if (vcpu->arch.interrupt_window_open && vcpu->arch.interrupt.pending) 2187 + vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); 2394 2188 2395 2189 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); 2396 2190 if (!vcpu->arch.interrupt_window_open && ··· 2441 2237 static int handle_rmode_exception(struct kvm_vcpu *vcpu, 2442 2238 int vec, u32 err_code) 2443 2239 { 2444 - if (!vcpu->arch.rmode.active) 2445 - return 0; 2446 - 2447 2240 /* 2448 2241 * Instruction with address size override prefix opcode 0x67 2449 2242 * Cause the #SS fault with 0 error code in VM86 mode. ··· 2448 2247 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) 2449 2248 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE) 2450 2249 return 1; 2250 + /* 2251 + * Forward all other exceptions that are valid in real mode. 2252 + * FIXME: Breaks guest debugging in real mode, needs to be fixed with 2253 + * the required debugging infrastructure rework. 
2254 + */ 2255 + switch (vec) { 2256 + case DE_VECTOR: 2257 + case DB_VECTOR: 2258 + case BP_VECTOR: 2259 + case OF_VECTOR: 2260 + case BR_VECTOR: 2261 + case UD_VECTOR: 2262 + case DF_VECTOR: 2263 + case SS_VECTOR: 2264 + case GP_VECTOR: 2265 + case MF_VECTOR: 2266 + kvm_queue_exception(vcpu, vec); 2267 + return 1; 2268 + } 2451 2269 return 0; 2452 2270 } 2453 2271 ··· 2508 2288 } 2509 2289 2510 2290 error_code = 0; 2511 - rip = vmcs_readl(GUEST_RIP); 2291 + rip = kvm_rip_read(vcpu); 2512 2292 if (intr_info & INTR_INFO_DELIVER_CODE_MASK) 2513 2293 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); 2514 2294 if (is_page_fault(intr_info)) { ··· 2518 2298 cr2 = vmcs_readl(EXIT_QUALIFICATION); 2519 2299 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, 2520 2300 (u32)((u64)cr2 >> 32), handler); 2521 - if (vect_info & VECTORING_INFO_VALID_MASK) 2301 + if (vcpu->arch.interrupt.pending || vcpu->arch.exception.pending) 2522 2302 kvm_mmu_unprotect_page_virt(vcpu, cr2); 2523 2303 return kvm_mmu_page_fault(vcpu, cr2, error_code); 2524 2304 } ··· 2606 2386 reg = (exit_qualification >> 8) & 15; 2607 2387 switch ((exit_qualification >> 4) & 3) { 2608 2388 case 0: /* mov to cr */ 2609 - KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg], 2610 - (u32)((u64)vcpu->arch.regs[reg] >> 32), handler); 2389 + KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, 2390 + (u32)kvm_register_read(vcpu, reg), 2391 + (u32)((u64)kvm_register_read(vcpu, reg) >> 32), 2392 + handler); 2611 2393 switch (cr) { 2612 2394 case 0: 2613 - vcpu_load_rsp_rip(vcpu); 2614 - kvm_set_cr0(vcpu, vcpu->arch.regs[reg]); 2395 + kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg)); 2615 2396 skip_emulated_instruction(vcpu); 2616 2397 return 1; 2617 2398 case 3: 2618 - vcpu_load_rsp_rip(vcpu); 2619 - kvm_set_cr3(vcpu, vcpu->arch.regs[reg]); 2399 + kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg)); 2620 2400 skip_emulated_instruction(vcpu); 2621 2401 return 1; 2622 2402 case 4: 2623 - vcpu_load_rsp_rip(vcpu); 2624 - kvm_set_cr4(vcpu, vcpu->arch.regs[reg]); 2403 + kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg)); 2625 2404 skip_emulated_instruction(vcpu); 2626 2405 return 1; 2627 2406 case 8: 2628 - vcpu_load_rsp_rip(vcpu); 2629 - kvm_set_cr8(vcpu, vcpu->arch.regs[reg]); 2407 + kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg)); 2630 2408 skip_emulated_instruction(vcpu); 2631 2409 if (irqchip_in_kernel(vcpu->kvm)) 2632 2410 return 1; ··· 2633 2415 }; 2634 2416 break; 2635 2417 case 2: /* clts */ 2636 - vcpu_load_rsp_rip(vcpu); 2637 2418 vmx_fpu_deactivate(vcpu); 2638 2419 vcpu->arch.cr0 &= ~X86_CR0_TS; 2639 2420 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); ··· 2643 2426 case 1: /*mov from cr*/ 2644 2427 switch (cr) { 2645 2428 case 3: 2646 - vcpu_load_rsp_rip(vcpu); 2647 - vcpu->arch.regs[reg] = vcpu->arch.cr3; 2648 - vcpu_put_rsp_rip(vcpu); 2429 + kvm_register_write(vcpu, reg, vcpu->arch.cr3); 2649 2430 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, 2650 - (u32)vcpu->arch.regs[reg], 2651 - (u32)((u64)vcpu->arch.regs[reg] >> 32), 2431 + (u32)kvm_register_read(vcpu, reg), 2432 + (u32)((u64)kvm_register_read(vcpu, reg) >> 32), 2652 2433 handler); 2653 2434 skip_emulated_instruction(vcpu); 2654 2435 return 1; 2655 2436 case 8: 2656 - vcpu_load_rsp_rip(vcpu); 2657 - vcpu->arch.regs[reg] = kvm_get_cr8(vcpu); 2658 - vcpu_put_rsp_rip(vcpu); 2437 + kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu)); 2659 2438 KVMTRACE_2D(CR_READ, vcpu, (u32)cr, 2660 - (u32)vcpu->arch.regs[reg], handler); 2439 + (u32)kvm_register_read(vcpu, reg), handler); 2661 2440 
skip_emulated_instruction(vcpu); 2662 2441 return 1; 2663 2442 } ··· 2685 2472 exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 2686 2473 dr = exit_qualification & 7; 2687 2474 reg = (exit_qualification >> 8) & 15; 2688 - vcpu_load_rsp_rip(vcpu); 2689 2475 if (exit_qualification & 16) { 2690 2476 /* mov from dr */ 2691 2477 switch (dr) { ··· 2697 2485 default: 2698 2486 val = 0; 2699 2487 } 2700 - vcpu->arch.regs[reg] = val; 2488 + kvm_register_write(vcpu, reg, val); 2701 2489 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); 2702 2490 } else { 2703 2491 /* mov to dr */ 2704 2492 } 2705 - vcpu_put_rsp_rip(vcpu); 2706 2493 skip_emulated_instruction(vcpu); 2707 2494 return 1; 2708 2495 } ··· 2791 2580 { 2792 2581 skip_emulated_instruction(vcpu); 2793 2582 kvm_emulate_hypercall(vcpu); 2583 + return 1; 2584 + } 2585 + 2586 + static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2587 + { 2588 + u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION); 2589 + 2590 + kvm_mmu_invlpg(vcpu, exit_qualification); 2591 + skip_emulated_instruction(vcpu); 2794 2592 return 1; 2795 2593 } 2796 2594 ··· 2915 2695 return 1; 2916 2696 } 2917 2697 2698 + static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, 2699 + struct kvm_run *kvm_run) 2700 + { 2701 + struct vcpu_vmx *vmx = to_vmx(vcpu); 2702 + int err; 2703 + 2704 + preempt_enable(); 2705 + local_irq_enable(); 2706 + 2707 + while (!guest_state_valid(vcpu)) { 2708 + err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); 2709 + 2710 + switch (err) { 2711 + case EMULATE_DONE: 2712 + break; 2713 + case EMULATE_DO_MMIO: 2714 + kvm_report_emulation_failure(vcpu, "mmio"); 2715 + /* TODO: Handle MMIO */ 2716 + return; 2717 + default: 2718 + kvm_report_emulation_failure(vcpu, "emulation failure"); 2719 + return; 2720 + } 2721 + 2722 + if (signal_pending(current)) 2723 + break; 2724 + if (need_resched()) 2725 + schedule(); 2726 + } 2727 + 2728 + local_irq_disable(); 2729 + preempt_disable(); 2730 + 2731 + /* Guest state should be valid now, no more emulation should be needed */ 2732 + vmx->emulation_required = 0; 2733 + } 2734 + 2918 2735 /* 2919 2736 * The exit handlers return 1 if the exit was handled fully and guest execution 2920 2737 * may resume. Otherwise they set the kvm_run parameter to indicate what needs ··· 2971 2714 [EXIT_REASON_MSR_WRITE] = handle_wrmsr, 2972 2715 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, 2973 2716 [EXIT_REASON_HLT] = handle_halt, 2717 + [EXIT_REASON_INVLPG] = handle_invlpg, 2974 2718 [EXIT_REASON_VMCALL] = handle_vmcall, 2975 2719 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, 2976 2720 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, ··· 2993 2735 struct vcpu_vmx *vmx = to_vmx(vcpu); 2994 2736 u32 vectoring_info = vmx->idt_vectoring_info; 2995 2737 2996 - KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP), 2997 - (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit); 2738 + KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), 2739 + (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit); 2998 2740 2999 2741 /* Access CR3 don't cause VMExit in paging mode, so we need 3000 2742 * to sync with guest real CR3. 
*/ ··· 3087 2829 enable_irq_window(vcpu); 3088 2830 } 3089 2831 3090 - static void vmx_intr_assist(struct kvm_vcpu *vcpu) 2832 + static void vmx_complete_interrupts(struct vcpu_vmx *vmx) 3091 2833 { 3092 - struct vcpu_vmx *vmx = to_vmx(vcpu); 3093 - u32 idtv_info_field, intr_info_field, exit_intr_info_field; 3094 - int vector; 2834 + u32 exit_intr_info; 2835 + u32 idt_vectoring_info; 2836 + bool unblock_nmi; 2837 + u8 vector; 2838 + int type; 2839 + bool idtv_info_valid; 2840 + u32 error; 3095 2841 3096 - update_tpr_threshold(vcpu); 3097 - 3098 - intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD); 3099 - exit_intr_info_field = vmcs_read32(VM_EXIT_INTR_INFO); 3100 - idtv_info_field = vmx->idt_vectoring_info; 3101 - if (intr_info_field & INTR_INFO_VALID_MASK) { 3102 - if (idtv_info_field & INTR_INFO_VALID_MASK) { 3103 - /* TODO: fault when IDT_Vectoring */ 3104 - if (printk_ratelimit()) 3105 - printk(KERN_ERR "Fault when IDT_Vectoring\n"); 3106 - } 3107 - enable_intr_window(vcpu); 3108 - return; 3109 - } 3110 - if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) { 3111 - if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) 3112 - == INTR_TYPE_EXT_INTR 3113 - && vcpu->arch.rmode.active) { 3114 - u8 vect = idtv_info_field & VECTORING_INFO_VECTOR_MASK; 3115 - 3116 - vmx_inject_irq(vcpu, vect); 3117 - enable_intr_window(vcpu); 3118 - return; 3119 - } 3120 - 3121 - KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler); 3122 - 3123 - /* 3124 - * SDM 3: 25.7.1.2 3125 - * Clear bit "block by NMI" before VM entry if a NMI delivery 3126 - * faulted. 3127 - */ 3128 - if ((idtv_info_field & VECTORING_INFO_TYPE_MASK) 3129 - == INTR_TYPE_NMI_INTR && cpu_has_virtual_nmis()) 3130 - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 3131 - vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3132 - ~GUEST_INTR_STATE_NMI); 3133 - 3134 - vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field 3135 - & ~INTR_INFO_RESVD_BITS_MASK); 3136 - vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 3137 - vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); 3138 - 3139 - if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK)) 3140 - vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, 3141 - vmcs_read32(IDT_VECTORING_ERROR_CODE)); 3142 - enable_intr_window(vcpu); 3143 - return; 3144 - } 2842 + exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 3145 2843 if (cpu_has_virtual_nmis()) { 2844 + unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; 2845 + vector = exit_intr_info & INTR_INFO_VECTOR_MASK; 3146 2846 /* 3147 2847 * SDM 3: 25.7.1.2 3148 2848 * Re-set bit "block by NMI" before VM entry if vmexit caused by 3149 2849 * a guest IRET fault. 3150 2850 */ 3151 - if ((exit_intr_info_field & INTR_INFO_UNBLOCK_NMI) && 3152 - (exit_intr_info_field & INTR_INFO_VECTOR_MASK) != 8) 3153 - vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 3154 - vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) | 3155 - GUEST_INTR_STATE_NMI); 3156 - else if (vcpu->arch.nmi_pending) { 3157 - if (vmx_nmi_enabled(vcpu)) 3158 - vmx_inject_nmi(vcpu); 2851 + if (unblock_nmi && vector != DF_VECTOR) 2852 + vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, 2853 + GUEST_INTR_STATE_NMI); 2854 + } 2855 + 2856 + idt_vectoring_info = vmx->idt_vectoring_info; 2857 + idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 2858 + vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; 2859 + type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; 2860 + if (vmx->vcpu.arch.nmi_injected) { 2861 + /* 2862 + * SDM 3: 25.7.1.2 2863 + * Clear bit "block by NMI" before VM entry if a NMI delivery 2864 + * faulted. 
2865 + */ 2866 + if (idtv_info_valid && type == INTR_TYPE_NMI_INTR) 2867 + vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, 2868 + GUEST_INTR_STATE_NMI); 2869 + else 2870 + vmx->vcpu.arch.nmi_injected = false; 2871 + } 2872 + kvm_clear_exception_queue(&vmx->vcpu); 2873 + if (idtv_info_valid && type == INTR_TYPE_EXCEPTION) { 2874 + if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { 2875 + error = vmcs_read32(IDT_VECTORING_ERROR_CODE); 2876 + kvm_queue_exception_e(&vmx->vcpu, vector, error); 2877 + } else 2878 + kvm_queue_exception(&vmx->vcpu, vector); 2879 + vmx->idt_vectoring_info = 0; 2880 + } 2881 + kvm_clear_interrupt_queue(&vmx->vcpu); 2882 + if (idtv_info_valid && type == INTR_TYPE_EXT_INTR) { 2883 + kvm_queue_interrupt(&vmx->vcpu, vector); 2884 + vmx->idt_vectoring_info = 0; 2885 + } 2886 + } 2887 + 2888 + static void vmx_intr_assist(struct kvm_vcpu *vcpu) 2889 + { 2890 + update_tpr_threshold(vcpu); 2891 + 2892 + if (cpu_has_virtual_nmis()) { 2893 + if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { 2894 + if (vmx_nmi_enabled(vcpu)) { 2895 + vcpu->arch.nmi_pending = false; 2896 + vcpu->arch.nmi_injected = true; 2897 + } else { 2898 + enable_intr_window(vcpu); 2899 + return; 2900 + } 2901 + } 2902 + if (vcpu->arch.nmi_injected) { 2903 + vmx_inject_nmi(vcpu); 3159 2904 enable_intr_window(vcpu); 3160 2905 return; 3161 2906 } 3162 - 3163 2907 } 3164 - if (!kvm_cpu_has_interrupt(vcpu)) 3165 - return; 3166 - if (vmx_irq_enabled(vcpu)) { 3167 - vector = kvm_cpu_get_interrupt(vcpu); 3168 - vmx_inject_irq(vcpu, vector); 3169 - kvm_timer_intr_post(vcpu, vector); 3170 - } else 3171 - enable_irq_window(vcpu); 2908 + if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) { 2909 + if (vmx_irq_enabled(vcpu)) 2910 + kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); 2911 + else 2912 + enable_irq_window(vcpu); 2913 + } 2914 + if (vcpu->arch.interrupt.pending) { 2915 + vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); 2916 + kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr); 2917 + } 3172 2918 } 3173 2919 3174 2920 /* ··· 3184 2922 static void fixup_rmode_irq(struct vcpu_vmx *vmx) 3185 2923 { 3186 2924 vmx->rmode.irq.pending = 0; 3187 - if (vmcs_readl(GUEST_RIP) + 1 != vmx->rmode.irq.rip) 2925 + if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip) 3188 2926 return; 3189 - vmcs_writel(GUEST_RIP, vmx->rmode.irq.rip); 2927 + kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip); 3190 2928 if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) { 3191 2929 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK; 3192 2930 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR; ··· 3198 2936 | vmx->rmode.irq.vector; 3199 2937 } 3200 2938 2939 + #ifdef CONFIG_X86_64 2940 + #define R "r" 2941 + #define Q "q" 2942 + #else 2943 + #define R "e" 2944 + #define Q "l" 2945 + #endif 2946 + 3201 2947 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 3202 2948 { 3203 2949 struct vcpu_vmx *vmx = to_vmx(vcpu); 3204 2950 u32 intr_info; 2951 + 2952 + /* Handle invalid guest state instead of entering VMX */ 2953 + if (vmx->emulation_required && emulate_invalid_guest_state) { 2954 + handle_invalid_guest_state(vcpu, kvm_run); 2955 + return; 2956 + } 2957 + 2958 + if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) 2959 + vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); 2960 + if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) 2961 + vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 3205 2962 3206 2963 /* 3207 2964 * Loading guest fpu may 
have cleared host cr0.ts ··· 3229 2948 3230 2949 asm( 3231 2950 /* Store host registers */ 3232 - #ifdef CONFIG_X86_64 3233 - "push %%rdx; push %%rbp;" 3234 - "push %%rcx \n\t" 3235 - #else 3236 - "push %%edx; push %%ebp;" 3237 - "push %%ecx \n\t" 3238 - #endif 2951 + "push %%"R"dx; push %%"R"bp;" 2952 + "push %%"R"cx \n\t" 2953 + "cmp %%"R"sp, %c[host_rsp](%0) \n\t" 2954 + "je 1f \n\t" 2955 + "mov %%"R"sp, %c[host_rsp](%0) \n\t" 3239 2956 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" 2957 + "1: \n\t" 3240 2958 /* Check if vmlaunch of vmresume is needed */ 3241 2959 "cmpl $0, %c[launched](%0) \n\t" 3242 2960 /* Load guest registers. Don't clobber flags. */ 2961 + "mov %c[cr2](%0), %%"R"ax \n\t" 2962 + "mov %%"R"ax, %%cr2 \n\t" 2963 + "mov %c[rax](%0), %%"R"ax \n\t" 2964 + "mov %c[rbx](%0), %%"R"bx \n\t" 2965 + "mov %c[rdx](%0), %%"R"dx \n\t" 2966 + "mov %c[rsi](%0), %%"R"si \n\t" 2967 + "mov %c[rdi](%0), %%"R"di \n\t" 2968 + "mov %c[rbp](%0), %%"R"bp \n\t" 3243 2969 #ifdef CONFIG_X86_64 3244 - "mov %c[cr2](%0), %%rax \n\t" 3245 - "mov %%rax, %%cr2 \n\t" 3246 - "mov %c[rax](%0), %%rax \n\t" 3247 - "mov %c[rbx](%0), %%rbx \n\t" 3248 - "mov %c[rdx](%0), %%rdx \n\t" 3249 - "mov %c[rsi](%0), %%rsi \n\t" 3250 - "mov %c[rdi](%0), %%rdi \n\t" 3251 - "mov %c[rbp](%0), %%rbp \n\t" 3252 2970 "mov %c[r8](%0), %%r8 \n\t" 3253 2971 "mov %c[r9](%0), %%r9 \n\t" 3254 2972 "mov %c[r10](%0), %%r10 \n\t" ··· 3256 2976 "mov %c[r13](%0), %%r13 \n\t" 3257 2977 "mov %c[r14](%0), %%r14 \n\t" 3258 2978 "mov %c[r15](%0), %%r15 \n\t" 3259 - "mov %c[rcx](%0), %%rcx \n\t" /* kills %0 (rcx) */ 3260 - #else 3261 - "mov %c[cr2](%0), %%eax \n\t" 3262 - "mov %%eax, %%cr2 \n\t" 3263 - "mov %c[rax](%0), %%eax \n\t" 3264 - "mov %c[rbx](%0), %%ebx \n\t" 3265 - "mov %c[rdx](%0), %%edx \n\t" 3266 - "mov %c[rsi](%0), %%esi \n\t" 3267 - "mov %c[rdi](%0), %%edi \n\t" 3268 - "mov %c[rbp](%0), %%ebp \n\t" 3269 - "mov %c[rcx](%0), %%ecx \n\t" /* kills %0 (ecx) */ 3270 2979 #endif 2980 + "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */ 2981 + 3271 2982 /* Enter guest mode */ 3272 2983 "jne .Llaunched \n\t" 3273 2984 __ex(ASM_VMX_VMLAUNCH) "\n\t" ··· 3266 2995 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" 3267 2996 ".Lkvm_vmx_return: " 3268 2997 /* Save guest registers, load host registers, keep flags */ 2998 + "xchg %0, (%%"R"sp) \n\t" 2999 + "mov %%"R"ax, %c[rax](%0) \n\t" 3000 + "mov %%"R"bx, %c[rbx](%0) \n\t" 3001 + "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t" 3002 + "mov %%"R"dx, %c[rdx](%0) \n\t" 3003 + "mov %%"R"si, %c[rsi](%0) \n\t" 3004 + "mov %%"R"di, %c[rdi](%0) \n\t" 3005 + "mov %%"R"bp, %c[rbp](%0) \n\t" 3269 3006 #ifdef CONFIG_X86_64 3270 - "xchg %0, (%%rsp) \n\t" 3271 - "mov %%rax, %c[rax](%0) \n\t" 3272 - "mov %%rbx, %c[rbx](%0) \n\t" 3273 - "pushq (%%rsp); popq %c[rcx](%0) \n\t" 3274 - "mov %%rdx, %c[rdx](%0) \n\t" 3275 - "mov %%rsi, %c[rsi](%0) \n\t" 3276 - "mov %%rdi, %c[rdi](%0) \n\t" 3277 - "mov %%rbp, %c[rbp](%0) \n\t" 3278 3007 "mov %%r8, %c[r8](%0) \n\t" 3279 3008 "mov %%r9, %c[r9](%0) \n\t" 3280 3009 "mov %%r10, %c[r10](%0) \n\t" ··· 3283 3012 "mov %%r13, %c[r13](%0) \n\t" 3284 3013 "mov %%r14, %c[r14](%0) \n\t" 3285 3014 "mov %%r15, %c[r15](%0) \n\t" 3286 - "mov %%cr2, %%rax \n\t" 3287 - "mov %%rax, %c[cr2](%0) \n\t" 3288 - 3289 - "pop %%rbp; pop %%rbp; pop %%rdx \n\t" 3290 - #else 3291 - "xchg %0, (%%esp) \n\t" 3292 - "mov %%eax, %c[rax](%0) \n\t" 3293 - "mov %%ebx, %c[rbx](%0) \n\t" 3294 - "pushl (%%esp); popl %c[rcx](%0) \n\t" 3295 - "mov %%edx, %c[rdx](%0) \n\t" 3296 - "mov %%esi, %c[rsi](%0) \n\t" 3297 - "mov 
%%edi, %c[rdi](%0) \n\t" 3298 - "mov %%ebp, %c[rbp](%0) \n\t" 3299 - "mov %%cr2, %%eax \n\t" 3300 - "mov %%eax, %c[cr2](%0) \n\t" 3301 - 3302 - "pop %%ebp; pop %%ebp; pop %%edx \n\t" 3303 3015 #endif 3016 + "mov %%cr2, %%"R"ax \n\t" 3017 + "mov %%"R"ax, %c[cr2](%0) \n\t" 3018 + 3019 + "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t" 3304 3020 "setbe %c[fail](%0) \n\t" 3305 3021 : : "c"(vmx), "d"((unsigned long)HOST_RSP), 3306 3022 [launched]"i"(offsetof(struct vcpu_vmx, launched)), 3307 3023 [fail]"i"(offsetof(struct vcpu_vmx, fail)), 3024 + [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), 3308 3025 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), 3309 3026 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), 3310 3027 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), ··· 3312 3053 #endif 3313 3054 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) 3314 3055 : "cc", "memory" 3056 + , R"bx", R"di", R"si" 3315 3057 #ifdef CONFIG_X86_64 3316 - , "rbx", "rdi", "rsi" 3317 3058 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" 3318 - #else 3319 - , "ebx", "edi", "rsi" 3320 3059 #endif 3321 3060 ); 3061 + 3062 + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); 3063 + vcpu->arch.regs_dirty = 0; 3322 3064 3323 3065 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 3324 3066 if (vmx->rmode.irq.pending) ··· 3340 3080 KVMTRACE_0D(NMI, vcpu, handler); 3341 3081 asm("int $2"); 3342 3082 } 3083 + 3084 + vmx_complete_interrupts(vmx); 3343 3085 } 3086 + 3087 + #undef R 3088 + #undef Q 3344 3089 3345 3090 static void vmx_free_vmcs(struct kvm_vcpu *vcpu) 3346 3091 { ··· 3489 3224 .set_idt = vmx_set_idt, 3490 3225 .get_gdt = vmx_get_gdt, 3491 3226 .set_gdt = vmx_set_gdt, 3492 - .cache_regs = vcpu_load_rsp_rip, 3493 - .decache_regs = vcpu_put_rsp_rip, 3227 + .cache_reg = vmx_cache_reg, 3494 3228 .get_rflags = vmx_get_rflags, 3495 3229 .set_rflags = vmx_set_rflags, 3496 3230
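
The vmx.c changes above drop the cache_regs/decache_regs callbacks in favour of a per-register ->cache_reg hook (vmx_cache_reg) plus the kvm_register_read/kvm_register_write/kvm_rip_read/kvm_rip_write accessors pulled in from "kvm_cache_regs.h". The following is only a sketch of what those accessors presumably look like, inferred from the regs_avail/regs_dirty usage in this diff, not the verbatim header:

/*
 * Sketch only: approximates the kvm_cache_regs.h helpers used above.
 * A read pulls the register out of the VMCS on first use via ->cache_reg;
 * a write marks it dirty so vmx_vcpu_run() can flush RSP/RIP back with
 * vmcs_writel() before the next guest entry.
 */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);	/* e.g. vmx_cache_reg() */
	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg, unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

This matches the vmx_vcpu_run() hunk above, which only writes GUEST_RSP/GUEST_RIP when the corresponding regs_dirty bits are set, and resets regs_avail to RSP/RIP-only (and regs_dirty to 0) after every VM exit.
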
-3
arch/x86/kvm/vmx.h
··· 331 331 332 332 #define AR_RESERVD_MASK 0xfffe0f00 333 333 334 - #define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1 335 - #define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4 336 - 337 334 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9 338 335 #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT 10 339 336
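
The two MSR_IA32_FEATURE_CONTROL_* defines removed here are replaced in the vmx.c hunk above by FEATURE_CONTROL_LOCKED and FEATURE_CONTROL_VMXON_ENABLED, presumably now provided by a shared MSR header rather than vmx.h. Assuming they keep the bit values shown in the removed lines (0x1 and 0x4), the hardware-enable check amounts to the sketch below (not part of this diff; it just restates the logic already visible in the vmx.c hunk):

/* Sketch: enable and lock VMXON in IA32_FEATURE_CONTROL if the BIOS left it unlocked. */
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
if ((old & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED))
	!= (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED))
	wrmsrl(MSR_IA32_FEATURE_CONTROL,
	       old | FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED);
/* If the MSR is already locked without VMXON enabled, VMX was disabled by firmware. */
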
+328 -232
arch/x86/kvm/x86.c
··· 4 4 * derived from drivers/kvm/kvm_main.c 5 5 * 6 6 * Copyright (C) 2006 Qumranet, Inc. 7 + * Copyright (C) 2008 Qumranet, Inc. 8 + * Copyright IBM Corporation, 2008 7 9 * 8 10 * Authors: 9 11 * Avi Kivity <avi@qumranet.com> 10 12 * Yaniv Kamay <yaniv@qumranet.com> 13 + * Amit Shah <amit.shah@qumranet.com> 14 + * Ben-Ami Yassour <benami@il.ibm.com> 11 15 * 12 16 * This work is licensed under the terms of the GNU GPL, version 2. See 13 17 * the COPYING file in the top-level directory. ··· 23 19 #include "mmu.h" 24 20 #include "i8254.h" 25 21 #include "tss.h" 22 + #include "kvm_cache_regs.h" 23 + #include "x86.h" 26 24 27 25 #include <linux/clocksource.h> 26 + #include <linux/interrupt.h> 28 27 #include <linux/kvm.h> 29 28 #include <linux/fs.h> 30 29 #include <linux/vmalloc.h> 31 30 #include <linux/module.h> 32 31 #include <linux/mman.h> 33 32 #include <linux/highmem.h> 33 + #include <linux/intel-iommu.h> 34 34 35 35 #include <asm/uaccess.h> 36 36 #include <asm/msr.h> ··· 69 61 struct kvm_cpuid_entry2 __user *entries); 70 62 71 63 struct kvm_x86_ops *kvm_x86_ops; 64 + EXPORT_SYMBOL_GPL(kvm_x86_ops); 72 65 73 66 struct kvm_stats_debugfs_item debugfs_entries[] = { 74 67 { "pf_fixed", VCPU_STAT(pf_fixed) }, ··· 92 83 { "fpu_reload", VCPU_STAT(fpu_reload) }, 93 84 { "insn_emulation", VCPU_STAT(insn_emulation) }, 94 85 { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, 86 + { "irq_injections", VCPU_STAT(irq_injections) }, 95 87 { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, 96 88 { "mmu_pte_write", VM_STAT(mmu_pte_write) }, 97 89 { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, ··· 100 90 { "mmu_flooded", VM_STAT(mmu_flooded) }, 101 91 { "mmu_recycled", VM_STAT(mmu_recycled) }, 102 92 { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, 93 + { "mmu_unsync", VM_STAT(mmu_unsync) }, 103 94 { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, 104 95 { "largepages", VM_STAT(lpages) }, 105 96 { NULL } 106 97 }; 107 - 108 98 109 99 unsigned long segment_base(u16 selector) 110 100 { ··· 362 352 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 363 353 { 364 354 if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) { 355 + kvm_mmu_sync_roots(vcpu); 365 356 kvm_mmu_flush_tlb(vcpu); 366 357 return; 367 358 } ··· 673 662 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n", 674 663 __func__, data); 675 664 break; 665 + case MSR_IA32_DEBUGCTLMSR: 666 + if (!data) { 667 + /* We support the non-activated case already */ 668 + break; 669 + } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { 670 + /* Values other than LBR and BTF are vendor-specific, 671 + thus reserved and should throw a #GP */ 672 + return 1; 673 + } 674 + pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", 675 + __func__, data); 676 + break; 676 677 case MSR_IA32_UCODE_REV: 677 678 case MSR_IA32_UCODE_WRITE: 678 679 break; ··· 715 692 /* ...but clean it before doing the actual write */ 716 693 vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); 717 694 718 - down_read(&current->mm->mmap_sem); 719 695 vcpu->arch.time_page = 720 696 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); 721 - up_read(&current->mm->mmap_sem); 722 697 723 698 if (is_error_page(vcpu->arch.time_page)) { 724 699 kvm_release_page_clean(vcpu->arch.time_page); ··· 773 752 case MSR_IA32_MC0_MISC+8: 774 753 case MSR_IA32_MC0_MISC+12: 775 754 case MSR_IA32_MC0_MISC+16: 755 + case MSR_IA32_MC0_MISC+20: 776 756 case MSR_IA32_UCODE_REV: 777 757 case MSR_IA32_EBL_CR_POWERON: 758 + case MSR_IA32_DEBUGCTLMSR: 759 + case MSR_IA32_LASTBRANCHFROMIP: 760 + case 
MSR_IA32_LASTBRANCHTOIP: 761 + case MSR_IA32_LASTINTFROMIP: 762 + case MSR_IA32_LASTINTTOIP: 778 763 data = 0; 779 764 break; 780 765 case MSR_MTRRcap: ··· 927 900 break; 928 901 case KVM_CAP_PV_MMU: 929 902 r = !tdp_enabled; 903 + break; 904 + case KVM_CAP_IOMMU: 905 + r = intel_iommu_found(); 930 906 break; 931 907 default: 932 908 r = 0; ··· 1333 1303 struct kvm_vcpu *vcpu = filp->private_data; 1334 1304 void __user *argp = (void __user *)arg; 1335 1305 int r; 1306 + struct kvm_lapic_state *lapic = NULL; 1336 1307 1337 1308 switch (ioctl) { 1338 1309 case KVM_GET_LAPIC: { 1339 - struct kvm_lapic_state lapic; 1310 + lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); 1340 1311 1341 - memset(&lapic, 0, sizeof lapic); 1342 - r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic); 1312 + r = -ENOMEM; 1313 + if (!lapic) 1314 + goto out; 1315 + r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic); 1343 1316 if (r) 1344 1317 goto out; 1345 1318 r = -EFAULT; 1346 - if (copy_to_user(argp, &lapic, sizeof lapic)) 1319 + if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state))) 1347 1320 goto out; 1348 1321 r = 0; 1349 1322 break; 1350 1323 } 1351 1324 case KVM_SET_LAPIC: { 1352 - struct kvm_lapic_state lapic; 1353 - 1354 - r = -EFAULT; 1355 - if (copy_from_user(&lapic, argp, sizeof lapic)) 1325 + lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); 1326 + r = -ENOMEM; 1327 + if (!lapic) 1356 1328 goto out; 1357 - r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);; 1329 + r = -EFAULT; 1330 + if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state))) 1331 + goto out; 1332 + r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic); 1358 1333 if (r) 1359 1334 goto out; 1360 1335 r = 0; ··· 1457 1422 r = -EINVAL; 1458 1423 } 1459 1424 out: 1425 + if (lapic) 1426 + kfree(lapic); 1460 1427 return r; 1461 1428 } 1462 1429 ··· 1667 1630 struct kvm *kvm = filp->private_data; 1668 1631 void __user *argp = (void __user *)arg; 1669 1632 int r = -EINVAL; 1633 + /* 1634 + * This union makes it completely explicit to gcc-3.x 1635 + * that these two variables' stack usage should be 1636 + * combined, not added together. 
1637 + */ 1638 + union { 1639 + struct kvm_pit_state ps; 1640 + struct kvm_memory_alias alias; 1641 + } u; 1670 1642 1671 1643 switch (ioctl) { 1672 1644 case KVM_SET_TSS_ADDR: ··· 1707 1661 case KVM_GET_NR_MMU_PAGES: 1708 1662 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); 1709 1663 break; 1710 - case KVM_SET_MEMORY_ALIAS: { 1711 - struct kvm_memory_alias alias; 1712 - 1664 + case KVM_SET_MEMORY_ALIAS: 1713 1665 r = -EFAULT; 1714 - if (copy_from_user(&alias, argp, sizeof alias)) 1666 + if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias))) 1715 1667 goto out; 1716 - r = kvm_vm_ioctl_set_memory_alias(kvm, &alias); 1668 + r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias); 1717 1669 if (r) 1718 1670 goto out; 1719 1671 break; 1720 - } 1721 1672 case KVM_CREATE_IRQCHIP: 1722 1673 r = -ENOMEM; 1723 1674 kvm->arch.vpic = kvm_create_pic(kvm); ··· 1742 1699 goto out; 1743 1700 if (irqchip_in_kernel(kvm)) { 1744 1701 mutex_lock(&kvm->lock); 1745 - if (irq_event.irq < 16) 1746 - kvm_pic_set_irq(pic_irqchip(kvm), 1747 - irq_event.irq, 1748 - irq_event.level); 1749 - kvm_ioapic_set_irq(kvm->arch.vioapic, 1750 - irq_event.irq, 1751 - irq_event.level); 1702 + kvm_set_irq(kvm, irq_event.irq, irq_event.level); 1752 1703 mutex_unlock(&kvm->lock); 1753 1704 r = 0; 1754 1705 } ··· 1750 1713 } 1751 1714 case KVM_GET_IRQCHIP: { 1752 1715 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 1753 - struct kvm_irqchip chip; 1716 + struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); 1754 1717 1755 - r = -EFAULT; 1756 - if (copy_from_user(&chip, argp, sizeof chip)) 1718 + r = -ENOMEM; 1719 + if (!chip) 1757 1720 goto out; 1721 + r = -EFAULT; 1722 + if (copy_from_user(chip, argp, sizeof *chip)) 1723 + goto get_irqchip_out; 1758 1724 r = -ENXIO; 1759 1725 if (!irqchip_in_kernel(kvm)) 1760 - goto out; 1761 - r = kvm_vm_ioctl_get_irqchip(kvm, &chip); 1726 + goto get_irqchip_out; 1727 + r = kvm_vm_ioctl_get_irqchip(kvm, chip); 1728 + if (r) 1729 + goto get_irqchip_out; 1730 + r = -EFAULT; 1731 + if (copy_to_user(argp, chip, sizeof *chip)) 1732 + goto get_irqchip_out; 1733 + r = 0; 1734 + get_irqchip_out: 1735 + kfree(chip); 1762 1736 if (r) 1763 1737 goto out; 1764 - r = -EFAULT; 1765 - if (copy_to_user(argp, &chip, sizeof chip)) 1766 - goto out; 1767 - r = 0; 1768 1738 break; 1769 1739 } 1770 1740 case KVM_SET_IRQCHIP: { 1771 1741 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ 1772 - struct kvm_irqchip chip; 1742 + struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL); 1773 1743 1774 - r = -EFAULT; 1775 - if (copy_from_user(&chip, argp, sizeof chip)) 1744 + r = -ENOMEM; 1745 + if (!chip) 1776 1746 goto out; 1747 + r = -EFAULT; 1748 + if (copy_from_user(chip, argp, sizeof *chip)) 1749 + goto set_irqchip_out; 1777 1750 r = -ENXIO; 1778 1751 if (!irqchip_in_kernel(kvm)) 1779 - goto out; 1780 - r = kvm_vm_ioctl_set_irqchip(kvm, &chip); 1752 + goto set_irqchip_out; 1753 + r = kvm_vm_ioctl_set_irqchip(kvm, chip); 1754 + if (r) 1755 + goto set_irqchip_out; 1756 + r = 0; 1757 + set_irqchip_out: 1758 + kfree(chip); 1781 1759 if (r) 1782 1760 goto out; 1783 - r = 0; 1784 1761 break; 1785 1762 } 1786 1763 case KVM_GET_PIT: { 1787 - struct kvm_pit_state ps; 1788 1764 r = -EFAULT; 1789 - if (copy_from_user(&ps, argp, sizeof ps)) 1765 + if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) 1790 1766 goto out; 1791 1767 r = -ENXIO; 1792 1768 if (!kvm->arch.vpit) 1793 1769 goto out; 1794 - r = kvm_vm_ioctl_get_pit(kvm, &ps); 1770 + r = kvm_vm_ioctl_get_pit(kvm, &u.ps); 1795 1771 if (r) 1796 1772 goto out; 1797 
1773 r = -EFAULT; 1798 - if (copy_to_user(argp, &ps, sizeof ps)) 1774 + if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) 1799 1775 goto out; 1800 1776 r = 0; 1801 1777 break; 1802 1778 } 1803 1779 case KVM_SET_PIT: { 1804 - struct kvm_pit_state ps; 1805 1780 r = -EFAULT; 1806 - if (copy_from_user(&ps, argp, sizeof ps)) 1781 + if (copy_from_user(&u.ps, argp, sizeof u.ps)) 1807 1782 goto out; 1808 1783 r = -ENXIO; 1809 1784 if (!kvm->arch.vpit) 1810 1785 goto out; 1811 - r = kvm_vm_ioctl_set_pit(kvm, &ps); 1786 + r = kvm_vm_ioctl_set_pit(kvm, &u.ps); 1812 1787 if (r) 1813 1788 goto out; 1814 1789 r = 0; ··· 2067 2018 2068 2019 val = *(u64 *)new; 2069 2020 2070 - down_read(&current->mm->mmap_sem); 2071 2021 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); 2072 - up_read(&current->mm->mmap_sem); 2073 2022 2074 2023 kaddr = kmap_atomic(page, KM_USER0); 2075 2024 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val); ··· 2087 2040 2088 2041 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) 2089 2042 { 2043 + kvm_mmu_invlpg(vcpu, address); 2090 2044 return X86EMUL_CONTINUE; 2091 2045 } 2092 2046 ··· 2128 2080 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) 2129 2081 { 2130 2082 u8 opcodes[4]; 2131 - unsigned long rip = vcpu->arch.rip; 2083 + unsigned long rip = kvm_rip_read(vcpu); 2132 2084 unsigned long rip_linear; 2133 2085 2134 2086 if (!printk_ratelimit()) ··· 2150 2102 .cmpxchg_emulated = emulator_cmpxchg_emulated, 2151 2103 }; 2152 2104 2105 + static void cache_all_regs(struct kvm_vcpu *vcpu) 2106 + { 2107 + kvm_register_read(vcpu, VCPU_REGS_RAX); 2108 + kvm_register_read(vcpu, VCPU_REGS_RSP); 2109 + kvm_register_read(vcpu, VCPU_REGS_RIP); 2110 + vcpu->arch.regs_dirty = ~0; 2111 + } 2112 + 2153 2113 int emulate_instruction(struct kvm_vcpu *vcpu, 2154 2114 struct kvm_run *run, 2155 2115 unsigned long cr2, ··· 2167 2111 int r; 2168 2112 struct decode_cache *c; 2169 2113 2114 + kvm_clear_exception_queue(vcpu); 2170 2115 vcpu->arch.mmio_fault_cr2 = cr2; 2171 - kvm_x86_ops->cache_regs(vcpu); 2116 + /* 2117 + * TODO: fix x86_emulate.c to use guest_read/write_register 2118 + * instead of direct ->regs accesses, can save hundred cycles 2119 + * on Intel for instructions that don't read/change RSP, for 2120 + * for example. 2121 + */ 2122 + cache_all_regs(vcpu); 2172 2123 2173 2124 vcpu->mmio_is_write = 0; 2174 2125 vcpu->arch.pio.string = 0; ··· 2235 2172 return EMULATE_DO_MMIO; 2236 2173 } 2237 2174 2238 - kvm_x86_ops->decache_regs(vcpu); 2239 2175 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags); 2240 2176 2241 2177 if (vcpu->mmio_is_write) { ··· 2287 2225 struct kvm_pio_request *io = &vcpu->arch.pio; 2288 2226 long delta; 2289 2227 int r; 2290 - 2291 - kvm_x86_ops->cache_regs(vcpu); 2228 + unsigned long val; 2292 2229 2293 2230 if (!io->string) { 2294 - if (io->in) 2295 - memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data, 2296 - io->size); 2231 + if (io->in) { 2232 + val = kvm_register_read(vcpu, VCPU_REGS_RAX); 2233 + memcpy(&val, vcpu->arch.pio_data, io->size); 2234 + kvm_register_write(vcpu, VCPU_REGS_RAX, val); 2235 + } 2297 2236 } else { 2298 2237 if (io->in) { 2299 2238 r = pio_copy_data(vcpu); 2300 - if (r) { 2301 - kvm_x86_ops->cache_regs(vcpu); 2239 + if (r) 2302 2240 return r; 2303 - } 2304 2241 } 2305 2242 2306 2243 delta = 1; ··· 2309 2248 * The size of the register should really depend on 2310 2249 * current address size. 
2311 2250 */ 2312 - vcpu->arch.regs[VCPU_REGS_RCX] -= delta; 2251 + val = kvm_register_read(vcpu, VCPU_REGS_RCX); 2252 + val -= delta; 2253 + kvm_register_write(vcpu, VCPU_REGS_RCX, val); 2313 2254 } 2314 2255 if (io->down) 2315 2256 delta = -delta; 2316 2257 delta *= io->size; 2317 - if (io->in) 2318 - vcpu->arch.regs[VCPU_REGS_RDI] += delta; 2319 - else 2320 - vcpu->arch.regs[VCPU_REGS_RSI] += delta; 2258 + if (io->in) { 2259 + val = kvm_register_read(vcpu, VCPU_REGS_RDI); 2260 + val += delta; 2261 + kvm_register_write(vcpu, VCPU_REGS_RDI, val); 2262 + } else { 2263 + val = kvm_register_read(vcpu, VCPU_REGS_RSI); 2264 + val += delta; 2265 + kvm_register_write(vcpu, VCPU_REGS_RSI, val); 2266 + } 2321 2267 } 2322 - 2323 - kvm_x86_ops->decache_regs(vcpu); 2324 2268 2325 2269 io->count -= io->cur_count; 2326 2270 io->cur_count = 0; ··· 2379 2313 int size, unsigned port) 2380 2314 { 2381 2315 struct kvm_io_device *pio_dev; 2316 + unsigned long val; 2382 2317 2383 2318 vcpu->run->exit_reason = KVM_EXIT_IO; 2384 2319 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; ··· 2400 2333 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size, 2401 2334 handler); 2402 2335 2403 - kvm_x86_ops->cache_regs(vcpu); 2404 - memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4); 2336 + val = kvm_register_read(vcpu, VCPU_REGS_RAX); 2337 + memcpy(vcpu->arch.pio_data, &val, 4); 2405 2338 2406 2339 kvm_x86_ops->skip_emulated_instruction(vcpu); 2407 2340 ··· 2559 2492 KVMTRACE_0D(HLT, vcpu, handler); 2560 2493 if (irqchip_in_kernel(vcpu->kvm)) { 2561 2494 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; 2562 - up_read(&vcpu->kvm->slots_lock); 2563 - kvm_vcpu_block(vcpu); 2564 - down_read(&vcpu->kvm->slots_lock); 2565 - if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) 2566 - return -EINTR; 2567 2495 return 1; 2568 2496 } else { 2569 2497 vcpu->run->exit_reason = KVM_EXIT_HLT; ··· 2581 2519 unsigned long nr, a0, a1, a2, a3, ret; 2582 2520 int r = 1; 2583 2521 2584 - kvm_x86_ops->cache_regs(vcpu); 2585 - 2586 - nr = vcpu->arch.regs[VCPU_REGS_RAX]; 2587 - a0 = vcpu->arch.regs[VCPU_REGS_RBX]; 2588 - a1 = vcpu->arch.regs[VCPU_REGS_RCX]; 2589 - a2 = vcpu->arch.regs[VCPU_REGS_RDX]; 2590 - a3 = vcpu->arch.regs[VCPU_REGS_RSI]; 2522 + nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 2523 + a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 2524 + a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); 2525 + a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); 2526 + a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); 2591 2527 2592 2528 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler); 2593 2529 ··· 2608 2548 ret = -KVM_ENOSYS; 2609 2549 break; 2610 2550 } 2611 - vcpu->arch.regs[VCPU_REGS_RAX] = ret; 2612 - kvm_x86_ops->decache_regs(vcpu); 2551 + kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 2613 2552 ++vcpu->stat.hypercalls; 2614 2553 return r; 2615 2554 } ··· 2618 2559 { 2619 2560 char instruction[3]; 2620 2561 int ret = 0; 2562 + unsigned long rip = kvm_rip_read(vcpu); 2621 2563 2622 2564 2623 2565 /* ··· 2628 2568 */ 2629 2569 kvm_mmu_zap_all(vcpu->kvm); 2630 2570 2631 - kvm_x86_ops->cache_regs(vcpu); 2632 2571 kvm_x86_ops->patch_hypercall(vcpu, instruction); 2633 - if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu) 2572 + if (emulator_write_emulated(rip, instruction, 3, vcpu) 2634 2573 != X86EMUL_CONTINUE) 2635 2574 ret = -EFAULT; 2636 2575 ··· 2759 2700 u32 function, index; 2760 2701 struct kvm_cpuid_entry2 *e, *best; 2761 2702 2762 - kvm_x86_ops->cache_regs(vcpu); 2763 - function = vcpu->arch.regs[VCPU_REGS_RAX]; 2764 - 
index = vcpu->arch.regs[VCPU_REGS_RCX]; 2765 - vcpu->arch.regs[VCPU_REGS_RAX] = 0; 2766 - vcpu->arch.regs[VCPU_REGS_RBX] = 0; 2767 - vcpu->arch.regs[VCPU_REGS_RCX] = 0; 2768 - vcpu->arch.regs[VCPU_REGS_RDX] = 0; 2703 + function = kvm_register_read(vcpu, VCPU_REGS_RAX); 2704 + index = kvm_register_read(vcpu, VCPU_REGS_RCX); 2705 + kvm_register_write(vcpu, VCPU_REGS_RAX, 0); 2706 + kvm_register_write(vcpu, VCPU_REGS_RBX, 0); 2707 + kvm_register_write(vcpu, VCPU_REGS_RCX, 0); 2708 + kvm_register_write(vcpu, VCPU_REGS_RDX, 0); 2769 2709 best = NULL; 2770 2710 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { 2771 2711 e = &vcpu->arch.cpuid_entries[i]; ··· 2782 2724 best = e; 2783 2725 } 2784 2726 if (best) { 2785 - vcpu->arch.regs[VCPU_REGS_RAX] = best->eax; 2786 - vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx; 2787 - vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx; 2788 - vcpu->arch.regs[VCPU_REGS_RDX] = best->edx; 2727 + kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax); 2728 + kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx); 2729 + kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx); 2730 + kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx); 2789 2731 } 2790 - kvm_x86_ops->decache_regs(vcpu); 2791 2732 kvm_x86_ops->skip_emulated_instruction(vcpu); 2792 2733 KVMTRACE_5D(CPUID, vcpu, function, 2793 - (u32)vcpu->arch.regs[VCPU_REGS_RAX], 2794 - (u32)vcpu->arch.regs[VCPU_REGS_RBX], 2795 - (u32)vcpu->arch.regs[VCPU_REGS_RCX], 2796 - (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler); 2734 + (u32)kvm_register_read(vcpu, VCPU_REGS_RAX), 2735 + (u32)kvm_register_read(vcpu, VCPU_REGS_RBX), 2736 + (u32)kvm_register_read(vcpu, VCPU_REGS_RCX), 2737 + (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler); 2797 2738 } 2798 2739 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 2799 2740 ··· 2833 2776 if (!apic || !apic->vapic_addr) 2834 2777 return; 2835 2778 2836 - down_read(&current->mm->mmap_sem); 2837 2779 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 2838 - up_read(&current->mm->mmap_sem); 2839 2780 2840 2781 vcpu->arch.apic->vapic_page = page; 2841 2782 } ··· 2851 2796 up_read(&vcpu->kvm->slots_lock); 2852 2797 } 2853 2798 2854 - static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2799 + static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2855 2800 { 2856 2801 int r; 2857 2802 2858 - if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { 2859 - pr_debug("vcpu %d received sipi with vector # %x\n", 2860 - vcpu->vcpu_id, vcpu->arch.sipi_vector); 2861 - kvm_lapic_reset(vcpu); 2862 - r = kvm_x86_ops->vcpu_reset(vcpu); 2863 - if (r) 2864 - return r; 2865 - vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 2866 - } 2867 - 2868 - down_read(&vcpu->kvm->slots_lock); 2869 - vapic_enter(vcpu); 2870 - 2871 - preempted: 2872 - if (vcpu->guest_debug.enabled) 2873 - kvm_x86_ops->guest_debug_pre(vcpu); 2874 - 2875 - again: 2876 2803 if (vcpu->requests) 2877 2804 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 2878 2805 kvm_mmu_unload(vcpu); ··· 2866 2829 if (vcpu->requests) { 2867 2830 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests)) 2868 2831 __kvm_migrate_timers(vcpu); 2832 + if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests)) 2833 + kvm_mmu_sync_roots(vcpu); 2869 2834 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) 2870 2835 kvm_x86_ops->tlb_flush(vcpu); 2871 2836 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS, ··· 2893 2854 2894 2855 local_irq_disable(); 2895 2856 2896 - if (vcpu->requests || need_resched()) { 2857 + if 
(vcpu->requests || need_resched() || signal_pending(current)) { 2897 2858 local_irq_enable(); 2898 2859 preempt_enable(); 2899 2860 r = 1; 2900 2861 goto out; 2901 2862 } 2902 2863 2903 - if (signal_pending(current)) { 2904 - local_irq_enable(); 2905 - preempt_enable(); 2906 - r = -EINTR; 2907 - kvm_run->exit_reason = KVM_EXIT_INTR; 2908 - ++vcpu->stat.signal_exits; 2909 - goto out; 2910 - } 2864 + if (vcpu->guest_debug.enabled) 2865 + kvm_x86_ops->guest_debug_pre(vcpu); 2911 2866 2912 2867 vcpu->guest_mode = 1; 2913 2868 /* ··· 2950 2917 * Profile KVM exit RIPs: 2951 2918 */ 2952 2919 if (unlikely(prof_on == KVM_PROFILING)) { 2953 - kvm_x86_ops->cache_regs(vcpu); 2954 - profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip); 2920 + unsigned long rip = kvm_rip_read(vcpu); 2921 + profile_hit(KVM_PROFILING, (void *)rip); 2955 2922 } 2956 2923 2957 2924 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu)) ··· 2960 2927 kvm_lapic_sync_from_vapic(vcpu); 2961 2928 2962 2929 r = kvm_x86_ops->handle_exit(kvm_run, vcpu); 2963 - 2964 - if (r > 0) { 2965 - if (dm_request_for_irq_injection(vcpu, kvm_run)) { 2966 - r = -EINTR; 2967 - kvm_run->exit_reason = KVM_EXIT_INTR; 2968 - ++vcpu->stat.request_irq_exits; 2969 - goto out; 2970 - } 2971 - if (!need_resched()) 2972 - goto again; 2973 - } 2974 - 2975 2930 out: 2976 - up_read(&vcpu->kvm->slots_lock); 2977 - if (r > 0) { 2978 - kvm_resched(vcpu); 2979 - down_read(&vcpu->kvm->slots_lock); 2980 - goto preempted; 2931 + return r; 2932 + } 2933 + 2934 + static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2935 + { 2936 + int r; 2937 + 2938 + if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) { 2939 + pr_debug("vcpu %d received sipi with vector # %x\n", 2940 + vcpu->vcpu_id, vcpu->arch.sipi_vector); 2941 + kvm_lapic_reset(vcpu); 2942 + r = kvm_x86_ops->vcpu_reset(vcpu); 2943 + if (r) 2944 + return r; 2945 + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 2981 2946 } 2982 2947 2948 + down_read(&vcpu->kvm->slots_lock); 2949 + vapic_enter(vcpu); 2950 + 2951 + r = 1; 2952 + while (r > 0) { 2953 + if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) 2954 + r = vcpu_enter_guest(vcpu, kvm_run); 2955 + else { 2956 + up_read(&vcpu->kvm->slots_lock); 2957 + kvm_vcpu_block(vcpu); 2958 + down_read(&vcpu->kvm->slots_lock); 2959 + if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) 2960 + if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) 2961 + vcpu->arch.mp_state = 2962 + KVM_MP_STATE_RUNNABLE; 2963 + if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) 2964 + r = -EINTR; 2965 + } 2966 + 2967 + if (r > 0) { 2968 + if (dm_request_for_irq_injection(vcpu, kvm_run)) { 2969 + r = -EINTR; 2970 + kvm_run->exit_reason = KVM_EXIT_INTR; 2971 + ++vcpu->stat.request_irq_exits; 2972 + } 2973 + if (signal_pending(current)) { 2974 + r = -EINTR; 2975 + kvm_run->exit_reason = KVM_EXIT_INTR; 2976 + ++vcpu->stat.signal_exits; 2977 + } 2978 + if (need_resched()) { 2979 + up_read(&vcpu->kvm->slots_lock); 2980 + kvm_resched(vcpu); 2981 + down_read(&vcpu->kvm->slots_lock); 2982 + } 2983 + } 2984 + } 2985 + 2986 + up_read(&vcpu->kvm->slots_lock); 2983 2987 post_kvm_run_save(vcpu, kvm_run); 2984 2988 2985 2989 vapic_exit(vcpu); ··· 3036 2966 3037 2967 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { 3038 2968 kvm_vcpu_block(vcpu); 2969 + clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 3039 2970 r = -EAGAIN; 3040 2971 goto out; 3041 2972 } ··· 3070 2999 } 3071 3000 } 3072 3001 #endif 3073 - if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) { 
3074 - kvm_x86_ops->cache_regs(vcpu); 3075 - vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret; 3076 - kvm_x86_ops->decache_regs(vcpu); 3077 - } 3002 + if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) 3003 + kvm_register_write(vcpu, VCPU_REGS_RAX, 3004 + kvm_run->hypercall.ret); 3078 3005 3079 3006 r = __vcpu_run(vcpu, kvm_run); 3080 3007 ··· 3088 3019 { 3089 3020 vcpu_load(vcpu); 3090 3021 3091 - kvm_x86_ops->cache_regs(vcpu); 3092 - 3093 - regs->rax = vcpu->arch.regs[VCPU_REGS_RAX]; 3094 - regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX]; 3095 - regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX]; 3096 - regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX]; 3097 - regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI]; 3098 - regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI]; 3099 - regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 3100 - regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP]; 3022 + regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); 3023 + regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); 3024 + regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); 3025 + regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); 3026 + regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); 3027 + regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); 3028 + regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); 3029 + regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); 3101 3030 #ifdef CONFIG_X86_64 3102 - regs->r8 = vcpu->arch.regs[VCPU_REGS_R8]; 3103 - regs->r9 = vcpu->arch.regs[VCPU_REGS_R9]; 3104 - regs->r10 = vcpu->arch.regs[VCPU_REGS_R10]; 3105 - regs->r11 = vcpu->arch.regs[VCPU_REGS_R11]; 3106 - regs->r12 = vcpu->arch.regs[VCPU_REGS_R12]; 3107 - regs->r13 = vcpu->arch.regs[VCPU_REGS_R13]; 3108 - regs->r14 = vcpu->arch.regs[VCPU_REGS_R14]; 3109 - regs->r15 = vcpu->arch.regs[VCPU_REGS_R15]; 3031 + regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); 3032 + regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); 3033 + regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); 3034 + regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); 3035 + regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); 3036 + regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); 3037 + regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); 3038 + regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); 3110 3039 #endif 3111 3040 3112 - regs->rip = vcpu->arch.rip; 3041 + regs->rip = kvm_rip_read(vcpu); 3113 3042 regs->rflags = kvm_x86_ops->get_rflags(vcpu); 3114 3043 3115 3044 /* ··· 3125 3058 { 3126 3059 vcpu_load(vcpu); 3127 3060 3128 - vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax; 3129 - vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx; 3130 - vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx; 3131 - vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx; 3132 - vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi; 3133 - vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi; 3134 - vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp; 3135 - vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp; 3061 + kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); 3062 + kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); 3063 + kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); 3064 + kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); 3065 + kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); 3066 + kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); 3067 + kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); 3068 + kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); 3136 3069 #ifdef CONFIG_X86_64 3137 - vcpu->arch.regs[VCPU_REGS_R8] = regs->r8; 3138 - vcpu->arch.regs[VCPU_REGS_R9] = regs->r9; 3139 - vcpu->arch.regs[VCPU_REGS_R10] = regs->r10; 3140 - 
vcpu->arch.regs[VCPU_REGS_R11] = regs->r11; 3141 - vcpu->arch.regs[VCPU_REGS_R12] = regs->r12; 3142 - vcpu->arch.regs[VCPU_REGS_R13] = regs->r13; 3143 - vcpu->arch.regs[VCPU_REGS_R14] = regs->r14; 3144 - vcpu->arch.regs[VCPU_REGS_R15] = regs->r15; 3070 + kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); 3071 + kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); 3072 + kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); 3073 + kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); 3074 + kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); 3075 + kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); 3076 + kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); 3077 + kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); 3078 + 3145 3079 #endif 3146 3080 3147 - vcpu->arch.rip = regs->rip; 3081 + kvm_rip_write(vcpu, regs->rip); 3148 3082 kvm_x86_ops->set_rflags(vcpu, regs->rflags); 3149 3083 3150 - kvm_x86_ops->decache_regs(vcpu); 3151 3084 3152 3085 vcpu->arch.exception.pending = false; 3153 3086 ··· 3361 3294 return 0; 3362 3295 } 3363 3296 3297 + static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg) 3298 + { 3299 + struct kvm_segment segvar = { 3300 + .base = selector << 4, 3301 + .limit = 0xffff, 3302 + .selector = selector, 3303 + .type = 3, 3304 + .present = 1, 3305 + .dpl = 3, 3306 + .db = 0, 3307 + .s = 1, 3308 + .l = 0, 3309 + .g = 0, 3310 + .avl = 0, 3311 + .unusable = 0, 3312 + }; 3313 + kvm_x86_ops->set_segment(vcpu, &segvar, seg); 3314 + return 0; 3315 + } 3316 + 3364 3317 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3365 3318 int type_bits, int seg) 3366 3319 { 3367 3320 struct kvm_segment kvm_seg; 3368 3321 3322 + if (!(vcpu->arch.cr0 & X86_CR0_PE)) 3323 + return kvm_load_realmode_segment(vcpu, selector, seg); 3369 3324 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) 3370 3325 return 1; 3371 3326 kvm_seg.type |= type_bits; ··· 3405 3316 struct tss_segment_32 *tss) 3406 3317 { 3407 3318 tss->cr3 = vcpu->arch.cr3; 3408 - tss->eip = vcpu->arch.rip; 3319 + tss->eip = kvm_rip_read(vcpu); 3409 3320 tss->eflags = kvm_x86_ops->get_rflags(vcpu); 3410 - tss->eax = vcpu->arch.regs[VCPU_REGS_RAX]; 3411 - tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX]; 3412 - tss->edx = vcpu->arch.regs[VCPU_REGS_RDX]; 3413 - tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX]; 3414 - tss->esp = vcpu->arch.regs[VCPU_REGS_RSP]; 3415 - tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP]; 3416 - tss->esi = vcpu->arch.regs[VCPU_REGS_RSI]; 3417 - tss->edi = vcpu->arch.regs[VCPU_REGS_RDI]; 3418 - 3321 + tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX); 3322 + tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); 3323 + tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX); 3324 + tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX); 3325 + tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP); 3326 + tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP); 3327 + tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI); 3328 + tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI); 3419 3329 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES); 3420 3330 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS); 3421 3331 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS); ··· 3430 3342 { 3431 3343 kvm_set_cr3(vcpu, tss->cr3); 3432 3344 3433 - vcpu->arch.rip = tss->eip; 3345 + kvm_rip_write(vcpu, tss->eip); 3434 3346 kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2); 3435 3347 3436 - vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax; 3437 - vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx; 3438 - vcpu->arch.regs[VCPU_REGS_RDX] = 
tss->edx; 3439 - vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx; 3440 - vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp; 3441 - vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp; 3442 - vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi; 3443 - vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi; 3348 + kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax); 3349 + kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx); 3350 + kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx); 3351 + kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx); 3352 + kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp); 3353 + kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp); 3354 + kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi); 3355 + kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi); 3444 3356 3445 3357 if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR)) 3446 3358 return 1; ··· 3468 3380 static void save_state_to_tss16(struct kvm_vcpu *vcpu, 3469 3381 struct tss_segment_16 *tss) 3470 3382 { 3471 - tss->ip = vcpu->arch.rip; 3383 + tss->ip = kvm_rip_read(vcpu); 3472 3384 tss->flag = kvm_x86_ops->get_rflags(vcpu); 3473 - tss->ax = vcpu->arch.regs[VCPU_REGS_RAX]; 3474 - tss->cx = vcpu->arch.regs[VCPU_REGS_RCX]; 3475 - tss->dx = vcpu->arch.regs[VCPU_REGS_RDX]; 3476 - tss->bx = vcpu->arch.regs[VCPU_REGS_RBX]; 3477 - tss->sp = vcpu->arch.regs[VCPU_REGS_RSP]; 3478 - tss->bp = vcpu->arch.regs[VCPU_REGS_RBP]; 3479 - tss->si = vcpu->arch.regs[VCPU_REGS_RSI]; 3480 - tss->di = vcpu->arch.regs[VCPU_REGS_RDI]; 3385 + tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX); 3386 + tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX); 3387 + tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX); 3388 + tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX); 3389 + tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP); 3390 + tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP); 3391 + tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI); 3392 + tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI); 3481 3393 3482 3394 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES); 3483 3395 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS); ··· 3490 3402 static int load_state_from_tss16(struct kvm_vcpu *vcpu, 3491 3403 struct tss_segment_16 *tss) 3492 3404 { 3493 - vcpu->arch.rip = tss->ip; 3405 + kvm_rip_write(vcpu, tss->ip); 3494 3406 kvm_x86_ops->set_rflags(vcpu, tss->flag | 2); 3495 - vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax; 3496 - vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx; 3497 - vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx; 3498 - vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx; 3499 - vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp; 3500 - vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp; 3501 - vcpu->arch.regs[VCPU_REGS_RSI] = tss->si; 3502 - vcpu->arch.regs[VCPU_REGS_RDI] = tss->di; 3407 + kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax); 3408 + kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx); 3409 + kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx); 3410 + kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx); 3411 + kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp); 3412 + kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp); 3413 + kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si); 3414 + kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di); 3503 3415 3504 3416 if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR)) 3505 3417 return 1; ··· 3622 3534 } 3623 3535 3624 3536 kvm_x86_ops->skip_emulated_instruction(vcpu); 3625 - kvm_x86_ops->cache_regs(vcpu); 3626 3537 3627 3538 if (nseg_desc.type & 8) 3628 3539 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base, ··· 3646 3559 tr_seg.type = 11; 3647 3560 
kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR); 3648 3561 out: 3649 - kvm_x86_ops->decache_regs(vcpu); 3650 3562 return ret; 3651 3563 } 3652 3564 EXPORT_SYMBOL_GPL(kvm_task_switch); ··· 3708 3622 pr_debug("Set back pending irq %d\n", 3709 3623 pending_vec); 3710 3624 } 3625 + kvm_pic_clear_isr_ack(vcpu->kvm); 3711 3626 } 3712 3627 3713 3628 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); ··· 3720 3633 3721 3634 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 3722 3635 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 3636 + 3637 + /* Older userspace won't unhalt the vcpu on reset. */ 3638 + if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 && 3639 + sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 3640 + !(vcpu->arch.cr0 & X86_CR0_PE)) 3641 + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 3723 3642 3724 3643 vcpu_put(vcpu); 3725 3644 ··· 4011 3918 return ERR_PTR(-ENOMEM); 4012 3919 4013 3920 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); 3921 + INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); 4014 3922 4015 3923 return kvm; 4016 3924 } ··· 4044 3950 4045 3951 void kvm_arch_destroy_vm(struct kvm *kvm) 4046 3952 { 3953 + kvm_iommu_unmap_guest(kvm); 3954 + kvm_free_all_assigned_devices(kvm); 4047 3955 kvm_free_pit(kvm); 4048 3956 kfree(kvm->arch.vpic); 4049 3957 kfree(kvm->arch.vioapic); ··· 4077 3981 userspace_addr = do_mmap(NULL, 0, 4078 3982 npages * PAGE_SIZE, 4079 3983 PROT_READ | PROT_WRITE, 4080 - MAP_SHARED | MAP_ANONYMOUS, 3984 + MAP_PRIVATE | MAP_ANONYMOUS, 4081 3985 0); 4082 3986 up_write(&current->mm->mmap_sem); 4083 3987
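Note on the x86.c hunks above: kvm_load_realmode_segment() reflects plain real-mode addressing, where a segment's hidden base is simply the 16-bit selector shifted left by four with a fixed 64K limit — loading selector 0xb800, for instance, yields base 0xb8000 and limit 0xffff. The separate check added to kvm_arch_vcpu_ioctl_set_sregs() ("Older userspace won't unhalt the vcpu on reset") matches the one state that does not follow that rule: the architectural reset state, where CS has selector 0xf000 but hidden base 0xffff0000 and RIP 0xfff0 (physical 0xfffffff0). Recognizing that state lets KVM force a freshly reset BSP back to KVM_MP_STATE_RUNNABLE even when old userspace never issues the unhalt itself.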
+22
arch/x86/kvm/x86.h
··· 1 + #ifndef ARCH_X86_KVM_X86_H 2 + #define ARCH_X86_KVM_X86_H 3 + 4 + #include <linux/kvm_host.h> 5 + 6 + static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) 7 + { 8 + vcpu->arch.exception.pending = false; 9 + } 10 + 11 + static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector) 12 + { 13 + vcpu->arch.interrupt.pending = true; 14 + vcpu->arch.interrupt.nr = vector; 15 + } 16 + 17 + static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu) 18 + { 19 + vcpu->arch.interrupt.pending = false; 20 + } 21 + 22 + #endif
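The new x86.h helpers above only manipulate the queued exception/interrupt state; their call sites live in other files not shown in this hunk. A minimal sketch of the intended pattern — inject_pending_example() is a hypothetical caller, not code from this patch:

#include <linux/kvm_host.h>
#include "x86.h"

/* Hypothetical caller - illustrates the intended use of the x86.h helpers;
 * the real callers are the vmx/svm injection paths, not shown here. */
static void inject_pending_example(struct kvm_vcpu *vcpu, u8 vector)
{
        /* remember the vector; the hardware-specific code will inject it */
        kvm_queue_interrupt(vcpu, vector);

        /* ... guest entry consumes vcpu->arch.interrupt.nr ... */

        /* after delivery (or on vcpu reset) the queued state is dropped */
        kvm_clear_interrupt_queue(vcpu);
        kvm_clear_exception_queue(vcpu);
}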
+114 -56
arch/x86/kvm/x86_emulate.c
··· 26 26 #define DPRINTF(_f, _a ...) printf(_f , ## _a) 27 27 #else 28 28 #include <linux/kvm_host.h> 29 + #include "kvm_cache_regs.h" 29 30 #define DPRINTF(x...) do {} while (0) 30 31 #endif 31 32 #include <linux/module.h> ··· 47 46 #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */ 48 47 #define DstReg (2<<1) /* Register operand. */ 49 48 #define DstMem (3<<1) /* Memory operand. */ 50 - #define DstMask (3<<1) 49 + #define DstAcc (4<<1) /* Destination Accumulator */ 50 + #define DstMask (7<<1) 51 51 /* Source operand type. */ 52 - #define SrcNone (0<<3) /* No source operand. */ 53 - #define SrcImplicit (0<<3) /* Source operand is implicit in the opcode. */ 54 - #define SrcReg (1<<3) /* Register operand. */ 55 - #define SrcMem (2<<3) /* Memory operand. */ 56 - #define SrcMem16 (3<<3) /* Memory operand (16-bit). */ 57 - #define SrcMem32 (4<<3) /* Memory operand (32-bit). */ 58 - #define SrcImm (5<<3) /* Immediate operand. */ 59 - #define SrcImmByte (6<<3) /* 8-bit sign-extended immediate operand. */ 60 - #define SrcMask (7<<3) 52 + #define SrcNone (0<<4) /* No source operand. */ 53 + #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */ 54 + #define SrcReg (1<<4) /* Register operand. */ 55 + #define SrcMem (2<<4) /* Memory operand. */ 56 + #define SrcMem16 (3<<4) /* Memory operand (16-bit). */ 57 + #define SrcMem32 (4<<4) /* Memory operand (32-bit). */ 58 + #define SrcImm (5<<4) /* Immediate operand. */ 59 + #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ 60 + #define SrcMask (7<<4) 61 61 /* Generic ModRM decode. */ 62 - #define ModRM (1<<6) 62 + #define ModRM (1<<7) 63 63 /* Destination is only written; never read. */ 64 - #define Mov (1<<7) 65 - #define BitOp (1<<8) 66 - #define MemAbs (1<<9) /* Memory operand is absolute displacement */ 67 - #define String (1<<10) /* String instruction (rep capable) */ 68 - #define Stack (1<<11) /* Stack instruction (push/pop) */ 64 + #define Mov (1<<8) 65 + #define BitOp (1<<9) 66 + #define MemAbs (1<<10) /* Memory operand is absolute displacement */ 67 + #define String (1<<12) /* String instruction (rep capable) */ 68 + #define Stack (1<<13) /* Stack instruction (push/pop) */ 69 69 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ 70 70 #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ 71 71 #define GroupMask 0xff /* Group number stored in bits 0:7 */ ··· 96 94 /* 0x20 - 0x27 */ 97 95 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 98 96 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 99 - SrcImmByte, SrcImm, 0, 0, 97 + DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0, 100 98 /* 0x28 - 0x2F */ 101 99 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 102 100 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, ··· 108 106 /* 0x38 - 0x3F */ 109 107 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 110 108 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 111 - 0, 0, 0, 0, 109 + ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 110 + 0, 0, 112 111 /* 0x40 - 0x47 */ 113 112 DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, 114 113 /* 0x48 - 0x4F */ ··· 156 153 0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String, 157 154 ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String, 158 155 ByteOp | ImplicitOps | String, ImplicitOps | String, 159 - /* 0xB0 - 0xBF */ 160 - 0, 0, 0, 0, 0, 0, 0, 0, 161 - DstReg | SrcImm | Mov, 0, 0, 0, 0, 0, 0, 0, 156 + /* 0xB0 - 0xB7 */ 157 + ByteOp | DstReg 
| SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, 158 + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, 159 + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, 160 + ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, 161 + /* 0xB8 - 0xBF */ 162 + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, 163 + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, 164 + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, 165 + DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, 162 166 /* 0xC0 - 0xC7 */ 163 167 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 164 168 0, ImplicitOps | Stack, 0, 0, ··· 179 169 /* 0xD8 - 0xDF */ 180 170 0, 0, 0, 0, 0, 0, 0, 0, 181 171 /* 0xE0 - 0xE7 */ 182 - 0, 0, 0, 0, 0, 0, 0, 0, 172 + 0, 0, 0, 0, 173 + SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 174 + SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 183 175 /* 0xE8 - 0xEF */ 184 176 ImplicitOps | Stack, SrcImm | ImplicitOps, 185 177 ImplicitOps, SrcImmByte | ImplicitOps, 186 - 0, 0, 0, 0, 178 + SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 179 + SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 187 180 /* 0xF0 - 0xF7 */ 188 181 0, 0, 0, 0, 189 182 ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3, 190 183 /* 0xF8 - 0xFF */ 191 184 ImplicitOps, 0, ImplicitOps, ImplicitOps, 192 - 0, 0, Group | Group4, Group | Group5, 185 + ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, 193 186 }; 194 187 195 188 static u16 twobyte_table[256] = { ··· 281 268 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM, 282 269 0, 0, 0, 0, 283 270 [Group3*8] = 284 - DstMem | SrcImm | ModRM | SrcImm, 0, 285 - DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM, 271 + DstMem | SrcImm | ModRM, 0, 272 + DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 286 273 0, 0, 0, 0, 287 274 [Group4*8] = 288 275 ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM, 289 276 0, 0, 0, 0, 0, 0, 290 277 [Group5*8] = 291 - DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 0, 0, 292 - SrcMem | ModRM, 0, SrcMem | ModRM | Stack, 0, 278 + DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 279 + SrcMem | ModRM | Stack, 0, 280 + SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0, 293 281 [Group7*8] = 294 282 0, 0, ModRM | SrcMem, ModRM | SrcMem, 295 283 SrcNone | ModRM | DstMem | Mov, 0, ··· 853 839 /* Shadow copy of register state. Committed on successful emulation. 
*/ 854 840 855 841 memset(c, 0, sizeof(struct decode_cache)); 856 - c->eip = ctxt->vcpu->arch.rip; 842 + c->eip = kvm_rip_read(ctxt->vcpu); 857 843 ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); 858 844 memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); 859 845 ··· 1062 1048 } 1063 1049 c->dst.type = OP_MEM; 1064 1050 break; 1051 + case DstAcc: 1052 + c->dst.type = OP_REG; 1053 + c->dst.bytes = c->op_bytes; 1054 + c->dst.ptr = &c->regs[VCPU_REGS_RAX]; 1055 + switch (c->op_bytes) { 1056 + case 1: 1057 + c->dst.val = *(u8 *)c->dst.ptr; 1058 + break; 1059 + case 2: 1060 + c->dst.val = *(u16 *)c->dst.ptr; 1061 + break; 1062 + case 4: 1063 + c->dst.val = *(u32 *)c->dst.ptr; 1064 + break; 1065 + } 1066 + c->dst.orig_val = c->dst.val; 1067 + break; 1065 1068 } 1066 1069 1067 1070 if (c->rip_relative) ··· 1182 1151 case 1: /* dec */ 1183 1152 emulate_1op("dec", c->dst, ctxt->eflags); 1184 1153 break; 1154 + case 2: /* call near abs */ { 1155 + long int old_eip; 1156 + old_eip = c->eip; 1157 + c->eip = c->src.val; 1158 + c->src.val = old_eip; 1159 + emulate_push(ctxt); 1160 + break; 1161 + } 1185 1162 case 4: /* jmp abs */ 1186 1163 c->eip = c->src.val; 1187 1164 break; ··· 1290 1251 u64 msr_data; 1291 1252 unsigned long saved_eip = 0; 1292 1253 struct decode_cache *c = &ctxt->decode; 1254 + unsigned int port; 1255 + int io_dir_in; 1293 1256 int rc = 0; 1294 1257 1295 1258 /* Shadow copy of register state. Committed on successful emulation. ··· 1308 1267 if (c->rep_prefix && (c->d & String)) { 1309 1268 /* All REP prefixes have the same first termination condition */ 1310 1269 if (c->regs[VCPU_REGS_RCX] == 0) { 1311 - ctxt->vcpu->arch.rip = c->eip; 1270 + kvm_rip_write(ctxt->vcpu, c->eip); 1312 1271 goto done; 1313 1272 } 1314 1273 /* The second termination condition only applies for REPE ··· 1322 1281 (c->b == 0xae) || (c->b == 0xaf)) { 1323 1282 if ((c->rep_prefix == REPE_PREFIX) && 1324 1283 ((ctxt->eflags & EFLG_ZF) == 0)) { 1325 - ctxt->vcpu->arch.rip = c->eip; 1284 + kvm_rip_write(ctxt->vcpu, c->eip); 1326 1285 goto done; 1327 1286 } 1328 1287 if ((c->rep_prefix == REPNE_PREFIX) && 1329 1288 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) { 1330 - ctxt->vcpu->arch.rip = c->eip; 1289 + kvm_rip_write(ctxt->vcpu, c->eip); 1331 1290 goto done; 1332 1291 } 1333 1292 } 1334 1293 c->regs[VCPU_REGS_RCX]--; 1335 - c->eip = ctxt->vcpu->arch.rip; 1294 + c->eip = kvm_rip_read(ctxt->vcpu); 1336 1295 } 1337 1296 1338 1297 if (c->src.type == OP_MEM) { ··· 1392 1351 sbb: /* sbb */ 1393 1352 emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); 1394 1353 break; 1395 - case 0x20 ... 0x23: 1354 + case 0x20 ... 0x25: 1396 1355 and: /* and */ 1397 1356 emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); 1398 1357 break; 1399 - case 0x24: /* and al imm8 */ 1400 - c->dst.type = OP_REG; 1401 - c->dst.ptr = &c->regs[VCPU_REGS_RAX]; 1402 - c->dst.val = *(u8 *)c->dst.ptr; 1403 - c->dst.bytes = 1; 1404 - c->dst.orig_val = c->dst.val; 1405 - goto and; 1406 - case 0x25: /* and ax imm16, or eax imm32 */ 1407 - c->dst.type = OP_REG; 1408 - c->dst.bytes = c->op_bytes; 1409 - c->dst.ptr = &c->regs[VCPU_REGS_RAX]; 1410 - if (c->op_bytes == 2) 1411 - c->dst.val = *(u16 *)c->dst.ptr; 1412 - else 1413 - c->dst.val = *(u32 *)c->dst.ptr; 1414 - c->dst.orig_val = c->dst.val; 1415 - goto and; 1416 1358 case 0x28 ... 0x2d: 1417 1359 sub: /* sub */ 1418 1360 emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags); ··· 1683 1659 case 0xae ... 0xaf: /* scas */ 1684 1660 DPRINTF("Urk! 
I don't handle SCAS.\n"); 1685 1661 goto cannot_emulate; 1686 - case 0xb8: /* mov r, imm */ 1662 + case 0xb0 ... 0xbf: /* mov r, imm */ 1687 1663 goto mov; 1688 1664 case 0xc0 ... 0xc1: 1689 1665 emulate_grp2(ctxt); ··· 1703 1679 c->src.val = c->regs[VCPU_REGS_RCX]; 1704 1680 emulate_grp2(ctxt); 1705 1681 break; 1682 + case 0xe4: /* inb */ 1683 + case 0xe5: /* in */ 1684 + port = insn_fetch(u8, 1, c->eip); 1685 + io_dir_in = 1; 1686 + goto do_io; 1687 + case 0xe6: /* outb */ 1688 + case 0xe7: /* out */ 1689 + port = insn_fetch(u8, 1, c->eip); 1690 + io_dir_in = 0; 1691 + goto do_io; 1706 1692 case 0xe8: /* call (near) */ { 1707 1693 long int rel; 1708 1694 switch (c->op_bytes) { ··· 1763 1729 jmp_rel(c, c->src.val); 1764 1730 c->dst.type = OP_NONE; /* Disable writeback. */ 1765 1731 break; 1732 + case 0xec: /* in al,dx */ 1733 + case 0xed: /* in (e/r)ax,dx */ 1734 + port = c->regs[VCPU_REGS_RDX]; 1735 + io_dir_in = 1; 1736 + goto do_io; 1737 + case 0xee: /* out al,dx */ 1738 + case 0xef: /* out (e/r)ax,dx */ 1739 + port = c->regs[VCPU_REGS_RDX]; 1740 + io_dir_in = 0; 1741 + do_io: if (kvm_emulate_pio(ctxt->vcpu, NULL, io_dir_in, 1742 + (c->d & ByteOp) ? 1 : c->op_bytes, 1743 + port) != 0) { 1744 + c->eip = saved_eip; 1745 + goto cannot_emulate; 1746 + } 1747 + return 0; 1766 1748 case 0xf4: /* hlt */ 1767 1749 ctxt->vcpu->arch.halt_request = 1; 1768 1750 break; ··· 1804 1754 ctxt->eflags |= X86_EFLAGS_IF; 1805 1755 c->dst.type = OP_NONE; /* Disable writeback. */ 1806 1756 break; 1757 + case 0xfc: /* cld */ 1758 + ctxt->eflags &= ~EFLG_DF; 1759 + c->dst.type = OP_NONE; /* Disable writeback. */ 1760 + break; 1761 + case 0xfd: /* std */ 1762 + ctxt->eflags |= EFLG_DF; 1763 + c->dst.type = OP_NONE; /* Disable writeback. */ 1764 + break; 1807 1765 case 0xfe ... 0xff: /* Grp4/Grp5 */ 1808 1766 rc = emulate_grp45(ctxt, ops); 1809 1767 if (rc != 0) ··· 1826 1768 1827 1769 /* Commit shadow register state. */ 1828 1770 memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs); 1829 - ctxt->vcpu->arch.rip = c->eip; 1771 + kvm_rip_write(ctxt->vcpu, c->eip); 1830 1772 1831 1773 done: 1832 1774 if (rc == X86EMUL_UNHANDLEABLE) { ··· 1851 1793 goto done; 1852 1794 1853 1795 /* Let the processor re-execute the fixed hypercall */ 1854 - c->eip = ctxt->vcpu->arch.rip; 1796 + c->eip = kvm_rip_read(ctxt->vcpu); 1855 1797 /* Disable writeback. */ 1856 1798 c->dst.type = OP_NONE; 1857 1799 break; ··· 1947 1889 rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data); 1948 1890 if (rc) { 1949 1891 kvm_inject_gp(ctxt->vcpu, 0); 1950 - c->eip = ctxt->vcpu->arch.rip; 1892 + c->eip = kvm_rip_read(ctxt->vcpu); 1951 1893 } 1952 1894 rc = X86EMUL_CONTINUE; 1953 1895 c->dst.type = OP_NONE; ··· 1957 1899 rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data); 1958 1900 if (rc) { 1959 1901 kvm_inject_gp(ctxt->vcpu, 0); 1960 - c->eip = ctxt->vcpu->arch.rip; 1902 + c->eip = kvm_rip_read(ctxt->vcpu); 1961 1903 } else { 1962 1904 c->regs[VCPU_REGS_RAX] = (u32)msr_data; 1963 1905 c->regs[VCPU_REGS_RDX] = msr_data >> 32;
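The operand-decode flags at the top of x86_emulate.c were re-packed to make room for the new DstAcc destination type: the Dst field now spans bits 1-3 (so DstMask = 7<<1 = 0x0e and DstAcc = 4<<1 = 0x08), the Src field moved up to bits 4-6, and ModRM moved to bit 7. As a worked example, the new table entry for opcode 0x3c (cmp al, imm8), ByteOp | DstAcc | SrcImm, evaluates to 0x01 | 0x08 | 0x50 = 0x59; (0x59 & DstMask) == DstAcc is what routes decoding into the new DstAcc case that points c->dst at c->regs[VCPU_REGS_RAX].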
+2 -9
arch/x86/xen/time.c
··· 198 198 /* Get the TSC speed from Xen */ 199 199 unsigned long xen_tsc_khz(void) 200 200 { 201 - u64 xen_khz = 1000000ULL << 32; 202 - const struct pvclock_vcpu_time_info *info = 201 + struct pvclock_vcpu_time_info *info = 203 202 &HYPERVISOR_shared_info->vcpu_info[0].time; 204 203 205 - do_div(xen_khz, info->tsc_to_system_mul); 206 - if (info->tsc_shift < 0) 207 - xen_khz <<= -info->tsc_shift; 208 - else 209 - xen_khz >>= info->tsc_shift; 210 - 211 - return xen_khz; 204 + return pvclock_tsc_khz(info); 212 205 } 213 206 214 207 cycle_t xen_clocksource_read(void)
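The conversion that used to be open-coded here moves into the shared pvclock code; only its declaration (pvclock_tsc_khz() in include/asm-x86/pvclock.h, further down in this diff) is visible in this commit view. A plausible sketch of that helper, assuming it simply mirrors the arithmetic removed above:

/* Sketch only - the real body lives in the shared pvclock code and is not
 * shown in this diff.  tsc_to_system_mul is a 32.32 fixed-point factor
 * converting (shifted) TSC ticks to nanoseconds, so dividing 10^6 * 2^32
 * by it, then applying tsc_shift, yields the TSC frequency in kHz. */
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
{
        u64 pv_tsc_khz = 1000000ULL << 32;

        do_div(pv_tsc_khz, src->tsc_to_system_mul);
        if (src->tsc_shift < 0)
                pv_tsc_khz <<= -src->tsc_shift;
        else
                pv_tsc_khz >>= src->tsc_shift;

        return pv_tsc_khz;
}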
drivers/pci/dma_remapping.h → include/linux/dma_remapping.h
+2 -2
drivers/pci/dmar.c
··· 28 28 29 29 #include <linux/pci.h> 30 30 #include <linux/dmar.h> 31 + #include <linux/iova.h> 32 + #include <linux/intel-iommu.h> 31 33 #include <linux/timer.h> 32 - #include "iova.h" 33 - #include "intel-iommu.h" 34 34 35 35 #undef PREFIX 36 36 #define PREFIX "DMAR:"
+112 -4
drivers/pci/intel-iommu.c
··· 33 33 #include <linux/dma-mapping.h> 34 34 #include <linux/mempool.h> 35 35 #include <linux/timer.h> 36 - #include "iova.h" 37 - #include "intel-iommu.h" 36 + #include <linux/iova.h> 37 + #include <linux/intel-iommu.h> 38 38 #include <asm/proto.h> /* force_iommu in this header in x86-64*/ 39 39 #include <asm/cacheflush.h> 40 40 #include <asm/iommu.h> ··· 156 156 return iommu_kmem_cache_alloc(iommu_domain_cache); 157 157 } 158 158 159 - static inline void free_domain_mem(void *vaddr) 159 + static void free_domain_mem(void *vaddr) 160 160 { 161 161 kmem_cache_free(iommu_domain_cache, vaddr); 162 162 } ··· 1341 1341 * find_domain 1342 1342 * Note: we use struct pci_dev->dev.archdata.iommu stores the info 1343 1343 */ 1344 - struct dmar_domain * 1344 + static struct dmar_domain * 1345 1345 find_domain(struct pci_dev *pdev) 1346 1346 { 1347 1347 struct device_domain_info *info; ··· 2318 2318 return 0; 2319 2319 } 2320 2320 2321 + void intel_iommu_domain_exit(struct dmar_domain *domain) 2322 + { 2323 + u64 end; 2324 + 2325 + /* Domain 0 is reserved, so dont process it */ 2326 + if (!domain) 2327 + return; 2328 + 2329 + end = DOMAIN_MAX_ADDR(domain->gaw); 2330 + end = end & (~PAGE_MASK_4K); 2331 + 2332 + /* clear ptes */ 2333 + dma_pte_clear_range(domain, 0, end); 2334 + 2335 + /* free page tables */ 2336 + dma_pte_free_pagetable(domain, 0, end); 2337 + 2338 + iommu_free_domain(domain); 2339 + free_domain_mem(domain); 2340 + } 2341 + EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); 2342 + 2343 + struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) 2344 + { 2345 + struct dmar_drhd_unit *drhd; 2346 + struct dmar_domain *domain; 2347 + struct intel_iommu *iommu; 2348 + 2349 + drhd = dmar_find_matched_drhd_unit(pdev); 2350 + if (!drhd) { 2351 + printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); 2352 + return NULL; 2353 + } 2354 + 2355 + iommu = drhd->iommu; 2356 + if (!iommu) { 2357 + printk(KERN_ERR 2358 + "intel_iommu_domain_alloc: iommu == NULL\n"); 2359 + return NULL; 2360 + } 2361 + domain = iommu_alloc_domain(iommu); 2362 + if (!domain) { 2363 + printk(KERN_ERR 2364 + "intel_iommu_domain_alloc: domain == NULL\n"); 2365 + return NULL; 2366 + } 2367 + if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 2368 + printk(KERN_ERR 2369 + "intel_iommu_domain_alloc: domain_init() failed\n"); 2370 + intel_iommu_domain_exit(domain); 2371 + return NULL; 2372 + } 2373 + return domain; 2374 + } 2375 + EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); 2376 + 2377 + int intel_iommu_context_mapping( 2378 + struct dmar_domain *domain, struct pci_dev *pdev) 2379 + { 2380 + int rc; 2381 + rc = domain_context_mapping(domain, pdev); 2382 + return rc; 2383 + } 2384 + EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); 2385 + 2386 + int intel_iommu_page_mapping( 2387 + struct dmar_domain *domain, dma_addr_t iova, 2388 + u64 hpa, size_t size, int prot) 2389 + { 2390 + int rc; 2391 + rc = domain_page_mapping(domain, iova, hpa, size, prot); 2392 + return rc; 2393 + } 2394 + EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); 2395 + 2396 + void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) 2397 + { 2398 + detach_domain_for_dev(domain, bus, devfn); 2399 + } 2400 + EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); 2401 + 2402 + struct dmar_domain * 2403 + intel_iommu_find_domain(struct pci_dev *pdev) 2404 + { 2405 + return find_domain(pdev); 2406 + } 2407 + EXPORT_SYMBOL_GPL(intel_iommu_find_domain); 2408 + 2409 + int intel_iommu_found(void) 2410 + { 2411 + return g_num_of_iommus; 2412 + } 2413 + 
EXPORT_SYMBOL_GPL(intel_iommu_found); 2414 + 2415 + u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) 2416 + { 2417 + struct dma_pte *pte; 2418 + u64 pfn; 2419 + 2420 + pfn = 0; 2421 + pte = addr_to_dma_pte(domain, iova); 2422 + 2423 + if (pte) 2424 + pfn = dma_pte_addr(*pte); 2425 + 2426 + return pfn >> PAGE_SHIFT_4K; 2427 + } 2428 + EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
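The exports above form a small domain API for consumers outside drivers/pci; in this series the consumer is KVM's VT-d glue (virt/kvm/vtd.c, not part of this section). The sketch below only illustrates the intended call order — the function names come from the exports above, while everything else (the example function, the identity mapping, the reduced error handling) is made up for illustration:

#include <linux/pci.h>
#include <linux/intel-iommu.h>

/* Put one PCI device into a private DMAR domain and map a single 4K page
 * for it (identity-mapped: IOVA == HPA).  DMA_PTE_READ/WRITE are the
 * permission bits from dma_remapping.h. */
static int example_assign_one_page(struct pci_dev *pdev, u64 hpa)
{
        struct dmar_domain *domain;
        int r;

        if (!intel_iommu_found())
                return -ENODEV;

        domain = intel_iommu_domain_alloc(pdev);        /* per-guest domain */
        if (!domain)
                return -ENOMEM;

        r = intel_iommu_context_mapping(domain, pdev);  /* bind the device */
        if (r)
                goto out_exit;

        r = intel_iommu_page_mapping(domain, (dma_addr_t)hpa, hpa, PAGE_SIZE,
                                     DMA_PTE_READ | DMA_PTE_WRITE);
        if (r)
                goto out_exit;

        return 0;

out_exit:
        intel_iommu_domain_exit(domain);
        return r;
}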
+22 -2
drivers/pci/intel-iommu.h → include/linux/intel-iommu.h
··· 25 25 #include <linux/types.h> 26 26 #include <linux/msi.h> 27 27 #include <linux/sysdev.h> 28 - #include "iova.h" 28 + #include <linux/iova.h> 29 29 #include <linux/io.h> 30 + #include <linux/dma_remapping.h> 30 31 #include <asm/cacheflush.h> 31 - #include "dma_remapping.h" 32 32 33 33 /* 34 34 * Intel IOMMU register specification per version 1.0 public spec. ··· 304 304 extern void qi_global_iec(struct intel_iommu *iommu); 305 305 306 306 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); 307 + 308 + void intel_iommu_domain_exit(struct dmar_domain *domain); 309 + struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev); 310 + int intel_iommu_context_mapping(struct dmar_domain *domain, 311 + struct pci_dev *pdev); 312 + int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova, 313 + u64 hpa, size_t size, int prot); 314 + void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn); 315 + struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev); 316 + u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova); 317 + 318 + #ifdef CONFIG_DMAR 319 + int intel_iommu_found(void); 320 + #else /* CONFIG_DMAR */ 321 + static inline int intel_iommu_found(void) 322 + { 323 + return 0; 324 + } 325 + #endif /* CONFIG_DMAR */ 326 + 307 327 #endif
+1 -1
drivers/pci/intr_remapping.c
··· 4 4 #include <linux/pci.h> 5 5 #include <linux/irq.h> 6 6 #include <asm/io_apic.h> 7 - #include "intel-iommu.h" 7 + #include <linux/intel-iommu.h> 8 8 #include "intr_remapping.h" 9 9 10 10 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
+1 -1
drivers/pci/intr_remapping.h
··· 1 - #include "intel-iommu.h" 1 + #include <linux/intel-iommu.h> 2 2 3 3 struct ioapic_scope { 4 4 struct intel_iommu *iommu;
+1 -1
drivers/pci/iova.c
··· 7 7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 8 */ 9 9 10 - #include "iova.h" 10 + #include <linux/iova.h> 11 11 12 12 void 13 13 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
drivers/pci/iova.h → include/linux/iova.h
-22
include/asm-x86/kvm.h
··· 208 208 struct kvm_pit_state { 209 209 struct kvm_pit_channel_state channels[3]; 210 210 }; 211 - 212 - #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) 213 - #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) 214 - #define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) 215 - #define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) 216 - #define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) 217 - #define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) 218 - #define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) 219 - #define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) 220 - #define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) 221 - #define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) 222 - #define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) 223 - #define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) 224 - #define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) 225 - #define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) 226 - #define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) 227 - #define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) 228 - #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) 229 - #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) 230 - #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) 231 - #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) 232 - 233 211 #endif /* ASM_X86__KVM_H */
+48 -34
include/asm-x86/kvm_host.h
··· 57 57 #define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE) 58 58 59 59 #define DE_VECTOR 0 60 + #define DB_VECTOR 1 61 + #define BP_VECTOR 3 62 + #define OF_VECTOR 4 63 + #define BR_VECTOR 5 60 64 #define UD_VECTOR 6 61 65 #define NM_VECTOR 7 62 66 #define DF_VECTOR 8 ··· 69 65 #define SS_VECTOR 12 70 66 #define GP_VECTOR 13 71 67 #define PF_VECTOR 14 68 + #define MF_VECTOR 16 72 69 #define MC_VECTOR 18 73 70 74 71 #define SELECTOR_TI_MASK (1 << 2) ··· 94 89 struct kvm_vcpu; 95 90 struct kvm; 96 91 97 - enum { 92 + enum kvm_reg { 98 93 VCPU_REGS_RAX = 0, 99 94 VCPU_REGS_RCX = 1, 100 95 VCPU_REGS_RDX = 2, ··· 113 108 VCPU_REGS_R14 = 14, 114 109 VCPU_REGS_R15 = 15, 115 110 #endif 111 + VCPU_REGS_RIP, 116 112 NR_VCPU_REGS 117 113 }; 118 114 ··· 195 189 */ 196 190 int multimapped; /* More than one parent_pte? */ 197 191 int root_count; /* Currently serving as active root */ 192 + bool unsync; 193 + bool unsync_children; 198 194 union { 199 195 u64 *parent_pte; /* !multimapped */ 200 196 struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ 201 197 }; 198 + DECLARE_BITMAP(unsync_child_bitmap, 512); 199 + }; 200 + 201 + struct kvm_pv_mmu_op_buffer { 202 + void *ptr; 203 + unsigned len; 204 + unsigned processed; 205 + char buf[512] __aligned(sizeof(long)); 202 206 }; 203 207 204 208 /* ··· 223 207 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); 224 208 void (*prefetch_page)(struct kvm_vcpu *vcpu, 225 209 struct kvm_mmu_page *page); 210 + int (*sync_page)(struct kvm_vcpu *vcpu, 211 + struct kvm_mmu_page *sp); 212 + void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva); 226 213 hpa_t root_hpa; 227 214 int root_level; 228 215 int shadow_root_level; ··· 238 219 int interrupt_window_open; 239 220 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 240 221 DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS); 241 - unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */ 242 - unsigned long rip; /* needs vcpu_load_rsp_rip() */ 222 + /* 223 + * rip and regs accesses must go through 224 + * kvm_{register,rip}_{read,write} functions. 225 + */ 226 + unsigned long regs[NR_VCPU_REGS]; 227 + u32 regs_avail; 228 + u32 regs_dirty; 243 229 244 230 unsigned long cr0; 245 231 unsigned long cr2; ··· 261 237 bool tpr_access_reporting; 262 238 263 239 struct kvm_mmu mmu; 240 + /* only needed in kvm_pv_mmu_op() path, but it's hot so 241 + * put it here to avoid allocation */ 242 + struct kvm_pv_mmu_op_buffer mmu_op_buffer; 264 243 265 244 struct kvm_mmu_memory_cache mmu_pte_chain_cache; 266 245 struct kvm_mmu_memory_cache mmu_rmap_desc_cache; ··· 296 269 u32 error_code; 297 270 } exception; 298 271 272 + struct kvm_queued_interrupt { 273 + bool pending; 274 + u8 nr; 275 + } interrupt; 276 + 299 277 struct { 300 278 int active; 301 279 u8 save_iopl; ··· 326 294 struct page *time_page; 327 295 328 296 bool nmi_pending; 297 + bool nmi_injected; 329 298 330 299 u64 mtrr[0x100]; 331 300 }; ··· 349 316 * Hash table of struct kvm_mmu_page. 
350 317 */ 351 318 struct list_head active_mmu_pages; 319 + struct list_head assigned_dev_head; 320 + struct dmar_domain *intel_iommu_domain; 352 321 struct kvm_pic *vpic; 353 322 struct kvm_ioapic *vioapic; 354 323 struct kvm_pit *vpit; 324 + struct hlist_head irq_ack_notifier_list; 355 325 356 326 int round_robin_prev_vcpu; 357 327 unsigned int tss_addr; ··· 374 338 u32 mmu_flooded; 375 339 u32 mmu_recycled; 376 340 u32 mmu_cache_miss; 341 + u32 mmu_unsync; 377 342 u32 remote_tlb_flush; 378 343 u32 lpages; 379 344 }; ··· 401 364 u32 insn_emulation; 402 365 u32 insn_emulation_fail; 403 366 u32 hypercalls; 367 + u32 irq_injections; 404 368 }; 405 369 406 370 struct descriptor_table { ··· 452 414 unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr); 453 415 void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value, 454 416 int *exception); 455 - void (*cache_regs)(struct kvm_vcpu *vcpu); 456 - void (*decache_regs)(struct kvm_vcpu *vcpu); 417 + void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); 457 418 unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); 458 419 void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); 459 420 ··· 565 528 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, 566 529 u32 error_code); 567 530 531 + void kvm_pic_set_irq(void *opaque, int irq, int level); 532 + 568 533 void kvm_inject_nmi(struct kvm_vcpu *vcpu); 569 534 570 535 void fx_init(struct kvm_vcpu *vcpu); ··· 589 550 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); 590 551 int kvm_mmu_load(struct kvm_vcpu *vcpu); 591 552 void kvm_mmu_unload(struct kvm_vcpu *vcpu); 553 + void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); 592 554 593 555 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); 594 556 595 557 int kvm_fix_hypercall(struct kvm_vcpu *vcpu); 596 558 597 559 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); 560 + void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); 598 561 599 562 void kvm_enable_tdp(void); 600 563 void kvm_disable_tdp(void); ··· 727 686 TASK_SWITCH_GATE = 3, 728 687 }; 729 688 730 - #define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \ 731 - trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 732 - vcpu, 5, d1, d2, d3, d4, d5) 733 - #define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \ 734 - trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 735 - vcpu, 4, d1, d2, d3, d4, 0) 736 - #define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \ 737 - trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 738 - vcpu, 3, d1, d2, d3, 0, 0) 739 - #define KVMTRACE_2D(evt, vcpu, d1, d2, name) \ 740 - trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 741 - vcpu, 2, d1, d2, 0, 0, 0) 742 - #define KVMTRACE_1D(evt, vcpu, d1, name) \ 743 - trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 744 - vcpu, 1, d1, 0, 0, 0, 0) 745 - #define KVMTRACE_0D(evt, vcpu, name) \ 746 - trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 747 - vcpu, 0, 0, 0, 0, 0, 0) 748 - 749 - #ifdef CONFIG_64BIT 750 - # define KVM_EX_ENTRY ".quad" 751 - # define KVM_EX_PUSH "pushq" 752 - #else 753 - # define KVM_EX_ENTRY ".long" 754 - # define KVM_EX_PUSH "pushl" 755 - #endif 756 - 757 689 /* 758 690 * Hardware virtualization extension instructions may fault if a 759 691 * reboot turns off virtualization while processes are running. 
··· 738 724 "666: " insn "\n\t" \ 739 725 ".pushsection .fixup, \"ax\" \n" \ 740 726 "667: \n\t" \ 741 - KVM_EX_PUSH " $666b \n\t" \ 727 + __ASM_SIZE(push) " $666b \n\t" \ 742 728 "jmp kvm_handle_fault_on_reboot \n\t" \ 743 729 ".popsection \n\t" \ 744 730 ".pushsection __ex_table, \"a\" \n\t" \ 745 - KVM_EX_ENTRY " 666b, 667b \n\t" \ 731 + _ASM_PTR " 666b, 667b \n\t" \ 746 732 ".popsection" 747 733 748 734 #define KVM_ARCH_WANT_MMU_NOTIFIER
+3
include/asm-x86/msr-index.h
··· 178 178 #define MSR_IA32_EBL_CR_POWERON 0x0000002a 179 179 #define MSR_IA32_FEATURE_CONTROL 0x0000003a 180 180 181 + #define FEATURE_CONTROL_LOCKED (1<<0) 182 + #define FEATURE_CONTROL_VMXON_ENABLED (1<<2) 183 + 181 184 #define MSR_IA32_APICBASE 0x0000001b 182 185 #define MSR_IA32_APICBASE_BSP (1<<8) 183 186 #define MSR_IA32_APICBASE_ENABLE (1<<11)
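The two FEATURE_CONTROL bits are consumed by the VMX enable path (arch/x86/kvm/vmx.c, not shown here): before executing VMXON the host has to verify that firmware either left IA32_FEATURE_CONTROL unlocked or locked it with VMX enabled. A hedged sketch of that check — the function name is made up, only the MSR and bit names come from the header above:

#include <asm/msr.h>
#include <asm/msr-index.h>

/* Illustration, not a copy of vmx.c: VMXON is only permitted when the
 * feature-control MSR is unlocked, or locked with the VMXON-enable bit set. */
static int vmx_allowed_by_firmware(void)
{
        u64 msr;

        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
        if ((msr & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_VMXON_ENABLED))
                        == FEATURE_CONTROL_LOCKED)
                return 0;       /* BIOS locked the MSR with VMX disabled */

        return 1;
}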
+1
include/asm-x86/pvclock.h
··· 6 6 7 7 /* some helper functions for xen and kvm pv clock sources */ 8 8 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src); 9 + unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); 9 10 void pvclock_read_wallclock(struct pvclock_wall_clock *wall, 10 11 struct pvclock_vcpu_time_info *vcpu, 11 12 struct timespec *ts);
+66 -6
include/linux/kvm.h
··· 311 311 312 312 /* This structure represents a single trace buffer record. */ 313 313 struct kvm_trace_rec { 314 - __u32 event:28; 315 - __u32 extra_u32:3; 316 - __u32 cycle_in:1; 314 + /* variable rec_val 315 + * is split into: 316 + * bits 0 - 27 -> event id 317 + * bits 28 -30 -> number of extra data args of size u32 318 + * bits 31 -> binary indicator for if tsc is in record 319 + */ 320 + __u32 rec_val; 317 321 __u32 pid; 318 322 __u32 vcpu_id; 319 323 union { 320 324 struct { 321 - __u64 cycle_u64; 325 + __u64 timestamp; 322 326 __u32 extra_u32[KVM_TRC_EXTRA_MAX]; 323 - } __attribute__((packed)) cycle; 327 + } __attribute__((packed)) timestamp; 324 328 struct { 325 329 __u32 extra_u32[KVM_TRC_EXTRA_MAX]; 326 - } nocycle; 330 + } notimestamp; 327 331 } u; 328 332 }; 333 + 334 + #define TRACE_REC_EVENT_ID(val) \ 335 + (0x0fffffff & (val)) 336 + #define TRACE_REC_NUM_DATA_ARGS(val) \ 337 + (0x70000000 & ((val) << 28)) 338 + #define TRACE_REC_TCS(val) \ 339 + (0x80000000 & ((val) << 31)) 329 340 330 341 #define KVMIO 0xAE 331 342 ··· 383 372 #define KVM_CAP_MP_STATE 14 384 373 #define KVM_CAP_COALESCED_MMIO 15 385 374 #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ 375 + #if defined(CONFIG_X86)||defined(CONFIG_IA64) 376 + #define KVM_CAP_DEVICE_ASSIGNMENT 17 377 + #endif 378 + #define KVM_CAP_IOMMU 18 386 379 387 380 /* 388 381 * ioctls for VM fds ··· 416 401 _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) 417 402 #define KVM_UNREGISTER_COALESCED_MMIO \ 418 403 _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) 404 + #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ 405 + struct kvm_assigned_pci_dev) 406 + #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ 407 + struct kvm_assigned_irq) 419 408 420 409 /* 421 410 * ioctls for vcpu fds ··· 458 439 #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) 459 440 #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) 460 441 #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) 442 + 443 + #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) 444 + #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) 445 + #define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) 446 + #define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) 447 + #define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) 448 + #define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) 449 + #define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) 450 + #define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) 451 + #define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) 452 + #define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) 453 + #define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) 454 + #define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) 455 + #define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) 456 + #define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) 457 + #define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) 458 + #define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) 459 + #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) 460 + #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) 461 + #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) 462 + #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) 463 + #define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) 464 + #define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) 465 + #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) 466 + #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) 467 + 468 + struct kvm_assigned_pci_dev { 469 + __u32 assigned_dev_id; 470 + __u32 busnr; 471 + __u32 devfn; 472 + __u32 flags; 473 + }; 474 + 475 + struct kvm_assigned_irq { 476 + __u32 assigned_dev_id; 477 + __u32 
host_irq; 478 + __u32 guest_irq; 479 + __u32 flags; 480 + }; 481 + 482 + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 461 483 462 484 #endif
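The two ioctls and structures above are the userspace half of the device-assignment support implemented in virt/kvm/kvm_main.c further down. A minimal userspace sketch — the bus/devfn/IRQ numbers are invented for the example, and a real VMM would discover them from sysfs:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hand a host PCI device (example: 00:19.0, host IRQ 20) to a guest and
 * route its interrupt to guest GSI 10.  vm_fd is a KVM VM file descriptor
 * obtained via KVM_CREATE_VM; all device numbers are illustrative. */
static int assign_example_device(int vm_fd)
{
        struct kvm_assigned_pci_dev dev;
        struct kvm_assigned_irq irq;

        memset(&dev, 0, sizeof(dev));
        dev.assigned_dev_id = 1;                   /* any unique token */
        dev.busnr = 0x00;
        dev.devfn = (0x19 << 3) | 0x0;             /* PCI_DEVFN(slot, func) */
        dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;   /* require VT-d mapping */

        if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
                return -1;

        memset(&irq, 0, sizeof(irq));
        irq.assigned_dev_id = 1;
        irq.host_irq = 20;                         /* 0 means "use dev->irq" */
        irq.guest_irq = 10;

        return ioctl(vm_fd, KVM_ASSIGN_IRQ, &irq);
}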
+82
include/linux/kvm_host.h
··· 34 34 #define KVM_REQ_MMU_RELOAD 3 35 35 #define KVM_REQ_TRIPLE_FAULT 4 36 36 #define KVM_REQ_PENDING_TIMER 5 37 + #define KVM_REQ_UNHALT 6 38 + #define KVM_REQ_MMU_SYNC 7 37 39 38 40 struct kvm_vcpu; 39 41 extern struct kmem_cache *kvm_vcpu_cache; ··· 281 279 282 280 struct kvm *kvm_arch_create_vm(void); 283 281 void kvm_arch_destroy_vm(struct kvm *kvm); 282 + void kvm_free_all_assigned_devices(struct kvm *kvm); 284 283 285 284 int kvm_cpu_get_interrupt(struct kvm_vcpu *v); 286 285 int kvm_cpu_has_interrupt(struct kvm_vcpu *v); 287 286 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 288 287 void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 288 + 289 + int kvm_is_mmio_pfn(pfn_t pfn); 290 + 291 + struct kvm_irq_ack_notifier { 292 + struct hlist_node link; 293 + unsigned gsi; 294 + void (*irq_acked)(struct kvm_irq_ack_notifier *kian); 295 + }; 296 + 297 + struct kvm_assigned_dev_kernel { 298 + struct kvm_irq_ack_notifier ack_notifier; 299 + struct work_struct interrupt_work; 300 + struct list_head list; 301 + int assigned_dev_id; 302 + int host_busnr; 303 + int host_devfn; 304 + int host_irq; 305 + int guest_irq; 306 + int irq_requested; 307 + struct pci_dev *dev; 308 + struct kvm *kvm; 309 + }; 310 + void kvm_set_irq(struct kvm *kvm, int irq, int level); 311 + void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); 312 + void kvm_register_irq_ack_notifier(struct kvm *kvm, 313 + struct kvm_irq_ack_notifier *kian); 314 + void kvm_unregister_irq_ack_notifier(struct kvm *kvm, 315 + struct kvm_irq_ack_notifier *kian); 316 + 317 + #ifdef CONFIG_DMAR 318 + int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, 319 + unsigned long npages); 320 + int kvm_iommu_map_guest(struct kvm *kvm, 321 + struct kvm_assigned_dev_kernel *assigned_dev); 322 + int kvm_iommu_unmap_guest(struct kvm *kvm); 323 + #else /* CONFIG_DMAR */ 324 + static inline int kvm_iommu_map_pages(struct kvm *kvm, 325 + gfn_t base_gfn, 326 + unsigned long npages) 327 + { 328 + return 0; 329 + } 330 + 331 + static inline int kvm_iommu_map_guest(struct kvm *kvm, 332 + struct kvm_assigned_dev_kernel 333 + *assigned_dev) 334 + { 335 + return -ENODEV; 336 + } 337 + 338 + static inline int kvm_iommu_unmap_guest(struct kvm *kvm) 339 + { 340 + return 0; 341 + } 342 + #endif /* CONFIG_DMAR */ 289 343 290 344 static inline void kvm_guest_enter(void) 291 345 { ··· 365 307 return (gpa_t)gfn << PAGE_SHIFT; 366 308 } 367 309 310 + static inline hpa_t pfn_to_hpa(pfn_t pfn) 311 + { 312 + return (hpa_t)pfn << PAGE_SHIFT; 313 + } 314 + 368 315 static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) 369 316 { 370 317 set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); ··· 388 325 }; 389 326 extern struct kvm_stats_debugfs_item debugfs_entries[]; 390 327 extern struct dentry *kvm_debugfs_dir; 328 + 329 + #define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \ 330 + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 331 + vcpu, 5, d1, d2, d3, d4, d5) 332 + #define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \ 333 + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 334 + vcpu, 4, d1, d2, d3, d4, 0) 335 + #define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \ 336 + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 337 + vcpu, 3, d1, d2, d3, 0, 0) 338 + #define KVMTRACE_2D(evt, vcpu, d1, d2, name) \ 339 + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 340 + vcpu, 2, d1, d2, 0, 0, 0) 341 + #define KVMTRACE_1D(evt, vcpu, d1, name) \ 342 + 
trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 343 + vcpu, 1, d1, 0, 0, 0, 0) 344 + #define KVMTRACE_0D(evt, vcpu, name) \ 345 + trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ 346 + vcpu, 0, 0, 0, 0, 0, 0) 391 347 392 348 #ifdef CONFIG_KVM_TRACE 393 349 int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
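struct kvm_irq_ack_notifier above is how a device model learns that the guest has acknowledged its interrupt; the in-tree user is the assigned-device code in kvm_main.c below (kvm_assigned_dev_ack_irq()). A minimal sketch of the registration pattern, with a purely hypothetical consumer:

#include <linux/kvm_host.h>

/* Hypothetical consumer of the new ack-notifier API; the real user in this
 * series is the assigned-device code in virt/kvm/kvm_main.c. */
struct example_dev {
        struct kvm *kvm;
        int guest_gsi;
        struct kvm_irq_ack_notifier ack;
};

static void example_irq_acked(struct kvm_irq_ack_notifier *kian)
{
        struct example_dev *dev = container_of(kian, struct example_dev, ack);

        /* the guest EOI'd our level-triggered line: lower it again */
        kvm_set_irq(dev->kvm, dev->guest_gsi, 0);
}

static void example_register(struct example_dev *dev)
{
        dev->ack.gsi = dev->guest_gsi;
        dev->ack.irq_acked = example_irq_acked;
        kvm_register_irq_ack_notifier(dev->kvm, &dev->ack);
}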
+14 -8
virt/kvm/ioapic.c
··· 39 39 40 40 #include "ioapic.h" 41 41 #include "lapic.h" 42 + #include "irq.h" 42 43 43 44 #if 0 44 45 #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) ··· 286 285 } 287 286 } 288 287 289 - static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi) 288 + static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int gsi, 289 + int trigger_mode) 290 290 { 291 291 union ioapic_redir_entry *ent; 292 292 293 293 ent = &ioapic->redirtbl[gsi]; 294 - ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); 295 294 296 - ent->fields.remote_irr = 0; 297 - if (!ent->fields.mask && (ioapic->irr & (1 << gsi))) 298 - ioapic_service(ioapic, gsi); 295 + kvm_notify_acked_irq(ioapic->kvm, gsi); 296 + 297 + if (trigger_mode == IOAPIC_LEVEL_TRIG) { 298 + ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG); 299 + ent->fields.remote_irr = 0; 300 + if (!ent->fields.mask && (ioapic->irr & (1 << gsi))) 301 + ioapic_service(ioapic, gsi); 302 + } 299 303 } 300 304 301 - void kvm_ioapic_update_eoi(struct kvm *kvm, int vector) 305 + void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) 302 306 { 303 307 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 304 308 int i; 305 309 306 310 for (i = 0; i < IOAPIC_NUM_PINS; i++) 307 311 if (ioapic->redirtbl[i].fields.vector == vector) 308 - __kvm_ioapic_update_eoi(ioapic, i); 312 + __kvm_ioapic_update_eoi(ioapic, i, trigger_mode); 309 313 } 310 314 311 315 static int ioapic_in_range(struct kvm_io_device *this, gpa_t addr, ··· 386 380 break; 387 381 #ifdef CONFIG_IA64 388 382 case IOAPIC_REG_EOI: 389 - kvm_ioapic_update_eoi(ioapic->kvm, data); 383 + kvm_ioapic_update_eoi(ioapic->kvm, data, IOAPIC_LEVEL_TRIG); 390 384 break; 391 385 #endif 392 386
+2 -8
virt/kvm/ioapic.h
··· 58 58 } redirtbl[IOAPIC_NUM_PINS]; 59 59 struct kvm_io_device dev; 60 60 struct kvm *kvm; 61 + void (*ack_notifier)(void *opaque, int irq); 61 62 }; 62 63 63 64 #ifdef DEBUG ··· 79 78 return kvm->arch.vioapic; 80 79 } 81 80 82 - #ifdef CONFIG_IA64 83 - static inline int irqchip_in_kernel(struct kvm *kvm) 84 - { 85 - return 1; 86 - } 87 - #endif 88 - 89 81 struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector, 90 82 unsigned long bitmap); 91 - void kvm_ioapic_update_eoi(struct kvm *kvm, int vector); 83 + void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); 92 84 int kvm_ioapic_init(struct kvm *kvm); 93 85 void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); 94 86 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
+60
virt/kvm/irq_comm.c
··· 1 + /* 2 + * irq_comm.c: Common API for in kernel interrupt controller 3 + * Copyright (c) 2007, Intel Corporation. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + * You should have received a copy of the GNU General Public License along with 15 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 16 + * Place - Suite 330, Boston, MA 02111-1307 USA. 17 + * Authors: 18 + * Yaozu (Eddie) Dong <Eddie.dong@intel.com> 19 + * 20 + */ 21 + 22 + #include <linux/kvm_host.h> 23 + #include "irq.h" 24 + 25 + #include "ioapic.h" 26 + 27 + /* This should be called with the kvm->lock mutex held */ 28 + void kvm_set_irq(struct kvm *kvm, int irq, int level) 29 + { 30 + /* Not possible to detect if the guest uses the PIC or the 31 + * IOAPIC. So set the bit in both. The guest will ignore 32 + * writes to the unused one. 33 + */ 34 + kvm_ioapic_set_irq(kvm->arch.vioapic, irq, level); 35 + #ifdef CONFIG_X86 36 + kvm_pic_set_irq(pic_irqchip(kvm), irq, level); 37 + #endif 38 + } 39 + 40 + void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi) 41 + { 42 + struct kvm_irq_ack_notifier *kian; 43 + struct hlist_node *n; 44 + 45 + hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link) 46 + if (kian->gsi == gsi) 47 + kian->irq_acked(kian); 48 + } 49 + 50 + void kvm_register_irq_ack_notifier(struct kvm *kvm, 51 + struct kvm_irq_ack_notifier *kian) 52 + { 53 + hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list); 54 + } 55 + 56 + void kvm_unregister_irq_ack_notifier(struct kvm *kvm, 57 + struct kvm_irq_ack_notifier *kian) 58 + { 59 + hlist_del(&kian->link); 60 + }
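As the comment above says, kvm_set_irq() must run under kvm->lock and simply drives both in-kernel irqchips. A small sketch of the usual edge-triggered pulse, in a hypothetical caller (the assigned-device work handler in kvm_main.c below follows the same lock-then-set pattern):

#include <linux/kvm_host.h>

/* Illustrative only: raise and immediately lower a guest GSI, the usual
 * pattern for edge-triggered sources.  The caller is hypothetical; the
 * locking requirement comes from the comment on kvm_set_irq() above. */
static void example_pulse_gsi(struct kvm *kvm, int gsi)
{
        mutex_lock(&kvm->lock);
        kvm_set_irq(kvm, gsi, 1);
        kvm_set_irq(kvm, gsi, 0);
        mutex_unlock(&kvm->lock);
}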
+339 -47
virt/kvm/kvm_main.c
··· 51 51 #include "coalesced_mmio.h" 52 52 #endif 53 53 54 + #ifdef KVM_CAP_DEVICE_ASSIGNMENT 55 + #include <linux/pci.h> 56 + #include <linux/interrupt.h> 57 + #include "irq.h" 58 + #endif 59 + 54 60 MODULE_AUTHOR("Qumranet"); 55 61 MODULE_LICENSE("GPL"); 56 62 ··· 77 71 78 72 bool kvm_rebooting; 79 73 74 + #ifdef KVM_CAP_DEVICE_ASSIGNMENT 75 + static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, 76 + int assigned_dev_id) 77 + { 78 + struct list_head *ptr; 79 + struct kvm_assigned_dev_kernel *match; 80 + 81 + list_for_each(ptr, head) { 82 + match = list_entry(ptr, struct kvm_assigned_dev_kernel, list); 83 + if (match->assigned_dev_id == assigned_dev_id) 84 + return match; 85 + } 86 + return NULL; 87 + } 88 + 89 + static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) 90 + { 91 + struct kvm_assigned_dev_kernel *assigned_dev; 92 + 93 + assigned_dev = container_of(work, struct kvm_assigned_dev_kernel, 94 + interrupt_work); 95 + 96 + /* This is taken to safely inject irq inside the guest. When 97 + * the interrupt injection (or the ioapic code) uses a 98 + * finer-grained lock, update this 99 + */ 100 + mutex_lock(&assigned_dev->kvm->lock); 101 + kvm_set_irq(assigned_dev->kvm, 102 + assigned_dev->guest_irq, 1); 103 + mutex_unlock(&assigned_dev->kvm->lock); 104 + kvm_put_kvm(assigned_dev->kvm); 105 + } 106 + 107 + /* FIXME: Implement the OR logic needed to make shared interrupts on 108 + * this line behave properly 109 + */ 110 + static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) 111 + { 112 + struct kvm_assigned_dev_kernel *assigned_dev = 113 + (struct kvm_assigned_dev_kernel *) dev_id; 114 + 115 + kvm_get_kvm(assigned_dev->kvm); 116 + schedule_work(&assigned_dev->interrupt_work); 117 + disable_irq_nosync(irq); 118 + return IRQ_HANDLED; 119 + } 120 + 121 + /* Ack the irq line for an assigned device */ 122 + static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) 123 + { 124 + struct kvm_assigned_dev_kernel *dev; 125 + 126 + if (kian->gsi == -1) 127 + return; 128 + 129 + dev = container_of(kian, struct kvm_assigned_dev_kernel, 130 + ack_notifier); 131 + kvm_set_irq(dev->kvm, dev->guest_irq, 0); 132 + enable_irq(dev->host_irq); 133 + } 134 + 135 + static void kvm_free_assigned_device(struct kvm *kvm, 136 + struct kvm_assigned_dev_kernel 137 + *assigned_dev) 138 + { 139 + if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested) 140 + free_irq(assigned_dev->host_irq, (void *)assigned_dev); 141 + 142 + kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); 143 + 144 + if (cancel_work_sync(&assigned_dev->interrupt_work)) 145 + /* We had pending work. That means we will have to take 146 + * care of kvm_put_kvm. 
147 + */ 148 + kvm_put_kvm(kvm); 149 + 150 + pci_release_regions(assigned_dev->dev); 151 + pci_disable_device(assigned_dev->dev); 152 + pci_dev_put(assigned_dev->dev); 153 + 154 + list_del(&assigned_dev->list); 155 + kfree(assigned_dev); 156 + } 157 + 158 + void kvm_free_all_assigned_devices(struct kvm *kvm) 159 + { 160 + struct list_head *ptr, *ptr2; 161 + struct kvm_assigned_dev_kernel *assigned_dev; 162 + 163 + list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) { 164 + assigned_dev = list_entry(ptr, 165 + struct kvm_assigned_dev_kernel, 166 + list); 167 + 168 + kvm_free_assigned_device(kvm, assigned_dev); 169 + } 170 + } 171 + 172 + static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, 173 + struct kvm_assigned_irq 174 + *assigned_irq) 175 + { 176 + int r = 0; 177 + struct kvm_assigned_dev_kernel *match; 178 + 179 + mutex_lock(&kvm->lock); 180 + 181 + match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, 182 + assigned_irq->assigned_dev_id); 183 + if (!match) { 184 + mutex_unlock(&kvm->lock); 185 + return -EINVAL; 186 + } 187 + 188 + if (match->irq_requested) { 189 + match->guest_irq = assigned_irq->guest_irq; 190 + match->ack_notifier.gsi = assigned_irq->guest_irq; 191 + mutex_unlock(&kvm->lock); 192 + return 0; 193 + } 194 + 195 + INIT_WORK(&match->interrupt_work, 196 + kvm_assigned_dev_interrupt_work_handler); 197 + 198 + if (irqchip_in_kernel(kvm)) { 199 + if (!capable(CAP_SYS_RAWIO)) { 200 + r = -EPERM; 201 + goto out_release; 202 + } 203 + 204 + if (assigned_irq->host_irq) 205 + match->host_irq = assigned_irq->host_irq; 206 + else 207 + match->host_irq = match->dev->irq; 208 + match->guest_irq = assigned_irq->guest_irq; 209 + match->ack_notifier.gsi = assigned_irq->guest_irq; 210 + match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; 211 + kvm_register_irq_ack_notifier(kvm, &match->ack_notifier); 212 + 213 + /* Even though this is PCI, we don't want to use shared 214 + * interrupts. Sharing host devices with guest-assigned devices 215 + * on the same interrupt line is not a happy situation: there 216 + * are going to be long delays in accepting, acking, etc. 
217 + */ 218 + if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0, 219 + "kvm_assigned_device", (void *)match)) { 220 + r = -EIO; 221 + goto out_release; 222 + } 223 + } 224 + 225 + match->irq_requested = true; 226 + mutex_unlock(&kvm->lock); 227 + return r; 228 + out_release: 229 + mutex_unlock(&kvm->lock); 230 + kvm_free_assigned_device(kvm, match); 231 + return r; 232 + } 233 + 234 + static int kvm_vm_ioctl_assign_device(struct kvm *kvm, 235 + struct kvm_assigned_pci_dev *assigned_dev) 236 + { 237 + int r = 0; 238 + struct kvm_assigned_dev_kernel *match; 239 + struct pci_dev *dev; 240 + 241 + mutex_lock(&kvm->lock); 242 + 243 + match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head, 244 + assigned_dev->assigned_dev_id); 245 + if (match) { 246 + /* device already assigned */ 247 + r = -EINVAL; 248 + goto out; 249 + } 250 + 251 + match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL); 252 + if (match == NULL) { 253 + printk(KERN_INFO "%s: Couldn't allocate memory\n", 254 + __func__); 255 + r = -ENOMEM; 256 + goto out; 257 + } 258 + dev = pci_get_bus_and_slot(assigned_dev->busnr, 259 + assigned_dev->devfn); 260 + if (!dev) { 261 + printk(KERN_INFO "%s: host device not found\n", __func__); 262 + r = -EINVAL; 263 + goto out_free; 264 + } 265 + if (pci_enable_device(dev)) { 266 + printk(KERN_INFO "%s: Could not enable PCI device\n", __func__); 267 + r = -EBUSY; 268 + goto out_put; 269 + } 270 + r = pci_request_regions(dev, "kvm_assigned_device"); 271 + if (r) { 272 + printk(KERN_INFO "%s: Could not get access to device regions\n", 273 + __func__); 274 + goto out_disable; 275 + } 276 + match->assigned_dev_id = assigned_dev->assigned_dev_id; 277 + match->host_busnr = assigned_dev->busnr; 278 + match->host_devfn = assigned_dev->devfn; 279 + match->dev = dev; 280 + 281 + match->kvm = kvm; 282 + 283 + list_add(&match->list, &kvm->arch.assigned_dev_head); 284 + 285 + if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) { 286 + r = kvm_iommu_map_guest(kvm, match); 287 + if (r) 288 + goto out_list_del; 289 + } 290 + 291 + out: 292 + mutex_unlock(&kvm->lock); 293 + return r; 294 + out_list_del: 295 + list_del(&match->list); 296 + pci_release_regions(dev); 297 + out_disable: 298 + pci_disable_device(dev); 299 + out_put: 300 + pci_dev_put(dev); 301 + out_free: 302 + kfree(match); 303 + mutex_unlock(&kvm->lock); 304 + return r; 305 + } 306 + #endif 307 + 80 308 static inline int valid_vcpu(int n) 81 309 { 82 310 return likely(n >= 0 && n < KVM_MAX_VCPUS); 311 + } 312 + 313 + inline int kvm_is_mmio_pfn(pfn_t pfn) 314 + { 315 + if (pfn_valid(pfn)) 316 + return PageReserved(pfn_to_page(pfn)); 317 + 318 + return true; 83 319 } 84 320 85 321 /* ··· 818 570 } 819 571 820 572 kvm_free_physmem_slot(&old, &new); 573 + #ifdef CONFIG_DMAR 574 + /* map the pages in iommu page table */ 575 + r = kvm_iommu_map_pages(kvm, base_gfn, npages); 576 + if (r) 577 + goto out; 578 + #endif 821 579 return 0; 822 580 823 581 out_free: ··· 962 708 } 963 709 EXPORT_SYMBOL_GPL(gfn_to_hva); 964 710 965 - /* 966 - * Requires current->mm->mmap_sem to be held 967 - */ 968 711 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) 969 712 { 970 713 struct page *page[1]; ··· 977 726 return page_to_pfn(bad_page); 978 727 } 979 728 980 - npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page, 981 - NULL); 729 + npages = get_user_pages_fast(addr, 1, 1, page); 982 730 983 731 if (unlikely(npages != 1)) { 984 732 struct vm_area_struct *vma; 985 733 734 + down_read(&current->mm->mmap_sem); 986 735 vma = 
find_vma(current->mm, addr); 736 + 987 737 if (vma == NULL || addr < vma->vm_start || 988 738 !(vma->vm_flags & VM_PFNMAP)) { 739 + up_read(&current->mm->mmap_sem); 989 740 get_page(bad_page); 990 741 return page_to_pfn(bad_page); 991 742 } 992 743 993 744 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 994 - BUG_ON(pfn_valid(pfn)); 745 + up_read(&current->mm->mmap_sem); 746 + BUG_ON(!kvm_is_mmio_pfn(pfn)); 995 747 } else 996 748 pfn = page_to_pfn(page[0]); 997 749 ··· 1008 754 pfn_t pfn; 1009 755 1010 756 pfn = gfn_to_pfn(kvm, gfn); 1011 - if (pfn_valid(pfn)) 757 + if (!kvm_is_mmio_pfn(pfn)) 1012 758 return pfn_to_page(pfn); 1013 759 1014 - WARN_ON(!pfn_valid(pfn)); 760 + WARN_ON(kvm_is_mmio_pfn(pfn)); 1015 761 1016 762 get_page(bad_page); 1017 763 return bad_page; ··· 1027 773 1028 774 void kvm_release_pfn_clean(pfn_t pfn) 1029 775 { 1030 - if (pfn_valid(pfn)) 776 + if (!kvm_is_mmio_pfn(pfn)) 1031 777 put_page(pfn_to_page(pfn)); 1032 778 } 1033 779 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean); ··· 1053 799 1054 800 void kvm_set_pfn_dirty(pfn_t pfn) 1055 801 { 1056 - if (pfn_valid(pfn)) { 802 + if (!kvm_is_mmio_pfn(pfn)) { 1057 803 struct page *page = pfn_to_page(pfn); 1058 804 if (!PageReserved(page)) 1059 805 SetPageDirty(page); ··· 1063 809 1064 810 void kvm_set_pfn_accessed(pfn_t pfn) 1065 811 { 1066 - if (pfn_valid(pfn)) 812 + if (!kvm_is_mmio_pfn(pfn)) 1067 813 mark_page_accessed(pfn_to_page(pfn)); 1068 814 } 1069 815 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed); 1070 816 1071 817 void kvm_get_pfn(pfn_t pfn) 1072 818 { 1073 - if (pfn_valid(pfn)) 819 + if (!kvm_is_mmio_pfn(pfn)) 1074 820 get_page(pfn_to_page(pfn)); 1075 821 } 1076 822 EXPORT_SYMBOL_GPL(kvm_get_pfn); ··· 1226 972 for (;;) { 1227 973 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); 1228 974 1229 - if (kvm_cpu_has_interrupt(vcpu)) 975 + if (kvm_cpu_has_interrupt(vcpu) || 976 + kvm_cpu_has_pending_timer(vcpu) || 977 + kvm_arch_vcpu_runnable(vcpu)) { 978 + set_bit(KVM_REQ_UNHALT, &vcpu->requests); 1230 979 break; 1231 - if (kvm_cpu_has_pending_timer(vcpu)) 1232 - break; 1233 - if (kvm_arch_vcpu_runnable(vcpu)) 1234 - break; 980 + } 1235 981 if (signal_pending(current)) 1236 982 break; 1237 983 ··· 1328 1074 1329 1075 r = kvm_arch_vcpu_setup(vcpu); 1330 1076 if (r) 1331 - goto vcpu_destroy; 1077 + return r; 1332 1078 1333 1079 mutex_lock(&kvm->lock); 1334 1080 if (kvm->vcpus[n]) { 1335 1081 r = -EEXIST; 1336 - mutex_unlock(&kvm->lock); 1337 1082 goto vcpu_destroy; 1338 1083 } 1339 1084 kvm->vcpus[n] = vcpu; ··· 1348 1095 unlink: 1349 1096 mutex_lock(&kvm->lock); 1350 1097 kvm->vcpus[n] = NULL; 1351 - mutex_unlock(&kvm->lock); 1352 1098 vcpu_destroy: 1099 + mutex_unlock(&kvm->lock); 1353 1100 kvm_arch_vcpu_destroy(vcpu); 1354 1101 return r; 1355 1102 } ··· 1371 1118 struct kvm_vcpu *vcpu = filp->private_data; 1372 1119 void __user *argp = (void __user *)arg; 1373 1120 int r; 1121 + struct kvm_fpu *fpu = NULL; 1122 + struct kvm_sregs *kvm_sregs = NULL; 1374 1123 1375 1124 if (vcpu->kvm->mm != current->mm) 1376 1125 return -EIO; ··· 1420 1165 break; 1421 1166 } 1422 1167 case KVM_GET_SREGS: { 1423 - struct kvm_sregs kvm_sregs; 1424 - 1425 - memset(&kvm_sregs, 0, sizeof kvm_sregs); 1426 - r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs); 1168 + kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 1169 + r = -ENOMEM; 1170 + if (!kvm_sregs) 1171 + goto out; 1172 + r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs); 1427 1173 if (r) 1428 1174 goto out; 1429 1175 r = -EFAULT; 1430 - if (copy_to_user(argp, 
&kvm_sregs, sizeof kvm_sregs)) 1176 + if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs))) 1431 1177 goto out; 1432 1178 r = 0; 1433 1179 break; 1434 1180 } 1435 1181 case KVM_SET_SREGS: { 1436 - struct kvm_sregs kvm_sregs; 1437 - 1438 - r = -EFAULT; 1439 - if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs)) 1182 + kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL); 1183 + r = -ENOMEM; 1184 + if (!kvm_sregs) 1440 1185 goto out; 1441 - r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs); 1186 + r = -EFAULT; 1187 + if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs))) 1188 + goto out; 1189 + r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs); 1442 1190 if (r) 1443 1191 goto out; 1444 1192 r = 0; ··· 1522 1264 break; 1523 1265 } 1524 1266 case KVM_GET_FPU: { 1525 - struct kvm_fpu fpu; 1526 - 1527 - memset(&fpu, 0, sizeof fpu); 1528 - r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu); 1267 + fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 1268 + r = -ENOMEM; 1269 + if (!fpu) 1270 + goto out; 1271 + r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu); 1529 1272 if (r) 1530 1273 goto out; 1531 1274 r = -EFAULT; 1532 - if (copy_to_user(argp, &fpu, sizeof fpu)) 1275 + if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu))) 1533 1276 goto out; 1534 1277 r = 0; 1535 1278 break; 1536 1279 } 1537 1280 case KVM_SET_FPU: { 1538 - struct kvm_fpu fpu; 1539 - 1540 - r = -EFAULT; 1541 - if (copy_from_user(&fpu, argp, sizeof fpu)) 1281 + fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL); 1282 + r = -ENOMEM; 1283 + if (!fpu) 1542 1284 goto out; 1543 - r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu); 1285 + r = -EFAULT; 1286 + if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu))) 1287 + goto out; 1288 + r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu); 1544 1289 if (r) 1545 1290 goto out; 1546 1291 r = 0; ··· 1553 1292 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); 1554 1293 } 1555 1294 out: 1295 + kfree(fpu); 1296 + kfree(kvm_sregs); 1556 1297 return r; 1557 1298 } 1558 1299 ··· 1623 1360 break; 1624 1361 } 1625 1362 #endif 1363 + #ifdef KVM_CAP_DEVICE_ASSIGNMENT 1364 + case KVM_ASSIGN_PCI_DEVICE: { 1365 + struct kvm_assigned_pci_dev assigned_dev; 1366 + 1367 + r = -EFAULT; 1368 + if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) 1369 + goto out; 1370 + r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); 1371 + if (r) 1372 + goto out; 1373 + break; 1374 + } 1375 + case KVM_ASSIGN_IRQ: { 1376 + struct kvm_assigned_irq assigned_irq; 1377 + 1378 + r = -EFAULT; 1379 + if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq)) 1380 + goto out; 1381 + r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq); 1382 + if (r) 1383 + goto out; 1384 + break; 1385 + } 1386 + #endif 1626 1387 default: 1627 1388 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 1628 1389 } ··· 1656 1369 1657 1370 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1658 1371 { 1372 + struct page *page[1]; 1373 + unsigned long addr; 1374 + int npages; 1375 + gfn_t gfn = vmf->pgoff; 1659 1376 struct kvm *kvm = vma->vm_file->private_data; 1660 - struct page *page; 1661 1377 1662 - if (!kvm_is_visible_gfn(kvm, vmf->pgoff)) 1378 + addr = gfn_to_hva(kvm, gfn); 1379 + if (kvm_is_error_hva(addr)) 1663 1380 return VM_FAULT_SIGBUS; 1664 - page = gfn_to_page(kvm, vmf->pgoff); 1665 - if (is_error_page(page)) { 1666 - kvm_release_page_clean(page); 1381 + 1382 + npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page, 1383 + NULL); 1384 + if (unlikely(npages != 1)) 1667 1385 return VM_FAULT_SIGBUS; 1668 - } 1669 - vmf->page = 
page; 1386 + 1387 + vmf->page = page[0]; 1670 1388 return 0; 1671 1389 } 1672 1390
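
For illustration only: the two new VM ioctls wired up above, KVM_ASSIGN_PCI_DEVICE and KVM_ASSIGN_IRQ, are issued by userspace on the VM file descriptor. Below is a minimal sketch of how a VMM might drive them, assuming the struct kvm_assigned_pci_dev / struct kvm_assigned_irq layouts and the KVM_DEV_ASSIGN_ENABLE_IOMMU flag exported through <linux/kvm.h> by this series (only the members dereferenced in the kernel code above are relied on); note that the IRQ path requires CAP_SYS_RAWIO when the in-kernel irqchip is in use.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: assign host PCI device 00:19.0 (an assumed example BDF)
 * to the guest and route its interrupt to guest GSI 10. */
static int assign_host_device(int vm_fd)
{
    struct kvm_assigned_pci_dev dev;
    struct kvm_assigned_irq irq;

    memset(&dev, 0, sizeof(dev));
    dev.assigned_dev_id = 1;                 /* VMM-chosen handle */
    dev.busnr = 0x00;                        /* host bus number */
    dev.devfn = (0x19 << 3) | 0;             /* PCI_DEVFN(0x19, 0) */
    dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU; /* request VT-d mappings */
    if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0) {
        perror("KVM_ASSIGN_PCI_DEVICE");
        return -1;
    }

    memset(&irq, 0, sizeof(irq));
    irq.assigned_dev_id = 1;                 /* same handle as above */
    irq.host_irq = 0;                        /* 0: kernel falls back to dev->irq */
    irq.guest_irq = 10;                      /* GSI injected into the guest */
    if (ioctl(vm_fd, KVM_ASSIGN_IRQ, &irq) < 0) {
        perror("KVM_ASSIGN_IRQ");
        return -1;
    }
    return 0;
}

Passing host_irq as 0 lets the kernel fall back to the device's own dev->irq, and KVM_DEV_ASSIGN_ENABLE_IOMMU is what triggers the kvm_iommu_map_guest() path implemented in vtd.c further down.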
+16 -14
virt/kvm/kvm_trace.c
··· 17 17 #include <linux/module.h> 18 18 #include <linux/relay.h> 19 19 #include <linux/debugfs.h> 20 + #include <linux/ktime.h> 20 21 21 22 #include <linux/kvm_host.h> 22 23 ··· 36 35 struct kvm_trace_probe { 37 36 const char *name; 38 37 const char *format; 39 - u32 cycle_in; 38 + u32 timestamp_in; 40 39 marker_probe_func *probe_func; 41 40 }; 42 41 43 - static inline int calc_rec_size(int cycle, int extra) 42 + static inline int calc_rec_size(int timestamp, int extra) 44 43 { 45 44 int rec_size = KVM_TRC_HEAD_SIZE; 46 45 47 46 rec_size += extra; 48 - return cycle ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size; 47 + return timestamp ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size; 49 48 } 50 49 51 50 static void kvm_add_trace(void *probe_private, void *call_data, ··· 55 54 struct kvm_trace *kt = kvm_trace; 56 55 struct kvm_trace_rec rec; 57 56 struct kvm_vcpu *vcpu; 58 - int i, extra, size; 57 + int i, size; 58 + u32 extra; 59 59 60 60 if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING)) 61 61 return; 62 62 63 - rec.event = va_arg(*args, u32); 63 + rec.rec_val = TRACE_REC_EVENT_ID(va_arg(*args, u32)); 64 64 vcpu = va_arg(*args, struct kvm_vcpu *); 65 65 rec.pid = current->tgid; 66 66 rec.vcpu_id = vcpu->vcpu_id; ··· 69 67 extra = va_arg(*args, u32); 70 68 WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX)); 71 69 extra = min_t(u32, extra, KVM_TRC_EXTRA_MAX); 72 - rec.extra_u32 = extra; 73 70 74 - rec.cycle_in = p->cycle_in; 71 + rec.rec_val |= TRACE_REC_TCS(p->timestamp_in) 72 + | TRACE_REC_NUM_DATA_ARGS(extra); 75 73 76 - if (rec.cycle_in) { 77 - rec.u.cycle.cycle_u64 = get_cycles(); 74 + if (p->timestamp_in) { 75 + rec.u.timestamp.timestamp = ktime_to_ns(ktime_get()); 78 76 79 - for (i = 0; i < rec.extra_u32; i++) 80 - rec.u.cycle.extra_u32[i] = va_arg(*args, u32); 77 + for (i = 0; i < extra; i++) 78 + rec.u.timestamp.extra_u32[i] = va_arg(*args, u32); 81 79 } else { 82 - for (i = 0; i < rec.extra_u32; i++) 83 - rec.u.nocycle.extra_u32[i] = va_arg(*args, u32); 80 + for (i = 0; i < extra; i++) 81 + rec.u.notimestamp.extra_u32[i] = va_arg(*args, u32); 84 82 } 85 83 86 - size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32)); 84 + size = calc_rec_size(p->timestamp_in, extra * sizeof(u32)); 87 85 relay_write(kt->rchan, &rec, size); 88 86 } 89 87
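
For illustration only: the hunk above collapses the old event / cycle_in / extra_u32 fields into a single rec_val word and switches the per-record timestamp from raw TSC cycles to ktime_to_ns(ktime_get()). The TRACE_REC_* packing macros are defined elsewhere in the series, not in this hunk; assuming the layout they suggest (event id in the low 28 bits, extra-arg count in bits 28-30, timestamp flag in bit 31), a relay-channel consumer could unpack a record roughly as in this sketch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of a consumer-side decoder for the repacked trace record.  The
 * bit layout of rec_val is an assumption; verify against the TRACE_REC_*
 * macros in <linux/kvm.h> from this series before relying on it. */
struct trace_rec_hdr {
    uint32_t rec_val;   /* packed event id / arg count / timestamp flag */
    uint32_t pid;
    uint32_t vcpu_id;
};

static void decode_rec(const struct trace_rec_hdr *hdr, const uint8_t *payload)
{
    uint32_t event_id = hdr->rec_val & 0x0fffffff;   /* assumed low 28 bits */
    uint32_t nr_args  = (hdr->rec_val >> 28) & 0x7;  /* assumed bits 28-30 */
    int has_timestamp = (hdr->rec_val >> 31) & 0x1;  /* assumed bit 31 */
    uint64_t ns = 0;

    if (has_timestamp) {
        /* the 64-bit nanosecond timestamp precedes the extra args */
        memcpy(&ns, payload, sizeof(ns));
        payload += sizeof(ns);
    }

    printf("event %u pid %u vcpu %u ts %llu ns, %u extra args\n",
           event_id, hdr->pid, hdr->vcpu_id,
           (unsigned long long)ns, nr_args);
    /* nr_args u32 values follow at 'payload' */
}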
+191
virt/kvm/vtd.c
··· 1 + /* 2 + * Copyright (c) 2006, Intel Corporation. 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms and conditions of the GNU General Public License, 6 + * version 2, as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope it will be useful, but WITHOUT 9 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 + * more details. 12 + * 13 + * You should have received a copy of the GNU General Public License along with 14 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 15 + * Place - Suite 330, Boston, MA 02111-1307 USA. 16 + * 17 + * Copyright (C) 2006-2008 Intel Corporation 18 + * Copyright IBM Corporation, 2008 19 + * Author: Allen M. Kay <allen.m.kay@intel.com> 20 + * Author: Weidong Han <weidong.han@intel.com> 21 + * Author: Ben-Ami Yassour <benami@il.ibm.com> 22 + */ 23 + 24 + #include <linux/list.h> 25 + #include <linux/kvm_host.h> 26 + #include <linux/pci.h> 27 + #include <linux/dmar.h> 28 + #include <linux/intel-iommu.h> 29 + 30 + static int kvm_iommu_unmap_memslots(struct kvm *kvm); 31 + static void kvm_iommu_put_pages(struct kvm *kvm, 32 + gfn_t base_gfn, unsigned long npages); 33 + 34 + int kvm_iommu_map_pages(struct kvm *kvm, 35 + gfn_t base_gfn, unsigned long npages) 36 + { 37 + gfn_t gfn = base_gfn; 38 + pfn_t pfn; 39 + int i, r = 0; 40 + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; 41 + 42 + /* check if iommu exists and in use */ 43 + if (!domain) 44 + return 0; 45 + 46 + for (i = 0; i < npages; i++) { 47 + /* check if already mapped */ 48 + pfn = (pfn_t)intel_iommu_iova_to_pfn(domain, 49 + gfn_to_gpa(gfn)); 50 + if (pfn) 51 + continue; 52 + 53 + pfn = gfn_to_pfn(kvm, gfn); 54 + r = intel_iommu_page_mapping(domain, 55 + gfn_to_gpa(gfn), 56 + pfn_to_hpa(pfn), 57 + PAGE_SIZE, 58 + DMA_PTE_READ | 59 + DMA_PTE_WRITE); 60 + if (r) { 61 + printk(KERN_ERR "kvm_iommu_map_pages:" 62 + "iommu failed to map pfn=%lx\n", pfn); 63 + goto unmap_pages; 64 + } 65 + gfn++; 66 + } 67 + return 0; 68 + 69 + unmap_pages: 70 + kvm_iommu_put_pages(kvm, base_gfn, i); 71 + return r; 72 + } 73 + 74 + static int kvm_iommu_map_memslots(struct kvm *kvm) 75 + { 76 + int i, r; 77 + 78 + down_read(&kvm->slots_lock); 79 + for (i = 0; i < kvm->nmemslots; i++) { 80 + r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn, 81 + kvm->memslots[i].npages); 82 + if (r) 83 + break; 84 + } 85 + up_read(&kvm->slots_lock); 86 + return r; 87 + } 88 + 89 + int kvm_iommu_map_guest(struct kvm *kvm, 90 + struct kvm_assigned_dev_kernel *assigned_dev) 91 + { 92 + struct pci_dev *pdev = NULL; 93 + int r; 94 + 95 + if (!intel_iommu_found()) { 96 + printk(KERN_ERR "%s: intel iommu not found\n", __func__); 97 + return -ENODEV; 98 + } 99 + 100 + printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n", 101 + assigned_dev->host_busnr, 102 + PCI_SLOT(assigned_dev->host_devfn), 103 + PCI_FUNC(assigned_dev->host_devfn)); 104 + 105 + pdev = assigned_dev->dev; 106 + 107 + if (pdev == NULL) { 108 + if (kvm->arch.intel_iommu_domain) { 109 + intel_iommu_domain_exit(kvm->arch.intel_iommu_domain); 110 + kvm->arch.intel_iommu_domain = NULL; 111 + } 112 + return -ENODEV; 113 + } 114 + 115 + kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev); 116 + if (!kvm->arch.intel_iommu_domain) 117 + return -ENODEV; 118 + 119 + r = kvm_iommu_map_memslots(kvm); 120 + if (r) 121 + goto out_unmap; 
122 + 123 + intel_iommu_detach_dev(kvm->arch.intel_iommu_domain, 124 + pdev->bus->number, pdev->devfn); 125 + 126 + r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain, 127 + pdev); 128 + if (r) { 129 + printk(KERN_ERR "Domain context map for %s failed", 130 + pci_name(pdev)); 131 + goto out_unmap; 132 + } 133 + return 0; 134 + 135 + out_unmap: 136 + kvm_iommu_unmap_memslots(kvm); 137 + return r; 138 + } 139 + 140 + static void kvm_iommu_put_pages(struct kvm *kvm, 141 + gfn_t base_gfn, unsigned long npages) 142 + { 143 + gfn_t gfn = base_gfn; 144 + pfn_t pfn; 145 + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; 146 + int i; 147 + 148 + for (i = 0; i < npages; i++) { 149 + pfn = (pfn_t)intel_iommu_iova_to_pfn(domain, 150 + gfn_to_gpa(gfn)); 151 + kvm_release_pfn_clean(pfn); 152 + gfn++; 153 + } 154 + } 155 + 156 + static int kvm_iommu_unmap_memslots(struct kvm *kvm) 157 + { 158 + int i; 159 + down_read(&kvm->slots_lock); 160 + for (i = 0; i < kvm->nmemslots; i++) { 161 + kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn, 162 + kvm->memslots[i].npages); 163 + } 164 + up_read(&kvm->slots_lock); 165 + 166 + return 0; 167 + } 168 + 169 + int kvm_iommu_unmap_guest(struct kvm *kvm) 170 + { 171 + struct kvm_assigned_dev_kernel *entry; 172 + struct dmar_domain *domain = kvm->arch.intel_iommu_domain; 173 + 174 + /* check if iommu exists and in use */ 175 + if (!domain) 176 + return 0; 177 + 178 + list_for_each_entry(entry, &kvm->arch.assigned_dev_head, list) { 179 + printk(KERN_DEBUG "VT-d unmap: host bdf = %x:%x:%x\n", 180 + entry->host_busnr, 181 + PCI_SLOT(entry->host_devfn), 182 + PCI_FUNC(entry->host_devfn)); 183 + 184 + /* detach kvm dmar domain */ 185 + intel_iommu_detach_dev(domain, entry->host_busnr, 186 + entry->host_devfn); 187 + } 188 + kvm_iommu_unmap_memslots(kvm); 189 + intel_iommu_domain_exit(domain); 190 + return 0; 191 + }
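
For illustration only: vtd.c supplies helpers that the arch code is expected to drive, with kvm_iommu_map_guest() called at assignment time (as kvm_vm_ioctl_assign_device() does above), kvm_iommu_map_pages() when a memory slot is committed, and kvm_iommu_unmap_guest() together with kvm_free_all_assigned_devices() at VM teardown. A minimal sketch of that teardown step follows; the helper name and its call site in the arch's VM-destroy path are assumptions, not part of this hunk.

#include <linux/kvm_host.h>

/* Hypothetical helper (name and call site assumed): to be invoked from
 * the arch's VM-destroy path, e.g. kvm_arch_destroy_vm(). */
static void example_release_assigned_devices(struct kvm *kvm)
{
#ifdef CONFIG_DMAR
    /* Detach the assigned devices from the VT-d domain and drop the
     * page references taken by kvm_iommu_map_pages(). */
    kvm_iommu_unmap_guest(kvm);
#endif
    /* Free host IRQs, release the PCI regions and put the pci_dev refs. */
    kvm_free_all_assigned_devices(kvm);
}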