Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm bugfixes from Gleb Natapov:
"The bulk of the fixes is in the MIPS KVM kernel<->userspace ABI. MIPS KVM
is new for 3.10 and some problems were found with the current ABI. It is
better to fix them now than to ship a kernel with a broken one"

* 'fixes' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: Fix race in apic->pending_events processing
KVM: fix sil/dil/bpl/spl in the mod/rm fields
KVM: Emulate multibyte NOP
ARM: KVM: be more thorough when invalidating TLBs
ARM: KVM: prevent NULL pointer dereferences with KVM VCPU ioctl
mips/kvm: Use ENOIOCTLCMD to indicate unimplemented ioctls.
mips/kvm: Fix ABI by moving manipulation of CP0 registers to KVM_{G,S}ET_ONE_REG
mips/kvm: Use ARRAY_SIZE() instead of hardcoded constants in kvm_arch_vcpu_ioctl_{s,g}et_regs
mips/kvm: Fix name of gpr field in struct kvm_regs.
mips/kvm: Fix ABI for use of 64-bit registers.
mips/kvm: Fix ABI for use of FPU.

+421 -124
+13 -2
arch/arm/kvm/arm.c
··· 492 492 wait_event_interruptible(*wq, !vcpu->arch.pause); 493 493 } 494 494 495 + static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) 496 + { 497 + return vcpu->arch.target >= 0; 498 + } 499 + 495 500 /** 496 501 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code 497 502 * @vcpu: The VCPU pointer ··· 513 508 int ret; 514 509 sigset_t sigsaved; 515 510 516 - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ 517 - if (unlikely(vcpu->arch.target < 0)) 511 + if (unlikely(!kvm_vcpu_initialized(vcpu))) 518 512 return -ENOEXEC; 519 513 520 514 ret = kvm_vcpu_first_run_init(vcpu); ··· 714 710 case KVM_SET_ONE_REG: 715 711 case KVM_GET_ONE_REG: { 716 712 struct kvm_one_reg reg; 713 + 714 + if (unlikely(!kvm_vcpu_initialized(vcpu))) 715 + return -ENOEXEC; 716 + 717 717 if (copy_from_user(&reg, argp, sizeof(reg))) 718 718 return -EFAULT; 719 719 if (ioctl == KVM_SET_ONE_REG) ··· 729 721 struct kvm_reg_list __user *user_list = argp; 730 722 struct kvm_reg_list reg_list; 731 723 unsigned n; 724 + 725 + if (unlikely(!kvm_vcpu_initialized(vcpu))) 726 + return -ENOEXEC; 732 727 733 728 if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) 734 729 return -EFAULT;
+26 -15
arch/arm/kvm/mmu.c
··· 43 43 44 44 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) 45 45 { 46 - kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); 46 + /* 47 + * This function also gets called when dealing with HYP page 48 + * tables. As HYP doesn't have an associated struct kvm (and 49 + * the HYP page tables are fairly static), we don't do 50 + * anything there. 51 + */ 52 + if (kvm) 53 + kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); 47 54 } 48 55 49 56 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, ··· 85 78 return p; 86 79 } 87 80 88 - static void clear_pud_entry(pud_t *pud) 81 + static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 89 82 { 90 83 pmd_t *pmd_table = pmd_offset(pud, 0); 91 84 pud_clear(pud); 85 + kvm_tlb_flush_vmid_ipa(kvm, addr); 92 86 pmd_free(NULL, pmd_table); 93 87 put_page(virt_to_page(pud)); 94 88 } 95 89 96 - static void clear_pmd_entry(pmd_t *pmd) 90 + static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) 97 91 { 98 92 pte_t *pte_table = pte_offset_kernel(pmd, 0); 99 93 pmd_clear(pmd); 94 + kvm_tlb_flush_vmid_ipa(kvm, addr); 100 95 pte_free_kernel(NULL, pte_table); 101 96 put_page(virt_to_page(pmd)); 102 97 } ··· 109 100 return page_count(pmd_page) == 1; 110 101 } 111 102 112 - static void clear_pte_entry(pte_t *pte) 103 + static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) 113 104 { 114 105 if (pte_present(*pte)) { 115 106 kvm_set_pte(pte, __pte(0)); 116 107 put_page(virt_to_page(pte)); 108 + kvm_tlb_flush_vmid_ipa(kvm, addr); 117 109 } 118 110 } 119 111 ··· 124 114 return page_count(pte_page) == 1; 125 115 } 126 116 127 - static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) 117 + static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 118 + unsigned long long start, u64 size) 128 119 { 129 120 pgd_t *pgd; 130 121 pud_t *pud; ··· 149 138 } 150 139 151 140 pte = pte_offset_kernel(pmd, addr); 152 - clear_pte_entry(pte); 141 + 
clear_pte_entry(kvm, pte, addr); 153 142 range = PAGE_SIZE; 154 143 155 144 /* If we emptied the pte, walk back up the ladder */ 156 145 if (pte_empty(pte)) { 157 - clear_pmd_entry(pmd); 146 + clear_pmd_entry(kvm, pmd, addr); 158 147 range = PMD_SIZE; 159 148 if (pmd_empty(pmd)) { 160 - clear_pud_entry(pud); 149 + clear_pud_entry(kvm, pud, addr); 161 150 range = PUD_SIZE; 162 151 } 163 152 } ··· 176 165 mutex_lock(&kvm_hyp_pgd_mutex); 177 166 178 167 if (boot_hyp_pgd) { 179 - unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); 180 - unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 168 + unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); 169 + unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 181 170 kfree(boot_hyp_pgd); 182 171 boot_hyp_pgd = NULL; 183 172 } 184 173 185 174 if (hyp_pgd) 186 - unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 175 + unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 187 176 188 177 kfree(init_bounce_page); 189 178 init_bounce_page = NULL; ··· 211 200 212 201 if (hyp_pgd) { 213 202 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) 214 - unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 203 + unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 215 204 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) 216 - unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 205 + unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); 206 + 217 207 kfree(hyp_pgd); 218 208 hyp_pgd = NULL; 219 209 } ··· 405 393 */ 406 394 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) 407 395 { 408 - unmap_range(kvm->arch.pgd, start, size); 396 + unmap_range(kvm, kvm->arch.pgd, start, size); 409 397 } 410 398 411 399 /** ··· 687 675 static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) 688 676 { 689 677 unmap_stage2_range(kvm, gpa, PAGE_SIZE); 690 - kvm_tlb_flush_vmid_ipa(kvm, gpa); 691 678 } 692 679 693 680 int kvm_unmap_hva(struct kvm 
*kvm, unsigned long hva)
-4
arch/mips/include/asm/kvm_host.h
··· 496 496 uint32_t cause); 497 497 int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, 498 498 uint32_t cause); 499 - int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, 500 - struct kvm_regs *regs); 501 - int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, 502 - struct kvm_regs *regs); 503 499 }; 504 500 extern struct kvm_mips_callbacks *kvm_mips_callbacks; 505 501 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+110 -27
arch/mips/include/uapi/asm/kvm.h
··· 1 1 /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 7 - * Authors: Sanjay Lal <sanjayl@kymasys.com> 8 - */ 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 7 + * Copyright (C) 2013 Cavium, Inc. 8 + * Authors: Sanjay Lal <sanjayl@kymasys.com> 9 + */ 9 10 10 11 #ifndef __LINUX_KVM_MIPS_H 11 12 #define __LINUX_KVM_MIPS_H 12 13 13 14 #include <linux/types.h> 14 15 15 - #define __KVM_MIPS 16 + /* 17 + * KVM MIPS specific structures and definitions. 18 + * 19 + * Some parts derived from the x86 version of this file. 20 + */ 16 21 17 - #define N_MIPS_COPROC_REGS 32 18 - #define N_MIPS_COPROC_SEL 8 19 - 20 - /* for KVM_GET_REGS and KVM_SET_REGS */ 22 + /* 23 + * for KVM_GET_REGS and KVM_SET_REGS 24 + * 25 + * If Config[AT] is zero (32-bit CPU), the register contents are 26 + * stored in the lower 32-bits of the struct kvm_regs fields and sign 27 + * extended to 64-bits. 28 + */ 21 29 struct kvm_regs { 22 - __u32 gprs[32]; 23 - __u32 hi; 24 - __u32 lo; 25 - __u32 pc; 26 - 27 - __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; 30 + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ 31 + __u64 gpr[32]; 32 + __u64 hi; 33 + __u64 lo; 34 + __u64 pc; 28 35 }; 29 36 30 - /* for KVM_GET_SREGS and KVM_SET_SREGS */ 31 - struct kvm_sregs { 32 - }; 33 - 34 - /* for KVM_GET_FPU and KVM_SET_FPU */ 37 + /* 38 + * for KVM_GET_FPU and KVM_SET_FPU 39 + * 40 + * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs 41 + * are zero filled. 
42 + */ 35 43 struct kvm_fpu { 44 + __u64 fpr[32]; 45 + __u32 fir; 46 + __u32 fccr; 47 + __u32 fexr; 48 + __u32 fenr; 49 + __u32 fcsr; 50 + __u32 pad; 36 51 }; 37 52 53 + 54 + /* 55 + * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 56 + * registers. The id field is broken down as follows: 57 + * 58 + * bits[2..0] - Register 'sel' index. 59 + * bits[7..3] - Register 'rd' index. 60 + * bits[15..8] - Must be zero. 61 + * bits[63..16] - 1 -> CP0 registers. 62 + * 63 + * Other sets registers may be added in the future. Each set would 64 + * have its own identifier in bits[63..16]. 65 + * 66 + * The addr field of struct kvm_one_reg must point to an aligned 67 + * 64-bit wide location. For registers that are narrower than 68 + * 64-bits, the value is stored in the low order bits of the location, 69 + * and sign extended to 64-bits. 70 + * 71 + * The registers defined in struct kvm_regs are also accessible, the 72 + * id values for these are below. 73 + */ 74 + 75 + #define KVM_REG_MIPS_R0 0 76 + #define KVM_REG_MIPS_R1 1 77 + #define KVM_REG_MIPS_R2 2 78 + #define KVM_REG_MIPS_R3 3 79 + #define KVM_REG_MIPS_R4 4 80 + #define KVM_REG_MIPS_R5 5 81 + #define KVM_REG_MIPS_R6 6 82 + #define KVM_REG_MIPS_R7 7 83 + #define KVM_REG_MIPS_R8 8 84 + #define KVM_REG_MIPS_R9 9 85 + #define KVM_REG_MIPS_R10 10 86 + #define KVM_REG_MIPS_R11 11 87 + #define KVM_REG_MIPS_R12 12 88 + #define KVM_REG_MIPS_R13 13 89 + #define KVM_REG_MIPS_R14 14 90 + #define KVM_REG_MIPS_R15 15 91 + #define KVM_REG_MIPS_R16 16 92 + #define KVM_REG_MIPS_R17 17 93 + #define KVM_REG_MIPS_R18 18 94 + #define KVM_REG_MIPS_R19 19 95 + #define KVM_REG_MIPS_R20 20 96 + #define KVM_REG_MIPS_R21 21 97 + #define KVM_REG_MIPS_R22 22 98 + #define KVM_REG_MIPS_R23 23 99 + #define KVM_REG_MIPS_R24 24 100 + #define KVM_REG_MIPS_R25 25 101 + #define KVM_REG_MIPS_R26 26 102 + #define KVM_REG_MIPS_R27 27 103 + #define KVM_REG_MIPS_R28 28 104 + #define KVM_REG_MIPS_R29 29 105 + #define KVM_REG_MIPS_R30 30 
106 + #define KVM_REG_MIPS_R31 31 107 + 108 + #define KVM_REG_MIPS_HI 32 109 + #define KVM_REG_MIPS_LO 33 110 + #define KVM_REG_MIPS_PC 34 111 + 112 + /* 113 + * KVM MIPS specific structures and definitions 114 + * 115 + */ 38 116 struct kvm_debug_exit_arch { 117 + __u64 epc; 39 118 }; 40 119 41 120 /* for KVM_SET_GUEST_DEBUG */ 42 121 struct kvm_guest_debug_arch { 43 122 }; 44 123 124 + /* definition of registers in kvm_run */ 125 + struct kvm_sync_regs { 126 + }; 127 + 128 + /* dummy definition */ 129 + struct kvm_sregs { 130 + }; 131 + 45 132 struct kvm_mips_interrupt { 46 133 /* in */ 47 134 __u32 cpu; 48 135 __u32 irq; 49 - }; 50 - 51 - /* definition of registers in kvm_run */ 52 - struct kvm_sync_regs { 53 136 }; 54 137 55 138 #endif /* __LINUX_KVM_MIPS_H */
+259 -21
arch/mips/kvm/kvm_mips.c
··· 195 195 long 196 196 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 197 197 { 198 - return -EINVAL; 198 + return -ENOIOCTLCMD; 199 199 } 200 200 201 201 void kvm_arch_free_memslot(struct kvm_memory_slot *free, ··· 401 401 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 402 402 struct kvm_guest_debug *dbg) 403 403 { 404 - return -EINVAL; 404 + return -ENOIOCTLCMD; 405 405 } 406 406 407 407 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ··· 475 475 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 476 476 struct kvm_mp_state *mp_state) 477 477 { 478 - return -EINVAL; 478 + return -ENOIOCTLCMD; 479 479 } 480 480 481 481 int 482 482 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 483 483 struct kvm_mp_state *mp_state) 484 484 { 485 - return -EINVAL; 485 + return -ENOIOCTLCMD; 486 + } 487 + 488 + #define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0) 489 + #define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0) 490 + #define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0) 491 + #define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0) 492 + #define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2) 493 + #define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0) 494 + #define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1) 495 + #define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0) 496 + #define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0) 497 + #define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0) 498 + #define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0) 499 + #define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0) 500 + #define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0) 501 + #define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0) 502 + #define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0) 503 + #define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1) 504 + #define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0) 505 + #define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1) 506 + #define 
KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2) 507 + #define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3) 508 + #define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7) 509 + #define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0) 510 + #define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0) 511 + 512 + static u64 kvm_mips_get_one_regs[] = { 513 + KVM_REG_MIPS_R0, 514 + KVM_REG_MIPS_R1, 515 + KVM_REG_MIPS_R2, 516 + KVM_REG_MIPS_R3, 517 + KVM_REG_MIPS_R4, 518 + KVM_REG_MIPS_R5, 519 + KVM_REG_MIPS_R6, 520 + KVM_REG_MIPS_R7, 521 + KVM_REG_MIPS_R8, 522 + KVM_REG_MIPS_R9, 523 + KVM_REG_MIPS_R10, 524 + KVM_REG_MIPS_R11, 525 + KVM_REG_MIPS_R12, 526 + KVM_REG_MIPS_R13, 527 + KVM_REG_MIPS_R14, 528 + KVM_REG_MIPS_R15, 529 + KVM_REG_MIPS_R16, 530 + KVM_REG_MIPS_R17, 531 + KVM_REG_MIPS_R18, 532 + KVM_REG_MIPS_R19, 533 + KVM_REG_MIPS_R20, 534 + KVM_REG_MIPS_R21, 535 + KVM_REG_MIPS_R22, 536 + KVM_REG_MIPS_R23, 537 + KVM_REG_MIPS_R24, 538 + KVM_REG_MIPS_R25, 539 + KVM_REG_MIPS_R26, 540 + KVM_REG_MIPS_R27, 541 + KVM_REG_MIPS_R28, 542 + KVM_REG_MIPS_R29, 543 + KVM_REG_MIPS_R30, 544 + KVM_REG_MIPS_R31, 545 + 546 + KVM_REG_MIPS_HI, 547 + KVM_REG_MIPS_LO, 548 + KVM_REG_MIPS_PC, 549 + 550 + KVM_REG_MIPS_CP0_INDEX, 551 + KVM_REG_MIPS_CP0_CONTEXT, 552 + KVM_REG_MIPS_CP0_PAGEMASK, 553 + KVM_REG_MIPS_CP0_WIRED, 554 + KVM_REG_MIPS_CP0_BADVADDR, 555 + KVM_REG_MIPS_CP0_ENTRYHI, 556 + KVM_REG_MIPS_CP0_STATUS, 557 + KVM_REG_MIPS_CP0_CAUSE, 558 + /* EPC set via kvm_regs, et al. */ 559 + KVM_REG_MIPS_CP0_CONFIG, 560 + KVM_REG_MIPS_CP0_CONFIG1, 561 + KVM_REG_MIPS_CP0_CONFIG2, 562 + KVM_REG_MIPS_CP0_CONFIG3, 563 + KVM_REG_MIPS_CP0_CONFIG7, 564 + KVM_REG_MIPS_CP0_ERROREPC 565 + }; 566 + 567 + static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, 568 + const struct kvm_one_reg *reg) 569 + { 570 + u64 __user *uaddr = (u64 __user *)(long)reg->addr; 571 + 572 + struct mips_coproc *cop0 = vcpu->arch.cop0; 573 + s64 v; 574 + 575 + switch (reg->id) { 576 + case KVM_REG_MIPS_R0 ... 
KVM_REG_MIPS_R31: 577 + v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; 578 + break; 579 + case KVM_REG_MIPS_HI: 580 + v = (long)vcpu->arch.hi; 581 + break; 582 + case KVM_REG_MIPS_LO: 583 + v = (long)vcpu->arch.lo; 584 + break; 585 + case KVM_REG_MIPS_PC: 586 + v = (long)vcpu->arch.pc; 587 + break; 588 + 589 + case KVM_REG_MIPS_CP0_INDEX: 590 + v = (long)kvm_read_c0_guest_index(cop0); 591 + break; 592 + case KVM_REG_MIPS_CP0_CONTEXT: 593 + v = (long)kvm_read_c0_guest_context(cop0); 594 + break; 595 + case KVM_REG_MIPS_CP0_PAGEMASK: 596 + v = (long)kvm_read_c0_guest_pagemask(cop0); 597 + break; 598 + case KVM_REG_MIPS_CP0_WIRED: 599 + v = (long)kvm_read_c0_guest_wired(cop0); 600 + break; 601 + case KVM_REG_MIPS_CP0_BADVADDR: 602 + v = (long)kvm_read_c0_guest_badvaddr(cop0); 603 + break; 604 + case KVM_REG_MIPS_CP0_ENTRYHI: 605 + v = (long)kvm_read_c0_guest_entryhi(cop0); 606 + break; 607 + case KVM_REG_MIPS_CP0_STATUS: 608 + v = (long)kvm_read_c0_guest_status(cop0); 609 + break; 610 + case KVM_REG_MIPS_CP0_CAUSE: 611 + v = (long)kvm_read_c0_guest_cause(cop0); 612 + break; 613 + case KVM_REG_MIPS_CP0_ERROREPC: 614 + v = (long)kvm_read_c0_guest_errorepc(cop0); 615 + break; 616 + case KVM_REG_MIPS_CP0_CONFIG: 617 + v = (long)kvm_read_c0_guest_config(cop0); 618 + break; 619 + case KVM_REG_MIPS_CP0_CONFIG1: 620 + v = (long)kvm_read_c0_guest_config1(cop0); 621 + break; 622 + case KVM_REG_MIPS_CP0_CONFIG2: 623 + v = (long)kvm_read_c0_guest_config2(cop0); 624 + break; 625 + case KVM_REG_MIPS_CP0_CONFIG3: 626 + v = (long)kvm_read_c0_guest_config3(cop0); 627 + break; 628 + case KVM_REG_MIPS_CP0_CONFIG7: 629 + v = (long)kvm_read_c0_guest_config7(cop0); 630 + break; 631 + default: 632 + return -EINVAL; 633 + } 634 + return put_user(v, uaddr); 635 + } 636 + 637 + static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, 638 + const struct kvm_one_reg *reg) 639 + { 640 + u64 __user *uaddr = (u64 __user *)(long)reg->addr; 641 + struct mips_coproc *cop0 = vcpu->arch.cop0; 642 + u64 
v; 643 + 644 + if (get_user(v, uaddr) != 0) 645 + return -EFAULT; 646 + 647 + switch (reg->id) { 648 + case KVM_REG_MIPS_R0: 649 + /* Silently ignore requests to set $0 */ 650 + break; 651 + case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: 652 + vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; 653 + break; 654 + case KVM_REG_MIPS_HI: 655 + vcpu->arch.hi = v; 656 + break; 657 + case KVM_REG_MIPS_LO: 658 + vcpu->arch.lo = v; 659 + break; 660 + case KVM_REG_MIPS_PC: 661 + vcpu->arch.pc = v; 662 + break; 663 + 664 + case KVM_REG_MIPS_CP0_INDEX: 665 + kvm_write_c0_guest_index(cop0, v); 666 + break; 667 + case KVM_REG_MIPS_CP0_CONTEXT: 668 + kvm_write_c0_guest_context(cop0, v); 669 + break; 670 + case KVM_REG_MIPS_CP0_PAGEMASK: 671 + kvm_write_c0_guest_pagemask(cop0, v); 672 + break; 673 + case KVM_REG_MIPS_CP0_WIRED: 674 + kvm_write_c0_guest_wired(cop0, v); 675 + break; 676 + case KVM_REG_MIPS_CP0_BADVADDR: 677 + kvm_write_c0_guest_badvaddr(cop0, v); 678 + break; 679 + case KVM_REG_MIPS_CP0_ENTRYHI: 680 + kvm_write_c0_guest_entryhi(cop0, v); 681 + break; 682 + case KVM_REG_MIPS_CP0_STATUS: 683 + kvm_write_c0_guest_status(cop0, v); 684 + break; 685 + case KVM_REG_MIPS_CP0_CAUSE: 686 + kvm_write_c0_guest_cause(cop0, v); 687 + break; 688 + case KVM_REG_MIPS_CP0_ERROREPC: 689 + kvm_write_c0_guest_errorepc(cop0, v); 690 + break; 691 + default: 692 + return -EINVAL; 693 + } 694 + return 0; 486 695 } 487 696 488 697 long ··· 700 491 struct kvm_vcpu *vcpu = filp->private_data; 701 492 void __user *argp = (void __user *)arg; 702 493 long r; 703 - int intr; 704 494 705 495 switch (ioctl) { 496 + case KVM_SET_ONE_REG: 497 + case KVM_GET_ONE_REG: { 498 + struct kvm_one_reg reg; 499 + if (copy_from_user(&reg, argp, sizeof(reg))) 500 + return -EFAULT; 501 + if (ioctl == KVM_SET_ONE_REG) 502 + return kvm_mips_set_reg(vcpu, &reg); 503 + else 504 + return kvm_mips_get_reg(vcpu, &reg); 505 + } 506 + case KVM_GET_REG_LIST: { 507 + struct kvm_reg_list __user *user_list = argp; 508 + u64 __user 
*reg_dest; 509 + struct kvm_reg_list reg_list; 510 + unsigned n; 511 + 512 + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) 513 + return -EFAULT; 514 + n = reg_list.n; 515 + reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); 516 + if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) 517 + return -EFAULT; 518 + if (n < reg_list.n) 519 + return -E2BIG; 520 + reg_dest = user_list->reg; 521 + if (copy_to_user(reg_dest, kvm_mips_get_one_regs, 522 + sizeof(kvm_mips_get_one_regs))) 523 + return -EFAULT; 524 + return 0; 525 + } 706 526 case KVM_NMI: 707 527 /* Treat the NMI as a CPU reset */ 708 528 r = kvm_mips_reset_vcpu(vcpu); ··· 743 505 if (copy_from_user(&irq, argp, sizeof(irq))) 744 506 goto out; 745 507 746 - intr = (int)irq.irq; 747 - 748 508 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, 749 509 irq.irq); 750 510 ··· 750 514 break; 751 515 } 752 516 default: 753 - r = -EINVAL; 517 + r = -ENOIOCTLCMD; 754 518 } 755 519 756 520 out: ··· 801 565 802 566 switch (ioctl) { 803 567 default: 804 - r = -EINVAL; 568 + r = -ENOIOCTLCMD; 805 569 } 806 570 807 571 return r; ··· 829 593 int 830 594 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 831 595 { 832 - return -ENOTSUPP; 596 + return -ENOIOCTLCMD; 833 597 } 834 598 835 599 int 836 600 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 837 601 { 838 - return -ENOTSUPP; 602 + return -ENOIOCTLCMD; 839 603 } 840 604 841 605 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) ··· 845 609 846 610 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 847 611 { 848 - return -ENOTSUPP; 612 + return -ENOIOCTLCMD; 849 613 } 850 614 851 615 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 852 616 { 853 - return -ENOTSUPP; 617 + return -ENOIOCTLCMD; 854 618 } 855 619 856 620 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) ··· 863 627 int r; 864 628 865 629 switch (ext) { 630 + case 
KVM_CAP_ONE_REG: 631 + r = 1; 632 + break; 866 633 case KVM_CAP_COALESCED_MMIO: 867 634 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 868 635 break; ··· 874 635 break; 875 636 } 876 637 return r; 877 - 878 638 } 879 639 880 640 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) ··· 915 677 { 916 678 int i; 917 679 918 - for (i = 0; i < 32; i++) 919 - vcpu->arch.gprs[i] = regs->gprs[i]; 920 - 680 + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) 681 + vcpu->arch.gprs[i] = regs->gpr[i]; 682 + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ 921 683 vcpu->arch.hi = regs->hi; 922 684 vcpu->arch.lo = regs->lo; 923 685 vcpu->arch.pc = regs->pc; 924 686 925 - return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); 687 + return 0; 926 688 } 927 689 928 690 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 929 691 { 930 692 int i; 931 693 932 - for (i = 0; i < 32; i++) 933 - regs->gprs[i] = vcpu->arch.gprs[i]; 694 + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) 695 + regs->gpr[i] = vcpu->arch.gprs[i]; 934 696 935 697 regs->hi = vcpu->arch.hi; 936 698 regs->lo = vcpu->arch.lo; 937 699 regs->pc = vcpu->arch.pc; 938 700 939 - return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); 701 + return 0; 940 702 } 941 703 942 704 void kvm_mips_comparecount_func(unsigned long data)
-50
arch/mips/kvm/kvm_trap_emul.c
··· 345 345 return ret; 346 346 } 347 347 348 - static int 349 - kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 350 - { 351 - struct mips_coproc *cop0 = vcpu->arch.cop0; 352 - 353 - kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); 354 - kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); 355 - kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); 356 - kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); 357 - kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); 358 - 359 - kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); 360 - kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); 361 - kvm_write_c0_guest_pagemask(cop0, 362 - regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); 363 - kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); 364 - kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); 365 - 366 - return 0; 367 - } 368 - 369 - static int 370 - kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 371 - { 372 - struct mips_coproc *cop0 = vcpu->arch.cop0; 373 - 374 - regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); 375 - regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); 376 - regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); 377 - regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); 378 - regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); 379 - 380 - regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); 381 - regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); 382 - regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = 383 - kvm_read_c0_guest_pagemask(cop0); 384 - regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); 385 - regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); 386 - 387 - regs->cp0reg[MIPS_CP0_CONFIG][0] = 
kvm_read_c0_guest_config(cop0); 388 - regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); 389 - regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); 390 - regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); 391 - regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); 392 - 393 - return 0; 394 - } 395 - 396 348 static int kvm_trap_emul_vm_init(struct kvm *kvm) 397 349 { 398 350 return 0; ··· 423 471 .dequeue_io_int = kvm_mips_dequeue_io_int_cb, 424 472 .irq_deliver = kvm_mips_irq_deliver_cb, 425 473 .irq_clear = kvm_mips_irq_clear_cb, 426 - .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, 427 - .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, 428 474 }; 429 475 430 476 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+7 -2
arch/x86/kvm/emulate.c
··· 1240 1240 ctxt->modrm_seg = VCPU_SREG_DS; 1241 1241 1242 1242 if (ctxt->modrm_mod == 3) { 1243 + int highbyte_regs = ctxt->rex_prefix == 0; 1244 + 1243 1245 op->type = OP_REG; 1244 1246 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; 1245 - op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); 1247 + op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1248 + highbyte_regs && (ctxt->d & ByteOp)); 1246 1249 if (ctxt->d & Sse) { 1247 1250 op->type = OP_XMM; 1248 1251 op->bytes = 16; ··· 4000 3997 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 4001 3998 N, D(ImplicitOps | ModRM), N, N, 4002 3999 /* 0x10 - 0x1F */ 4003 - N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, 4000 + N, N, N, N, N, N, N, N, 4001 + D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), 4004 4002 /* 0x20 - 0x2F */ 4005 4003 DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), 4006 4004 DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), ··· 4840 4836 case 0x08: /* invd */ 4841 4837 case 0x0d: /* GrpP (prefetch) */ 4842 4838 case 0x18: /* Grp16 (prefetch/nop) */ 4839 + case 0x1f: /* nop */ 4843 4840 break; 4844 4841 case 0x20: /* mov cr, reg */ 4845 4842 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
+6 -3
arch/x86/kvm/lapic.c
··· 1861 1861 { 1862 1862 struct kvm_lapic *apic = vcpu->arch.apic; 1863 1863 unsigned int sipi_vector; 1864 + unsigned long pe; 1864 1865 1865 - if (!kvm_vcpu_has_lapic(vcpu)) 1866 + if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) 1866 1867 return; 1867 1868 1868 - if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) { 1869 + pe = xchg(&apic->pending_events, 0); 1870 + 1871 + if (test_bit(KVM_APIC_INIT, &pe)) { 1869 1872 kvm_lapic_reset(vcpu); 1870 1873 kvm_vcpu_reset(vcpu); 1871 1874 if (kvm_vcpu_is_bsp(apic->vcpu)) ··· 1876 1873 else 1877 1874 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; 1878 1875 } 1879 - if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) && 1876 + if (test_bit(KVM_APIC_SIPI, &pe) && 1880 1877 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { 1881 1878 /* evaluate pending_events before reading the vector */ 1882 1879 smp_rmb();