Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: ppc: fix invalidation of large guest pages
KVM: s390: Fix possible host kernel bug on lctl(g) handling
KVM: s390: Fix instruction naming for lctlg
KVM: s390: Fix program check on interrupt delivery handling
KVM: s390: Change guestaddr type in gaccess
KVM: s390: Fix guest kconfig
KVM: s390: Advertise KVM_CAP_USER_MEMORY
KVM: ia64: Fix irq disabling leak in error handling code
KVM: VMX: Fix undefined behaviour of EPT after reloading kvm-intel.ko
KVM: VMX: Fix bypass_guest_pf enabling when disabling EPT via module parameter
KVM: task switch: translate guest segment limit to virt-extension byte granular field
KVM: Avoid instruction emulation when event delivery is pending
KVM: task switch: use seg regs provided by subarch instead of reading from GDT
KVM: task switch: segment base is linear address
KVM: SVM: allow enabling/disabling NPT by reloading only the architecture module

+143 -138
+3 -2
arch/ia64/kvm/kvm-ia64.c
··· 125 PAGE_KERNEL)); 126 local_irq_save(saved_psr); 127 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); 128 if (slot < 0) 129 return; 130 - local_irq_restore(saved_psr); 131 132 spin_lock(&vp_lock); 133 status = ia64_pal_vp_init_env(kvm_vsa_base ? ··· 160 161 local_irq_save(saved_psr); 162 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); 163 if (slot < 0) 164 return; 165 - local_irq_restore(saved_psr); 166 167 status = ia64_pal_vp_exit_env(host_iva); 168 if (status) ··· 1253 uninit: 1254 kvm_vcpu_uninit(vcpu); 1255 fail: 1256 return r; 1257 } 1258
··· 125 PAGE_KERNEL)); 126 local_irq_save(saved_psr); 127 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); 128 + local_irq_restore(saved_psr); 129 if (slot < 0) 130 return; 131 132 spin_lock(&vp_lock); 133 status = ia64_pal_vp_init_env(kvm_vsa_base ? ··· 160 161 local_irq_save(saved_psr); 162 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); 163 + local_irq_restore(saved_psr); 164 if (slot < 0) 165 return; 166 167 status = ia64_pal_vp_exit_env(host_iva); 168 if (status) ··· 1253 uninit: 1254 kvm_vcpu_uninit(vcpu); 1255 fail: 1256 + local_irq_restore(psr); 1257 return r; 1258 } 1259
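
The ia64 change moves local_irq_restore() ahead of the early return after a failed ia64_itr_entry() and adds the missing restore on the vcpu-setup error path, so neither failure leaves interrupts disabled. The standalone C sketch below only illustrates that save/early-return/restore hazard; the fake_irq_* helpers and the failure flag are invented stand-ins for local_irq_save()/local_irq_restore().

/* Illustrative sketch, not kernel code: an early return between a save and
 * its matching restore is the bug class fixed above. */
#include <stdio.h>

static int irqs_enabled = 1;              /* stand-in for the CPU interrupt state */

static void fake_irq_save(int *flags)   { *flags = irqs_enabled; irqs_enabled = 0; }
static void fake_irq_restore(int flags) { irqs_enabled = flags; }

static int map_vmm_region(int fail)
{
	int flags;

	fake_irq_save(&flags);
	/* ... work that must run with interrupts off ... */
	fake_irq_restore(flags);          /* restore before any bail-out, as the patch does */
	if (fail)
		return -1;
	return 0;
}

int main(void)
{
	map_vmm_region(1);
	printf("irqs enabled after failed call: %d\n", irqs_enabled);
	return 0;
}
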
+3 -2
arch/powerpc/kvm/44x_tlb.c
··· 177 vcpu->arch.msr & MSR_PR); 178 } 179 180 - void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid) 181 { 182 unsigned int pid = asid & 0xff; 183 int i; ··· 192 if (!get_tlb_v(stlbe)) 193 continue; 194 195 - if (eaddr < get_tlb_eaddr(stlbe)) 196 continue; 197 198 if (eaddr > get_tlb_end(stlbe))
··· 177 vcpu->arch.msr & MSR_PR); 178 } 179 180 + void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 181 + gva_t eend, u32 asid) 182 { 183 unsigned int pid = asid & 0xff; 184 int i; ··· 191 if (!get_tlb_v(stlbe)) 192 continue; 193 194 + if (eend < get_tlb_eaddr(stlbe)) 195 continue; 196 197 if (eaddr > get_tlb_end(stlbe))
+1 -1
arch/powerpc/kvm/emulate.c
··· 137 if (tlbe->word0 & PPC44x_TLB_VALID) { 138 eaddr = get_tlb_eaddr(tlbe); 139 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; 140 - kvmppc_mmu_invalidate(vcpu, eaddr, asid); 141 } 142 143 switch (ws) {
··· 137 if (tlbe->word0 & PPC44x_TLB_VALID) { 138 eaddr = get_tlb_eaddr(tlbe); 139 asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; 140 + kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid); 141 } 142 143 switch (ws) {
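
Both powerpc hunks make invalidation range-based: the emulation path now passes get_tlb_eaddr() and get_tlb_end() of the guest entry, and kvmppc_mmu_invalidate() only skips a shadow entry when the invalidated range lies entirely before or after it, so every shadow mapping inside a large guest page gets dropped. A standalone C sketch of that overlap test; the structure and addresses are invented for the example.

/* Illustrative sketch, not kernel code: drop every shadow entry that the
 * invalidated guest range [eaddr, eend] overlaps, i.e. unless the range ends
 * before the entry starts or starts after the entry ends. */
#include <stdbool.h>
#include <stdio.h>

struct shadow_tlbe { unsigned long start, end; bool valid; };

static void invalidate_range(struct shadow_tlbe *tlb, int n,
                             unsigned long eaddr, unsigned long eend)
{
	for (int i = 0; i < n; i++) {
		if (!tlb[i].valid)
			continue;
		if (eend < tlb[i].start)    /* range ends before entry starts */
			continue;
		if (eaddr > tlb[i].end)     /* range starts after entry ends */
			continue;
		tlb[i].valid = false;       /* overlap: invalidate */
	}
}

int main(void)
{
	struct shadow_tlbe tlb[] = {
		{ 0x10000000, 0x10000fff, true },  /* 4 KiB shadow mapping outside the range */
		{ 0x20004000, 0x20004fff, true },  /* 4 KiB shadow slice of a 16 MiB guest page */
	};
	/* guest invalidates its 16 MiB page: every shadow slice inside it must go */
	invalidate_range(tlb, 2, 0x20000000, 0x20ffffff);
	printf("entry0=%d entry1=%d\n", tlb[0].valid, tlb[1].valid);  /* 1 0 */
	return 0;
}
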
+33 -29
arch/s390/kvm/gaccess.h
··· 18 #include <asm/uaccess.h> 19 20 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, 21 - u64 guestaddr) 22 { 23 - u64 prefix = vcpu->arch.sie_block->prefix; 24 - u64 origin = vcpu->kvm->arch.guest_origin; 25 - u64 memsize = vcpu->kvm->arch.guest_memsize; 26 27 if (guestaddr < 2 * PAGE_SIZE) 28 guestaddr += prefix; ··· 37 return (void __user *) guestaddr; 38 } 39 40 - static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr, 41 u64 *result) 42 { 43 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 47 if (IS_ERR((void __force *) uptr)) 48 return PTR_ERR((void __force *) uptr); 49 50 - return get_user(*result, (u64 __user *) uptr); 51 } 52 53 - static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr, 54 u32 *result) 55 { 56 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 63 return get_user(*result, (u32 __user *) uptr); 64 } 65 66 - static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr, 67 u16 *result) 68 { 69 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 76 return get_user(*result, (u16 __user *) uptr); 77 } 78 79 - static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr, 80 u8 *result) 81 { 82 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 87 return get_user(*result, (u8 __user *) uptr); 88 } 89 90 - static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr, 91 u64 value) 92 { 93 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 100 return put_user(value, (u64 __user *) uptr); 101 } 102 103 - static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr, 104 u32 value) 105 { 106 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 113 return put_user(value, (u32 __user *) uptr); 114 } 115 116 - static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr, 117 u16 value) 118 { 119 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 126 return put_user(value, (u16 __user *) uptr); 127 } 128 129 - static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr, 130 u8 value) 131 { 132 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 138 } 139 140 141 - static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest, 142 const void *from, unsigned long n) 143 { 144 int rc; ··· 154 return 0; 155 } 156 157 - static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest, 158 const void *from, unsigned long n) 159 { 160 - u64 prefix = vcpu->arch.sie_block->prefix; 161 - u64 origin = vcpu->kvm->arch.guest_origin; 162 - u64 memsize = vcpu->kvm->arch.guest_memsize; 163 164 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) 165 goto slowpath; ··· 190 } 191 192 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, 193 - u64 guestsrc, unsigned long n) 194 { 195 int rc; 196 unsigned long i; ··· 206 } 207 208 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, 209 - u64 guestsrc, unsigned long n) 210 { 211 - u64 prefix = vcpu->arch.sie_block->prefix; 212 - u64 origin = vcpu->kvm->arch.guest_origin; 213 - u64 memsize = vcpu->kvm->arch.guest_memsize; 214 215 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) 216 goto slowpath; ··· 240 return __copy_from_guest_slow(vcpu, to, guestsrc, n); 241 } 242 243 - static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest, 244 const void *from, unsigned long n) 245 { 246 - u64 origin = vcpu->kvm->arch.guest_origin; 247 - u64 memsize = 
vcpu->kvm->arch.guest_memsize; 248 249 if (guestdest + n > memsize) 250 return -EFAULT; ··· 259 } 260 261 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, 262 - u64 guestsrc, unsigned long n) 263 { 264 - u64 origin = vcpu->kvm->arch.guest_origin; 265 - u64 memsize = vcpu->kvm->arch.guest_memsize; 266 267 if (guestsrc + n > memsize) 268 return -EFAULT;
··· 18 #include <asm/uaccess.h> 19 20 static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, 21 + unsigned long guestaddr) 22 { 23 + unsigned long prefix = vcpu->arch.sie_block->prefix; 24 + unsigned long origin = vcpu->kvm->arch.guest_origin; 25 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 26 27 if (guestaddr < 2 * PAGE_SIZE) 28 guestaddr += prefix; ··· 37 return (void __user *) guestaddr; 38 } 39 40 + static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, 41 u64 *result) 42 { 43 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 47 if (IS_ERR((void __force *) uptr)) 48 return PTR_ERR((void __force *) uptr); 49 50 + return get_user(*result, (unsigned long __user *) uptr); 51 } 52 53 + static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr, 54 u32 *result) 55 { 56 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 63 return get_user(*result, (u32 __user *) uptr); 64 } 65 66 + static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr, 67 u16 *result) 68 { 69 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 76 return get_user(*result, (u16 __user *) uptr); 77 } 78 79 + static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, 80 u8 *result) 81 { 82 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 87 return get_user(*result, (u8 __user *) uptr); 88 } 89 90 + static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, 91 u64 value) 92 { 93 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 100 return put_user(value, (u64 __user *) uptr); 101 } 102 103 + static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr, 104 u32 value) 105 { 106 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 113 return put_user(value, (u32 __user *) uptr); 114 } 115 116 + static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr, 117 u16 value) 118 { 119 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 126 return put_user(value, (u16 __user *) uptr); 127 } 128 129 + static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, 130 u8 value) 131 { 132 void __user *uptr = __guestaddr_to_user(vcpu, guestaddr); ··· 138 } 139 140 141 + static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, 142 + unsigned long guestdest, 143 const void *from, unsigned long n) 144 { 145 int rc; ··· 153 return 0; 154 } 155 156 + static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, 157 const void *from, unsigned long n) 158 { 159 + unsigned long prefix = vcpu->arch.sie_block->prefix; 160 + unsigned long origin = vcpu->kvm->arch.guest_origin; 161 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 162 163 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) 164 goto slowpath; ··· 189 } 190 191 static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, 192 + unsigned long guestsrc, 193 + unsigned long n) 194 { 195 int rc; 196 unsigned long i; ··· 204 } 205 206 static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, 207 + unsigned long guestsrc, unsigned long n) 208 { 209 + unsigned long prefix = vcpu->arch.sie_block->prefix; 210 + unsigned long origin = vcpu->kvm->arch.guest_origin; 211 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 212 213 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) 214 goto slowpath; ··· 238 return __copy_from_guest_slow(vcpu, to, guestsrc, 
n); 239 } 240 241 + static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, 242 + unsigned long guestdest, 243 const void *from, unsigned long n) 244 { 245 + unsigned long origin = vcpu->kvm->arch.guest_origin; 246 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 247 248 if (guestdest + n > memsize) 249 return -EFAULT; ··· 256 } 257 258 static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, 259 + unsigned long guestsrc, 260 + unsigned long n) 261 { 262 + unsigned long origin = vcpu->kvm->arch.guest_origin; 263 + unsigned long memsize = vcpu->kvm->arch.guest_memsize; 264 265 if (guestsrc + n > memsize) 266 return -EFAULT;
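
The gaccess.h change is purely a retyping of guest addresses from u64 to unsigned long, but for orientation the translation it implements (only partly visible above) is: s390 prefixing swaps the first two pages of absolute storage with the per-CPU prefix area, and the adjusted address is then bounds-checked against the guest memory size and offset by the origin of the userspace mapping. The sketch below is a rough standalone rendering of that idea, not the kernel header; the reverse-prefix branch and the exact bounds comparison are assumptions, and every value in main() is invented.

/* Illustrative sketch, not the kernel header. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long guestaddr_to_user(unsigned long guestaddr,
                                       unsigned long prefix,
                                       unsigned long origin,
                                       unsigned long memsize)
{
	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;    /* low lowcore -> this CPU's prefix area */
	else if (guestaddr >= prefix && guestaddr < prefix + 2 * PAGE_SIZE)
		guestaddr -= prefix;    /* prefix area -> low lowcore (assumed branch) */

	if (guestaddr >= memsize)
		return 0;               /* out of bounds; the kernel returns an ERR_PTR instead */

	return guestaddr + origin;      /* offset into the guest's userspace mapping */
}

int main(void)
{
	unsigned long prefix = 0x20000, origin = 0x80000000UL, memsize = 0x4000000UL;

	printf("%#lx\n", guestaddr_to_user(0x100,   prefix, origin, memsize));
	printf("%#lx\n", guestaddr_to_user(0x20100, prefix, origin, memsize));
	return 0;
}
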
+10 -4
arch/s390/kvm/intercept.c
··· 20 #include "kvm-s390.h" 21 #include "gaccess.h" 22 23 - static int handle_lctg(struct kvm_vcpu *vcpu) 24 { 25 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 26 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; ··· 30 u64 useraddr; 31 int reg, rc; 32 33 - vcpu->stat.instruction_lctg++; 34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) 35 return -ENOTSUPP; 36 ··· 38 if (base2) 39 useraddr += vcpu->arch.guest_gprs[base2]; 40 41 reg = reg1; 42 43 - VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, 44 disp2); 45 46 do { ··· 77 if (base2) 78 useraddr += vcpu->arch.guest_gprs[base2]; 79 80 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, 81 disp2); 82 ··· 105 [0xae] = kvm_s390_handle_sigp, 106 [0xb2] = kvm_s390_handle_priv, 107 [0xb7] = handle_lctl, 108 - [0xeb] = handle_lctg, 109 }; 110 111 static int handle_noop(struct kvm_vcpu *vcpu)
··· 20 #include "kvm-s390.h" 21 #include "gaccess.h" 22 23 + static int handle_lctlg(struct kvm_vcpu *vcpu) 24 { 25 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; 26 int reg3 = vcpu->arch.sie_block->ipa & 0x000f; ··· 30 u64 useraddr; 31 int reg, rc; 32 33 + vcpu->stat.instruction_lctlg++; 34 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) 35 return -ENOTSUPP; 36 ··· 38 if (base2) 39 useraddr += vcpu->arch.guest_gprs[base2]; 40 41 + if (useraddr & 7) 42 + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 43 + 44 reg = reg1; 45 46 + VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, 47 disp2); 48 49 do { ··· 74 if (base2) 75 useraddr += vcpu->arch.guest_gprs[base2]; 76 77 + if (useraddr & 3) 78 + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 79 + 80 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, 81 disp2); 82 ··· 99 [0xae] = kvm_s390_handle_sigp, 100 [0xb2] = kvm_s390_handle_priv, 101 [0xb7] = handle_lctl, 102 + [0xeb] = handle_lctlg, 103 }; 104 105 static int handle_noop(struct kvm_vcpu *vcpu)
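
The intercept.c change renames the handler to the real instruction name (lctlg) and adds operand-alignment checks: an lctlg operand must be doubleword aligned and an lctl operand word aligned, otherwise a specification exception is injected into the guest. A tiny standalone sketch of the mask test itself (helper names invented):

/* Illustrative sketch, not kernel code: "addr & 7" is non-zero exactly when
 * addr is not a multiple of 8, "addr & 3" likewise for 4. */
#include <stdbool.h>
#include <stdio.h>

static bool lctlg_operand_aligned(unsigned long addr) { return (addr & 7) == 0; }
static bool lctl_operand_aligned(unsigned long addr)  { return (addr & 3) == 0; }

int main(void)
{
	printf("%d %d\n", lctlg_operand_aligned(0x1000), lctlg_operand_aligned(0x1004)); /* 1 0 */
	printf("%d %d\n", lctl_operand_aligned(0x1004),  lctl_operand_aligned(0x1002));  /* 1 0 */
	return 0;
}
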
+7 -14
arch/s390/kvm/interrupt.c
··· 13 #include <asm/lowcore.h> 14 #include <asm/uaccess.h> 15 #include <linux/kvm_host.h> 16 #include "kvm-s390.h" 17 #include "gaccess.h" 18 ··· 247 default: 248 BUG(); 249 } 250 - 251 if (exception) { 252 - VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" 253 - " interrupt"); 254 - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 255 - if (inti->type == KVM_S390_PROGRAM_INT) { 256 - printk(KERN_WARNING "kvm: recursive program check\n"); 257 - BUG(); 258 - } 259 } 260 } 261 ··· 273 __LC_EXT_NEW_PSW, sizeof(psw_t)); 274 if (rc == -EFAULT) 275 exception = 1; 276 - 277 if (exception) { 278 - VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \ 279 - " ckc interrupt"); 280 - kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 281 - return 0; 282 } 283 - 284 return 1; 285 } 286
··· 13 #include <asm/lowcore.h> 14 #include <asm/uaccess.h> 15 #include <linux/kvm_host.h> 16 + #include <linux/signal.h> 17 #include "kvm-s390.h" 18 #include "gaccess.h" 19 ··· 246 default: 247 BUG(); 248 } 249 if (exception) { 250 + printk("kvm: The guest lowcore is not mapped during interrupt " 251 + "delivery, killing userspace\n"); 252 + do_exit(SIGKILL); 253 } 254 } 255 ··· 277 __LC_EXT_NEW_PSW, sizeof(psw_t)); 278 if (rc == -EFAULT) 279 exception = 1; 280 if (exception) { 281 + printk("kvm: The guest lowcore is not mapped during interrupt " 282 + "delivery, killing userspace\n"); 283 + do_exit(SIGKILL); 284 } 285 return 1; 286 } 287
+7 -2
arch/s390/kvm/kvm-s390.c
··· 39 { "exit_instruction", VCPU_STAT(exit_instruction) }, 40 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 41 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 42 - { "instruction_lctg", VCPU_STAT(instruction_lctg) }, 43 { "instruction_lctl", VCPU_STAT(instruction_lctl) }, 44 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, 45 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, ··· 112 113 int kvm_dev_ioctl_check_extension(long ext) 114 { 115 - return 0; 116 } 117 118 /* Section: vm related */
··· 39 { "exit_instruction", VCPU_STAT(exit_instruction) }, 40 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 41 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 42 + { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, 43 { "instruction_lctl", VCPU_STAT(instruction_lctl) }, 44 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, 45 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, ··· 112 113 int kvm_dev_ioctl_check_extension(long ext) 114 { 115 + switch (ext) { 116 + case KVM_CAP_USER_MEMORY: 117 + return 1; 118 + default: 119 + return 0; 120 + } 121 } 122 123 /* Section: vm related */
+3 -2
arch/s390/kvm/sigp.c
··· 43 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL 44 45 46 - static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg) 47 { 48 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 49 int rc; ··· 168 } 169 170 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 171 - u64 *reg) 172 { 173 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 174 struct kvm_s390_local_interrupt *li;
··· 43 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL 44 45 46 + static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, 47 + unsigned long *reg) 48 { 49 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 50 int rc; ··· 167 } 168 169 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 170 + unsigned long *reg) 171 { 172 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; 173 struct kvm_s390_local_interrupt *li;
+7
arch/x86/kvm/mmu.c
··· 1814 spin_unlock(&vcpu->kvm->mmu_lock); 1815 return r; 1816 } 1817 1818 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 1819 { ··· 1870 tdp_enabled = true; 1871 } 1872 EXPORT_SYMBOL_GPL(kvm_enable_tdp); 1873 1874 static void free_mmu_pages(struct kvm_vcpu *vcpu) 1875 {
··· 1814 spin_unlock(&vcpu->kvm->mmu_lock); 1815 return r; 1816 } 1817 + EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); 1818 1819 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 1820 { ··· 1869 tdp_enabled = true; 1870 } 1871 EXPORT_SYMBOL_GPL(kvm_enable_tdp); 1872 + 1873 + void kvm_disable_tdp(void) 1874 + { 1875 + tdp_enabled = false; 1876 + } 1877 + EXPORT_SYMBOL_GPL(kvm_disable_tdp); 1878 1879 static void free_mmu_pages(struct kvm_vcpu *vcpu) 1880 {
+8 -2
arch/x86/kvm/svm.c
··· 453 if (npt_enabled) { 454 printk(KERN_INFO "kvm: Nested Paging enabled\n"); 455 kvm_enable_tdp(); 456 - } 457 458 return 0; 459 ··· 1008 struct kvm *kvm = svm->vcpu.kvm; 1009 u64 fault_address; 1010 u32 error_code; 1011 1012 if (!irqchip_in_kernel(kvm) && 1013 - is_external_interrupt(exit_int_info)) 1014 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); 1015 1016 fault_address = svm->vmcb->control.exit_info_2; 1017 error_code = svm->vmcb->control.exit_info_1; ··· 1028 (u32)fault_address, (u32)(fault_address >> 32), 1029 handler); 1030 1031 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); 1032 } 1033
··· 453 if (npt_enabled) { 454 printk(KERN_INFO "kvm: Nested Paging enabled\n"); 455 kvm_enable_tdp(); 456 + } else 457 + kvm_disable_tdp(); 458 459 return 0; 460 ··· 1007 struct kvm *kvm = svm->vcpu.kvm; 1008 u64 fault_address; 1009 u32 error_code; 1010 + bool event_injection = false; 1011 1012 if (!irqchip_in_kernel(kvm) && 1013 + is_external_interrupt(exit_int_info)) { 1014 + event_injection = true; 1015 push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); 1016 + } 1017 1018 fault_address = svm->vmcb->control.exit_info_2; 1019 error_code = svm->vmcb->control.exit_info_1; ··· 1024 (u32)fault_address, (u32)(fault_address >> 32), 1025 handler); 1026 1027 + if (event_injection) 1028 + kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address); 1029 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); 1030 } 1031
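
Together with the kvm_disable_tdp() export added in mmu.c, this makes the vendor module set the shared tdp_enabled state on every hardware_setup(), not only when NPT is enabled, so reloading just kvm-amd.ko (or kvm-intel.ko, below) with a different npt/ept parameter cannot inherit a stale value from the previous load; the event_injection part of the same hunk belongs to the separate "avoid instruction emulation when event delivery is pending" fix. A standalone sketch of the enable/disable pattern, with invented names:

/* Illustrative sketch, not kernel code: the shared flag lives in the core
 * module and is now written unconditionally by the vendor module's setup. */
#include <stdbool.h>
#include <stdio.h>

static bool tdp_enabled;                      /* stand-in for the flag in kvm.ko's mmu.c */

static void enable_tdp_sketch(void)  { tdp_enabled = true;  }
static void disable_tdp_sketch(void) { tdp_enabled = false; }

/* stand-in for svm.c/vmx.c hardware_setup() */
static void vendor_hardware_setup(bool npt_requested, bool npt_supported)
{
	if (npt_requested && npt_supported)
		enable_tdp_sketch();
	else
		disable_tdp_sketch();         /* the newly added else branch */
}

int main(void)
{
	vendor_hardware_setup(true, true);
	printf("tdp_enabled=%d\n", tdp_enabled);
	vendor_hardware_setup(false, true);   /* "reload" the vendor module with npt=0 */
	printf("tdp_enabled=%d\n", tdp_enabled);
	return 0;
}
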
+12 -10
arch/x86/kvm/vmx.c
··· 2298 cr2 = vmcs_readl(EXIT_QUALIFICATION); 2299 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, 2300 (u32)((u64)cr2 >> 32), handler); 2301 return kvm_mmu_page_fault(vcpu, cr2, error_code); 2302 } 2303 ··· 3118 return ERR_PTR(-ENOMEM); 3119 3120 allocate_vpid(vmx); 3121 - if (id == 0 && vm_need_ept()) { 3122 - kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | 3123 - VMX_EPT_WRITABLE_MASK | 3124 - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); 3125 - kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, 3126 - VMX_EPT_FAKE_DIRTY_MASK, 0ull, 3127 - VMX_EPT_EXECUTABLE_MASK); 3128 - kvm_enable_tdp(); 3129 - } 3130 3131 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); 3132 if (err) ··· 3296 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); 3297 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); 3298 3299 - if (cpu_has_vmx_ept()) 3300 bypass_guest_pf = 0; 3301 3302 if (bypass_guest_pf) 3303 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
··· 2298 cr2 = vmcs_readl(EXIT_QUALIFICATION); 2299 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, 2300 (u32)((u64)cr2 >> 32), handler); 2301 + if (vect_info & VECTORING_INFO_VALID_MASK) 2302 + kvm_mmu_unprotect_page_virt(vcpu, cr2); 2303 return kvm_mmu_page_fault(vcpu, cr2, error_code); 2304 } 2305 ··· 3116 return ERR_PTR(-ENOMEM); 3117 3118 allocate_vpid(vmx); 3119 3120 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); 3121 if (err) ··· 3303 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); 3304 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); 3305 3306 + if (vm_need_ept()) { 3307 bypass_guest_pf = 0; 3308 + kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | 3309 + VMX_EPT_WRITABLE_MASK | 3310 + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); 3311 + kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, 3312 + VMX_EPT_FAKE_DIRTY_MASK, 0ull, 3313 + VMX_EPT_EXECUTABLE_MASK); 3314 + kvm_enable_tdp(); 3315 + } else 3316 + kvm_disable_tdp(); 3317 3318 if (bypass_guest_pf) 3319 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
+43 -66
arch/x86/kvm/x86.c
··· 3184 kvm_desct->base |= seg_desc->base2 << 24; 3185 kvm_desct->limit = seg_desc->limit0; 3186 kvm_desct->limit |= seg_desc->limit << 16; 3187 kvm_desct->selector = selector; 3188 kvm_desct->type = seg_desc->type; 3189 kvm_desct->present = seg_desc->p; ··· 3227 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3228 struct desc_struct *seg_desc) 3229 { 3230 struct descriptor_table dtable; 3231 u16 index = selector >> 3; 3232 ··· 3237 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); 3238 return 1; 3239 } 3240 - return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8); 3241 } 3242 3243 /* allowed just for 8 bytes segments */ 3244 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3245 struct desc_struct *seg_desc) 3246 { 3247 struct descriptor_table dtable; 3248 u16 index = selector >> 3; 3249 ··· 3254 3255 if (dtable.limit < index * 8 + 7) 3256 return 1; 3257 - return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8); 3258 } 3259 3260 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, ··· 3268 base_addr |= (seg_desc->base1 << 16); 3269 base_addr |= (seg_desc->base2 << 24); 3270 3271 - return base_addr; 3272 - } 3273 - 3274 - static int load_tss_segment32(struct kvm_vcpu *vcpu, 3275 - struct desc_struct *seg_desc, 3276 - struct tss_segment_32 *tss) 3277 - { 3278 - u32 base_addr; 3279 - 3280 - base_addr = get_tss_base_addr(vcpu, seg_desc); 3281 - 3282 - return kvm_read_guest(vcpu->kvm, base_addr, tss, 3283 - sizeof(struct tss_segment_32)); 3284 - } 3285 - 3286 - static int save_tss_segment32(struct kvm_vcpu *vcpu, 3287 - struct desc_struct *seg_desc, 3288 - struct tss_segment_32 *tss) 3289 - { 3290 - u32 base_addr; 3291 - 3292 - base_addr = get_tss_base_addr(vcpu, seg_desc); 3293 - 3294 - return kvm_write_guest(vcpu->kvm, base_addr, tss, 3295 - sizeof(struct tss_segment_32)); 3296 - } 3297 - 3298 - static int load_tss_segment16(struct kvm_vcpu *vcpu, 3299 - struct desc_struct *seg_desc, 3300 - struct tss_segment_16 *tss) 3301 - { 3302 - u32 base_addr; 3303 - 3304 - base_addr = get_tss_base_addr(vcpu, seg_desc); 3305 - 3306 - return kvm_read_guest(vcpu->kvm, base_addr, tss, 3307 - sizeof(struct tss_segment_16)); 3308 - } 3309 - 3310 - static int save_tss_segment16(struct kvm_vcpu *vcpu, 3311 - struct desc_struct *seg_desc, 3312 - struct tss_segment_16 *tss) 3313 - { 3314 - u32 base_addr; 3315 - 3316 - base_addr = get_tss_base_addr(vcpu, seg_desc); 3317 - 3318 - return kvm_write_guest(vcpu->kvm, base_addr, tss, 3319 - sizeof(struct tss_segment_16)); 3320 } 3321 3322 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) ··· 3428 } 3429 3430 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, 3431 - struct desc_struct *cseg_desc, 3432 struct desc_struct *nseg_desc) 3433 { 3434 struct tss_segment_16 tss_segment_16; 3435 int ret = 0; 3436 3437 - if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16)) 3438 goto out; 3439 3440 save_state_to_tss16(vcpu, &tss_segment_16); 3441 - save_tss_segment16(vcpu, cseg_desc, &tss_segment_16); 3442 3443 - if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16)) 3444 goto out; 3445 if (load_state_from_tss16(vcpu, &tss_segment_16)) 3446 goto out; 3447 ··· 3457 } 3458 3459 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, 3460 - struct desc_struct *cseg_desc, 3461 struct desc_struct *nseg_desc) 3462 { 3463 struct tss_segment_32 tss_segment_32; 3464 int ret = 0; 3465 3466 - if (load_tss_segment32(vcpu, cseg_desc, 
&tss_segment_32)) 3467 goto out; 3468 3469 save_state_to_tss32(vcpu, &tss_segment_32); 3470 - save_tss_segment32(vcpu, cseg_desc, &tss_segment_32); 3471 3472 - if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32)) 3473 goto out; 3474 if (load_state_from_tss32(vcpu, &tss_segment_32)) 3475 goto out; 3476 ··· 3491 struct desc_struct cseg_desc; 3492 struct desc_struct nseg_desc; 3493 int ret = 0; 3494 3495 - kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR); 3496 3497 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) 3498 goto out; 3499 3500 - if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc)) 3501 goto out; 3502 - 3503 3504 if (reason != TASK_SWITCH_IRET) { 3505 int cpl; ··· 3522 3523 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 3524 cseg_desc.type &= ~(1 << 1); //clear the B flag 3525 - save_guest_segment_descriptor(vcpu, tr_seg.selector, 3526 - &cseg_desc); 3527 } 3528 3529 if (reason == TASK_SWITCH_IRET) { ··· 3534 kvm_x86_ops->cache_regs(vcpu); 3535 3536 if (nseg_desc.type & 8) 3537 - ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc, 3538 &nseg_desc); 3539 else 3540 - ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc, 3541 &nseg_desc); 3542 3543 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
··· 3184 kvm_desct->base |= seg_desc->base2 << 24; 3185 kvm_desct->limit = seg_desc->limit0; 3186 kvm_desct->limit |= seg_desc->limit << 16; 3187 + if (seg_desc->g) { 3188 + kvm_desct->limit <<= 12; 3189 + kvm_desct->limit |= 0xfff; 3190 + } 3191 kvm_desct->selector = selector; 3192 kvm_desct->type = seg_desc->type; 3193 kvm_desct->present = seg_desc->p; ··· 3223 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3224 struct desc_struct *seg_desc) 3225 { 3226 + gpa_t gpa; 3227 struct descriptor_table dtable; 3228 u16 index = selector >> 3; 3229 ··· 3232 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); 3233 return 1; 3234 } 3235 + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base); 3236 + gpa += index * 8; 3237 + return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8); 3238 } 3239 3240 /* allowed just for 8 bytes segments */ 3241 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 3242 struct desc_struct *seg_desc) 3243 { 3244 + gpa_t gpa; 3245 struct descriptor_table dtable; 3246 u16 index = selector >> 3; 3247 ··· 3246 3247 if (dtable.limit < index * 8 + 7) 3248 return 1; 3249 + gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base); 3250 + gpa += index * 8; 3251 + return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8); 3252 } 3253 3254 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, ··· 3258 base_addr |= (seg_desc->base1 << 16); 3259 base_addr |= (seg_desc->base2 << 24); 3260 3261 + return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr); 3262 } 3263 3264 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg) ··· 3466 } 3467 3468 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector, 3469 + u32 old_tss_base, 3470 struct desc_struct *nseg_desc) 3471 { 3472 struct tss_segment_16 tss_segment_16; 3473 int ret = 0; 3474 3475 + if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16, 3476 + sizeof tss_segment_16)) 3477 goto out; 3478 3479 save_state_to_tss16(vcpu, &tss_segment_16); 3480 3481 + if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16, 3482 + sizeof tss_segment_16)) 3483 goto out; 3484 + 3485 + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), 3486 + &tss_segment_16, sizeof tss_segment_16)) 3487 + goto out; 3488 + 3489 if (load_state_from_tss16(vcpu, &tss_segment_16)) 3490 goto out; 3491 ··· 3489 } 3490 3491 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector, 3492 + u32 old_tss_base, 3493 struct desc_struct *nseg_desc) 3494 { 3495 struct tss_segment_32 tss_segment_32; 3496 int ret = 0; 3497 3498 + if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32, 3499 + sizeof tss_segment_32)) 3500 goto out; 3501 3502 save_state_to_tss32(vcpu, &tss_segment_32); 3503 3504 + if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32, 3505 + sizeof tss_segment_32)) 3506 goto out; 3507 + 3508 + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc), 3509 + &tss_segment_32, sizeof tss_segment_32)) 3510 + goto out; 3511 + 3512 if (load_state_from_tss32(vcpu, &tss_segment_32)) 3513 goto out; 3514 ··· 3517 struct desc_struct cseg_desc; 3518 struct desc_struct nseg_desc; 3519 int ret = 0; 3520 + u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); 3521 + u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); 3522 3523 + old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base); 3524 3525 + /* FIXME: Handle errors. Failure to read either TSS or their 3526 + * descriptors should generate a pagefault. 
3527 + */ 3528 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc)) 3529 goto out; 3530 3531 + if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc)) 3532 goto out; 3533 3534 if (reason != TASK_SWITCH_IRET) { 3535 int cpl; ··· 3544 3545 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { 3546 cseg_desc.type &= ~(1 << 1); //clear the B flag 3547 + save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc); 3548 } 3549 3550 if (reason == TASK_SWITCH_IRET) { ··· 3557 kvm_x86_ops->cache_regs(vcpu); 3558 3559 if (nseg_desc.type & 8) 3560 + ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base, 3561 &nseg_desc); 3562 else 3563 + ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base, 3564 &nseg_desc); 3565 3566 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
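
The first x86.c hunk is the "byte granular" fix from the shortlog: a segment descriptor's 20-bit limit counts 4 KiB pages when the granularity bit is set, so it has to be expanded to bytes before being handed to the virtualization extensions' segment fields. A standalone sketch of just that expansion (helper name invented):

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

static unsigned int byte_granular_limit(unsigned int raw_limit, int g_bit)
{
	if (g_bit)
		return (raw_limit << 12) | 0xfff;   /* pages -> bytes, inclusive limit */
	return raw_limit;
}

int main(void)
{
	/* raw limit 0xfffff with G=1 describes a full 4 GiB segment */
	printf("%#x\n", byte_granular_limit(0xfffff, 1));  /* 0xffffffff */
	printf("%#x\n", byte_granular_limit(0xfffff, 0));  /* 0xfffff */
	return 0;
}
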
+1 -1
drivers/s390/kvm/Makefile
··· 6 # it under the terms of the GNU General Public License (version 2 only) 7 # as published by the Free Software Foundation. 8 9 - obj-$(CONFIG_VIRTIO) += kvm_virtio.o
··· 6 # it under the terms of the GNU General Public License (version 2 only) 7 # as published by the Free Software Foundation. 8 9 + obj-$(CONFIG_S390_GUEST) += kvm_virtio.o
+2 -1
include/asm-powerpc/kvm_ppc.h
··· 61 62 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, 63 u64 asid, u32 flags); 64 - extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid); 65 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); 66 67 extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
··· 61 62 extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, 63 u64 asid, u32 flags); 64 + extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, 65 + gva_t eend, u32 asid); 66 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); 67 68 extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+2 -2
include/asm-s390/kvm_host.h
··· 111 u32 exit_validity; 112 u32 exit_instruction; 113 u32 instruction_lctl; 114 - u32 instruction_lctg; 115 u32 exit_program_interruption; 116 u32 exit_instr_and_program; 117 u32 deliver_emergency_signal; ··· 231 struct kvm_s390_float_interrupt float_int; 232 }; 233 234 - extern int sie64a(struct kvm_s390_sie_block *, __u64 *); 235 #endif
··· 111 u32 exit_validity; 112 u32 exit_instruction; 113 u32 instruction_lctl; 114 + u32 instruction_lctlg; 115 u32 exit_program_interruption; 116 u32 exit_instr_and_program; 117 u32 deliver_emergency_signal; ··· 231 struct kvm_s390_float_interrupt float_int; 232 }; 233 234 + extern int sie64a(struct kvm_s390_sie_block *, unsigned long *); 235 #endif
+1
include/asm-x86/kvm_host.h
··· 556 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); 557 558 void kvm_enable_tdp(void); 559 560 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 561 int complete_pio(struct kvm_vcpu *vcpu);
··· 556 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code); 557 558 void kvm_enable_tdp(void); 559 + void kvm_disable_tdp(void); 560 561 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); 562 int complete_pio(struct kvm_vcpu *vcpu);