Merge branch 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: BookE: Load the lower half of MSR
  KVM: PPC: BookE: fix sleep with interrupts disabled
  KVM: PPC: e500: Call kvm_vcpu_uninit() before kvmppc_e500_tlb_uninit().
  PPC: KVM: Book E doesn't have __end_interrupts.
  KVM: x86: Issue smp_call_function_many with preemption disabled
  KVM: x86: fix information leak to userland
  KVM: PPC: fix information leak to userland
  KVM: MMU: fix rmap_remove on non present sptes
  KVM: Write protect memory after slot swap

 7 files changed, 21 insertions(+), 13 deletions(-)
arch/powerpc/kernel/kvm.c (+1 -1)
···
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
arch/powerpc/kvm/booke_interrupts.S (+1 -1)
···
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
 	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, VCPU_SHARED_MSR(r3)
+	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
arch/powerpc/kvm/e500.c (+1 -1)
···
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
arch/powerpc/kvm/powerpc.c (+1)
···
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
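The two "fix information leak to userland" patches (this hunk and the memset additions in arch/x86/kvm/x86.c below) follow one pattern: a struct containing padding or reserved fields is filled in on the kernel stack and then copied to userspace in full, so any bytes that are not explicitly written carry stale stack contents out to the caller. A minimal sketch of that pattern, assuming an illustrative struct and handler (example_info and example_get_info are not in-tree names):

#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative layout with padding, standing in for structs such as
 * kvm_ppc_pvinfo, kvm_vcpu_events, kvm_debugregs or kvm_clock_data. */
struct example_info {
	__u32 value;
	__u32 pad[3];
};

static long example_get_info(void __user *argp)
{
	struct example_info info;

	/* Zero the whole struct first so padding and reserved fields
	 * cannot leak old kernel stack data to userspace. */
	memset(&info, 0, sizeof(info));
	info.value = 42;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}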
arch/powerpc/kvm/timing.c (-2)
···
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
···
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
arch/x86/kvm/mmu.c (+5 -4)
···
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
···
 	old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
arch/x86/kvm/x86.c (+12 -4)
···
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
···
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
···
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
···
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
···
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
···
 		synchronize_srcu_expedited(&kvm->srcu);
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
+
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
···
 	user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 	local_irq_enable();
 	user_ns.flags = 0;
+	memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 	r = -EFAULT;
 	if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
···
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
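The wbinvd hunk at the end reflects the rule behind "Issue smp_call_function_many with preemption disabled": smp_call_function_many() must only be called with preemption disabled, so the caller stays pinned to its CPU while the cross-CPU call is issued. A minimal sketch of that calling pattern, with illustrative helper names (example_ipi, example_kick_cpus are not in-tree):

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Illustrative IPI callback: runs on each CPU in the mask. */
static void example_ipi(void *unused)
{
	/* per-CPU work, e.g. a cache flush, would go here */
}

static void example_kick_cpus(const struct cpumask *cpus)
{
	/*
	 * smp_call_function_many() may only be called with preemption
	 * disabled, so pin this task to its current CPU for the call.
	 */
	preempt_disable();
	smp_call_function_many(cpus, example_ipi, NULL, 1);
	preempt_enable();
}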