Merge branch 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: PPC: BookE: Load the lower half of MSR
KVM: PPC: BookE: fix sleep with interrupts disabled
KVM: PPC: e500: Call kvm_vcpu_uninit() before kvmppc_e500_tlb_uninit().
PPC: KVM: Book E doesn't have __end_interrupts.
KVM: x86: Issue smp_call_function_many with preemption disabled
KVM: x86: fix information leak to userland
KVM: PPC: fix information leak to userland
KVM: MMU: fix rmap_remove on non present sptes
KVM: Write protect memory after slot swap

+21 -13
+1 -1
arch/powerpc/kernel/kvm.c
···
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 
+1 -1
arch/powerpc/kvm/booke_interrupts.S
···
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
 	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, VCPU_SHARED_MSR(r3)
+	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
+1 -1
arch/powerpc/kvm/e500.c
···
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
+1
arch/powerpc/kvm/powerpc.c
···
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
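Both "fix information leak to userland" patches in this pull follow the same pattern: a structure later handed to copy_to_user() is zeroed first, so compiler-inserted padding and unused fields cannot carry stale kernel stack contents into userspace. A minimal sketch of that pattern, using a hypothetical ioctl structure and handler rather than the actual KVM code:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct example_info {
	__u32 version;		/* 4 bytes of implicit padding may follow on 64-bit */
	__u64 features;
};

static long example_get_info(void __user *argp)
{
	struct example_info info;

	/* Zero everything, including padding bytes, before filling it in. */
	memset(&info, 0, sizeof(info));
	info.version = 1;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}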
-2
arch/powerpc/kvm/timing.c
···
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
···
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
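The timing.c hunks are the "fix sleep with interrupts disabled" change: mutex_lock() may sleep, which is not allowed between local_irq_disable() and local_irq_enable(), so the IRQ-off section is dropped and the vcpu mutex alone serializes the reset. A minimal sketch of the corrected locking pattern, with a hypothetical stats structure:

#include <linux/types.h>
#include <linux/mutex.h>

struct example_stats {
	struct mutex lock;
	u64 count;
};

static void example_reset_stats(struct example_stats *s)
{
	/*
	 * mutex_lock() can sleep, so it must not be wrapped in
	 * local_irq_disable()/local_irq_enable(); the mutex by itself
	 * keeps concurrent updaters out.
	 */
	mutex_lock(&s->lock);
	s->count = 0;
	mutex_unlock(&s->lock);
}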
+5 -4
arch/x86/kvm/mmu.c
···
 		}
 	}
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
···
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
+12 -4
arch/x86/kvm/x86.c
···
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
···
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
···
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
···
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
···
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
···
 		synchronize_srcu_expedited(&kvm->srcu);
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
+
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
···
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
···
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
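The wbinvd hunk reflects the general rule behind "Issue smp_call_function_many with preemption disabled": smp_call_function_many() must be called with preemption off, since it relies on the calling CPU staying fixed while it dispatches to the target mask. A minimal sketch of that pattern, with a hypothetical per-CPU callback:

#include <linux/smp.h>
#include <linux/preempt.h>

static void example_ipi(void *info)
{
	/* runs on every CPU in the mask, e.g. a cache or TLB flush */
}

static void example_run_on_cpus(const struct cpumask *mask)
{
	/* smp_call_function_many() requires preemption to be disabled. */
	preempt_disable();
	smp_call_function_many(mask, example_ipi, NULL, 1);
	preempt_enable();
}

The remaining x86.c hunks zero pad and reserved fields of structures copied to userspace (the same information-leak pattern sketched earlier) and reorder the dirty-log path so the slot is write-protected only after the new dirty bitmap is in place, per "Write protect memory after slot swap".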