KVM: x86: Convert vapic synchronization to _cached functions (CVE-2013-6368)

In kvm_lapic_sync_from_vapic and kvm_lapic_sync_to_vapic there is the
potential to corrupt kernel memory if userspace provides an address that
is at the end of a page. This patch converts those functions to use
kvm_write_guest_cached and kvm_read_guest_cached. It also checks the
vapic_address specified by userspace during ioctl processing and returns
an error to userspace if the address is not a valid GPA.

This is generally not guest triggerable, because the required write is
done by firmware that runs before the guest. Also, it only affects AMD
processors and oldish Intel that do not have the FlexPriority feature
(unless you disable FlexPriority, of course; then newer processors are
also affected).

Fixes: b93463aa59d6 ('KVM: Accelerated apic support')

Reported-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by Andy Honig and committed by Paolo Bonzini fda4e2e8 b963a22e

+18 -53
+15 -12
arch/x86/kvm/lapic.c
··· 1692 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) 1693 { 1694 u32 data; 1695 - void *vapic; 1696 1697 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) 1698 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); ··· 1699 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) 1700 return; 1701 1702 - vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1703 - data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); 1704 - kunmap_atomic(vapic); 1705 1706 apic_set_tpr(vcpu->arch.apic, data & 0xff); 1707 } ··· 1736 u32 data, tpr; 1737 int max_irr, max_isr; 1738 struct kvm_lapic *apic = vcpu->arch.apic; 1739 - void *vapic; 1740 1741 apic_sync_pv_eoi_to_guest(vcpu, apic); 1742 ··· 1751 max_isr = 0; 1752 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); 1753 1754 - vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1755 - *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; 1756 - kunmap_atomic(vapic); 1757 } 1758 1759 - void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) 1760 { 1761 - vcpu->arch.apic->vapic_addr = vapic_addr; 1762 - if (vapic_addr) 1763 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1764 - else 1765 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1766 } 1767 1768 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
··· 1692 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) 1693 { 1694 u32 data; 1695 1696 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) 1697 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); ··· 1700 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) 1701 return; 1702 1703 + kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, 1704 + sizeof(u32)); 1705 1706 apic_set_tpr(vcpu->arch.apic, data & 0xff); 1707 } ··· 1738 u32 data, tpr; 1739 int max_irr, max_isr; 1740 struct kvm_lapic *apic = vcpu->arch.apic; 1741 1742 apic_sync_pv_eoi_to_guest(vcpu, apic); 1743 ··· 1754 max_isr = 0; 1755 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); 1756 1757 + kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, 1758 + sizeof(u32)); 1759 } 1760 1761 + int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) 1762 { 1763 + if (vapic_addr) { 1764 + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, 1765 + &vcpu->arch.apic->vapic_cache, 1766 + vapic_addr, sizeof(u32))) 1767 + return -EINVAL; 1768 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1769 + } else { 1770 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); 1771 + } 1772 + 1773 + vcpu->arch.apic->vapic_addr = vapic_addr; 1774 + return 0; 1775 } 1776 1777 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+2 -2
arch/x86/kvm/lapic.h
··· 34 */ 35 void *regs; 36 gpa_t vapic_addr; 37 - struct page *vapic_page; 38 unsigned long pending_events; 39 unsigned int sipi_vector; 40 }; ··· 76 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); 77 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); 78 79 - void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); 80 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); 81 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); 82
··· 34 */ 35 void *regs; 36 gpa_t vapic_addr; 37 + struct gfn_to_hva_cache vapic_cache; 38 unsigned long pending_events; 39 unsigned int sipi_vector; 40 }; ··· 76 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset); 77 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector); 78 79 + int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); 80 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); 81 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); 82
+1 -39
arch/x86/kvm/x86.c
··· 3214 r = -EFAULT; 3215 if (copy_from_user(&va, argp, sizeof va)) 3216 goto out; 3217 - r = 0; 3218 - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3219 break; 3220 } 3221 case KVM_X86_SETUP_MCE: { ··· 5738 !kvm_event_needs_reinjection(vcpu); 5739 } 5740 5741 - static int vapic_enter(struct kvm_vcpu *vcpu) 5742 - { 5743 - struct kvm_lapic *apic = vcpu->arch.apic; 5744 - struct page *page; 5745 - 5746 - if (!apic || !apic->vapic_addr) 5747 - return 0; 5748 - 5749 - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 5750 - if (is_error_page(page)) 5751 - return -EFAULT; 5752 - 5753 - vcpu->arch.apic->vapic_page = page; 5754 - return 0; 5755 - } 5756 - 5757 - static void vapic_exit(struct kvm_vcpu *vcpu) 5758 - { 5759 - struct kvm_lapic *apic = vcpu->arch.apic; 5760 - int idx; 5761 - 5762 - if (!apic || !apic->vapic_addr) 5763 - return; 5764 - 5765 - idx = srcu_read_lock(&vcpu->kvm->srcu); 5766 - kvm_release_page_dirty(apic->vapic_page); 5767 - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); 5768 - srcu_read_unlock(&vcpu->kvm->srcu, idx); 5769 - } 5770 - 5771 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5772 { 5773 int max_irr, tpr; ··· 6038 struct kvm *kvm = vcpu->kvm; 6039 6040 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6041 - r = vapic_enter(vcpu); 6042 - if (r) { 6043 - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6044 - return r; 6045 - } 6046 6047 r = 1; 6048 while (r > 0) { ··· 6095 } 6096 6097 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6098 - 6099 - vapic_exit(vcpu); 6100 6101 return r; 6102 }
··· 3214 r = -EFAULT; 3215 if (copy_from_user(&va, argp, sizeof va)) 3216 goto out; 3217 + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3218 break; 3219 } 3220 case KVM_X86_SETUP_MCE: { ··· 5739 !kvm_event_needs_reinjection(vcpu); 5740 } 5741 5742 static void update_cr8_intercept(struct kvm_vcpu *vcpu) 5743 { 5744 int max_irr, tpr; ··· 6069 struct kvm *kvm = vcpu->kvm; 6070 6071 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 6072 6073 r = 1; 6074 while (r > 0) { ··· 6131 } 6132 6133 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 6134 6135 return r; 6136 }