Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: switch to kvm_get_dirty_log_protect

We now have a generic function that does most of the work of
kvm_vm_ioctl_get_dirty_log; now use it.

Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Mario Smarduch <m.smarduch@samsung.com>

authored by

Paolo Bonzini and committed by
Christoffer Dall
e108ff2f ba0513b5

+16 -64
-3
arch/x86/include/asm/kvm_host.h
@@ -821,9 +821,6 @@

 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
-				     struct kvm_memory_slot *slot,
-				     gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+1
arch/x86/kvm/Kconfig
@@ -39,6 +39,7 @@
 	select PERF_EVENTS
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
+	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_VFIO
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
+2 -2
arch/x86/kvm/mmu.c
@@ -1203,7 +1203,7 @@
 }

 /**
- * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * kvm_arch_mmu_write_protect_pt_masked - write protect selected PT level pages
  * @kvm: kvm instance
  * @slot: slot to protect
  * @gfn_offset: start of the BITS_PER_LONG pages we care about
@@ -1212,7 +1212,7 @@
  * Used when we do not need to care about huge page mappings: e.g. during dirty
  * logging we do not have any such mappings.
  */
-void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm,
 				     struct kvm_memory_slot *slot,
 				     gfn_t gfn_offset, unsigned long mask)
 {
+13 -59
arch/x86/kvm/x86.c
@@ -3748,83 +3748,37 @@
  * @kvm: kvm instance
  * @log: slot id and address to which we copy the log
  *
- * We need to keep it in mind that VCPU threads can write to the bitmap
- * concurrently. So, to avoid losing data, we keep the following order for
- * each bit:
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
+ * always flush the TLB (step 4) even if previous step failed and the dirty
+ * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
+ * does not preclude user space subsequent dirty log read. Flushing TLB ensures
+ * writes will be marked dirty for next log read.
  *
  * 1. Take a snapshot of the bit and clear it if needed.
  * 2. Write protect the corresponding page.
- * 3. Flush TLB's if needed.
- * 4. Copy the snapshot to the userspace.
- *
- * Between 2 and 3, the guest may write to the page using the remaining TLB
- * entry. This is not a problem because the page will be reported dirty at
- * step 4 using the snapshot taken before and step 3 ensures that successive
- * writes will be logged for the next call.
+ * 3. Copy the snapshot to the userspace.
+ * 4. Flush TLB's if needed.
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	int r;
-	struct kvm_memory_slot *memslot;
-	unsigned long n, i;
-	unsigned long *dirty_bitmap;
-	unsigned long *dirty_bitmap_buffer;
 	bool is_dirty = false;
+	int r;

 	mutex_lock(&kvm->slots_lock);

-	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
-		goto out;
-
-	memslot = id_to_memslot(kvm->memslots, log->slot);
-
-	dirty_bitmap = memslot->dirty_bitmap;
-	r = -ENOENT;
-	if (!dirty_bitmap)
-		goto out;
-
-	n = kvm_dirty_bitmap_bytes(memslot);
-
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
-	memset(dirty_bitmap_buffer, 0, n);
-
-	spin_lock(&kvm->mmu_lock);
-
-	for (i = 0; i < n / sizeof(long); i++) {
-		unsigned long mask;
-		gfn_t offset;
-
-		if (!dirty_bitmap[i])
-			continue;
-
-		is_dirty = true;
-
-		mask = xchg(&dirty_bitmap[i], 0);
-		dirty_bitmap_buffer[i] = mask;
-
-		offset = i * BITS_PER_LONG;
-		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
-	}
-
-	spin_unlock(&kvm->mmu_lock);
-
-	/* See the comments in kvm_mmu_slot_remove_write_access(). */
-	lockdep_assert_held(&kvm->slots_lock);
+	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

 	/*
 	 * All the TLBs can be flushed out of mmu lock, see the comments in
 	 * kvm_mmu_slot_remove_write_access().
 	 */
+	lockdep_assert_held(&kvm->slots_lock);
 	if (is_dirty)
 		kvm_flush_remote_tlbs(kvm);

-	r = -EFAULT;
-	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
-		goto out;
-
-	r = 0;
-out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }