Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kvm: rename last argument to kvm_get_dirty_log_protect

Once manual dirty log reprotection is enabled, the value that
kvm_get_dirty_log_protect stores through its pointer argument will always be
false on exit, because no TLB flush is needed until the manual re-protection
operation. Rename it from "is_dirty" to "flush",
which more accurately tells the caller what they have to do with it.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

+13 -13
+3 -3
arch/mips/kvm/mips.c
··· 1004 1004 { 1005 1005 struct kvm_memslots *slots; 1006 1006 struct kvm_memory_slot *memslot; 1007 - bool is_dirty = false; 1007 + bool flush = false; 1008 1008 int r; 1009 1009 1010 1010 mutex_lock(&kvm->slots_lock); 1011 1011 1012 - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); 1012 + r = kvm_get_dirty_log_protect(kvm, log, &flush); 1013 1013 1014 - if (is_dirty) { 1014 + if (flush) { 1015 1015 slots = kvm_memslots(kvm); 1016 1016 memslot = id_to_memslot(slots, log->slot); 1017 1017
+3 -3
arch/x86/kvm/x86.c
··· 4393 4393 */ 4394 4394 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 4395 4395 { 4396 - bool is_dirty = false; 4396 + bool flush = false; 4397 4397 int r; 4398 4398 4399 4399 mutex_lock(&kvm->slots_lock); ··· 4404 4404 if (kvm_x86_ops->flush_log_dirty) 4405 4405 kvm_x86_ops->flush_log_dirty(kvm); 4406 4406 4407 - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); 4407 + r = kvm_get_dirty_log_protect(kvm, log, &flush); 4408 4408 4409 4409 /* 4410 4410 * All the TLBs can be flushed out of mmu lock, see the comments in 4411 4411 * kvm_mmu_slot_remove_write_access(). 4412 4412 */ 4413 4413 lockdep_assert_held(&kvm->slots_lock); 4414 - if (is_dirty) 4414 + if (flush) 4415 4415 kvm_flush_remote_tlbs(kvm); 4416 4416 4417 4417 mutex_unlock(&kvm->slots_lock);
+1 -1
include/linux/kvm_host.h
··· 753 753 struct kvm_dirty_log *log, int *is_dirty); 754 754 755 755 int kvm_get_dirty_log_protect(struct kvm *kvm, 756 - struct kvm_dirty_log *log, bool *is_dirty); 756 + struct kvm_dirty_log *log, bool *flush); 757 757 758 758 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, 759 759 struct kvm_memory_slot *slot,
+3 -3
virt/kvm/arm/arm.c
··· 1205 1205 */ 1206 1206 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 1207 1207 { 1208 - bool is_dirty = false; 1208 + bool flush = false; 1209 1209 int r; 1210 1210 1211 1211 mutex_lock(&kvm->slots_lock); 1212 1212 1213 - r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); 1213 + r = kvm_get_dirty_log_protect(kvm, log, &flush); 1214 1214 1215 - if (is_dirty) 1215 + if (flush) 1216 1216 kvm_flush_remote_tlbs(kvm); 1217 1217 1218 1218 mutex_unlock(&kvm->slots_lock);
+3 -3
virt/kvm/kvm_main.c
··· 1154 1154 * 1155 1155 */ 1156 1156 int kvm_get_dirty_log_protect(struct kvm *kvm, 1157 - struct kvm_dirty_log *log, bool *is_dirty) 1157 + struct kvm_dirty_log *log, bool *flush) 1158 1158 { 1159 1159 struct kvm_memslots *slots; 1160 1160 struct kvm_memory_slot *memslot; ··· 1181 1181 memset(dirty_bitmap_buffer, 0, n); 1182 1182 1183 1183 spin_lock(&kvm->mmu_lock); 1184 - *is_dirty = false; 1184 + *flush = false; 1185 1185 for (i = 0; i < n / sizeof(long); i++) { 1186 1186 unsigned long mask; 1187 1187 gfn_t offset; ··· 1189 1189 if (!dirty_bitmap[i]) 1190 1190 continue; 1191 1191 1192 - *is_dirty = true; 1192 + *flush = true; 1193 1193 1194 1194 mask = xchg(&dirty_bitmap[i], 0); 1195 1195 dirty_bitmap_buffer[i] = mask;