Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: add kvm_request_pending

A first step in vcpu->requests encapsulation. Additionally, we now
use READ_ONCE() when accessing vcpu->requests, which ensures we
always load vcpu->requests when it's accessed. This is important as
other threads can change it any time. Also, READ_ONCE() documents
that vcpu->requests is used with other threads, likely requiring
memory barriers, which it does.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
[ Documented the new use of READ_ONCE() and converted another check
in arch/mips/kvm/vz.c ]
Signed-off-by: Andrew Jones <drjones@redhat.com>
Acked-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Christoffer Dall <cdall@linaro.org>

authored by

Radim Krčmář and committed by
Christoffer Dall
2fa6e1e1 2387149e

+13 -9
+1 -1
arch/mips/kvm/trap_emul.c
···
1094 1094       struct mm_struct *mm;
1095 1095       int i;
1096 1096
1097      -     if (likely(!vcpu->requests))
     1097 +     if (likely(!kvm_request_pending(vcpu)))
1098 1098           return;
1099 1099
1100 1100       if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+1 -1
arch/mips/kvm/vz.c
···
2337 2337       int ret = 0;
2338 2338       int i;
2339 2339
2340      -     if (!vcpu->requests)
     2340 +     if (!kvm_request_pending(vcpu))
2341 2341           return 0;
2342 2342
2343 2343       if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
+1 -1
arch/powerpc/kvm/booke.c
···
 687  687
 688  688       kvmppc_core_check_exceptions(vcpu);
 689  689
 690       -    if (vcpu->requests) {
      690  +    if (kvm_request_pending(vcpu)) {
 691  691           /* Exception delivery raised request; start over */
 692  692           return 1;
 693  693       }
+2 -3
arch/powerpc/kvm/powerpc.c
···
  55   55
  56   56  int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
  57   57  {
  58       -    return !!(v->arch.pending_exceptions) ||
  59       -           v->requests;
       58  +    return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
  60   59  }
  61   60
  62   61  int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
···
 107  108       */
 108  109      smp_mb();
 109  110
 110       -   if (vcpu->requests) {
      111  +   if (kvm_request_pending(vcpu)) {
 111  112          /* Make sure we process requests preemptable */
 112  113          local_irq_enable();
 113  114          trace_kvm_check_requests(vcpu);
+1 -1
arch/s390/kvm/kvm-s390.c
···
2440 2440  {
2441 2441  retry:
2442 2442      kvm_s390_vcpu_request_handled(vcpu);
2443      -    if (!vcpu->requests)
     2443 +    if (!kvm_request_pending(vcpu))
2444 2444          return 0;
2445 2445      /*
2446 2446       * We use MMU_RELOAD just to re-arm the ipte notifier for the
+2 -2
arch/x86/kvm/x86.c
···
6710 6710
6711 6711      bool req_immediate_exit = false;
6712 6712
6713      -    if (vcpu->requests) {
     6713 +    if (kvm_request_pending(vcpu)) {
6714 6714          if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
6715 6715              kvm_mmu_unload(vcpu);
6716 6716          if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
···
6874 6874          kvm_x86_ops->sync_pir_to_irr(vcpu);
6875 6875      }
6876 6876
6877      -    if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
     6877 +    if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
6878 6878          || need_resched() || signal_pending(current)) {
6879 6879          vcpu->mode = OUTSIDE_GUEST_MODE;
6880 6880          smp_wmb();
+5
include/linux/kvm_host.h
···
1105 1105      set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
1106 1106  }
1107 1107
     1108 + static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
     1109 + {
     1110 +     return READ_ONCE(vcpu->requests);
     1111 + }
     1112 +
1108 1113  static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
1109 1114  {
1110 1115      return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);