Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"x86 guest:

- Tweaks to the paravirtualization code, to avoid using paravirtualized
  features when they're pointless or harmful

x86 host:

- Fix for SRCU lockdep splat

- Brown paper bag fix for the propagation of errno"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: pull kvm->srcu read-side to kvm_arch_vcpu_ioctl_run
KVM: x86/mmu: Passing up the error state of mmu_alloc_shadow_roots()
KVM: x86: Yield to IPI target vCPU only if it is busy
x86/kvmclock: Fix Hyper-V Isolated VM's boot issue when vCPUs > 64
x86/kvm: Don't waste memory if kvmclock is disabled
x86/kvm: Don't use PV TLB/yield when mwait is advertised

Changed files: +20 -14
arch/x86/kernel/kvm.c  +3 -1
···
 	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
 		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+		!boot_cpu_has(X86_FEATURE_MWAIT) &&
 		(num_possible_cpus() != 1));
 }
···
 	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
 		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+		!boot_cpu_has(X86_FEATURE_MWAIT) &&
 		(num_possible_cpus() != 1));
 }
···
 
 	/* Make sure other vCPUs get a chance to run if they need to. */
 	for_each_cpu(cpu, mask) {
-		if (vcpu_is_preempted(cpu)) {
+		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
 			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
 			break;
 		}
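
The two kvm.c hunks implement the first and third commits in the shortlog: skip the PV TLB-flush and PV sched-yield paths when the guest sees MWAIT (with MWAIT an idle vCPU typically stays in guest mode rather than being scheduled out, so the preempted-vCPU optimizations these paths rely on rarely trigger), and only issue the SCHED_YIELD hypercall for an IPI target that is actually busy and preempted. A condensed sketch of the resulting policy; the helper names are made up, the calls inside them are the real arch/x86/kernel/kvm.c APIs shown above:

    /* Hypothetical condensation of the gating logic after this pull. */
    static bool pv_tlb_flush_worthwhile(void)
    {
            return kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
                   kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
                   !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
                   !boot_cpu_has(X86_FEATURE_MWAIT) &&   /* new check */
                   num_possible_cpus() != 1;
            /* The PV sched-yield check is the same shape, keyed on
             * KVM_FEATURE_PV_SCHED_YIELD instead. */
    }

    /* Hypothetical wrapper for the yield decision: an idle vCPU is woken
     * by the IPI anyway, so only yield when the target is busy *and* its
     * pCPU has been preempted. */
    static void maybe_yield_to_ipi_target(int cpu)
    {
            if (!idle_cpu(cpu) && vcpu_is_preempted(cpu))
                    kvm_hypercall1(KVM_HC_SCHED_YIELD,
                                   per_cpu(x86_cpu_to_apicid, cpu));
    }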
arch/x86/kernel/kvmclock.c  +3
···
 
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
+	if (!kvm_para_available() || !kvmclock)
+		return 0;
+
 	kvmclock_init_mem();
 
 #ifdef CONFIG_X86_64
arch/x86/kvm/mmu/mmu.c  +1 -1
···
 out_unlock:
 	write_unlock(&vcpu->kvm->mmu_lock);
 
-	return 0;
+	return r;
 }
 
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
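
The mmu.c one-liner is the "brown paper bag" errno fix called out in the summary: the shared out_unlock exit returned a literal 0, so an earlier failure recorded in r was reported to the caller as success. A standalone illustration of that bug class, with hypothetical names (plain userspace C, not the kvm code):

    /* Standalone illustration: a shared exit label that returns a
     * literal 0 swallows the error code collected in r. */
    #include <errno.h>
    #include <stdio.h>

    static int reserve_pages(void) { return -ENOMEM; } /* pretend the allocation fails */
    static int build_roots(void)   { return 0; }

    static int alloc_roots_sketch(void)
    {
            int r;

            r = reserve_pages();
            if (r < 0)
                    goto out;

            r = build_roots();
    out:
            return r;       /* the bug was "return 0" here: failure reported as success */
    }

    int main(void)
    {
            printf("alloc_roots_sketch() = %d\n", alloc_roots_sketch());
            return 0;
    }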
arch/x86/kvm/x86.c  +13 -12
···
 	       likely(!pic_in_kernel(vcpu->kvm));
 }
 
+/* Called within kvm->srcu read side. */
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
···
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
 
-	/*
-	 * The call to kvm_ready_for_interrupt_injection() may end up in
-	 * kvm_xen_has_interrupt() which may require the srcu lock to be
-	 * held, to protect against changes in the vcpu_info address.
-	 */
-	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_run->ready_for_interrupt_injection =
 		pic_in_kernel(vcpu->kvm) ||
 		kvm_vcpu_ready_for_interrupt_injection(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
 	if (is_smm(vcpu))
 		kvm_run->flags |= KVM_RUN_X86_SMM;
···
 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
 
 /*
+ * Called within kvm->srcu read side.
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace. Otherwise, the value will be returned to the
  * userspace.
···
 	return r;
 }
 
+/* Called within kvm->srcu read side. */
 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
 	bool hv_timer;
···
 		!vcpu->arch.apf.halted);
 }
 
+/* Called within kvm->srcu read side. */
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
 	struct kvm *kvm = vcpu->kvm;
 
-	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 	vcpu->arch.l1tf_flush_l1d = true;
 
 	for (;;) {
···
 		if (__xfer_to_guest_mode_work_pending()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 			r = xfer_to_guest_mode_handle_work(vcpu);
+			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 			if (r)
 				return r;
-			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}
-
-	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
 	return r;
 }
···
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *kvm_run = vcpu->run;
+	struct kvm *kvm = vcpu->kvm;
 	int r;
 
 	vcpu_load(vcpu);
···
 	kvm_run->flags = 0;
 	kvm_load_guest_fpu(vcpu);
 
+	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		if (kvm_run->immediate_exit) {
 			r = -EINTR;
···
 		 * use before KVM has ever run the vCPU.
 		 */
 		WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
+
+		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 		kvm_vcpu_block(vcpu);
+		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
 		if (kvm_apic_accept_events(vcpu) < 0) {
 			r = 0;
 			goto out;
···
 	if (kvm_run->kvm_valid_regs)
 		store_regs(vcpu);
 	post_kvm_run_save(vcpu);
-	kvm_sigset_deactivate(vcpu);
+	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
 
+	kvm_sigset_deactivate(vcpu);
 	vcpu_put(vcpu);
 	return r;
 }
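
The x86.c hunks are the SRCU lockdep fix: rather than taking kvm->srcu piecemeal in post_kvm_run_save() and vcpu_run(), the read side is pulled up to kvm_arch_vcpu_ioctl_run() and the inner functions are documented as running inside it; the lock is dropped only around calls that can sleep for a long time (kvm_vcpu_block(), xfer_to_guest_mode_handle_work()). A simplified sketch of the resulting shape; the two helpers marked hypothetical stand in for the real control flow, while srcu_read_lock()/srcu_read_unlock() are the actual SRCU primitives used above:

    /* Simplified sketch; not the real kvm_arch_vcpu_ioctl_run(). */
    int vcpu_ioctl_run_sketch(struct kvm_vcpu *vcpu)
    {
            struct kvm *kvm = vcpu->kvm;
            int r;

            /* One read-side bracket for the whole ioctl... */
            vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);

            if (vcpu_must_block(vcpu)) {            /* hypothetical */
                    /* ...dropped only around long sleeps. */
                    srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                    kvm_vcpu_block(vcpu);
                    vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
            }

            r = run_guest_loop(vcpu);               /* hypothetical; runs with srcu held */

            srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
            return r;
    }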