KVM: x86: Replace static_call_cond() with static_call()

On x86, static_call_cond() is now functionally identical to
static_call(), as static_call() handles a NULL target as a NOP. Replace
it with static_call() to simplify the code.

Link: https://lore.kernel.org/all/3916caa1dcd114301a49beafa5030eca396745c1.1679456900.git.jpoimboe@kernel.org/
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240507133103.15052-2-wei.w.wang@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
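
The one-line rationale above is doing a lot of work, so here is a minimal,
self-contained sketch of the behavior it relies on (not part of this patch;
demo_hook, demo_hook_impl, and demo are hypothetical names): a static call
whose target is NULL can be invoked through plain static_call() and
degenerates to a NOP.

    #include <linux/static_call.h>
    #include <linux/printk.h>

    /* Hypothetical optional hook; must return void for NULL-as-NOP. */
    static void demo_hook_impl(int cpu)
    {
            pr_info("demo hook ran on cpu %d\n", cpu);
    }

    /* Typed like demo_hook_impl(), but starts out with a NULL target. */
    DEFINE_STATIC_CALL_NULL(demo_hook, demo_hook_impl);

    void demo(void)
    {
            /* Target still NULL: patched to a NOP, not a NULL deref. */
            static_call(demo_hook)(0);

            /* Install a target; later invocations become direct calls. */
            static_call_update(demo_hook, demo_hook_impl);
            static_call(demo_hook)(1);
    }

Before the series linked above, the first invocation would have had to be
written as static_call_cond(demo_hook)(0) to be safe; with NULL handled by
static_call() itself, the conversion in this patch is purely mechanical.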

Authored by Wei Wang, committed by Paolo Bonzini
f4854bf7 bc9cd5a2
7 files changed: +33 -35

arch/x86/include/asm/kvm-x86-ops.h (+1 -2)

  * "static_call_update()" calls.
  *
  * KVM_X86_OP_OPTIONAL() can be used for those functions that can have
- * a NULL definition, for example if "static_call_cond()" will be used
- * at the call sites. KVM_X86_OP_OPTIONAL_RET0() can be used likewise
+ * a NULL definition. KVM_X86_OP_OPTIONAL_RET0() can be used likewise
  * to make a definition optional, but in this case the default will
  * be __static_call_return0.
  */
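
For context, the file being edited is an x-macro list that KVM includes
twice: once to declare a static call per vendor op, and once (at
vendor-module registration) to patch each call to the implementation. A
condensed sketch of that plumbing, lightly paraphrased from
arch/x86/include/asm/kvm_host.h and arch/x86/kvm/x86.c (the in-tree macros
carry extra bookkeeping):

    /*
     * Declaration side (condensed from kvm_host.h): every op in
     * kvm-x86-ops.h becomes a static call named kvm_x86_<op>, typed
     * after the matching struct kvm_x86_ops member.
     */
    #define KVM_X86_OP(func) \
            DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
    #define KVM_X86_OP_OPTIONAL KVM_X86_OP
    #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
    #include <asm/kvm-x86-ops.h>

    /*
     * Update side (condensed from x86.c): when the vendor module
     * (VMX/SVM) registers, each static call is patched to point at the
     * vendor implementation. Optional ops may legitimately be NULL;
     * RET0 ops fall back to __static_call_return0 instead.
     */
    #define __KVM_X86_OP(func) \
            static_call_update(kvm_x86_##func, kvm_x86_ops.func);
    #define KVM_X86_OP(func) \
            WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
    #define KVM_X86_OP_OPTIONAL __KVM_X86_OP
    #define KVM_X86_OP_OPTIONAL_RET0(func) \
            static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
                                               (void *)__static_call_return0);
    #include <asm/kvm-x86-ops.h>

An op declared with KVM_X86_OP_OPTIONAL() may therefore be left NULL by a
vendor module, which is exactly the case static_call() now handles as a NOP.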

arch/x86/include/asm/kvm-x86-pmu-ops.h (+1 -2)

  * "static_call_update()" calls.
  *
  * KVM_X86_PMU_OP_OPTIONAL() can be used for those functions that can have
- * a NULL definition, for example if "static_call_cond()" will be used
- * at the call sites.
+ * a NULL definition.
  */
 KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
 KVM_X86_PMU_OP(msr_idx_to_pmc)

arch/x86/include/asm/kvm_host.h (+2 -2)

 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-        static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
+        static_call(kvm_x86_vcpu_blocking)(vcpu);
 }

 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-        static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
+        static_call(kvm_x86_vcpu_unblocking)(vcpu);
 }

 static inline int kvm_cpu_get_apicid(int mps_cpu)

arch/x86/kvm/irq.c (+1 -1)

 {
         __kvm_migrate_apic_timer(vcpu);
         __kvm_migrate_pit_timer(vcpu);
-        static_call_cond(kvm_x86_migrate_timers)(vcpu);
+        static_call(kvm_x86_migrate_timers)(vcpu);
 }

 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)

arch/x86/kvm/lapic.c (+12 -12)

 	if (unlikely(apic->apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
+		static_call(kvm_x86_hwapic_irr_update)(apic->vcpu,
 					   apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
···
 	 * just set SVI.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call_cond(kvm_x86_hwapic_isr_update)(vec);
+		static_call(kvm_x86_hwapic_isr_update)(vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
···
 	 * and must be left alone.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
···
 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
 		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
+		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
 	}

 	apic->base_address = apic->vcpu->arch.apic_base &
···
 	u64 msr_val;
 	int i;

-	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
+	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);

 	if (!init_event) {
 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
···
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (apic->apicv_active) {
-		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
-		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
+		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
+		static_call(kvm_x86_hwapic_isr_update)(-1);
 	}

 	vcpu->arch.apic_arb_prio = 0;
···
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	int r;

-	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
+	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);

 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
 	/* set SPIV separately to get count of SW disabled APICs right */
···
 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
 	kvm_apic_update_apicv(vcpu);
 	if (apic->apicv_active) {
-		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
-		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))

arch/x86/kvm/pmu.c (+3 -3)

 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
+		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
···
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

-	static_call_cond(kvm_x86_pmu_reset)(vcpu);
+	static_call(kvm_x86_pmu_reset)(vcpu);
 }
···
 		pmc_stop_counter(pmc);
 	}

-	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
+	static_call(kvm_x86_pmu_cleanup)(vcpu);

 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }

arch/x86/kvm/x86.c (+13 -13)

 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
-	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+	static_call(kvm_x86_sync_pir_to_irr)(vcpu);

 	return kvm_apic_get_state(vcpu, s);
 }
···
 	kvm_rip_write(vcpu, ctxt->eip);
 	if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
 		r = kvm_vcpu_do_singlestep(vcpu);
-	static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
+	static_call(kvm_x86_update_emulated_instruction)(vcpu);
 	__kvm_set_rflags(vcpu, ctxt->eflags);
 }
···
 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);

-	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+	static_call(kvm_x86_sync_pir_to_irr)(vcpu);

 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
···
 		bitmap_or((ulong *)eoi_exit_bitmap,
 			  vcpu->arch.ioapic_handled_vectors,
 			  to_hv_synic(vcpu)->vec_bitmap, 256);
-		static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+		static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 		return;
 	}
 #endif
-	static_call_cond(kvm_x86_load_eoi_exitmap)(
+	static_call(kvm_x86_load_eoi_exitmap)(
 		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }

 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
-	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+	static_call(kvm_x86_guest_memory_reclaimed)(kvm);
 }

 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
···
 	if (!lapic_in_kernel(vcpu))
 		return;

-	static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
+	static_call(kvm_x86_set_apic_access_page_addr)(vcpu);
 }
···
 	 * i.e. they can post interrupts even if APICv is temporarily disabled.
 	 */
 	if (kvm_lapic_enabled(vcpu))
-		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+		static_call(kvm_x86_sync_pir_to_irr)(vcpu);

 	if (kvm_vcpu_exit_request(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
···
 			break;

 		if (kvm_lapic_enabled(vcpu))
-			static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+			static_call(kvm_x86_sync_pir_to_irr)(vcpu);

 		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
 			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
···
 	*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
-	static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
+	static_call(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);

 	kvm_set_cr8(vcpu, sregs->cr8);
···
 		mutex_unlock(&kvm->slots_lock);
 	}
 	kvm_unload_vcpu_mmus(kvm);
-	static_call_cond(kvm_x86_vm_destroy)(kvm);
+	static_call(kvm_x86_vm_destroy)(kvm);
 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
 	kvm_pic_destroy(kvm);
 	kvm_ioapic_destroy(kvm);
···
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
 	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
-		static_call_cond(kvm_x86_pi_start_assignment)(kvm);
+		static_call(kvm_x86_pi_start_assignment)(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
···
 #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
 {
-	static_call_cond(kvm_x86_gmem_invalidate)(start, end);
+	static_call(kvm_x86_gmem_invalidate)(start, end);
 }
 #endif