Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: use static calls to reduce kvm_x86_ops overhead

Convert kvm_x86_ops to use static calls. Note that all kvm_x86_ops are
covered here except for 'pmu_ops' and 'nested_ops'.

Here are some numbers running cpuid in a loop of 1 million calls averaged
over 5 runs, measured in the vm (lower is better).

Intel Xeon 3000MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.671s      |.486s
static call|.573s(-15%)|.458s(-6%)

AMD EPYC 2500MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.710s      |.609s
static call|.664s(-6%) |.609s(0%)

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Message-Id: <e057bf1b8a7ad15652df6eeba3f907ae758d3399.1610680941.git.jbaron@akamai.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Jason Baron and committed by
Paolo Bonzini
b3646477 9af5471b

+193 -197
+3 -5
arch/x86/include/asm/kvm_host.h
··· 1374 1374 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) 1375 1375 { 1376 1376 if (kvm_x86_ops.tlb_remote_flush && 1377 - !kvm_x86_ops.tlb_remote_flush(kvm)) 1377 + !static_call(kvm_x86_tlb_remote_flush)(kvm)) 1378 1378 return 0; 1379 1379 else 1380 1380 return -ENOTSUPP; ··· 1767 1767 1768 1768 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) 1769 1769 { 1770 - if (kvm_x86_ops.vcpu_blocking) 1771 - kvm_x86_ops.vcpu_blocking(vcpu); 1770 + static_call_cond(kvm_x86_vcpu_blocking)(vcpu); 1772 1771 } 1773 1772 1774 1773 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) 1775 1774 { 1776 - if (kvm_x86_ops.vcpu_unblocking) 1777 - kvm_x86_ops.vcpu_unblocking(vcpu); 1775 + static_call_cond(kvm_x86_vcpu_unblocking)(vcpu); 1778 1776 } 1779 1777 1780 1778 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+1 -1
arch/x86/kvm/cpuid.c
··· 182 182 vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); 183 183 184 184 /* Invoke the vendor callback only after the above state is updated. */ 185 - kvm_x86_ops.vcpu_after_set_cpuid(vcpu); 185 + static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu); 186 186 } 187 187 188 188 static int is_efer_nx(void)
+2 -2
arch/x86/kvm/hyperv.c
··· 1154 1154 addr = gfn_to_hva(kvm, gfn); 1155 1155 if (kvm_is_error_hva(addr)) 1156 1156 return 1; 1157 - kvm_x86_ops.patch_hypercall(vcpu, instructions); 1157 + static_call(kvm_x86_patch_hypercall)(vcpu, instructions); 1158 1158 ((unsigned char *)instructions)[3] = 0xc3; /* ret */ 1159 1159 if (__copy_to_user((void __user *)addr, instructions, 4)) 1160 1160 return 1; ··· 1745 1745 * hypercall generates UD from non zero cpl and real mode 1746 1746 * per HYPER-V spec 1747 1747 */ 1748 - if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { 1748 + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) { 1749 1749 kvm_queue_exception(vcpu, UD_VECTOR); 1750 1750 return 1; 1751 1751 }
+1 -2
arch/x86/kvm/irq.c
··· 143 143 { 144 144 __kvm_migrate_apic_timer(vcpu); 145 145 __kvm_migrate_pit_timer(vcpu); 146 - if (kvm_x86_ops.migrate_timers) 147 - kvm_x86_ops.migrate_timers(vcpu); 146 + static_call_cond(kvm_x86_migrate_timers)(vcpu); 148 147 } 149 148 150 149 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
+5 -5
arch/x86/kvm/kvm_cache_regs.h
··· 68 68 return 0; 69 69 70 70 if (!kvm_register_is_available(vcpu, reg)) 71 - kvm_x86_ops.cache_reg(vcpu, reg); 71 + static_call(kvm_x86_cache_reg)(vcpu, reg); 72 72 73 73 return vcpu->arch.regs[reg]; 74 74 } ··· 108 108 might_sleep(); /* on svm */ 109 109 110 110 if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) 111 - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR); 111 + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR); 112 112 113 113 return vcpu->arch.walk_mmu->pdptrs[index]; 114 114 } ··· 118 118 ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; 119 119 if ((tmask & vcpu->arch.cr0_guest_owned_bits) && 120 120 !kvm_register_is_available(vcpu, VCPU_EXREG_CR0)) 121 - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0); 121 + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0); 122 122 return vcpu->arch.cr0 & mask; 123 123 } 124 124 ··· 132 132 ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; 133 133 if ((tmask & vcpu->arch.cr4_guest_owned_bits) && 134 134 !kvm_register_is_available(vcpu, VCPU_EXREG_CR4)) 135 - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4); 135 + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4); 136 136 return vcpu->arch.cr4 & mask; 137 137 } 138 138 139 139 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) 140 140 { 141 141 if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) 142 - kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3); 142 + static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3); 143 143 return vcpu->arch.cr3; 144 144 } 145 145
+15 -15
arch/x86/kvm/lapic.c
··· 484 484 if (unlikely(vcpu->arch.apicv_active)) { 485 485 /* need to update RVI */ 486 486 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); 487 - kvm_x86_ops.hwapic_irr_update(vcpu, 487 + static_call(kvm_x86_hwapic_irr_update)(vcpu, 488 488 apic_find_highest_irr(apic)); 489 489 } else { 490 490 apic->irr_pending = false; ··· 515 515 * just set SVI. 516 516 */ 517 517 if (unlikely(vcpu->arch.apicv_active)) 518 - kvm_x86_ops.hwapic_isr_update(vcpu, vec); 518 + static_call(kvm_x86_hwapic_isr_update)(vcpu, vec); 519 519 else { 520 520 ++apic->isr_count; 521 521 BUG_ON(apic->isr_count > MAX_APIC_VECTOR); ··· 563 563 * and must be left alone. 564 564 */ 565 565 if (unlikely(vcpu->arch.apicv_active)) 566 - kvm_x86_ops.hwapic_isr_update(vcpu, 567 - apic_find_highest_isr(apic)); 566 + static_call(kvm_x86_hwapic_isr_update)(vcpu, 567 + apic_find_highest_isr(apic)); 568 568 else { 569 569 --apic->isr_count; 570 570 BUG_ON(apic->isr_count < 0); ··· 701 701 { 702 702 int highest_irr; 703 703 if (apic->vcpu->arch.apicv_active) 704 - highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu); 704 + highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu); 705 705 else 706 706 highest_irr = apic_find_highest_irr(apic); 707 707 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr) ··· 1090 1090 apic->regs + APIC_TMR); 1091 1091 } 1092 1092 1093 - if (kvm_x86_ops.deliver_posted_interrupt(vcpu, vector)) { 1093 + if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) { 1094 1094 kvm_lapic_set_irr(vector, apic); 1095 1095 kvm_make_request(KVM_REQ_EVENT, vcpu); 1096 1096 kvm_vcpu_kick(vcpu); ··· 1814 1814 { 1815 1815 WARN_ON(preemptible()); 1816 1816 WARN_ON(!apic->lapic_timer.hv_timer_in_use); 1817 - kvm_x86_ops.cancel_hv_timer(apic->vcpu); 1817 + static_call(kvm_x86_cancel_hv_timer)(apic->vcpu); 1818 1818 apic->lapic_timer.hv_timer_in_use = false; 1819 1819 } 1820 1820 ··· 1831 1831 if (!ktimer->tscdeadline) 1832 1832 return false; 1833 1833 1834 - if 
(kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired)) 1834 + if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired)) 1835 1835 return false; 1836 1836 1837 1837 ktimer->hv_timer_in_use = true; ··· 2261 2261 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); 2262 2262 2263 2263 if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) 2264 - kvm_x86_ops.set_virtual_apic_mode(vcpu); 2264 + static_call(kvm_x86_set_virtual_apic_mode)(vcpu); 2265 2265 2266 2266 apic->base_address = apic->vcpu->arch.apic_base & 2267 2267 MSR_IA32_APICBASE_BASE; ··· 2338 2338 vcpu->arch.pv_eoi.msr_val = 0; 2339 2339 apic_update_ppr(apic); 2340 2340 if (vcpu->arch.apicv_active) { 2341 - kvm_x86_ops.apicv_post_state_restore(vcpu); 2342 - kvm_x86_ops.hwapic_irr_update(vcpu, -1); 2343 - kvm_x86_ops.hwapic_isr_update(vcpu, -1); 2341 + static_call(kvm_x86_apicv_post_state_restore)(vcpu); 2342 + static_call(kvm_x86_hwapic_irr_update)(vcpu, -1); 2343 + static_call(kvm_x86_hwapic_isr_update)(vcpu, -1); 2344 2344 } 2345 2345 2346 2346 vcpu->arch.apic_arb_prio = 0; ··· 2601 2601 kvm_apic_update_apicv(vcpu); 2602 2602 apic->highest_isr_cache = -1; 2603 2603 if (vcpu->arch.apicv_active) { 2604 - kvm_x86_ops.apicv_post_state_restore(vcpu); 2605 - kvm_x86_ops.hwapic_irr_update(vcpu, 2604 + static_call(kvm_x86_apicv_post_state_restore)(vcpu); 2605 + static_call(kvm_x86_hwapic_irr_update)(vcpu, 2606 2606 apic_find_highest_irr(apic)); 2607 - kvm_x86_ops.hwapic_isr_update(vcpu, 2607 + static_call(kvm_x86_hwapic_isr_update)(vcpu, 2608 2608 apic_find_highest_isr(apic)); 2609 2609 } 2610 2610 kvm_make_request(KVM_REQ_EVENT, vcpu);
+3 -3
arch/x86/kvm/mmu.h
··· 102 102 if (!VALID_PAGE(root_hpa)) 103 103 return; 104 104 105 - kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu), 105 + static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu), 106 106 vcpu->arch.mmu->shadow_root_level); 107 107 } 108 108 ··· 174 174 unsigned pte_access, unsigned pte_pkey, 175 175 unsigned pfec) 176 176 { 177 - int cpl = kvm_x86_ops.get_cpl(vcpu); 178 - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); 177 + int cpl = static_call(kvm_x86_get_cpl)(vcpu); 178 + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 179 179 180 180 /* 181 181 * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
+8 -7
arch/x86/kvm/mmu/mmu.c
··· 190 190 int ret = -ENOTSUPP; 191 191 192 192 if (range && kvm_x86_ops.tlb_remote_flush_with_range) 193 - ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range); 193 + ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range); 194 194 195 195 if (ret) 196 196 kvm_flush_remote_tlbs(kvm); ··· 1283 1283 gfn_t gfn_offset, unsigned long mask) 1284 1284 { 1285 1285 if (kvm_x86_ops.enable_log_dirty_pt_masked) 1286 - kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset, 1287 - mask); 1286 + static_call(kvm_x86_enable_log_dirty_pt_masked)(kvm, slot, 1287 + gfn_offset, 1288 + mask); 1288 1289 else 1289 1290 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); 1290 1291 } ··· 1293 1292 int kvm_cpu_dirty_log_size(void) 1294 1293 { 1295 1294 if (kvm_x86_ops.cpu_dirty_log_size) 1296 - return kvm_x86_ops.cpu_dirty_log_size(); 1295 + return static_call(kvm_x86_cpu_dirty_log_size)(); 1297 1296 1298 1297 return 0; 1299 1298 } ··· 4800 4799 if (r) 4801 4800 goto out; 4802 4801 kvm_mmu_load_pgd(vcpu); 4803 - kvm_x86_ops.tlb_flush_current(vcpu); 4802 + static_call(kvm_x86_tlb_flush_current)(vcpu); 4804 4803 out: 4805 4804 return r; 4806 4805 } ··· 5081 5080 if (is_noncanonical_address(gva, vcpu)) 5082 5081 return; 5083 5082 5084 - kvm_x86_ops.tlb_flush_gva(vcpu, gva); 5083 + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); 5085 5084 } 5086 5085 5087 5086 if (!mmu->invlpg) ··· 5138 5137 } 5139 5138 5140 5139 if (tlb_flush) 5141 - kvm_x86_ops.tlb_flush_gva(vcpu, gva); 5140 + static_call(kvm_x86_tlb_flush_gva)(vcpu, gva); 5142 5141 5143 5142 ++vcpu->stat.invlpg; 5144 5143
+1 -1
arch/x86/kvm/mmu/spte.c
··· 120 120 if (level > PG_LEVEL_4K) 121 121 spte |= PT_PAGE_SIZE_MASK; 122 122 if (tdp_enabled) 123 - spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn, 123 + spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn, 124 124 kvm_is_mmio_pfn(pfn)); 125 125 126 126 if (host_writable)
+1 -1
arch/x86/kvm/pmu.c
··· 373 373 return 1; 374 374 375 375 if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) && 376 - (kvm_x86_ops.get_cpl(vcpu) != 0) && 376 + (static_call(kvm_x86_get_cpl)(vcpu) != 0) && 377 377 (kvm_read_cr0(vcpu) & X86_CR0_PE)) 378 378 return 1; 379 379
+2 -2
arch/x86/kvm/trace.h
··· 256 256 __entry->guest_rip = kvm_rip_read(vcpu); \ 257 257 __entry->isa = isa; \ 258 258 __entry->vcpu_id = vcpu->vcpu_id; \ 259 - kvm_x86_ops.get_exit_info(vcpu, &__entry->info1, \ 259 + static_call(kvm_x86_get_exit_info)(vcpu, &__entry->info1, \ 260 260 &__entry->info2, \ 261 261 &__entry->intr_info, \ 262 262 &__entry->error_code); \ ··· 738 738 ), 739 739 740 740 TP_fast_assign( 741 - __entry->csbase = kvm_x86_ops.get_segment_base(vcpu, VCPU_SREG_CS); 741 + __entry->csbase = static_call(kvm_x86_get_segment_base)(vcpu, VCPU_SREG_CS); 742 742 __entry->len = vcpu->arch.emulate_ctxt->fetch.ptr 743 743 - vcpu->arch.emulate_ctxt->fetch.data; 744 744 __entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
+148 -150
arch/x86/kvm/x86.c
··· 708 708 */ 709 709 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) 710 710 { 711 - if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) 711 + if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) 712 712 return true; 713 713 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); 714 714 return false; ··· 868 868 869 869 if (!is_pae(vcpu)) 870 870 return 1; 871 - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 871 + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 872 872 if (cs_l) 873 873 return 1; 874 874 } ··· 881 881 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) 882 882 return 1; 883 883 884 - kvm_x86_ops.set_cr0(vcpu, cr0); 884 + static_call(kvm_x86_set_cr0)(vcpu, cr0); 885 885 886 886 kvm_post_set_cr0(vcpu, old_cr0, cr0); 887 887 ··· 986 986 987 987 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) 988 988 { 989 - if (kvm_x86_ops.get_cpl(vcpu) != 0 || 989 + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || 990 990 __kvm_set_xcr(vcpu, index, xcr)) { 991 991 kvm_inject_gp(vcpu, 0); 992 992 return 1; ··· 1003 1003 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) 1004 1004 return false; 1005 1005 1006 - return kvm_x86_ops.is_valid_cr4(vcpu, cr4); 1006 + return static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); 1007 1007 } 1008 1008 EXPORT_SYMBOL_GPL(kvm_is_valid_cr4); 1009 1009 ··· 1047 1047 return 1; 1048 1048 } 1049 1049 1050 - kvm_x86_ops.set_cr4(vcpu, cr4); 1050 + static_call(kvm_x86_set_cr4)(vcpu, cr4); 1051 1051 1052 1052 kvm_post_set_cr4(vcpu, old_cr4, cr4); 1053 1053 ··· 1130 1130 dr7 = vcpu->arch.guest_debug_dr7; 1131 1131 else 1132 1132 dr7 = vcpu->arch.dr7; 1133 - kvm_x86_ops.set_dr7(vcpu, dr7); 1133 + static_call(kvm_x86_set_dr7)(vcpu, dr7); 1134 1134 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; 1135 1135 if (dr7 & DR7_BP_EN_MASK) 1136 1136 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; ··· 1442 1442 rdmsrl_safe(msr->index, &msr->data); 1443 1443 break; 1444 1444 default: 1445 - return 
kvm_x86_ops.get_msr_feature(msr); 1445 + return static_call(kvm_x86_get_msr_feature)(msr); 1446 1446 } 1447 1447 return 0; 1448 1448 } ··· 1518 1518 efer &= ~EFER_LMA; 1519 1519 efer |= vcpu->arch.efer & EFER_LMA; 1520 1520 1521 - r = kvm_x86_ops.set_efer(vcpu, efer); 1521 + r = static_call(kvm_x86_set_efer)(vcpu, efer); 1522 1522 if (r) { 1523 1523 WARN_ON(r > 0); 1524 1524 return r; ··· 1615 1615 msr.index = index; 1616 1616 msr.host_initiated = host_initiated; 1617 1617 1618 - return kvm_x86_ops.set_msr(vcpu, &msr); 1618 + return static_call(kvm_x86_set_msr)(vcpu, &msr); 1619 1619 } 1620 1620 1621 1621 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, ··· 1648 1648 msr.index = index; 1649 1649 msr.host_initiated = host_initiated; 1650 1650 1651 - ret = kvm_x86_ops.get_msr(vcpu, &msr); 1651 + ret = static_call(kvm_x86_get_msr)(vcpu, &msr); 1652 1652 if (!ret) 1653 1653 *data = msr.data; 1654 1654 return ret; ··· 1689 1689 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); 1690 1690 } 1691 1691 1692 - return kvm_x86_ops.complete_emulated_msr(vcpu, err); 1692 + return static_call(kvm_x86_complete_emulated_msr)(vcpu, err); 1693 1693 } 1694 1694 1695 1695 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) 1696 1696 { 1697 - return kvm_x86_ops.complete_emulated_msr(vcpu, vcpu->run->msr.error); 1697 + return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); 1698 1698 } 1699 1699 1700 1700 static u64 kvm_msr_reason(int r) ··· 1766 1766 trace_kvm_msr_read_ex(ecx); 1767 1767 } 1768 1768 1769 - return kvm_x86_ops.complete_emulated_msr(vcpu, r); 1769 + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 1770 1770 } 1771 1771 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 1772 1772 ··· 1792 1792 else 1793 1793 trace_kvm_msr_write_ex(ecx, data); 1794 1794 1795 - return kvm_x86_ops.complete_emulated_msr(vcpu, r); 1795 + return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); 1796 1796 } 1797 1797 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 1798 
1798 ··· 2224 2224 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) 2225 2225 { 2226 2226 vcpu->arch.l1_tsc_offset = offset; 2227 - vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); 2227 + vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset); 2228 2228 } 2229 2229 2230 2230 static inline bool kvm_check_tsc_unstable(void) ··· 2970 2970 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) 2971 2971 { 2972 2972 ++vcpu->stat.tlb_flush; 2973 - kvm_x86_ops.tlb_flush_all(vcpu); 2973 + static_call(kvm_x86_tlb_flush_all)(vcpu); 2974 2974 } 2975 2975 2976 2976 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) 2977 2977 { 2978 2978 ++vcpu->stat.tlb_flush; 2979 - kvm_x86_ops.tlb_flush_guest(vcpu); 2979 + static_call(kvm_x86_tlb_flush_guest)(vcpu); 2980 2980 } 2981 2981 2982 2982 static void record_steal_time(struct kvm_vcpu *vcpu) ··· 3802 3802 * fringe case that is not enabled except via specific settings 3803 3803 * of the module parameters. 
3804 3804 */ 3805 - r = kvm_x86_ops.has_emulated_msr(kvm, MSR_IA32_SMBASE); 3805 + r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE); 3806 3806 break; 3807 3807 case KVM_CAP_VAPIC: 3808 - r = !kvm_x86_ops.cpu_has_accelerated_tpr(); 3808 + r = !static_call(kvm_x86_cpu_has_accelerated_tpr)(); 3809 3809 break; 3810 3810 case KVM_CAP_NR_VCPUS: 3811 3811 r = KVM_SOFT_MAX_VCPUS; ··· 3971 3971 { 3972 3972 /* Address WBINVD may be executed by guest */ 3973 3973 if (need_emulate_wbinvd(vcpu)) { 3974 - if (kvm_x86_ops.has_wbinvd_exit()) 3974 + if (static_call(kvm_x86_has_wbinvd_exit)()) 3975 3975 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); 3976 3976 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) 3977 3977 smp_call_function_single(vcpu->cpu, 3978 3978 wbinvd_ipi, NULL, 1); 3979 3979 } 3980 3980 3981 - kvm_x86_ops.vcpu_load(vcpu, cpu); 3981 + static_call(kvm_x86_vcpu_load)(vcpu, cpu); 3982 3982 3983 3983 /* Save host pkru register if supported */ 3984 3984 vcpu->arch.host_pkru = read_pkru(); ··· 4056 4056 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 4057 4057 { 4058 4058 if (vcpu->preempted && !vcpu->arch.guest_state_protected) 4059 - vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); 4059 + vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); 4060 4060 4061 4061 kvm_steal_time_set_preempted(vcpu); 4062 - kvm_x86_ops.vcpu_put(vcpu); 4062 + static_call(kvm_x86_vcpu_put)(vcpu); 4063 4063 vcpu->arch.last_host_tsc = rdtsc(); 4064 4064 /* 4065 4065 * If userspace has set any breakpoints or watchpoints, dr6 is restored ··· 4073 4073 struct kvm_lapic_state *s) 4074 4074 { 4075 4075 if (vcpu->arch.apicv_active) 4076 - kvm_x86_ops.sync_pir_to_irr(vcpu); 4076 + static_call(kvm_x86_sync_pir_to_irr)(vcpu); 4077 4077 4078 4078 return kvm_apic_get_state(vcpu, s); 4079 4079 } ··· 4183 4183 for (bank = 0; bank < bank_num; bank++) 4184 4184 vcpu->arch.mce_banks[bank*4] = ~(u64)0; 4185 4185 4186 - kvm_x86_ops.setup_mce(vcpu); 4186 + 
static_call(kvm_x86_setup_mce)(vcpu); 4187 4187 out: 4188 4188 return r; 4189 4189 } ··· 4290 4290 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; 4291 4291 events->interrupt.nr = vcpu->arch.interrupt.nr; 4292 4292 events->interrupt.soft = 0; 4293 - events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); 4293 + events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 4294 4294 4295 4295 events->nmi.injected = vcpu->arch.nmi_injected; 4296 4296 events->nmi.pending = vcpu->arch.nmi_pending != 0; 4297 - events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); 4297 + events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); 4298 4298 events->nmi.pad = 0; 4299 4299 4300 4300 events->sipi_vector = 0; /* never valid when reporting to user space */ ··· 4361 4361 vcpu->arch.interrupt.nr = events->interrupt.nr; 4362 4362 vcpu->arch.interrupt.soft = events->interrupt.soft; 4363 4363 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) 4364 - kvm_x86_ops.set_interrupt_shadow(vcpu, 4365 - events->interrupt.shadow); 4364 + static_call(kvm_x86_set_interrupt_shadow)(vcpu, 4365 + events->interrupt.shadow); 4366 4366 4367 4367 vcpu->arch.nmi_injected = events->nmi.injected; 4368 4368 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) 4369 4369 vcpu->arch.nmi_pending = events->nmi.pending; 4370 - kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); 4370 + static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); 4371 4371 4372 4372 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && 4373 4373 lapic_in_kernel(vcpu)) ··· 4662 4662 if (!kvm_x86_ops.enable_direct_tlbflush) 4663 4663 return -ENOTTY; 4664 4664 4665 - return kvm_x86_ops.enable_direct_tlbflush(vcpu); 4665 + return static_call(kvm_x86_enable_direct_tlbflush)(vcpu); 4666 4666 4667 4667 case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: 4668 4668 vcpu->arch.pv_cpuid.enforce = cap->args[0]; ··· 5054 5054 5055 5055 if (addr > (unsigned int)(-3 * PAGE_SIZE)) 5056 5056 return -EINVAL; 5057 - ret = 
kvm_x86_ops.set_tss_addr(kvm, addr); 5057 + ret = static_call(kvm_x86_set_tss_addr)(kvm, addr); 5058 5058 return ret; 5059 5059 } 5060 5060 5061 5061 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, 5062 5062 u64 ident_addr) 5063 5063 { 5064 - return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr); 5064 + return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr); 5065 5065 } 5066 5066 5067 5067 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, ··· 5218 5218 /* 5219 5219 * Flush potentially hardware-cached dirty pages to dirty_bitmap. 5220 5220 */ 5221 - if (kvm_x86_ops.flush_log_dirty) 5222 - kvm_x86_ops.flush_log_dirty(kvm); 5221 + static_call_cond(kvm_x86_flush_log_dirty)(kvm); 5223 5222 } 5224 5223 5225 5224 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, ··· 5700 5701 case KVM_MEMORY_ENCRYPT_OP: { 5701 5702 r = -ENOTTY; 5702 5703 if (kvm_x86_ops.mem_enc_op) 5703 - r = kvm_x86_ops.mem_enc_op(kvm, argp); 5704 + r = static_call(kvm_x86_mem_enc_op)(kvm, argp); 5704 5705 break; 5705 5706 } 5706 5707 case KVM_MEMORY_ENCRYPT_REG_REGION: { ··· 5712 5713 5713 5714 r = -ENOTTY; 5714 5715 if (kvm_x86_ops.mem_enc_reg_region) 5715 - r = kvm_x86_ops.mem_enc_reg_region(kvm, &region); 5716 + r = static_call(kvm_x86_mem_enc_reg_region)(kvm, &region); 5716 5717 break; 5717 5718 } 5718 5719 case KVM_MEMORY_ENCRYPT_UNREG_REGION: { ··· 5724 5725 5725 5726 r = -ENOTTY; 5726 5727 if (kvm_x86_ops.mem_enc_unreg_region) 5727 - r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region); 5728 + r = static_call(kvm_x86_mem_enc_unreg_region)(kvm, &region); 5728 5729 break; 5729 5730 } 5730 5731 case KVM_HYPERV_EVENTFD: { ··· 5826 5827 } 5827 5828 5828 5829 for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) { 5829 - if (!kvm_x86_ops.has_emulated_msr(NULL, emulated_msrs_all[i])) 5830 + if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i])) 5830 5831 continue; 5831 5832 5832 5833 emulated_msrs[num_emulated_msrs++] = 
emulated_msrs_all[i]; ··· 5889 5890 static void kvm_set_segment(struct kvm_vcpu *vcpu, 5890 5891 struct kvm_segment *var, int seg) 5891 5892 { 5892 - kvm_x86_ops.set_segment(vcpu, var, seg); 5893 + static_call(kvm_x86_set_segment)(vcpu, var, seg); 5893 5894 } 5894 5895 5895 5896 void kvm_get_segment(struct kvm_vcpu *vcpu, 5896 5897 struct kvm_segment *var, int seg) 5897 5898 { 5898 - kvm_x86_ops.get_segment(vcpu, var, seg); 5899 + static_call(kvm_x86_get_segment)(vcpu, var, seg); 5899 5900 } 5900 5901 5901 5902 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, ··· 5915 5916 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, 5916 5917 struct x86_exception *exception) 5917 5918 { 5918 - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5919 + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 5919 5920 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 5920 5921 } 5921 5922 5922 5923 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, 5923 5924 struct x86_exception *exception) 5924 5925 { 5925 - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5926 + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 5926 5927 access |= PFERR_FETCH_MASK; 5927 5928 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 5928 5929 } ··· 5930 5931 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 5931 5932 struct x86_exception *exception) 5932 5933 { 5933 - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 5934 + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 5934 5935 access |= PFERR_WRITE_MASK; 5935 5936 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); 5936 5937 } ··· 5979 5980 struct x86_exception *exception) 5980 5981 { 5981 5982 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 5982 - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; 5983 + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 5983 5984 unsigned offset; 5984 5985 int ret; 5985 5986 ··· 6004 6005 gva_t addr, void *val, unsigned int bytes, 6005 6006 struct x86_exception *exception) 6006 6007 { 6007 - u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 6008 + u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 6008 6009 6009 6010 /* 6010 6011 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED ··· 6025 6026 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6026 6027 u32 access = 0; 6027 6028 6028 - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) 6029 + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) 6029 6030 access |= PFERR_USER_MASK; 6030 6031 6031 6032 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); ··· 6078 6079 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 6079 6080 u32 access = PFERR_WRITE_MASK; 6080 6081 6081 - if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) 6082 + if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3) 6082 6083 access |= PFERR_USER_MASK; 6083 6084 6084 6085 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, ··· 6103 6104 char sig[5]; /* ud2; .ascii "kvm" */ 6104 6105 struct x86_exception e; 6105 6106 6106 - if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0))) 6107 + if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0))) 6107 6108 return 1; 6108 6109 6109 6110 if (force_emulation_prefix && ··· 6137 6138 gpa_t *gpa, struct x86_exception *exception, 6138 6139 bool write) 6139 6140 { 6140 - u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) 6141 + u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) 6141 6142 | (write ? 
PFERR_WRITE_MASK : 0); 6142 6143 6143 6144 /* ··· 6545 6546 6546 6547 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 6547 6548 { 6548 - return kvm_x86_ops.get_segment_base(vcpu, seg); 6549 + return static_call(kvm_x86_get_segment_base)(vcpu, seg); 6549 6550 } 6550 6551 6551 6552 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) ··· 6558 6559 if (!need_emulate_wbinvd(vcpu)) 6559 6560 return X86EMUL_CONTINUE; 6560 6561 6561 - if (kvm_x86_ops.has_wbinvd_exit()) { 6562 + if (static_call(kvm_x86_has_wbinvd_exit)()) { 6562 6563 int cpu = get_cpu(); 6563 6564 6564 6565 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); ··· 6663 6664 6664 6665 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt) 6665 6666 { 6666 - return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt)); 6667 + return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt)); 6667 6668 } 6668 6669 6669 6670 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 6670 6671 { 6671 - kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt); 6672 + static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt); 6672 6673 } 6673 6674 6674 6675 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 6675 6676 { 6676 - kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt); 6677 + static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt); 6677 6678 } 6678 6679 6679 6680 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 6680 6681 { 6681 - kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt); 6682 + static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt); 6682 6683 } 6683 6684 6684 6685 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt) 6685 6686 { 6686 - kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt); 6687 + static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt); 6687 6688 } 6688 6689 6689 6690 static unsigned long emulator_get_cached_segment_base( ··· 6825 6826 struct x86_instruction_info *info, 6826 6827 enum 
x86_intercept_stage stage) 6827 6828 { 6828 - return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage, 6829 + return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage, 6829 6830 &ctxt->exception); 6830 6831 } 6831 6832 ··· 6863 6864 6864 6865 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) 6865 6866 { 6866 - kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked); 6867 + static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked); 6867 6868 } 6868 6869 6869 6870 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) ··· 6879 6880 static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, 6880 6881 const char *smstate) 6881 6882 { 6882 - return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate); 6883 + return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate); 6883 6884 } 6884 6885 6885 6886 static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt) ··· 6941 6942 6942 6943 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) 6943 6944 { 6944 - u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); 6945 + u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); 6945 6946 /* 6946 6947 * an sti; sti; sequence only disable interrupts for the first 6947 6948 * instruction. 
So, if the last instruction, be it emulated or ··· 6952 6953 if (int_shadow & mask) 6953 6954 mask = 0; 6954 6955 if (unlikely(int_shadow || mask)) { 6955 - kvm_x86_ops.set_interrupt_shadow(vcpu, mask); 6956 + static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); 6956 6957 if (!mask) 6957 6958 kvm_make_request(KVM_REQ_EVENT, vcpu); 6958 6959 } ··· 6994 6995 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; 6995 6996 int cs_db, cs_l; 6996 6997 6997 - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 6998 + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 6998 6999 6999 7000 ctxt->gpa_available = false; 7000 7001 ctxt->eflags = kvm_get_rflags(vcpu); ··· 7055 7056 7056 7057 kvm_queue_exception(vcpu, UD_VECTOR); 7057 7058 7058 - if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { 7059 + if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { 7059 7060 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 7060 7061 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; 7061 7062 vcpu->run->internal.ndata = 0; ··· 7236 7237 7237 7238 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) 7238 7239 { 7239 - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); 7240 + unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); 7240 7241 int r; 7241 7242 7242 - r = kvm_x86_ops.skip_emulated_instruction(vcpu); 7243 + r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); 7243 7244 if (unlikely(!r)) 7244 7245 return 0; 7245 7246 ··· 7369 7370 bool writeback = true; 7370 7371 bool write_fault_to_spt; 7371 7372 7372 - if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len))) 7373 + if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len))) 7373 7374 return 1; 7374 7375 7375 7376 vcpu->arch.l1tf_flush_l1d = true; ··· 7492 7493 r = 1; 7493 7494 7494 7495 if (writeback) { 7495 - unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); 7496 + unsigned long rflags = 
static_call(kvm_x86_get_rflags)(vcpu); 7496 7497 toggle_interruptibility(vcpu, ctxt->interruptibility); 7497 7498 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 7498 7499 if (!ctxt->have_exception || ··· 7501 7502 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 7502 7503 r = kvm_vcpu_do_singlestep(vcpu); 7503 7504 if (kvm_x86_ops.update_emulated_instruction) 7504 - kvm_x86_ops.update_emulated_instruction(vcpu); 7505 + static_call(kvm_x86_update_emulated_instruction)(vcpu); 7505 7506 __kvm_set_rflags(vcpu, ctxt->eflags); 7506 7507 } 7507 7508 ··· 7830 7831 int user_mode = 3; 7831 7832 7832 7833 if (__this_cpu_read(current_vcpu)) 7833 - user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu)); 7834 + user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu)); 7834 7835 7835 7836 return user_mode != 0; 7836 7837 } ··· 8163 8164 a3 &= 0xFFFFFFFF; 8164 8165 } 8165 8166 8166 - if (kvm_x86_ops.get_cpl(vcpu) != 0) { 8167 + if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { 8167 8168 ret = -KVM_EPERM; 8168 8169 goto out; 8169 8170 } ··· 8220 8221 char instruction[3]; 8221 8222 unsigned long rip = kvm_rip_read(vcpu); 8222 8223 8223 - kvm_x86_ops.patch_hypercall(vcpu, instruction); 8224 + static_call(kvm_x86_patch_hypercall)(vcpu, instruction); 8224 8225 8225 8226 return emulator_write_emulated(ctxt, rip, instruction, 3, 8226 8227 &ctxt->exception); ··· 8277 8278 8278 8279 tpr = kvm_lapic_get_cr8(vcpu); 8279 8280 8280 - kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); 8281 + static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); 8281 8282 } 8282 8283 8283 8284 static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) ··· 8288 8289 /* try to reinject previous events if any */ 8289 8290 8290 8291 if (vcpu->arch.exception.injected) { 8291 - kvm_x86_ops.queue_exception(vcpu); 8292 + static_call(kvm_x86_queue_exception)(vcpu); 8292 8293 can_inject = false; 8293 8294 } 8294 8295 /* ··· 8307 8308 */ 8308 
8309 else if (!vcpu->arch.exception.pending) { 8309 8310 if (vcpu->arch.nmi_injected) { 8310 - kvm_x86_ops.set_nmi(vcpu); 8311 + static_call(kvm_x86_set_nmi)(vcpu); 8311 8312 can_inject = false; 8312 8313 } else if (vcpu->arch.interrupt.injected) { 8313 - kvm_x86_ops.set_irq(vcpu); 8314 + static_call(kvm_x86_set_irq)(vcpu); 8314 8315 can_inject = false; 8315 8316 } 8316 8317 } ··· 8351 8352 } 8352 8353 } 8353 8354 8354 - kvm_x86_ops.queue_exception(vcpu); 8355 + static_call(kvm_x86_queue_exception)(vcpu); 8355 8356 can_inject = false; 8356 8357 } 8357 8358 ··· 8367 8368 * The kvm_x86_ops hooks communicate this by returning -EBUSY. 8368 8369 */ 8369 8370 if (vcpu->arch.smi_pending) { 8370 - r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; 8371 + r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; 8371 8372 if (r < 0) 8372 8373 goto busy; 8373 8374 if (r) { ··· 8376 8377 enter_smm(vcpu); 8377 8378 can_inject = false; 8378 8379 } else 8379 - kvm_x86_ops.enable_smi_window(vcpu); 8380 + static_call(kvm_x86_enable_smi_window)(vcpu); 8380 8381 } 8381 8382 8382 8383 if (vcpu->arch.nmi_pending) { 8383 - r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; 8384 + r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; 8384 8385 if (r < 0) 8385 8386 goto busy; 8386 8387 if (r) { 8387 8388 --vcpu->arch.nmi_pending; 8388 8389 vcpu->arch.nmi_injected = true; 8389 - kvm_x86_ops.set_nmi(vcpu); 8390 + static_call(kvm_x86_set_nmi)(vcpu); 8390 8391 can_inject = false; 8391 - WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); 8392 + WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); 8392 8393 } 8393 8394 if (vcpu->arch.nmi_pending) 8394 - kvm_x86_ops.enable_nmi_window(vcpu); 8395 + static_call(kvm_x86_enable_nmi_window)(vcpu); 8395 8396 } 8396 8397 8397 8398 if (kvm_cpu_has_injectable_intr(vcpu)) { 8398 - r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; 8399 + r = can_inject ? 
static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; 8399 8400 if (r < 0) 8400 8401 goto busy; 8401 8402 if (r) { 8402 8403 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); 8403 - kvm_x86_ops.set_irq(vcpu); 8404 - WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); 8404 + static_call(kvm_x86_set_irq)(vcpu); 8405 + WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); 8405 8406 } 8406 8407 if (kvm_cpu_has_injectable_intr(vcpu)) 8407 - kvm_x86_ops.enable_irq_window(vcpu); 8408 + static_call(kvm_x86_enable_irq_window)(vcpu); 8408 8409 } 8409 8410 8410 8411 if (is_guest_mode(vcpu) && ··· 8429 8430 * If an NMI is already in progress, limit further NMIs to just one. 8430 8431 * Otherwise, allow two (and we'll inject the first one immediately). 8431 8432 */ 8432 - if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) 8433 + if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) 8433 8434 limit = 1; 8434 8435 8435 8436 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); ··· 8519 8520 put_smstate(u32, buf, 0x7f7c, seg.limit); 8520 8521 put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg)); 8521 8522 8522 - kvm_x86_ops.get_gdt(vcpu, &dt); 8523 + static_call(kvm_x86_get_gdt)(vcpu, &dt); 8523 8524 put_smstate(u32, buf, 0x7f74, dt.address); 8524 8525 put_smstate(u32, buf, 0x7f70, dt.size); 8525 8526 8526 - kvm_x86_ops.get_idt(vcpu, &dt); 8527 + static_call(kvm_x86_get_idt)(vcpu, &dt); 8527 8528 put_smstate(u32, buf, 0x7f58, dt.address); 8528 8529 put_smstate(u32, buf, 0x7f54, dt.size); 8529 8530 ··· 8573 8574 put_smstate(u32, buf, 0x7e94, seg.limit); 8574 8575 put_smstate(u64, buf, 0x7e98, seg.base); 8575 8576 8576 - kvm_x86_ops.get_idt(vcpu, &dt); 8577 + static_call(kvm_x86_get_idt)(vcpu, &dt); 8577 8578 put_smstate(u32, buf, 0x7e84, dt.size); 8578 8579 put_smstate(u64, buf, 0x7e88, dt.address); 8579 8580 ··· 8583 8584 put_smstate(u32, buf, 0x7e74, seg.limit); 8584 8585 put_smstate(u64, 
buf, 0x7e78, seg.base); 8585 8586 8586 - kvm_x86_ops.get_gdt(vcpu, &dt); 8587 + static_call(kvm_x86_get_gdt)(vcpu, &dt); 8587 8588 put_smstate(u32, buf, 0x7e64, dt.size); 8588 8589 put_smstate(u64, buf, 0x7e68, dt.address); 8589 8590 ··· 8613 8614 * vCPU state (e.g. leave guest mode) after we've saved the state into 8614 8615 * the SMM state-save area. 8615 8616 */ 8616 - kvm_x86_ops.pre_enter_smm(vcpu, buf); 8617 + static_call(kvm_x86_pre_enter_smm)(vcpu, buf); 8617 8618 8618 8619 vcpu->arch.hflags |= HF_SMM_MASK; 8619 8620 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); 8620 8621 8621 - if (kvm_x86_ops.get_nmi_mask(vcpu)) 8622 + if (static_call(kvm_x86_get_nmi_mask)(vcpu)) 8622 8623 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 8623 8624 else 8624 - kvm_x86_ops.set_nmi_mask(vcpu, true); 8625 + static_call(kvm_x86_set_nmi_mask)(vcpu, true); 8625 8626 8626 8627 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 8627 8628 kvm_rip_write(vcpu, 0x8000); 8628 8629 8629 8630 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); 8630 - kvm_x86_ops.set_cr0(vcpu, cr0); 8631 + static_call(kvm_x86_set_cr0)(vcpu, cr0); 8631 8632 vcpu->arch.cr0 = cr0; 8632 8633 8633 - kvm_x86_ops.set_cr4(vcpu, 0); 8634 + static_call(kvm_x86_set_cr4)(vcpu, 0); 8634 8635 8635 8636 /* Undocumented: IDT limit is set to zero on entry to SMM. 
*/ 8636 8637 dt.address = dt.size = 0; 8637 - kvm_x86_ops.set_idt(vcpu, &dt); 8638 + static_call(kvm_x86_set_idt)(vcpu, &dt); 8638 8639 8639 8640 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); 8640 8641 ··· 8665 8666 8666 8667 #ifdef CONFIG_X86_64 8667 8668 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) 8668 - kvm_x86_ops.set_efer(vcpu, 0); 8669 + static_call(kvm_x86_set_efer)(vcpu, 0); 8669 8670 #endif 8670 8671 8671 8672 kvm_update_cpuid_runtime(vcpu); ··· 8703 8704 8704 8705 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); 8705 8706 kvm_apic_update_apicv(vcpu); 8706 - kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); 8707 + static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); 8707 8708 } 8708 8709 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv); 8709 8710 ··· 8720 8721 unsigned long old, new, expected; 8721 8722 8722 8723 if (!kvm_x86_ops.check_apicv_inhibit_reasons || 8723 - !kvm_x86_ops.check_apicv_inhibit_reasons(bit)) 8724 + !static_call(kvm_x86_check_apicv_inhibit_reasons)(bit)) 8724 8725 return; 8725 8726 8726 8727 old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); ··· 8740 8741 8741 8742 trace_kvm_apicv_update_request(activate, bit); 8742 8743 if (kvm_x86_ops.pre_update_apicv_exec_ctrl) 8743 - kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate); 8744 + static_call(kvm_x86_pre_update_apicv_exec_ctrl)(kvm, activate); 8744 8745 8745 8746 /* 8746 8747 * Sending request to update APICV for all other vcpus, ··· 8766 8767 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); 8767 8768 else { 8768 8769 if (vcpu->arch.apicv_active) 8769 - kvm_x86_ops.sync_pir_to_irr(vcpu); 8770 + static_call(kvm_x86_sync_pir_to_irr)(vcpu); 8770 8771 if (ioapic_in_kernel(vcpu->kvm)) 8771 8772 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); 8772 8773 } ··· 8786 8787 8787 8788 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, 8788 8789 vcpu_to_synic(vcpu)->vec_bitmap, 256); 8789 - kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); 8790 + 
static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); 8790 8791 } 8791 8792 8792 8793 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, ··· 8811 8812 if (!kvm_x86_ops.set_apic_access_page_addr) 8812 8813 return; 8813 8814 8814 - kvm_x86_ops.set_apic_access_page_addr(vcpu); 8815 + static_call(kvm_x86_set_apic_access_page_addr)(vcpu); 8815 8816 } 8816 8817 8817 8818 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) ··· 8954 8955 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) 8955 8956 kvm_check_async_pf_completion(vcpu); 8956 8957 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) 8957 - kvm_x86_ops.msr_filter_changed(vcpu); 8958 + static_call(kvm_x86_msr_filter_changed)(vcpu); 8958 8959 } 8959 8960 8960 8961 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { ··· 8967 8968 8968 8969 inject_pending_event(vcpu, &req_immediate_exit); 8969 8970 if (req_int_win) 8970 - kvm_x86_ops.enable_irq_window(vcpu); 8971 + static_call(kvm_x86_enable_irq_window)(vcpu); 8971 8972 8972 8973 if (kvm_lapic_enabled(vcpu)) { 8973 8974 update_cr8_intercept(vcpu); ··· 8982 8983 8983 8984 preempt_disable(); 8984 8985 8985 - kvm_x86_ops.prepare_guest_switch(vcpu); 8986 + static_call(kvm_x86_prepare_guest_switch)(vcpu); 8986 8987 8987 8988 /* 8988 8989 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt ··· 9013 9014 * notified with kvm_vcpu_kick. 
9014 9015 */ 9015 9016 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) 9016 - kvm_x86_ops.sync_pir_to_irr(vcpu); 9017 + static_call(kvm_x86_sync_pir_to_irr)(vcpu); 9017 9018 9018 9019 if (kvm_vcpu_exit_request(vcpu)) { 9019 9020 vcpu->mode = OUTSIDE_GUEST_MODE; ··· 9027 9028 9028 9029 if (req_immediate_exit) { 9029 9030 kvm_make_request(KVM_REQ_EVENT, vcpu); 9030 - kvm_x86_ops.request_immediate_exit(vcpu); 9031 + static_call(kvm_x86_request_immediate_exit)(vcpu); 9031 9032 } 9032 9033 9033 9034 fpregs_assert_state_consistent(); ··· 9044 9045 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; 9045 9046 } 9046 9047 9047 - exit_fastpath = kvm_x86_ops.run(vcpu); 9048 + exit_fastpath = static_call(kvm_x86_run)(vcpu); 9048 9049 9049 9050 /* 9050 9051 * Do this here before restoring debug registers on the host. And ··· 9054 9055 */ 9055 9056 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { 9056 9057 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); 9057 - kvm_x86_ops.sync_dirty_debug_regs(vcpu); 9058 + static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); 9058 9059 kvm_update_dr0123(vcpu); 9059 9060 kvm_update_dr7(vcpu); 9060 9061 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; ··· 9076 9077 vcpu->mode = OUTSIDE_GUEST_MODE; 9077 9078 smp_wmb(); 9078 9079 9079 - kvm_x86_ops.handle_exit_irqoff(vcpu); 9080 + static_call(kvm_x86_handle_exit_irqoff)(vcpu); 9080 9081 9081 9082 /* 9082 9083 * Consume any pending interrupts, including the possible source of ··· 9118 9119 if (vcpu->arch.apic_attention) 9119 9120 kvm_lapic_sync_from_vapic(vcpu); 9120 9121 9121 - r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); 9122 + r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); 9122 9123 return r; 9123 9124 9124 9125 cancel_injection: 9125 9126 if (req_immediate_exit) 9126 9127 kvm_make_request(KVM_REQ_EVENT, vcpu); 9127 - kvm_x86_ops.cancel_injection(vcpu); 9128 + static_call(kvm_x86_cancel_injection)(vcpu); 9128 9129 if 
(unlikely(vcpu->arch.apic_attention)) 9129 9130 kvm_lapic_sync_from_vapic(vcpu); 9130 9131 out: ··· 9134 9135 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) 9135 9136 { 9136 9137 if (!kvm_arch_vcpu_runnable(vcpu) && 9137 - (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { 9138 + (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) { 9138 9139 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); 9139 9140 kvm_vcpu_block(vcpu); 9140 9141 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); 9141 9142 9142 9143 if (kvm_x86_ops.post_block) 9143 - kvm_x86_ops.post_block(vcpu); 9144 + static_call(kvm_x86_post_block)(vcpu); 9144 9145 9145 9146 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) 9146 9147 return 1; ··· 9536 9537 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 9537 9538 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 9538 9539 9539 - kvm_x86_ops.get_idt(vcpu, &dt); 9540 + static_call(kvm_x86_get_idt)(vcpu, &dt); 9540 9541 sregs->idt.limit = dt.size; 9541 9542 sregs->idt.base = dt.address; 9542 - kvm_x86_ops.get_gdt(vcpu, &dt); 9543 + static_call(kvm_x86_get_gdt)(vcpu, &dt); 9543 9544 sregs->gdt.limit = dt.size; 9544 9545 sregs->gdt.base = dt.address; 9545 9546 ··· 9692 9693 9693 9694 dt.size = sregs->idt.limit; 9694 9695 dt.address = sregs->idt.base; 9695 - kvm_x86_ops.set_idt(vcpu, &dt); 9696 + static_call(kvm_x86_set_idt)(vcpu, &dt); 9696 9697 dt.size = sregs->gdt.limit; 9697 9698 dt.address = sregs->gdt.base; 9698 - kvm_x86_ops.set_gdt(vcpu, &dt); 9699 + static_call(kvm_x86_set_gdt)(vcpu, &dt); 9699 9700 9700 9701 vcpu->arch.cr2 = sregs->cr2; 9701 9702 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; ··· 9705 9706 kvm_set_cr8(vcpu, sregs->cr8); 9706 9707 9707 9708 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; 9708 - kvm_x86_ops.set_efer(vcpu, sregs->efer); 9709 + static_call(kvm_x86_set_efer)(vcpu, sregs->efer); 9709 9710 9710 9711 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; 9711 - 
kvm_x86_ops.set_cr0(vcpu, sregs->cr0); 9712 + static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); 9712 9713 vcpu->arch.cr0 = sregs->cr0; 9713 9714 9714 9715 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 9715 - kvm_x86_ops.set_cr4(vcpu, sregs->cr4); 9716 + static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); 9716 9717 9717 9718 idx = srcu_read_lock(&vcpu->kvm->srcu); 9718 9719 if (is_pae_paging(vcpu)) { ··· 9820 9821 */ 9821 9822 kvm_set_rflags(vcpu, rflags); 9822 9823 9823 - kvm_x86_ops.update_exception_bitmap(vcpu); 9824 + static_call(kvm_x86_update_exception_bitmap)(vcpu); 9824 9825 9825 9826 r = 0; 9826 9827 ··· 10047 10048 10048 10049 kvm_hv_vcpu_init(vcpu); 10049 10050 10050 - r = kvm_x86_ops.vcpu_create(vcpu); 10051 + r = static_call(kvm_x86_vcpu_create)(vcpu); 10051 10052 if (r) 10052 10053 goto free_guest_fpu; 10053 10054 ··· 10110 10111 10111 10112 kvmclock_reset(vcpu); 10112 10113 10113 - kvm_x86_ops.vcpu_free(vcpu); 10114 + static_call(kvm_x86_vcpu_free)(vcpu); 10114 10115 10115 10116 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); 10116 10117 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); ··· 10199 10200 10200 10201 vcpu->arch.ia32_xss = 0; 10201 10202 10202 - kvm_x86_ops.vcpu_reset(vcpu, init_event); 10203 + static_call(kvm_x86_vcpu_reset)(vcpu, init_event); 10203 10204 } 10204 10205 10205 10206 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ··· 10225 10226 bool stable, backwards_tsc = false; 10226 10227 10227 10228 kvm_user_return_msr_cpu_online(); 10228 - ret = kvm_x86_ops.hardware_enable(); 10229 + ret = static_call(kvm_x86_hardware_enable)(); 10229 10230 if (ret != 0) 10230 10231 return ret; 10231 10232 ··· 10307 10308 10308 10309 void kvm_arch_hardware_disable(void) 10309 10310 { 10310 - kvm_x86_ops.hardware_disable(); 10311 + static_call(kvm_x86_hardware_disable)(); 10311 10312 drop_user_return_notifiers(); 10312 10313 } 10313 10314 ··· 10326 10327 return r; 10327 10328 10328 10329 memcpy(&kvm_x86_ops, 
ops->runtime_ops, sizeof(kvm_x86_ops)); 10330 + kvm_ops_static_call_update(); 10329 10331 10330 10332 if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) 10331 10333 supported_xss = 0; ··· 10355 10355 10356 10356 void kvm_arch_hardware_unsetup(void) 10357 10357 { 10358 - kvm_x86_ops.hardware_unsetup(); 10358 + static_call(kvm_x86_hardware_unsetup)(); 10359 10359 } 10360 10360 10361 10361 int kvm_arch_check_processor_compat(void *opaque) ··· 10395 10395 pmu->need_cleanup = true; 10396 10396 kvm_make_request(KVM_REQ_PMU, vcpu); 10397 10397 } 10398 - kvm_x86_ops.sched_in(vcpu, cpu); 10398 + static_call(kvm_x86_sched_in)(vcpu, cpu); 10399 10399 } 10400 10400 10401 10401 void kvm_arch_free_vm(struct kvm *kvm) ··· 10439 10439 kvm_page_track_init(kvm); 10440 10440 kvm_mmu_init_vm(kvm); 10441 10441 10442 - return kvm_x86_ops.vm_init(kvm); 10442 + return static_call(kvm_x86_vm_init)(kvm); 10443 10443 } 10444 10444 10445 10445 int kvm_arch_post_init_vm(struct kvm *kvm) ··· 10584 10584 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); 10585 10585 mutex_unlock(&kvm->slots_lock); 10586 10586 } 10587 - if (kvm_x86_ops.vm_destroy) 10588 - kvm_x86_ops.vm_destroy(kvm); 10587 + static_call_cond(kvm_x86_vm_destroy)(kvm); 10589 10588 for (i = 0; i < kvm->arch.msr_filter.count; i++) 10590 10589 kfree(kvm->arch.msr_filter.ranges[i].bitmap); 10591 10590 kvm_pic_destroy(kvm); ··· 10775 10776 */ 10776 10777 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { 10777 10778 if (kvm_x86_ops.slot_enable_log_dirty) { 10778 - kvm_x86_ops.slot_enable_log_dirty(kvm, new); 10779 + static_call(kvm_x86_slot_enable_log_dirty)(kvm, new); 10779 10780 } else { 10780 10781 int level = 10781 10782 kvm_dirty_log_manual_protect_and_init_set(kvm) ? 
··· 10792 10793 kvm_mmu_slot_remove_write_access(kvm, new, level); 10793 10794 } 10794 10795 } else { 10795 - if (kvm_x86_ops.slot_disable_log_dirty) 10796 - kvm_x86_ops.slot_disable_log_dirty(kvm, new); 10796 + static_call_cond(kvm_x86_slot_disable_log_dirty)(kvm, new); 10797 10797 } 10798 10798 } 10799 10799 ··· 10831 10833 { 10832 10834 return (is_guest_mode(vcpu) && 10833 10835 kvm_x86_ops.guest_apic_has_interrupt && 10834 - kvm_x86_ops.guest_apic_has_interrupt(vcpu)); 10836 + static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); 10835 10837 } 10836 10838 10837 10839 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) ··· 10850 10852 10851 10853 if (kvm_test_request(KVM_REQ_NMI, vcpu) || 10852 10854 (vcpu->arch.nmi_pending && 10853 - kvm_x86_ops.nmi_allowed(vcpu, false))) 10855 + static_call(kvm_x86_nmi_allowed)(vcpu, false))) 10854 10856 return true; 10855 10857 10856 10858 if (kvm_test_request(KVM_REQ_SMI, vcpu) || 10857 10859 (vcpu->arch.smi_pending && 10858 - kvm_x86_ops.smi_allowed(vcpu, false))) 10860 + static_call(kvm_x86_smi_allowed)(vcpu, false))) 10859 10861 return true; 10860 10862 10861 10863 if (kvm_arch_interrupt_allowed(vcpu) && ··· 10889 10891 kvm_test_request(KVM_REQ_EVENT, vcpu)) 10890 10892 return true; 10891 10893 10892 - if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) 10894 + if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) 10893 10895 return true; 10894 10896 10895 10897 return false; ··· 10907 10909 10908 10910 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 10909 10911 { 10910 - return kvm_x86_ops.interrupt_allowed(vcpu, false); 10912 + return static_call(kvm_x86_interrupt_allowed)(vcpu, false); 10911 10913 } 10912 10914 10913 10915 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) ··· 10933 10935 { 10934 10936 unsigned long rflags; 10935 10937 10936 - rflags = kvm_x86_ops.get_rflags(vcpu); 10938 + rflags = 
static_call(kvm_x86_get_rflags)(vcpu); 10937 10939 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) 10938 10940 rflags &= ~X86_EFLAGS_TF; 10939 10941 return rflags; ··· 10945 10947 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && 10946 10948 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) 10947 10949 rflags |= X86_EFLAGS_TF; 10948 - kvm_x86_ops.set_rflags(vcpu, rflags); 10950 + static_call(kvm_x86_set_rflags)(vcpu, rflags); 10949 10951 } 10950 10952 10951 10953 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) ··· 11075 11077 return false; 11076 11078 11077 11079 if (!kvm_pv_async_pf_enabled(vcpu) || 11078 - (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) 11080 + (vcpu->arch.apf.send_user_only && static_call(kvm_x86_get_cpl)(vcpu) == 0)) 11079 11081 return false; 11080 11082 11081 11083 return true; ··· 11220 11222 11221 11223 irqfd->producer = prod; 11222 11224 kvm_arch_start_assignment(irqfd->kvm); 11223 - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, 11225 + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, 11224 11226 prod->irq, irqfd->gsi, 1); 11225 11227 11226 11228 if (ret) ··· 11245 11247 * when the irq is masked/disabled or the consumer side (KVM 11246 11248 * int this case doesn't want to receive the interrupts. 
11247 11249 */ 11248 - ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); 11250 + ret = static_call(kvm_x86_update_pi_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0); 11249 11251 if (ret) 11250 11252 printk(KERN_INFO "irq bypass consumer (token %p) unregistration" 11251 11253 " fails: %d\n", irqfd->consumer.token, ret); ··· 11256 11258 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, 11257 11259 uint32_t guest_irq, bool set) 11258 11260 { 11259 - return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set); 11261 + return static_call(kvm_x86_update_pi_irte)(kvm, host_irq, guest_irq, set); 11260 11262 } 11261 11263 11262 11264 bool kvm_vector_hashing_enabled(void)
+3 -3
arch/x86/kvm/x86.h
··· 98 98 99 99 if (!is_long_mode(vcpu)) 100 100 return false; 101 - kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 101 + static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); 102 102 return cs_l; 103 103 } 104 104 ··· 129 129 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) 130 130 { 131 131 ++vcpu->stat.tlb_flush; 132 - kvm_x86_ops.tlb_flush_current(vcpu); 132 + static_call(kvm_x86_tlb_flush_current)(vcpu); 133 133 } 134 134 135 135 static inline int is_pae(struct kvm_vcpu *vcpu) ··· 244 244 245 245 static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu) 246 246 { 247 - return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu); 247 + return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu); 248 248 } 249 249 250 250 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);