KVM: x86: Export KVM-internal symbols for sub-modules only

Rework almost all of KVM x86's exports to expose symbols only to KVM's
vendor modules, i.e. to kvm-{amd,intel}.ko. Keep the generic exports that
are guarded by CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y, as they're explicitly
designed/intended for external usage.

Link: https://lore.kernel.org/r/20250919003303.1355064-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Authored by Sean Christopherson, committed by Paolo Bonzini (6b36119b, 65604683)
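
For reference, a minimal sketch (an assumption, not taken from this commit) of how a KVM-only export macro can be layered on the kernel's generic EXPORT_SYMBOL_GPL_FOR_MODULES() helper, which restricts an export to a comma-separated list of module names; the real macro's header location and exact module list may differ:

    /*
     * Sketch only: restrict an export to KVM's x86 vendor modules.  The
     * module list and the header where the real
     * EXPORT_SYMBOL_FOR_KVM_INTERNAL() lives are assumptions, not taken
     * from this commit.
     */
    #include <linux/export.h>

    #define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
            EXPORT_SYMBOL_GPL_FOR_MODULES(symbol, "kvm-intel,kvm-amd")

With an export restricted this way, kvm-intel.ko and kvm-amd.ko resolve the symbol exactly as before, built-in callers are unaffected, and any other module attempting to use the symbol is expected to be rejected at load time.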

11 files changed: +173 -173
+5 -5
arch/x86/kvm/cpuid.c
··· 34 34 * aligned to sizeof(unsigned long) because it's not accessed via bitops. 35 35 */ 36 36 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; 37 - EXPORT_SYMBOL_GPL(kvm_cpu_caps); 37 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_caps); 38 38 39 39 struct cpuid_xstate_sizes { 40 40 u32 eax; ··· 131 131 132 132 return NULL; 133 133 } 134 - EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry2); 134 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_cpuid_entry2); 135 135 136 136 static int kvm_check_cpuid(struct kvm_vcpu *vcpu) 137 137 { ··· 1261 1261 kvm_cpu_cap_clear(X86_FEATURE_RDPID); 1262 1262 } 1263 1263 } 1264 - EXPORT_SYMBOL_GPL(kvm_set_cpu_caps); 1264 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cpu_caps); 1265 1265 1266 1266 #undef F 1267 1267 #undef SCATTERED_F ··· 2085 2085 used_max_basic); 2086 2086 return exact; 2087 2087 } 2088 - EXPORT_SYMBOL_GPL(kvm_cpuid); 2088 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpuid); 2089 2089 2090 2090 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) 2091 2091 { ··· 2103 2103 kvm_rdx_write(vcpu, edx); 2104 2104 return kvm_skip_emulated_instruction(vcpu); 2105 2105 } 2106 - EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 2106 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_cpuid);
+2 -2
arch/x86/kvm/hyperv.c
··· 923 923 return false; 924 924 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; 925 925 } 926 - EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); 926 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_hv_assist_page_enabled); 927 927 928 928 int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu) 929 929 { ··· 935 935 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, 936 936 &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page)); 937 937 } 938 - EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); 938 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_hv_get_assist_page); 939 939 940 940 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer) 941 941 {
+3 -3
arch/x86/kvm/irq.c
··· 103 103 104 104 return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ 105 105 } 106 - EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr); 106 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_injectable_intr); 107 107 108 108 /* 109 109 * check if there is pending interrupt without ··· 119 119 120 120 return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ 121 121 } 122 - EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); 122 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_interrupt); 123 123 124 124 /* 125 125 * Read pending interrupt(from non-APIC source) ··· 148 148 WARN_ON_ONCE(!irqchip_split(v->kvm)); 149 149 return get_userspace_extint(v); 150 150 } 151 - EXPORT_SYMBOL_GPL(kvm_cpu_get_extint); 151 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_get_extint); 152 152 153 153 /* 154 154 * Read pending interrupt vector and intack.
+3 -3
arch/x86/kvm/kvm_onhyperv.c
··· 101 101 102 102 return __hv_flush_remote_tlbs_range(kvm, &range); 103 103 } 104 - EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range); 104 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(hv_flush_remote_tlbs_range); 105 105 106 106 int hv_flush_remote_tlbs(struct kvm *kvm) 107 107 { 108 108 return __hv_flush_remote_tlbs_range(kvm, NULL); 109 109 } 110 - EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs); 110 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(hv_flush_remote_tlbs); 111 111 112 112 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) 113 113 { ··· 121 121 spin_unlock(&kvm_arch->hv_root_tdp_lock); 122 122 } 123 123 } 124 - EXPORT_SYMBOL_GPL(hv_track_root_tdp); 124 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(hv_track_root_tdp);
+20 -20
arch/x86/kvm/lapic.c
··· 106 106 } 107 107 108 108 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); 109 - EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); 109 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_has_noapic_vcpu); 110 110 111 111 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ); 112 112 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ); ··· 646 646 return ((max_updated_irr != -1) && 647 647 (max_updated_irr == *max_irr)); 648 648 } 649 - EXPORT_SYMBOL_GPL(__kvm_apic_update_irr); 649 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_apic_update_irr); 650 650 651 651 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr) 652 652 { ··· 657 657 apic->irr_pending = true; 658 658 return irr_updated; 659 659 } 660 - EXPORT_SYMBOL_GPL(kvm_apic_update_irr); 660 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_irr); 661 661 662 662 static inline int apic_search_irr(struct kvm_lapic *apic) 663 663 { ··· 697 697 { 698 698 apic_clear_irr(vec, vcpu->arch.apic); 699 699 } 700 - EXPORT_SYMBOL_GPL(kvm_apic_clear_irr); 700 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_clear_irr); 701 701 702 702 static void *apic_vector_to_isr(int vec, struct kvm_lapic *apic) 703 703 { ··· 779 779 780 780 kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic)); 781 781 } 782 - EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr); 782 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_hwapic_isr); 783 783 784 784 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) 785 785 { ··· 790 790 */ 791 791 return apic_find_highest_irr(vcpu->arch.apic); 792 792 } 793 - EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr); 793 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_find_highest_irr); 794 794 795 795 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, 796 796 int vector, int level, int trig_mode, ··· 954 954 { 955 955 apic_update_ppr(vcpu->arch.apic); 956 956 } 957 - EXPORT_SYMBOL_GPL(kvm_apic_update_ppr); 957 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_ppr); 958 958 959 959 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) 960 960 { ··· 1065 1065 return false; 1066 1066 } 1067 1067 } 1068 - EXPORT_SYMBOL_GPL(kvm_apic_match_dest); 1068 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_match_dest); 1069 1069 1070 1070 static int kvm_vector_to_index(u32 vector, u32 dest_vcpus, 1071 1071 const unsigned long *bitmap, u32 bitmap_size) ··· 1292 1292 1293 1293 return r == 1; 1294 1294 } 1295 - EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu); 1295 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_intr_is_single_vcpu); 1296 1296 1297 1297 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, 1298 1298 struct kvm_lapic_irq *irq, struct dest_map *dest_map) ··· 1569 1569 kvm_ioapic_send_eoi(apic, vector); 1570 1570 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); 1571 1571 } 1572 - EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated); 1572 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_set_eoi_accelerated); 1573 1573 1574 1574 static void kvm_icr_to_lapic_irq(struct kvm_lapic *apic, u32 icr_low, 1575 1575 u32 icr_high, struct kvm_lapic_irq *irq) ··· 1600 1600 1601 1601 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); 1602 1602 } 1603 - EXPORT_SYMBOL_GPL(kvm_apic_send_ipi); 1603 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_send_ipi); 1604 1604 1605 1605 static u32 apic_get_tmcct(struct kvm_lapic *apic) 1606 1606 { ··· 1717 1717 1718 1718 return valid_reg_mask; 1719 1719 } 1720 - EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask); 1720 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_readable_reg_mask); 1721 
1721 1722 1722 static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len, 1723 1723 void *data) ··· 1958 1958 lapic_timer_int_injected(vcpu)) 1959 1959 __kvm_wait_lapic_expire(vcpu); 1960 1960 } 1961 - EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire); 1961 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_wait_lapic_expire); 1962 1962 1963 1963 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic) 1964 1964 { ··· 2272 2272 out: 2273 2273 preempt_enable(); 2274 2274 } 2275 - EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer); 2275 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_expired_hv_timer); 2276 2276 2277 2277 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu) 2278 2278 { ··· 2525 2525 { 2526 2526 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0); 2527 2527 } 2528 - EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); 2528 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_set_eoi); 2529 2529 2530 2530 #define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13)) 2531 2531 ··· 2608 2608 else 2609 2609 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset)); 2610 2610 } 2611 - EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); 2611 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_write_nodecode); 2612 2612 2613 2613 void kvm_free_lapic(struct kvm_vcpu *vcpu) 2614 2614 { ··· 2746 2746 kvm_recalculate_apic_map(vcpu->kvm); 2747 2747 return 0; 2748 2748 } 2749 - EXPORT_SYMBOL_GPL(kvm_apic_set_base); 2749 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_set_base); 2750 2750 2751 2751 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) 2752 2752 { ··· 2794 2794 2795 2795 return 0; 2796 2796 } 2797 - EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page); 2797 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_alloc_apic_access_page); 2798 2798 2799 2799 void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu) 2800 2800 { ··· 3058 3058 __apic_update_ppr(apic, &ppr); 3059 3059 return apic_has_interrupt_for_ppr(apic, ppr); 3060 3060 } 3061 - EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt); 3061 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_has_interrupt); 3062 3062 3063 3063 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) 3064 3064 { ··· 3117 3117 } 3118 3118 3119 3119 } 3120 - EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt); 3120 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_ack_interrupt); 3121 3121 3122 3122 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu, 3123 3123 struct kvm_lapic_state *s, bool set)
+18 -18
arch/x86/kvm/mmu/mmu.c
··· 110 110 #ifdef CONFIG_X86_64 111 111 bool __read_mostly tdp_mmu_enabled = true; 112 112 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444); 113 - EXPORT_SYMBOL_GPL(tdp_mmu_enabled); 113 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(tdp_mmu_enabled); 114 114 #endif 115 115 116 116 static int max_huge_page_level __read_mostly; ··· 3865 3865 write_unlock(&kvm->mmu_lock); 3866 3866 } 3867 3867 } 3868 - EXPORT_SYMBOL_GPL(kvm_mmu_free_roots); 3868 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_roots); 3869 3869 3870 3870 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu) 3871 3871 { ··· 3892 3892 3893 3893 kvm_mmu_free_roots(kvm, mmu, roots_to_free); 3894 3894 } 3895 - EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots); 3895 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_guest_mode_roots); 3896 3896 3897 3897 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, 3898 3898 u8 level) ··· 4876 4876 4877 4877 return r; 4878 4878 } 4879 - EXPORT_SYMBOL_GPL(kvm_handle_page_fault); 4879 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_page_fault); 4880 4880 4881 4881 #ifdef CONFIG_X86_64 4882 4882 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, ··· 4966 4966 return -EIO; 4967 4967 } 4968 4968 } 4969 - EXPORT_SYMBOL_GPL(kvm_tdp_map_page); 4969 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_map_page); 4970 4970 4971 4971 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu, 4972 4972 struct kvm_pre_fault_memory *range) ··· 5162 5162 __clear_sp_write_flooding_count(sp); 5163 5163 } 5164 5164 } 5165 - EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); 5165 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_new_pgd); 5166 5166 5167 5167 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, 5168 5168 unsigned int access) ··· 5808 5808 shadow_mmu_init_context(vcpu, context, cpu_role, root_role); 5809 5809 kvm_mmu_new_pgd(vcpu, nested_cr3); 5810 5810 } 5811 - EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu); 5811 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_npt_mmu); 5812 5812 5813 5813 static union kvm_cpu_role 5814 5814 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, ··· 5862 5862 5863 5863 kvm_mmu_new_pgd(vcpu, new_eptp); 5864 5864 } 5865 - EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); 5865 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_ept_mmu); 5866 5866 5867 5867 static void init_kvm_softmmu(struct kvm_vcpu *vcpu, 5868 5868 union kvm_cpu_role cpu_role) ··· 5927 5927 else 5928 5928 init_kvm_softmmu(vcpu, cpu_role); 5929 5929 } 5930 - EXPORT_SYMBOL_GPL(kvm_init_mmu); 5930 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_mmu); 5931 5931 5932 5932 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) 5933 5933 { ··· 5963 5963 kvm_mmu_unload(vcpu); 5964 5964 kvm_init_mmu(vcpu); 5965 5965 } 5966 - EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); 5966 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_reset_context); 5967 5967 5968 5968 int kvm_mmu_load(struct kvm_vcpu *vcpu) 5969 5969 { ··· 5997 5997 out: 5998 5998 return r; 5999 5999 } 6000 - EXPORT_SYMBOL_GPL(kvm_mmu_load); 6000 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_load); 6001 6001 6002 6002 void kvm_mmu_unload(struct kvm_vcpu *vcpu) 6003 6003 { ··· 6059 6059 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu); 6060 6060 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu); 6061 6061 } 6062 - EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots); 6062 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_obsolete_roots); 6063 6063 6064 6064 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, 
6065 6065 int *bytes) ··· 6385 6385 return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn, 6386 6386 insn_len); 6387 6387 } 6388 - EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); 6388 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_page_fault); 6389 6389 6390 6390 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg) 6391 6391 { ··· 6401 6401 pr_cont(", spte[%d] = 0x%llx", level, sptes[level]); 6402 6402 pr_cont("\n"); 6403 6403 } 6404 - EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes); 6404 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_print_sptes); 6405 6405 6406 6406 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, 6407 6407 u64 addr, hpa_t root_hpa) ··· 6467 6467 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); 6468 6468 } 6469 6469 } 6470 - EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr); 6470 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invalidate_addr); 6471 6471 6472 6472 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 6473 6473 { ··· 6484 6484 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); 6485 6485 ++vcpu->stat.invlpg; 6486 6486 } 6487 - EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); 6487 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invlpg); 6488 6488 6489 6489 6490 6490 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) ··· 6537 6537 else 6538 6538 max_huge_page_level = PG_LEVEL_2M; 6539 6539 } 6540 - EXPORT_SYMBOL_GPL(kvm_configure_mmu); 6540 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_configure_mmu); 6541 6541 6542 6542 static void free_mmu_pages(struct kvm_mmu *mmu) 6543 6543 { ··· 7204 7204 7205 7205 return need_tlb_flush; 7206 7206 } 7207 - EXPORT_SYMBOL_GPL(kvm_zap_gfn_range); 7207 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_zap_gfn_range); 7208 7208 7209 7209 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, 7210 7210 const struct kvm_memory_slot *slot)
+5 -5
arch/x86/kvm/mmu/spte.c
··· 22 22 bool __read_mostly enable_mmio_caching = true; 23 23 static bool __ro_after_init allow_mmio_caching; 24 24 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444); 25 - EXPORT_SYMBOL_GPL(enable_mmio_caching); 25 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_mmio_caching); 26 26 27 27 bool __read_mostly kvm_ad_enabled; 28 28 ··· 470 470 shadow_mmio_mask = mmio_mask; 471 471 shadow_mmio_access_mask = access_mask; 472 472 } 473 - EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); 473 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_mmio_spte_mask); 474 474 475 475 void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value) 476 476 { 477 477 kvm->arch.shadow_mmio_value = mmio_value; 478 478 } 479 - EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_value); 479 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_mmio_spte_value); 480 480 481 481 void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask) 482 482 { ··· 487 487 shadow_me_value = me_value; 488 488 shadow_me_mask = me_mask; 489 489 } 490 - EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask); 490 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_me_spte_mask); 491 491 492 492 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only) 493 493 { ··· 513 513 kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 514 514 VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0); 515 515 } 516 - EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks); 516 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_ept_masks); 517 517 518 518 void kvm_mmu_reset_all_pte_masks(void) 519 519 {
+1 -1
arch/x86/kvm/mmu/tdp_mmu.c
··· 1982 1982 spte = sptes[leaf]; 1983 1983 return is_shadow_present_pte(spte) && is_last_spte(spte, leaf); 1984 1984 } 1985 - EXPORT_SYMBOL_GPL(kvm_tdp_mmu_gpa_is_mapped); 1985 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_gpa_is_mapped); 1986 1986 1987 1987 /* 1988 1988 * Returns the last level spte pointer of the shadow page walk for the given
+5 -5
arch/x86/kvm/pmu.c
··· 31 31 32 32 /* KVM's PMU capabilities, i.e. the intersection of KVM and hardware support. */ 33 33 struct x86_pmu_capability __read_mostly kvm_pmu_cap; 34 - EXPORT_SYMBOL_GPL(kvm_pmu_cap); 34 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_cap); 35 35 36 36 struct kvm_pmu_emulated_event_selectors { 37 37 u64 INSTRUCTIONS_RETIRED; ··· 373 373 pmc->counter &= pmc_bitmask(pmc); 374 374 pmc_update_sample_period(pmc); 375 375 } 376 - EXPORT_SYMBOL_GPL(pmc_write_counter); 376 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(pmc_write_counter); 377 377 378 378 static int filter_cmp(const void *pa, const void *pb, u64 mask) 379 379 { ··· 581 581 if (pmc_is_event_match(pmc, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED)) 582 582 bitmap_set(pmu->pmc_counting_branches, pmc->idx, 1); 583 583 } 584 - EXPORT_SYMBOL_GPL(kvm_pmu_recalc_pmc_emulation); 584 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_recalc_pmc_emulation); 585 585 586 586 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) 587 587 { ··· 996 996 { 997 997 kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_instructions); 998 998 } 999 - EXPORT_SYMBOL_GPL(kvm_pmu_instruction_retired); 999 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_instruction_retired); 1000 1000 1001 1001 void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu) 1002 1002 { 1003 1003 kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_branches); 1004 1004 } 1005 - EXPORT_SYMBOL_GPL(kvm_pmu_branch_retired); 1005 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_branch_retired); 1006 1006 1007 1007 static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter) 1008 1008 {
+1 -1
arch/x86/kvm/smm.c
··· 131 131 132 132 kvm_mmu_reset_context(vcpu); 133 133 } 134 - EXPORT_SYMBOL_GPL(kvm_smm_changed); 134 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_smm_changed); 135 135 136 136 void process_smi(struct kvm_vcpu *vcpu) 137 137 {
+110 -110
arch/x86/kvm/x86.c
··· 97 97 * vendor module being reloaded with different module parameters. 98 98 */ 99 99 struct kvm_caps kvm_caps __read_mostly; 100 - EXPORT_SYMBOL_GPL(kvm_caps); 100 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_caps); 101 101 102 102 struct kvm_host_values kvm_host __read_mostly; 103 - EXPORT_SYMBOL_GPL(kvm_host); 103 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_host); 104 104 105 105 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e)) 106 106 ··· 155 155 156 156 bool __read_mostly report_ignored_msrs = true; 157 157 module_param(report_ignored_msrs, bool, 0644); 158 - EXPORT_SYMBOL_GPL(report_ignored_msrs); 158 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(report_ignored_msrs); 159 159 160 160 unsigned int min_timer_period_us = 200; 161 161 module_param(min_timer_period_us, uint, 0644); ··· 169 169 170 170 bool __read_mostly enable_vmware_backdoor = false; 171 171 module_param(enable_vmware_backdoor, bool, 0444); 172 - EXPORT_SYMBOL_GPL(enable_vmware_backdoor); 172 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_vmware_backdoor); 173 173 174 174 /* 175 175 * Flags to manipulate forced emulation behavior (any non-zero value will ··· 184 184 185 185 /* Enable/disable PMU virtualization */ 186 186 bool __read_mostly enable_pmu = true; 187 - EXPORT_SYMBOL_GPL(enable_pmu); 187 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_pmu); 188 188 module_param(enable_pmu, bool, 0444); 189 189 190 190 bool __read_mostly eager_page_split = true; ··· 211 211 }; 212 212 213 213 u32 __read_mostly kvm_nr_uret_msrs; 214 - EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs); 214 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_nr_uret_msrs); 215 215 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS]; 216 216 static struct kvm_user_return_msrs __percpu *user_return_msrs; 217 217 ··· 230 230 #define KVM_SUPPORTED_XSS (XFEATURE_MASK_CET_ALL) 231 231 232 232 bool __read_mostly allow_smaller_maxphyaddr = 0; 233 - EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr); 233 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(allow_smaller_maxphyaddr); 234 234 235 235 bool __read_mostly enable_apicv = true; 236 - EXPORT_SYMBOL_GPL(enable_apicv); 236 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_apicv); 237 237 238 238 bool __read_mostly enable_ipiv = true; 239 - EXPORT_SYMBOL_GPL(enable_ipiv); 239 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_ipiv); 240 240 241 241 bool __read_mostly enable_device_posted_irqs = true; 242 - EXPORT_SYMBOL_GPL(enable_device_posted_irqs); 242 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs); 243 243 244 244 const struct _kvm_stats_desc kvm_vm_stats_desc[] = { 245 245 KVM_GENERIC_VM_STATS(), ··· 628 628 kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr; 629 629 return kvm_nr_uret_msrs++; 630 630 } 631 - EXPORT_SYMBOL_GPL(kvm_add_user_return_msr); 631 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_add_user_return_msr); 632 632 633 633 int kvm_find_user_return_msr(u32 msr) 634 634 { ··· 640 640 } 641 641 return -1; 642 642 } 643 - EXPORT_SYMBOL_GPL(kvm_find_user_return_msr); 643 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_user_return_msr); 644 644 645 645 static void kvm_user_return_msr_cpu_online(void) 646 646 { ··· 680 680 kvm_user_return_register_notifier(msrs); 681 681 return 0; 682 682 } 683 - EXPORT_SYMBOL_GPL(kvm_set_user_return_msr); 683 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr); 684 684 685 685 void kvm_user_return_msr_update_cache(unsigned int slot, u64 value) 686 686 { ··· 689 689 msrs->values[slot].curr = value; 690 690 kvm_user_return_register_notifier(msrs); 691 691 } 692 - EXPORT_SYMBOL_GPL(kvm_user_return_msr_update_cache); 692 + 
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_user_return_msr_update_cache); 693 693 694 694 u64 kvm_get_user_return_msr(unsigned int slot) 695 695 { 696 696 return this_cpu_ptr(user_return_msrs)->values[slot].curr; 697 697 } 698 - EXPORT_SYMBOL_GPL(kvm_get_user_return_msr); 698 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_user_return_msr); 699 699 700 700 static void drop_user_return_notifiers(void) 701 701 { ··· 717 717 /* Fault while not rebooting. We want the trace. */ 718 718 BUG_ON(!kvm_rebooting); 719 719 } 720 - EXPORT_SYMBOL_GPL(kvm_spurious_fault); 720 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spurious_fault); 721 721 722 722 #define EXCPT_BENIGN 0 723 723 #define EXCPT_CONTRIBUTORY 1 ··· 822 822 ex->has_payload = false; 823 823 ex->payload = 0; 824 824 } 825 - EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload); 825 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_deliver_exception_payload); 826 826 827 827 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector, 828 828 bool has_error_code, u32 error_code, ··· 906 906 { 907 907 kvm_multiple_exception(vcpu, nr, false, 0, false, 0); 908 908 } 909 - EXPORT_SYMBOL_GPL(kvm_queue_exception); 909 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception); 910 910 911 911 912 912 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, ··· 914 914 { 915 915 kvm_multiple_exception(vcpu, nr, false, 0, true, payload); 916 916 } 917 - EXPORT_SYMBOL_GPL(kvm_queue_exception_p); 917 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_p); 918 918 919 919 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, 920 920 u32 error_code, unsigned long payload) ··· 949 949 vcpu->arch.exception.has_payload = false; 950 950 vcpu->arch.exception.payload = 0; 951 951 } 952 - EXPORT_SYMBOL_GPL(kvm_requeue_exception); 952 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_requeue_exception); 953 953 954 954 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) 955 955 { ··· 960 960 961 961 return 1; 962 962 } 963 - EXPORT_SYMBOL_GPL(kvm_complete_insn_gp); 963 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_complete_insn_gp); 964 964 965 965 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err) 966 966 { ··· 1010 1010 1011 1011 fault_mmu->inject_page_fault(vcpu, fault); 1012 1012 } 1013 - EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault); 1013 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_emulated_page_fault); 1014 1014 1015 1015 void kvm_inject_nmi(struct kvm_vcpu *vcpu) 1016 1016 { ··· 1022 1022 { 1023 1023 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0); 1024 1024 } 1025 - EXPORT_SYMBOL_GPL(kvm_queue_exception_e); 1025 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_e); 1026 1026 1027 1027 /* 1028 1028 * Checks if cpl <= required_cpl; if true, return true. 
Otherwise queue ··· 1044 1044 kvm_queue_exception(vcpu, UD_VECTOR); 1045 1045 return false; 1046 1046 } 1047 - EXPORT_SYMBOL_GPL(kvm_require_dr); 1047 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_require_dr); 1048 1048 1049 1049 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) 1050 1050 { ··· 1099 1099 1100 1100 return 1; 1101 1101 } 1102 - EXPORT_SYMBOL_GPL(load_pdptrs); 1102 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(load_pdptrs); 1103 1103 1104 1104 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1105 1105 { ··· 1152 1152 if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS) 1153 1153 kvm_mmu_reset_context(vcpu); 1154 1154 } 1155 - EXPORT_SYMBOL_GPL(kvm_post_set_cr0); 1155 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr0); 1156 1156 1157 1157 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1158 1158 { ··· 1196 1196 1197 1197 return 0; 1198 1198 } 1199 - EXPORT_SYMBOL_GPL(kvm_set_cr0); 1199 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr0); 1200 1200 1201 1201 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 1202 1202 { 1203 1203 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); 1204 1204 } 1205 - EXPORT_SYMBOL_GPL(kvm_lmsw); 1205 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw); 1206 1206 1207 1207 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) 1208 1208 { ··· 1225 1225 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) 1226 1226 wrpkru(vcpu->arch.pkru); 1227 1227 } 1228 - EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state); 1228 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_load_guest_xsave_state); 1229 1229 1230 1230 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) 1231 1231 { ··· 1251 1251 } 1252 1252 1253 1253 } 1254 - EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state); 1254 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_load_host_xsave_state); 1255 1255 1256 1256 #ifdef CONFIG_X86_64 1257 1257 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu) ··· 1304 1304 vcpu->arch.cpuid_dynamic_bits_dirty = true; 1305 1305 return 0; 1306 1306 } 1307 - EXPORT_SYMBOL_GPL(__kvm_set_xcr); 1307 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_set_xcr); 1308 1308 1309 1309 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) 1310 1310 { ··· 1317 1317 1318 1318 return kvm_skip_emulated_instruction(vcpu); 1319 1319 } 1320 - EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv); 1320 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_xsetbv); 1321 1321 1322 1322 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1323 1323 { ··· 1365 1365 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 1366 1366 1367 1367 } 1368 - EXPORT_SYMBOL_GPL(kvm_post_set_cr4); 1368 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr4); 1369 1369 1370 1370 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1371 1371 { ··· 1399 1399 1400 1400 return 0; 1401 1401 } 1402 - EXPORT_SYMBOL_GPL(kvm_set_cr4); 1402 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr4); 1403 1403 1404 1404 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) 1405 1405 { ··· 1491 1491 1492 1492 return 0; 1493 1493 } 1494 - EXPORT_SYMBOL_GPL(kvm_set_cr3); 1494 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr3); 1495 1495 1496 1496 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) 1497 1497 { ··· 1503 1503 vcpu->arch.cr8 = cr8; 1504 1504 return 0; 1505 1505 } 1506 - EXPORT_SYMBOL_GPL(kvm_set_cr8); 1506 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr8); 1507 1507 1508 1508 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) 1509 1509 { ··· 1512 1512 else 1513 1513 return vcpu->arch.cr8; 1514 1514 } 1515 - EXPORT_SYMBOL_GPL(kvm_get_cr8); 1515 + 
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_cr8); 1516 1516 1517 1517 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) 1518 1518 { ··· 1537 1537 if (dr7 & DR7_BP_EN_MASK) 1538 1538 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; 1539 1539 } 1540 - EXPORT_SYMBOL_GPL(kvm_update_dr7); 1540 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_update_dr7); 1541 1541 1542 1542 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) 1543 1543 { ··· 1578 1578 1579 1579 return 0; 1580 1580 } 1581 - EXPORT_SYMBOL_GPL(kvm_set_dr); 1581 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_dr); 1582 1582 1583 1583 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr) 1584 1584 { ··· 1595 1595 return vcpu->arch.dr7; 1596 1596 } 1597 1597 } 1598 - EXPORT_SYMBOL_GPL(kvm_get_dr); 1598 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dr); 1599 1599 1600 1600 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) 1601 1601 { ··· 1611 1611 kvm_rdx_write(vcpu, data >> 32); 1612 1612 return kvm_skip_emulated_instruction(vcpu); 1613 1613 } 1614 - EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc); 1614 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdpmc); 1615 1615 1616 1616 /* 1617 1617 * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM ··· 1750 1750 1751 1751 return __kvm_valid_efer(vcpu, efer); 1752 1752 } 1753 - EXPORT_SYMBOL_GPL(kvm_valid_efer); 1753 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_valid_efer); 1754 1754 1755 1755 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) 1756 1756 { ··· 1793 1793 { 1794 1794 efer_reserved_bits &= ~mask; 1795 1795 } 1796 - EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 1796 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_efer_bits); 1797 1797 1798 1798 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) 1799 1799 { ··· 1836 1836 1837 1837 return allowed; 1838 1838 } 1839 - EXPORT_SYMBOL_GPL(kvm_msr_allowed); 1839 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_msr_allowed); 1840 1840 1841 1841 /* 1842 1842 * Write @data into the MSR specified by @index. 
Select MSR specific fault ··· 2025 2025 { 2026 2026 return kvm_get_msr_ignored_check(vcpu, index, data, false); 2027 2027 } 2028 - EXPORT_SYMBOL_GPL(__kvm_emulate_msr_read); 2028 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_read); 2029 2029 2030 2030 int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data) 2031 2031 { 2032 2032 return kvm_set_msr_ignored_check(vcpu, index, data, false); 2033 2033 } 2034 - EXPORT_SYMBOL_GPL(__kvm_emulate_msr_write); 2034 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_write); 2035 2035 2036 2036 int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data) 2037 2037 { ··· 2040 2040 2041 2041 return __kvm_emulate_msr_read(vcpu, index, data); 2042 2042 } 2043 - EXPORT_SYMBOL_GPL(kvm_emulate_msr_read); 2043 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_read); 2044 2044 2045 2045 int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data) 2046 2046 { ··· 2049 2049 2050 2050 return __kvm_emulate_msr_write(vcpu, index, data); 2051 2051 } 2052 - EXPORT_SYMBOL_GPL(kvm_emulate_msr_write); 2052 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_write); 2053 2053 2054 2054 2055 2055 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) ··· 2158 2158 return __kvm_emulate_rdmsr(vcpu, kvm_rcx_read(vcpu), -1, 2159 2159 complete_fast_rdmsr); 2160 2160 } 2161 - EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr); 2161 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr); 2162 2162 2163 2163 int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg) 2164 2164 { ··· 2166 2166 2167 2167 return __kvm_emulate_rdmsr(vcpu, msr, reg, complete_fast_rdmsr_imm); 2168 2168 } 2169 - EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr_imm); 2169 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr_imm); 2170 2170 2171 2171 static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data) 2172 2172 { ··· 2194 2194 return __kvm_emulate_wrmsr(vcpu, kvm_rcx_read(vcpu), 2195 2195 kvm_read_edx_eax(vcpu)); 2196 2196 } 2197 - EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr); 2197 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr); 2198 2198 2199 2199 int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg) 2200 2200 { 2201 2201 return __kvm_emulate_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg)); 2202 2202 } 2203 - EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr_imm); 2203 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr_imm); 2204 2204 2205 2205 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) 2206 2206 { ··· 2212 2212 /* Treat an INVD instruction as a NOP and just skip it. 
*/ 2213 2213 return kvm_emulate_as_nop(vcpu); 2214 2214 } 2215 - EXPORT_SYMBOL_GPL(kvm_emulate_invd); 2215 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_invd); 2216 2216 2217 2217 fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu) 2218 2218 { ··· 2221 2221 2222 2222 return EXIT_FASTPATH_REENTER_GUEST; 2223 2223 } 2224 - EXPORT_SYMBOL_GPL(handle_fastpath_invd); 2224 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_invd); 2225 2225 2226 2226 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) 2227 2227 { 2228 2228 kvm_queue_exception(vcpu, UD_VECTOR); 2229 2229 return 1; 2230 2230 } 2231 - EXPORT_SYMBOL_GPL(kvm_handle_invalid_op); 2231 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invalid_op); 2232 2232 2233 2233 2234 2234 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) ··· 2254 2254 { 2255 2255 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); 2256 2256 } 2257 - EXPORT_SYMBOL_GPL(kvm_emulate_mwait); 2257 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_mwait); 2258 2258 2259 2259 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) 2260 2260 { 2261 2261 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); 2262 2262 } 2263 - EXPORT_SYMBOL_GPL(kvm_emulate_monitor); 2263 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_monitor); 2264 2264 2265 2265 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) 2266 2266 { ··· 2298 2298 return __handle_fastpath_wrmsr(vcpu, kvm_rcx_read(vcpu), 2299 2299 kvm_read_edx_eax(vcpu)); 2300 2300 } 2301 - EXPORT_SYMBOL_GPL(handle_fastpath_wrmsr); 2301 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr); 2302 2302 2303 2303 fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg) 2304 2304 { 2305 2305 return __handle_fastpath_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg)); 2306 2306 } 2307 - EXPORT_SYMBOL_GPL(handle_fastpath_wrmsr_imm); 2307 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr_imm); 2308 2308 2309 2309 /* 2310 2310 * Adapt set_msr() to msr_io()'s calling convention ··· 2670 2670 return vcpu->arch.l1_tsc_offset + 2671 2671 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); 2672 2672 } 2673 - EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); 2673 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_l1_tsc); 2674 2674 2675 2675 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier) 2676 2676 { ··· 2685 2685 nested_offset += l2_offset; 2686 2686 return nested_offset; 2687 2687 } 2688 - EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset); 2688 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_offset); 2689 2689 2690 2690 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier) 2691 2691 { ··· 2695 2695 2696 2696 return l1_multiplier; 2697 2697 } 2698 - EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier); 2698 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_multiplier); 2699 2699 2700 2700 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) 2701 2701 { ··· 3773 3773 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) 3774 3774 kvm_vcpu_flush_tlb_guest(vcpu); 3775 3775 } 3776 - EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests); 3776 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_service_local_tlb_flush_requests); 3777 3777 3778 3778 static void record_steal_time(struct kvm_vcpu *vcpu) 3779 3779 { ··· 4327 4327 } 4328 4328 return 0; 4329 4329 } 4330 - EXPORT_SYMBOL_GPL(kvm_set_msr_common); 4330 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_msr_common); 4331 4331 4332 4332 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) 4333 4333 { ··· 4680 
4680 } 4681 4681 return 0; 4682 4682 } 4683 - EXPORT_SYMBOL_GPL(kvm_get_msr_common); 4683 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_msr_common); 4684 4684 4685 4685 /* 4686 4686 * Read or write a bunch of msrs. All parameters are kernel addresses. ··· 7836 7836 u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; 7837 7837 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7838 7838 } 7839 - EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); 7839 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_read); 7840 7840 7841 7841 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, 7842 7842 struct x86_exception *exception) ··· 7847 7847 access |= PFERR_WRITE_MASK; 7848 7848 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); 7849 7849 } 7850 - EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); 7850 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_write); 7851 7851 7852 7852 /* uses this to access any guest's mapped memory without checking CPL */ 7853 7853 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, ··· 7933 7933 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 7934 7934 exception); 7935 7935 } 7936 - EXPORT_SYMBOL_GPL(kvm_read_guest_virt); 7936 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_virt); 7937 7937 7938 7938 static int emulator_read_std(struct x86_emulate_ctxt *ctxt, 7939 7939 gva_t addr, void *val, unsigned int bytes, ··· 8005 8005 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 8006 8006 PFERR_WRITE_MASK, exception); 8007 8007 } 8008 - EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); 8008 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_virt_system); 8009 8009 8010 8010 static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, 8011 8011 void *insn, int insn_len) ··· 8039 8039 8040 8040 return kvm_emulate_instruction(vcpu, emul_type); 8041 8041 } 8042 - EXPORT_SYMBOL_GPL(handle_ud); 8042 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_ud); 8043 8043 8044 8044 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, 8045 8045 gpa_t gpa, bool write) ··· 8518 8518 kvm_emulate_wbinvd_noskip(vcpu); 8519 8519 return kvm_skip_emulated_instruction(vcpu); 8520 8520 } 8521 - EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); 8521 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wbinvd); 8522 8522 8523 8523 8524 8524 ··· 9016 9016 kvm_set_rflags(vcpu, ctxt->eflags); 9017 9017 } 9018 9018 } 9019 - EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); 9019 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_realmode_interrupt); 9020 9020 9021 9021 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, 9022 9022 u8 ndata, u8 *insn_bytes, u8 insn_size) ··· 9081 9081 { 9082 9082 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); 9083 9083 } 9084 - EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit); 9084 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_prepare_emulation_failure_exit); 9085 9085 9086 9086 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) 9087 9087 { 9088 9088 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); 9089 9089 } 9090 - EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit); 9090 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_emulation_failure_exit); 9091 9091 9092 9092 void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa) 9093 9093 { ··· 9109 9109 run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; 9110 9110 run->internal.ndata = ndata; 9111 9111 } 9112 - EXPORT_SYMBOL_GPL(kvm_prepare_event_vectoring_exit); 9112 + 
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_event_vectoring_exit); 9113 9113 9114 9114 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) 9115 9115 { ··· 9233 9233 r = kvm_vcpu_do_singlestep(vcpu); 9234 9234 return r; 9235 9235 } 9236 - EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); 9236 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_skip_emulated_instruction); 9237 9237 9238 9238 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) 9239 9239 { ··· 9364 9364 9365 9365 return r; 9366 9366 } 9367 - EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction); 9367 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(x86_decode_emulated_instruction); 9368 9368 9369 9369 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 9370 9370 int emulation_type, void *insn, int insn_len) ··· 9588 9588 { 9589 9589 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); 9590 9590 } 9591 - EXPORT_SYMBOL_GPL(kvm_emulate_instruction); 9591 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction); 9592 9592 9593 9593 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, 9594 9594 void *insn, int insn_len) 9595 9595 { 9596 9596 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); 9597 9597 } 9598 - EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 9598 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction_from_buffer); 9599 9599 9600 9600 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) 9601 9601 { ··· 9690 9690 ret = kvm_fast_pio_out(vcpu, size, port); 9691 9691 return ret && kvm_skip_emulated_instruction(vcpu); 9692 9692 } 9693 - EXPORT_SYMBOL_GPL(kvm_fast_pio); 9693 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fast_pio); 9694 9694 9695 9695 static int kvmclock_cpu_down_prep(unsigned int cpu) 9696 9696 { ··· 10147 10147 kmem_cache_destroy(x86_emulator_cache); 10148 10148 return r; 10149 10149 } 10150 - EXPORT_SYMBOL_GPL(kvm_x86_vendor_init); 10150 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_init); 10151 10151 10152 10152 void kvm_x86_vendor_exit(void) 10153 10153 { ··· 10181 10181 kvm_x86_ops.enable_virtualization_cpu = NULL; 10182 10182 mutex_unlock(&vendor_module_lock); 10183 10183 } 10184 - EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit); 10184 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_exit); 10185 10185 10186 10186 #ifdef CONFIG_X86_64 10187 10187 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, ··· 10245 10245 { 10246 10246 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); 10247 10247 } 10248 - EXPORT_SYMBOL_GPL(kvm_apicv_activated); 10248 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apicv_activated); 10249 10249 10250 10250 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) 10251 10251 { ··· 10255 10255 10256 10256 return (vm_reasons | vcpu_reasons) == 0; 10257 10257 } 10258 - EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated); 10258 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_apicv_activated); 10259 10259 10260 10260 static void set_or_clear_apicv_inhibit(unsigned long *inhibits, 10261 10261 enum kvm_apicv_inhibit reason, bool set) ··· 10431 10431 vcpu->run->hypercall.ret = ret; 10432 10432 return 1; 10433 10433 } 10434 - EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall); 10434 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(____kvm_emulate_hypercall); 10435 10435 10436 10436 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 10437 10437 { ··· 10444 10444 return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu), 10445 10445 complete_hypercall_exit); 10446 10446 } 10447 - EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); 10447 + 
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_hypercall); 10448 10448 10449 10449 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) 10450 10450 { ··· 10887 10887 preempt_enable(); 10888 10888 up_read(&vcpu->kvm->arch.apicv_update_lock); 10889 10889 } 10890 - EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv); 10890 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_update_apicv); 10891 10891 10892 10892 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) 10893 10893 { ··· 10963 10963 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set); 10964 10964 up_write(&kvm->arch.apicv_update_lock); 10965 10965 } 10966 - EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit); 10966 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_or_clear_apicv_inhibit); 10967 10967 10968 10968 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 10969 10969 { ··· 11517 11517 11518 11518 return false; 11519 11519 } 11520 - EXPORT_SYMBOL_GPL(kvm_vcpu_has_events); 11520 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_has_events); 11521 11521 11522 11522 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 11523 11523 { ··· 11670 11670 { 11671 11671 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); 11672 11672 } 11673 - EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip); 11673 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt_noskip); 11674 11674 11675 11675 int kvm_emulate_halt(struct kvm_vcpu *vcpu) 11676 11676 { ··· 11681 11681 */ 11682 11682 return kvm_emulate_halt_noskip(vcpu) && ret; 11683 11683 } 11684 - EXPORT_SYMBOL_GPL(kvm_emulate_halt); 11684 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt); 11685 11685 11686 11686 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu) 11687 11687 { ··· 11693 11693 11694 11694 return EXIT_FASTPATH_EXIT_HANDLED; 11695 11695 } 11696 - EXPORT_SYMBOL_GPL(handle_fastpath_hlt); 11696 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_hlt); 11697 11697 11698 11698 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) 11699 11699 { ··· 11702 11702 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, 11703 11703 KVM_EXIT_AP_RESET_HOLD) && ret; 11704 11704 } 11705 - EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold); 11705 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_ap_reset_hold); 11706 11706 11707 11707 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) 11708 11708 { ··· 12255 12255 vcpu->run->internal.ndata = 0; 12256 12256 return 0; 12257 12257 } 12258 - EXPORT_SYMBOL_GPL(kvm_task_switch); 12258 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_task_switch); 12259 12259 12260 12260 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 12261 12261 { ··· 12956 12956 if (init_event) 12957 12957 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); 12958 12958 } 12959 - EXPORT_SYMBOL_GPL(kvm_vcpu_reset); 12959 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_reset); 12960 12960 12961 12961 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) 12962 12962 { ··· 12968 12968 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); 12969 12969 kvm_rip_write(vcpu, 0); 12970 12970 } 12971 - EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector); 12971 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_deliver_sipi_vector); 12972 12972 12973 12973 void kvm_arch_enable_virtualization(void) 12974 12974 { ··· 13086 13086 { 13087 13087 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; 13088 13088 } 13089 - EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp); 13089 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_reset_bsp); 13090 13090 13091 13091 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) 13092 13092 { ··· 13250 13250 13251 
13251 return (void __user *)hva; 13252 13252 } 13253 - EXPORT_SYMBOL_GPL(__x86_set_memory_region); 13253 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(__x86_set_memory_region); 13254 13254 13255 13255 void kvm_arch_pre_destroy_vm(struct kvm *kvm) 13256 13256 { ··· 13658 13658 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + 13659 13659 kvm_rip_read(vcpu)); 13660 13660 } 13661 - EXPORT_SYMBOL_GPL(kvm_get_linear_rip); 13661 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_linear_rip); 13662 13662 13663 13663 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) 13664 13664 { 13665 13665 return kvm_get_linear_rip(vcpu) == linear_rip; 13666 13666 } 13667 - EXPORT_SYMBOL_GPL(kvm_is_linear_rip); 13667 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_linear_rip); 13668 13668 13669 13669 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) 13670 13670 { ··· 13675 13675 rflags &= ~X86_EFLAGS_TF; 13676 13676 return rflags; 13677 13677 } 13678 - EXPORT_SYMBOL_GPL(kvm_get_rflags); 13678 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_rflags); 13679 13679 13680 13680 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 13681 13681 { ··· 13690 13690 __kvm_set_rflags(vcpu, rflags); 13691 13691 kvm_make_request(KVM_REQ_EVENT, vcpu); 13692 13692 } 13693 - EXPORT_SYMBOL_GPL(kvm_set_rflags); 13693 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_rflags); 13694 13694 13695 13695 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) 13696 13696 { ··· 13933 13933 { 13934 13934 return atomic_read(&kvm->arch.noncoherent_dma_count); 13935 13935 } 13936 - EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); 13936 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_arch_has_noncoherent_dma); 13937 13937 13938 13938 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) 13939 13939 { ··· 13989 13989 13990 13990 return ret; 13991 13991 } 13992 - EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value); 13992 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spec_ctrl_test_value); 13993 13993 13994 13994 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) 13995 13995 { ··· 14014 14014 } 14015 14015 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); 14016 14016 } 14017 - EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error); 14017 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fixup_and_inject_pf_error); 14018 14018 14019 14019 /* 14020 14020 * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns ··· 14043 14043 14044 14044 return 0; 14045 14045 } 14046 - EXPORT_SYMBOL_GPL(kvm_handle_memory_failure); 14046 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_memory_failure); 14047 14047 14048 14048 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) 14049 14049 { ··· 14107 14107 return 1; 14108 14108 } 14109 14109 } 14110 - EXPORT_SYMBOL_GPL(kvm_handle_invpcid); 14110 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invpcid); 14111 14111 14112 14112 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) 14113 14113 { ··· 14192 14192 14193 14193 return 0; 14194 14194 } 14195 - EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write); 14195 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_write); 14196 14196 14197 14197 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, 14198 14198 void *data) ··· 14230 14230 14231 14231 return 0; 14232 14232 } 14233 - EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read); 14233 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_read); 14234 14234 14235 14235 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size) 14236 14236 { ··· 14318 14318 return in ? 
kvm_sev_es_ins(vcpu, size, port) 14319 14319 : kvm_sev_es_outs(vcpu, size, port); 14320 14320 } 14321 - EXPORT_SYMBOL_GPL(kvm_sev_es_string_io); 14321 + EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_string_io); 14322 14322 14323 14323 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry); 14324 14324 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
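
To illustrate the effect of the narrowed exports, consider a hypothetical out-of-tree module (module name and symbol choice are illustrative, not from this commit) that previously could link against one of these EXPORT_SYMBOL_GPL symbols; with the export now limited to kvm-{amd,intel}.ko, loading it is expected to fail:

    /*
     * Hypothetical out-of-tree module, for illustration only.  Before this
     * change, enable_pmu was EXPORT_SYMBOL_GPL and any GPL module could use
     * it; with the export restricted to kvm-{amd,intel}.ko, loading this
     * module is expected to fail with an unresolved/not-permitted symbol.
     */
    #include <linux/module.h>
    #include <linux/printk.h>

    extern bool enable_pmu;	/* exported by kvm.ko, but no longer to us */

    static int __init kvm_export_probe_init(void)
    {
    	pr_info("KVM PMU virtualization enabled: %d\n", enable_pmu);
    	return 0;
    }
    module_init(kvm_export_probe_init);

    static void __exit kvm_export_probe_exit(void)
    {
    }
    module_exit(kvm_export_probe_exit);

    MODULE_DESCRIPTION("Illustration of KVM-internal export restrictions");
    MODULE_LICENSE("GPL");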