Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: Restrict KVM-induced symbol exports to KVM modules where obvious/possible

Extend KVM's export macro framework to provide EXPORT_SYMBOL_FOR_KVM(),
and use the helper macro to export symbols for KVM throughout x86 if and
only if KVM will build one or more modules, and only for those modules.

To avoid unnecessary exports when CONFIG_KVM=m but kvm.ko will not be
built (because no vendor modules are selected), let arch code #define
EXPORT_SYMBOL_FOR_KVM to suppress/override the exports.

Note, the set of symbols to restrict to KVM was generated by manual search
and audit; any "misses" are due to human error, not some grand plan.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Kai Huang <kai.huang@intel.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Link: https://patch.msgid.link/20251112173944.1380633-5-seanjc%40google.com

Authored by Sean Christopherson; committed by Dave Hansen.
6276c67f e6f2d586

+144 -104
+3 -4
arch/x86/entry/entry.S
··· 4 4 */ 5 5 6 6 #include <linux/export.h> 7 + #include <linux/kvm_types.h> 7 8 #include <linux/linkage.h> 8 9 #include <linux/objtool.h> 9 10 #include <asm/msr-index.h> ··· 30 29 FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET 31 30 RET 32 31 SYM_FUNC_END(write_ibpb) 33 - /* For KVM */ 34 - EXPORT_SYMBOL_GPL(write_ibpb); 32 + EXPORT_SYMBOL_FOR_KVM(write_ibpb); 35 33 36 34 .popsection 37 35 ··· 48 48 .word __KERNEL_DS 49 49 .align L1_CACHE_BYTES, 0xcc 50 50 SYM_CODE_END(x86_verw_sel); 51 - /* For KVM */ 52 - EXPORT_SYMBOL_GPL(x86_verw_sel); 51 + EXPORT_SYMBOL_FOR_KVM(x86_verw_sel); 53 52 54 53 .popsection 55 54
+2 -1
arch/x86/entry/entry_64.S
··· 19 19 * - idtentry: Define exception entry points. 20 20 */ 21 21 #include <linux/export.h> 22 + #include <linux/kvm_types.h> 22 23 #include <linux/linkage.h> 23 24 #include <asm/segment.h> 24 25 #include <asm/cache.h> ··· 1567 1566 pop %rbp 1568 1567 RET 1569 1568 SYM_FUNC_END(clear_bhb_loop) 1570 - EXPORT_SYMBOL_GPL(clear_bhb_loop) 1569 + EXPORT_SYMBOL_FOR_KVM(clear_bhb_loop) 1571 1570 STACK_FRAME_NON_STANDARD(clear_bhb_loop)
+2 -1
arch/x86/entry/entry_64_fred.S
··· 4 4 */ 5 5 6 6 #include <linux/export.h> 7 + #include <linux/kvm_types.h> 7 8 8 9 #include <asm/asm.h> 9 10 #include <asm/fred.h> ··· 147 146 RET 148 147 149 148 SYM_FUNC_END(asm_fred_entry_from_kvm) 150 - EXPORT_SYMBOL_GPL(asm_fred_entry_from_kvm); 149 + EXPORT_SYMBOL_FOR_KVM(asm_fred_entry_from_kvm); 151 150 #endif
+3 -2
arch/x86/events/amd/core.c
··· 2 2 #include <linux/perf_event.h> 3 3 #include <linux/jump_label.h> 4 4 #include <linux/export.h> 5 + #include <linux/kvm_types.h> 5 6 #include <linux/types.h> 6 7 #include <linux/init.h> 7 8 #include <linux/slab.h> ··· 1570 1569 /* Reload all events */ 1571 1570 amd_pmu_reload_virt(); 1572 1571 } 1573 - EXPORT_SYMBOL_GPL(amd_pmu_enable_virt); 1572 + EXPORT_SYMBOL_FOR_KVM(amd_pmu_enable_virt); 1574 1573 1575 1574 void amd_pmu_disable_virt(void) 1576 1575 { ··· 1587 1586 /* Reload all events */ 1588 1587 amd_pmu_reload_virt(); 1589 1588 } 1590 - EXPORT_SYMBOL_GPL(amd_pmu_disable_virt); 1589 + EXPORT_SYMBOL_FOR_KVM(amd_pmu_disable_virt);
+4 -3
arch/x86/events/core.c
··· 20 20 #include <linux/export.h> 21 21 #include <linux/init.h> 22 22 #include <linux/kdebug.h> 23 + #include <linux/kvm_types.h> 23 24 #include <linux/sched/mm.h> 24 25 #include <linux/sched/clock.h> 25 26 #include <linux/uaccess.h> ··· 715 714 { 716 715 return static_call(x86_pmu_guest_get_msrs)(nr, data); 717 716 } 718 - EXPORT_SYMBOL_GPL(perf_guest_get_msrs); 717 + EXPORT_SYMBOL_FOR_KVM(perf_guest_get_msrs); 719 718 720 719 /* 721 720 * There may be PMI landing after enabled=0. The PMI hitting could be before or ··· 3107 3106 cap->events_mask_len = x86_pmu.events_mask_len; 3108 3107 cap->pebs_ept = x86_pmu.pebs_ept; 3109 3108 } 3110 - EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability); 3109 + EXPORT_SYMBOL_FOR_KVM(perf_get_x86_pmu_capability); 3111 3110 3112 3111 u64 perf_get_hw_event_config(int hw_event) 3113 3112 { ··· 3118 3117 3119 3118 return 0; 3120 3119 } 3121 - EXPORT_SYMBOL_GPL(perf_get_hw_event_config); 3120 + EXPORT_SYMBOL_FOR_KVM(perf_get_hw_event_config);
+2 -1
arch/x86/events/intel/lbr.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/kvm_types.h> 2 3 #include <linux/perf_event.h> 3 4 #include <linux/types.h> 4 5 ··· 1706 1705 lbr->info = x86_pmu.lbr_info; 1707 1706 lbr->has_callstack = x86_pmu_has_lbr_callstack(); 1708 1707 } 1709 - EXPORT_SYMBOL_GPL(x86_perf_get_lbr); 1708 + EXPORT_SYMBOL_FOR_KVM(x86_perf_get_lbr); 1710 1709 1711 1710 struct event_constraint vlbr_constraint = 1712 1711 __EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
+4 -3
arch/x86/events/intel/pt.c
··· 17 17 #include <linux/limits.h> 18 18 #include <linux/slab.h> 19 19 #include <linux/device.h> 20 + #include <linux/kvm_types.h> 20 21 21 22 #include <asm/cpuid/api.h> 22 23 #include <asm/perf_event.h> ··· 83 82 84 83 return (c & cd->mask) >> shift; 85 84 } 86 - EXPORT_SYMBOL_GPL(intel_pt_validate_cap); 85 + EXPORT_SYMBOL_FOR_KVM(intel_pt_validate_cap); 87 86 88 87 u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) 89 88 { 90 89 return intel_pt_validate_cap(pt_pmu.caps, cap); 91 90 } 92 - EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap); 91 + EXPORT_SYMBOL_FOR_KVM(intel_pt_validate_hw_cap); 93 92 94 93 static ssize_t pt_cap_show(struct device *cdev, 95 94 struct device_attribute *attr, ··· 1591 1590 1592 1591 local_irq_restore(flags); 1593 1592 } 1594 - EXPORT_SYMBOL_GPL(intel_pt_handle_vmx); 1593 + EXPORT_SYMBOL_FOR_KVM(intel_pt_handle_vmx); 1595 1594 1596 1595 /* 1597 1596 * PMU callbacks
+5
arch/x86/include/asm/kvm_types.h
··· 10 10 #define KVM_SUB_MODULES kvm-intel 11 11 #else 12 12 #undef KVM_SUB_MODULES 13 + /* 14 + * Don't export symbols for KVM without vendor modules, as kvm.ko is built iff 15 + * at least one vendor module is enabled. 16 + */ 17 + #define EXPORT_SYMBOL_FOR_KVM(symbol) 13 18 #endif 14 19 15 20 #define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+2 -1
arch/x86/kernel/apic/apic.c
··· 36 36 #include <linux/dmi.h> 37 37 #include <linux/smp.h> 38 38 #include <linux/mm.h> 39 + #include <linux/kvm_types.h> 39 40 40 41 #include <xen/xen.h> 41 42 ··· 2317 2316 dest |= msg->arch_addr_hi.destid_8_31 << 8; 2318 2317 return dest; 2319 2318 } 2320 - EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid); 2319 + EXPORT_SYMBOL_FOR_KVM(x86_msi_msg_get_destid); 2321 2320 2322 2321 static void __init apic_bsp_up_setup(void) 2323 2322 {
+2 -1
arch/x86/kernel/apic/apic_common.c
··· 4 4 * SPDX-License-Identifier: GPL-2.0 5 5 */ 6 6 #include <linux/irq.h> 7 + #include <linux/kvm_types.h> 7 8 #include <asm/apic.h> 8 9 9 10 #include "local.h" ··· 26 25 else 27 26 return BAD_APICID; 28 27 } 29 - EXPORT_SYMBOL_GPL(default_cpu_present_to_apicid); 28 + EXPORT_SYMBOL_FOR_KVM(default_cpu_present_to_apicid); 30 29 31 30 /* 32 31 * Set up the logical destination ID when the APIC operates in logical
+2 -2
arch/x86/kernel/cpu/amd.c
··· 3 3 #include <linux/bitops.h> 4 4 #include <linux/elf.h> 5 5 #include <linux/mm.h> 6 - 6 + #include <linux/kvm_types.h> 7 7 #include <linux/io.h> 8 8 #include <linux/sched.h> 9 9 #include <linux/sched/clock.h> ··· 1310 1310 1311 1311 return per_cpu(amd_dr_addr_mask[dr], smp_processor_id()); 1312 1312 } 1313 - EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask); 1313 + EXPORT_SYMBOL_FOR_KVM(amd_get_dr_addr_mask); 1314 1314 1315 1315 static void zenbleed_check_cpu(void *unused) 1316 1316 {
+8 -9
arch/x86/kernel/cpu/bugs.c
··· 16 16 #include <linux/sched/smt.h> 17 17 #include <linux/pgtable.h> 18 18 #include <linux/bpf.h> 19 + #include <linux/kvm_types.h> 19 20 20 21 #include <asm/spec-ctrl.h> 21 22 #include <asm/cmdline.h> ··· 179 178 180 179 /* Control IBPB on vCPU load */ 181 180 DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb); 182 - EXPORT_SYMBOL_GPL(switch_vcpu_ibpb); 181 + EXPORT_SYMBOL_FOR_KVM(switch_vcpu_ibpb); 183 182 184 183 /* Control CPU buffer clear before idling (halt, mwait) */ 185 184 DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); ··· 198 197 * mitigation is required. 199 198 */ 200 199 DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear); 201 - EXPORT_SYMBOL_GPL(cpu_buf_vm_clear); 200 + EXPORT_SYMBOL_FOR_KVM(cpu_buf_vm_clear); 202 201 203 202 #undef pr_fmt 204 203 #define pr_fmt(fmt) "mitigations: " fmt ··· 366 365 speculation_ctrl_update(tif); 367 366 } 368 367 } 369 - EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); 368 + EXPORT_SYMBOL_FOR_KVM(x86_virt_spec_ctrl); 370 369 371 370 static void x86_amd_ssb_disable(void) 372 371 { ··· 1032 1031 return (gds_mitigation == GDS_MITIGATION_FULL || 1033 1032 gds_mitigation == GDS_MITIGATION_FULL_LOCKED); 1034 1033 } 1035 - EXPORT_SYMBOL_GPL(gds_ucode_mitigated); 1034 + EXPORT_SYMBOL_FOR_KVM(gds_ucode_mitigated); 1036 1035 1037 1036 void update_gds_msr(void) 1038 1037 { ··· 2859 2858 } 2860 2859 2861 2860 bool itlb_multihit_kvm_mitigation; 2862 - EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); 2861 + EXPORT_SYMBOL_FOR_KVM(itlb_multihit_kvm_mitigation); 2863 2862 2864 2863 #undef pr_fmt 2865 2864 #define pr_fmt(fmt) "L1TF: " fmt ··· 2867 2866 /* Default mitigation for L1TF-affected CPUs */ 2868 2867 enum l1tf_mitigations l1tf_mitigation __ro_after_init = 2869 2868 IS_ENABLED(CONFIG_MITIGATION_L1TF) ? 
L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; 2870 - #if IS_ENABLED(CONFIG_KVM_INTEL) 2871 - EXPORT_SYMBOL_GPL(l1tf_mitigation); 2872 - #endif 2869 + EXPORT_SYMBOL_FOR_KVM(l1tf_mitigation); 2873 2870 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 2874 - EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 2871 + EXPORT_SYMBOL_FOR_KVM(l1tf_vmx_mitigation); 2875 2872 2876 2873 /* 2877 2874 * These CPUs all support 44bits physical address space internally in the
+2 -1
arch/x86/kernel/cpu/bus_lock.c
··· 6 6 #include <linux/workqueue.h> 7 7 #include <linux/delay.h> 8 8 #include <linux/cpuhotplug.h> 9 + #include <linux/kvm_types.h> 9 10 #include <asm/cpu_device_id.h> 10 11 #include <asm/cmdline.h> 11 12 #include <asm/traps.h> ··· 290 289 force_sig_fault(SIGBUS, BUS_ADRALN, NULL); 291 290 return false; 292 291 } 293 - EXPORT_SYMBOL_GPL(handle_guest_split_lock); 292 + EXPORT_SYMBOL_FOR_KVM(handle_guest_split_lock); 294 293 295 294 void bus_lock_init(void) 296 295 {
+4 -3
arch/x86/kernel/cpu/common.c
··· 7 7 #include <linux/bitops.h> 8 8 #include <linux/kernel.h> 9 9 #include <linux/export.h> 10 + #include <linux/kvm_types.h> 10 11 #include <linux/percpu.h> 11 12 #include <linux/string.h> 12 13 #include <linux/ctype.h> ··· 461 460 __write_cr4(newval); 462 461 } 463 462 } 464 - EXPORT_SYMBOL(cr4_update_irqsoff); 463 + EXPORT_SYMBOL_FOR_KVM(cr4_update_irqsoff); 465 464 466 465 /* Read the CR4 shadow. */ 467 466 unsigned long cr4_read_shadow(void) 468 467 { 469 468 return this_cpu_read(cpu_tlbstate.cr4); 470 469 } 471 - EXPORT_SYMBOL_GPL(cr4_read_shadow); 470 + EXPORT_SYMBOL_FOR_KVM(cr4_read_shadow); 472 471 473 472 void cr4_init(void) 474 473 { ··· 723 722 gdt_descr.size = GDT_SIZE - 1; 724 723 load_gdt(&gdt_descr); 725 724 } 726 - EXPORT_SYMBOL_GPL(load_direct_gdt); 725 + EXPORT_SYMBOL_FOR_KVM(load_direct_gdt); 727 726 728 727 /* Load a fixmap remapping of the per-cpu GDT */ 729 728 void load_fixmap_gdt(int cpu)
+2 -1
arch/x86/kernel/cpu/sgx/main.c
··· 5 5 #include <linux/freezer.h> 6 6 #include <linux/highmem.h> 7 7 #include <linux/kthread.h> 8 + #include <linux/kvm_types.h> 8 9 #include <linux/miscdevice.h> 9 10 #include <linux/node.h> 10 11 #include <linux/pagemap.h> ··· 916 915 *allowed_attributes |= SGX_ATTR_PROVISIONKEY; 917 916 return 0; 918 917 } 919 - EXPORT_SYMBOL_GPL(sgx_set_attribute); 918 + EXPORT_SYMBOL_FOR_KVM(sgx_set_attribute); 920 919 921 920 static int __init sgx_init(void) 922 921 {
+3 -2
arch/x86/kernel/cpu/sgx/virt.c
··· 5 5 * Copyright(c) 2021 Intel Corporation. 6 6 */ 7 7 8 + #include <linux/kvm_types.h> 8 9 #include <linux/miscdevice.h> 9 10 #include <linux/mm.h> 10 11 #include <linux/mman.h> ··· 364 363 WARN_ON_ONCE(ret); 365 364 return 0; 366 365 } 367 - EXPORT_SYMBOL_GPL(sgx_virt_ecreate); 366 + EXPORT_SYMBOL_FOR_KVM(sgx_virt_ecreate); 368 367 369 368 static int __sgx_virt_einit(void __user *sigstruct, void __user *token, 370 369 void __user *secs) ··· 433 432 434 433 return ret; 435 434 } 436 - EXPORT_SYMBOL_GPL(sgx_virt_einit); 435 + EXPORT_SYMBOL_FOR_KVM(sgx_virt_einit);
+2 -1
arch/x86/kernel/e820.c
··· 16 16 #include <linux/firmware-map.h> 17 17 #include <linux/sort.h> 18 18 #include <linux/memory_hotplug.h> 19 + #include <linux/kvm_types.h> 19 20 20 21 #include <asm/e820/api.h> 21 22 #include <asm/setup.h> ··· 96 95 { 97 96 return _e820__mapped_any(e820_table_firmware, start, end, type); 98 97 } 99 - EXPORT_SYMBOL_GPL(e820__mapped_raw_any); 98 + EXPORT_SYMBOL_FOR_KVM(e820__mapped_raw_any); 100 99 101 100 bool e820__mapped_any(u64 start, u64 end, enum e820_type type) 102 101 {
+11 -10
arch/x86/kernel/fpu/core.c
··· 18 18 #include <uapi/asm/kvm.h> 19 19 20 20 #include <linux/hardirq.h> 21 + #include <linux/kvm_types.h> 21 22 #include <linux/pkeys.h> 22 23 #include <linux/vmalloc.h> 23 24 ··· 277 276 278 277 return true; 279 278 } 280 - EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate); 279 + EXPORT_SYMBOL_FOR_KVM(fpu_alloc_guest_fpstate); 281 280 282 281 void fpu_free_guest_fpstate(struct fpu_guest *gfpu) 283 282 { ··· 292 291 gfpu->fpstate = NULL; 293 292 vfree(fpstate); 294 293 } 295 - EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate); 294 + EXPORT_SYMBOL_FOR_KVM(fpu_free_guest_fpstate); 296 295 297 296 /* 298 297 * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable ··· 314 313 315 314 return __xfd_enable_feature(xfeatures, guest_fpu); 316 315 } 317 - EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features); 316 + EXPORT_SYMBOL_FOR_KVM(fpu_enable_guest_xfd_features); 318 317 319 318 #ifdef CONFIG_X86_64 320 319 void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) ··· 325 324 xfd_update_state(guest_fpu->fpstate); 326 325 fpregs_unlock(); 327 326 } 328 - EXPORT_SYMBOL_GPL(fpu_update_guest_xfd); 327 + EXPORT_SYMBOL_FOR_KVM(fpu_update_guest_xfd); 329 328 330 329 /** 331 330 * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state ··· 349 348 __this_cpu_write(xfd_state, fpstate->xfd); 350 349 } 351 350 } 352 - EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state); 351 + EXPORT_SYMBOL_FOR_KVM(fpu_sync_guest_vmexit_xfd_state); 353 352 #endif /* CONFIG_X86_64 */ 354 353 355 354 int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest) ··· 391 390 fpregs_unlock(); 392 391 return 0; 393 392 } 394 - EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate); 393 + EXPORT_SYMBOL_FOR_KVM(fpu_swap_kvm_fpstate); 395 394 396 395 void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, 397 396 unsigned int size, u64 xfeatures, u32 pkru) ··· 410 409 ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE; 411 410 } 412 411 } 413 - 
EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi); 412 + EXPORT_SYMBOL_FOR_KVM(fpu_copy_guest_fpstate_to_uabi); 414 413 415 414 int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, 416 415 u64 xcr0, u32 *vpkru) ··· 440 439 441 440 return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru); 442 441 } 443 - EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate); 442 + EXPORT_SYMBOL_FOR_KVM(fpu_copy_uabi_to_guest_fpstate); 444 443 #endif /* CONFIG_KVM */ 445 444 446 445 void kernel_fpu_begin_mask(unsigned int kfpu_mask) ··· 858 857 859 858 fpregs_restore_userregs(); 860 859 } 861 - EXPORT_SYMBOL_GPL(switch_fpu_return); 860 + EXPORT_SYMBOL_FOR_KVM(switch_fpu_return); 862 861 863 862 void fpregs_lock_and_load(void) 864 863 { ··· 893 892 894 893 WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id())); 895 894 } 896 - EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent); 895 + EXPORT_SYMBOL_FOR_KVM(fpregs_assert_state_consistent); 897 896 #endif 898 897 899 898 void fpregs_mark_activate(void)
+4 -3
arch/x86/kernel/fpu/xstate.c
··· 8 8 #include <linux/compat.h> 9 9 #include <linux/cpu.h> 10 10 #include <linux/mman.h> 11 + #include <linux/kvm_types.h> 11 12 #include <linux/nospec.h> 12 13 #include <linux/pkeys.h> 13 14 #include <linux/seq_file.h> ··· 1059 1058 1060 1059 return __raw_xsave_addr(xsave, xfeature_nr); 1061 1060 } 1062 - EXPORT_SYMBOL_GPL(get_xsave_addr); 1061 + EXPORT_SYMBOL_FOR_KVM(get_xsave_addr); 1063 1062 1064 1063 /* 1065 1064 * Given an xstate feature nr, calculate where in the xsave buffer the state is. ··· 1483 1482 if (addr) 1484 1483 memset(addr, 0, xstate_sizes[xfeature]); 1485 1484 } 1486 - EXPORT_SYMBOL_GPL(fpstate_clear_xstate_component); 1485 + EXPORT_SYMBOL_FOR_KVM(fpstate_clear_xstate_component); 1487 1486 #endif 1488 1487 1489 1488 #ifdef CONFIG_X86_64 ··· 1819 1818 { 1820 1819 return xstate_get_group_perm(true); 1821 1820 } 1822 - EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm); 1821 + EXPORT_SYMBOL_FOR_KVM(xstate_get_guest_group_perm); 1823 1822 1824 1823 /** 1825 1824 * fpu_xstate_prctl - xstate permission operations
+2 -1
arch/x86/kernel/hw_breakpoint.c
··· 24 24 #include <linux/percpu.h> 25 25 #include <linux/kdebug.h> 26 26 #include <linux/kernel.h> 27 + #include <linux/kvm_types.h> 27 28 #include <linux/export.h> 28 29 #include <linux/sched.h> 29 30 #include <linux/smp.h> ··· 490 489 set_debugreg(DR6_RESERVED, 6); 491 490 set_debugreg(__this_cpu_read(cpu_dr7), 7); 492 491 } 493 - EXPORT_SYMBOL_GPL(hw_breakpoint_restore); 492 + EXPORT_SYMBOL_FOR_KVM(hw_breakpoint_restore); 494 493 495 494 /* 496 495 * Handle debug exception notifications.
+2 -1
arch/x86/kernel/irq.c
··· 12 12 #include <linux/delay.h> 13 13 #include <linux/export.h> 14 14 #include <linux/irq.h> 15 + #include <linux/kvm_types.h> 15 16 16 17 #include <asm/irq_stack.h> 17 18 #include <asm/apic.h> ··· 362 361 synchronize_rcu(); 363 362 } 364 363 } 365 - EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler); 364 + EXPORT_SYMBOL_FOR_KVM(kvm_set_posted_intr_wakeup_handler); 366 365 367 366 /* 368 367 * Handler for POSTED_INTERRUPT_VECTOR.
+3 -2
arch/x86/kernel/kvm.c
··· 29 29 #include <linux/syscore_ops.h> 30 30 #include <linux/cc_platform.h> 31 31 #include <linux/efi.h> 32 + #include <linux/kvm_types.h> 32 33 #include <asm/timer.h> 33 34 #include <asm/cpu.h> 34 35 #include <asm/traps.h> ··· 163 162 } 164 163 finish_swait(&n.wq, &wait); 165 164 } 166 - EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule); 165 + EXPORT_SYMBOL_FOR_KVM(kvm_async_pf_task_wait_schedule); 167 166 168 167 static void apf_task_wake_one(struct kvm_task_sleep_node *n) 169 168 { ··· 254 253 255 254 return flags; 256 255 } 257 - EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags); 256 + EXPORT_SYMBOL_FOR_KVM(kvm_read_and_reset_apf_flags); 258 257 259 258 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) 260 259 {
+2 -3
arch/x86/kernel/nmi.c
··· 24 24 #include <linux/export.h> 25 25 #include <linux/atomic.h> 26 26 #include <linux/sched/clock.h> 27 + #include <linux/kvm_types.h> 27 28 28 29 #include <asm/cpu_entry_area.h> 29 30 #include <asm/traps.h> ··· 614 613 { 615 614 exc_nmi(regs); 616 615 } 617 - #if IS_MODULE(CONFIG_KVM_INTEL) 618 - EXPORT_SYMBOL_GPL(asm_exc_nmi_kvm_vmx); 619 - #endif 616 + EXPORT_SYMBOL_FOR_KVM(asm_exc_nmi_kvm_vmx); 620 617 #endif 621 618 622 619 #ifdef CONFIG_NMI_CHECK_CPU
+2 -3
arch/x86/kernel/process_64.c
··· 30 30 #include <linux/interrupt.h> 31 31 #include <linux/delay.h> 32 32 #include <linux/export.h> 33 + #include <linux/kvm_types.h> 33 34 #include <linux/ptrace.h> 34 35 #include <linux/notifier.h> 35 36 #include <linux/kprobes.h> ··· 304 303 save_fsgs(current); 305 304 local_irq_restore(flags); 306 305 } 307 - #if IS_ENABLED(CONFIG_KVM) 308 - EXPORT_SYMBOL_GPL(current_save_fsgs); 309 - #endif 306 + EXPORT_SYMBOL_FOR_KVM(current_save_fsgs); 310 307 311 308 static __always_inline void loadseg(enum which_selector which, 312 309 unsigned short sel)
+3 -2
arch/x86/kernel/reboot.c
··· 13 13 #include <linux/objtool.h> 14 14 #include <linux/pgtable.h> 15 15 #include <linux/kexec.h> 16 + #include <linux/kvm_types.h> 16 17 #include <acpi/reboot.h> 17 18 #include <asm/io.h> 18 19 #include <asm/apic.h> ··· 542 541 543 542 rcu_assign_pointer(cpu_emergency_virt_callback, callback); 544 543 } 545 - EXPORT_SYMBOL_GPL(cpu_emergency_register_virt_callback); 544 + EXPORT_SYMBOL_FOR_KVM(cpu_emergency_register_virt_callback); 546 545 547 546 void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback) 548 547 { ··· 552 551 rcu_assign_pointer(cpu_emergency_virt_callback, NULL); 553 552 synchronize_rcu(); 554 553 } 555 - EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback); 554 + EXPORT_SYMBOL_FOR_KVM(cpu_emergency_unregister_virt_callback); 556 555 557 556 /* 558 557 * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
+1
arch/x86/kernel/tsc.c
··· 11 11 #include <linux/cpufreq.h> 12 12 #include <linux/delay.h> 13 13 #include <linux/clocksource.h> 14 + #include <linux/kvm_types.h> 14 15 #include <linux/percpu.h> 15 16 #include <linux/timex.h> 16 17 #include <linux/static_key.h>
+5 -4
arch/x86/lib/cache-smp.c
··· 2 2 #include <asm/paravirt.h> 3 3 #include <linux/smp.h> 4 4 #include <linux/export.h> 5 + #include <linux/kvm_types.h> 5 6 6 7 static void __wbinvd(void *dummy) 7 8 { ··· 13 12 { 14 13 smp_call_function_single(cpu, __wbinvd, NULL, 1); 15 14 } 16 - EXPORT_SYMBOL(wbinvd_on_cpu); 15 + EXPORT_SYMBOL_FOR_KVM(wbinvd_on_cpu); 17 16 18 17 void wbinvd_on_all_cpus(void) 19 18 { ··· 25 24 { 26 25 on_each_cpu_mask(cpus, __wbinvd, NULL, 1); 27 26 } 28 - EXPORT_SYMBOL_GPL(wbinvd_on_cpus_mask); 27 + EXPORT_SYMBOL_FOR_KVM(wbinvd_on_cpus_mask); 29 28 30 29 static void __wbnoinvd(void *dummy) 31 30 { ··· 36 35 { 37 36 on_each_cpu(__wbnoinvd, NULL, 1); 38 37 } 39 - EXPORT_SYMBOL_GPL(wbnoinvd_on_all_cpus); 38 + EXPORT_SYMBOL_FOR_KVM(wbnoinvd_on_all_cpus); 40 39 41 40 void wbnoinvd_on_cpus_mask(struct cpumask *cpus) 42 41 { 43 42 on_each_cpu_mask(cpus, __wbnoinvd, NULL, 1); 44 43 } 45 - EXPORT_SYMBOL_GPL(wbnoinvd_on_cpus_mask); 44 + EXPORT_SYMBOL_FOR_KVM(wbnoinvd_on_cpus_mask);
+3 -2
arch/x86/lib/msr.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/export.h> 3 + #include <linux/kvm_types.h> 3 4 #include <linux/percpu.h> 4 5 #include <linux/preempt.h> 5 6 #include <asm/msr.h> ··· 104 103 { 105 104 return __flip_bit(msr, bit, true); 106 105 } 107 - EXPORT_SYMBOL_GPL(msr_set_bit); 106 + EXPORT_SYMBOL_FOR_KVM(msr_set_bit); 108 107 109 108 /** 110 109 * msr_clear_bit - Clear @bit in a MSR @msr. ··· 120 119 { 121 120 return __flip_bit(msr, bit, false); 122 121 } 123 - EXPORT_SYMBOL_GPL(msr_clear_bit); 122 + EXPORT_SYMBOL_FOR_KVM(msr_clear_bit); 124 123 125 124 #ifdef CONFIG_TRACEPOINTS 126 125 void do_trace_write_msr(u32 msr, u64 val, int failed)
+2 -1
arch/x86/mm/pat/memtype.c
··· 42 42 #include <linux/highmem.h> 43 43 #include <linux/fs.h> 44 44 #include <linux/rbtree.h> 45 + #include <linux/kvm_types.h> 45 46 46 47 #include <asm/cpu_device_id.h> 47 48 #include <asm/cacheflush.h> ··· 698 697 cm == _PAGE_CACHE_MODE_UC_MINUS || 699 698 cm == _PAGE_CACHE_MODE_WC; 700 699 } 701 - EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr); 700 + EXPORT_SYMBOL_FOR_KVM(pat_pfn_immune_to_uc_mtrr); 702 701 703 702 /** 704 703 * memtype_reserve_io - Request a memory type mapping for a region of memory
+3 -2
arch/x86/mm/tlb.c
··· 12 12 #include <linux/task_work.h> 13 13 #include <linux/mmu_notifier.h> 14 14 #include <linux/mmu_context.h> 15 + #include <linux/kvm_types.h> 15 16 16 17 #include <asm/tlbflush.h> 17 18 #include <asm/mmu_context.h> ··· 1583 1582 VM_BUG_ON(cr3 != __read_cr3()); 1584 1583 return cr3; 1585 1584 } 1586 - EXPORT_SYMBOL_GPL(__get_current_cr3_fast); 1585 + EXPORT_SYMBOL_FOR_KVM(__get_current_cr3_fast); 1587 1586 1588 1587 /* 1589 1588 * Flush one page in the kernel mapping ··· 1724 1723 flush_tlb_local(); 1725 1724 } 1726 1725 } 1727 - EXPORT_SYMBOL_GPL(__flush_tlb_all); 1726 + EXPORT_SYMBOL_FOR_KVM(__flush_tlb_all); 1728 1727 1729 1728 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) 1730 1729 {
+35 -34
arch/x86/virt/vmx/tdx/tdx.c
··· 29 29 #include <linux/acpi.h> 30 30 #include <linux/suspend.h> 31 31 #include <linux/idr.h> 32 + #include <linux/kvm_types.h> 32 33 #include <asm/page.h> 33 34 #include <asm/special_insns.h> 34 35 #include <asm/msr-index.h> ··· 182 181 183 182 return 0; 184 183 } 185 - EXPORT_SYMBOL_GPL(tdx_cpu_enable); 184 + EXPORT_SYMBOL_FOR_KVM(tdx_cpu_enable); 186 185 187 186 /* 188 187 * Add a memory region as a TDX memory block. The caller must make sure ··· 663 662 { 664 663 tdx_quirk_reset_paddr(page_to_phys(page), PAGE_SIZE); 665 664 } 666 - EXPORT_SYMBOL_GPL(tdx_quirk_reset_page); 665 + EXPORT_SYMBOL_FOR_KVM(tdx_quirk_reset_page); 667 666 668 667 static void tdmr_quirk_reset_pamt(struct tdmr_info *tdmr) 669 668 { ··· 1217 1216 1218 1217 return ret; 1219 1218 } 1220 - EXPORT_SYMBOL_GPL(tdx_enable); 1219 + EXPORT_SYMBOL_FOR_KVM(tdx_enable); 1221 1220 1222 1221 static bool is_pamt_page(unsigned long phys) 1223 1222 { ··· 1478 1477 1479 1478 return p; 1480 1479 } 1481 - EXPORT_SYMBOL_GPL(tdx_get_sysinfo); 1480 + EXPORT_SYMBOL_FOR_KVM(tdx_get_sysinfo); 1482 1481 1483 1482 u32 tdx_get_nr_guest_keyids(void) 1484 1483 { 1485 1484 return tdx_nr_guest_keyids; 1486 1485 } 1487 - EXPORT_SYMBOL_GPL(tdx_get_nr_guest_keyids); 1486 + EXPORT_SYMBOL_FOR_KVM(tdx_get_nr_guest_keyids); 1488 1487 1489 1488 int tdx_guest_keyid_alloc(void) 1490 1489 { ··· 1492 1491 tdx_guest_keyid_start + tdx_nr_guest_keyids - 1, 1493 1492 GFP_KERNEL); 1494 1493 } 1495 - EXPORT_SYMBOL_GPL(tdx_guest_keyid_alloc); 1494 + EXPORT_SYMBOL_FOR_KVM(tdx_guest_keyid_alloc); 1496 1495 1497 1496 void tdx_guest_keyid_free(unsigned int keyid) 1498 1497 { 1499 1498 ida_free(&tdx_guest_keyid_pool, keyid); 1500 1499 } 1501 - EXPORT_SYMBOL_GPL(tdx_guest_keyid_free); 1500 + EXPORT_SYMBOL_FOR_KVM(tdx_guest_keyid_free); 1502 1501 1503 1502 static inline u64 tdx_tdr_pa(struct tdx_td *td) 1504 1503 { ··· 1522 1521 1523 1522 return __seamcall_dirty_cache(__seamcall_saved_ret, TDH_VP_ENTER, args); 1524 1523 } 1525 - 
EXPORT_SYMBOL_GPL(tdh_vp_enter); 1524 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_enter); 1526 1525 1527 1526 u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page) 1528 1527 { ··· 1534 1533 tdx_clflush_page(tdcs_page); 1535 1534 return seamcall(TDH_MNG_ADDCX, &args); 1536 1535 } 1537 - EXPORT_SYMBOL_GPL(tdh_mng_addcx); 1536 + EXPORT_SYMBOL_FOR_KVM(tdh_mng_addcx); 1538 1537 1539 1538 u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2) 1540 1539 { ··· 1554 1553 1555 1554 return ret; 1556 1555 } 1557 - EXPORT_SYMBOL_GPL(tdh_mem_page_add); 1556 + EXPORT_SYMBOL_FOR_KVM(tdh_mem_page_add); 1558 1557 1559 1558 u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2) 1560 1559 { ··· 1573 1572 1574 1573 return ret; 1575 1574 } 1576 - EXPORT_SYMBOL_GPL(tdh_mem_sept_add); 1575 + EXPORT_SYMBOL_FOR_KVM(tdh_mem_sept_add); 1577 1576 1578 1577 u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page) 1579 1578 { ··· 1585 1584 tdx_clflush_page(tdcx_page); 1586 1585 return seamcall(TDH_VP_ADDCX, &args); 1587 1586 } 1588 - EXPORT_SYMBOL_GPL(tdh_vp_addcx); 1587 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_addcx); 1589 1588 1590 1589 u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2) 1591 1590 { ··· 1604 1603 1605 1604 return ret; 1606 1605 } 1607 - EXPORT_SYMBOL_GPL(tdh_mem_page_aug); 1606 + EXPORT_SYMBOL_FOR_KVM(tdh_mem_page_aug); 1608 1607 1609 1608 u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2) 1610 1609 { ··· 1621 1620 1622 1621 return ret; 1623 1622 } 1624 - EXPORT_SYMBOL_GPL(tdh_mem_range_block); 1623 + EXPORT_SYMBOL_FOR_KVM(tdh_mem_range_block); 1625 1624 1626 1625 u64 tdh_mng_key_config(struct tdx_td *td) 1627 1626 { ··· 1631 1630 1632 1631 return seamcall(TDH_MNG_KEY_CONFIG, &args); 1633 1632 } 1634 - EXPORT_SYMBOL_GPL(tdh_mng_key_config); 1633 + 
EXPORT_SYMBOL_FOR_KVM(tdh_mng_key_config); 1635 1634 1636 1635 u64 tdh_mng_create(struct tdx_td *td, u16 hkid) 1637 1636 { ··· 1643 1642 tdx_clflush_page(td->tdr_page); 1644 1643 return seamcall(TDH_MNG_CREATE, &args); 1645 1644 } 1646 - EXPORT_SYMBOL_GPL(tdh_mng_create); 1645 + EXPORT_SYMBOL_FOR_KVM(tdh_mng_create); 1647 1646 1648 1647 u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp) 1649 1648 { ··· 1655 1654 tdx_clflush_page(vp->tdvpr_page); 1656 1655 return seamcall(TDH_VP_CREATE, &args); 1657 1656 } 1658 - EXPORT_SYMBOL_GPL(tdh_vp_create); 1657 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_create); 1659 1658 1660 1659 u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data) 1661 1660 { ··· 1672 1671 1673 1672 return ret; 1674 1673 } 1675 - EXPORT_SYMBOL_GPL(tdh_mng_rd); 1674 + EXPORT_SYMBOL_FOR_KVM(tdh_mng_rd); 1676 1675 1677 1676 u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2) 1678 1677 { ··· 1689 1688 1690 1689 return ret; 1691 1690 } 1692 - EXPORT_SYMBOL_GPL(tdh_mr_extend); 1691 + EXPORT_SYMBOL_FOR_KVM(tdh_mr_extend); 1693 1692 1694 1693 u64 tdh_mr_finalize(struct tdx_td *td) 1695 1694 { ··· 1699 1698 1700 1699 return seamcall(TDH_MR_FINALIZE, &args); 1701 1700 } 1702 - EXPORT_SYMBOL_GPL(tdh_mr_finalize); 1701 + EXPORT_SYMBOL_FOR_KVM(tdh_mr_finalize); 1703 1702 1704 1703 u64 tdh_vp_flush(struct tdx_vp *vp) 1705 1704 { ··· 1709 1708 1710 1709 return seamcall(TDH_VP_FLUSH, &args); 1711 1710 } 1712 - EXPORT_SYMBOL_GPL(tdh_vp_flush); 1711 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_flush); 1713 1712 1714 1713 u64 tdh_mng_vpflushdone(struct tdx_td *td) 1715 1714 { ··· 1719 1718 1720 1719 return seamcall(TDH_MNG_VPFLUSHDONE, &args); 1721 1720 } 1722 - EXPORT_SYMBOL_GPL(tdh_mng_vpflushdone); 1721 + EXPORT_SYMBOL_FOR_KVM(tdh_mng_vpflushdone); 1723 1722 1724 1723 u64 tdh_mng_key_freeid(struct tdx_td *td) 1725 1724 { ··· 1729 1728 1730 1729 return seamcall(TDH_MNG_KEY_FREEID, &args); 1731 1730 } 1732 - EXPORT_SYMBOL_GPL(tdh_mng_key_freeid); 1731 + 
EXPORT_SYMBOL_FOR_KVM(tdh_mng_key_freeid); 1733 1732 1734 1733 u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err) 1735 1734 { ··· 1745 1744 1746 1745 return ret; 1747 1746 } 1748 - EXPORT_SYMBOL_GPL(tdh_mng_init); 1747 + EXPORT_SYMBOL_FOR_KVM(tdh_mng_init); 1749 1748 1750 1749 u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data) 1751 1750 { ··· 1762 1761 1763 1762 return ret; 1764 1763 } 1765 - EXPORT_SYMBOL_GPL(tdh_vp_rd); 1764 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_rd); 1766 1765 1767 1766 u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask) 1768 1767 { ··· 1775 1774 1776 1775 return seamcall(TDH_VP_WR, &args); 1777 1776 } 1778 - EXPORT_SYMBOL_GPL(tdh_vp_wr); 1777 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_wr); 1779 1778 1780 1779 u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid) 1781 1780 { ··· 1788 1787 /* apicid requires version == 1. */ 1789 1788 return seamcall(TDH_VP_INIT | (1ULL << TDX_VERSION_SHIFT), &args); 1790 1789 } 1791 - EXPORT_SYMBOL_GPL(tdh_vp_init); 1790 + EXPORT_SYMBOL_FOR_KVM(tdh_vp_init); 1792 1791 1793 1792 /* 1794 1793 * TDX ABI defines output operands as PT, OWNER and SIZE. These are TDX defined fomats. 
··· 1810 1809 1811 1810 return ret; 1812 1811 } 1813 - EXPORT_SYMBOL_GPL(tdh_phymem_page_reclaim); 1812 + EXPORT_SYMBOL_FOR_KVM(tdh_phymem_page_reclaim); 1814 1813 1815 1814 u64 tdh_mem_track(struct tdx_td *td) 1816 1815 { ··· 1820 1819 1821 1820 return seamcall(TDH_MEM_TRACK, &args); 1822 1821 } 1823 - EXPORT_SYMBOL_GPL(tdh_mem_track); 1822 + EXPORT_SYMBOL_FOR_KVM(tdh_mem_track); 1824 1823 1825 1824 u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2) 1826 1825 { ··· 1837 1836 1838 1837 return ret; 1839 1838 } 1840 - EXPORT_SYMBOL_GPL(tdh_mem_page_remove); 1839 + EXPORT_SYMBOL_FOR_KVM(tdh_mem_page_remove); 1841 1840 1842 1841 u64 tdh_phymem_cache_wb(bool resume) 1843 1842 { ··· 1847 1846 1848 1847 return seamcall(TDH_PHYMEM_CACHE_WB, &args); 1849 1848 } 1850 - EXPORT_SYMBOL_GPL(tdh_phymem_cache_wb); 1849 + EXPORT_SYMBOL_FOR_KVM(tdh_phymem_cache_wb); 1851 1850 1852 1851 u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td) 1853 1852 { ··· 1857 1856 1858 1857 return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args); 1859 1858 } 1860 - EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_tdr); 1859 + EXPORT_SYMBOL_FOR_KVM(tdh_phymem_page_wbinvd_tdr); 1861 1860 1862 1861 u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page) 1863 1862 { ··· 1867 1866 1868 1867 return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args); 1869 1868 } 1870 - EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid); 1869 + EXPORT_SYMBOL_FOR_KVM(tdh_phymem_page_wbinvd_hkid); 1871 1870 1872 1871 #ifdef CONFIG_KEXEC_CORE 1873 1872 void tdx_cpu_flush_cache_for_kexec(void) ··· 1885 1884 wbinvd(); 1886 1885 this_cpu_write(cache_state_incoherent, false); 1887 1886 } 1888 - EXPORT_SYMBOL_GPL(tdx_cpu_flush_cache_for_kexec); 1887 + EXPORT_SYMBOL_FOR_KVM(tdx_cpu_flush_cache_for_kexec); 1889 1888 #endif
+14
include/linux/kvm_types.h
··· 11 11 #ifdef KVM_SUB_MODULES 12 12 #define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \ 13 13 EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES)) 14 + #define EXPORT_SYMBOL_FOR_KVM(symbol) \ 15 + EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm," __stringify(KVM_SUB_MODULES)) 14 16 #else 15 17 #define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) 18 + /* 19 + * Allow architectures to provide a custom EXPORT_SYMBOL_FOR_KVM, but only 20 + * if there are no submodules, e.g. to allow suppressing exports if KVM=m, but 21 + * kvm.ko won't actually be built (due to lack of at least one submodule). 22 + */ 23 + #ifndef EXPORT_SYMBOL_FOR_KVM 24 + #if IS_MODULE(CONFIG_KVM) 25 + #define EXPORT_SYMBOL_FOR_KVM(symbol) EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm") 26 + #else 27 + #define EXPORT_SYMBOL_FOR_KVM(symbol) 28 + #endif /* IS_MODULE(CONFIG_KVM) */ 29 + #endif /* EXPORT_SYMBOL_FOR_KVM */ 16 30 #endif 17 31 18 32 #ifndef __ASSEMBLER__