init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()

Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and
remove the weak fallback from the core code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de
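
The "weak fallback" being removed is the linker-level default that init/main.c used to carry so that architectures without memory encryption would still link. As a rough illustration only (hypothetical subsys_init() name and plain GCC attribute syntax, not the kernel sources), the pattern looks like this:

/* core.c -- generic code: weak no-op definition, kept only so the call links */
__attribute__((weak)) void subsys_init(void)
{
        /* default does nothing; dropped at link time if an arch overrides it */
}

/* arch.c -- architecture code: a strong definition overrides the weak one */
void subsys_init(void)
{
        /* real, architecture-specific initialization */
}

With mem_encrypt_init() now declared (or stubbed out) in the x86 header and called from x86 code, that linker trick is no longer needed in the core code.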

+15 -16
+4 -3
arch/x86/include/asm/mem_encrypt.h
···
 
 void __init sev_es_init_vc_handling(void);
 
+void __init mem_encrypt_init(void);
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else /* !CONFIG_AMD_MEM_ENCRYPT */
···
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline void mem_encrypt_init(void) { }
+
 #define __bss_decrypted
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
-
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void);
 
 void add_encrypt_protection_map(void);
 
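
The header change above is what makes an unconditional call site possible: with CONFIG_AMD_MEM_ENCRYPT=y the prototype is visible, and with it disabled the static inline stub compiles to nothing, so the caller needs no #ifdef. A minimal user-space sketch of that idiom (hypothetical names and config macro, assuming a plain C compiler rather than a kernel build):

#include <stdio.h>

#ifdef CONFIG_DEMO_ENCRYPT
/* "real" implementation, provided when the feature is configured in */
void demo_encrypt_init(void) { puts("marking bounce buffers decrypted"); }
#else
/* stub: the compiler inlines the empty body and emits no call at all */
static inline void demo_encrypt_init(void) { }
#endif

int main(void)
{
        demo_encrypt_init();    /* call site stays #ifdef-free either way */
        return 0;
}

Compared with the removed __weak default, the stub resolves at compile time instead of link time and keeps the fallback next to the feature it belongs to.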
+11
arch/x86/kernel/cpu/common.c
···
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
···
         } else {
                 fpu__init_check_bugs();
         }
+
+        /*
+         * This needs to be called before any devices perform DMA
+         * operations that might use the SWIOTLB bounce buffers. It will
+         * mark the bounce buffers as decrypted so that their usage will
+         * not cause "plain-text" data to be decrypted when accessed. It
+         * must be called after late_time_init() so that Hyper-V x86/x64
+         * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+         */
+        mem_encrypt_init();
 }
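
Why this is not a functional change: arch_cpu_finalize_init() is invoked from start_kernel() at exactly the spot where the direct mem_encrypt_init() call used to sit (after late_time_init() and calibrate_delay(), before device setup), so the call merely moves one frame deeper. A compressed user-space sketch of that ordering, with stubbed functions standing in for the real ones:

#include <stdio.h>

static void late_time_init(void)   { puts("late_time_init"); }
static void calibrate_delay(void)  { puts("calibrate_delay"); }
static void mem_encrypt_init(void) { puts("mem_encrypt_init: decrypt SWIOTLB buffers"); }

static void arch_cpu_finalize_init(void)
{
        puts("arch_cpu_finalize_init: FPU bug checks, ...");
        mem_encrypt_init();             /* new home: last step of the x86 hook */
}

int main(void)                          /* stands in for start_kernel() */
{
        late_time_init();               /* Hyper-V hypercalls usable after this */
        calibrate_delay();
        arch_cpu_finalize_init();
        /* the old code called mem_encrypt_init() directly at this point */
        puts("pid_idr_init, driver and DMA setup follow ...");
        return 0;
}

Either way mem_encrypt_init() runs before any DMA-capable device initialization, which is the constraint the comment block documents.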
-13
init/main.c
···
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
 #include <linux/kcsan.h>
 #include <linux/init_syscalls.h>
 #include <linux/stackdepot.h>
···
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 void __init __weak poking_init(void) { }
 
 void __init __weak pgtable_cache_init(void) { }
···
         calibrate_delay();
 
         arch_cpu_finalize_init();
-
-        /*
-         * This needs to be called before any devices perform DMA
-         * operations that might use the SWIOTLB bounce buffers. It will
-         * mark the bounce buffers as decrypted so that their usage will
-         * not cause "plain-text" data to be decrypted when accessed. It
-         * must be called after late_time_init() so that Hyper-V x86/x64
-         * hypercalls work when the SWIOTLB bounce buffers are decrypted.
-         */
-        mem_encrypt_init();
 
         pid_idr_init();
         anon_vma_init();