Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/boot: Move startup code out of __head section

Move startup code out of the __head section, now that this no longer has
a special significance. Move everything into .text or .init.text as
appropriate, so that startup code is not kept around unnecessarily.

[ bp: Fold in hunk to fix 32-bit CPU hotplug:
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202509022207.56fd97f4-lkp@intel.com ]

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/20250828102202.1849035-45-ardb+git@google.com

Authored by Ard Biesheuvel; committed by Borislav Petkov (AMD).
c5c30a37 e7b88bc0

+48 -54
-3
arch/x86/boot/compressed/sev.c
··· 32 32 #undef __init 33 33 #define __init 34 34 35 - #undef __head 36 - #define __head 37 - 38 35 #define __BOOT_COMPRESSED 39 36 40 37 u8 snp_vmpl;
+2 -2
arch/x86/boot/startup/gdt_idt.c
··· 24 24 static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data; 25 25 26 26 /* This may run while still in the direct mapping */ 27 - void __head startup_64_load_idt(void *vc_handler) 27 + void startup_64_load_idt(void *vc_handler) 28 28 { 29 29 struct desc_ptr desc = { 30 30 .address = (unsigned long)rip_rel_ptr(bringup_idt_table), ··· 46 46 /* 47 47 * Setup boot CPU state needed before kernel switches to virtual addresses. 48 48 */ 49 - void __head startup_64_setup_gdt_idt(void) 49 + void __init startup_64_setup_gdt_idt(void) 50 50 { 51 51 struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page); 52 52 void *handler = NULL;
+2 -2
arch/x86/boot/startup/map_kernel.c
··· 30 30 return true; 31 31 } 32 32 33 - static unsigned long __head sme_postprocess_startup(struct boot_params *bp, 33 + static unsigned long __init sme_postprocess_startup(struct boot_params *bp, 34 34 pmdval_t *pmd, 35 35 unsigned long p2v_offset) 36 36 { ··· 84 84 * the 1:1 mapping of memory. Kernel virtual addresses can be determined by 85 85 * subtracting p2v_offset from the RIP-relative address. 86 86 */ 87 - unsigned long __head __startup_64(unsigned long p2v_offset, 87 + unsigned long __init __startup_64(unsigned long p2v_offset, 88 88 struct boot_params *bp) 89 89 { 90 90 pmd_t (*early_pgts)[PTRS_PER_PMD] = rip_rel_ptr(early_dynamic_pgts);
+18 -18
arch/x86/boot/startup/sev-shared.c
··· 33 33 34 34 bool sev_snp_needs_sfw; 35 35 36 - void __head __noreturn 36 + void __noreturn 37 37 sev_es_terminate(unsigned int set, unsigned int reason) 38 38 { 39 39 u64 val = GHCB_MSR_TERM_REQ; ··· 52 52 /* 53 53 * The hypervisor features are available from GHCB version 2 onward. 54 54 */ 55 - u64 get_hv_features(void) 55 + u64 __init get_hv_features(void) 56 56 { 57 57 u64 val; 58 58 ··· 222 222 * 223 223 * Return: XSAVE area size on success, 0 otherwise. 224 224 */ 225 - static u32 __head snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted) 225 + static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted) 226 226 { 227 227 const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); 228 228 u64 xfeatures_found = 0; ··· 258 258 return xsave_size; 259 259 } 260 260 261 - static bool __head 261 + static bool 262 262 snp_cpuid_get_validated_func(struct cpuid_leaf *leaf) 263 263 { 264 264 const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); ··· 300 300 sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV); 301 301 } 302 302 303 - static int __head 303 + static int 304 304 snp_cpuid_postprocess(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf), 305 305 void *ctx, struct cpuid_leaf *leaf) 306 306 { ··· 396 396 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value 397 397 * should be treated as fatal by caller. 398 398 */ 399 - int __head snp_cpuid(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf), 400 - void *ctx, struct cpuid_leaf *leaf) 399 + int snp_cpuid(void (*cpuid_fn)(void *ctx, struct cpuid_leaf *leaf), 400 + void *ctx, struct cpuid_leaf *leaf) 401 401 { 402 402 const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); 403 403 ··· 439 439 * page yet, so it only supports the MSR based communication with the 440 440 * hypervisor and only the CPUID exit-code. 
441 441 */ 442 - void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) 442 + void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) 443 443 { 444 444 unsigned int subfn = lower_bits(regs->cx, 32); 445 445 unsigned int fn = lower_bits(regs->ax, 32); ··· 515 515 * Search for a Confidential Computing blob passed in as a setup_data entry 516 516 * via the Linux Boot Protocol. 517 517 */ 518 - static __head 518 + static __init 519 519 struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp) 520 520 { 521 521 struct cc_setup_data *sd = NULL; ··· 543 543 * mapping needs to be updated in sync with all the changes to virtual memory 544 544 * layout and related mapping facilities throughout the boot process. 545 545 */ 546 - static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info) 546 + static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info) 547 547 { 548 548 const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table; 549 549 int i; ··· 571 571 } 572 572 } 573 573 574 - static int __head svsm_call_msr_protocol(struct svsm_call *call) 574 + static int svsm_call_msr_protocol(struct svsm_call *call) 575 575 { 576 576 int ret; 577 577 ··· 582 582 return ret; 583 583 } 584 584 585 - static void __head svsm_pval_4k_page(unsigned long paddr, bool validate, 586 - struct svsm_ca *caa, u64 caa_pa) 585 + static void svsm_pval_4k_page(unsigned long paddr, bool validate, 586 + struct svsm_ca *caa, u64 caa_pa) 587 587 { 588 588 struct svsm_pvalidate_call *pc; 589 589 struct svsm_call call = {}; ··· 624 624 native_local_irq_restore(flags); 625 625 } 626 626 627 - static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, 628 - bool validate, struct svsm_ca *caa, u64 caa_pa) 627 + static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, 628 + bool validate, struct svsm_ca *caa, u64 caa_pa) 629 629 { 630 630 int ret; 631 631 ··· 645 645 sev_evict_cache((void *)vaddr, 1); 
646 646 } 647 647 648 - static void __head __page_state_change(unsigned long vaddr, unsigned long paddr, 649 - const struct psc_desc *desc) 648 + static void __page_state_change(unsigned long vaddr, unsigned long paddr, 649 + const struct psc_desc *desc) 650 650 { 651 651 u64 val, msr; 652 652 ··· 684 684 * Maintain the GPA of the SVSM Calling Area (CA) in order to utilize the SVSM 685 685 * services needed when not running in VMPL0. 686 686 */ 687 - static bool __head svsm_setup_ca(const struct cc_blob_sev_info *cc_info, 687 + static bool __init svsm_setup_ca(const struct cc_blob_sev_info *cc_info, 688 688 void *page) 689 689 { 690 690 struct snp_secrets_page *secrets_page;
+7 -7
arch/x86/boot/startup/sev-startup.c
··· 44 44 /* Include code shared with pre-decompression boot stage */ 45 45 #include "sev-shared.c" 46 46 47 - void __head 47 + void __init 48 48 early_set_pages_state(unsigned long vaddr, unsigned long paddr, 49 49 unsigned long npages, const struct psc_desc *desc) 50 50 { ··· 63 63 } 64 64 } 65 65 66 - void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, 66 + void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, 67 67 unsigned long npages) 68 68 { 69 69 struct psc_desc d = { ··· 88 88 early_set_pages_state(vaddr, paddr, npages, &d); 89 89 } 90 90 91 - void __head early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, 91 + void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, 92 92 unsigned long npages) 93 93 { 94 94 struct psc_desc d = { ··· 123 123 * 124 124 * Scan for the blob in that order. 125 125 */ 126 - static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp) 126 + static struct cc_blob_sev_info *__init find_cc_blob(struct boot_params *bp) 127 127 { 128 128 struct cc_blob_sev_info *cc_info; 129 129 ··· 149 149 return cc_info; 150 150 } 151 151 152 - static __head void svsm_setup(struct cc_blob_sev_info *cc_info) 152 + static void __init svsm_setup(struct cc_blob_sev_info *cc_info) 153 153 { 154 154 struct snp_secrets_page *secrets = (void *)cc_info->secrets_phys; 155 155 struct svsm_call call = {}; ··· 190 190 boot_svsm_caa_pa = pa; 191 191 } 192 192 193 - bool __head snp_init(struct boot_params *bp) 193 + bool __init snp_init(struct boot_params *bp) 194 194 { 195 195 struct cc_blob_sev_info *cc_info; 196 196 ··· 219 219 return true; 220 220 } 221 221 222 - void __head __noreturn snp_abort(void) 222 + void __init __noreturn snp_abort(void) 223 223 { 224 224 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); 225 225 }
+13 -13
arch/x86/boot/startup/sme.c
··· 91 91 */ 92 92 static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch"); 93 93 94 - static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd) 94 + static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) 95 95 { 96 96 unsigned long pgd_start, pgd_end, pgd_size; 97 97 pgd_t *pgd_p; ··· 106 106 memset(pgd_p, 0, pgd_size); 107 107 } 108 108 109 - static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) 109 + static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) 110 110 { 111 111 pgd_t *pgd; 112 112 p4d_t *p4d; ··· 143 143 return pud; 144 144 } 145 145 146 - static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) 146 + static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) 147 147 { 148 148 pud_t *pud; 149 149 pmd_t *pmd; ··· 159 159 set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags)); 160 160 } 161 161 162 - static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd) 162 + static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) 163 163 { 164 164 pud_t *pud; 165 165 pmd_t *pmd; ··· 185 185 set_pte(pte, __pte(ppd->paddr | ppd->pte_flags)); 186 186 } 187 187 188 - static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) 188 + static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) 189 189 { 190 190 while (ppd->vaddr < ppd->vaddr_end) { 191 191 sme_populate_pgd_large(ppd); ··· 195 195 } 196 196 } 197 197 198 - static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd) 198 + static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) 199 199 { 200 200 while (ppd->vaddr < ppd->vaddr_end) { 201 201 sme_populate_pgd(ppd); ··· 205 205 } 206 206 } 207 207 208 - static void __head __sme_map_range(struct sme_populate_pgd_data *ppd, 208 + static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, 209 209 pmdval_t pmd_flags, pteval_t pte_flags) 210 210 { 211 211 unsigned long 
vaddr_end; ··· 229 229 __sme_map_range_pte(ppd); 230 230 } 231 231 232 - static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) 232 + static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) 233 233 { 234 234 __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); 235 235 } 236 236 237 - static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) 237 + static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) 238 238 { 239 239 __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); 240 240 } 241 241 242 - static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) 242 + static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) 243 243 { 244 244 __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); 245 245 } 246 246 247 - static unsigned long __head sme_pgtable_calc(unsigned long len) 247 + static unsigned long __init sme_pgtable_calc(unsigned long len) 248 248 { 249 249 unsigned long entries = 0, tables = 0; 250 250 ··· 281 281 return entries + tables; 282 282 } 283 283 284 - void __head sme_encrypt_kernel(struct boot_params *bp) 284 + void __init sme_encrypt_kernel(struct boot_params *bp) 285 285 { 286 286 unsigned long workarea_start, workarea_end, workarea_len; 287 287 unsigned long execute_start, execute_end, execute_len; ··· 485 485 native_write_cr3(__native_read_cr3()); 486 486 } 487 487 488 - void __head sme_enable(struct boot_params *bp) 488 + void __init sme_enable(struct boot_params *bp) 489 489 { 490 490 unsigned int eax, ebx, ecx, edx; 491 491 unsigned long feature_mask;
-6
arch/x86/include/asm/init.h
··· 2 2 #ifndef _ASM_X86_INIT_H 3 3 #define _ASM_X86_INIT_H 4 4 5 - #if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000 6 - #define __head __section(".head.text") __no_sanitize_undefined __no_stack_protector 7 - #else 8 - #define __head __section(".head.text") __no_sanitize_undefined __no_kstack_erase 9 - #endif 10 - 11 5 struct x86_mapping_info { 12 6 void *(*alloc_pgt_page)(void *); /* allocate buf for page table */ 13 7 void (*free_pgt_page)(void *, void *); /* free buf for page table */
+4 -1
arch/x86/kernel/head_32.S
··· 61 61 * any particular GDT layout, because we load our own as soon as we 62 62 * can. 63 63 */ 64 - __HEAD 64 + __INIT 65 65 SYM_CODE_START(startup_32) 66 66 movl pa(initial_stack),%ecx 67 67 ··· 136 136 * If cpu hotplug is not supported then this code can go in init section 137 137 * which will be freed later 138 138 */ 139 + #ifdef CONFIG_HOTPLUG_CPU 140 + .text 141 + #endif 139 142 SYM_FUNC_START(startup_32_smp) 140 143 cld 141 144 movl $(__BOOT_DS),%eax
+1 -1
arch/x86/kernel/head_64.S
··· 33 33 * because we need identity-mapped pages. 34 34 */ 35 35 36 - __HEAD 36 + __INIT 37 37 .code64 38 38 SYM_CODE_START_NOALIGN(startup_64) 39 39 UNWIND_HINT_END_OF_STACK
+1 -1
arch/x86/platform/pvh/head.S
··· 24 24 #include <asm/nospec-branch.h> 25 25 #include <xen/interface/elfnote.h> 26 26 27 - __HEAD 27 + __INIT 28 28 29 29 /* 30 30 * Entry point for PVH guests.