Merge tag 'x86-boot-2023-06-26' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 boot updates from Thomas Gleixner:
"Initialize FPU late.

Right now FPU is initialized very early during boot. There is no real
requirement to do so. The only requirement is to have it done before
alternatives are patched.

That's done in check_bugs() which does way more than what the function
name suggests.

So first rename check_bugs() to arch_cpu_finalize_init() which makes
it clear what this is about.

Move the invocation of arch_cpu_finalize_init() earlier in
start_kernel() as it has to be done before fork_init() which needs to
know the FPU register buffer size.

With those prerequisites the FPU initialization can be moved into
arch_cpu_finalize_init(), which removes it from the early and fragile
part of the x86 bringup"

* tag 'x86-boot-2023-06-26' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mem_encrypt: Unbreak the AMD_MEM_ENCRYPT=n build
x86/fpu: Move FPU initialization into arch_cpu_finalize_init()
x86/fpu: Mark init functions __init
x86/fpu: Remove cpuinfo argument from init functions
x86/init: Initialize signal frame size late
init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()
init: Invoke arch_cpu_finalize_init() earlier
init: Remove check_bugs() leftovers
um/cpu: Switch to arch_cpu_finalize_init()
sparc/cpu: Switch to arch_cpu_finalize_init()
sh/cpu: Switch to arch_cpu_finalize_init()
mips/cpu: Switch to arch_cpu_finalize_init()
m68k/cpu: Switch to arch_cpu_finalize_init()
loongarch/cpu: Switch to arch_cpu_finalize_init()
ia64/cpu: Switch to arch_cpu_finalize_init()
ARM: cpu: Switch to arch_cpu_finalize_init()
x86/cpu: Switch to arch_cpu_finalize_init()
init: Provide arch_cpu_finalize_init()

+194 -352
+3
arch/Kconfig
··· 285 285 config ARCH_HAS_DMA_CLEAR_UNCACHED 286 286 bool 287 287 288 + config ARCH_HAS_CPU_FINALIZE_INIT 289 + bool 290 + 288 291 # Select if arch init_task must go in the __init_task_data section 289 292 config ARCH_TASK_STRUCT_ON_STACK 290 293 bool
-20
arch/alpha/include/asm/bugs.h
··· 1 - /* 2 - * include/asm-alpha/bugs.h 3 - * 4 - * Copyright (C) 1994 Linus Torvalds 5 - */ 6 - 7 - /* 8 - * This is included by init/main.c to check for architecture-dependent bugs. 9 - * 10 - * Needs: 11 - * void check_bugs(void); 12 - */ 13 - 14 - /* 15 - * I don't know of any alpha bugs yet.. Nice chip 16 - */ 17 - 18 - static void check_bugs(void) 19 - { 20 - }
+1
arch/arm/Kconfig
··· 5 5 select ARCH_32BIT_OFF_T 6 6 select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND 7 7 select ARCH_HAS_BINFMT_FLAT 8 + select ARCH_HAS_CPU_FINALIZE_INIT if MMU 8 9 select ARCH_HAS_CURRENT_STACK_POINTER 9 10 select ARCH_HAS_DEBUG_VIRTUAL if MMU 10 11 select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
-4
arch/arm/include/asm/bugs.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 - * arch/arm/include/asm/bugs.h 4 - * 5 3 * Copyright (C) 1995-2003 Russell King 6 4 */ 7 5 #ifndef __ASM_BUGS_H ··· 8 10 extern void check_writebuffer_bugs(void); 9 11 10 12 #ifdef CONFIG_MMU 11 - extern void check_bugs(void); 12 13 extern void check_other_bugs(void); 13 14 #else 14 - #define check_bugs() do { } while (0) 15 15 #define check_other_bugs() do { } while (0) 16 16 #endif 17 17
+2 -1
arch/arm/kernel/bugs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/init.h> 3 + #include <linux/cpu.h> 3 4 #include <asm/bugs.h> 4 5 #include <asm/proc-fns.h> 5 6 ··· 12 11 #endif 13 12 } 14 13 15 - void __init check_bugs(void) 14 + void __init arch_cpu_finalize_init(void) 16 15 { 17 16 check_writebuffer_bugs(); 18 17 check_other_bugs();
+1
arch/ia64/Kconfig
··· 9 9 config IA64 10 10 bool 11 11 select ARCH_BINFMT_ELF_EXTRA_PHDRS 12 + select ARCH_HAS_CPU_FINALIZE_INIT 12 13 select ARCH_HAS_DMA_MARK_CLEAN 13 14 select ARCH_HAS_STRNCPY_FROM_USER 14 15 select ARCH_HAS_STRNLEN_USER
-20
arch/ia64/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This is included by init/main.c to check for architecture-dependent bugs. 4 - * 5 - * Needs: 6 - * void check_bugs(void); 7 - * 8 - * Based on <asm-alpha/bugs.h>. 9 - * 10 - * Modified 1998, 1999, 2003 11 - * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. 12 - */ 13 - #ifndef _ASM_IA64_BUGS_H 14 - #define _ASM_IA64_BUGS_H 15 - 16 - #include <asm/processor.h> 17 - 18 - extern void check_bugs (void); 19 - 20 - #endif /* _ASM_IA64_BUGS_H */
+1 -2
arch/ia64/kernel/setup.c
··· 1067 1067 } 1068 1068 } 1069 1069 1070 - void __init 1071 - check_bugs (void) 1070 + void __init arch_cpu_finalize_init(void) 1072 1071 { 1073 1072 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, 1074 1073 (unsigned long) __end___mckinley_e9_bundles);
+1
arch/loongarch/Kconfig
··· 10 10 select ARCH_ENABLE_MEMORY_HOTPLUG 11 11 select ARCH_ENABLE_MEMORY_HOTREMOVE 12 12 select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI 13 + select ARCH_HAS_CPU_FINALIZE_INIT 13 14 select ARCH_HAS_FORTIFY_SOURCE 14 15 select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS 15 16 select ARCH_HAS_PTE_SPECIAL
-15
arch/loongarch/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This is included by init/main.c to check for architecture-dependent bugs. 4 - * 5 - * Copyright (C) 2020-2022 Loongson Technology Corporation Limited 6 - */ 7 - #ifndef _ASM_BUGS_H 8 - #define _ASM_BUGS_H 9 - 10 - #include <asm/cpu.h> 11 - #include <asm/cpu-info.h> 12 - 13 - extern void check_bugs(void); 14 - 15 - #endif /* _ASM_BUGS_H */
+2 -2
arch/loongarch/kernel/setup.c
··· 12 12 */ 13 13 #include <linux/init.h> 14 14 #include <linux/acpi.h> 15 + #include <linux/cpu.h> 15 16 #include <linux/dmi.h> 16 17 #include <linux/efi.h> 17 18 #include <linux/export.h> ··· 38 37 #include <asm/addrspace.h> 39 38 #include <asm/alternative.h> 40 39 #include <asm/bootinfo.h> 41 - #include <asm/bugs.h> 42 40 #include <asm/cache.h> 43 41 #include <asm/cpu.h> 44 42 #include <asm/dma.h> ··· 87 87 return "generic-loongson-machine"; 88 88 } 89 89 90 - void __init check_bugs(void) 90 + void __init arch_cpu_finalize_init(void) 91 91 { 92 92 alternative_instructions(); 93 93 }
+1
arch/m68k/Kconfig
··· 4 4 default y 5 5 select ARCH_32BIT_OFF_T 6 6 select ARCH_HAS_BINFMT_FLAT 7 + select ARCH_HAS_CPU_FINALIZE_INIT if MMU 7 8 select ARCH_HAS_CURRENT_STACK_POINTER 8 9 select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE 9 10 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
-21
arch/m68k/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * include/asm-m68k/bugs.h 4 - * 5 - * Copyright (C) 1994 Linus Torvalds 6 - */ 7 - 8 - /* 9 - * This is included by init/main.c to check for architecture-dependent bugs. 10 - * 11 - * Needs: 12 - * void check_bugs(void); 13 - */ 14 - 15 - #ifdef CONFIG_MMU 16 - extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */ 17 - #else 18 - static void check_bugs(void) 19 - { 20 - } 21 - #endif
+2 -1
arch/m68k/kernel/setup_mm.c
··· 10 10 */ 11 11 12 12 #include <linux/kernel.h> 13 + #include <linux/cpu.h> 13 14 #include <linux/mm.h> 14 15 #include <linux/sched.h> 15 16 #include <linux/delay.h> ··· 505 504 module_init(proc_hardware_init); 506 505 #endif 507 506 508 - void check_bugs(void) 507 + void __init arch_cpu_finalize_init(void) 509 508 { 510 509 #if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU) 511 510 if (m68k_fputype == 0) {
+1
arch/mips/Kconfig
··· 4 4 default y 5 5 select ARCH_32BIT_OFF_T if !64BIT 6 6 select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT 7 + select ARCH_HAS_CPU_FINALIZE_INIT 7 8 select ARCH_HAS_CURRENT_STACK_POINTER if !CC_IS_CLANG || CLANG_VERSION >= 140000 8 9 select ARCH_HAS_DEBUG_VIRTUAL if !64BIT 9 10 select ARCH_HAS_FORTIFY_SOURCE
-17
arch/mips/include/asm/bugs.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * This is included by init/main.c to check for architecture-dependent bugs. 4 - * 5 3 * Copyright (C) 2007 Maciej W. Rozycki 6 - * 7 - * Needs: 8 - * void check_bugs(void); 9 4 */ 10 5 #ifndef _ASM_BUGS_H 11 6 #define _ASM_BUGS_H 12 7 13 8 #include <linux/bug.h> 14 - #include <linux/delay.h> 15 9 #include <linux/smp.h> 16 10 17 11 #include <asm/cpu.h> ··· 17 23 18 24 extern void check_bugs32(void); 19 25 extern void check_bugs64(void); 20 - 21 - static inline void __init check_bugs(void) 22 - { 23 - unsigned int cpu = smp_processor_id(); 24 - 25 - cpu_data[cpu].udelay_val = loops_per_jiffy; 26 - check_bugs32(); 27 - 28 - if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) 29 - check_bugs64(); 30 - } 31 26 32 27 static inline int r4k_daddiu_bug(void) 33 28 {
+13
arch/mips/kernel/setup.c
··· 11 11 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki 12 12 */ 13 13 #include <linux/init.h> 14 + #include <linux/cpu.h> 15 + #include <linux/delay.h> 14 16 #include <linux/ioport.h> 15 17 #include <linux/export.h> 16 18 #include <linux/screen_info.h> ··· 843 841 } 844 842 early_param("nocoherentio", setnocoherentio); 845 843 #endif 844 + 845 + void __init arch_cpu_finalize_init(void) 846 + { 847 + unsigned int cpu = smp_processor_id(); 848 + 849 + cpu_data[cpu].udelay_val = loops_per_jiffy; 850 + check_bugs32(); 851 + 852 + if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) 853 + check_bugs64(); 854 + }
-20
arch/parisc/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * include/asm-parisc/bugs.h 4 - * 5 - * Copyright (C) 1999 Mike Shaver 6 - */ 7 - 8 - /* 9 - * This is included by init/main.c to check for architecture-dependent bugs. 10 - * 11 - * Needs: 12 - * void check_bugs(void); 13 - */ 14 - 15 - #include <asm/processor.h> 16 - 17 - static inline void check_bugs(void) 18 - { 19 - // identify_cpu(&boot_cpu_data); 20 - }
-15
arch/powerpc/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 - #ifndef _ASM_POWERPC_BUGS_H 3 - #define _ASM_POWERPC_BUGS_H 4 - 5 - /* 6 - */ 7 - 8 - /* 9 - * This file is included by 'init/main.c' to check for 10 - * architecture-dependent bugs. 11 - */ 12 - 13 - static inline void check_bugs(void) { } 14 - 15 - #endif /* _ASM_POWERPC_BUGS_H */
+1
arch/sh/Kconfig
··· 6 6 select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU 7 7 select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) 8 8 select ARCH_HAS_BINFMT_FLAT if !MMU 9 + select ARCH_HAS_CPU_FINALIZE_INIT 9 10 select ARCH_HAS_CURRENT_STACK_POINTER 10 11 select ARCH_HAS_GIGANTIC_PAGE 11 12 select ARCH_HAS_GCOV_PROFILE_ALL
-74
arch/sh/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __ASM_SH_BUGS_H 3 - #define __ASM_SH_BUGS_H 4 - 5 - /* 6 - * This is included by init/main.c to check for architecture-dependent bugs. 7 - * 8 - * Needs: 9 - * void check_bugs(void); 10 - */ 11 - 12 - /* 13 - * I don't know of any Super-H bugs yet. 14 - */ 15 - 16 - #include <asm/processor.h> 17 - 18 - extern void select_idle_routine(void); 19 - 20 - static void __init check_bugs(void) 21 - { 22 - extern unsigned long loops_per_jiffy; 23 - char *p = &init_utsname()->machine[2]; /* "sh" */ 24 - 25 - select_idle_routine(); 26 - 27 - current_cpu_data.loops_per_jiffy = loops_per_jiffy; 28 - 29 - switch (current_cpu_data.family) { 30 - case CPU_FAMILY_SH2: 31 - *p++ = '2'; 32 - break; 33 - case CPU_FAMILY_SH2A: 34 - *p++ = '2'; 35 - *p++ = 'a'; 36 - break; 37 - case CPU_FAMILY_SH3: 38 - *p++ = '3'; 39 - break; 40 - case CPU_FAMILY_SH4: 41 - *p++ = '4'; 42 - break; 43 - case CPU_FAMILY_SH4A: 44 - *p++ = '4'; 45 - *p++ = 'a'; 46 - break; 47 - case CPU_FAMILY_SH4AL_DSP: 48 - *p++ = '4'; 49 - *p++ = 'a'; 50 - *p++ = 'l'; 51 - *p++ = '-'; 52 - *p++ = 'd'; 53 - *p++ = 's'; 54 - *p++ = 'p'; 55 - break; 56 - case CPU_FAMILY_UNKNOWN: 57 - /* 58 - * Specifically use CPU_FAMILY_UNKNOWN rather than 59 - * default:, so we're able to have the compiler whine 60 - * about unhandled enumerations. 61 - */ 62 - break; 63 - } 64 - 65 - printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data)); 66 - 67 - #ifndef __LITTLE_ENDIAN__ 68 - /* 'eb' means 'Endian Big' */ 69 - *p++ = 'e'; 70 - *p++ = 'b'; 71 - #endif 72 - *p = '\0'; 73 - } 74 - #endif /* __ASM_SH_BUGS_H */
+2
arch/sh/include/asm/processor.h
··· 166 166 #define instruction_size(insn) (2) 167 167 #endif 168 168 169 + void select_idle_routine(void); 170 + 169 171 #endif /* __ASSEMBLY__ */ 170 172 171 173 #include <asm/processor_32.h>
+1
arch/sh/kernel/idle.c
··· 15 15 #include <linux/irqflags.h> 16 16 #include <linux/smp.h> 17 17 #include <linux/atomic.h> 18 + #include <asm/processor.h> 18 19 #include <asm/smp.h> 19 20 #include <asm/bl_bit.h> 20 21
+55
arch/sh/kernel/setup.c
··· 43 43 #include <asm/smp.h> 44 44 #include <asm/mmu_context.h> 45 45 #include <asm/mmzone.h> 46 + #include <asm/processor.h> 46 47 #include <asm/sparsemem.h> 47 48 #include <asm/platform_early.h> 48 49 ··· 354 353 int test_mode_pin(int pin) 355 354 { 356 355 return sh_mv.mv_mode_pins() & pin; 356 + } 357 + 358 + void __init arch_cpu_finalize_init(void) 359 + { 360 + char *p = &init_utsname()->machine[2]; /* "sh" */ 361 + 362 + select_idle_routine(); 363 + 364 + current_cpu_data.loops_per_jiffy = loops_per_jiffy; 365 + 366 + switch (current_cpu_data.family) { 367 + case CPU_FAMILY_SH2: 368 + *p++ = '2'; 369 + break; 370 + case CPU_FAMILY_SH2A: 371 + *p++ = '2'; 372 + *p++ = 'a'; 373 + break; 374 + case CPU_FAMILY_SH3: 375 + *p++ = '3'; 376 + break; 377 + case CPU_FAMILY_SH4: 378 + *p++ = '4'; 379 + break; 380 + case CPU_FAMILY_SH4A: 381 + *p++ = '4'; 382 + *p++ = 'a'; 383 + break; 384 + case CPU_FAMILY_SH4AL_DSP: 385 + *p++ = '4'; 386 + *p++ = 'a'; 387 + *p++ = 'l'; 388 + *p++ = '-'; 389 + *p++ = 'd'; 390 + *p++ = 's'; 391 + *p++ = 'p'; 392 + break; 393 + case CPU_FAMILY_UNKNOWN: 394 + /* 395 + * Specifically use CPU_FAMILY_UNKNOWN rather than 396 + * default:, so we're able to have the compiler whine 397 + * about unhandled enumerations. 398 + */ 399 + break; 400 + } 401 + 402 + pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data)); 403 + 404 + #ifndef __LITTLE_ENDIAN__ 405 + /* 'eb' means 'Endian Big' */ 406 + *p++ = 'e'; 407 + *p++ = 'b'; 408 + #endif 409 + *p = '\0'; 357 410 }
+1
arch/sparc/Kconfig
··· 52 52 config SPARC32 53 53 def_bool !64BIT 54 54 select ARCH_32BIT_OFF_T 55 + select ARCH_HAS_CPU_FINALIZE_INIT if !SMP 55 56 select ARCH_HAS_SYNC_DMA_FOR_CPU 56 57 select CLZ_TAB 57 58 select DMA_DIRECT_REMAP
-18
arch/sparc/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* include/asm/bugs.h: Sparc probes for various bugs. 3 - * 4 - * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) 5 - */ 6 - 7 - #ifdef CONFIG_SPARC32 8 - #include <asm/cpudata.h> 9 - #endif 10 - 11 - extern unsigned long loops_per_jiffy; 12 - 13 - static void __init check_bugs(void) 14 - { 15 - #if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) 16 - cpu_data(0).udelay_val = loops_per_jiffy; 17 - #endif 18 - }
+7
arch/sparc/kernel/setup_32.c
··· 412 412 } 413 413 414 414 subsys_initcall(topology_init); 415 + 416 + #if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) 417 + void __init arch_cpu_finalize_init(void) 418 + { 419 + cpu_data(0).udelay_val = loops_per_jiffy; 420 + } 421 + #endif
+1
arch/um/Kconfig
··· 6 6 bool 7 7 default y 8 8 select ARCH_EPHEMERAL_INODES 9 + select ARCH_HAS_CPU_FINALIZE_INIT 9 10 select ARCH_HAS_FORTIFY_SOURCE 10 11 select ARCH_HAS_GCOV_PROFILE_ALL 11 12 select ARCH_HAS_KCOV
-7
arch/um/include/asm/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __UM_BUGS_H 3 - #define __UM_BUGS_H 4 - 5 - void check_bugs(void); 6 - 7 - #endif
+2 -1
arch/um/kernel/um_arch.c
··· 3 3 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) 4 4 */ 5 5 6 + #include <linux/cpu.h> 6 7 #include <linux/delay.h> 7 8 #include <linux/init.h> 8 9 #include <linux/mm.h> ··· 431 430 } 432 431 } 433 432 434 - void __init check_bugs(void) 433 + void __init arch_cpu_finalize_init(void) 435 434 { 436 435 arch_check_bugs(); 437 436 os_check_bugs();
+1
arch/x86/Kconfig
··· 71 71 select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI 72 72 select ARCH_HAS_CACHE_LINE_SIZE 73 73 select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION 74 + select ARCH_HAS_CPU_FINALIZE_INIT 74 75 select ARCH_HAS_CURRENT_STACK_POINTER 75 76 select ARCH_HAS_DEBUG_VIRTUAL 76 77 select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
-2
arch/x86/include/asm/bugs.h
··· 4 4 5 5 #include <asm/processor.h> 6 6 7 - extern void check_bugs(void); 8 - 9 7 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32) 10 8 int ppro_with_ram_bug(void); 11 9 #else
+1 -1
arch/x86/include/asm/fpu/api.h
··· 109 109 110 110 /* Boot, hotplug and resume */ 111 111 extern void fpu__init_cpu(void); 112 - extern void fpu__init_system(struct cpuinfo_x86 *c); 112 + extern void fpu__init_system(void); 113 113 extern void fpu__init_check_bugs(void); 114 114 extern void fpu__resume_cpu(void); 115 115
+6 -3
arch/x86/include/asm/mem_encrypt.h
··· 17 17 18 18 #include <asm/bootparam.h> 19 19 20 + #ifdef CONFIG_X86_MEM_ENCRYPT 21 + void __init mem_encrypt_init(void); 22 + #else 23 + static inline void mem_encrypt_init(void) { } 24 + #endif 25 + 20 26 #ifdef CONFIG_AMD_MEM_ENCRYPT 21 27 22 28 extern u64 sme_me_mask; ··· 92 86 #define __bss_decrypted 93 87 94 88 #endif /* CONFIG_AMD_MEM_ENCRYPT */ 95 - 96 - /* Architecture __weak replacement functions */ 97 - void __init mem_encrypt_init(void); 98 89 99 90 void add_encrypt_protection_map(void); 100 91
-2
arch/x86/include/asm/sigframe.h
··· 85 85 86 86 #endif /* CONFIG_X86_64 */ 87 87 88 - void __init init_sigframe_size(void); 89 - 90 88 #endif /* _ASM_X86_SIGFRAME_H */
+1 -50
arch/x86/kernel/cpu/bugs.c
··· 9 9 * - Andrew D. Balsa (code cleanup). 10 10 */ 11 11 #include <linux/init.h> 12 - #include <linux/utsname.h> 13 12 #include <linux/cpu.h> 14 13 #include <linux/module.h> 15 14 #include <linux/nospec.h> ··· 26 27 #include <asm/msr.h> 27 28 #include <asm/vmx.h> 28 29 #include <asm/paravirt.h> 29 - #include <asm/alternative.h> 30 - #include <asm/set_memory.h> 31 30 #include <asm/intel-family.h> 32 31 #include <asm/e820/api.h> 33 32 #include <asm/hypervisor.h> ··· 122 125 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); 123 126 EXPORT_SYMBOL_GPL(mmio_stale_data_clear); 124 127 125 - void __init check_bugs(void) 128 + void __init cpu_select_mitigations(void) 126 129 { 127 - identify_boot_cpu(); 128 - 129 - /* 130 - * identify_boot_cpu() initialized SMT support information, let the 131 - * core code know. 132 - */ 133 - cpu_smt_check_topology(); 134 - 135 - if (!IS_ENABLED(CONFIG_SMP)) { 136 - pr_info("CPU: "); 137 - print_cpu_info(&boot_cpu_data); 138 - } 139 - 140 130 /* 141 131 * Read the SPEC_CTRL MSR to account for reserved bits which may 142 132 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD ··· 160 176 md_clear_select_mitigation(); 161 177 srbds_select_mitigation(); 162 178 l1d_flush_select_mitigation(); 163 - 164 - arch_smt_update(); 165 - 166 - #ifdef CONFIG_X86_32 167 - /* 168 - * Check whether we are able to run this kernel safely on SMP. 169 - * 170 - * - i386 is no longer supported. 171 - * - In order to run on anything without a TSC, we need to be 172 - * compiled for a i486. 173 - */ 174 - if (boot_cpu_data.x86 < 4) 175 - panic("Kernel requires i486+ for 'invlpg' and other features"); 176 - 177 - init_utsname()->machine[1] = 178 - '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 179 - alternative_instructions(); 180 - 181 - fpu__init_check_bugs(); 182 - #else /* CONFIG_X86_64 */ 183 - alternative_instructions(); 184 - 185 - /* 186 - * Make sure the first 2MB area is not mapped by huge pages 187 - * There are typically fixed size MTRRs in there and overlapping 188 - * MTRRs into large pages causes slow downs. 189 - * 190 - * Right now we don't do that with gbpages because there seems 191 - * very little benefit for that case. 192 - */ 193 - if (!direct_gbpages) 194 - set_memory_4k((unsigned long)__va(0), 1); 195 - #endif 196 179 }
+72 -7
arch/x86/kernel/cpu/common.c
··· 18 18 #include <linux/init.h> 19 19 #include <linux/kprobes.h> 20 20 #include <linux/kgdb.h> 21 + #include <linux/mem_encrypt.h> 21 22 #include <linux/smp.h> 23 + #include <linux/cpu.h> 22 24 #include <linux/io.h> 23 25 #include <linux/syscore_ops.h> 24 26 #include <linux/pgtable.h> 25 27 #include <linux/stackprotector.h> 28 + #include <linux/utsname.h> 26 29 30 + #include <asm/alternative.h> 27 31 #include <asm/cmdline.h> 28 32 #include <asm/perf_event.h> 29 33 #include <asm/mmu_context.h> ··· 63 59 #include <asm/intel-family.h> 64 60 #include <asm/cpu_device_id.h> 65 61 #include <asm/uv/uv.h> 66 - #include <asm/sigframe.h> 62 + #include <asm/set_memory.h> 67 63 #include <asm/traps.h> 68 64 #include <asm/sev.h> ··· 1604 1600 1605 1601 sld_setup(c); 1606 1602 1607 - fpu__init_system(c); 1608 - 1609 - init_sigframe_size(); 1610 - 1611 1603 #ifdef CONFIG_X86_32 1612 1604 /* 1613 1605 * Regardless of whether PCID is enumerated, the SDM says ··· 2285 2285 2286 2286 doublefault_init_cpu_tss(); 2287 2287 2288 - fpu__init_cpu(); 2289 - 2290 2288 if (is_uv_system()) 2291 2289 uv_cpu_init(); 2292 2290 ··· 2300 2302 */ 2301 2303 cpu_init_exception_handling(); 2302 2304 cpu_init(); 2305 + fpu__init_cpu(); 2303 2306 } 2304 2307 #endif 2305 2308 ··· 2360 2361 cpu_bugs_smt_update(); 2361 2362 /* Check whether IPI broadcasting can be enabled */ 2362 2363 apic_smt_update(); 2364 + } 2365 + 2366 + void __init arch_cpu_finalize_init(void) 2367 + { 2368 + identify_boot_cpu(); 2369 + 2370 + /* 2371 + * identify_boot_cpu() initialized SMT support information, let the 2372 + * core code know. 2373 + */ 2374 + cpu_smt_check_topology(); 2375 + 2376 + if (!IS_ENABLED(CONFIG_SMP)) { 2377 + pr_info("CPU: "); 2378 + print_cpu_info(&boot_cpu_data); 2379 + } 2380 + 2381 + cpu_select_mitigations(); 2382 + 2383 + arch_smt_update(); 2384 + 2385 + if (IS_ENABLED(CONFIG_X86_32)) { 2386 + /* 2387 + * Check whether this is a real i386 which is not longer 2388 + * supported and fixup the utsname. 2389 + */ 2390 + if (boot_cpu_data.x86 < 4) 2391 + panic("Kernel requires i486+ for 'invlpg' and other features"); 2392 + 2393 + init_utsname()->machine[1] = 2394 + '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 2395 + } 2396 + 2397 + /* 2398 + * Must be before alternatives because it might set or clear 2399 + * feature bits. 2400 + */ 2401 + fpu__init_system(); 2402 + fpu__init_cpu(); 2403 + 2404 + alternative_instructions(); 2405 + 2406 + if (IS_ENABLED(CONFIG_X86_64)) { 2407 + /* 2408 + * Make sure the first 2MB area is not mapped by huge pages 2409 + * There are typically fixed size MTRRs in there and overlapping 2410 + * MTRRs into large pages causes slow downs. 2411 + * 2412 + * Right now we don't do that with gbpages because there seems 2413 + * very little benefit for that case. 2414 + */ 2415 + if (!direct_gbpages) 2416 + set_memory_4k((unsigned long)__va(0), 1); 2417 + } else { 2418 + fpu__init_check_bugs(); 2419 + } 2420 + 2421 + /* 2422 + * This needs to be called before any devices perform DMA 2423 + * operations that might use the SWIOTLB bounce buffers. It will 2424 + * mark the bounce buffers as decrypted so that their usage will 2425 + * not cause "plain-text" data to be decrypted when accessed. It 2426 + * must be called after late_time_init() so that Hyper-V x86/x64 2427 + * hypercalls work when the SWIOTLB bounce buffers are decrypted. 2428 + */ 2429 + mem_encrypt_init(); 2363 2430 }
+1
arch/x86/kernel/cpu/cpu.h
··· 79 79 extern void check_null_seg_clears_base(struct cpuinfo_x86 *c); 80 80 81 81 unsigned int aperfmperf_get_khz(int cpu); 82 + void cpu_select_mitigations(void); 82 83 83 84 extern void x86_spec_ctrl_setup_ap(void); 84 85 extern void update_srbds_msr(void);
+4 -4
arch/x86/kernel/fpu/init.c
··· 53 53 fpu__init_cpu_xstate(); 54 54 } 55 55 56 - static bool fpu__probe_without_cpuid(void) 56 + static bool __init fpu__probe_without_cpuid(void) 57 57 { 58 58 unsigned long cr0; 59 59 u16 fsw, fcw; ··· 71 71 return fsw == 0 && (fcw & 0x103f) == 0x003f; 72 72 } 73 73 74 - static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) 74 + static void __init fpu__init_system_early_generic(void) 75 75 { 76 76 if (!boot_cpu_has(X86_FEATURE_CPUID) && 77 77 !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { ··· 211 211 * Called on the boot CPU once per system bootup, to set up the initial 212 212 * FPU state that is later cloned into all processes: 213 213 */ 214 - void __init fpu__init_system(struct cpuinfo_x86 *c) 214 + void __init fpu__init_system(void) 215 215 { 216 216 fpstate_reset(&current->thread.fpu); 217 - fpu__init_system_early_generic(c); 217 + fpu__init_system_early_generic(); 218 218 219 219 /* 220 220 * The FPU has to be operational for some of the
+3 -1
arch/x86/kernel/signal.c
··· 182 182 static unsigned long __ro_after_init max_frame_size; 183 183 static unsigned int __ro_after_init fpu_default_state_size; 184 184 185 - void __init init_sigframe_size(void) 185 + static int __init init_sigframe_size(void) 186 186 { 187 187 fpu_default_state_size = fpu__get_fpstate_size(); 188 188 ··· 194 194 max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT); 195 195 196 196 pr_info("max sigframe size: %lu\n", max_frame_size); 197 + return 0; 197 198 } 199 + early_initcall(init_sigframe_size); 198 200 199 201 unsigned long get_sigframe_size(void) 200 202 {
-18
arch/xtensa/include/asm/bugs.h
··· 1 - /* 2 - * include/asm-xtensa/bugs.h 3 - * 4 - * This is included by init/main.c to check for architecture-dependent bugs. 5 - * 6 - * Xtensa processors don't have any bugs. :) 7 - * 8 - * This file is subject to the terms and conditions of the GNU General 9 - * Public License. See the file "COPYING" in the main directory of 10 - * this archive for more details. 11 - */ 12 - 13 - #ifndef _XTENSA_BUGS_H 14 - #define _XTENSA_BUGS_H 15 - 16 - static void check_bugs(void) { } 17 - 18 - #endif /* _XTENSA_BUGS_H */
-11
include/asm-generic/bugs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __ASM_GENERIC_BUGS_H 3 - #define __ASM_GENERIC_BUGS_H 4 - /* 5 - * This file is included by 'init/main.c' to check for 6 - * architecture-dependent bugs. 7 - */ 8 - 9 - static inline void check_bugs(void) { } 10 - 11 - #endif /* __ASM_GENERIC_BUGS_H */
+6
include/linux/cpu.h
··· 184 184 void arch_cpu_idle_exit(void); 185 185 void __noreturn arch_cpu_idle_dead(void); 186 186 187 + #ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT 188 + void arch_cpu_finalize_init(void); 189 + #else 190 + static inline void arch_cpu_finalize_init(void) { } 191 + #endif 192 + 187 193 int cpu_report_state(int cpu); 188 194 int cpu_check_up_prepare(int cpu); 189 195 void cpu_set_state_online(int cpu);
+1 -15
init/main.c
··· 95 95 #include <linux/cache.h> 96 96 #include <linux/rodata_test.h> 97 97 #include <linux/jump_label.h> 98 - #include <linux/mem_encrypt.h> 99 98 #include <linux/kcsan.h> 100 99 #include <linux/init_syscalls.h> 101 100 #include <linux/stackdepot.h> ··· 102 103 #include <net/net_namespace.h> 103 104 104 105 #include <asm/io.h> 105 - #include <asm/bugs.h> 106 106 #include <asm/setup.h> 107 107 #include <asm/sections.h> 108 108 #include <asm/cacheflush.h> ··· 785 787 } 786 788 #endif 787 789 788 - void __init __weak mem_encrypt_init(void) { } 789 - 790 790 void __init __weak poking_init(void) { } 791 791 792 792 void __init __weak pgtable_cache_init(void) { } ··· 1038 1042 sched_clock_init(); 1039 1043 calibrate_delay(); 1040 1044 1041 - /* 1042 - * This needs to be called before any devices perform DMA 1043 - * operations that might use the SWIOTLB bounce buffers. It will 1044 - * mark the bounce buffers as decrypted so that their usage will 1045 - * not cause "plain-text" data to be decrypted when accessed. It 1046 - * must be called after late_time_init() so that Hyper-V x86/x64 1047 - * hypercalls work when the SWIOTLB bounce buffers are decrypted. 1048 - */ 1049 - mem_encrypt_init(); 1045 + arch_cpu_finalize_init(); 1050 1046 1051 1047 pid_idr_init(); 1052 1048 anon_vma_init(); ··· 1065 1077 cgroup_init(); 1066 1078 taskstats_init_early(); 1067 1079 delayacct_init(); 1068 - 1069 - check_bugs(); 1070 1080 1071 1081 acpi_subsystem_init(); 1072 1082 arch_post_acpi_subsys_init();