Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

objtool/idle: Validate __cpuidle code as noinstr

Idle code is very much like entry code in that RCU isn't available. As
such, add a little validation.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Ulf Hansson <ulf.hansson@linaro.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20230112195540.373461409@infradead.org

Authored by Peter Zijlstra and committed by Ingo Molnar
2b5a0e42 00717eb8

+27 -45
-1
arch/alpha/kernel/vmlinux.lds.S
··· 27 27 HEAD_TEXT 28 28 TEXT_TEXT 29 29 SCHED_TEXT 30 - CPUIDLE_TEXT 31 30 LOCK_TEXT 32 31 *(.fixup) 33 32 *(.gnu.warning)
-1
arch/arc/kernel/vmlinux.lds.S
··· 85 85 _stext = .; 86 86 TEXT_TEXT 87 87 SCHED_TEXT 88 - CPUIDLE_TEXT 89 88 LOCK_TEXT 90 89 KPROBES_TEXT 91 90 IRQENTRY_TEXT
-1
arch/arm/include/asm/vmlinux.lds.h
··· 96 96 SOFTIRQENTRY_TEXT \ 97 97 TEXT_TEXT \ 98 98 SCHED_TEXT \ 99 - CPUIDLE_TEXT \ 100 99 LOCK_TEXT \ 101 100 KPROBES_TEXT \ 102 101 ARM_STUBS_TEXT \
-1
arch/arm64/kernel/vmlinux.lds.S
··· 175 175 ENTRY_TEXT 176 176 TEXT_TEXT 177 177 SCHED_TEXT 178 - CPUIDLE_TEXT 179 178 LOCK_TEXT 180 179 KPROBES_TEXT 181 180 HYPERVISOR_TEXT
-1
arch/csky/kernel/vmlinux.lds.S
··· 34 34 SOFTIRQENTRY_TEXT 35 35 TEXT_TEXT 36 36 SCHED_TEXT 37 - CPUIDLE_TEXT 38 37 LOCK_TEXT 39 38 KPROBES_TEXT 40 39 *(.fixup)
-1
arch/hexagon/kernel/vmlinux.lds.S
··· 41 41 IRQENTRY_TEXT 42 42 SOFTIRQENTRY_TEXT 43 43 SCHED_TEXT 44 - CPUIDLE_TEXT 45 44 LOCK_TEXT 46 45 KPROBES_TEXT 47 46 *(.fixup)
-1
arch/ia64/kernel/vmlinux.lds.S
··· 51 51 __end_ivt_text = .; 52 52 TEXT_TEXT 53 53 SCHED_TEXT 54 - CPUIDLE_TEXT 55 54 LOCK_TEXT 56 55 KPROBES_TEXT 57 56 IRQENTRY_TEXT
-1
arch/loongarch/kernel/vmlinux.lds.S
··· 43 43 .text : { 44 44 TEXT_TEXT 45 45 SCHED_TEXT 46 - CPUIDLE_TEXT 47 46 LOCK_TEXT 48 47 KPROBES_TEXT 49 48 IRQENTRY_TEXT
-1
arch/m68k/kernel/vmlinux-nommu.lds
··· 48 48 IRQENTRY_TEXT 49 49 SOFTIRQENTRY_TEXT 50 50 SCHED_TEXT 51 - CPUIDLE_TEXT 52 51 LOCK_TEXT 53 52 *(.fixup) 54 53 . = ALIGN(16);
-1
arch/m68k/kernel/vmlinux-std.lds
··· 19 19 IRQENTRY_TEXT 20 20 SOFTIRQENTRY_TEXT 21 21 SCHED_TEXT 22 - CPUIDLE_TEXT 23 22 LOCK_TEXT 24 23 *(.fixup) 25 24 *(.gnu.warning)
-1
arch/m68k/kernel/vmlinux-sun3.lds
··· 19 19 IRQENTRY_TEXT 20 20 SOFTIRQENTRY_TEXT 21 21 SCHED_TEXT 22 - CPUIDLE_TEXT 23 22 LOCK_TEXT 24 23 *(.fixup) 25 24 *(.gnu.warning)
-1
arch/microblaze/kernel/vmlinux.lds.S
··· 36 36 EXIT_TEXT 37 37 EXIT_CALL 38 38 SCHED_TEXT 39 - CPUIDLE_TEXT 40 39 LOCK_TEXT 41 40 KPROBES_TEXT 42 41 IRQENTRY_TEXT
-1
arch/mips/kernel/vmlinux.lds.S
··· 61 61 .text : { 62 62 TEXT_TEXT 63 63 SCHED_TEXT 64 - CPUIDLE_TEXT 65 64 LOCK_TEXT 66 65 KPROBES_TEXT 67 66 IRQENTRY_TEXT
-1
arch/nios2/kernel/vmlinux.lds.S
··· 24 24 .text : { 25 25 TEXT_TEXT 26 26 SCHED_TEXT 27 - CPUIDLE_TEXT 28 27 LOCK_TEXT 29 28 IRQENTRY_TEXT 30 29 SOFTIRQENTRY_TEXT
-1
arch/openrisc/kernel/vmlinux.lds.S
··· 52 52 _stext = .; 53 53 TEXT_TEXT 54 54 SCHED_TEXT 55 - CPUIDLE_TEXT 56 55 LOCK_TEXT 57 56 KPROBES_TEXT 58 57 IRQENTRY_TEXT
-1
arch/parisc/kernel/vmlinux.lds.S
··· 86 86 TEXT_TEXT 87 87 LOCK_TEXT 88 88 SCHED_TEXT 89 - CPUIDLE_TEXT 90 89 KPROBES_TEXT 91 90 IRQENTRY_TEXT 92 91 SOFTIRQENTRY_TEXT
-1
arch/powerpc/kernel/vmlinux.lds.S
··· 111 111 #endif 112 112 NOINSTR_TEXT 113 113 SCHED_TEXT 114 - CPUIDLE_TEXT 115 114 LOCK_TEXT 116 115 KPROBES_TEXT 117 116 IRQENTRY_TEXT
-1
arch/riscv/kernel/vmlinux-xip.lds.S
··· 39 39 _stext = .; 40 40 TEXT_TEXT 41 41 SCHED_TEXT 42 - CPUIDLE_TEXT 43 42 LOCK_TEXT 44 43 KPROBES_TEXT 45 44 ENTRY_TEXT
-1
arch/riscv/kernel/vmlinux.lds.S
··· 42 42 _stext = .; 43 43 TEXT_TEXT 44 44 SCHED_TEXT 45 - CPUIDLE_TEXT 46 45 LOCK_TEXT 47 46 KPROBES_TEXT 48 47 ENTRY_TEXT
-1
arch/s390/kernel/vmlinux.lds.S
··· 42 42 HEAD_TEXT 43 43 TEXT_TEXT 44 44 SCHED_TEXT 45 - CPUIDLE_TEXT 46 45 LOCK_TEXT 47 46 KPROBES_TEXT 48 47 IRQENTRY_TEXT
-1
arch/sh/kernel/vmlinux.lds.S
··· 29 29 HEAD_TEXT 30 30 TEXT_TEXT 31 31 SCHED_TEXT 32 - CPUIDLE_TEXT 33 32 LOCK_TEXT 34 33 KPROBES_TEXT 35 34 IRQENTRY_TEXT
-1
arch/sparc/kernel/vmlinux.lds.S
··· 50 50 HEAD_TEXT 51 51 TEXT_TEXT 52 52 SCHED_TEXT 53 - CPUIDLE_TEXT 54 53 LOCK_TEXT 55 54 KPROBES_TEXT 56 55 IRQENTRY_TEXT
-1
arch/um/kernel/dyn.lds.S
··· 74 74 _stext = .; 75 75 TEXT_TEXT 76 76 SCHED_TEXT 77 - CPUIDLE_TEXT 78 77 LOCK_TEXT 79 78 IRQENTRY_TEXT 80 79 SOFTIRQENTRY_TEXT
-1
arch/um/kernel/uml.lds.S
··· 35 35 _stext = .; 36 36 TEXT_TEXT 37 37 SCHED_TEXT 38 - CPUIDLE_TEXT 39 38 LOCK_TEXT 40 39 IRQENTRY_TEXT 41 40 SOFTIRQENTRY_TEXT
+4 -7
arch/x86/include/asm/irqflags.h
··· 8 8 9 9 #include <asm/nospec-branch.h> 10 10 11 - /* Provide __cpuidle; we can't safely include <linux/cpu.h> */ 12 - #define __cpuidle __section(".cpuidle.text") 13 - 14 11 /* 15 12 * Interrupt control: 16 13 */ ··· 42 45 asm volatile("sti": : :"memory"); 43 46 } 44 47 45 - static inline __cpuidle void native_safe_halt(void) 48 + static __always_inline void native_safe_halt(void) 46 49 { 47 50 mds_idle_clear_cpu_buffers(); 48 51 asm volatile("sti; hlt": : :"memory"); 49 52 } 50 53 51 - static inline __cpuidle void native_halt(void) 54 + static __always_inline void native_halt(void) 52 55 { 53 56 mds_idle_clear_cpu_buffers(); 54 57 asm volatile("hlt": : :"memory"); ··· 81 84 * Used in the idle loop; sti takes one instruction cycle 82 85 * to complete: 83 86 */ 84 - static inline __cpuidle void arch_safe_halt(void) 87 + static __always_inline void arch_safe_halt(void) 85 88 { 86 89 native_safe_halt(); 87 90 } ··· 90 93 * Used when interrupts are already enabled or to 91 94 * shutdown the processor: 92 95 */ 93 - static inline __cpuidle void halt(void) 96 + static __always_inline void halt(void) 94 97 { 95 98 native_halt(); 96 99 }
+1 -1
arch/x86/include/asm/mwait.h
··· 105 105 * New with Core Duo processors, MWAIT can take some hints based on CPU 106 106 * capability. 107 107 */ 108 - static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) 108 + static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) 109 109 { 110 110 if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { 111 111 if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
-1
arch/x86/kernel/vmlinux.lds.S
··· 129 129 HEAD_TEXT 130 130 TEXT_TEXT 131 131 SCHED_TEXT 132 - CPUIDLE_TEXT 133 132 LOCK_TEXT 134 133 KPROBES_TEXT 135 134 SOFTIRQENTRY_TEXT
-1
arch/xtensa/kernel/vmlinux.lds.S
··· 125 125 ENTRY_TEXT 126 126 TEXT_TEXT 127 127 SCHED_TEXT 128 - CPUIDLE_TEXT 129 128 LOCK_TEXT 130 129 *(.fixup) 131 130 }
+3 -6
include/asm-generic/vmlinux.lds.h
··· 558 558 ALIGN_FUNCTION(); \ 559 559 __noinstr_text_start = .; \ 560 560 *(.noinstr.text) \ 561 + __cpuidle_text_start = .; \ 562 + *(.cpuidle.text) \ 563 + __cpuidle_text_end = .; \ 561 564 __noinstr_text_end = .; 562 565 563 566 /* ··· 600 597 __lock_text_start = .; \ 601 598 *(.spinlock.text) \ 602 599 __lock_text_end = .; 603 - 604 - #define CPUIDLE_TEXT \ 605 - ALIGN_FUNCTION(); \ 606 - __cpuidle_text_start = .; \ 607 - *(.cpuidle.text) \ 608 - __cpuidle_text_end = .; 609 600 610 601 #define KPROBES_TEXT \ 611 602 ALIGN_FUNCTION(); \
+6 -2
include/linux/compiler_types.h
··· 232 232 #endif 233 233 234 234 /* Section for code which can't be instrumented at all */ 235 - #define noinstr \ 236 - noinline notrace __attribute((__section__(".noinstr.text"))) \ 235 + #define __noinstr_section(section) \ 236 + noinline notrace __attribute((__section__(section))) \ 237 237 __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \ 238 238 __no_sanitize_memory 239 + 240 + #define noinstr __noinstr_section(".noinstr.text") 241 + 242 + #define __cpuidle __noinstr_section(".cpuidle.text") 239 243 240 244 #endif /* __KERNEL__ */ 241 245
-3
include/linux/cpu.h
··· 176 176 177 177 void cpu_idle_poll_ctrl(bool enable); 178 178 179 - /* Attach to any functions which should be considered cpuidle. */ 180 - #define __cpuidle __section(".cpuidle.text") 181 - 182 179 bool cpu_in_idle(unsigned long pc); 183 180 184 181 void arch_cpu_idle(void);
+13
tools/objtool/check.c
··· 376 376 377 377 if (!strcmp(sec->name, ".noinstr.text") || 378 378 !strcmp(sec->name, ".entry.text") || 379 + !strcmp(sec->name, ".cpuidle.text") || 379 380 !strncmp(sec->name, ".text.__x86.", 12)) 380 381 sec->noinstr = true; 381 382 ··· 3367 3366 return true; 3368 3367 3369 3368 /* 3369 + * If the symbol is a static_call trampoline, we can't tell. 3370 + */ 3371 + if (func->static_call_tramp) 3372 + return true; 3373 + 3374 + /* 3370 3375 * The __ubsan_handle_*() calls are like WARN(), they only happen when 3371 3376 * something 'BAD' happened. At the risk of taking the machine down, 3372 3377 * let them proceed to get the message out. ··· 4164 4157 } 4165 4158 4166 4159 sec = find_section_by_name(file->elf, ".entry.text"); 4160 + if (sec) { 4161 + warnings += validate_section(file, sec); 4162 + warnings += validate_unwind_hints(file, sec); 4163 + } 4164 + 4165 + sec = find_section_by_name(file->elf, ".cpuidle.text"); 4167 4166 if (sec) { 4168 4167 warnings += validate_section(file, sec); 4169 4168 warnings += validate_unwind_hints(file, sec);