Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull objtool updates from Ingo Molnar:

- Mark arch_cpu_idle_dead() __noreturn, make all architectures &
drivers that did this inconsistently follow this new, common
convention, and fix all the fallout that objtool can now detect
statically

- Fix/improve the ORC unwinder becoming unreliable due to
UNWIND_HINT_EMPTY ambiguity, split it into UNWIND_HINT_END_OF_STACK
and UNWIND_HINT_UNDEFINED to resolve it

- Fix noinstr violations in the KCSAN code and the lkdtm/stackleak code

- Generate ORC data for __pfx code

- Add more __noreturn annotations to various kernel startup/shutdown
and panic functions

- Misc improvements & fixes

* tag 'objtool-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
x86/hyperv: Mark hv_ghcb_terminate() as noreturn
scsi: message: fusion: Mark mpt_halt_firmware() __noreturn
x86/cpu: Mark {hlt,resume}_play_dead() __noreturn
btrfs: Mark btrfs_assertfail() __noreturn
objtool: Include weak functions in global_noreturns check
cpu: Mark nmi_panic_self_stop() __noreturn
cpu: Mark panic_smp_self_stop() __noreturn
arm64/cpu: Mark cpu_park_loop() and friends __noreturn
x86/head: Mark *_start_kernel() __noreturn
init: Mark start_kernel() __noreturn
init: Mark [arch_call_]rest_init() __noreturn
objtool: Generate ORC data for __pfx code
x86/linkage: Fix padding for typed functions
objtool: Separate prefix code from stack validation code
objtool: Remove superfluous dead_end_function() check
objtool: Add symbol iteration helpers
objtool: Add WARN_INSN()
scripts/objdump-func: Support multiple functions
context_tracking: Fix KCSAN noinstr violation
objtool: Add stackleak instrumentation to uaccess safe list
...

+641 -684
+1 -1
Documentation/livepatch/reliable-stacktrace.rst
··· 183 183 .. code-block:: none 184 184 185 185 SYM_CODE_START(return_to_handler) 186 - UNWIND_HINT_EMPTY 186 + UNWIND_HINT_UNDEFINED 187 187 subq $24, %rsp 188 188 189 189 /* Save the return values */
+1 -1
MAINTAINERS
··· 15180 15180 M: Josh Poimboeuf <jpoimboe@kernel.org> 15181 15181 M: Peter Zijlstra <peterz@infradead.org> 15182 15182 S: Supported 15183 + F: include/linux/objtool*.h 15183 15184 F: tools/objtool/ 15184 - F: include/linux/objtool.h 15185 15185 15186 15186 OCELOT ETHERNET SWITCH DRIVER 15187 15187 M: Vladimir Oltean <vladimir.oltean@nxp.com>
+3 -1
arch/alpha/kernel/process.c
··· 9 9 * This file handles the architecture-dependent parts of process handling. 10 10 */ 11 11 12 + #include <linux/cpu.h> 12 13 #include <linux/errno.h> 13 14 #include <linux/module.h> 14 15 #include <linux/sched.h> ··· 60 59 wtint(0); 61 60 } 62 61 63 - void arch_cpu_idle_dead(void) 62 + void __noreturn arch_cpu_idle_dead(void) 64 63 { 65 64 wtint(INT_MAX); 65 + BUG(); 66 66 } 67 67 #endif /* ALPHA_WTINT */ 68 68
+4 -2
arch/arm/kernel/smp.c
··· 320 320 * of the other hotplug-cpu capable cores, so presumably coming 321 321 * out of idle fixes this. 322 322 */ 323 - void arch_cpu_idle_dead(void) 323 + void __noreturn arch_cpu_idle_dead(void) 324 324 { 325 325 unsigned int cpu = smp_processor_id(); 326 326 ··· 382 382 : "r" (task_stack_page(current) + THREAD_SIZE - 8), 383 383 "r" (current) 384 384 : "r0"); 385 + 386 + unreachable(); 385 387 } 386 388 #endif /* CONFIG_HOTPLUG_CPU */ 387 389 ··· 779 777 * kdump fails. So split out the panic_smp_self_stop() and add 780 778 * set_cpu_online(smp_processor_id(), false). 781 779 */ 782 - void panic_smp_self_stop(void) 780 + void __noreturn panic_smp_self_stop(void) 783 781 { 784 782 pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n", 785 783 smp_processor_id());
+2 -2
arch/arm64/include/asm/exception.h
··· 31 31 return esr; 32 32 } 33 33 34 - asmlinkage void handle_bad_stack(struct pt_regs *regs); 34 + asmlinkage void __noreturn handle_bad_stack(struct pt_regs *regs); 35 35 36 36 asmlinkage void el1t_64_sync_handler(struct pt_regs *regs); 37 37 asmlinkage void el1t_64_irq_handler(struct pt_regs *regs); ··· 80 80 void do_serror(struct pt_regs *regs, unsigned long esr); 81 81 void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags); 82 82 83 - void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); 83 + void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far); 84 84 #endif /* __ASM_EXCEPTION_H */
+4 -5
arch/arm64/include/asm/smp.h
··· 100 100 extern int __cpu_disable(void); 101 101 102 102 extern void __cpu_die(unsigned int cpu); 103 - extern void cpu_die(void); 104 - extern void cpu_die_early(void); 103 + extern void __noreturn cpu_die(void); 104 + extern void __noreturn cpu_die_early(void); 105 105 106 - static inline void cpu_park_loop(void) 106 + static inline void __noreturn cpu_park_loop(void) 107 107 { 108 108 for (;;) { 109 109 wfe(); ··· 123 123 * which calls for a kernel panic. Update the boot status and park the calling 124 124 * CPU. 125 125 */ 126 - static inline void cpu_panic_kernel(void) 126 + static inline void __noreturn cpu_panic_kernel(void) 127 127 { 128 128 update_cpu_boot_status(CPU_PANIC_KERNEL); 129 129 cpu_park_loop(); ··· 143 143 144 144 extern void crash_smp_send_stop(void); 145 145 extern bool smp_crash_stop_failed(void); 146 - extern void panic_smp_self_stop(void); 147 146 148 147 #endif /* ifndef __ASSEMBLY__ */ 149 148
+1 -1
arch/arm64/kernel/entry-common.c
··· 840 840 #endif /* CONFIG_COMPAT */ 841 841 842 842 #ifdef CONFIG_VMAP_STACK 843 - asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs) 843 + asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs) 844 844 { 845 845 unsigned long esr = read_sysreg(esr_el1); 846 846 unsigned long far = read_sysreg(far_el1);
+1 -1
arch/arm64/kernel/process.c
··· 69 69 EXPORT_SYMBOL_GPL(pm_power_off); 70 70 71 71 #ifdef CONFIG_HOTPLUG_CPU 72 - void arch_cpu_idle_dead(void) 72 + void __noreturn arch_cpu_idle_dead(void) 73 73 { 74 74 cpu_die(); 75 75 }
+7 -5
arch/arm64/kernel/smp.c
··· 361 361 * Called from the idle thread for the CPU which has been shutdown. 362 362 * 363 363 */ 364 - void cpu_die(void) 364 + void __noreturn cpu_die(void) 365 365 { 366 366 unsigned int cpu = smp_processor_id(); 367 367 const struct cpu_operations *ops = get_cpu_ops(cpu); ··· 398 398 * Kill the calling secondary CPU, early in bringup before it is turned 399 399 * online. 400 400 */ 401 - void cpu_die_early(void) 401 + void __noreturn cpu_die_early(void) 402 402 { 403 403 int cpu = smp_processor_id(); 404 404 ··· 816 816 } 817 817 #endif 818 818 819 - static void local_cpu_stop(void) 819 + static void __noreturn local_cpu_stop(void) 820 820 { 821 821 set_cpu_online(smp_processor_id(), false); 822 822 ··· 830 830 * that cpu_online_mask gets correctly updated and smp_send_stop() can skip 831 831 * CPUs that have already stopped themselves. 832 832 */ 833 - void panic_smp_self_stop(void) 833 + void __noreturn panic_smp_self_stop(void) 834 834 { 835 835 local_cpu_stop(); 836 836 } ··· 839 839 static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0); 840 840 #endif 841 841 842 - static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) 842 + static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) 843 843 { 844 844 #ifdef CONFIG_KEXEC_CORE 845 845 crash_save_cpu(regs, cpu); ··· 854 854 855 855 /* just in case */ 856 856 cpu_park_loop(); 857 + #else 858 + BUG(); 857 859 #endif 858 860 } 859 861
+1 -2
arch/arm64/kernel/traps.c
··· 863 863 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack) 864 864 __aligned(16); 865 865 866 - void panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) 866 + void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far) 867 867 { 868 868 unsigned long tsk_stk = (unsigned long)current->stack; 869 869 unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr); ··· 905 905 nmi_panic(regs, "Asynchronous SError Interrupt"); 906 906 907 907 cpu_park_loop(); 908 - unreachable(); 909 908 } 910 909 911 910 bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr)
+3 -1
arch/csky/kernel/smp.c
··· 300 300 pr_notice("CPU%u: shutdown\n", cpu); 301 301 } 302 302 303 - void arch_cpu_idle_dead(void) 303 + void __noreturn arch_cpu_idle_dead(void) 304 304 { 305 305 idle_task_exit(); 306 306 ··· 317 317 "jmpi csky_start_secondary" 318 318 : 319 319 : "r" (secondary_stack)); 320 + 321 + BUG(); 320 322 } 321 323 #endif
+3 -3
arch/ia64/kernel/process.c
··· 201 201 202 202 #ifdef CONFIG_HOTPLUG_CPU 203 203 /* We don't actually take CPU down, just spin without interrupts. */ 204 - static inline void play_dead(void) 204 + static inline void __noreturn play_dead(void) 205 205 { 206 206 unsigned int this_cpu = smp_processor_id(); 207 207 ··· 219 219 BUG(); 220 220 } 221 221 #else 222 - static inline void play_dead(void) 222 + static inline void __noreturn play_dead(void) 223 223 { 224 224 BUG(); 225 225 } 226 226 #endif /* CONFIG_HOTPLUG_CPU */ 227 227 228 - void arch_cpu_idle_dead(void) 228 + void __noreturn arch_cpu_idle_dead(void) 229 229 { 230 230 play_dead(); 231 231 }
+1 -1
arch/loongarch/include/asm/smp.h
··· 99 99 loongson_cpu_die(cpu); 100 100 } 101 101 102 - extern void play_dead(void); 102 + extern void __noreturn play_dead(void); 103 103 #endif 104 104 105 105 #endif /* __ASM_SMP_H */
+1 -1
arch/loongarch/kernel/process.c
··· 62 62 EXPORT_SYMBOL(boot_option_idle_override); 63 63 64 64 #ifdef CONFIG_HOTPLUG_CPU 65 - void arch_cpu_idle_dead(void) 65 + void __noreturn arch_cpu_idle_dead(void) 66 66 { 67 67 play_dead(); 68 68 }
+1 -1
arch/loongarch/kernel/smp.c
··· 336 336 iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR); 337 337 338 338 init_fn(); 339 - unreachable(); 339 + BUG(); 340 340 } 341 341 342 342 #endif
+1
arch/mips/cavium-octeon/smp.c
··· 20 20 #include <asm/mmu_context.h> 21 21 #include <asm/time.h> 22 22 #include <asm/setup.h> 23 + #include <asm/smp.h> 23 24 24 25 #include <asm/octeon/octeon.h> 25 26
+1 -1
arch/mips/include/asm/smp.h
··· 88 88 mp_ops->cpu_die(cpu); 89 89 } 90 90 91 - extern void play_dead(void); 91 + extern void __noreturn play_dead(void); 92 92 #endif 93 93 94 94 #ifdef CONFIG_KEXEC
+1 -1
arch/mips/kernel/process.c
··· 40 40 #include <asm/stacktrace.h> 41 41 42 42 #ifdef CONFIG_HOTPLUG_CPU 43 - void arch_cpu_idle_dead(void) 43 + void __noreturn arch_cpu_idle_dead(void) 44 44 { 45 45 play_dead(); 46 46 }
+4
arch/mips/kernel/smp-bmips.c
··· 54 54 55 55 #ifdef CONFIG_SMP 56 56 57 + #include <asm/smp.h> 58 + 57 59 /* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ 58 60 unsigned long bmips_smp_boot_sp; 59 61 unsigned long bmips_smp_boot_gp; ··· 415 413 " wait\n" 416 414 " j bmips_secondary_reentry\n" 417 415 : : : "memory"); 416 + 417 + BUG(); 418 418 } 419 419 420 420 #endif /* CONFIG_HOTPLUG_CPU */
+1
arch/mips/kernel/smp-cps.c
··· 20 20 #include <asm/mipsregs.h> 21 21 #include <asm/pm-cps.h> 22 22 #include <asm/r4kcache.h> 23 + #include <asm/smp.h> 23 24 #include <asm/smp-cps.h> 24 25 #include <asm/time.h> 25 26 #include <asm/uasm.h>
+2
arch/mips/loongson64/smp.c
··· 14 14 #include <linux/cpufreq.h> 15 15 #include <linux/kexec.h> 16 16 #include <asm/processor.h> 17 + #include <asm/smp.h> 17 18 #include <asm/time.h> 18 19 #include <asm/tlbflush.h> 19 20 #include <asm/cacheflush.h> ··· 809 808 state_addr = &per_cpu(cpu_state, cpu); 810 809 mb(); 811 810 play_dead_at_ckseg1(state_addr); 811 + BUG(); 812 812 } 813 813 814 814 static int loongson3_disable_clock(unsigned int cpu)
+1 -1
arch/parisc/kernel/process.c
··· 159 159 /* 160 160 * Called from the idle thread for the CPU which has been shutdown. 161 161 */ 162 - void arch_cpu_idle_dead(void) 162 + void __noreturn arch_cpu_idle_dead(void) 163 163 { 164 164 #ifdef CONFIG_HOTPLUG_CPU 165 165 idle_task_exit();
+1 -1
arch/powerpc/include/asm/smp.h
··· 67 67 extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); 68 68 extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); 69 69 extern void smp_send_debugger_break(void); 70 - extern void start_secondary_resume(void); 70 + extern void __noreturn start_secondary_resume(void); 71 71 extern void smp_generic_give_timebase(void); 72 72 extern void smp_generic_take_timebase(void); 73 73
+1 -1
arch/powerpc/kernel/setup_64.c
··· 480 480 481 481 #endif /* CONFIG_SMP */ 482 482 483 - void panic_smp_self_stop(void) 483 + void __noreturn panic_smp_self_stop(void) 484 484 { 485 485 hard_irq_disable(); 486 486 spin_begin();
+1 -1
arch/powerpc/kernel/smp.c
··· 1752 1752 smp_ops->cpu_die(cpu); 1753 1753 } 1754 1754 1755 - void arch_cpu_idle_dead(void) 1755 + void __noreturn arch_cpu_idle_dead(void) 1756 1756 { 1757 1757 /* 1758 1758 * Disable on the down path. This will be re-enabled by
+1 -1
arch/riscv/kernel/cpu-hotplug.c
··· 72 72 /* 73 73 * Called from the idle thread for the CPU which has been shutdown. 74 74 */ 75 - void arch_cpu_idle_dead(void) 75 + void __noreturn arch_cpu_idle_dead(void) 76 76 { 77 77 idle_task_exit(); 78 78
+1 -1
arch/s390/kernel/idle.c
··· 88 88 { 89 89 } 90 90 91 - void arch_cpu_idle_dead(void) 91 + void __noreturn arch_cpu_idle_dead(void) 92 92 { 93 93 cpu_die(); 94 94 }
+1 -1
arch/s390/kernel/setup.c
··· 396 396 return 0; 397 397 } 398 398 399 - void __init arch_call_rest_init(void) 399 + void __init __noreturn arch_call_rest_init(void) 400 400 { 401 401 unsigned long stack; 402 402
+3 -2
arch/sh/include/asm/smp-ops.h
··· 24 24 mp_ops->smp_setup(); 25 25 } 26 26 27 - static inline void play_dead(void) 27 + static inline void __noreturn play_dead(void) 28 28 { 29 29 mp_ops->play_dead(); 30 + BUG(); 30 31 } 31 32 32 33 extern void register_smp_ops(struct plat_smp_ops *ops); ··· 43 42 { 44 43 } 45 44 46 - static inline void play_dead(void) 45 + static inline void __noreturn play_dead(void) 47 46 { 48 47 BUG(); 49 48 }
+2 -1
arch/sh/kernel/idle.c
··· 4 4 * 5 5 * Copyright (C) 2002 - 2009 Paul Mundt 6 6 */ 7 + #include <linux/cpu.h> 7 8 #include <linux/module.h> 8 9 #include <linux/init.h> 9 10 #include <linux/mm.h> ··· 30 29 clear_bl_bit(); 31 30 } 32 31 33 - void arch_cpu_idle_dead(void) 32 + void __noreturn arch_cpu_idle_dead(void) 34 33 { 35 34 play_dead(); 36 35 }
+1 -1
arch/sparc/include/asm/smp_64.h
··· 49 49 50 50 void smp_fill_in_cpu_possible_map(void); 51 51 void smp_fill_in_sib_core_maps(void); 52 - void cpu_play_dead(void); 52 + void __noreturn cpu_play_dead(void); 53 53 54 54 void smp_fetch_global_regs(void); 55 55 void smp_fetch_global_pmu(void);
+1 -1
arch/sparc/kernel/process_64.c
··· 95 95 } 96 96 97 97 #ifdef CONFIG_HOTPLUG_CPU 98 - void arch_cpu_idle_dead(void) 98 + void __noreturn arch_cpu_idle_dead(void) 99 99 { 100 100 sched_preempt_enable_no_resched(); 101 101 cpu_play_dead();
+13 -13
arch/x86/entry/entry_64.S
··· 205 205 */ 206 206 movq %rsp, %rdi 207 207 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 208 - UNWIND_HINT_EMPTY 208 + UNWIND_HINT_END_OF_STACK 209 209 210 210 pushq RSP-RDI(%rdi) /* RSP */ 211 211 pushq (%rdi) /* RDI */ ··· 286 286 .pushsection .text, "ax" 287 287 __FUNC_ALIGN 288 288 SYM_CODE_START_NOALIGN(ret_from_fork) 289 - UNWIND_HINT_EMPTY 289 + UNWIND_HINT_END_OF_STACK 290 290 ANNOTATE_NOENDBR // copy_thread 291 291 CALL_DEPTH_ACCOUNT 292 292 movq %rax, %rdi ··· 303 303 304 304 1: 305 305 /* kernel thread */ 306 - UNWIND_HINT_EMPTY 306 + UNWIND_HINT_END_OF_STACK 307 307 movq %r12, %rdi 308 308 CALL_NOSPEC rbx 309 309 /* ··· 388 388 389 389 .if \vector == X86_TRAP_BP 390 390 /* #BP advances %rip to the next instruction */ 391 - UNWIND_HINT_IRET_REGS offset=\has_error_code*8 signal=0 391 + UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0 392 392 .else 393 - UNWIND_HINT_IRET_REGS offset=\has_error_code*8 393 + UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 394 394 .endif 395 395 396 396 ENDBR ··· 461 461 */ 462 462 .macro idtentry_mce_db vector asmsym cfunc 463 463 SYM_CODE_START(\asmsym) 464 - UNWIND_HINT_IRET_REGS 464 + UNWIND_HINT_IRET_ENTRY 465 465 ENDBR 466 466 ASM_CLAC 467 467 cld ··· 518 518 */ 519 519 .macro idtentry_vc vector asmsym cfunc 520 520 SYM_CODE_START(\asmsym) 521 - UNWIND_HINT_IRET_REGS 521 + UNWIND_HINT_IRET_ENTRY 522 522 ENDBR 523 523 ASM_CLAC 524 524 cld ··· 582 582 */ 583 583 .macro idtentry_df vector asmsym cfunc 584 584 SYM_CODE_START(\asmsym) 585 - UNWIND_HINT_IRET_REGS offset=8 585 + UNWIND_HINT_IRET_ENTRY offset=8 586 586 ENDBR 587 587 ASM_CLAC 588 588 cld ··· 643 643 */ 644 644 movq %rsp, %rdi 645 645 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp 646 - UNWIND_HINT_EMPTY 646 + UNWIND_HINT_END_OF_STACK 647 647 648 648 /* Copy the IRET frame to the trampoline stack. */ 649 649 pushq 6*8(%rdi) /* SS */ ··· 869 869 */ 870 870 __FUNC_ALIGN 871 871 SYM_CODE_START_NOALIGN(xen_failsafe_callback) 872 - UNWIND_HINT_EMPTY 872 + UNWIND_HINT_UNDEFINED 873 873 ENDBR 874 874 movl %ds, %ecx 875 875 cmpw %cx, 0x10(%rsp) ··· 1107 1107 FENCE_SWAPGS_KERNEL_ENTRY 1108 1108 CALL_DEPTH_ACCOUNT 1109 1109 leaq 8(%rsp), %rax /* return pt_regs pointer */ 1110 - ANNOTATE_UNRET_END 1110 + VALIDATE_UNRET_END 1111 1111 RET 1112 1112 1113 1113 .Lbstep_iret: ··· 1153 1153 * when PAGE_TABLE_ISOLATION is in use. Do not clobber. 1154 1154 */ 1155 1155 SYM_CODE_START(asm_exc_nmi) 1156 - UNWIND_HINT_IRET_REGS 1156 + UNWIND_HINT_IRET_ENTRY 1157 1157 ENDBR 1158 1158 1159 1159 /* ··· 1520 1520 * MSRs to fully disable 32-bit SYSCALL. 1521 1521 */ 1522 1522 SYM_CODE_START(ignore_sysret) 1523 - UNWIND_HINT_EMPTY 1523 + UNWIND_HINT_END_OF_STACK 1524 1524 ENDBR 1525 1525 mov $-ENOSYS, %eax 1526 1526 sysretl
+1 -1
arch/x86/hyperv/ivm.c
··· 129 129 return ES_OK; 130 130 } 131 131 132 - void hv_ghcb_terminate(unsigned int set, unsigned int reason) 132 + void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason) 133 133 { 134 134 u64 val = GHCB_MSR_TERM_REQ; 135 135
+1 -1
arch/x86/include/asm/linkage.h
··· 99 99 100 100 /* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */ 101 101 #define SYM_TYPED_FUNC_START(name) \ 102 - SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \ 102 + SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \ 103 103 ENDBR 104 104 105 105 /* SYM_FUNC_START -- use for global functions */
+1 -1
arch/x86/include/asm/mshyperv.h
··· 228 228 void hv_ghcb_msr_write(u64 msr, u64 value); 229 229 void hv_ghcb_msr_read(u64 msr, u64 *value); 230 230 bool hv_ghcb_negotiate_protocol(void); 231 - void hv_ghcb_terminate(unsigned int set, unsigned int reason); 231 + void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason); 232 232 void hv_vtom_init(void); 233 233 #else 234 234 static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
+7 -7
arch/x86/include/asm/nospec-branch.h
··· 194 194 * builds. 195 195 */ 196 196 .macro ANNOTATE_RETPOLINE_SAFE 197 - .Lannotate_\@: 197 + .Lhere_\@: 198 198 .pushsection .discard.retpoline_safe 199 - _ASM_PTR .Lannotate_\@ 199 + .long .Lhere_\@ - . 200 200 .popsection 201 201 .endm 202 202 ··· 210 210 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should 211 211 * eventually turn into it's own annotation. 212 212 */ 213 - .macro ANNOTATE_UNRET_END 214 - #ifdef CONFIG_DEBUG_ENTRY 213 + .macro VALIDATE_UNRET_END 214 + #if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY) 215 215 ANNOTATE_RETPOLINE_SAFE 216 216 nop 217 217 #endif ··· 286 286 .macro UNTRAIN_RET 287 287 #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ 288 288 defined(CONFIG_CALL_DEPTH_TRACKING) 289 - ANNOTATE_UNRET_END 289 + VALIDATE_UNRET_END 290 290 ALTERNATIVE_3 "", \ 291 291 CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ 292 292 "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ ··· 297 297 .macro UNTRAIN_RET_FROM_CALL 298 298 #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ 299 299 defined(CONFIG_CALL_DEPTH_TRACKING) 300 - ANNOTATE_UNRET_END 300 + VALIDATE_UNRET_END 301 301 ALTERNATIVE_3 "", \ 302 302 CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ 303 303 "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ ··· 318 318 #define ANNOTATE_RETPOLINE_SAFE \ 319 319 "999:\n\t" \ 320 320 ".pushsection .discard.retpoline_safe\n\t" \ 321 - _ASM_PTR " 999b\n\t" \ 321 + ".long 999b - .\n\t" \ 322 322 ".popsection\n\t" 323 323 324 324 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+8 -4
arch/x86/include/asm/orc_types.h
··· 39 39 #define ORC_REG_SP_INDIRECT 9 40 40 #define ORC_REG_MAX 15 41 41 42 + #define ORC_TYPE_UNDEFINED 0 43 + #define ORC_TYPE_END_OF_STACK 1 44 + #define ORC_TYPE_CALL 2 45 + #define ORC_TYPE_REGS 3 46 + #define ORC_TYPE_REGS_PARTIAL 4 47 + 42 48 #ifndef __ASSEMBLY__ 43 49 #include <asm/byteorder.h> 44 50 ··· 62 56 #if defined(__LITTLE_ENDIAN_BITFIELD) 63 57 unsigned sp_reg:4; 64 58 unsigned bp_reg:4; 65 - unsigned type:2; 59 + unsigned type:3; 66 60 unsigned signal:1; 67 - unsigned end:1; 68 61 #elif defined(__BIG_ENDIAN_BITFIELD) 69 62 unsigned bp_reg:4; 70 63 unsigned sp_reg:4; 71 64 unsigned unused:4; 72 - unsigned end:1; 73 65 unsigned signal:1; 74 - unsigned type:2; 66 + unsigned type:3; 75 67 #endif 76 68 } __packed; 77 69
-1
arch/x86/include/asm/reboot.h
··· 28 28 void cpu_emergency_disable_virtualization(void); 29 29 30 30 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); 31 - void nmi_panic_self_stop(struct pt_regs *regs); 32 31 void nmi_shootdown_cpus(nmi_shootdown_cb callback); 33 32 void run_crash_ipi_callback(struct pt_regs *regs); 34 33
+3 -3
arch/x86/include/asm/setup.h
··· 125 125 126 126 #ifdef __i386__ 127 127 128 - asmlinkage void __init i386_start_kernel(void); 128 + asmlinkage void __init __noreturn i386_start_kernel(void); 129 129 130 130 #else 131 - asmlinkage void __init x86_64_start_kernel(char *real_mode); 132 - asmlinkage void __init x86_64_start_reservations(char *real_mode_data); 131 + asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode); 132 + asmlinkage void __init __noreturn x86_64_start_reservations(char *real_mode_data); 133 133 134 134 #endif /* __i386__ */ 135 135 #endif /* _SETUP */
+3 -2
arch/x86/include/asm/smp.h
··· 93 93 smp_ops.cpu_die(cpu); 94 94 } 95 95 96 - static inline void play_dead(void) 96 + static inline void __noreturn play_dead(void) 97 97 { 98 98 smp_ops.play_dead(); 99 + BUG(); 99 100 } 100 101 101 102 static inline void smp_send_reschedule(int cpu) ··· 125 124 int native_cpu_disable(void); 126 125 int common_cpu_die(unsigned int cpu); 127 126 void native_cpu_die(unsigned int cpu); 128 - void hlt_play_dead(void); 127 + void __noreturn hlt_play_dead(void); 129 128 void native_play_dead(void); 130 129 void play_dead_common(void); 131 130 void wbinvd_on_cpu(int cpu);
+14 -4
arch/x86/include/asm/unwind_hints.h
··· 7 7 8 8 #ifdef __ASSEMBLY__ 9 9 10 - .macro UNWIND_HINT_EMPTY 11 - UNWIND_HINT type=UNWIND_HINT_TYPE_CALL end=1 10 + .macro UNWIND_HINT_END_OF_STACK 11 + UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK 12 + .endm 13 + 14 + .macro UNWIND_HINT_UNDEFINED 15 + UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED 12 16 .endm 13 17 14 18 .macro UNWIND_HINT_ENTRY 15 - UNWIND_HINT type=UNWIND_HINT_TYPE_ENTRY end=1 19 + VALIDATE_UNRET_BEGIN 20 + UNWIND_HINT_END_OF_STACK 16 21 .endm 17 22 18 23 .macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 signal=1 ··· 57 52 UNWIND_HINT_REGS base=\base offset=\offset partial=1 signal=\signal 58 53 .endm 59 54 55 + .macro UNWIND_HINT_IRET_ENTRY base=%rsp offset=0 signal=1 56 + VALIDATE_UNRET_BEGIN 57 + UNWIND_HINT_IRET_REGS base=\base offset=\offset signal=\signal 58 + .endm 59 + 60 60 .macro UNWIND_HINT_FUNC 61 61 UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC 62 62 .endm ··· 77 67 #else 78 68 79 69 #define UNWIND_HINT_FUNC \ 80 - UNWIND_HINT(ORC_REG_SP, 8, UNWIND_HINT_TYPE_FUNC, 0, 0) 70 + UNWIND_HINT(UNWIND_HINT_TYPE_FUNC, ORC_REG_SP, 8, 0) 81 71 82 72 #endif /* __ASSEMBLY__ */ 83 73
+1 -1
arch/x86/kernel/ftrace_64.S
··· 346 346 347 347 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 348 348 SYM_CODE_START(return_to_handler) 349 - UNWIND_HINT_EMPTY 349 + UNWIND_HINT_UNDEFINED 350 350 ANNOTATE_NOENDBR 351 351 subq $16, %rsp 352 352
+1 -1
arch/x86/kernel/head32.c
··· 29 29 x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; 30 30 } 31 31 32 - asmlinkage __visible void __init i386_start_kernel(void) 32 + asmlinkage __visible void __init __noreturn i386_start_kernel(void) 33 33 { 34 34 /* Make sure IDT is set up before any exception happens */ 35 35 idt_setup_early_handler();
+2 -2
arch/x86/kernel/head64.c
··· 471 471 sme_unmap_bootdata(real_mode_data); 472 472 } 473 473 474 - asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) 474 + asmlinkage __visible void __init __noreturn x86_64_start_kernel(char * real_mode_data) 475 475 { 476 476 /* 477 477 * Build-time sanity checks on the kernel image and module ··· 537 537 x86_64_start_reservations(real_mode_data); 538 538 } 539 539 540 - void __init x86_64_start_reservations(char *real_mode_data) 540 + void __init __noreturn x86_64_start_reservations(char *real_mode_data) 541 541 { 542 542 /* version is always not zero if it is copied */ 543 543 if (!boot_params.hdr.version)
+6 -11
arch/x86/kernel/head_64.S
··· 42 42 __HEAD 43 43 .code64 44 44 SYM_CODE_START_NOALIGN(startup_64) 45 - UNWIND_HINT_EMPTY 45 + UNWIND_HINT_END_OF_STACK 46 46 /* 47 47 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, 48 48 * and someone has loaded an identity mapped page table ··· 97 97 lretq 98 98 99 99 .Lon_kernel_cs: 100 - UNWIND_HINT_EMPTY 100 + UNWIND_HINT_END_OF_STACK 101 101 102 102 /* Sanitize CPU configuration */ 103 103 call verify_cpu ··· 119 119 SYM_CODE_END(startup_64) 120 120 121 121 SYM_CODE_START(secondary_startup_64) 122 - UNWIND_HINT_EMPTY 122 + UNWIND_HINT_END_OF_STACK 123 123 ANNOTATE_NOENDBR 124 124 /* 125 125 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, ··· 148 148 * verify_cpu() above to make sure NX is enabled. 149 149 */ 150 150 SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) 151 - UNWIND_HINT_EMPTY 151 + UNWIND_HINT_END_OF_STACK 152 152 ANNOTATE_NOENDBR 153 153 154 154 /* ··· 230 230 ANNOTATE_RETPOLINE_SAFE 231 231 jmp *%rax 232 232 1: 233 - UNWIND_HINT_EMPTY 233 + UNWIND_HINT_END_OF_STACK 234 234 ANNOTATE_NOENDBR // above 235 235 236 236 #ifdef CONFIG_SMP ··· 383 383 */ 384 384 SYM_CODE_START(start_cpu0) 385 385 ANNOTATE_NOENDBR 386 - UNWIND_HINT_EMPTY 386 + UNWIND_HINT_END_OF_STACK 387 387 388 388 /* Find the idle task stack */ 389 389 movq PER_CPU_VAR(pcpu_hot) + X86_current_task, %rcx ··· 405 405 SYM_CODE_START_NOALIGN(vc_boot_ghcb) 406 406 UNWIND_HINT_IRET_REGS offset=8 407 407 ENDBR 408 - 409 - ANNOTATE_UNRET_END 410 408 411 409 /* Build pt_regs */ 412 410 PUSH_AND_CLEAR_REGS ··· 458 460 459 461 SYM_CODE_START_LOCAL(early_idt_handler_common) 460 462 UNWIND_HINT_IRET_REGS offset=16 461 - ANNOTATE_UNRET_END 462 463 /* 463 464 * The stack is the hardware frame, an error code or zero, and the 464 465 * vector number. ··· 506 509 SYM_CODE_START_NOALIGN(vc_no_ghcb) 507 510 UNWIND_HINT_IRET_REGS offset=8 508 511 ENDBR 509 - 510 - ANNOTATE_UNRET_END 511 512 512 513 /* Build pt_regs */ 513 514 PUSH_AND_CLEAR_REGS
+3 -2
arch/x86/kernel/process.c
··· 5 5 #include <linux/kernel.h> 6 6 #include <linux/mm.h> 7 7 #include <linux/smp.h> 8 + #include <linux/cpu.h> 8 9 #include <linux/prctl.h> 9 10 #include <linux/slab.h> 10 11 #include <linux/sched.h> ··· 722 721 } 723 722 724 723 #ifndef CONFIG_SMP 725 - static inline void play_dead(void) 724 + static inline void __noreturn play_dead(void) 726 725 { 727 726 BUG(); 728 727 } ··· 734 733 local_touch_nmi(); 735 734 } 736 735 737 - void arch_cpu_idle_dead(void) 736 + void __noreturn arch_cpu_idle_dead(void) 738 737 { 739 738 play_dead(); 740 739 }
+1 -1
arch/x86/kernel/reboot.c
··· 920 920 } 921 921 922 922 /* Override the weak function in kernel/panic.c */ 923 - void nmi_panic_self_stop(struct pt_regs *regs) 923 + void __noreturn nmi_panic_self_stop(struct pt_regs *regs) 924 924 { 925 925 while (1) { 926 926 /* If no CPU is preparing crash dump, we simply loop here. */
+5 -5
arch/x86/kernel/relocate_kernel_64.S
··· 43 43 .code64 44 44 SYM_CODE_START_NOALIGN(relocate_range) 45 45 SYM_CODE_START_NOALIGN(relocate_kernel) 46 - UNWIND_HINT_EMPTY 46 + UNWIND_HINT_END_OF_STACK 47 47 ANNOTATE_NOENDBR 48 48 /* 49 49 * %rdi indirection_page ··· 113 113 SYM_CODE_END(relocate_kernel) 114 114 115 115 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) 116 - UNWIND_HINT_EMPTY 116 + UNWIND_HINT_END_OF_STACK 117 117 /* set return address to 0 if not preserving context */ 118 118 pushq $0 119 119 /* store the start address on the stack */ ··· 231 231 SYM_CODE_END(identity_mapped) 232 232 233 233 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) 234 - UNWIND_HINT_EMPTY 234 + UNWIND_HINT_END_OF_STACK 235 235 ANNOTATE_NOENDBR // RET target, above 236 236 movq RSP(%r8), %rsp 237 237 movq CR4(%r8), %rax ··· 256 256 257 257 /* Do the copies */ 258 258 SYM_CODE_START_LOCAL_NOALIGN(swap_pages) 259 - UNWIND_HINT_EMPTY 260 - movq %rdi, %rcx /* Put the page_list in %rcx */ 259 + UNWIND_HINT_END_OF_STACK 260 + movq %rdi, %rcx /* Put the page_list in %rcx */ 261 261 xorl %edi, %edi 262 262 xorl %esi, %esi 263 263 jmp 1f
+1 -1
arch/x86/kernel/smpboot.c
··· 1824 1824 } 1825 1825 } 1826 1826 1827 - void hlt_play_dead(void) 1827 + void __noreturn hlt_play_dead(void) 1828 1828 { 1829 1829 if (__this_cpu_read(cpu_info.x86) >= 4) 1830 1830 wbinvd();
+12 -15
arch/x86/kernel/unwind_orc.c
··· 133 133 .sp_offset = sizeof(long), 134 134 .sp_reg = ORC_REG_SP, 135 135 .bp_reg = ORC_REG_UNDEFINED, 136 - .type = UNWIND_HINT_TYPE_CALL 136 + .type = ORC_TYPE_CALL 137 137 }; 138 138 139 139 #ifdef CONFIG_CALL_THUNKS ··· 153 153 154 154 /* Fake frame pointer entry -- used as a fallback for generated code */ 155 155 static struct orc_entry orc_fp_entry = { 156 - .type = UNWIND_HINT_TYPE_CALL, 156 + .type = ORC_TYPE_CALL, 157 157 .sp_reg = ORC_REG_BP, 158 158 .sp_offset = 16, 159 159 .bp_reg = ORC_REG_PREV_SP, 160 160 .bp_offset = -16, 161 - .end = 0, 162 161 }; 163 162 164 163 static struct orc_entry *orc_find(unsigned long ip) ··· 249 250 return -1; 250 251 251 252 /* 252 - * The "weak" section terminator entries need to always be on the left 253 + * The "weak" section terminator entries need to always be first 253 254 * to ensure the lookup code skips them in favor of real entries. 254 255 * These terminator entries exist to handle any gaps created by 255 256 * whitelisted .o files which didn't get objtool generation. 256 257 */ 257 258 orc_a = cur_orc_table + (a - cur_orc_ip_table); 258 - return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1; 259 + return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; 259 260 } 260 261 261 262 void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size, ··· 473 474 */ 474 475 orc = &orc_fp_entry; 475 476 state->error = true; 476 - } 477 - 478 - /* End-of-stack check for kernel threads: */ 479 - if (orc->sp_reg == ORC_REG_UNDEFINED) { 480 - if (!orc->end 477 + } else { 478 + if (orc->type == ORC_TYPE_UNDEFINED) 481 479 goto err; 482 480 483 - goto the_end; 481 + if (orc->type == ORC_TYPE_END_OF_STACK) 482 + goto the_end; 484 483 } 485 484 486 485 state->signal = orc->signal; ··· 551 554 552 555 /* Find IP, SP and possibly regs: */ 553 556 switch (orc->type) { 554 - case UNWIND_HINT_TYPE_CALL: 557 + case ORC_TYPE_CALL: 555 558 ip_p = sp - sizeof(long); 556 559 557 560 if (!deref_stack_reg(state, ip_p, &state->ip)) ··· 564 567 state->prev_regs = NULL; 565 568 break; 566 569 567 - case UNWIND_HINT_TYPE_REGS: 570 + case ORC_TYPE_REGS: 568 571 if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { 569 572 orc_warn_current("can't access registers at %pB\n", 570 573 (void *)orig_ip); ··· 587 590 state->full_regs = true; 588 591 break; 589 592 590 - case UNWIND_HINT_TYPE_REGS_PARTIAL: 593 + case ORC_TYPE_REGS_PARTIAL: 591 594 if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { 592 595 orc_warn_current("can't access iret registers at %pB\n", 593 596 (void *)orig_ip); 594 597 goto err; 595 598 } 596 - /* See UNWIND_HINT_TYPE_REGS case comment. */ 599 + /* See ORC_TYPE_REGS case comment. */ 597 600 state->ip = unwind_recover_rethook(state, state->ip, 598 601 (unsigned long *)(state->sp - sizeof(long))); 599 602
+3 -3
arch/x86/lib/retpoline.S
··· 33 33 34 34 .align RETPOLINE_THUNK_SIZE 35 35 SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) 36 - UNWIND_HINT_EMPTY 36 + UNWIND_HINT_UNDEFINED 37 37 ANNOTATE_NOENDBR 38 38 39 39 ALTERNATIVE_2 __stringify(RETPOLINE \reg), \ ··· 75 75 .align RETPOLINE_THUNK_SIZE 76 76 77 77 SYM_INNER_LABEL(__x86_indirect_call_thunk_\reg, SYM_L_GLOBAL) 78 - UNWIND_HINT_EMPTY 78 + UNWIND_HINT_UNDEFINED 79 79 ANNOTATE_NOENDBR 80 80 81 81 CALL_DEPTH_ACCOUNT ··· 103 103 .align RETPOLINE_THUNK_SIZE 104 104 105 105 SYM_INNER_LABEL(__x86_indirect_jump_thunk_\reg, SYM_L_GLOBAL) 106 - UNWIND_HINT_EMPTY 106 + UNWIND_HINT_UNDEFINED 107 107 ANNOTATE_NOENDBR 108 108 POLINE \reg 109 109 ANNOTATE_UNRET_SAFE
+1 -1
arch/x86/platform/pvh/head.S
··· 50 50 #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) 51 51 52 52 SYM_CODE_START_LOCAL(pvh_start_xen) 53 - UNWIND_HINT_EMPTY 53 + UNWIND_HINT_END_OF_STACK 54 54 cld 55 55 56 56 lgdt (_pa(gdt))
+1 -1
arch/x86/power/cpu.c
··· 288 288 #endif 289 289 290 290 #if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU) 291 - static void resume_play_dead(void) 291 + static void __noreturn resume_play_dead(void) 292 292 { 293 293 play_dead_common(); 294 294 tboot_shutdown(TB_SHUTDOWN_WFS);
+2 -2
arch/x86/xen/xen-asm.S
··· 165 165 SYM_CODE_START(xen_early_idt_handler_array) 166 166 i = 0 167 167 .rept NUM_EXCEPTION_VECTORS 168 - UNWIND_HINT_EMPTY 168 + UNWIND_HINT_UNDEFINED 169 169 ENDBR 170 170 pop %rcx 171 171 pop %r11 ··· 193 193 * rsp->rax } 194 194 */ 195 195 SYM_CODE_START(xen_iret) 196 - UNWIND_HINT_EMPTY 196 + UNWIND_HINT_UNDEFINED 197 197 ANNOTATE_NOENDBR 198 198 pushq $0 199 199 jmp hypercall_iret
+2 -2
arch/x86/xen/xen-head.S
··· 45 45 #ifdef CONFIG_XEN_PV 46 46 __INIT 47 47 SYM_CODE_START(startup_xen) 48 - UNWIND_HINT_EMPTY 48 + UNWIND_HINT_END_OF_STACK 49 49 ANNOTATE_NOENDBR 50 50 cld 51 51 ··· 71 71 #ifdef CONFIG_XEN_PV_SMP 72 72 .pushsection .text 73 73 SYM_CODE_START(asm_cpu_bringup_and_idle) 74 - UNWIND_HINT_EMPTY 74 + UNWIND_HINT_END_OF_STACK 75 75 ENDBR 76 76 77 77 call cpu_bringup_and_idle
+1 -1
arch/xtensa/include/asm/smp.h
··· 33 33 34 34 void __cpu_die(unsigned int cpu); 35 35 int __cpu_disable(void); 36 - void cpu_die(void); 36 + void __noreturn cpu_die(void); 37 37 void cpu_restart(void); 38 38 39 39 #endif /* CONFIG_HOTPLUG_CPU */
+3 -1
arch/xtensa/kernel/smp.c
··· 322 322 pr_err("CPU%u: unable to kill\n", cpu); 323 323 } 324 324 325 - void arch_cpu_idle_dead(void) 325 + void __noreturn arch_cpu_idle_dead(void) 326 326 { 327 327 cpu_die(); 328 328 } ··· 341 341 __asm__ __volatile__( 342 342 " movi a2, cpu_restart\n" 343 343 " jx a2\n"); 344 + 345 + BUG(); 344 346 } 345 347 346 348 #endif /* CONFIG_HOTPLUG_CPU */
+1 -1
drivers/message/fusion/mptbase.c
··· 6935 6935 * @ioc: Pointer to MPT_ADAPTER structure 6936 6936 * 6937 6937 **/ 6938 - void 6938 + void __noreturn 6939 6939 mpt_halt_firmware(MPT_ADAPTER *ioc) 6940 6940 { 6941 6941 u32 ioc_raw_state;
+1 -1
drivers/message/fusion/mptbase.h
··· 944 944 u8 phys_disk_num); 945 945 extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); 946 946 extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); 947 - extern void mpt_halt_firmware(MPT_ADAPTER *ioc); 947 + extern void __noreturn mpt_halt_firmware(MPT_ADAPTER *ioc); 948 948 949 949 950 950 /*
+6
drivers/misc/lkdtm/stackleak.c
··· 43 43 * STACK_END_MAGIC, and in either casee something is seriously wrong. 44 44 */ 45 45 if (current_sp < task_stack_low || current_sp >= task_stack_high) { 46 + instrumentation_begin(); 46 47 pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n", 47 48 current_sp, task_stack_low, task_stack_high - 1); 48 49 test_failed = true; 49 50 goto out; 50 51 } 51 52 if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) { 53 + instrumentation_begin(); 52 54 pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n", 53 55 lowest_sp, task_stack_low, task_stack_high - 1); 54 56 test_failed = true; ··· 88 86 if (*(unsigned long *)poison_low == STACKLEAK_POISON) 89 87 continue; 90 88 89 + instrumentation_begin(); 91 90 pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n", 92 91 poison_high - poison_low, *(unsigned long *)poison_low); 93 92 test_failed = true; 93 + goto out; 94 94 } 95 95 96 + instrumentation_begin(); 96 97 pr_info("stackleak stack usage:\n" 97 98 " high offset: %lu bytes\n" 98 99 " current: %lu bytes\n" ··· 118 113 } else { 119 114 pr_info("OK: the rest of the thread stack is properly erased\n"); 120 115 } 116 + instrumentation_end(); 121 117 } 122 118 123 119 static void lkdtm_STACKLEAK_ERASING(void)
+1 -1
include/linux/context_tracking.h
··· 98 98 static inline int ct_state(void) { return -1; } 99 99 static inline int __ct_state(void) { return -1; } 100 100 static __always_inline bool context_tracking_guest_enter(void) { return false; } 101 - static inline void context_tracking_guest_exit(void) { } 101 + static __always_inline void context_tracking_guest_exit(void) { } 102 102 #define CT_WARN_ON(cond) do { } while (0) 103 103 #endif /* !CONFIG_CONTEXT_TRACKING_USER */ 104 104
+1 -1
include/linux/cpu.h
··· 182 182 void arch_cpu_idle_prepare(void); 183 183 void arch_cpu_idle_enter(void); 184 184 void arch_cpu_idle_exit(void); 185 - void arch_cpu_idle_dead(void); 185 + void __noreturn arch_cpu_idle_dead(void); 186 186 187 187 int cpu_report_state(int cpu); 188 188 int cpu_check_up_prepare(int cpu);
+27 -54
include/linux/objtool.h
··· 2 2 #ifndef _LINUX_OBJTOOL_H 3 3 #define _LINUX_OBJTOOL_H 4 4 5 - #ifndef __ASSEMBLY__ 6 - 7 - #include <linux/types.h> 8 - 9 - /* 10 - * This struct is used by asm and inline asm code to manually annotate the 11 - * location of registers on the stack. 12 - */ 13 - struct unwind_hint { 14 - u32 ip; 15 - s16 sp_offset; 16 - u8 sp_reg; 17 - u8 type; 18 - u8 signal; 19 - u8 end; 20 - }; 21 - #endif 22 - 23 - /* 24 - * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP 25 - * (the caller's SP right before it made the call). Used for all callable 26 - * functions, i.e. all C code and all callable asm functions. 27 - * 28 - * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset 29 - * points to a fully populated pt_regs from a syscall, interrupt, or exception. 30 - * 31 - * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that 32 - * sp_reg+sp_offset points to the iret return frame. 33 - * 34 - * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. 35 - * Useful for code which doesn't have an ELF function annotation. 36 - * 37 - * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc. 
38 - */ 39 - #define UNWIND_HINT_TYPE_CALL 0 40 - #define UNWIND_HINT_TYPE_REGS 1 41 - #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 42 - #define UNWIND_HINT_TYPE_FUNC 3 43 - #define UNWIND_HINT_TYPE_ENTRY 4 44 - #define UNWIND_HINT_TYPE_SAVE 5 45 - #define UNWIND_HINT_TYPE_RESTORE 6 5 + #include <linux/objtool_types.h> 46 6 47 7 #ifdef CONFIG_OBJTOOL 48 8 ··· 10 50 11 51 #ifndef __ASSEMBLY__ 12 52 13 - #define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \ 53 + #define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ 14 54 "987: \n\t" \ 15 55 ".pushsection .discard.unwind_hints\n\t" \ 16 56 /* struct unwind_hint */ \ ··· 19 59 ".byte " __stringify(sp_reg) "\n\t" \ 20 60 ".byte " __stringify(type) "\n\t" \ 21 61 ".byte " __stringify(signal) "\n\t" \ 22 - ".byte " __stringify(end) "\n\t" \ 23 62 ".balign 4 \n\t" \ 24 63 ".popsection\n\t" 25 64 ··· 48 89 #define ANNOTATE_NOENDBR \ 49 90 "986: \n\t" \ 50 91 ".pushsection .discard.noendbr\n\t" \ 51 - _ASM_PTR " 986b\n\t" \ 92 + ".long 986b - .\n\t" \ 52 93 ".popsection\n\t" 53 94 54 95 #define ASM_REACHABLE \ ··· 66 107 #define ANNOTATE_INTRA_FUNCTION_CALL \ 67 108 999: \ 68 109 .pushsection .discard.intra_function_calls; \ 69 - .long 999b; \ 110 + .long 999b - .; \ 70 111 .popsection; 71 112 72 113 /* ··· 90 131 * the debuginfo as necessary. It will also warn if it sees any 91 132 * inconsistencies. 92 133 */ 93 - .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0 94 - .Lunwind_hint_ip_\@: 134 + .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 135 + .Lhere_\@: 95 136 .pushsection .discard.unwind_hints 96 137 /* struct unwind_hint */ 97 - .long .Lunwind_hint_ip_\@ - . 138 + .long .Lhere_\@ - . 
98 139 .short \sp_offset 99 140 .byte \sp_reg 100 141 .byte \type 101 142 .byte \signal 102 - .byte \end 103 143 .balign 4 104 144 .popsection 105 145 .endm 106 146 107 147 .macro STACK_FRAME_NON_STANDARD func:req 108 148 .pushsection .discard.func_stack_frame_non_standard, "aw" 109 - _ASM_PTR \func 149 + .long \func - . 110 150 .popsection 111 151 .endm 112 152 ··· 118 160 .macro ANNOTATE_NOENDBR 119 161 .Lhere_\@: 120 162 .pushsection .discard.noendbr 121 - .quad .Lhere_\@ 163 + .long .Lhere_\@ - . 122 164 .popsection 165 + .endm 166 + 167 + /* 168 + * Use objtool to validate the entry requirement that all code paths do 169 + * VALIDATE_UNRET_END before RET. 170 + * 171 + * NOTE: The macro must be used at the beginning of a global symbol, otherwise 172 + * it will be ignored. 173 + */ 174 + .macro VALIDATE_UNRET_BEGIN 175 + #if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY) 176 + .Lhere_\@: 177 + .pushsection .discard.validate_unret 178 + .long .Lhere_\@ - . 179 + .popsection 180 + #endif 123 181 .endm 124 182 125 183 .macro REACHABLE ··· 151 177 152 178 #ifndef __ASSEMBLY__ 153 179 154 - #define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \ 155 - "\n\t" 180 + #define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" 156 181 #define STACK_FRAME_NON_STANDARD(func) 157 182 #define STACK_FRAME_NON_STANDARD_FP(func) 158 183 #define ANNOTATE_NOENDBR 159 184 #define ASM_REACHABLE 160 185 #else 161 186 #define ANNOTATE_INTRA_FUNCTION_CALL 162 - .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0 187 + .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 163 188 .endm 164 189 .macro STACK_FRAME_NON_STANDARD func:req 165 190 .endm
+57
include/linux/objtool_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_OBJTOOL_TYPES_H 3 + #define _LINUX_OBJTOOL_TYPES_H 4 + 5 + #ifndef __ASSEMBLY__ 6 + 7 + #include <linux/types.h> 8 + 9 + /* 10 + * This struct is used by asm and inline asm code to manually annotate the 11 + * location of registers on the stack. 12 + */ 13 + struct unwind_hint { 14 + u32 ip; 15 + s16 sp_offset; 16 + u8 sp_reg; 17 + u8 type; 18 + u8 signal; 19 + }; 20 + 21 + #endif /* __ASSEMBLY__ */ 22 + 23 + /* 24 + * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in 25 + * a truncated and unreliable stack unwind. 26 + * 27 + * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before 28 + * hitting user entry, boot code, or fork entry (when there are no pt_regs 29 + * available). 30 + * 31 + * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP 32 + * (the caller's SP right before it made the call). Used for all callable 33 + * functions, i.e. all C code and all callable asm functions. 34 + * 35 + * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset 36 + * points to a fully populated pt_regs from a syscall, interrupt, or exception. 37 + * 38 + * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that 39 + * sp_reg+sp_offset points to the iret return frame. 40 + * 41 + * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function. 42 + * Useful for code which doesn't have an ELF function annotation. 43 + * 44 + * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain 45 + * location so that it can be restored later. 
46 + */ 47 + #define UNWIND_HINT_TYPE_UNDEFINED 0 48 + #define UNWIND_HINT_TYPE_END_OF_STACK 1 49 + #define UNWIND_HINT_TYPE_CALL 2 50 + #define UNWIND_HINT_TYPE_REGS 3 51 + #define UNWIND_HINT_TYPE_REGS_PARTIAL 4 52 + /* The below hint types don't have corresponding ORC types */ 53 + #define UNWIND_HINT_TYPE_FUNC 5 54 + #define UNWIND_HINT_TYPE_SAVE 6 55 + #define UNWIND_HINT_TYPE_RESTORE 7 56 + 57 + #endif /* _LINUX_OBJTOOL_TYPES_H */
+1 -1
include/linux/sched/task_stack.h
··· 23 23 24 24 #define setup_thread_stack(new,old) do { } while(0) 25 25 26 - static inline unsigned long *end_of_stack(const struct task_struct *task) 26 + static __always_inline unsigned long *end_of_stack(const struct task_struct *task) 27 27 { 28 28 #ifdef CONFIG_STACK_GROWSUP 29 29 return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+2 -2
include/linux/smp.h
··· 59 59 * Cpus stopping functions in panic. All have default weak definitions. 60 60 * Architecture-dependent code may override them. 61 61 */ 62 - void panic_smp_self_stop(void); 63 - void nmi_panic_self_stop(struct pt_regs *regs); 62 + void __noreturn panic_smp_self_stop(void); 63 + void __noreturn nmi_panic_self_stop(struct pt_regs *regs); 64 64 void crash_smp_send_stop(void); 65 65 66 66 /*
+3 -3
include/linux/start_kernel.h
··· 8 8 /* Define the prototype for start_kernel here, rather than cluttering 9 9 up something else. */ 10 10 11 - extern asmlinkage void __init start_kernel(void); 12 - extern void __init arch_call_rest_init(void); 13 - extern void __ref rest_init(void); 11 + extern asmlinkage void __init __noreturn start_kernel(void); 12 + extern void __init __noreturn arch_call_rest_init(void); 13 + extern void __ref __noreturn rest_init(void); 14 14 15 15 #endif /* _LINUX_START_KERNEL_H */
+3 -3
init/main.c
··· 686 686 687 687 static __initdata DECLARE_COMPLETION(kthreadd_done); 688 688 689 - noinline void __ref rest_init(void) 689 + noinline void __ref __noreturn rest_init(void) 690 690 { 691 691 struct task_struct *tsk; 692 692 int pid; ··· 829 829 early_param("randomize_kstack_offset", early_randomize_kstack_offset); 830 830 #endif 831 831 832 - void __init __weak arch_call_rest_init(void) 832 + void __init __weak __noreturn arch_call_rest_init(void) 833 833 { 834 834 rest_init(); 835 835 } ··· 877 877 memblock_free(unknown_options, len); 878 878 } 879 879 880 - asmlinkage __visible void __init __no_sanitize_address start_kernel(void) 880 + asmlinkage __visible void __init __no_sanitize_address __noreturn start_kernel(void) 881 881 { 882 882 char *command_line; 883 883 char *after_dashes;
+2 -2
kernel/panic.c
··· 141 141 /* 142 142 * Stop ourself in panic -- architecture code may override this 143 143 */ 144 - void __weak panic_smp_self_stop(void) 144 + void __weak __noreturn panic_smp_self_stop(void) 145 145 { 146 146 while (1) 147 147 cpu_relax(); ··· 151 151 * Stop ourselves in NMI context if another CPU has already panicked. Arch code 152 152 * may override this to prepare for crash dumping, e.g. save regs info. 153 153 */ 154 - void __weak nmi_panic_self_stop(struct pt_regs *regs) 154 + void __weak __noreturn nmi_panic_self_stop(struct pt_regs *regs) 155 155 { 156 156 panic_smp_self_stop(); 157 157 }
+1 -1
kernel/sched/idle.c
··· 75 75 void __weak arch_cpu_idle_prepare(void) { } 76 76 void __weak arch_cpu_idle_enter(void) { } 77 77 void __weak arch_cpu_idle_exit(void) { } 78 - void __weak arch_cpu_idle_dead(void) { } 78 + void __weak __noreturn arch_cpu_idle_dead(void) { while (1); } 79 79 void __weak arch_cpu_idle(void) 80 80 { 81 81 cpu_idle_force_poll = 1;
+24 -8
scripts/objdump-func
··· 3 3 # 4 4 # Disassemble a single function. 5 5 # 6 - # usage: objdump-func <file> <func> 6 + # usage: objdump-func <file> <func> [<func> ...] 7 7 8 8 set -o errexit 9 9 set -o nounset ··· 13 13 command -v gawk >/dev/null 2>&1 || die "gawk isn't installed" 14 14 15 15 usage() { 16 - echo "usage: objdump-func <file> <func>" >&2 16 + echo "usage: objdump-func <file> <func> [<func> ...]" >&2 17 17 exit 1 18 18 } 19 19 20 20 [[ $# -lt 2 ]] && usage 21 21 22 22 OBJ=$1; shift 23 - FUNC=$1; shift 23 + FUNCS=("$@") 24 24 25 - # Secret feature to allow adding extra objdump args at the end 26 - EXTRA_ARGS=$@ 27 - 28 - # Note this also matches compiler-added suffixes like ".cold", etc 29 - ${OBJDUMP} -wdr $EXTRA_ARGS $OBJ | gawk -M -v f=$FUNC '/^$/ { P=0; } $0 ~ "<" f "(\\..*)?>:" { P=1; O=strtonum("0x" $1); } { if (P) { o=strtonum("0x" $1); printf("%04x ", o-O); print $0; } }' 25 + ${OBJDUMP} -wdr $OBJ | gawk -M -v _funcs="${FUNCS[*]}" ' 26 + BEGIN { split(_funcs, funcs); } 27 + /^$/ { func_match=0; } 28 + /<.*>:/ { 29 + f = gensub(/.*<(.*)>:/, "\\1", 1); 30 + for (i in funcs) { 31 + # match compiler-added suffixes like ".cold", etc 32 + if (f ~ "^" funcs[i] "(\\..*)?") { 33 + func_match = 1; 34 + base = strtonum("0x" $1); 35 + break; 36 + } 37 + } 38 + } 39 + { 40 + if (func_match) { 41 + addr = strtonum("0x" $1); 42 + printf("%04x ", addr - base); 43 + print; 44 + } 45 + }'
+1 -1
scripts/sorttable.h
··· 128 128 * whitelisted .o files which didn't get objtool generation. 129 129 */ 130 130 orc_a = g_orc_table + (a - g_orc_ip_table); 131 - return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1; 131 + return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; 132 132 } 133 133 134 134 static void *sort_orctable(void *arg)
+8 -4
tools/arch/x86/include/asm/orc_types.h
··· 39 39 #define ORC_REG_SP_INDIRECT 9 40 40 #define ORC_REG_MAX 15 41 41 42 + #define ORC_TYPE_UNDEFINED 0 43 + #define ORC_TYPE_END_OF_STACK 1 44 + #define ORC_TYPE_CALL 2 45 + #define ORC_TYPE_REGS 3 46 + #define ORC_TYPE_REGS_PARTIAL 4 47 + 42 48 #ifndef __ASSEMBLY__ 43 49 #include <asm/byteorder.h> 44 50 ··· 62 56 #if defined(__LITTLE_ENDIAN_BITFIELD) 63 57 unsigned sp_reg:4; 64 58 unsigned bp_reg:4; 65 - unsigned type:2; 59 + unsigned type:3; 66 60 unsigned signal:1; 67 - unsigned end:1; 68 61 #elif defined(__BIG_ENDIAN_BITFIELD) 69 62 unsigned bp_reg:4; 70 63 unsigned sp_reg:4; 71 64 unsigned unused:4; 72 - unsigned end:1; 73 65 unsigned signal:1; 74 - unsigned type:2; 66 + unsigned type:3; 75 67 #endif 76 68 } __packed; 77 69
-200
tools/include/linux/objtool.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _LINUX_OBJTOOL_H 3 - #define _LINUX_OBJTOOL_H 4 - 5 - #ifndef __ASSEMBLY__ 6 - 7 - #include <linux/types.h> 8 - 9 - /* 10 - * This struct is used by asm and inline asm code to manually annotate the 11 - * location of registers on the stack. 12 - */ 13 - struct unwind_hint { 14 - u32 ip; 15 - s16 sp_offset; 16 - u8 sp_reg; 17 - u8 type; 18 - u8 signal; 19 - u8 end; 20 - }; 21 - #endif 22 - 23 - /* 24 - * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP 25 - * (the caller's SP right before it made the call). Used for all callable 26 - * functions, i.e. all C code and all callable asm functions. 27 - * 28 - * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset 29 - * points to a fully populated pt_regs from a syscall, interrupt, or exception. 30 - * 31 - * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that 32 - * sp_reg+sp_offset points to the iret return frame. 33 - * 34 - * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function. 35 - * Useful for code which doesn't have an ELF function annotation. 36 - * 37 - * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc. 
38 - */ 39 - #define UNWIND_HINT_TYPE_CALL 0 40 - #define UNWIND_HINT_TYPE_REGS 1 41 - #define UNWIND_HINT_TYPE_REGS_PARTIAL 2 42 - #define UNWIND_HINT_TYPE_FUNC 3 43 - #define UNWIND_HINT_TYPE_ENTRY 4 44 - #define UNWIND_HINT_TYPE_SAVE 5 45 - #define UNWIND_HINT_TYPE_RESTORE 6 46 - 47 - #ifdef CONFIG_OBJTOOL 48 - 49 - #include <asm/asm.h> 50 - 51 - #ifndef __ASSEMBLY__ 52 - 53 - #define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \ 54 - "987: \n\t" \ 55 - ".pushsection .discard.unwind_hints\n\t" \ 56 - /* struct unwind_hint */ \ 57 - ".long 987b - .\n\t" \ 58 - ".short " __stringify(sp_offset) "\n\t" \ 59 - ".byte " __stringify(sp_reg) "\n\t" \ 60 - ".byte " __stringify(type) "\n\t" \ 61 - ".byte " __stringify(signal) "\n\t" \ 62 - ".byte " __stringify(end) "\n\t" \ 63 - ".balign 4 \n\t" \ 64 - ".popsection\n\t" 65 - 66 - /* 67 - * This macro marks the given function's stack frame as "non-standard", which 68 - * tells objtool to ignore the function when doing stack metadata validation. 69 - * It should only be used in special cases where you're 100% sure it won't 70 - * affect the reliability of frame pointers and kernel stack traces. 71 - * 72 - * For more information, see tools/objtool/Documentation/objtool.txt. 73 - */ 74 - #define STACK_FRAME_NON_STANDARD(func) \ 75 - static void __used __section(".discard.func_stack_frame_non_standard") \ 76 - *__func_stack_frame_non_standard_##func = func 77 - 78 - /* 79 - * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore 80 - * for the case where a function is intentionally missing frame pointer setup, 81 - * but otherwise needs objtool/ORC coverage when frame pointers are disabled. 
82 - */ 83 - #ifdef CONFIG_FRAME_POINTER 84 - #define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) 85 - #else 86 - #define STACK_FRAME_NON_STANDARD_FP(func) 87 - #endif 88 - 89 - #define ANNOTATE_NOENDBR \ 90 - "986: \n\t" \ 91 - ".pushsection .discard.noendbr\n\t" \ 92 - _ASM_PTR " 986b\n\t" \ 93 - ".popsection\n\t" 94 - 95 - #define ASM_REACHABLE \ 96 - "998:\n\t" \ 97 - ".pushsection .discard.reachable\n\t" \ 98 - ".long 998b - .\n\t" \ 99 - ".popsection\n\t" 100 - 101 - #else /* __ASSEMBLY__ */ 102 - 103 - /* 104 - * This macro indicates that the following intra-function call is valid. 105 - * Any non-annotated intra-function call will cause objtool to issue a warning. 106 - */ 107 - #define ANNOTATE_INTRA_FUNCTION_CALL \ 108 - 999: \ 109 - .pushsection .discard.intra_function_calls; \ 110 - .long 999b; \ 111 - .popsection; 112 - 113 - /* 114 - * In asm, there are two kinds of code: normal C-type callable functions and 115 - * the rest. The normal callable functions can be called by other code, and 116 - * don't do anything unusual with the stack. Such normal callable functions 117 - * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this 118 - * category. In this case, no special debugging annotations are needed because 119 - * objtool can automatically generate the ORC data for the ORC unwinder to read 120 - * at runtime. 121 - * 122 - * Anything which doesn't fall into the above category, such as syscall and 123 - * interrupt handlers, tends to not be called directly by other functions, and 124 - * often does unusual non-C-function-type things with the stack pointer. Such 125 - * code needs to be annotated such that objtool can understand it. The 126 - * following CFI hint macros are for this type of code. 127 - * 128 - * These macros provide hints to objtool about the state of the stack at each 129 - * instruction. 
Objtool starts from the hints and follows the code flow, 130 - * making automatic CFI adjustments when it sees pushes and pops, filling out 131 - * the debuginfo as necessary. It will also warn if it sees any 132 - * inconsistencies. 133 - */ 134 - .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0 135 - .Lunwind_hint_ip_\@: 136 - .pushsection .discard.unwind_hints 137 - /* struct unwind_hint */ 138 - .long .Lunwind_hint_ip_\@ - . 139 - .short \sp_offset 140 - .byte \sp_reg 141 - .byte \type 142 - .byte \signal 143 - .byte \end 144 - .balign 4 145 - .popsection 146 - .endm 147 - 148 - .macro STACK_FRAME_NON_STANDARD func:req 149 - .pushsection .discard.func_stack_frame_non_standard, "aw" 150 - _ASM_PTR \func 151 - .popsection 152 - .endm 153 - 154 - .macro STACK_FRAME_NON_STANDARD_FP func:req 155 - #ifdef CONFIG_FRAME_POINTER 156 - STACK_FRAME_NON_STANDARD \func 157 - #endif 158 - .endm 159 - 160 - .macro ANNOTATE_NOENDBR 161 - .Lhere_\@: 162 - .pushsection .discard.noendbr 163 - .quad .Lhere_\@ 164 - .popsection 165 - .endm 166 - 167 - .macro REACHABLE 168 - .Lhere_\@: 169 - .pushsection .discard.reachable 170 - .long .Lhere_\@ - . 171 - .popsection 172 - .endm 173 - 174 - #endif /* __ASSEMBLY__ */ 175 - 176 - #else /* !CONFIG_OBJTOOL */ 177 - 178 - #ifndef __ASSEMBLY__ 179 - 180 - #define UNWIND_HINT(sp_reg, sp_offset, type, signal, end) \ 181 - "\n\t" 182 - #define STACK_FRAME_NON_STANDARD(func) 183 - #define STACK_FRAME_NON_STANDARD_FP(func) 184 - #define ANNOTATE_NOENDBR 185 - #define ASM_REACHABLE 186 - #else 187 - #define ANNOTATE_INTRA_FUNCTION_CALL 188 - .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 end=0 189 - .endm 190 - .macro STACK_FRAME_NON_STANDARD func:req 191 - .endm 192 - .macro ANNOTATE_NOENDBR 193 - .endm 194 - .macro REACHABLE 195 - .endm 196 - #endif 197 - 198 - #endif /* CONFIG_OBJTOOL */ 199 - 200 - #endif /* _LINUX_OBJTOOL_H */
+57
tools/include/linux/objtool_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_OBJTOOL_TYPES_H 3 + #define _LINUX_OBJTOOL_TYPES_H 4 + 5 + #ifndef __ASSEMBLY__ 6 + 7 + #include <linux/types.h> 8 + 9 + /* 10 + * This struct is used by asm and inline asm code to manually annotate the 11 + * location of registers on the stack. 12 + */ 13 + struct unwind_hint { 14 + u32 ip; 15 + s16 sp_offset; 16 + u8 sp_reg; 17 + u8 type; 18 + u8 signal; 19 + }; 20 + 21 + #endif /* __ASSEMBLY__ */ 22 + 23 + /* 24 + * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in 25 + * a truncated and unreliable stack unwind. 26 + * 27 + * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before 28 + * hitting user entry, boot code, or fork entry (when there are no pt_regs 29 + * available). 30 + * 31 + * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP 32 + * (the caller's SP right before it made the call). Used for all callable 33 + * functions, i.e. all C code and all callable asm functions. 34 + * 35 + * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset 36 + * points to a fully populated pt_regs from a syscall, interrupt, or exception. 37 + * 38 + * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that 39 + * sp_reg+sp_offset points to the iret return frame. 40 + * 41 + * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function. 42 + * Useful for code which doesn't have an ELF function annotation. 43 + * 44 + * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain 45 + * location so that it can be restored later. 
46 + */ 47 + #define UNWIND_HINT_TYPE_UNDEFINED 0 48 + #define UNWIND_HINT_TYPE_END_OF_STACK 1 49 + #define UNWIND_HINT_TYPE_CALL 2 50 + #define UNWIND_HINT_TYPE_REGS 3 51 + #define UNWIND_HINT_TYPE_REGS_PARTIAL 4 52 + /* The below hint types don't have corresponding ORC types */ 53 + #define UNWIND_HINT_TYPE_FUNC 5 54 + #define UNWIND_HINT_TYPE_SAVE 6 55 + #define UNWIND_HINT_TYPE_RESTORE 7 56 + 57 + #endif /* _LINUX_OBJTOOL_TYPES_H */
+233 -243
tools/objtool/check.c
··· 17 17 #include <objtool/warn.h> 18 18 #include <objtool/endianness.h> 19 19 20 - #include <linux/objtool.h> 20 + #include <linux/objtool_types.h> 21 21 #include <linux/hashtable.h> 22 22 #include <linux/kernel.h> 23 23 #include <linux/static_call_types.h> ··· 202 202 "__reiserfs_panic", 203 203 "__stack_chk_fail", 204 204 "__ubsan_handle_builtin_unreachable", 205 + "arch_call_rest_init", 206 + "arch_cpu_idle_dead", 205 207 "btrfs_assertfail", 206 208 "cpu_bringup_and_idle", 207 209 "cpu_startup_entry", ··· 212 210 "do_task_dead", 213 211 "ex_handler_msr_mce", 214 212 "fortify_panic", 213 + "hlt_play_dead", 214 + "hv_ghcb_terminate", 215 215 "kthread_complete_and_exit", 216 216 "kthread_exit", 217 217 "kunit_try_catch_throw", 218 218 "lbug_with_loc", 219 219 "machine_real_restart", 220 220 "make_task_dead", 221 + "mpt_halt_firmware", 222 + "nmi_panic_self_stop", 221 223 "panic", 224 + "panic_smp_self_stop", 225 + "rest_init", 226 + "resume_play_dead", 222 227 "rewind_stack_and_make_dead", 223 228 "sev_es_terminate", 224 229 "snp_abort", 230 + "start_kernel", 225 231 "stop_this_cpu", 226 232 "usercopy_abort", 233 + "x86_64_start_kernel", 234 + "x86_64_start_reservations", 227 235 "xen_cpu_bringup_again", 228 236 "xen_start_kernel", 229 237 }; ··· 241 229 if (!func) 242 230 return false; 243 231 244 - if (func->bind == STB_WEAK) 245 - return false; 246 - 247 - if (func->bind == STB_GLOBAL) 232 + if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) 248 233 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 249 234 if (!strcmp(func->name, global_noreturns[i])) 250 235 return true; 236 + 237 + if (func->bind == STB_WEAK) 238 + return false; 251 239 252 240 if (!func->len) 253 241 return false; ··· 482 470 483 471 // printf("%s: last chunk used: %d\n", sec->name, (int)idx); 484 472 485 - list_for_each_entry(func, &sec->symbol_list, list) { 473 + sec_for_each_sym(sec, func) { 486 474 if (func->type != STT_NOTYPE && func->type != STT_FUNC) 487 475 continue; 488 476 
··· 936 924 937 925 static int create_cfi_sections(struct objtool_file *file) 938 926 { 939 - struct section *sec, *s; 927 + struct section *sec; 940 928 struct symbol *sym; 941 929 unsigned int *loc; 942 930 int idx; ··· 949 937 } 950 938 951 939 idx = 0; 952 - for_each_sec(file, s) { 953 - if (!s->text) 940 + for_each_sym(file, sym) { 941 + if (sym->type != STT_FUNC) 954 942 continue; 955 943 956 - list_for_each_entry(sym, &s->symbol_list, list) { 957 - if (sym->type != STT_FUNC) 958 - continue; 944 + if (strncmp(sym->name, "__cfi_", 6)) 945 + continue; 959 946 960 - if (strncmp(sym->name, "__cfi_", 6)) 961 - continue; 962 - 963 - idx++; 964 - } 947 + idx++; 965 948 } 966 949 967 950 sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx); ··· 964 957 return -1; 965 958 966 959 idx = 0; 967 - for_each_sec(file, s) { 968 - if (!s->text) 960 + for_each_sym(file, sym) { 961 + if (sym->type != STT_FUNC) 969 962 continue; 970 963 971 - list_for_each_entry(sym, &s->symbol_list, list) { 972 - if (sym->type != STT_FUNC) 973 - continue; 964 + if (strncmp(sym->name, "__cfi_", 6)) 965 + continue; 974 966 975 - if (strncmp(sym->name, "__cfi_", 6)) 976 - continue; 967 + loc = (unsigned int *)sec->data->d_buf + idx; 968 + memset(loc, 0, sizeof(unsigned int)); 977 969 978 - loc = (unsigned int *)sec->data->d_buf + idx; 979 - memset(loc, 0, sizeof(unsigned int)); 970 + if (elf_add_reloc_to_insn(file->elf, sec, 971 + idx * sizeof(unsigned int), 972 + R_X86_64_PC32, 973 + sym->sec, sym->offset)) 974 + return -1; 980 975 981 - if (elf_add_reloc_to_insn(file->elf, sec, 982 - idx * sizeof(unsigned int), 983 - R_X86_64_PC32, 984 - s, sym->offset)) 985 - return -1; 986 - 987 - idx++; 988 - } 976 + idx++; 989 977 } 990 978 991 979 return 0; ··· 1281 1279 "__ubsan_handle_type_mismatch_v1", 1282 1280 "__ubsan_handle_shift_out_of_bounds", 1283 1281 "__ubsan_handle_load_invalid_value", 1282 + /* STACKLEAK */ 1283 + "stackleak_track_stack", 1284 1284 /* misc */ 1285 
1285 "csum_partial_copy_generic", 1286 1286 "copy_mc_fragile", ··· 1448 1444 1449 1445 if (opts.mcount && sym->fentry) { 1450 1446 if (sibling) 1451 - WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset); 1447 + WARN_INSN(insn, "tail call to __fentry__ !?!?"); 1452 1448 if (opts.mnop) { 1453 1449 if (reloc) { 1454 1450 reloc->type = R_NONE; ··· 1650 1646 continue; 1651 1647 } 1652 1648 1653 - WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1654 - insn->sec, insn->offset, dest_sec->name, 1655 - dest_off); 1649 + WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx", 1650 + dest_sec->name, dest_off); 1656 1651 return -1; 1657 1652 } 1658 1653 ··· 1734 1731 continue; 1735 1732 1736 1733 if (!insn_call_dest(insn)) { 1737 - WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1734 + WARN_INSN(insn, "unannotated intra-function call"); 1738 1735 return -1; 1739 1736 } 1740 1737 1741 1738 if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) { 1742 - WARN_FUNC("unsupported call to non-function", 1743 - insn->sec, insn->offset); 1739 + WARN_INSN(insn, "unsupported call to non-function"); 1744 1740 return -1; 1745 1741 } 1746 1742 ··· 1747 1745 dest_off = arch_dest_reloc_offset(reloc->addend); 1748 1746 dest = find_call_destination(reloc->sym->sec, dest_off); 1749 1747 if (!dest) { 1750 - WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1751 - insn->sec, insn->offset, 1752 - reloc->sym->sec->name, 1753 - dest_off); 1748 + WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx", 1749 + reloc->sym->sec->name, dest_off); 1754 1750 return -1; 1755 1751 } 1756 1752 ··· 1808 1808 } else { 1809 1809 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len - 1810 1810 orig_alt_group->first_insn->offset != special_alt->orig_len) { 1811 - WARN_FUNC("weirdly overlapping alternative! %ld != %d", 1812 - orig_insn->sec, orig_insn->offset, 1811 + WARN_INSN(orig_insn, "weirdly overlapping alternative! 
%ld != %d", 1813 1812 orig_alt_group->last_insn->offset + 1814 1813 orig_alt_group->last_insn->len - 1815 1814 orig_alt_group->first_insn->offset, ··· 1877 1878 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) && 1878 1879 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1879 1880 1880 - WARN_FUNC("unsupported relocation in alternatives section", 1881 - insn->sec, insn->offset); 1881 + WARN_INSN(insn, "unsupported relocation in alternatives section"); 1882 1882 return -1; 1883 1883 } 1884 1884 ··· 1891 1893 if (dest_off == special_alt->new_off + special_alt->new_len) { 1892 1894 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn); 1893 1895 if (!insn->jump_dest) { 1894 - WARN_FUNC("can't find alternative jump destination", 1895 - insn->sec, insn->offset); 1896 + WARN_INSN(insn, "can't find alternative jump destination"); 1896 1897 return -1; 1897 1898 } 1898 1899 } ··· 1925 1928 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL && 1926 1929 orig_insn->type != INSN_NOP) { 1927 1930 1928 - WARN_FUNC("unsupported instruction at jump label", 1929 - orig_insn->sec, orig_insn->offset); 1931 + WARN_INSN(orig_insn, "unsupported instruction at jump label"); 1930 1932 return -1; 1931 1933 } 1932 1934 ··· 2004 2008 2005 2009 if (special_alt->group) { 2006 2010 if (!special_alt->orig_len) { 2007 - WARN_FUNC("empty alternative entry", 2008 - orig_insn->sec, orig_insn->offset); 2011 + WARN_INSN(orig_insn, "empty alternative entry"); 2009 2012 continue; 2010 2013 } 2011 2014 ··· 2095 2100 } 2096 2101 2097 2102 if (!prev_offset) { 2098 - WARN_FUNC("can't find switch jump table", 2099 - insn->sec, insn->offset); 2103 + WARN_INSN(insn, "can't find switch jump table"); 2100 2104 return -1; 2101 2105 } 2102 2106 ··· 2209 2215 */ 2210 2216 static int add_jump_table_alts(struct objtool_file *file) 2211 2217 { 2212 - struct section *sec; 2213 2218 struct symbol *func; 2214 2219 int ret; 2215 2220 2216 2221 if (!file->rodata) 2217 2222 return 0; 2218 2223 
2219 - for_each_sec(file, sec) { 2220 - list_for_each_entry(func, &sec->symbol_list, list) { 2221 - if (func->type != STT_FUNC) 2222 - continue; 2224 + for_each_sym(file, func) { 2225 + if (func->type != STT_FUNC) 2226 + continue; 2223 2227 2224 - mark_func_jump_tables(file, func); 2225 - ret = add_func_jump_tables(file, func); 2226 - if (ret) 2227 - return ret; 2228 - } 2228 + mark_func_jump_tables(file, func); 2229 + ret = add_func_jump_tables(file, func); 2230 + if (ret) 2231 + return ret; 2229 2232 } 2230 2233 2231 2234 return 0; ··· 2234 2243 memcpy(&state->regs, &initial_func_cfi.regs, 2235 2244 CFI_NUM_REGS * sizeof(struct cfi_reg)); 2236 2245 state->stack_size = initial_func_cfi.cfa.offset; 2246 + state->type = UNWIND_HINT_TYPE_CALL; 2237 2247 } 2238 2248 2239 2249 static int read_unwind_hints(struct objtool_file *file) ··· 2296 2304 2297 2305 if (sym && sym->bind == STB_GLOBAL) { 2298 2306 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2299 - WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR", 2300 - insn->sec, insn->offset); 2307 + WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); 2301 2308 } 2302 - 2303 - insn->entry = 1; 2304 2309 } 2305 - } 2306 - 2307 - if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 2308 - hint->type = UNWIND_HINT_TYPE_CALL; 2309 - insn->entry = 1; 2310 2310 } 2311 2311 2312 2312 if (hint->type == UNWIND_HINT_TYPE_FUNC) { ··· 2310 2326 cfi = *(insn->cfi); 2311 2327 2312 2328 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 2313 - WARN_FUNC("unsupported unwind_hint sp base reg %d", 2314 - insn->sec, insn->offset, hint->sp_reg); 2329 + WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); 2315 2330 return -1; 2316 2331 } 2317 2332 2318 2333 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); 2319 2334 cfi.type = hint->type; 2320 2335 cfi.signal = hint->signal; 2321 - cfi.end = hint->end; 2322 2336 2323 2337 insn->cfi = cfi_hash_find_or_add(&cfi); 2324 2338 } ··· 2373 2391 insn->type != 
INSN_CALL_DYNAMIC && 2374 2392 insn->type != INSN_RETURN && 2375 2393 insn->type != INSN_NOP) { 2376 - WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", 2377 - insn->sec, insn->offset); 2394 + WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop"); 2378 2395 return -1; 2379 2396 } 2380 2397 ··· 2430 2449 return 0; 2431 2450 } 2432 2451 2452 + static int read_validate_unret_hints(struct objtool_file *file) 2453 + { 2454 + struct section *sec; 2455 + struct instruction *insn; 2456 + struct reloc *reloc; 2457 + 2458 + sec = find_section_by_name(file->elf, ".rela.discard.validate_unret"); 2459 + if (!sec) 2460 + return 0; 2461 + 2462 + list_for_each_entry(reloc, &sec->reloc_list, list) { 2463 + if (reloc->sym->type != STT_SECTION) { 2464 + WARN("unexpected relocation symbol type in %s", sec->name); 2465 + return -1; 2466 + } 2467 + 2468 + insn = find_insn(file, reloc->sym->sec, reloc->addend); 2469 + if (!insn) { 2470 + WARN("bad .discard.instr_end entry"); 2471 + return -1; 2472 + } 2473 + insn->unret = 1; 2474 + } 2475 + 2476 + return 0; 2477 + } 2478 + 2479 + 2433 2480 static int read_intra_function_calls(struct objtool_file *file) 2434 2481 { 2435 2482 struct instruction *insn; ··· 2484 2475 } 2485 2476 2486 2477 if (insn->type != INSN_CALL) { 2487 - WARN_FUNC("intra_function_call not a direct call", 2488 - insn->sec, insn->offset); 2478 + WARN_INSN(insn, "intra_function_call not a direct call"); 2489 2479 return -1; 2490 2480 } 2491 2481 ··· 2498 2490 dest_off = arch_jump_destination(insn); 2499 2491 insn->jump_dest = find_insn(file, insn->sec, dest_off); 2500 2492 if (!insn->jump_dest) { 2501 - WARN_FUNC("can't find call dest at %s+0x%lx", 2502 - insn->sec, insn->offset, 2493 + WARN_INSN(insn, "can't find call dest at %s+0x%lx", 2503 2494 insn->sec->name, dest_off); 2504 2495 return -1; 2505 2496 } ··· 2534 2527 2535 2528 static int classify_symbols(struct objtool_file *file) 2536 2529 { 2537 - struct section *sec; 2538 2530 
struct symbol *func; 2539 2531 2540 - for_each_sec(file, sec) { 2541 - list_for_each_entry(func, &sec->symbol_list, list) { 2542 - if (func->bind != STB_GLOBAL) 2543 - continue; 2532 + for_each_sym(file, func) { 2533 + if (func->bind != STB_GLOBAL) 2534 + continue; 2544 2535 2545 - if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2546 - strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2547 - func->static_call_tramp = true; 2536 + if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 2537 + strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 2538 + func->static_call_tramp = true; 2548 2539 2549 - if (arch_is_retpoline(func)) 2550 - func->retpoline_thunk = true; 2540 + if (arch_is_retpoline(func)) 2541 + func->retpoline_thunk = true; 2551 2542 2552 - if (arch_is_rethunk(func)) 2553 - func->return_thunk = true; 2543 + if (arch_is_rethunk(func)) 2544 + func->return_thunk = true; 2554 2545 2555 - if (arch_ftrace_match(func->name)) 2556 - func->fentry = true; 2546 + if (arch_ftrace_match(func->name)) 2547 + func->fentry = true; 2557 2548 2558 - if (is_profiling_func(func->name)) 2559 - func->profiling_func = true; 2560 - } 2549 + if (is_profiling_func(func->name)) 2550 + func->profiling_func = true; 2561 2551 } 2562 2552 2563 2553 return 0; ··· 2668 2664 return ret; 2669 2665 2670 2666 ret = read_instr_hints(file); 2667 + if (ret) 2668 + return ret; 2669 + 2670 + ret = read_validate_unret_hints(file); 2671 2671 if (ret) 2672 2672 return ret; 2673 2673 ··· 2836 2828 /* stack operations don't make sense with an undefined CFA */ 2837 2829 if (cfa->base == CFI_UNDEFINED) { 2838 2830 if (insn_func(insn)) { 2839 - WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2831 + WARN_INSN(insn, "undefined stack state"); 2840 2832 return -1; 2841 2833 } 2842 2834 return 0; ··· 2985 2977 break; 2986 2978 } 2987 2979 2988 - if (!cfi->drap && op->src.reg == CFI_SP && 2989 - op->dest.reg == CFI_BP && cfa->base == CFI_SP && 2990 - check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) 
{ 2991 - 2992 - /* lea disp(%rsp), %rbp */ 2993 - cfa->base = CFI_BP; 2994 - cfa->offset -= op->src.offset; 2995 - cfi->bp_scratch = false; 2996 - break; 2997 - } 2998 - 2999 2980 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 3000 2981 3001 2982 /* drap: lea disp(%rsp), %drap */ ··· 3019 3022 } 3020 3023 3021 3024 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) { 3022 - WARN_FUNC("unsupported stack register modification", 3023 - insn->sec, insn->offset); 3025 + WARN_INSN(insn, "unsupported stack register modification"); 3024 3026 return -1; 3025 3027 } 3026 3028 ··· 3029 3033 if (op->dest.reg != CFI_SP || 3030 3034 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 3031 3035 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 3032 - WARN_FUNC("unsupported stack pointer realignment", 3033 - insn->sec, insn->offset); 3036 + WARN_INSN(insn, "unsupported stack pointer realignment"); 3034 3037 return -1; 3035 3038 } 3036 3039 ··· 3124 3129 break; 3125 3130 3126 3131 default: 3127 - WARN_FUNC("unknown stack-related instruction", 3128 - insn->sec, insn->offset); 3132 + WARN_INSN(insn, "unknown stack-related instruction"); 3129 3133 return -1; 3130 3134 } 3131 3135 ··· 3213 3219 3214 3220 case OP_DEST_MEM: 3215 3221 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 3216 - WARN_FUNC("unknown stack-related memory operation", 3217 - insn->sec, insn->offset); 3222 + WARN_INSN(insn, "unknown stack-related memory operation"); 3218 3223 return -1; 3219 3224 } 3220 3225 ··· 3225 3232 break; 3226 3233 3227 3234 default: 3228 - WARN_FUNC("unknown stack-related instruction", 3229 - insn->sec, insn->offset); 3235 + WARN_INSN(insn, "unknown stack-related instruction"); 3230 3236 return -1; 3231 3237 } 3232 3238 ··· 3264 3272 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group; 3265 3273 struct instruction *orig = orig_group->first_insn; 3266 3274 char *where = offstr(insn->sec, insn->offset); 3267 - 
WARN_FUNC("stack layout conflict in alternatives: %s", 3268 - orig->sec, orig->offset, where); 3275 + WARN_INSN(orig, "stack layout conflict in alternatives: %s", where); 3269 3276 free(where); 3270 3277 return -1; 3271 3278 } ··· 3291 3300 if (!state->uaccess_stack) { 3292 3301 state->uaccess_stack = 1; 3293 3302 } else if (state->uaccess_stack >> 31) { 3294 - WARN_FUNC("PUSHF stack exhausted", 3295 - insn->sec, insn->offset); 3303 + WARN_INSN(insn, "PUSHF stack exhausted"); 3296 3304 return 1; 3297 3305 } 3298 3306 state->uaccess_stack <<= 1; ··· 3323 3333 3324 3334 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { 3325 3335 3326 - WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 3327 - insn->sec, insn->offset, 3336 + WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 3328 3337 cfi1->cfa.base, cfi1->cfa.offset, 3329 3338 cfi2->cfa.base, cfi2->cfa.offset); 3330 3339 ··· 3333 3344 sizeof(struct cfi_reg))) 3334 3345 continue; 3335 3346 3336 - WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 3337 - insn->sec, insn->offset, 3347 + WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 3338 3348 i, cfi1->regs[i].base, cfi1->regs[i].offset, 3339 3349 i, cfi2->regs[i].base, cfi2->regs[i].offset); 3340 3350 break; ··· 3341 3353 3342 3354 } else if (cfi1->type != cfi2->type) { 3343 3355 3344 - WARN_FUNC("stack state mismatch: type1=%d type2=%d", 3345 - insn->sec, insn->offset, cfi1->type, cfi2->type); 3356 + WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", 3357 + cfi1->type, cfi2->type); 3346 3358 3347 3359 } else if (cfi1->drap != cfi2->drap || 3348 3360 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || 3349 3361 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { 3350 3362 3351 - WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 3352 - insn->sec, insn->offset, 3363 + WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 3353 3364 cfi1->drap, cfi1->drap_reg, 
cfi1->drap_offset, 3354 3365 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); 3355 3366 ··· 3456 3469 { 3457 3470 if (state->noinstr && state->instr <= 0 && 3458 3471 !noinstr_call_dest(file, insn, insn_call_dest(insn))) { 3459 - WARN_FUNC("call to %s() leaves .noinstr.text section", 3460 - insn->sec, insn->offset, call_dest_name(insn)); 3472 + WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn)); 3461 3473 return 1; 3462 3474 } 3463 3475 3464 3476 if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) { 3465 - WARN_FUNC("call to %s() with UACCESS enabled", 3466 - insn->sec, insn->offset, call_dest_name(insn)); 3477 + WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn)); 3467 3478 return 1; 3468 3479 } 3469 3480 3470 3481 if (state->df) { 3471 - WARN_FUNC("call to %s() with DF set", 3472 - insn->sec, insn->offset, call_dest_name(insn)); 3482 + WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn)); 3473 3483 return 1; 3474 3484 } 3475 3485 ··· 3478 3494 struct insn_state *state) 3479 3495 { 3480 3496 if (insn_func(insn) && has_modified_stack_frame(insn, state)) { 3481 - WARN_FUNC("sibling call from callable instruction with modified stack frame", 3482 - insn->sec, insn->offset); 3497 + WARN_INSN(insn, "sibling call from callable instruction with modified stack frame"); 3483 3498 return 1; 3484 3499 } 3485 3500 ··· 3488 3505 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state) 3489 3506 { 3490 3507 if (state->noinstr && state->instr > 0) { 3491 - WARN_FUNC("return with instrumentation enabled", 3492 - insn->sec, insn->offset); 3508 + WARN_INSN(insn, "return with instrumentation enabled"); 3493 3509 return 1; 3494 3510 } 3495 3511 3496 3512 if (state->uaccess && !func_uaccess_safe(func)) { 3497 - WARN_FUNC("return with UACCESS enabled", 3498 - insn->sec, insn->offset); 3513 + WARN_INSN(insn, "return with UACCESS enabled"); 3499 3514 return 1; 3500 
3515 } 3501 3516 3502 3517 if (!state->uaccess && func_uaccess_safe(func)) { 3503 - WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", 3504 - insn->sec, insn->offset); 3518 + WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function"); 3505 3519 return 1; 3506 3520 } 3507 3521 3508 3522 if (state->df) { 3509 - WARN_FUNC("return with DF set", 3510 - insn->sec, insn->offset); 3523 + WARN_INSN(insn, "return with DF set"); 3511 3524 return 1; 3512 3525 } 3513 3526 3514 3527 if (func && has_modified_stack_frame(insn, state)) { 3515 - WARN_FUNC("return with modified stack frame", 3516 - insn->sec, insn->offset); 3528 + WARN_INSN(insn, "return with modified stack frame"); 3517 3529 return 1; 3518 3530 } 3519 3531 3520 3532 if (state->cfi.bp_scratch) { 3521 - WARN_FUNC("BP used as a scratch register", 3522 - insn->sec, insn->offset); 3533 + WARN_INSN(insn, "BP used as a scratch register"); 3523 3534 return 1; 3524 3535 } 3525 3536 ··· 3585 3608 } 3586 3609 3587 3610 if (func && insn->ignore) { 3588 - WARN_FUNC("BUG: why am I validating an ignored function?", 3589 - sec, insn->offset); 3611 + WARN_INSN(insn, "BUG: why am I validating an ignored function?"); 3590 3612 return 1; 3591 3613 } 3592 3614 ··· 3618 3642 } 3619 3643 3620 3644 if (!save_insn) { 3621 - WARN_FUNC("no corresponding CFI save for CFI restore", 3622 - sec, insn->offset); 3645 + WARN_INSN(insn, "no corresponding CFI save for CFI restore"); 3623 3646 return 1; 3624 3647 } 3625 3648 3626 3649 if (!save_insn->visited) { 3627 - WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", 3628 - sec, insn->offset); 3650 + WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo"); 3629 3651 return 1; 3630 3652 } 3631 3653 ··· 3683 3709 3684 3710 if (opts.stackval && func && !is_fentry_call(insn) && 3685 3711 !has_valid_stack_frame(&state)) { 3686 - WARN_FUNC("call without frame pointer save/setup", 3687 - sec, insn->offset); 3712 + 
WARN_INSN(insn, "call without frame pointer save/setup"); 3688 3713 return 1; 3689 3714 } 3690 3715 ··· 3729 3756 3730 3757 case INSN_CONTEXT_SWITCH: 3731 3758 if (func && (!next_insn || !next_insn->hint)) { 3732 - WARN_FUNC("unsupported instruction in callable function", 3733 - sec, insn->offset); 3759 + WARN_INSN(insn, "unsupported instruction in callable function"); 3734 3760 return 1; 3735 3761 } 3736 3762 return 0; 3737 3763 3738 3764 case INSN_STAC: 3739 3765 if (state.uaccess) { 3740 - WARN_FUNC("recursive UACCESS enable", sec, insn->offset); 3766 + WARN_INSN(insn, "recursive UACCESS enable"); 3741 3767 return 1; 3742 3768 } 3743 3769 ··· 3745 3773 3746 3774 case INSN_CLAC: 3747 3775 if (!state.uaccess && func) { 3748 - WARN_FUNC("redundant UACCESS disable", sec, insn->offset); 3776 + WARN_INSN(insn, "redundant UACCESS disable"); 3749 3777 return 1; 3750 3778 } 3751 3779 3752 3780 if (func_uaccess_safe(func) && !state.uaccess_stack) { 3753 - WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); 3781 + WARN_INSN(insn, "UACCESS-safe disables UACCESS"); 3754 3782 return 1; 3755 3783 } 3756 3784 ··· 3759 3787 3760 3788 case INSN_STD: 3761 3789 if (state.df) { 3762 - WARN_FUNC("recursive STD", sec, insn->offset); 3790 + WARN_INSN(insn, "recursive STD"); 3763 3791 return 1; 3764 3792 } 3765 3793 ··· 3768 3796 3769 3797 case INSN_CLD: 3770 3798 if (!state.df && func) { 3771 - WARN_FUNC("redundant CLD", sec, insn->offset); 3799 + WARN_INSN(insn, "redundant CLD"); 3772 3800 return 1; 3773 3801 } 3774 3802 ··· 3835 3863 /* 3836 3864 * Validate rethunk entry constraint: must untrain RET before the first RET. 3837 3865 * 3838 - * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3866 + * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes 3839 3867 * before an actual RET instruction. 
3840 3868 */ 3841 - static int validate_entry(struct objtool_file *file, struct instruction *insn) 3869 + static int validate_unret(struct objtool_file *file, struct instruction *insn) 3842 3870 { 3843 3871 struct instruction *next, *dest; 3844 3872 int ret, warnings = 0; ··· 3846 3874 for (;;) { 3847 3875 next = next_insn_to_validate(file, insn); 3848 3876 3849 - if (insn->visited & VISITED_ENTRY) 3877 + if (insn->visited & VISITED_UNRET) 3850 3878 return 0; 3851 3879 3852 - insn->visited |= VISITED_ENTRY; 3880 + insn->visited |= VISITED_UNRET; 3853 3881 3854 3882 if (!insn->ignore_alts && insn->alts) { 3855 3883 struct alternative *alt; ··· 3859 3887 if (alt->skip_orig) 3860 3888 skip_orig = true; 3861 3889 3862 - ret = validate_entry(file, alt->insn); 3890 + ret = validate_unret(file, alt->insn); 3863 3891 if (ret) { 3864 3892 if (opts.backtrace) 3865 3893 BT_FUNC("(alt)", insn); ··· 3876 3904 case INSN_CALL_DYNAMIC: 3877 3905 case INSN_JUMP_DYNAMIC: 3878 3906 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3879 - WARN_FUNC("early indirect call", insn->sec, insn->offset); 3907 + WARN_INSN(insn, "early indirect call"); 3880 3908 return 1; 3881 3909 3882 3910 case INSN_JUMP_UNCONDITIONAL: 3883 3911 case INSN_JUMP_CONDITIONAL: 3884 3912 if (!is_sibling_call(insn)) { 3885 3913 if (!insn->jump_dest) { 3886 - WARN_FUNC("unresolved jump target after linking?!?", 3887 - insn->sec, insn->offset); 3914 + WARN_INSN(insn, "unresolved jump target after linking?!?"); 3888 3915 return -1; 3889 3916 } 3890 - ret = validate_entry(file, insn->jump_dest); 3917 + ret = validate_unret(file, insn->jump_dest); 3891 3918 if (ret) { 3892 3919 if (opts.backtrace) { 3893 3920 BT_FUNC("(branch%s)", insn, ··· 3911 3940 return -1; 3912 3941 } 3913 3942 3914 - ret = validate_entry(file, dest); 3943 + ret = validate_unret(file, dest); 3915 3944 if (ret) { 3916 3945 if (opts.backtrace) 3917 3946 BT_FUNC("(call)", insn); ··· 3924 3953 return 0; 3925 3954 3926 3955 case INSN_RETURN: 3927 - WARN_FUNC("RET 
before UNTRAIN", insn->sec, insn->offset); 3956 + WARN_INSN(insn, "RET before UNTRAIN"); 3928 3957 return 1; 3929 3958 3930 3959 case INSN_NOP: ··· 3937 3966 } 3938 3967 3939 3968 if (!next) { 3940 - WARN_FUNC("teh end!", insn->sec, insn->offset); 3969 + WARN_INSN(insn, "teh end!"); 3941 3970 return -1; 3942 3971 } 3943 3972 insn = next; ··· 3947 3976 } 3948 3977 3949 3978 /* 3950 - * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3951 - * before RET. 3979 + * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter 3980 + * VALIDATE_UNRET_END before RET. 3952 3981 */ 3953 - static int validate_unret(struct objtool_file *file) 3982 + static int validate_unrets(struct objtool_file *file) 3954 3983 { 3955 3984 struct instruction *insn; 3956 3985 int ret, warnings = 0; 3957 3986 3958 3987 for_each_insn(file, insn) { 3959 - if (!insn->entry) 3988 + if (!insn->unret) 3960 3989 continue; 3961 3990 3962 - ret = validate_entry(file, insn); 3991 + ret = validate_unret(file, insn); 3963 3992 if (ret < 0) { 3964 - WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); 3993 + WARN_INSN(insn, "Failed UNRET validation"); 3965 3994 return ret; 3966 3995 } 3967 3996 warnings += ret; ··· 3989 4018 3990 4019 if (insn->type == INSN_RETURN) { 3991 4020 if (opts.rethunk) { 3992 - WARN_FUNC("'naked' return found in RETHUNK build", 3993 - insn->sec, insn->offset); 4021 + WARN_INSN(insn, "'naked' return found in RETHUNK build"); 3994 4022 } else 3995 4023 continue; 3996 4024 } else { 3997 - WARN_FUNC("indirect %s found in RETPOLINE build", 3998 - insn->sec, insn->offset, 4025 + WARN_INSN(insn, "indirect %s found in RETPOLINE build", 3999 4026 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 4000 4027 } 4001 4028 ··· 4090 4121 * It may also insert a UD2 after calling a __noreturn function. 
4091 4122 */ 4092 4123 prev_insn = prev_insn_same_sec(file, insn); 4093 - if ((prev_insn->dead_end || 4094 - dead_end_function(file, insn_call_dest(prev_insn))) && 4124 + if (prev_insn->dead_end && 4095 4125 (insn->type == INSN_BUG || 4096 4126 (insn->type == INSN_JUMP_UNCONDITIONAL && 4097 4127 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) ··· 4126 4158 return false; 4127 4159 } 4128 4160 4129 - static int add_prefix_symbol(struct objtool_file *file, struct symbol *func, 4130 - struct instruction *insn) 4161 + static int add_prefix_symbol(struct objtool_file *file, struct symbol *func) 4131 4162 { 4132 - if (!opts.prefix) 4133 - return 0; 4163 + struct instruction *insn, *prev; 4164 + struct cfi_state *cfi; 4134 4165 4135 - for (;;) { 4136 - struct instruction *prev = prev_insn_same_sec(file, insn); 4166 + insn = find_insn(file, func->sec, func->offset); 4167 + if (!insn) 4168 + return -1; 4169 + 4170 + for (prev = prev_insn_same_sec(file, insn); 4171 + prev; 4172 + prev = prev_insn_same_sec(file, prev)) { 4137 4173 u64 offset; 4138 4174 4139 - if (!prev) 4140 - break; 4141 - 4142 4175 if (prev->type != INSN_NOP) 4143 - break; 4176 + return -1; 4144 4177 4145 4178 offset = func->offset - prev->offset; 4146 - if (offset >= opts.prefix) { 4147 - if (offset == opts.prefix) { 4148 - /* 4149 - * Since the sec->symbol_list is ordered by 4150 - * offset (see elf_add_symbol()) the added 4151 - * symbol will not be seen by the iteration in 4152 - * validate_section(). 4153 - * 4154 - * Hence the lack of list_for_each_entry_safe() 4155 - * there. 4156 - * 4157 - * The direct concequence is that prefix symbols 4158 - * don't get visited (because pointless), except 4159 - * for the logic in ignore_unreachable_insn() 4160 - * that needs the terminating insn to be visited 4161 - * otherwise it will report the hole. 4162 - * 4163 - * Hence mark the first instruction of the 4164 - * prefix symbol as visisted. 
4165 - */ 4166 - prev->visited |= VISITED_BRANCH; 4167 - elf_create_prefix_symbol(file->elf, func, opts.prefix); 4168 - } 4169 - break; 4170 - } 4171 - insn = prev; 4179 + 4180 + if (offset > opts.prefix) 4181 + return -1; 4182 + 4183 + if (offset < opts.prefix) 4184 + continue; 4185 + 4186 + elf_create_prefix_symbol(file->elf, func, opts.prefix); 4187 + break; 4172 4188 } 4173 4189 4190 + if (!prev) 4191 + return -1; 4192 + 4193 + if (!insn->cfi) { 4194 + /* 4195 + * This can happen if stack validation isn't enabled or the 4196 + * function is annotated with STACK_FRAME_NON_STANDARD. 4197 + */ 4198 + return 0; 4199 + } 4200 + 4201 + /* Propagate insn->cfi to the prefix code */ 4202 + cfi = cfi_hash_find_or_add(insn->cfi); 4203 + for (; prev != insn; prev = next_insn_same_sec(file, prev)) 4204 + prev->cfi = cfi; 4205 + 4174 4206 return 0; 4207 + } 4208 + 4209 + static int add_prefix_symbols(struct objtool_file *file) 4210 + { 4211 + struct section *sec; 4212 + struct symbol *func; 4213 + int warnings = 0; 4214 + 4215 + for_each_sec(file, sec) { 4216 + if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 4217 + continue; 4218 + 4219 + sec_for_each_sym(sec, func) { 4220 + if (func->type != STT_FUNC) 4221 + continue; 4222 + 4223 + add_prefix_symbol(file, func); 4224 + } 4225 + } 4226 + 4227 + return warnings; 4175 4228 } 4176 4229 4177 4230 static int validate_symbol(struct objtool_file *file, struct section *sec, ··· 4213 4224 if (!insn || insn->ignore || insn->visited) 4214 4225 return 0; 4215 4226 4216 - add_prefix_symbol(file, sym, insn); 4217 - 4218 4227 state->uaccess = sym->uaccess_safe; 4219 4228 4220 4229 ret = validate_branch(file, insn_func(insn), insn, *state); ··· 4227 4240 struct symbol *func; 4228 4241 int warnings = 0; 4229 4242 4230 - list_for_each_entry(func, &sec->symbol_list, list) { 4243 + sec_for_each_sym(sec, func) { 4231 4244 if (func->type != STT_FUNC) 4232 4245 continue; 4233 4246 ··· 4390 4403 if (noendbr_range(file, dest)) 4391 4404 continue; 4392 4405 
4393 - WARN_FUNC("relocation to !ENDBR: %s", 4394 - insn->sec, insn->offset, 4395 - offstr(dest->sec, dest->offset)); 4406 + WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset)); 4396 4407 4397 4408 warnings++; 4398 4409 } ··· 4492 4507 switch (insn->type) { 4493 4508 case INSN_RETURN: 4494 4509 if (!next_insn || next_insn->type != INSN_TRAP) { 4495 - WARN_FUNC("missing int3 after ret", 4496 - insn->sec, insn->offset); 4510 + WARN_INSN(insn, "missing int3 after ret"); 4497 4511 warnings++; 4498 4512 } 4499 4513 4500 4514 break; 4501 4515 case INSN_JUMP_DYNAMIC: 4502 4516 if (!next_insn || next_insn->type != INSN_TRAP) { 4503 - WARN_FUNC("missing int3 after indirect jump", 4504 - insn->sec, insn->offset); 4517 + WARN_INSN(insn, "missing int3 after indirect jump"); 4505 4518 warnings++; 4506 4519 } 4507 4520 break; ··· 4522 4539 if (insn->visited || ignore_unreachable_insn(file, insn)) 4523 4540 continue; 4524 4541 4525 - WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 4542 + WARN_INSN(insn, "unreachable instruction"); 4526 4543 return 1; 4527 4544 } 4528 4545 ··· 4590 4607 * Must be after validate_branch() and friends, it plays 4591 4608 * further games with insn->visited. 4592 4609 */ 4593 - ret = validate_unret(file); 4610 + ret = validate_unrets(file); 4594 4611 if (ret < 0) 4595 4612 return ret; 4596 4613 warnings += ret; ··· 4649 4666 ret = create_mcount_loc_sections(file); 4650 4667 if (ret < 0) 4651 4668 goto out; 4669 + warnings += ret; 4670 + } 4671 + 4672 + if (opts.prefix) { 4673 + ret = add_prefix_symbols(file); 4674 + if (ret < 0) 4675 + return ret; 4652 4676 warnings += ret; 4653 4677 } 4654 4678
+1 -1
tools/objtool/elf.c
··· 474 474 475 475 /* Create parent/child links for any cold subfunctions */ 476 476 list_for_each_entry(sec, &elf->sections, list) { 477 - list_for_each_entry(sym, &sec->symbol_list, list) { 477 + sec_for_each_sym(sec, sym) { 478 478 char pname[MAX_NAME_LEN + 1]; 479 479 size_t pnamelen; 480 480 if (sym->type != STT_FUNC)
+2 -2
tools/objtool/include/objtool/check.h
··· 61 61 restore : 1, 62 62 retpoline_safe : 1, 63 63 noendbr : 1, 64 - entry : 1, 64 + unret : 1, 65 65 visited : 4, 66 66 no_reloc : 1; 67 67 /* 10 bit hole */ ··· 92 92 #define VISITED_BRANCH 0x01 93 93 #define VISITED_BRANCH_UACCESS 0x02 94 94 #define VISITED_BRANCH_MASK 0x03 95 - #define VISITED_ENTRY 0x04 95 + #define VISITED_UNRET 0x04 96 96 97 97 static inline bool is_static_jump(struct instruction *insn) 98 98 {
+9
tools/objtool/include/objtool/elf.h
··· 188 188 #define for_each_sec(file, sec) \ 189 189 list_for_each_entry(sec, &file->elf->sections, list) 190 190 191 + #define sec_for_each_sym(sec, sym) \ 192 + list_for_each_entry(sym, &sec->symbol_list, list) 193 + 194 + #define for_each_sym(file, sym) \ 195 + for (struct section *__sec, *__fake = (struct section *)1; \ 196 + __fake; __fake = NULL) \ 197 + for_each_sec(file, __sec) \ 198 + sec_for_each_sym(__sec, sym) 199 + 191 200 #endif /* _OBJTOOL_ELF_H */
+5
tools/objtool/include/objtool/warn.h
··· 53 53 free(_str); \ 54 54 }) 55 55 56 + #define WARN_INSN(insn, format, ...) \ 57 + ({ \ 58 + WARN_FUNC(format, insn->sec, insn->offset, ##__VA_ARGS__); \ 59 + }) 60 + 56 61 #define BT_FUNC(format, insn, ...) \ 57 62 ({ \ 58 63 struct instruction *_insn = (insn); \
+9 -6
tools/objtool/orc_dump.c
··· 4 4 */ 5 5 6 6 #include <unistd.h> 7 - #include <linux/objtool.h> 8 7 #include <asm/orc_types.h> 9 8 #include <objtool/objtool.h> 10 9 #include <objtool/warn.h> ··· 38 39 static const char *orc_type_name(unsigned int type) 39 40 { 40 41 switch (type) { 41 - case UNWIND_HINT_TYPE_CALL: 42 + case ORC_TYPE_UNDEFINED: 43 + return "(und)"; 44 + case ORC_TYPE_END_OF_STACK: 45 + return "end"; 46 + case ORC_TYPE_CALL: 42 47 return "call"; 43 - case UNWIND_HINT_TYPE_REGS: 48 + case ORC_TYPE_REGS: 44 49 return "regs"; 45 - case UNWIND_HINT_TYPE_REGS_PARTIAL: 50 + case ORC_TYPE_REGS_PARTIAL: 46 51 return "regs (partial)"; 47 52 default: 48 53 return "?"; ··· 205 202 printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i])); 206 203 } 207 204 205 + printf("type:%s", orc_type_name(orc[i].type)); 208 206 209 207 printf(" sp:"); 210 208 ··· 215 211 216 212 print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); 217 213 218 - printf(" type:%s signal:%d end:%d\n", 219 - orc_type_name(orc[i].type), orc[i].signal, orc[i].end); 214 + printf(" signal:%d\n", orc[i].signal); 220 215 } 221 216 222 217 elf_end(elf);
+31 -19
tools/objtool/orc_gen.c
··· 6 6 #include <stdlib.h> 7 7 #include <string.h> 8 8 9 - #include <linux/objtool.h> 9 + #include <linux/objtool_types.h> 10 10 #include <asm/orc_types.h> 11 11 12 12 #include <objtool/check.h> ··· 21 21 memset(orc, 0, sizeof(*orc)); 22 22 23 23 if (!cfi) { 24 - orc->end = 0; 25 - orc->sp_reg = ORC_REG_UNDEFINED; 24 + /* 25 + * This is usually either unreachable nops/traps (which don't 26 + * trigger unreachable instruction warnings), or 27 + * STACK_FRAME_NON_STANDARD functions. 28 + */ 29 + orc->type = ORC_TYPE_UNDEFINED; 26 30 return 0; 27 31 } 28 32 29 - orc->end = cfi->end; 33 + switch (cfi->type) { 34 + case UNWIND_HINT_TYPE_UNDEFINED: 35 + orc->type = ORC_TYPE_UNDEFINED; 36 + return 0; 37 + case UNWIND_HINT_TYPE_END_OF_STACK: 38 + orc->type = ORC_TYPE_END_OF_STACK; 39 + return 0; 40 + case UNWIND_HINT_TYPE_CALL: 41 + orc->type = ORC_TYPE_CALL; 42 + break; 43 + case UNWIND_HINT_TYPE_REGS: 44 + orc->type = ORC_TYPE_REGS; 45 + break; 46 + case UNWIND_HINT_TYPE_REGS_PARTIAL: 47 + orc->type = ORC_TYPE_REGS_PARTIAL; 48 + break; 49 + default: 50 + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); 51 + return -1; 52 + } 53 + 30 54 orc->signal = cfi->signal; 31 - 32 - if (cfi->cfa.base == CFI_UNDEFINED) { 33 - orc->sp_reg = ORC_REG_UNDEFINED; 34 - return 0; 35 - } 36 55 37 56 switch (cfi->cfa.base) { 38 57 case CFI_SP: ··· 79 60 orc->sp_reg = ORC_REG_DX; 80 61 break; 81 62 default: 82 - WARN_FUNC("unknown CFA base reg %d", 83 - insn->sec, insn->offset, cfi->cfa.base); 63 + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); 84 64 return -1; 85 65 } 86 66 ··· 94 76 orc->bp_reg = ORC_REG_BP; 95 77 break; 96 78 default: 97 - WARN_FUNC("unknown BP base reg %d", 98 - insn->sec, insn->offset, bp->base); 79 + WARN_INSN(insn, "unknown BP base reg %d", bp->base); 99 80 return -1; 100 81 } 101 82 102 83 orc->sp_offset = cfi->cfa.offset; 103 84 orc->bp_offset = bp->offset; 104 - orc->type = cfi->type; 105 85 106 86 return 0; 107 87 } ··· 164 148 struct 
orc_list_entry *entry; 165 149 struct list_head orc_list; 166 150 167 - struct orc_entry null = { 168 - .sp_reg = ORC_REG_UNDEFINED, 169 - .bp_reg = ORC_REG_UNDEFINED, 170 - .type = UNWIND_HINT_TYPE_CALL, 171 - }; 151 + struct orc_entry null = { .type = ORC_TYPE_UNDEFINED }; 172 152 173 153 /* Build a deduplicated list of ORC entries: */ 174 154 INIT_LIST_HEAD(&orc_list);
+1 -1
tools/objtool/sync-check.sh
··· 6 6 exit 1 7 7 fi 8 8 9 - FILES="include/linux/objtool.h" 9 + FILES="include/linux/objtool_types.h" 10 10 11 11 if [ "$SRCARCH" = "x86" ]; then 12 12 FILES="$FILES