x86/irq: Make run_on_irqstack_cond() typesafe

Sami reported that run_on_irqstack_cond() requires the caller to cast
functions to mismatching types, which trips indirect call Control-Flow
Integrity (CFI) in Clang.

Instead of disabling CFI on that function, provide proper helpers for
the three call variants. The actual ASM code stays the same as that is
out of reach.

[ bp: Fix __run_on_irqstack() prototype to match. ]

Fixes: 931b94145981 ("x86/entry: Provide helpers for executing on the irqstack")
Reported-by: Nathan Chancellor <natechancellor@gmail.com>
Reported-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Sami Tolvanen <samitolvanen@google.com>
Cc: <stable@vger.kernel.org>
Link: https://github.com/ClangBuiltLinux/linux/issues/1052
Link: https://lkml.kernel.org/r/87pn6eb5tv.fsf@nanos.tec.linutronix.de

authored by Thomas Gleixner and committed by Borislav Petkov a7b3474c 98477740

Changed files
+68 -13
arch
x86
+1 -1
arch/x86/entry/common.c
··· 299 299 old_regs = set_irq_regs(regs); 300 300 301 301 instrumentation_begin(); 302 - run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs); 302 + run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); 303 303 instrumentation_end(); 304 304 305 305 set_irq_regs(old_regs);
+2
arch/x86/entry/entry_64.S
··· 682 682 * rdx: Function argument (can be NULL if none) 683 683 */ 684 684 SYM_FUNC_START(asm_call_on_stack) 685 + SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL) 686 + SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL) 685 687 /* 686 688 * Save the frame pointer unconditionally. This allows the ORC 687 689 * unwinder to handle the stack switch.
+1 -1
arch/x86/include/asm/idtentry.h
··· 242 242 instrumentation_begin(); \ 243 243 irq_enter_rcu(); \ 244 244 kvm_set_cpu_l1tf_flush_l1d(); \ 245 - run_on_irqstack_cond(__##func, regs, regs); \ 245 + run_sysvec_on_irqstack_cond(__##func, regs); \ 246 246 irq_exit_rcu(); \ 247 247 instrumentation_end(); \ 248 248 irqentry_exit(regs, state); \
+62 -9
arch/x86/include/asm/irq_stack.h
··· 12 12 return __this_cpu_read(irq_count) != -1; 13 13 } 14 14 15 - void asm_call_on_stack(void *sp, void *func, void *arg); 15 + void asm_call_on_stack(void *sp, void (*func)(void), void *arg); 16 + void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs), 17 + struct pt_regs *regs); 18 + void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc), 19 + struct irq_desc *desc); 16 20 17 - static __always_inline void __run_on_irqstack(void *func, void *arg) 21 + static __always_inline void __run_on_irqstack(void (*func)(void)) 18 22 { 19 23 void *tos = __this_cpu_read(hardirq_stack_ptr); 20 24 21 25 __this_cpu_add(irq_count, 1); 22 - asm_call_on_stack(tos - 8, func, arg); 26 + asm_call_on_stack(tos - 8, func, NULL); 27 + __this_cpu_sub(irq_count, 1); 28 + } 29 + 30 + static __always_inline void 31 + __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), 32 + struct pt_regs *regs) 33 + { 34 + void *tos = __this_cpu_read(hardirq_stack_ptr); 35 + 36 + __this_cpu_add(irq_count, 1); 37 + asm_call_sysvec_on_stack(tos - 8, func, regs); 38 + __this_cpu_sub(irq_count, 1); 39 + } 40 + 41 + static __always_inline void 42 + __run_irq_on_irqstack(void (*func)(struct irq_desc *desc), 43 + struct irq_desc *desc) 44 + { 45 + void *tos = __this_cpu_read(hardirq_stack_ptr); 46 + 47 + __this_cpu_add(irq_count, 1); 48 + asm_call_irq_on_stack(tos - 8, func, desc); 23 49 __this_cpu_sub(irq_count, 1); 24 50 } 25 51 26 52 #else /* CONFIG_X86_64 */ 27 53 static inline bool irqstack_active(void) { return false; } 28 - static inline void __run_on_irqstack(void *func, void *arg) { } 54 + static inline void __run_on_irqstack(void (*func)(void)) { } 55 + static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), 56 + struct pt_regs *regs) { } 57 + static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc), 58 + struct irq_desc *desc) { } 29 59 #endif /* !CONFIG_X86_64 */ 30 60 31 61 static __always_inline bool 
irq_needs_irq_stack(struct pt_regs *regs) ··· 67 37 return !user_mode(regs) && !irqstack_active(); 68 38 } 69 39 70 - static __always_inline void run_on_irqstack_cond(void *func, void *arg, 40 + 41 + static __always_inline void run_on_irqstack_cond(void (*func)(void), 71 42 struct pt_regs *regs) 72 43 { 73 - void (*__func)(void *arg) = func; 74 - 75 44 lockdep_assert_irqs_disabled(); 76 45 77 46 if (irq_needs_irq_stack(regs)) 78 - __run_on_irqstack(__func, arg); 47 + __run_on_irqstack(func); 79 48 else 80 - __func(arg); 49 + func(); 50 + } 51 + 52 + static __always_inline void 53 + run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs), 54 + struct pt_regs *regs) 55 + { 56 + lockdep_assert_irqs_disabled(); 57 + 58 + if (irq_needs_irq_stack(regs)) 59 + __run_sysvec_on_irqstack(func, regs); 60 + else 61 + func(regs); 62 + } 63 + 64 + static __always_inline void 65 + run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc, 66 + struct pt_regs *regs) 67 + { 68 + lockdep_assert_irqs_disabled(); 69 + 70 + if (irq_needs_irq_stack(regs)) 71 + __run_irq_on_irqstack(func, desc); 72 + else 73 + func(desc); 81 74 } 82 75 83 76 #endif
+1 -1
arch/x86/kernel/irq.c
··· 227 227 struct pt_regs *regs) 228 228 { 229 229 if (IS_ENABLED(CONFIG_X86_64)) 230 - run_on_irqstack_cond(desc->handle_irq, desc, regs); 230 + run_irq_on_irqstack_cond(desc->handle_irq, desc, regs); 231 231 else 232 232 __handle_irq(desc, regs); 233 233 }
+1 -1
arch/x86/kernel/irq_64.c
··· 74 74 75 75 void do_softirq_own_stack(void) 76 76 { 77 - run_on_irqstack_cond(__do_softirq, NULL, NULL); 77 + run_on_irqstack_cond(__do_softirq, NULL); 78 78 }