Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: Trace IPIs sent via smp_send_reschedule()

To be able to trace invocations of smp_send_reschedule(), rename the
arch-specific definitions of it to arch_smp_send_reschedule() and wrap it
into an smp_send_reschedule() that contains a tracepoint.

Changes to include the declaration of the tracepoint were driven by the
following coccinelle script:

@func_use@
@@
smp_send_reschedule(...);

@include@
@@
#include <trace/events/ipi.h>

@no_include depends on func_use && !include@
@@
#include <...>
+
+ #include <trace/events/ipi.h>

[csky bits]
[riscv bits]
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Guo Ren <guoren@kernel.org>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230307143558.294354-6-vschneid@redhat.com

Authored by Valentin Schneider; committed by Peter Zijlstra
4c8c3c7f 4468161a

+53 -26
+1 -1
arch/alpha/kernel/smp.c
··· 562 562 } 563 563 564 564 void 565 - smp_send_reschedule(int cpu) 565 + arch_smp_send_reschedule(int cpu) 566 566 { 567 567 #ifdef DEBUG_IPI_MSG 568 568 if (cpu == hard_smp_processor_id())
+1 -1
arch/arc/kernel/smp.c
··· 292 292 ipi_send_msg_one(cpu, msg); 293 293 } 294 294 295 - void smp_send_reschedule(int cpu) 295 + void arch_smp_send_reschedule(int cpu) 296 296 { 297 297 ipi_send_msg_one(cpu, IPI_RESCHEDULE); 298 298 }
+1 -1
arch/arm/kernel/smp.c
··· 746 746 ipi_setup(smp_processor_id()); 747 747 } 748 748 749 - void smp_send_reschedule(int cpu) 749 + void arch_smp_send_reschedule(int cpu) 750 750 { 751 751 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 752 752 }
+2
arch/arm/mach-actions/platsmp.c
··· 20 20 #include <asm/smp_plat.h> 21 21 #include <asm/smp_scu.h> 22 22 23 + #include <trace/events/ipi.h> 24 + 23 25 #define OWL_CPU1_ADDR 0x50 24 26 #define OWL_CPU1_FLAG 0x5c 25 27
+1 -1
arch/arm64/kernel/smp.c
··· 976 976 ipi_setup(smp_processor_id()); 977 977 } 978 978 979 - void smp_send_reschedule(int cpu) 979 + void arch_smp_send_reschedule(int cpu) 980 980 { 981 981 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 982 982 }
+1 -1
arch/csky/kernel/smp.c
··· 140 140 on_each_cpu(ipi_stop, NULL, 1); 141 141 } 142 142 143 - void smp_send_reschedule(int cpu) 143 + void arch_smp_send_reschedule(int cpu) 144 144 { 145 145 send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); 146 146 }
+1 -1
arch/hexagon/kernel/smp.c
··· 217 217 } 218 218 } 219 219 220 - void smp_send_reschedule(int cpu) 220 + void arch_smp_send_reschedule(int cpu) 221 221 { 222 222 send_ipi(cpumask_of(cpu), IPI_RESCHEDULE); 223 223 }
+2 -2
arch/ia64/kernel/smp.c
··· 220 220 * Called with preemption disabled. 221 221 */ 222 222 void 223 - smp_send_reschedule (int cpu) 223 + arch_smp_send_reschedule (int cpu) 224 224 { 225 225 ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); 226 226 } 227 - EXPORT_SYMBOL_GPL(smp_send_reschedule); 227 + EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); 228 228 229 229 /* 230 230 * Called with preemption disabled.
+2 -2
arch/loongarch/kernel/smp.c
··· 155 155 * it goes straight through and wastes no time serializing 156 156 * anything. Worst case is that we lose a reschedule ... 157 157 */ 158 - void smp_send_reschedule(int cpu) 158 + void arch_smp_send_reschedule(int cpu) 159 159 { 160 160 loongson_send_ipi_single(cpu, SMP_RESCHEDULE); 161 161 } 162 - EXPORT_SYMBOL_GPL(smp_send_reschedule); 162 + EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); 163 163 164 164 irqreturn_t loongson_ipi_interrupt(int irq, void *dev) 165 165 {
+1 -1
arch/mips/include/asm/smp.h
··· 66 66 * it goes straight through and wastes no time serializing 67 67 * anything. Worst case is that we lose a reschedule ... 68 68 */ 69 - static inline void smp_send_reschedule(int cpu) 69 + static inline void arch_smp_send_reschedule(int cpu) 70 70 { 71 71 extern const struct plat_smp_ops *mp_ops; /* private */ 72 72
+2
arch/mips/kernel/rtlx-cmp.c
··· 17 17 #include <asm/vpe.h> 18 18 #include <asm/rtlx.h> 19 19 20 + #include <trace/events/ipi.h> 21 + 20 22 static int major; 21 23 22 24 static void rtlx_interrupt(void)
+1 -1
arch/openrisc/kernel/smp.c
··· 173 173 } 174 174 } 175 175 176 - void smp_send_reschedule(int cpu) 176 + void arch_smp_send_reschedule(int cpu) 177 177 { 178 178 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 179 179 }
+2 -2
arch/parisc/kernel/smp.c
··· 246 246 inline void 247 247 smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); } 248 248 249 - void 250 - smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } 249 + void 250 + arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } 251 251 252 252 void 253 253 smp_send_all_nop(void)
+4 -2
arch/powerpc/kernel/smp.c
··· 61 61 #include <asm/kup.h> 62 62 #include <asm/fadump.h> 63 63 64 + #include <trace/events/ipi.h> 65 + 64 66 #ifdef DEBUG 65 67 #include <asm/udbg.h> 66 68 #define DBG(fmt...) udbg_printf(fmt) ··· 366 364 #endif 367 365 } 368 366 369 - void smp_send_reschedule(int cpu) 367 + void arch_smp_send_reschedule(int cpu) 370 368 { 371 369 if (likely(smp_ops)) 372 370 do_message_pass(cpu, PPC_MSG_RESCHEDULE); 373 371 } 374 - EXPORT_SYMBOL_GPL(smp_send_reschedule); 372 + EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); 375 373 376 374 void arch_send_call_function_single_ipi(int cpu) 377 375 {
+3
arch/powerpc/kvm/book3s_hv.c
··· 43 43 #include <linux/compiler.h> 44 44 #include <linux/of.h> 45 45 #include <linux/irqdomain.h> 46 + #include <linux/smp.h> 46 47 47 48 #include <asm/ftrace.h> 48 49 #include <asm/reg.h> ··· 80 79 #include <asm/ultravisor.h> 81 80 #include <asm/dtl.h> 82 81 #include <asm/plpar_wrappers.h> 82 + 83 + #include <trace/events/ipi.h> 83 84 84 85 #include "book3s.h" 85 86 #include "book3s_hv.h"
+2
arch/powerpc/platforms/powernv/subcore.c
··· 20 20 #include <asm/opal.h> 21 21 #include <asm/smp.h> 22 22 23 + #include <trace/events/ipi.h> 24 + 23 25 #include "subcore.h" 24 26 #include "powernv.h" 25 27
+2 -2
arch/riscv/kernel/smp.c
··· 328 328 } 329 329 #endif 330 330 331 - void smp_send_reschedule(int cpu) 331 + void arch_smp_send_reschedule(int cpu) 332 332 { 333 333 send_ipi_single(cpu, IPI_RESCHEDULE); 334 334 } 335 - EXPORT_SYMBOL_GPL(smp_send_reschedule); 335 + EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+1 -1
arch/s390/kernel/smp.c
··· 553 553 * it goes straight through and wastes no time serializing 554 554 * anything. Worst case is that we lose a reschedule ... 555 555 */ 556 - void smp_send_reschedule(int cpu) 556 + void arch_smp_send_reschedule(int cpu) 557 557 { 558 558 pcpu_ec_call(pcpu_devices + cpu, ec_schedule); 559 559 }
+1 -1
arch/sh/kernel/smp.c
··· 256 256 (bogosum / (5000/HZ)) % 100); 257 257 } 258 258 259 - void smp_send_reschedule(int cpu) 259 + void arch_smp_send_reschedule(int cpu) 260 260 { 261 261 mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE); 262 262 }
+1 -1
arch/sparc/kernel/smp_32.c
··· 120 120 121 121 struct linux_prom_registers smp_penguin_ctable = { 0 }; 122 122 123 - void smp_send_reschedule(int cpu) 123 + void arch_smp_send_reschedule(int cpu) 124 124 { 125 125 /* 126 126 * CPU model dependent way of implementing IPI generation targeting
+1 -1
arch/sparc/kernel/smp_64.c
··· 1430 1430 return hv_err; 1431 1431 } 1432 1432 1433 - void smp_send_reschedule(int cpu) 1433 + void arch_smp_send_reschedule(int cpu) 1434 1434 { 1435 1435 if (cpu == smp_processor_id()) { 1436 1436 WARN_ON_ONCE(preemptible());
+1 -1
arch/x86/include/asm/smp.h
··· 98 98 smp_ops.play_dead(); 99 99 } 100 100 101 - static inline void smp_send_reschedule(int cpu) 101 + static inline void arch_smp_send_reschedule(int cpu) 102 102 { 103 103 smp_ops.smp_send_reschedule(cpu); 104 104 }
+4
arch/x86/kvm/svm/svm.c
··· 27 27 #include <linux/swap.h> 28 28 #include <linux/rwsem.h> 29 29 #include <linux/cc_platform.h> 30 + #include <linux/smp.h> 30 31 31 32 #include <asm/apic.h> 32 33 #include <asm/perf_event.h> ··· 42 41 #include <asm/fpu/api.h> 43 42 44 43 #include <asm/virtext.h> 44 + 45 + #include <trace/events/ipi.h> 46 + 45 47 #include "trace.h" 46 48 47 49 #include "svm.h"
+2
arch/x86/kvm/x86.c
··· 60 60 #include <linux/mem_encrypt.h> 61 61 #include <linux/entry-kvm.h> 62 62 #include <linux/suspend.h> 63 + #include <linux/smp.h> 63 64 65 + #include <trace/events/ipi.h> 64 66 #include <trace/events/kvm.h> 65 67 66 68 #include <asm/debugreg.h>
+1 -1
arch/xtensa/kernel/smp.c
··· 389 389 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); 390 390 } 391 391 392 - void smp_send_reschedule(int cpu) 392 + void arch_smp_send_reschedule(int cpu) 393 393 { 394 394 send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); 395 395 }
+9 -2
include/linux/smp.h
··· 125 125 /* 126 126 * sends a 'reschedule' event to another CPU: 127 127 */ 128 - extern void smp_send_reschedule(int cpu); 129 - 128 + extern void arch_smp_send_reschedule(int cpu); 129 + /* 130 + * scheduler_ipi() is inline so can't be passed as callback reason, but the 131 + * callsite IP should be sufficient for root-causing IPIs sent from here. 132 + */ 133 + #define smp_send_reschedule(cpu) ({ \ 134 + trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, NULL); \ 135 + arch_smp_send_reschedule(cpu); \ 136 + }) 130 137 131 138 /* 132 139 * Prepare machine for booting other CPUs.
+3
virt/kvm/kvm_main.c
··· 62 62 #include "kvm_mm.h" 63 63 #include "vfio.h" 64 64 65 + #include <trace/events/ipi.h> 66 + 65 67 #define CREATE_TRACE_POINTS 66 68 #include <trace/events/kvm.h> 67 69 68 70 #include <linux/kvm_dirty_ring.h> 71 + 69 72 70 73 /* Worst case buffer size needed for holding an integer. */ 71 74 #define ITOA_MAX_LEN 12