Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fgraph: Pass ftrace_regs to entryfunc

Pass ftrace_regs to the fgraph_ops::entryfunc(). If ftrace_regs is not
available, a NULL is passed instead. The user callback function can access
some registers (including the return address) via this ftrace_regs.

Note that the ftrace_regs can be NULL when the arch does NOT define:
HAVE_DYNAMIC_FTRACE_WITH_ARGS or HAVE_DYNAMIC_FTRACE_WITH_REGS.
More specifically, ftrace_regs can be NULL if HAVE_DYNAMIC_FTRACE_WITH_REGS
is defined but HAVE_DYNAMIC_FTRACE_WITH_ARGS is not, and the ftrace ops
used to register the function callback does not set FTRACE_OPS_FL_SAVE_REGS.
In that case, ftrace_regs will be NULL in the user callback.

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Naveen N Rao <naveen@kernel.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://lore.kernel.org/173518990044.391279.17406984900626078579.stgit@devnote2
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Masami Hiramatsu (Google) and committed by
Steven Rostedt (Google)
41705c42 d576aec2

+114 -42
+14 -1
arch/arm64/kernel/ftrace.c
··· 481 481 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 482 482 struct ftrace_ops *op, struct ftrace_regs *fregs) 483 483 { 484 - prepare_ftrace_return(ip, &arch_ftrace_regs(fregs)->lr, arch_ftrace_regs(fregs)->fp); 484 + unsigned long return_hooker = (unsigned long)&return_to_handler; 485 + unsigned long frame_pointer = arch_ftrace_regs(fregs)->fp; 486 + unsigned long *parent = &arch_ftrace_regs(fregs)->lr; 487 + unsigned long old; 488 + 489 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 490 + return; 491 + 492 + old = *parent; 493 + 494 + if (!function_graph_enter_regs(old, ip, frame_pointer, 495 + (void *)frame_pointer, fregs)) { 496 + *parent = return_hooker; 497 + } 485 498 } 486 499 #else 487 500 /*
+9 -1
arch/loongarch/kernel/ftrace_dyn.c
··· 243 243 { 244 244 struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs; 245 245 unsigned long *parent = (unsigned long *)&regs->regs[1]; 246 + unsigned long return_hooker = (unsigned long)&return_to_handler; 247 + unsigned long old; 246 248 247 - prepare_ftrace_return(ip, (unsigned long *)parent); 249 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 250 + return; 251 + 252 + old = *parent; 253 + 254 + if (!function_graph_enter_regs(old, ip, 0, parent, fregs)) 255 + *parent = return_hooker; 248 256 } 249 257 #else 250 258 static int ftrace_modify_graph_caller(bool enable)
+1 -1
arch/powerpc/kernel/trace/ftrace.c
··· 665 665 if (unlikely(atomic_read(&current->tracing_graph_pause))) 666 666 goto out; 667 667 668 - if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp)) 668 + if (!function_graph_enter_regs(parent_ip, ip, 0, (unsigned long *)sp, fregs)) 669 669 parent_ip = ppc_function_entry(return_to_handler); 670 670 671 671 out:
+6 -4
arch/powerpc/kernel/trace/ftrace_64_pg.c
··· 787 787 * in current thread info. Return the address we want to divert to. 788 788 */ 789 789 static unsigned long 790 - __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp) 790 + __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp, 791 + struct ftrace_regs *fregs) 791 792 { 792 793 unsigned long return_hooker; 793 794 ··· 800 799 801 800 return_hooker = ppc_function_entry(return_to_handler); 802 801 803 - if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp)) 802 + if (!function_graph_enter_regs(parent, ip, 0, (unsigned long *)sp, fregs)) 804 803 parent = return_hooker; 805 804 806 805 out: ··· 811 810 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 812 811 struct ftrace_ops *op, struct ftrace_regs *fregs) 813 812 { 814 - arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, arch_ftrace_regs(fregs)->regs.gpr[1]); 813 + arch_ftrace_regs(fregs)->regs.link = __prepare_ftrace_return(parent_ip, ip, 814 + arch_ftrace_regs(fregs)->regs.gpr[1], fregs); 815 815 } 816 816 #else 817 817 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip, 818 818 unsigned long sp) 819 819 { 820 - return __prepare_ftrace_return(parent, ip, sp); 820 + return __prepare_ftrace_return(parent, ip, sp, NULL); 821 821 } 822 822 #endif 823 823 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+16 -1
arch/riscv/kernel/ftrace.c
··· 214 214 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 215 215 struct ftrace_ops *op, struct ftrace_regs *fregs) 216 216 { 217 - prepare_ftrace_return(&arch_ftrace_regs(fregs)->ra, ip, arch_ftrace_regs(fregs)->s0); 217 + unsigned long return_hooker = (unsigned long)&return_to_handler; 218 + unsigned long frame_pointer = arch_ftrace_regs(fregs)->s0; 219 + unsigned long *parent = &arch_ftrace_regs(fregs)->ra; 220 + unsigned long old; 221 + 222 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 223 + return; 224 + 225 + /* 226 + * We don't suffer access faults, so no extra fault-recovery assembly 227 + * is needed here. 228 + */ 229 + old = *parent; 230 + 231 + if (!function_graph_enter_regs(old, ip, frame_pointer, parent, fregs)) 232 + *parent = return_hooker; 218 233 } 219 234 #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */ 220 235 extern void ftrace_graph_call(void);
+28 -14
arch/x86/kernel/ftrace.c
··· 607 607 } 608 608 #endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */ 609 609 610 - /* 611 - * Hook the return address and push it in the stack of return addrs 612 - * in current thread info. 613 - */ 614 - void prepare_ftrace_return(unsigned long ip, unsigned long *parent, 615 - unsigned long frame_pointer) 610 + static inline bool skip_ftrace_return(void) 616 611 { 617 - unsigned long return_hooker = (unsigned long)&return_to_handler; 618 - 619 612 /* 620 613 * When resuming from suspend-to-ram, this function can be indirectly 621 614 * called from early CPU startup code while the CPU is in real mode, ··· 618 625 * This check isn't as accurate as virt_addr_valid(), but it should be 619 626 * good enough for this purpose, and it's fast. 620 627 */ 621 - if (unlikely((long)__builtin_frame_address(0) >= 0)) 622 - return; 628 + if ((long)__builtin_frame_address(0) >= 0) 629 + return true; 623 630 624 - if (unlikely(ftrace_graph_is_dead())) 625 - return; 631 + if (ftrace_graph_is_dead()) 632 + return true; 626 633 627 - if (unlikely(atomic_read(&current->tracing_graph_pause))) 634 + if (atomic_read(&current->tracing_graph_pause)) 635 + return true; 636 + return false; 637 + } 638 + 639 + /* 640 + * Hook the return address and push it in the stack of return addrs 641 + * in current thread info. 
642 + */ 643 + void prepare_ftrace_return(unsigned long ip, unsigned long *parent, 644 + unsigned long frame_pointer) 645 + { 646 + unsigned long return_hooker = (unsigned long)&return_to_handler; 647 + 648 + if (unlikely(skip_ftrace_return())) 628 649 return; 629 650 630 651 if (!function_graph_enter(*parent, ip, frame_pointer, parent)) ··· 651 644 { 652 645 struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs; 653 646 unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs); 647 + unsigned long return_hooker = (unsigned long)&return_to_handler; 648 + unsigned long *parent = (unsigned long *)stack; 654 649 655 - prepare_ftrace_return(ip, (unsigned long *)stack, 0); 650 + if (unlikely(skip_ftrace_return())) 651 + return; 652 + 653 + 654 + if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs)) 655 + *parent = return_hooker; 656 656 } 657 657 #endif 658 658
+13 -4
include/linux/ftrace.h
··· 1071 1071 typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, 1072 1072 struct fgraph_ops *); /* return */ 1073 1073 typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, 1074 - struct fgraph_ops *); /* entry */ 1074 + struct fgraph_ops *, 1075 + struct ftrace_regs *); /* entry */ 1075 1076 1076 1077 extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, 1077 - struct fgraph_ops *gops); 1078 + struct fgraph_ops *gops, 1079 + struct ftrace_regs *fregs); 1078 1080 bool ftrace_pids_enabled(struct ftrace_ops *ops); 1079 1081 1080 1082 #ifdef CONFIG_FUNCTION_GRAPH_TRACER ··· 1116 1114 extern void return_to_handler(void); 1117 1115 1118 1116 extern int 1119 - function_graph_enter(unsigned long ret, unsigned long func, 1120 - unsigned long frame_pointer, unsigned long *retp); 1117 + function_graph_enter_regs(unsigned long ret, unsigned long func, 1118 + unsigned long frame_pointer, unsigned long *retp, 1119 + struct ftrace_regs *fregs); 1120 + 1121 + static inline int function_graph_enter(unsigned long ret, unsigned long func, 1122 + unsigned long fp, unsigned long *retp) 1123 + { 1124 + return function_graph_enter_regs(ret, func, fp, retp, NULL); 1125 + } 1121 1126 1122 1127 struct ftrace_ret_stack * 1123 1128 ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
+12 -8
kernel/trace/fgraph.c
··· 292 292 } 293 293 294 294 /* ftrace_graph_entry set to this to tell some archs to run function graph */ 295 - static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops) 295 + static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops, 296 + struct ftrace_regs *fregs) 296 297 { 297 298 return 0; 298 299 } ··· 521 520 #endif 522 521 523 522 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, 524 - struct fgraph_ops *gops) 523 + struct fgraph_ops *gops, 524 + struct ftrace_regs *fregs) 525 525 { 526 526 return 0; 527 527 } ··· 646 644 #endif 647 645 648 646 /* If the caller does not use ftrace, call this function. */ 649 - int function_graph_enter(unsigned long ret, unsigned long func, 650 - unsigned long frame_pointer, unsigned long *retp) 647 + int function_graph_enter_regs(unsigned long ret, unsigned long func, 648 + unsigned long frame_pointer, unsigned long *retp, 649 + struct ftrace_regs *fregs) 651 650 { 652 651 struct ftrace_graph_ent trace; 653 652 unsigned long bitmap = 0; ··· 671 668 if (static_branch_likely(&fgraph_do_direct)) { 672 669 int save_curr_ret_stack = current->curr_ret_stack; 673 670 674 - if (static_call(fgraph_func)(&trace, fgraph_direct_gops)) 671 + if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs)) 675 672 bitmap |= BIT(fgraph_direct_gops->idx); 676 673 else 677 674 /* Clear out any saved storage */ ··· 689 686 690 687 save_curr_ret_stack = current->curr_ret_stack; 691 688 if (ftrace_ops_test(&gops->ops, func, NULL) && 692 - gops->entryfunc(&trace, gops)) 689 + gops->entryfunc(&trace, gops, fregs)) 693 690 bitmap |= BIT(i); 694 691 else 695 692 /* Clear out any saved storage */ ··· 1183 1180 1184 1181 #ifdef CONFIG_DYNAMIC_FTRACE 1185 1182 static int fgraph_pid_func(struct ftrace_graph_ent *trace, 1186 - struct fgraph_ops *gops) 1183 + struct fgraph_ops *gops, 1184 + struct ftrace_regs *fregs) 1187 1185 { 1188 1186 struct trace_array *tr = gops->ops.private; 1189 1187 int pid; ··· 
1198 1194 return 0; 1199 1195 } 1200 1196 1201 - return gops->saved_func(trace, gops); 1197 + return gops->saved_func(trace, gops, fregs); 1202 1198 } 1203 1199 1204 1200 void fgraph_update_pid_func(void)
+2 -1
kernel/trace/ftrace.c
··· 819 819 }; 820 820 821 821 static int profile_graph_entry(struct ftrace_graph_ent *trace, 822 - struct fgraph_ops *gops) 822 + struct fgraph_ops *gops, 823 + struct ftrace_regs *fregs) 823 824 { 824 825 struct profile_fgraph_data *profile_data; 825 826
+2 -1
kernel/trace/trace.h
··· 694 694 void print_trace_header(struct seq_file *m, struct trace_iterator *iter); 695 695 696 696 void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops); 697 - int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops); 697 + int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops, 698 + struct ftrace_regs *fregs); 698 699 699 700 void tracing_start_cmdline_record(void); 700 701 void tracing_stop_cmdline_record(void);
+2 -1
kernel/trace/trace_functions_graph.c
··· 175 175 }; 176 176 177 177 int trace_graph_entry(struct ftrace_graph_ent *trace, 178 - struct fgraph_ops *gops) 178 + struct fgraph_ops *gops, 179 + struct ftrace_regs *fregs) 179 180 { 180 181 unsigned long *task_var = fgraph_get_task_var(gops); 181 182 struct trace_array *tr = gops->private;
+2 -1
kernel/trace/trace_irqsoff.c
··· 176 176 } 177 177 178 178 static int irqsoff_graph_entry(struct ftrace_graph_ent *trace, 179 - struct fgraph_ops *gops) 179 + struct fgraph_ops *gops, 180 + struct ftrace_regs *fregs) 180 181 { 181 182 struct trace_array *tr = irqsoff_trace; 182 183 struct trace_array_cpu *data;
+2 -1
kernel/trace/trace_sched_wakeup.c
··· 113 113 } 114 114 115 115 static int wakeup_graph_entry(struct ftrace_graph_ent *trace, 116 - struct fgraph_ops *gops) 116 + struct fgraph_ops *gops, 117 + struct ftrace_regs *fregs) 117 118 { 118 119 struct trace_array *tr = wakeup_trace; 119 120 struct trace_array_cpu *data;
+5 -3
kernel/trace/trace_selftest.c
··· 774 774 }; 775 775 776 776 static __init int store_entry(struct ftrace_graph_ent *trace, 777 - struct fgraph_ops *gops) 777 + struct fgraph_ops *gops, 778 + struct ftrace_regs *fregs) 778 779 { 779 780 struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops); 780 781 const char *type = fixture->store_type_name; ··· 1026 1025 1027 1026 /* Wrap the real function entry probe to avoid possible hanging */ 1028 1027 static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace, 1029 - struct fgraph_ops *gops) 1028 + struct fgraph_ops *gops, 1029 + struct ftrace_regs *fregs) 1030 1030 { 1031 1031 /* This is harmlessly racy, we want to approximately detect a hang */ 1032 1032 if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { ··· 1041 1039 return 0; 1042 1040 } 1043 1041 1044 - return trace_graph_entry(trace, gops); 1042 + return trace_graph_entry(trace, gops, fregs); 1045 1043 } 1046 1044 1047 1045 static struct fgraph_ops fgraph_ops __initdata = {