Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/tracing: Enable HAVE_FTRACE_GRAPH_FUNC

Add ftrace_graph_func() which is required for fprobe to access registers.
This also eliminates the need for calling prepare_ftrace_return() from
ftrace_caller().

Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/173519002875.391279.7060964632119674159.stgit@devnote2
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

authored by

Sven Schnelle and committed by
Steven Rostedt (Google)
7495e179 a762e926

+20 -46
+1
arch/s390/Kconfig
··· 190 190 select HAVE_EFFICIENT_UNALIGNED_ACCESS 191 191 select HAVE_GUP_FAST 192 192 select HAVE_FENTRY 193 + select HAVE_FTRACE_GRAPH_FUNC 193 194 select HAVE_FTRACE_MCOUNT_RECORD 194 195 select HAVE_FUNCTION_ARG_ACCESS_API 195 196 select HAVE_FUNCTION_ERROR_INJECTION
+5
arch/s390/include/asm/ftrace.h
··· 39 39 40 40 struct module; 41 41 struct dyn_ftrace; 42 + struct ftrace_ops; 42 43 43 44 bool ftrace_need_init_nop(void); 44 45 #define ftrace_need_init_nop ftrace_need_init_nop ··· 122 121 */ 123 122 return !strcmp(sym + 7, name) || !strcmp(sym + 8, name); 124 123 } 124 + 125 + void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 126 + struct ftrace_ops *op, struct ftrace_regs *fregs); 127 + #define ftrace_graph_func ftrace_graph_func 125 128 126 129 #endif /* __ASSEMBLY__ */ 127 130
-1
arch/s390/kernel/entry.h
··· 41 41 void __init startup_init(void); 42 42 void die(struct pt_regs *regs, const char *str); 43 43 int setup_profiling_timer(unsigned int multiplier); 44 - unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip); 45 44 46 45 struct s390_mmap_arg_struct; 47 46 struct fadvise64_64_args;
+14 -34
arch/s390/kernel/ftrace.c
··· 261 261 } 262 262 263 263 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 264 - /* 265 - * Hook the return address and push it in the stack of return addresses 266 - * in current thread info. 267 - */ 268 - unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp, 269 - unsigned long ip) 264 + 265 + void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, 266 + struct ftrace_ops *op, struct ftrace_regs *fregs) 270 267 { 268 + unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14]; 269 + int bit; 270 + 271 271 if (unlikely(ftrace_graph_is_dead())) 272 - goto out; 272 + return; 273 273 if (unlikely(atomic_read(&current->tracing_graph_pause))) 274 - goto out; 275 - ip -= MCOUNT_INSN_SIZE; 276 - if (!function_graph_enter(ra, ip, 0, (void *) sp)) 277 - ra = (unsigned long) return_to_handler; 278 - out: 279 - return ra; 280 - } 281 - NOKPROBE_SYMBOL(prepare_ftrace_return); 282 - 283 - /* 284 - * Patch the kernel code at ftrace_graph_caller location. The instruction 285 - * there is branch relative on condition. To enable the ftrace graph code 286 - * block, we simply patch the mask field of the instruction to zero and 287 - * turn the instruction into a nop. 288 - * To disable the ftrace graph code the mask field will be patched to 289 - * all ones, which turns the instruction into an unconditional branch. 290 - */ 291 - int ftrace_enable_ftrace_graph_caller(void) 292 - { 293 - /* Expect brc 0xf,... */ 294 - return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false); 295 - } 296 - 297 - int ftrace_disable_ftrace_graph_caller(void) 298 - { 299 - /* Expect brc 0x0,... */ 300 - return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true); 274 + return; 275 + bit = ftrace_test_recursion_trylock(ip, *parent); 276 + if (bit < 0) 277 + return; 278 + if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs)) 279 + *parent = (unsigned long)&return_to_handler; 280 + ftrace_test_recursion_unlock(bit); 301 281 } 302 282 303 283 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-11
arch/s390/kernel/mcount.S
··· 104 104 lgr %r3,%r14 105 105 la %r5,STACK_FREGS(%r15) 106 106 BASR_EX %r14,%r1 107 - #ifdef CONFIG_FUNCTION_GRAPH_TRACER 108 - # The j instruction gets runtime patched to a nop instruction. 109 - # See ftrace_enable_ftrace_graph_caller. 110 - SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL) 111 - j .Lftrace_graph_caller_end 112 - lmg %r2,%r3,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15) 113 - lg %r4,(STACK_FREGS_PTREGS_PSW+8)(%r15) 114 - brasl %r14,prepare_ftrace_return 115 - stg %r2,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15) 116 - .Lftrace_graph_caller_end: 117 - #endif 118 107 lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15) 119 108 #ifdef MARCH_HAS_Z196_FEATURES 120 109 ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)