[S390] ftrace: add function graph tracer support

Function graph tracer support for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens and committed by Martin Schwidefsky (commits 88dbd203, 8b4488f8).

+166 -12
+1
arch/s390/Kconfig
··· 85 85 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 86 86 select HAVE_FTRACE_MCOUNT_RECORD 87 87 select HAVE_DYNAMIC_FTRACE 88 + select HAVE_FUNCTION_GRAPH_TRACER 88 89 select HAVE_DEFAULT_NO_SPIN_MUTEXES 89 90 select HAVE_OPROFILE 90 91 select HAVE_KPROBES
+6 -4
arch/s390/include/asm/ftrace.h
··· 11 11 #define MCOUNT_ADDR ((long)_mcount) 12 12 13 13 #ifdef CONFIG_64BIT 14 - #define MCOUNT_INSN_SIZE 24 15 - #define MCOUNT_OFFSET 14 14 + #define MCOUNT_OFFSET_RET 18 15 + #define MCOUNT_INSN_SIZE 24 16 + #define MCOUNT_OFFSET 14 16 17 #else 17 - #define MCOUNT_INSN_SIZE 30 18 - #define MCOUNT_OFFSET 8 18 + #define MCOUNT_OFFSET_RET 26 19 + #define MCOUNT_INSN_SIZE 30 20 + #define MCOUNT_OFFSET 8 19 21 #endif 20 22 21 23 static inline unsigned long ftrace_call_adjust(unsigned long addr)
+2 -4
arch/s390/kernel/Makefile
··· 3 3 # 4 4 5 5 ifdef CONFIG_FUNCTION_TRACER 6 - # Do not trace early boot code 6 + # Don't trace early setup code and tracing code 7 7 CFLAGS_REMOVE_early.o = -pg 8 - endif 9 - 10 - ifdef CONFIG_DYNAMIC_FTRACE 11 8 CFLAGS_REMOVE_ftrace.o = -pg 12 9 endif 13 10 ··· 43 46 obj-$(CONFIG_KPROBES) += kprobes.o 44 47 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 45 48 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 49 + obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 46 50 47 51 # Kexec part 48 52 S390_KEXEC_OBJS := machine_kexec.o crash.o
+72
arch/s390/kernel/ftrace.c
··· 7 7 * 8 8 */ 9 9 10 + #include <linux/hardirq.h> 10 11 #include <linux/uaccess.h> 11 12 #include <linux/ftrace.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/types.h> 14 15 #include <asm/lowcore.h> 15 16 17 + #ifdef CONFIG_DYNAMIC_FTRACE 18 + 16 19 void ftrace_disable_code(void); 20 + void ftrace_disable_return(void); 17 21 void ftrace_call_code(void); 18 22 void ftrace_nop_code(void); 19 23 ··· 32 28 " .word 0x0024\n" 33 29 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 34 30 " basr %r14,%r1\n" 31 + "ftrace_disable_return:\n" 35 32 " lg %r14,8(15)\n" 36 33 " lgr %r0,%r0\n" 37 34 "0:\n"); ··· 55 50 " j 0f\n" 56 51 " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 57 52 " basr %r14,%r1\n" 53 + "ftrace_disable_return:\n" 58 54 " l %r14,4(%r15)\n" 59 55 " j 0f\n" 60 56 " bcr 0,%r7\n" ··· 136 130 *(unsigned long *)data = 0; 137 131 return 0; 138 132 } 133 + 134 + #endif /* CONFIG_DYNAMIC_FTRACE */ 135 + 136 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 137 + #ifdef CONFIG_DYNAMIC_FTRACE 138 + /* 139 + * Patch the kernel code at ftrace_graph_caller location: 140 + * The instruction there is branch relative on condition. The condition mask 141 + * is either all ones (always branch aka disable ftrace_graph_caller) or all 142 + * zeroes (nop aka enable ftrace_graph_caller). 143 + * Instruction format for brc is a7m4xxxx where m is the condition mask. 
144 + */ 145 + int ftrace_enable_ftrace_graph_caller(void) 146 + { 147 + unsigned short opcode = 0xa704; 148 + 149 + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); 150 + } 151 + 152 + int ftrace_disable_ftrace_graph_caller(void) 153 + { 154 + unsigned short opcode = 0xa7f4; 155 + 156 + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); 157 + } 158 + 159 + static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) 160 + { 161 + return addr - (ftrace_disable_return - ftrace_disable_code); 162 + } 163 + 164 + #else /* CONFIG_DYNAMIC_FTRACE */ 165 + 166 + static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) 167 + { 168 + return addr - MCOUNT_OFFSET_RET; 169 + } 170 + 171 + #endif /* CONFIG_DYNAMIC_FTRACE */ 172 + 173 + /* 174 + * Hook the return address and push it in the stack of return addresses 175 + * in current thread info. 176 + */ 177 + unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) 178 + { 179 + struct ftrace_graph_ent trace; 180 + 181 + /* Nmi's are currently unsupported. */ 182 + if (unlikely(in_nmi())) 183 + goto out; 184 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 185 + goto out; 186 + if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY) 187 + goto out; 188 + trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; 189 + /* Only trace if the calling function expects to. */ 190 + if (!ftrace_graph_entry(&trace)) { 191 + current->curr_ret_stack--; 192 + goto out; 193 + } 194 + parent = (unsigned long)return_to_handler; 195 + out: 196 + return parent; 197 + } 198 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+79
arch/s390/kernel/mcount.S
··· 34 34 larl %r14,ftrace_dyn_func 35 35 lg %r14,0(%r14) 36 36 basr %r14,%r14 37 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 38 + .globl ftrace_graph_caller 39 + ftrace_graph_caller: 40 + # This unconditional branch gets runtime patched. Change only if 41 + # you know what you are doing. See ftrace_enable_graph_caller(). 42 + j 0f 43 + lg %r2,272(%r15) 44 + lg %r3,168(%r15) 45 + brasl %r14,prepare_ftrace_return 46 + stg %r2,168(%r15) 47 + 0: 48 + #endif 37 49 aghi %r15,160 38 50 lmg %r2,%r5,32(%r15) 39 51 lg %r14,112(%r15) ··· 74 62 larl %r14,ftrace_trace_function 75 63 lg %r14,0(%r14) 76 64 basr %r14,%r14 65 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 66 + lg %r2,272(%r15) 67 + lg %r3,168(%r15) 68 + brasl %r14,prepare_ftrace_return 69 + stg %r2,168(%r15) 70 + #endif 77 71 aghi %r15,160 78 72 lmg %r2,%r5,32(%r15) 79 73 lg %r14,112(%r15) 80 74 br %r14 81 75 82 76 #endif /* CONFIG_DYNAMIC_FTRACE */ 77 + 78 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 79 + 80 + .globl return_to_handler 81 + return_to_handler: 82 + stmg %r2,%r5,32(%r15) 83 + lgr %r1,%r15 84 + aghi %r15,-160 85 + stg %r1,__SF_BACKCHAIN(%r15) 86 + brasl %r14,ftrace_return_to_handler 87 + aghi %r15,160 88 + lgr %r14,%r2 89 + lmg %r2,%r5,32(%r15) 90 + br %r14 91 + 92 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 83 93 84 94 #else /* CONFIG_64BIT */ 85 95 ··· 130 96 l %r14,0b-0b(%r1) 131 97 l %r14,0(%r14) 132 98 basr %r14,%r14 99 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 100 + .globl ftrace_graph_caller 101 + ftrace_graph_caller: 102 + # This unconditional branch gets runtime patched. Change only if 103 + # you know what you are doing. See ftrace_enable_graph_caller(). 
104 + j 1f 105 + bras %r1,0f 106 + .long prepare_ftrace_return 107 + 0: l %r2,152(%r15) 108 + l %r4,0(%r1) 109 + l %r3,100(%r15) 110 + basr %r14,%r4 111 + st %r2,100(%r15) 112 + 1: 113 + #endif 133 114 ahi %r15,96 134 115 l %r14,56(%r15) 135 116 3: lm %r2,%r5,16(%r15) ··· 177 128 l %r14,0b-0b(%r1) 178 129 l %r14,0(%r14) 179 130 basr %r14,%r14 131 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 132 + bras %r1,0f 133 + .long prepare_ftrace_return 134 + 0: l %r2,152(%r15) 135 + l %r4,0(%r1) 136 + l %r3,100(%r15) 137 + basr %r14,%r4 138 + st %r2,100(%r15) 139 + #endif 180 140 ahi %r15,96 181 141 l %r14,56(%r15) 182 142 3: lm %r2,%r5,16(%r15) 183 143 br %r14 184 144 185 145 #endif /* CONFIG_DYNAMIC_FTRACE */ 146 + 147 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 148 + 149 + .globl return_to_handler 150 + return_to_handler: 151 + stm %r2,%r5,16(%r15) 152 + st %r14,56(%r15) 153 + lr %r0,%r15 154 + ahi %r15,-96 155 + st %r0,__SF_BACKCHAIN(%r15) 156 + bras %r1,0f 157 + .long ftrace_return_to_handler 158 + 0: l %r2,0b-0b(%r1) 159 + basr %r14,%r2 160 + lr %r14,%r2 161 + ahi %r15,96 162 + lm %r2,%r5,16(%r15) 163 + br %r14 164 + 165 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 166 + 186 167 #endif /* CONFIG_64BIT */
+2 -1
arch/s390/kernel/s390_ext.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/kernel.h> 12 12 #include <linux/slab.h> 13 + #include <linux/ftrace.h> 13 14 #include <linux/errno.h> 14 15 #include <linux/kernel_stat.h> 15 16 #include <linux/interrupt.h> ··· 113 112 return 0; 114 113 } 115 114 116 - void do_extint(struct pt_regs *regs, unsigned short code) 115 + void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) 117 116 { 118 117 ext_int_info_t *p; 119 118 int index;
+1 -1
arch/s390/kernel/time.c
··· 70 70 /* 71 71 * Scheduler clock - returns current time in nanosec units. 72 72 */ 73 - unsigned long long sched_clock(void) 73 + unsigned long long notrace sched_clock(void) 74 74 { 75 75 return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; 76 76 }
+1
arch/s390/kernel/vmlinux.lds.S
··· 34 34 SCHED_TEXT 35 35 LOCK_TEXT 36 36 KPROBES_TEXT 37 + IRQENTRY_TEXT 37 38 *(.fixup) 38 39 *(.gnu.warning) 39 40 } :text = 0x0700
+2 -2
drivers/s390/cio/cio.c
··· 12 12 #define KMSG_COMPONENT "cio" 13 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 14 15 + #include <linux/ftrace.h> 15 16 #include <linux/module.h> 16 17 #include <linux/init.h> 17 18 #include <linux/slab.h> ··· 627 626 * handlers). 628 627 * 629 628 */ 630 - void 631 - do_IRQ (struct pt_regs *regs) 629 + void __irq_entry do_IRQ(struct pt_regs *regs) 632 630 { 633 631 struct tpi_info *tpi_info; 634 632 struct subchannel *sch;