[S390] ftrace: add function graph tracer support

Add function graph tracer support for s390. The mcount code gains an ftrace_graph_caller hook that hands the saved return address to prepare_ftrace_return(), which redirects returning functions through return_to_handler. The interrupt entry points do_extint() and do_IRQ() are annotated with __irq_entry and collected in IRQENTRY_TEXT so the tracer can mark hardirqs, and sched_clock() is marked notrace since the tracer uses it for timestamps.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by Heiko Carstens and committed by Martin Schwidefsky 88dbd203 8b4488f8

+166 -12
+1
arch/s390/Kconfig
···
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DEFAULT_NO_SPIN_MUTEXES
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
+6 -4
arch/s390/include/asm/ftrace.h
···
 #define MCOUNT_ADDR ((long)_mcount)
 
 #ifdef CONFIG_64BIT
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET     14
+#define MCOUNT_OFFSET_RET 18
+#define MCOUNT_INSN_SIZE  24
+#define MCOUNT_OFFSET     14
 #else
-#define MCOUNT_INSN_SIZE  30
-#define MCOUNT_OFFSET     8
+#define MCOUNT_OFFSET_RET 26
+#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_OFFSET     8
 #endif
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
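For orientation: MCOUNT_OFFSET_RET gives the distance from the start of the mcount call block to the point where execution resumes after the basr, mirroring the role of the ftrace_disable_return label added in ftrace.c for the dynamic case. prepare_ftrace_return() subtracts it (via ftrace_mcount_call_adjust()) from the address it sees to compute the function address it reports. A worked example with the 64-bit value — the address is invented:

#include <stdio.h>

#define MCOUNT_OFFSET_RET 18	/* 64-bit value from the hunk above */

int main(void)
{
	/* hypothetical return point after the basr in some mcount block */
	unsigned long ip = 0x10008012UL;

	/* same arithmetic as the !CONFIG_DYNAMIC_FTRACE
	 * ftrace_mcount_call_adjust() added in ftrace.c below */
	printf("reported function address: %#lx\n", ip - MCOUNT_OFFSET_RET);
	return 0;
}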
+2 -4
arch/s390/kernel/Makefile
···
 #
 
 ifdef CONFIG_FUNCTION_TRACER
-# Do not trace early boot code
+# Don't trace early setup code and tracing code
 CFLAGS_REMOVE_early.o = -pg
-endif
-
-ifdef CONFIG_DYNAMIC_FTRACE
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
···
 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 # Kexec part
 S390_KEXEC_OBJS := machine_kexec.o crash.o
+72
arch/s390/kernel/ftrace.c
···
  *
  */
 
+#include <linux/hardirq.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <asm/lowcore.h>
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+
 void ftrace_disable_code(void);
+void ftrace_disable_return(void);
 void ftrace_call_code(void);
 void ftrace_nop_code(void);
···
 	"	.word	0x0024\n"
 	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
 	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
 	"	lg	%r14,8(15)\n"
 	"	lgr	%r0,%r0\n"
 	"0:\n");
···
 	"	j	0f\n"
 	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
 	"	basr	%r14,%r1\n"
+	"ftrace_disable_return:\n"
 	"	l	%r14,4(%r15)\n"
 	"	j	0f\n"
 	"	bcr	0,%r7\n"
···
 	*(unsigned long *)data = 0;
 	return 0;
 }
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at ftrace_graph_caller location:
+ * The instruction there is branch relative on condition. The condition mask
+ * is either all ones (always branch aka disable ftrace_graph_caller) or all
+ * zeroes (nop aka enable ftrace_graph_caller).
+ * Instruction format for brc is a7m4xxxx where m is the condition mask.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa704;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned short opcode = 0xa7f4;
+
+	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
+}
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - (ftrace_disable_return - ftrace_disable_code);
+}
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
+{
+	return addr - MCOUNT_OFFSET_RET;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addresses
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+{
+	struct ftrace_graph_ent trace;
+
+	/* Nmi's are currently unsupported. */
+	if (unlikely(in_nmi()))
+		goto out;
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		goto out;
+	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
+		goto out;
+	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	/* Only trace if the calling function expects to. */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		goto out;
+	}
+	parent = (unsigned long)return_to_handler;
+out:
+	return parent;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
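Note that only the condition-mask nibble of the brc instruction is rewritten; the relative offset (the xxxx half of a7m4xxxx) is left untouched, so toggling the graph caller is a single two-byte write. A standalone sketch of that encoding — user-space C, and brc_halfword() is our name, not a kernel helper:

#include <stdio.h>

/* First halfword of "brc m,target": opcode 0xa7, mask nibble m, then 4. */
static unsigned short brc_halfword(unsigned int mask)
{
	return (unsigned short)(0xa704 | ((mask & 0xf) << 4));
}

int main(void)
{
	printf("enable  (mask 0x0, never branch/nop): 0x%04x\n", brc_halfword(0x0));
	printf("disable (mask 0xf, always branch):    0x%04x\n", brc_halfword(0xf));
	return 0;
}

This prints exactly the two opcodes written by ftrace_enable_ftrace_graph_caller() and ftrace_disable_ftrace_graph_caller() above.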
+79
arch/s390/kernel/mcount.S
···
 	larl	%r14,ftrace_dyn_func
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	# This unconditional branch gets runtime patched. Change only if
+	# you know what you are doing. See ftrace_enable_ftrace_graph_caller().
+	j	0f
+	lg	%r2,272(%r15)
+	lg	%r3,168(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,168(%r15)
+0:
+#endif
 	aghi	%r15,160
 	lmg	%r2,%r5,32(%r15)
 	lg	%r14,112(%r15)
···
 	larl	%r14,ftrace_trace_function
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	lg	%r2,272(%r15)
+	lg	%r3,168(%r15)
+	brasl	%r14,prepare_ftrace_return
+	stg	%r2,168(%r15)
+#endif
 	aghi	%r15,160
 	lmg	%r2,%r5,32(%r15)
 	lg	%r14,112(%r15)
 	br	%r14
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+	.globl	return_to_handler
+return_to_handler:
+	stmg	%r2,%r5,32(%r15)
+	lgr	%r1,%r15
+	aghi	%r15,-160
+	stg	%r1,__SF_BACKCHAIN(%r15)
+	brasl	%r14,ftrace_return_to_handler
+	aghi	%r15,160
+	lgr	%r14,%r2
+	lmg	%r2,%r5,32(%r15)
+	br	%r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #else /* CONFIG_64BIT */
···
 	l	%r14,0b-0b(%r1)
 	l	%r14,0(%r14)
 	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_caller
+ftrace_graph_caller:
+	# This unconditional branch gets runtime patched. Change only if
+	# you know what you are doing. See ftrace_enable_ftrace_graph_caller().
+	j	1f
+	bras	%r1,0f
+	.long	prepare_ftrace_return
+0:	l	%r2,152(%r15)
+	l	%r4,0(%r1)
+	l	%r3,100(%r15)
+	basr	%r14,%r4
+	st	%r2,100(%r15)
+1:
+#endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
 3:	lm	%r2,%r5,16(%r15)
···
 	l	%r14,0b-0b(%r1)
 	l	%r14,0(%r14)
 	basr	%r14,%r14
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	bras	%r1,0f
+	.long	prepare_ftrace_return
+0:	l	%r2,152(%r15)
+	l	%r4,0(%r1)
+	l	%r3,100(%r15)
+	basr	%r14,%r4
+	st	%r2,100(%r15)
+#endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
 3:	lm	%r2,%r5,16(%r15)
 	br	%r14
 
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+	.globl	return_to_handler
+return_to_handler:
+	stm	%r2,%r5,16(%r15)
+	st	%r14,56(%r15)
+	lr	%r0,%r15
+	ahi	%r15,-96
+	st	%r0,__SF_BACKCHAIN(%r15)
+	bras	%r1,0f
+	.long	ftrace_return_to_handler
+0:	l	%r2,0b-0b(%r1)
+	basr	%r14,%r2
+	lr	%r14,%r2
+	ahi	%r15,96
+	lm	%r2,%r5,16(%r15)
+	br	%r14
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 #endif /* CONFIG_64BIT */
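Taken together, the stubs implement the usual graph-tracer handshake: at entry, ftrace_graph_caller hands the traced function's saved return address to prepare_ftrace_return(), which may replace it with return_to_handler; at exit, return_to_handler retrieves the real return address from the per-task return stack via the generic ftrace_return_to_handler() and branches there. A user-space toy model of that return-address stack — all names and sizes below are ours, the kernel keeps the real stack in the task's thread info:

#include <stdio.h>

#define RET_STACK_DEPTH 8

static unsigned long ret_stack[RET_STACK_DEPTH];
static int curr_ret_stack = -1;

/* entry side: remember the real return address, divert to the trampoline */
static unsigned long hook_entry(unsigned long parent)
{
	if (curr_ret_stack + 1 >= RET_STACK_DEPTH)
		return parent;		/* like the -EBUSY case: don't divert */
	ret_stack[++curr_ret_stack] = parent;
	return 0xdeadbeefUL;		/* stands in for return_to_handler */
}

/* exit side: hand the real return address back to the trampoline */
static unsigned long hook_exit(void)
{
	return ret_stack[curr_ret_stack--];
}

int main(void)
{
	unsigned long fake = hook_entry(0x10008000UL);

	printf("diverted return: %#lx\n", fake);
	printf("real return:     %#lx\n", hook_exit());
	return 0;
}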
+2 -1
arch/s390/kernel/s390_ext.c
···
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/ftrace.h>
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
···
 	return 0;
 }
 
-void do_extint(struct pt_regs *regs, unsigned short code)
+void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
 {
 	ext_int_info_t *p;
 	int index;
+1 -1
arch/s390/kernel/time.c
···
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long sched_clock(void)
+unsigned long long notrace sched_clock(void)
 {
 	return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9;
 }
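The notrace annotation is needed because the graph tracer timestamps every entry and exit through sched_clock(); if sched_clock() itself carried an mcount call, each timestamp would recurse into the tracer. A minimal sketch, assuming the definition of notrace from include/linux/compiler.h of this era:

/* assumed definition, per include/linux/compiler.h */
#define notrace __attribute__((no_instrument_function))

/* Built with -pg, gcc omits the mcount call for this function, so the
 * tracer can call it from its own hot path without recursing. */
unsigned long long notrace toy_clock(void)	/* hypothetical stand-in */
{
	return 42ULL;
}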
+1
arch/s390/kernel/vmlinux.lds.S
···
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.fixup)
 		*(.gnu.warning)
 	} :text = 0x0700
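IRQENTRY_TEXT gathers the .irqentry.text section into the kernel text and exports its bounds; handlers annotated __irq_entry (do_extint above, do_IRQ below) land in it, and the graph tracer compares traced addresses against those bounds to draw its hardirq entry/exit markers. A minimal sketch of the mechanism, assuming the generic definitions from include/linux/ftrace.h and the common linker script:

/* assumed: __irq_entry as defined in include/linux/ftrace.h */
#define __irq_entry __attribute__((__section__(".irqentry.text")))

/* section bounds emitted by IRQENTRY_TEXT in the linker script */
extern char __irqentry_text_start[], __irqentry_text_end[];

/* the kind of bounds check the graph tracer performs on an address */
static int in_irqentry_text(unsigned long addr)
{
	return addr >= (unsigned long)__irqentry_text_start &&
	       addr <  (unsigned long)__irqentry_text_end;
}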
+2 -2
drivers/s390/cio/cio.c
···
 #define KMSG_COMPONENT "cio"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/ftrace.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
···
  * handlers).
  *
  */
-void
-do_IRQ (struct pt_regs *regs)
+void __irq_entry do_IRQ(struct pt_regs *regs)
 {
 	struct tpi_info *tpi_info;
 	struct subchannel *sch;