Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[S390] cleanup ftrace backend functions

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Author:    Martin Schwidefsky <schwidefsky@de.ibm.com>
Committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Commit:    4cc9bed0 (parent: 5e9a2692)

4 files changed, 135 insertions(+), 171 deletions(-)

arch/s390/include/asm/ftrace.h (+4 -7)
···
 #ifndef __ASSEMBLY__

 extern void _mcount(void);
-extern unsigned long ftrace_dyn_func;

 struct dyn_arch_ftrace { };

 #define MCOUNT_ADDR ((long)_mcount)

 #ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 18
-#define MCOUNT_INSN_SIZE  24
-#define MCOUNT_OFFSET     14
-#else
-#define MCOUNT_OFFSET_RET 26
-#define MCOUNT_INSN_SIZE  30
+#define MCOUNT_INSN_SIZE  12
 #define MCOUNT_OFFSET      8
+#else
+#define MCOUNT_INSN_SIZE  20
+#define MCOUNT_OFFSET      4
 #endif

 static inline unsigned long ftrace_call_adjust(unsigned long addr)
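The new constants line up with the instruction layouts documented in the ftrace.c
comments below. A standalone sanity check of that arithmetic (illustrative C, not
kernel code; reading MCOUNT_OFFSET_RET as the distance from the shared return point
back to the patched call site is an assumption based on its use in
prepare_ftrace_return):

#include <assert.h>

int main(void)
{
	/* 64-bit: the patched call site is larl (6 bytes) + brasl (6 bytes),
	 * starting at offset 6; the common return point is offset 18. */
	assert(6 + 6 == 12);	/* new MCOUNT_INSN_SIZE */
	assert(18 - 12 == 6);	/* MCOUNT_OFFSET_RET: return point -> call site */

	/* 31-bit: the patched region spans offsets 4..24; return point is 26. */
	assert(24 - 4 == 20);	/* new MCOUNT_INSN_SIZE */
	assert(26 - 22 == 4);	/* MCOUNT_OFFSET_RET: return point -> call site */
	return 0;
}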
arch/s390/kernel/ftrace.c (+117 -121)
···
  * Copyright IBM Corp. 2009
  *
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
+ *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  */

 #include <linux/hardirq.h>
···
 #include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/kprobes.h>
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
+
+#ifdef CONFIG_64BIT
+#define MCOUNT_OFFSET_RET 12
+#else
+#define MCOUNT_OFFSET_RET 22
+#endif

 #ifdef CONFIG_DYNAMIC_FTRACE

 void ftrace_disable_code(void);
-void ftrace_disable_return(void);
-void ftrace_call_code(void);
-void ftrace_nop_code(void);
-
-#define FTRACE_INSN_SIZE 4
+void ftrace_enable_insn(void);

 #ifdef CONFIG_64BIT
-
+/*
+ * The 64-bit mcount code looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	larl	%r1,<&counter>		# offset 6
+ * >	brasl	%r14,_mcount		# offset 12
+ *	lg	%r14,8(%r15)		# offset 18
+ * Total length is 24 bytes. The middle two instructions of the mcount
+ * block get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 64-bit enabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	lg	%r1,__LC_FTRACE_FUNC	# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%r15)		# offset 18
+ * The return points of the mcount/ftrace function have the same offset 18.
+ * The 64-bit disabled ftrace code block looks like this:
+ *	stg	%r14,8(%r15)		# offset 0
+ * >	jg	.+18			# offset 6
+ * >	lgr	%r0,%r0			# offset 12
+ * >	basr	%r14,%r1		# offset 16
+ *	lg	%r14,8(%r15)		# offset 18
+ * The jg instruction branches to offset 24 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
-	"	j	0f\n"
-	"	.word	0x0024\n"
-	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	lg	%r14,8(15)\n"
+	"	jg	0f\n"
 	"	lgr	%r0,%r0\n"
-	"0:\n");
-
-asm(
+	"	basr	%r14,%r1\n"
+	"0:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	lg	%r1,"__stringify(__LC_FTRACE_FUNC)"\n");

-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	stg	%r14,8(%r15)\n");
+#define FTRACE_INSN_SIZE	6

 #else /* CONFIG_64BIT */
-
+/*
+ * The 31-bit mcount code looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	bras	%r1,0f			# offset 4
+ * >	.long	_mcount			# offset 8
+ * >	.long	<&counter>		# offset 12
+ * > 0:	l	%r14,0(%r1)		# offset 16
+ * >	l	%r1,4(%r1)		# offset 20
+ *	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * Total length is 30 bytes. The twenty bytes starting from offset 4
+ * to offset 24 get overwritten by ftrace_make_nop / ftrace_make_call.
+ * The 31-bit enabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	l	%r14,__LC_FTRACE_FUNC	# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ *   0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * The return points of the mcount/ftrace function have the same offset 26.
+ * The 31-bit disabled ftrace code block looks like this:
+ *	st	%r14,4(%r15)		# offset 0
+ * >	j	.+26			# offset 4
+ * >	j	0f			# offset 8
+ * >	.fill	12,1,0x07		# offset 12
+ *   0:	basr	%r14,%r14		# offset 24
+ *	l	%r14,4(%r15)		# offset 26
+ * The j instruction branches to offset 30 to skip as many instructions
+ * as possible.
+ */
 asm(
 	"	.align	4\n"
 	"ftrace_disable_code:\n"
+	"	j	1f\n"
 	"	j	0f\n"
-	"	l	%r1,"__stringify(__LC_FTRACE_FUNC)"\n"
-	"	basr	%r14,%r1\n"
-	"ftrace_disable_return:\n"
-	"	l	%r14,4(%r15)\n"
-	"	j	0f\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"	bcr	0,%r7\n"
-	"0:\n");
-
-asm(
+	"	.fill	12,1,0x07\n"
+	"0:	basr	%r14,%r14\n"
+	"1:\n"
 	"	.align	4\n"
-	"ftrace_nop_code:\n"
-	"	j	.+"__stringify(MCOUNT_INSN_SIZE)"\n");
+	"ftrace_enable_insn:\n"
+	"	l	%r14,"__stringify(__LC_FTRACE_FUNC)"\n");

-asm(
-	"	.align	4\n"
-	"ftrace_call_code:\n"
-	"	st	%r14,4(%r15)\n");
+#define FTRACE_INSN_SIZE	4

 #endif /* CONFIG_64BIT */

-static int ftrace_modify_code(unsigned long ip,
-			      void *old_code, int old_size,
-			      void *new_code, int new_size)
-{
-	unsigned char replaced[MCOUNT_INSN_SIZE];
-
-	/*
-	 * Note: Due to modules code can disappear and change.
-	 * We need to protect against faulting as well as code
-	 * changing. We do this by using the probe_kernel_*
-	 * functions.
-	 * This however is just a simple sanity check.
-	 */
-	if (probe_kernel_read(replaced, (void *)ip, old_size))
-		return -EFAULT;
-	if (memcmp(replaced, old_code, old_size) != 0)
-		return -EINVAL;
-	if (probe_kernel_write((void *)ip, new_code, new_size))
-		return -EPERM;
-	return 0;
-}
-
-static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
-				   unsigned long addr)
-{
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_disable_code, MCOUNT_INSN_SIZE);
-}

 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	if (addr == MCOUNT_ADDR)
-		return ftrace_make_initial_nop(mod, rec, addr);
-	return ftrace_modify_code(rec->ip,
-				  ftrace_call_code, FTRACE_INSN_SIZE,
-				  ftrace_nop_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_disable_code,
+			       MCOUNT_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }

 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	return ftrace_modify_code(rec->ip,
-				  ftrace_nop_code, FTRACE_INSN_SIZE,
-				  ftrace_call_code, FTRACE_INSN_SIZE);
+	if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn,
+			       FTRACE_INSN_SIZE))
+		return -EPERM;
+	return 0;
 }

 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	ftrace_dyn_func = (unsigned long)func;
 	return 0;
 }

 int __init ftrace_dyn_arch_init(void *data)
 {
-	*(unsigned long *)data = 0;
+	*(unsigned long *) data = 0;
 	return 0;
 }

 #endif /* CONFIG_DYNAMIC_FTRACE */

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-/*
- * Patch the kernel code at ftrace_graph_caller location:
- * The instruction there is branch relative on condition. The condition mask
- * is either all ones (always branch aka disable ftrace_graph_caller) or all
- * zeroes (nop aka enable ftrace_graph_caller).
- * Instruction format for brc is a7m4xxxx where m is the condition mask.
- */
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa704;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	unsigned short opcode = 0xa7f4;
-
-	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
-}
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - (ftrace_disable_return - ftrace_disable_code);
-}
-
-#else /* CONFIG_DYNAMIC_FTRACE */
-
-static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
-{
-	return addr - MCOUNT_OFFSET_RET;
-}
-
-#endif /* CONFIG_DYNAMIC_FTRACE */
-
 /*
  * Hook the return address and push it in the stack of return addresses
  * in current thread info.
  */
-unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
+unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
+					      unsigned long ip)
 {
 	struct ftrace_graph_ent trace;

···
 		goto out;
 	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
-	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
+	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
 	/* Only trace if the calling function expects to. */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
 		goto out;
 	}
-	parent = (unsigned long)return_to_handler;
+	parent = (unsigned long) return_to_handler;
 out:
 	return parent;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * Patch the kernel code at the ftrace_graph_caller location. The instruction
+ * there is a branch relative and save to prepare_ftrace_return. To disable
+ * the call to prepare_ftrace_return we patch the bras offset to point
+ * directly after the instruction. To enable the call we calculate
+ * the original offset to prepare_ftrace_return and put it back.
+ */
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned short offset;
+
+	offset = ((void *) prepare_ftrace_return -
+		  (void *) ftrace_graph_caller) / 2;
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	static unsigned short offset = 0x0002;
+
+	return probe_kernel_write(ftrace_graph_caller + 2,
+				  &offset, sizeof(offset));
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
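The new ftrace_enable_ftrace_graph_caller relies on s390 relative branches
counting their 16-bit immediate in halfwords from the instruction address,
with the immediate stored 2 bytes into the 4-byte bras. A minimal host-side
sketch of the same computation, with memcpy on a scratch buffer standing in
for probe_kernel_write on kernel text:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Install the 16-bit immediate of a 4-byte "bras" so it branches to
 * target: relative branches count halfwords (hence /2), and the
 * immediate sits 2 bytes in (hence ftrace_graph_caller + 2). Byte
 * order is ignored here; the native store on s390 is big-endian. */
static uint16_t patch_bras(uint8_t *insn, const uint8_t *target)
{
	uint16_t offset = (uint16_t)((target - insn) / 2);

	memcpy(insn + 2, &offset, sizeof(offset));
	return offset;
}

int main(void)
{
	uint8_t text[32] = { 0 };

	/* enable: aim at a stand-in prepare_ftrace_return 24 bytes ahead */
	printf("enabled  offset: %#06x\n", patch_bras(&text[0], &text[24]));
	/* disable: aim at the next instruction, 4 bytes = 2 halfwords */
	printf("disabled offset: %#06x\n", patch_bras(&text[0], &text[4]));
	return 0;
}

The disabled value comes out as 0x0002, two halfwords past the bras, i.e. the
instruction directly after it, matching the static offset in
ftrace_disable_ftrace_graph_caller.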
arch/s390/kernel/mcount.S (+7 -23)
···
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14

-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.long	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
 	stm	%r2,%r5,16(%r15)
 	bras	%r1,2f
-#ifdef CONFIG_DYNAMIC_FTRACE
-0:	.long	ftrace_dyn_func
-#else
 0:	.long	ftrace_trace_function
-#endif
 1:	.long	function_trace_stop
 2:	l	%r2,1b-0b(%r1)
 	icm	%r2,0xf,0(%r2)
···
 	l	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	l	%r2,100(%r15)
+	l	%r3,152(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	1f
-#endif
-	bras	%r1,0f
-	.long	prepare_ftrace_return
-0:	l	%r2,152(%r15)
-	l	%r4,0(%r1)
-	l	%r3,100(%r15)
-	basr	%r14,%r4
-	st	%r2,100(%r15)
-1:
+	# The bras instruction gets runtime patched to call prepare_ftrace_return.
+	# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+	#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	st	%r2,100(%r15)
 #endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
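For reference, the instruction being patched here is RI format, like the brc
(a7m4xxxx) described in the comment removed from ftrace.c; bras should follow
the same pattern with operation digit 5. A worked encoding of the disabled
bras %r14,0f under that assumption (not taken from the commit itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* RI format: a7 | r1 | op | iiii (16-bit halfword offset).
	 * brc  mask,... -> a7 m 4 iiii  (per the old comment in ftrace.c)
	 * bras %r14,... -> a7 e 5 iiii  (assumed same RI pattern) */
	uint16_t bras_r14 = 0xa700 | (14 << 4) | 0x5;
	uint16_t disabled = 0x0002;	/* two halfwords: the next insn */

	printf("bras %%r14,0f -> %04x%04x\n", bras_r14, disabled);
	return 0;
}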
arch/s390/kernel/mcount64.S (+7 -20)
···
 #ifdef CONFIG_DYNAMIC_FTRACE
 	br	%r14

-	.data
-	.globl	ftrace_dyn_func
-ftrace_dyn_func:
-	.quad	ftrace_stub
-	.previous
-
 	.globl ftrace_caller
 ftrace_caller:
 #endif
···
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r14
 	lg	%r3,168(%r15)
-#ifdef CONFIG_DYNAMIC_FTRACE
-	larl	%r14,ftrace_dyn_func
-#else
 	larl	%r14,ftrace_trace_function
-#endif
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
+	lg	%r2,168(%r15)
+	lg	%r3,272(%r15)
 	.globl	ftrace_graph_caller
 ftrace_graph_caller:
-	# This unconditional branch gets runtime patched. Change only if
-	# you know what you are doing. See ftrace_enable_graph_caller().
-	j	0f
-#endif
-	lg	%r2,272(%r15)
-	lg	%r3,168(%r15)
-	brasl	%r14,prepare_ftrace_return
-	stg	%r2,168(%r15)
-0:
+	# The bras instruction gets runtime patched to call prepare_ftrace_return.
+	# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+	#	bras	%r14,prepare_ftrace_return
+	bras	%r14,0f
+0:	stg	%r2,168(%r15)
 #endif
 	aghi	%r15,160
 	lmg	%r2,%r5,32(%r15)
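One detail worth spelling out from the 64-bit layout in ftrace.c above:
ftrace_make_nop rewrites the full 12-byte block, but ftrace_make_call only
rewrites the first 6 bytes (jg back to lg), since the lgr/basr tail is
identical in both blocks; that is why FTRACE_INSN_SIZE is 6 while
MCOUNT_INSN_SIZE is 12. A host-side model with scratch memory in place of
kernel text (the byte patterns are sketched from the mnemonics; the lowcore
displacement of __LC_FTRACE_FUNC is left as zero):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MCOUNT_INSN_SIZE 12	/* 64-bit: jg/lg + lgr + basr */
#define FTRACE_INSN_SIZE  6	/* 64-bit: only the first instruction */

/* jg .+18 ; lgr %r0,%r0 ; basr %r14,%r1 */
static const uint8_t disable_code[MCOUNT_INSN_SIZE] = {
	0xc0, 0xf4, 0x00, 0x00, 0x00, 0x09,	/* jg .+18 (9 halfwords) */
	0xb9, 0x04, 0x00, 0x00,			/* lgr %r0,%r0 */
	0x0d, 0xe1,				/* basr %r14,%r1 */
};

/* lg %r1,__LC_FTRACE_FUNC -- displacement zeroed as a placeholder */
static const uint8_t enable_insn[FTRACE_INSN_SIZE] = {
	0xe3, 0x10, 0x00, 0x00, 0x00, 0x04,
};

int main(void)
{
	uint8_t text[MCOUNT_INSN_SIZE];

	/* ftrace_make_nop: one write replaces the whole block */
	memcpy(text, disable_code, MCOUNT_INSN_SIZE);
	/* ftrace_make_call: the tail is shared, so one 6-byte write
	 * (jg -> lg) is enough to turn the call site back on */
	memcpy(text, enable_insn, FTRACE_INSN_SIZE);
	printf("first opcode byte now: %#x\n", text[0]);
	return 0;
}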