Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: support KPROBES_ON_FTRACE

Instead of using our own kprobes-on-ftrace handling, convert the
code to support KPROBES_ON_FTRACE.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

Authored by Sven Schnelle and committed by Vasily Gorbik
657480d9 5f490a52

+58 -91
+1 -1
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
··· 24 24 | parisc: | ok | 25 25 | powerpc: | ok | 26 26 | riscv: | TODO | 27 - | s390: | TODO | 27 + | s390: | ok | 28 28 | sh: | TODO | 29 29 | sparc: | TODO | 30 30 | um: | TODO |
+1
arch/s390/Kconfig
··· 156 156 select HAVE_KERNEL_UNCOMPRESSED 157 157 select HAVE_KERNEL_XZ 158 158 select HAVE_KPROBES 159 + select HAVE_KPROBES_ON_FTRACE 159 160 select HAVE_KRETPROBES 160 161 select HAVE_KVM 161 162 select HAVE_LIVEPATCH
-1
arch/s390/include/asm/kprobes.h
··· 54 54 struct arch_specific_insn { 55 55 /* copy of original instruction */ 56 56 kprobe_opcode_t *insn; 57 - unsigned int is_ftrace_insn : 1; 58 57 }; 59 58 60 59 struct prev_kprobe {
+46 -34
arch/s390/kernel/ftrace.c
··· 72 72 #endif 73 73 } 74 74 75 - static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn) 76 - { 77 - #ifdef CONFIG_KPROBES 78 - if (insn->opc == BREAKPOINT_INSTRUCTION) 79 - return 1; 80 - #endif 81 - return 0; 82 - } 83 - 84 75 static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn) 85 76 { 86 77 #ifdef CONFIG_KPROBES ··· 105 114 /* Initial code replacement */ 106 115 ftrace_generate_orig_insn(&orig); 107 116 ftrace_generate_nop_insn(&new); 108 - } else if (is_kprobe_on_ftrace(&old)) { 109 - /* 110 - * If we find a breakpoint instruction, a kprobe has been 111 - * placed at the beginning of the function. We write the 112 - * constant KPROBE_ON_FTRACE_NOP into the remaining four 113 - * bytes of the original instruction so that the kprobes 114 - * handler can execute a nop, if it reaches this breakpoint. 115 - */ 116 - ftrace_generate_kprobe_call_insn(&orig); 117 - ftrace_generate_kprobe_nop_insn(&new); 118 117 } else { 119 118 /* Replace ftrace call with a nop. */ 120 119 ftrace_generate_call_insn(&orig, rec->ip); ··· 123 142 124 143 if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) 125 144 return -EFAULT; 126 - if (is_kprobe_on_ftrace(&old)) { 127 - /* 128 - * If we find a breakpoint instruction, a kprobe has been 129 - * placed at the beginning of the function. We write the 130 - * constant KPROBE_ON_FTRACE_CALL into the remaining four 131 - * bytes of the original instruction so that the kprobes 132 - * handler can execute a brasl if it reaches this breakpoint. 133 - */ 134 - ftrace_generate_kprobe_nop_insn(&orig); 135 - ftrace_generate_kprobe_call_insn(&new); 136 - } else { 137 - /* Replace nop with an ftrace call. */ 138 - ftrace_generate_nop_insn(&orig); 139 - ftrace_generate_call_insn(&new, rec->ip); 140 - } 145 + /* Replace nop with an ftrace call. */ 146 + ftrace_generate_nop_insn(&orig); 147 + ftrace_generate_call_insn(&new, rec->ip); 148 + 141 149 /* Verify that the to be replaced code matches what we expect. 
*/ 142 150 if (memcmp(&orig, &old, sizeof(old))) 143 151 return -EINVAL; ··· 211 241 } 212 242 213 243 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 244 + 245 + #ifdef CONFIG_KPROBES_ON_FTRACE 246 + void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 247 + struct ftrace_ops *ops, struct pt_regs *regs) 248 + { 249 + struct kprobe_ctlblk *kcb; 250 + struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip); 251 + 252 + if (unlikely(!p) || kprobe_disabled(p)) 253 + return; 254 + 255 + if (kprobe_running()) { 256 + kprobes_inc_nmissed_count(p); 257 + return; 258 + } 259 + 260 + __this_cpu_write(current_kprobe, p); 261 + 262 + kcb = get_kprobe_ctlblk(); 263 + kcb->kprobe_status = KPROBE_HIT_ACTIVE; 264 + 265 + instruction_pointer_set(regs, ip); 266 + 267 + if (!p->pre_handler || !p->pre_handler(p, regs)) { 268 + 269 + instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE); 270 + 271 + if (unlikely(p->post_handler)) { 272 + kcb->kprobe_status = KPROBE_HIT_SSDONE; 273 + p->post_handler(p, regs, 0); 274 + } 275 + } 276 + __this_cpu_write(current_kprobe, NULL); 277 + } 278 + NOKPROBE_SYMBOL(kprobe_ftrace_handler); 279 + 280 + int arch_prepare_kprobe_ftrace(struct kprobe *p) 281 + { 282 + p->ainsn.insn = NULL; 283 + return 0; 284 + } 285 + #endif
+4 -55
arch/s390/kernel/kprobes.c
··· 56 56 57 57 static void copy_instruction(struct kprobe *p) 58 58 { 59 - unsigned long ip = (unsigned long) p->addr; 60 59 s64 disp, new_disp; 61 60 u64 addr, new_addr; 62 61 63 - if (ftrace_location(ip) == ip) { 64 - /* 65 - * If kprobes patches the instruction that is morphed by 66 - * ftrace make sure that kprobes always sees the branch 67 - * "jg .+24" that skips the mcount block or the "brcl 0,0" 68 - * in case of hotpatch. 69 - */ 70 - ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); 71 - p->ainsn.is_ftrace_insn = 1; 72 - } else 73 - memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); 62 + memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8)); 74 63 p->opcode = p->ainsn.insn[0]; 75 64 if (!probe_is_insn_relative_long(p->ainsn.insn)) 76 65 return; ··· 125 136 } 126 137 NOKPROBE_SYMBOL(arch_prepare_kprobe); 127 138 128 - int arch_check_ftrace_location(struct kprobe *p) 129 - { 130 - return 0; 131 - } 132 - 133 139 struct swap_insn_args { 134 140 struct kprobe *p; 135 141 unsigned int arm_kprobe : 1; ··· 133 149 static int swap_instruction(void *data) 134 150 { 135 151 struct swap_insn_args *args = data; 136 - struct ftrace_insn new_insn, *insn; 137 152 struct kprobe *p = args->p; 138 - size_t len; 153 + u16 opc; 139 154 140 - new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode; 141 - len = sizeof(new_insn.opc); 142 - if (!p->ainsn.is_ftrace_insn) 143 - goto skip_ftrace; 144 - len = sizeof(new_insn); 145 - insn = (struct ftrace_insn *) p->addr; 146 - if (args->arm_kprobe) { 147 - if (is_ftrace_nop(insn)) 148 - new_insn.disp = KPROBE_ON_FTRACE_NOP; 149 - else 150 - new_insn.disp = KPROBE_ON_FTRACE_CALL; 151 - } else { 152 - ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr); 153 - if (insn->disp == KPROBE_ON_FTRACE_NOP) 154 - ftrace_generate_nop_insn(&new_insn); 155 - } 156 - skip_ftrace: 157 - s390_kernel_write(p->addr, &new_insn, len); 155 + opc = args->arm_kprobe ? 
BREAKPOINT_INSTRUCTION : p->opcode; 156 + s390_kernel_write(p->addr, &opc, sizeof(opc)); 158 157 return 0; 159 158 } 160 159 NOKPROBE_SYMBOL(swap_instruction); ··· 430 463 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 431 464 unsigned long ip = regs->psw.addr; 432 465 int fixup = probe_get_fixup_type(p->ainsn.insn); 433 - 434 - /* Check if the kprobes location is an enabled ftrace caller */ 435 - if (p->ainsn.is_ftrace_insn) { 436 - struct ftrace_insn *insn = (struct ftrace_insn *) p->addr; 437 - struct ftrace_insn call_insn; 438 - 439 - ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr); 440 - /* 441 - * A kprobe on an enabled ftrace call site actually single 442 - * stepped an unconditional branch (ftrace nop equivalent). 443 - * Now we need to fixup things and pretend that a brasl r0,... 444 - * was executed instead. 445 - */ 446 - if (insn->disp == KPROBE_ON_FTRACE_CALL) { 447 - ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE; 448 - regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn); 449 - } 450 - } 451 466 452 467 if (fixup & FIXUP_PSW_NORMAL) 453 468 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
+6
arch/s390/kernel/mcount.S
··· 42 42 .globl ftrace_regs_caller 43 43 .set ftrace_regs_caller,ftrace_caller 44 44 stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller 45 + lghi %r14,0 # save condition code 46 + ipm %r14 # don't put any instructions 47 + sllg %r14,%r14,16 # clobbering CC before this point 45 48 lgr %r1,%r15 46 49 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT)) 47 50 aghi %r0,MCOUNT_RETURN_FIXUP ··· 57 54 # allocate pt_regs and stack frame for ftrace_trace_function 58 55 aghi %r15,-STACK_FRAME_SIZE 59 56 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) 57 + stg %r14,(STACK_PTREGS_PSW)(%r15) 58 + lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address 59 + stosm (STACK_PTREGS_PSW)(%r15),0 60 60 aghi %r1,-TRACED_FUNC_FRAME_SIZE 61 61 stg %r1,__SF_BACKCHAIN(%r15) 62 62 stg %r0,(STACK_PTREGS_PSW+8)(%r15)