Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky: Add kprobes support

This patch enables kprobes, kretprobes and the ftrace interface. It utilizes
software breakpoint and single-step debug exceptions, plus instruction
simulation, on csky.

We use USR_BKPT to replace the original instruction, and the kprobe handler
prepares an executable memory slot for out-of-line execution with a
copy of the original instruction being probed. Most instructions
can be executed by single-stepping, but some instructions need the original
pc value to execute, so we must simulate those instructions in software.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>

Guo Ren 33e53ae1 000591f1

+1197 -2
+3
arch/csky/Kconfig
··· 46 46 select HAVE_KERNEL_GZIP 47 47 select HAVE_KERNEL_LZO 48 48 select HAVE_KERNEL_LZMA 49 + select HAVE_KPROBES if !CPU_CK610 50 + select HAVE_KPROBES_ON_FTRACE if !CPU_CK610 51 + select HAVE_KRETPROBES if !CPU_CK610 49 52 select HAVE_PERF_EVENTS 50 53 select HAVE_PERF_REGS 51 54 select HAVE_PERF_USER_STACK_DUMP
-1
arch/csky/include/asm/Kbuild
··· 20 20 generic-y += irq_work.h 21 21 generic-y += kdebug.h 22 22 generic-y += kmap_types.h 23 - generic-y += kprobes.h 24 23 generic-y += kvm_para.h 25 24 generic-y += linkage.h 26 25 generic-y += local.h
+48
arch/csky/include/asm/kprobes.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __ASM_CSKY_KPROBES_H 4 + #define __ASM_CSKY_KPROBES_H 5 + 6 + #include <asm-generic/kprobes.h> 7 + 8 + #ifdef CONFIG_KPROBES 9 + #include <linux/types.h> 10 + #include <linux/ptrace.h> 11 + #include <linux/percpu.h> 12 + 13 + #define __ARCH_WANT_KPROBES_INSN_SLOT 14 + #define MAX_INSN_SIZE 1 15 + 16 + #define flush_insn_slot(p) do { } while (0) 17 + #define kretprobe_blacklist_size 0 18 + 19 + #include <asm/probes.h> 20 + 21 + struct prev_kprobe { 22 + struct kprobe *kp; 23 + unsigned int status; 24 + }; 25 + 26 + /* Single step context for kprobe */ 27 + struct kprobe_step_ctx { 28 + unsigned long ss_pending; 29 + unsigned long match_addr; 30 + }; 31 + 32 + /* per-cpu kprobe control block */ 33 + struct kprobe_ctlblk { 34 + unsigned int kprobe_status; 35 + unsigned long saved_sr; 36 + struct prev_kprobe prev_kprobe; 37 + struct kprobe_step_ctx ss_ctx; 38 + }; 39 + 40 + void arch_remove_kprobe(struct kprobe *p); 41 + int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); 42 + int kprobe_breakpoint_handler(struct pt_regs *regs); 43 + int kprobe_single_step_handler(struct pt_regs *regs); 44 + void kretprobe_trampoline(void); 45 + void __kprobes *trampoline_probe_handler(struct pt_regs *regs); 46 + 47 + #endif /* CONFIG_KPROBES */ 48 + #endif /* __ASM_CSKY_KPROBES_H */
+24
arch/csky/include/asm/probes.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_PROBES_H 4 + #define __ASM_CSKY_PROBES_H 5 + 6 + typedef u32 probe_opcode_t; 7 + typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *); 8 + 9 + /* architecture specific copy of original instruction */ 10 + struct arch_probe_insn { 11 + probe_opcode_t *insn; 12 + probes_handler_t *handler; 13 + /* restore address after simulation */ 14 + unsigned long restore; 15 + }; 16 + 17 + #ifdef CONFIG_KPROBES 18 + typedef u32 kprobe_opcode_t; 19 + struct arch_specific_insn { 20 + struct arch_probe_insn api; 21 + }; 22 + #endif 23 + 24 + #endif /* __ASM_CSKY_PROBES_H */
+2
arch/csky/include/asm/ptrace.h
··· 13 13 14 14 #define PS_S 0x80000000 /* Supervisor Mode */ 15 15 16 + #define USR_BKPT 0x1464 17 + 16 18 #define arch_has_single_step() (1) 17 19 #define current_pt_regs() \ 18 20 ({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
+1
arch/csky/kernel/Makefile
··· 4 4 obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o 5 5 obj-y += power.o syscall.o syscall_table.o setup.o 6 6 obj-y += process.o cpu-probe.o ptrace.o dumpstack.o 7 + obj-y += probes/ 7 8 8 9 obj-$(CONFIG_MODULES) += module.o 9 10 obj-$(CONFIG_SMP) += smp.o
+6
arch/csky/kernel/probes/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o 3 + obj-$(CONFIG_KPROBES) += kprobes_trampoline.o 4 + obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o 5 + 6 + CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
+49
arch/csky/kernel/probes/decode-insn.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <linux/kernel.h> 4 + #include <linux/kprobes.h> 5 + #include <linux/module.h> 6 + #include <linux/kallsyms.h> 7 + #include <asm/sections.h> 8 + 9 + #include "decode-insn.h" 10 + #include "simulate-insn.h" 11 + 12 + /* Return: 13 + * INSN_REJECTED If instruction is one not allowed to kprobe, 14 + * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. 15 + */ 16 + enum probe_insn __kprobes 17 + csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api) 18 + { 19 + probe_opcode_t insn = le32_to_cpu(*addr); 20 + 21 + CSKY_INSN_SET_SIMULATE(br16, insn); 22 + CSKY_INSN_SET_SIMULATE(bt16, insn); 23 + CSKY_INSN_SET_SIMULATE(bf16, insn); 24 + CSKY_INSN_SET_SIMULATE(jmp16, insn); 25 + CSKY_INSN_SET_SIMULATE(jsr16, insn); 26 + CSKY_INSN_SET_SIMULATE(lrw16, insn); 27 + CSKY_INSN_SET_SIMULATE(pop16, insn); 28 + 29 + CSKY_INSN_SET_SIMULATE(br32, insn); 30 + CSKY_INSN_SET_SIMULATE(bt32, insn); 31 + CSKY_INSN_SET_SIMULATE(bf32, insn); 32 + CSKY_INSN_SET_SIMULATE(jmp32, insn); 33 + CSKY_INSN_SET_SIMULATE(jsr32, insn); 34 + CSKY_INSN_SET_SIMULATE(lrw32, insn); 35 + CSKY_INSN_SET_SIMULATE(pop32, insn); 36 + 37 + CSKY_INSN_SET_SIMULATE(bez32, insn); 38 + CSKY_INSN_SET_SIMULATE(bnez32, insn); 39 + CSKY_INSN_SET_SIMULATE(bnezad32, insn); 40 + CSKY_INSN_SET_SIMULATE(bhsz32, insn); 41 + CSKY_INSN_SET_SIMULATE(bhz32, insn); 42 + CSKY_INSN_SET_SIMULATE(blsz32, insn); 43 + CSKY_INSN_SET_SIMULATE(blz32, insn); 44 + CSKY_INSN_SET_SIMULATE(bsr32, insn); 45 + CSKY_INSN_SET_SIMULATE(jmpi32, insn); 46 + CSKY_INSN_SET_SIMULATE(jsri32, insn); 47 + 48 + return INSN_GOOD; 49 + }
+20
arch/csky/kernel/probes/decode-insn.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H 4 + #define __CSKY_KERNEL_KPROBES_DECODE_INSN_H 5 + 6 + #include <asm/sections.h> 7 + #include <asm/kprobes.h> 8 + 9 + enum probe_insn { 10 + INSN_REJECTED, 11 + INSN_GOOD_NO_SLOT, 12 + INSN_GOOD, 13 + }; 14 + 15 + #define is_insn32(insn) ((insn & 0xc000) == 0xc000) 16 + 17 + enum probe_insn __kprobes 18 + csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi); 19 + 20 + #endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
+66
arch/csky/kernel/probes/ftrace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/kprobes.h> 4 + 5 + int arch_check_ftrace_location(struct kprobe *p) 6 + { 7 + if (ftrace_location((unsigned long)p->addr)) 8 + p->flags |= KPROBE_FLAG_FTRACE; 9 + return 0; 10 + } 11 + 12 + /* Ftrace callback handler for kprobes -- called under preepmt disabed */ 13 + void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, 14 + struct ftrace_ops *ops, struct pt_regs *regs) 15 + { 16 + bool lr_saver = false; 17 + struct kprobe *p; 18 + struct kprobe_ctlblk *kcb; 19 + 20 + /* Preempt is disabled by ftrace */ 21 + p = get_kprobe((kprobe_opcode_t *)ip); 22 + if (!p) { 23 + p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE)); 24 + if (unlikely(!p) || kprobe_disabled(p)) 25 + return; 26 + lr_saver = true; 27 + } 28 + 29 + kcb = get_kprobe_ctlblk(); 30 + if (kprobe_running()) { 31 + kprobes_inc_nmissed_count(p); 32 + } else { 33 + unsigned long orig_ip = instruction_pointer(regs); 34 + 35 + if (lr_saver) 36 + ip -= MCOUNT_INSN_SIZE; 37 + instruction_pointer_set(regs, ip); 38 + __this_cpu_write(current_kprobe, p); 39 + kcb->kprobe_status = KPROBE_HIT_ACTIVE; 40 + if (!p->pre_handler || !p->pre_handler(p, regs)) { 41 + /* 42 + * Emulate singlestep (and also recover regs->pc) 43 + * as if there is a nop 44 + */ 45 + instruction_pointer_set(regs, 46 + (unsigned long)p->addr + MCOUNT_INSN_SIZE); 47 + if (unlikely(p->post_handler)) { 48 + kcb->kprobe_status = KPROBE_HIT_SSDONE; 49 + p->post_handler(p, regs, 0); 50 + } 51 + instruction_pointer_set(regs, orig_ip); 52 + } 53 + /* 54 + * If pre_handler returns !0, it changes regs->pc. We have to 55 + * skip emulating post_handler. 56 + */ 57 + __this_cpu_write(current_kprobe, NULL); 58 + } 59 + } 60 + NOKPROBE_SYMBOL(kprobe_ftrace_handler); 61 + 62 + int arch_prepare_kprobe_ftrace(struct kprobe *p) 63 + { 64 + p->ainsn.api.insn = NULL; 65 + return 0; 66 + }
+499
arch/csky/kernel/probes/kprobes.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <linux/kprobes.h> 4 + #include <linux/extable.h> 5 + #include <linux/slab.h> 6 + #include <linux/stop_machine.h> 7 + #include <asm/ptrace.h> 8 + #include <linux/uaccess.h> 9 + #include <asm/sections.h> 10 + #include <asm/cacheflush.h> 11 + 12 + #include "decode-insn.h" 13 + 14 + DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 15 + DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 16 + 17 + static void __kprobes 18 + post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); 19 + 20 + struct csky_insn_patch { 21 + kprobe_opcode_t *addr; 22 + u32 opcode; 23 + atomic_t cpu_count; 24 + }; 25 + 26 + static int __kprobes patch_text_cb(void *priv) 27 + { 28 + struct csky_insn_patch *param = priv; 29 + unsigned int addr = (unsigned int)param->addr; 30 + 31 + if (atomic_inc_return(&param->cpu_count) == 1) { 32 + *(u16 *) addr = cpu_to_le16(param->opcode); 33 + dcache_wb_range(addr, addr + 2); 34 + atomic_inc(&param->cpu_count); 35 + } else { 36 + while (atomic_read(&param->cpu_count) <= num_online_cpus()) 37 + cpu_relax(); 38 + } 39 + 40 + icache_inv_range(addr, addr + 2); 41 + 42 + return 0; 43 + } 44 + 45 + static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) 46 + { 47 + struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) }; 48 + 49 + return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask); 50 + } 51 + 52 + static void __kprobes arch_prepare_ss_slot(struct kprobe *p) 53 + { 54 + unsigned long offset = is_insn32(p->opcode) ? 
4 : 2; 55 + 56 + p->ainsn.api.restore = (unsigned long)p->addr + offset; 57 + 58 + patch_text(p->ainsn.api.insn, p->opcode); 59 + } 60 + 61 + static void __kprobes arch_prepare_simulate(struct kprobe *p) 62 + { 63 + p->ainsn.api.restore = 0; 64 + } 65 + 66 + static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) 67 + { 68 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 69 + 70 + if (p->ainsn.api.handler) 71 + p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); 72 + 73 + post_kprobe_handler(kcb, regs); 74 + } 75 + 76 + int __kprobes arch_prepare_kprobe(struct kprobe *p) 77 + { 78 + unsigned long probe_addr = (unsigned long)p->addr; 79 + 80 + if (probe_addr & 0x1) { 81 + pr_warn("Address not aligned.\n"); 82 + return -EINVAL; 83 + } 84 + 85 + /* copy instruction */ 86 + p->opcode = le32_to_cpu(*p->addr); 87 + 88 + /* decode instruction */ 89 + switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) { 90 + case INSN_REJECTED: /* insn not supported */ 91 + return -EINVAL; 92 + 93 + case INSN_GOOD_NO_SLOT: /* insn need simulation */ 94 + p->ainsn.api.insn = NULL; 95 + break; 96 + 97 + case INSN_GOOD: /* instruction uses slot */ 98 + p->ainsn.api.insn = get_insn_slot(); 99 + if (!p->ainsn.api.insn) 100 + return -ENOMEM; 101 + break; 102 + } 103 + 104 + /* prepare the instruction */ 105 + if (p->ainsn.api.insn) 106 + arch_prepare_ss_slot(p); 107 + else 108 + arch_prepare_simulate(p); 109 + 110 + return 0; 111 + } 112 + 113 + /* install breakpoint in text */ 114 + void __kprobes arch_arm_kprobe(struct kprobe *p) 115 + { 116 + patch_text(p->addr, USR_BKPT); 117 + } 118 + 119 + /* remove breakpoint from text */ 120 + void __kprobes arch_disarm_kprobe(struct kprobe *p) 121 + { 122 + patch_text(p->addr, p->opcode); 123 + } 124 + 125 + void __kprobes arch_remove_kprobe(struct kprobe *p) 126 + { 127 + } 128 + 129 + static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) 130 + { 131 + kcb->prev_kprobe.kp = kprobe_running(); 132 + 
kcb->prev_kprobe.status = kcb->kprobe_status; 133 + } 134 + 135 + static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 136 + { 137 + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 138 + kcb->kprobe_status = kcb->prev_kprobe.status; 139 + } 140 + 141 + static void __kprobes set_current_kprobe(struct kprobe *p) 142 + { 143 + __this_cpu_write(current_kprobe, p); 144 + } 145 + 146 + /* 147 + * Interrupts need to be disabled before single-step mode is set, and not 148 + * reenabled until after single-step mode ends. 149 + * Without disabling interrupt on local CPU, there is a chance of 150 + * interrupt occurrence in the period of exception return and start of 151 + * out-of-line single-step, that result in wrongly single stepping 152 + * into the interrupt handler. 153 + */ 154 + static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, 155 + struct pt_regs *regs) 156 + { 157 + kcb->saved_sr = regs->sr; 158 + regs->sr &= ~BIT(6); 159 + } 160 + 161 + static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, 162 + struct pt_regs *regs) 163 + { 164 + regs->sr = kcb->saved_sr; 165 + } 166 + 167 + static void __kprobes 168 + set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p) 169 + { 170 + unsigned long offset = is_insn32(p->opcode) ? 
4 : 2; 171 + 172 + kcb->ss_ctx.ss_pending = true; 173 + kcb->ss_ctx.match_addr = addr + offset; 174 + } 175 + 176 + static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) 177 + { 178 + kcb->ss_ctx.ss_pending = false; 179 + kcb->ss_ctx.match_addr = 0; 180 + } 181 + 182 + #define TRACE_MODE_SI BIT(14) 183 + #define TRACE_MODE_MASK ~(0x3 << 14) 184 + #define TRACE_MODE_RUN 0 185 + 186 + static void __kprobes setup_singlestep(struct kprobe *p, 187 + struct pt_regs *regs, 188 + struct kprobe_ctlblk *kcb, int reenter) 189 + { 190 + unsigned long slot; 191 + 192 + if (reenter) { 193 + save_previous_kprobe(kcb); 194 + set_current_kprobe(p); 195 + kcb->kprobe_status = KPROBE_REENTER; 196 + } else { 197 + kcb->kprobe_status = KPROBE_HIT_SS; 198 + } 199 + 200 + if (p->ainsn.api.insn) { 201 + /* prepare for single stepping */ 202 + slot = (unsigned long)p->ainsn.api.insn; 203 + 204 + set_ss_context(kcb, slot, p); /* mark pending ss */ 205 + 206 + /* IRQs and single stepping do not mix well. 
*/ 207 + kprobes_save_local_irqflag(kcb, regs); 208 + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI; 209 + instruction_pointer_set(regs, slot); 210 + } else { 211 + /* insn simulation */ 212 + arch_simulate_insn(p, regs); 213 + } 214 + } 215 + 216 + static int __kprobes reenter_kprobe(struct kprobe *p, 217 + struct pt_regs *regs, 218 + struct kprobe_ctlblk *kcb) 219 + { 220 + switch (kcb->kprobe_status) { 221 + case KPROBE_HIT_SSDONE: 222 + case KPROBE_HIT_ACTIVE: 223 + kprobes_inc_nmissed_count(p); 224 + setup_singlestep(p, regs, kcb, 1); 225 + break; 226 + case KPROBE_HIT_SS: 227 + case KPROBE_REENTER: 228 + pr_warn("Unrecoverable kprobe detected.\n"); 229 + dump_kprobe(p); 230 + BUG(); 231 + break; 232 + default: 233 + WARN_ON(1); 234 + return 0; 235 + } 236 + 237 + return 1; 238 + } 239 + 240 + static void __kprobes 241 + post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) 242 + { 243 + struct kprobe *cur = kprobe_running(); 244 + 245 + if (!cur) 246 + return; 247 + 248 + /* return addr restore if non-branching insn */ 249 + if (cur->ainsn.api.restore != 0) 250 + regs->pc = cur->ainsn.api.restore; 251 + 252 + /* restore back original saved kprobe variables and continue */ 253 + if (kcb->kprobe_status == KPROBE_REENTER) { 254 + restore_previous_kprobe(kcb); 255 + return; 256 + } 257 + 258 + /* call post handler */ 259 + kcb->kprobe_status = KPROBE_HIT_SSDONE; 260 + if (cur->post_handler) { 261 + /* post_handler can hit breakpoint and single step 262 + * again, so we enable D-flag for recursive exception. 
263 + */ 264 + cur->post_handler(cur, regs, 0); 265 + } 266 + 267 + reset_current_kprobe(); 268 + } 269 + 270 + int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) 271 + { 272 + struct kprobe *cur = kprobe_running(); 273 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 274 + 275 + switch (kcb->kprobe_status) { 276 + case KPROBE_HIT_SS: 277 + case KPROBE_REENTER: 278 + /* 279 + * We are here because the instruction being single 280 + * stepped caused a page fault. We reset the current 281 + * kprobe and the ip points back to the probe address 282 + * and allow the page fault handler to continue as a 283 + * normal page fault. 284 + */ 285 + regs->pc = (unsigned long) cur->addr; 286 + if (!instruction_pointer(regs)) 287 + BUG(); 288 + 289 + if (kcb->kprobe_status == KPROBE_REENTER) 290 + restore_previous_kprobe(kcb); 291 + else 292 + reset_current_kprobe(); 293 + 294 + break; 295 + case KPROBE_HIT_ACTIVE: 296 + case KPROBE_HIT_SSDONE: 297 + /* 298 + * We increment the nmissed count for accounting, 299 + * we can also use npre/npostfault count for accounting 300 + * these specific fault cases. 301 + */ 302 + kprobes_inc_nmissed_count(cur); 303 + 304 + /* 305 + * We come here because instructions in the pre/post 306 + * handler caused the page_fault, this could happen 307 + * if handler tries to access user space by 308 + * copy_from_user(), get_user() etc. Let the 309 + * user-specified handler try to fix it first. 310 + */ 311 + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 312 + return 1; 313 + 314 + /* 315 + * In case the user-specified fault handler returned 316 + * zero, try to fix up. 
317 + */ 318 + if (fixup_exception(regs)) 319 + return 1; 320 + } 321 + return 0; 322 + } 323 + 324 + int __kprobes 325 + kprobe_breakpoint_handler(struct pt_regs *regs) 326 + { 327 + struct kprobe *p, *cur_kprobe; 328 + struct kprobe_ctlblk *kcb; 329 + unsigned long addr = instruction_pointer(regs); 330 + 331 + kcb = get_kprobe_ctlblk(); 332 + cur_kprobe = kprobe_running(); 333 + 334 + p = get_kprobe((kprobe_opcode_t *) addr); 335 + 336 + if (p) { 337 + if (cur_kprobe) { 338 + if (reenter_kprobe(p, regs, kcb)) 339 + return 1; 340 + } else { 341 + /* Probe hit */ 342 + set_current_kprobe(p); 343 + kcb->kprobe_status = KPROBE_HIT_ACTIVE; 344 + 345 + /* 346 + * If we have no pre-handler or it returned 0, we 347 + * continue with normal processing. If we have a 348 + * pre-handler and it returned non-zero, it will 349 + * modify the execution path and no need to single 350 + * stepping. Let's just reset current kprobe and exit. 351 + * 352 + * pre_handler can hit a breakpoint and can step thru 353 + * before return. 354 + */ 355 + if (!p->pre_handler || !p->pre_handler(p, regs)) 356 + setup_singlestep(p, regs, kcb, 0); 357 + else 358 + reset_current_kprobe(); 359 + } 360 + return 1; 361 + } 362 + 363 + /* 364 + * The breakpoint instruction was removed right 365 + * after we hit it. Another cpu has removed 366 + * either a probepoint or a debugger breakpoint 367 + * at this address. In either case, no further 368 + * handling of this interrupt is appropriate. 369 + * Return back to original instruction, and continue. 
370 + */ 371 + return 0; 372 + } 373 + 374 + int __kprobes 375 + kprobe_single_step_handler(struct pt_regs *regs) 376 + { 377 + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 378 + 379 + if ((kcb->ss_ctx.ss_pending) 380 + && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) { 381 + clear_ss_context(kcb); /* clear pending ss */ 382 + 383 + kprobes_restore_local_irqflag(kcb, regs); 384 + regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN; 385 + 386 + post_kprobe_handler(kcb, regs); 387 + return 1; 388 + } 389 + return 0; 390 + } 391 + 392 + /* 393 + * Provide a blacklist of symbols identifying ranges which cannot be kprobed. 394 + * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). 395 + */ 396 + int __init arch_populate_kprobe_blacklist(void) 397 + { 398 + int ret; 399 + 400 + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, 401 + (unsigned long)__irqentry_text_end); 402 + return ret; 403 + } 404 + 405 + void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) 406 + { 407 + struct kretprobe_instance *ri = NULL; 408 + struct hlist_head *head, empty_rp; 409 + struct hlist_node *tmp; 410 + unsigned long flags, orig_ret_address = 0; 411 + unsigned long trampoline_address = 412 + (unsigned long)&kretprobe_trampoline; 413 + kprobe_opcode_t *correct_ret_addr = NULL; 414 + 415 + INIT_HLIST_HEAD(&empty_rp); 416 + kretprobe_hash_lock(current, &head, &flags); 417 + 418 + /* 419 + * It is possible to have multiple instances associated with a given 420 + * task either because multiple functions in the call path have 421 + * return probes installed on them, and/or more than one 422 + * return probe was registered for a target function. 
423 + * 424 + * We can handle this because: 425 + * - instances are always pushed into the head of the list 426 + * - when multiple return probes are registered for the same 427 + * function, the (chronologically) first instance's ret_addr 428 + * will be the real return address, and all the rest will 429 + * point to kretprobe_trampoline. 430 + */ 431 + hlist_for_each_entry_safe(ri, tmp, head, hlist) { 432 + if (ri->task != current) 433 + /* another task is sharing our hash bucket */ 434 + continue; 435 + 436 + orig_ret_address = (unsigned long)ri->ret_addr; 437 + 438 + if (orig_ret_address != trampoline_address) 439 + /* 440 + * This is the real return address. Any other 441 + * instances associated with this task are for 442 + * other calls deeper on the call stack 443 + */ 444 + break; 445 + } 446 + 447 + kretprobe_assert(ri, orig_ret_address, trampoline_address); 448 + 449 + correct_ret_addr = ri->ret_addr; 450 + hlist_for_each_entry_safe(ri, tmp, head, hlist) { 451 + if (ri->task != current) 452 + /* another task is sharing our hash bucket */ 453 + continue; 454 + 455 + orig_ret_address = (unsigned long)ri->ret_addr; 456 + if (ri->rp && ri->rp->handler) { 457 + __this_cpu_write(current_kprobe, &ri->rp->kp); 458 + get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; 459 + ri->ret_addr = correct_ret_addr; 460 + ri->rp->handler(ri, regs); 461 + __this_cpu_write(current_kprobe, NULL); 462 + } 463 + 464 + recycle_rp_inst(ri, &empty_rp); 465 + 466 + if (orig_ret_address != trampoline_address) 467 + /* 468 + * This is the real return address. 
Any other 469 + * instances associated with this task are for 470 + * other calls deeper on the call stack 471 + */ 472 + break; 473 + } 474 + 475 + kretprobe_hash_unlock(current, &flags); 476 + 477 + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { 478 + hlist_del(&ri->hlist); 479 + kfree(ri); 480 + } 481 + return (void *)orig_ret_address; 482 + } 483 + 484 + void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 485 + struct pt_regs *regs) 486 + { 487 + ri->ret_addr = (kprobe_opcode_t *)regs->lr; 488 + regs->lr = (unsigned long) &kretprobe_trampoline; 489 + } 490 + 491 + int __kprobes arch_trampoline_kprobe(struct kprobe *p) 492 + { 493 + return 0; 494 + } 495 + 496 + int __init arch_init_kprobes(void) 497 + { 498 + return 0; 499 + }
+19
arch/csky/kernel/probes/kprobes_trampoline.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #include <linux/linkage.h> 4 + 5 + #include <abi/entry.h> 6 + 7 + ENTRY(kretprobe_trampoline) 8 + SAVE_REGS_FTRACE 9 + 10 + mov a0, sp /* pt_regs */ 11 + 12 + jbsr trampoline_probe_handler 13 + 14 + /* use the result as the return-address */ 15 + mov lr, a0 16 + 17 + RESTORE_REGS_FTRACE 18 + rts 19 + ENDPROC(kretprobe_trampoline)
+398
arch/csky/kernel/probes/simulate-insn.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + #include <linux/bitops.h> 4 + #include <linux/kernel.h> 5 + #include <linux/kprobes.h> 6 + 7 + #include "decode-insn.h" 8 + #include "simulate-insn.h" 9 + 10 + static inline bool csky_insn_reg_get_val(struct pt_regs *regs, 11 + unsigned long index, 12 + unsigned long *ptr) 13 + { 14 + if (index < 14) 15 + *ptr = *(&regs->a0 + index); 16 + 17 + if (index > 15 && index < 31) 18 + *ptr = *(&regs->exregs[0] + index - 16); 19 + 20 + switch (index) { 21 + case 14: 22 + *ptr = regs->usp; 23 + break; 24 + case 15: 25 + *ptr = regs->lr; 26 + break; 27 + case 31: 28 + *ptr = regs->tls; 29 + break; 30 + default: 31 + goto fail; 32 + } 33 + 34 + return true; 35 + fail: 36 + return false; 37 + } 38 + 39 + static inline bool csky_insn_reg_set_val(struct pt_regs *regs, 40 + unsigned long index, 41 + unsigned long val) 42 + { 43 + if (index < 14) 44 + *(&regs->a0 + index) = val; 45 + 46 + if (index > 15 && index < 31) 47 + *(&regs->exregs[0] + index - 16) = val; 48 + 49 + switch (index) { 50 + case 14: 51 + regs->usp = val; 52 + break; 53 + case 15: 54 + regs->lr = val; 55 + break; 56 + case 31: 57 + regs->tls = val; 58 + break; 59 + default: 60 + goto fail; 61 + } 62 + 63 + return true; 64 + fail: 65 + return false; 66 + } 67 + 68 + void __kprobes 69 + simulate_br16(u32 opcode, long addr, struct pt_regs *regs) 70 + { 71 + instruction_pointer_set(regs, 72 + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); 73 + } 74 + 75 + void __kprobes 76 + simulate_br32(u32 opcode, long addr, struct pt_regs *regs) 77 + { 78 + instruction_pointer_set(regs, 79 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 80 + } 81 + 82 + void __kprobes 83 + simulate_bt16(u32 opcode, long addr, struct pt_regs *regs) 84 + { 85 + if (regs->sr & 1) 86 + instruction_pointer_set(regs, 87 + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); 88 + else 89 + instruction_pointer_set(regs, addr + 2); 90 + } 91 + 92 + void __kprobes 93 + simulate_bt32(u32 opcode, 
long addr, struct pt_regs *regs) 94 + { 95 + if (regs->sr & 1) 96 + instruction_pointer_set(regs, 97 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 98 + else 99 + instruction_pointer_set(regs, addr + 4); 100 + } 101 + 102 + void __kprobes 103 + simulate_bf16(u32 opcode, long addr, struct pt_regs *regs) 104 + { 105 + if (!(regs->sr & 1)) 106 + instruction_pointer_set(regs, 107 + addr + sign_extend32((opcode & 0x3ff) << 1, 9)); 108 + else 109 + instruction_pointer_set(regs, addr + 2); 110 + } 111 + 112 + void __kprobes 113 + simulate_bf32(u32 opcode, long addr, struct pt_regs *regs) 114 + { 115 + if (!(regs->sr & 1)) 116 + instruction_pointer_set(regs, 117 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 118 + else 119 + instruction_pointer_set(regs, addr + 4); 120 + } 121 + 122 + void __kprobes 123 + simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs) 124 + { 125 + unsigned long tmp = (opcode >> 2) & 0xf; 126 + 127 + csky_insn_reg_get_val(regs, tmp, &tmp); 128 + 129 + instruction_pointer_set(regs, tmp & 0xfffffffe); 130 + } 131 + 132 + void __kprobes 133 + simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs) 134 + { 135 + unsigned long tmp = opcode & 0x1f; 136 + 137 + csky_insn_reg_get_val(regs, tmp, &tmp); 138 + 139 + instruction_pointer_set(regs, tmp & 0xfffffffe); 140 + } 141 + 142 + void __kprobes 143 + simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs) 144 + { 145 + unsigned long tmp = (opcode >> 2) & 0xf; 146 + 147 + csky_insn_reg_get_val(regs, tmp, &tmp); 148 + 149 + regs->lr = addr + 2; 150 + 151 + instruction_pointer_set(regs, tmp & 0xfffffffe); 152 + } 153 + 154 + void __kprobes 155 + simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs) 156 + { 157 + unsigned long tmp = opcode & 0x1f; 158 + 159 + csky_insn_reg_get_val(regs, tmp, &tmp); 160 + 161 + regs->lr = addr + 4; 162 + 163 + instruction_pointer_set(regs, tmp & 0xfffffffe); 164 + } 165 + 166 + void __kprobes 167 + simulate_lrw16(u32 opcode, long addr, 
struct pt_regs *regs) 168 + { 169 + unsigned long val; 170 + unsigned long tmp = (opcode & 0x300) >> 3; 171 + unsigned long offset = ((opcode & 0x1f) | tmp) << 2; 172 + 173 + tmp = (opcode & 0xe0) >> 5; 174 + 175 + val = *(unsigned int *)(instruction_pointer(regs) + offset); 176 + 177 + csky_insn_reg_set_val(regs, tmp, val); 178 + } 179 + 180 + void __kprobes 181 + simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs) 182 + { 183 + unsigned long val; 184 + unsigned long offset = (opcode & 0xffff0000) >> 14; 185 + unsigned long tmp = opcode & 0x0000001f; 186 + 187 + val = *(unsigned int *) 188 + ((instruction_pointer(regs) + offset) & 0xfffffffc); 189 + 190 + csky_insn_reg_set_val(regs, tmp, val); 191 + } 192 + 193 + void __kprobes 194 + simulate_pop16(u32 opcode, long addr, struct pt_regs *regs) 195 + { 196 + unsigned long *tmp = (unsigned long *)regs->usp; 197 + int i; 198 + 199 + for (i = 0; i < (opcode & 0xf); i++) { 200 + csky_insn_reg_set_val(regs, i + 4, *tmp); 201 + tmp += 1; 202 + } 203 + 204 + if (opcode & 0x10) { 205 + csky_insn_reg_set_val(regs, 15, *tmp); 206 + tmp += 1; 207 + } 208 + 209 + regs->usp = (unsigned long)tmp; 210 + 211 + instruction_pointer_set(regs, regs->lr); 212 + } 213 + 214 + void __kprobes 215 + simulate_pop32(u32 opcode, long addr, struct pt_regs *regs) 216 + { 217 + unsigned long *tmp = (unsigned long *)regs->usp; 218 + int i; 219 + 220 + for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) { 221 + csky_insn_reg_set_val(regs, i + 4, *tmp); 222 + tmp += 1; 223 + } 224 + 225 + if (opcode & 0x100000) { 226 + csky_insn_reg_set_val(regs, 15, *tmp); 227 + tmp += 1; 228 + } 229 + 230 + for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) { 231 + csky_insn_reg_set_val(regs, i + 16, *tmp); 232 + tmp += 1; 233 + } 234 + 235 + if (opcode & 0x1000000) { 236 + csky_insn_reg_set_val(regs, 29, *tmp); 237 + tmp += 1; 238 + } 239 + 240 + regs->usp = (unsigned long)tmp; 241 + 242 + instruction_pointer_set(regs, regs->lr); 243 + } 244 + 245 + void 
__kprobes 246 + simulate_bez32(u32 opcode, long addr, struct pt_regs *regs) 247 + { 248 + unsigned long tmp = opcode & 0x1f; 249 + 250 + csky_insn_reg_get_val(regs, tmp, &tmp); 251 + 252 + if (tmp == 0) { 253 + instruction_pointer_set(regs, 254 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 255 + } else 256 + instruction_pointer_set(regs, addr + 4); 257 + } 258 + 259 + void __kprobes 260 + simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs) 261 + { 262 + unsigned long tmp = opcode & 0x1f; 263 + 264 + csky_insn_reg_get_val(regs, tmp, &tmp); 265 + 266 + if (tmp != 0) { 267 + instruction_pointer_set(regs, 268 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 269 + } else 270 + instruction_pointer_set(regs, addr + 4); 271 + } 272 + 273 + void __kprobes 274 + simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs) 275 + { 276 + unsigned long tmp = opcode & 0x1f; 277 + unsigned long val; 278 + 279 + csky_insn_reg_get_val(regs, tmp, &val); 280 + 281 + val -= 1; 282 + 283 + if (val > 0) { 284 + instruction_pointer_set(regs, 285 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 286 + } else 287 + instruction_pointer_set(regs, addr + 4); 288 + 289 + csky_insn_reg_set_val(regs, tmp, val); 290 + } 291 + 292 + void __kprobes 293 + simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs) 294 + { 295 + unsigned long tmp = opcode & 0x1f; 296 + unsigned long val; 297 + 298 + csky_insn_reg_get_val(regs, tmp, &val); 299 + 300 + if (val >= 0) { 301 + instruction_pointer_set(regs, 302 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 303 + } else 304 + instruction_pointer_set(regs, addr + 4); 305 + 306 + csky_insn_reg_set_val(regs, tmp, val); 307 + } 308 + 309 + void __kprobes 310 + simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs) 311 + { 312 + unsigned long tmp = opcode & 0x1f; 313 + unsigned long val; 314 + 315 + csky_insn_reg_get_val(regs, tmp, &val); 316 + 317 + if (val > 0) { 318 + instruction_pointer_set(regs, 319 
+ addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 320 + } else 321 + instruction_pointer_set(regs, addr + 4); 322 + 323 + csky_insn_reg_set_val(regs, tmp, val); 324 + } 325 + 326 + void __kprobes 327 + simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs) 328 + { 329 + unsigned long tmp = opcode & 0x1f; 330 + unsigned long val; 331 + 332 + csky_insn_reg_get_val(regs, tmp, &val); 333 + 334 + if (val <= 0) { 335 + instruction_pointer_set(regs, 336 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 337 + } else 338 + instruction_pointer_set(regs, addr + 4); 339 + 340 + csky_insn_reg_set_val(regs, tmp, val); 341 + } 342 + 343 + void __kprobes 344 + simulate_blz32(u32 opcode, long addr, struct pt_regs *regs) 345 + { 346 + unsigned long tmp = opcode & 0x1f; 347 + unsigned long val; 348 + 349 + csky_insn_reg_get_val(regs, tmp, &val); 350 + 351 + if (val < 0) { 352 + instruction_pointer_set(regs, 353 + addr + sign_extend32((opcode & 0xffff0000) >> 15, 15)); 354 + } else 355 + instruction_pointer_set(regs, addr + 4); 356 + 357 + csky_insn_reg_set_val(regs, tmp, val); 358 + } 359 + 360 + void __kprobes 361 + simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs) 362 + { 363 + unsigned long tmp; 364 + 365 + tmp = (opcode & 0xffff) << 16; 366 + tmp |= (opcode & 0xffff0000) >> 16; 367 + 368 + instruction_pointer_set(regs, 369 + addr + sign_extend32((tmp & 0x3ffffff) << 1, 15)); 370 + 371 + regs->lr = addr + 4; 372 + } 373 + 374 + void __kprobes 375 + simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs) 376 + { 377 + unsigned long val; 378 + unsigned long offset = ((opcode & 0xffff0000) >> 14); 379 + 380 + val = *(unsigned int *) 381 + ((instruction_pointer(regs) + offset) & 0xfffffffc); 382 + 383 + instruction_pointer_set(regs, val); 384 + } 385 + 386 + void __kprobes 387 + simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs) 388 + { 389 + unsigned long val; 390 + unsigned long offset = ((opcode & 0xffff0000) >> 14); 391 + 392 + val = 
*(unsigned int *) 393 + ((instruction_pointer(regs) + offset) & 0xfffffffc); 394 + 395 + regs->lr = addr + 4; 396 + 397 + instruction_pointer_set(regs, val); 398 + }
+49
arch/csky/kernel/probes/simulate-insn.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + 3 + #ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H 4 + #define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H 5 + 6 + #define __CSKY_INSN_FUNCS(name, mask, val) \ 7 + static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \ 8 + { \ 9 + BUILD_BUG_ON(~(mask) & (val)); \ 10 + return (code & (mask)) == (val); \ 11 + } \ 12 + void simulate_##name(u32 opcode, long addr, struct pt_regs *regs); 13 + 14 + #define CSKY_INSN_SET_SIMULATE(name, code) \ 15 + do { \ 16 + if (csky_insn_is_##name(code)) { \ 17 + api->handler = simulate_##name; \ 18 + return INSN_GOOD_NO_SLOT; \ 19 + } \ 20 + } while (0) 21 + 22 + __CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400) 23 + __CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800) 24 + __CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00) 25 + __CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800) 26 + __CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801) 27 + __CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000) 28 + __CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480) 29 + 30 + __CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800) 31 + __CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860) 32 + __CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840) 33 + __CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0) 34 + __CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0) 35 + __CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80) 36 + __CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0) 37 + 38 + __CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900) 39 + __CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920) 40 + __CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820) 41 + __CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0) 42 + __CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940) 43 + __CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960) 44 + __CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980) 45 + __CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000) 46 + __CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0) 47 + __CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0) 48 + 49 + #endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */
+9 -1
arch/csky/kernel/traps.c
··· 14 14 #include <linux/kallsyms.h> 15 15 #include <linux/rtc.h> 16 16 #include <linux/uaccess.h> 17 + #include <linux/kprobes.h> 17 18 18 19 #include <asm/setup.h> 19 20 #include <asm/traps.h> ··· 110 109 force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc); 111 110 } 112 111 113 - #define USR_BKPT 0x1464 114 112 asmlinkage void trap_c(struct pt_regs *regs) 115 113 { 116 114 int sig; ··· 126 126 break; 127 127 /* ptrace */ 128 128 case VEC_TRACE: 129 + #ifdef CONFIG_KPROBES 130 + if (kprobe_single_step_handler(regs)) 131 + return; 132 + #endif 129 133 info.si_code = TRAP_TRACE; 130 134 sig = SIGTRAP; 131 135 break; 132 136 case VEC_ILLEGAL: 133 137 tsk->thread.trap_no = vector; 138 + #ifdef CONFIG_KPROBES 139 + if (kprobe_breakpoint_handler(regs)) 140 + return; 141 + #endif 134 142 die_if_kernel("Kernel mode ILLEGAL", regs, vector); 135 143 #ifndef CONFIG_CPU_NO_USER_BKPT 136 144 if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
+4
arch/csky/mm/fault.c
··· 18 18 #include <linux/extable.h> 19 19 #include <linux/uaccess.h> 20 20 #include <linux/perf_event.h> 21 + #include <linux/kprobes.h> 21 22 22 23 #include <asm/hardirq.h> 23 24 #include <asm/mmu_context.h> ··· 53 52 int si_code; 54 53 int fault; 55 54 unsigned long address = mmu_meh & PAGE_MASK; 55 + 56 + if (kprobe_page_fault(regs, tsk->thread.trap_no)) 57 + return; 56 58 57 59 si_code = SEGV_MAPERR; 58 60