git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

MIPS: kprobe: Add support.

This patch is based on previous work by Sony and Himanshu Chauhan.

I have done some cleanup and implemented JProbes and KRETPROBES. The
KRETPROBES part is pretty much copied verbatim from powerpc. A possible
future enhancement might be to factor out the common code.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Cc: Himanshu Chauhan <hschauhan@nulltrace.org>
To: linux-mips@linux-mips.org
To: ananth@in.ibm.com
To: anil.s.keshavamurthy@intel.com
To: davem@davemloft.net
To: masami.hiramatsu.pt@hitachi.com
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/1525/
Patchwork: https://patchwork.linux-mips.org/patch/1530/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by David Daney, committed by Ralf Baechle
c1bf207d 2ea6399f

695 insertions(+), 2 deletions(-)

arch/mips/Kconfig (+2)
···
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
 	select RTC_LIB if !MACH_LOONGSON
 
 mainmenu "Linux/MIPS Kernel Configuration"
arch/mips/Makefile (+3)
···
 vmlinux.32: vmlinux
 	$(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
 
+
+#obj-$(CONFIG_KPROBES)	+= kprobes.o
+
 #
 # The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
 # ELF files from 32-bit files by conversion.
arch/mips/include/asm/break.h (+2)
···
 #define BRK_BUG		512	/* Used by BUG() */
 #define BRK_KDB		513	/* Used in KDB_ENTER() */
 #define BRK_MEMU	514	/* Used by FPU emulator */
+#define BRK_KPROBE_BP	515	/* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 516	/* Kprobe single step software implementation */
 #define BRK_MULOVF	1023	/* Multiply overflow */
 
 #endif /* __ASM_BREAK_H */
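The two new codes occupy the 20-bit code field of a MIPS break instruction (SPECIAL opcode, break function 0x0d). For orientation only, not part of the patch, the instruction words that the kprobes code plants can be computed with a small userspace sketch; the helper name mips_break_word() is made up here, and the bit positions follow the MIPS32 break encoding:

#include <stdio.h>
#include <stdint.h>

#define BRK_KPROBE_BP		515
#define BRK_KPROBE_SSTEPBP	516

/* MIPS32 break: opcode 0 (SPECIAL), function 0x0d, code in bits 25..6 */
static uint32_t mips_break_word(uint32_t code)
{
        return (code << 6) | 0x0d;
}

int main(void)
{
        /* should print 0x000080cd and 0x0000810d */
        printf("BRK_KPROBE_BP      -> 0x%08x\n", mips_break_word(BRK_KPROBE_BP));
        printf("BRK_KPROBE_SSTEPBP -> 0x%08x\n", mips_break_word(BRK_KPROBE_SSTEPBP));
        return 0;
}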
arch/mips/include/asm/kdebug.h (+3)
···
 	DIE_FP,
 	DIE_TRAP,
 	DIE_RI,
+	DIE_PAGE_FAULT,
+	DIE_BREAK,
+	DIE_SSTEPBP
 };
 
 #endif /* _ASM_MIPS_KDEBUG_H */
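These new enum values travel over the generic die notifier chain, so any module can observe them, not only the kprobes core below. A minimal sketch of such a listener, using the standard register_die_notifier() API; the module and handler names are hypothetical:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

/* Hypothetical listener: log every kprobe breakpoint event. */
static int my_die_handler(struct notifier_block *self,
                          unsigned long val, void *data)
{
        struct die_args *args = data;

        if (val == DIE_BREAK)
                pr_info("DIE_BREAK at epc = 0x%lx\n", args->regs->cp0_epc);

        return NOTIFY_DONE;     /* don't consume the event */
}

static struct notifier_block my_die_nb = {
        .notifier_call = my_die_handler,
};

static int __init my_init(void)
{
        return register_die_notifier(&my_die_nb);
}

static void __exit my_exit(void)
{
        unregister_die_notifier(&my_die_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");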
arch/mips/include/asm/kprobes.h (new file, +92)
···
+/*
+ * Kernel Probes (KProbes)
+ * include/asm-mips/kprobes.h
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_KPROBES_H
+#define _ASM_KPROBES_H
+
+#include <linux/ptrace.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/inst.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
+struct kprobe;
+struct pt_regs;
+
+typedef union mips_instruction kprobe_opcode_t;
+
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p)						\
+do {									\
+	flush_icache_range((unsigned long)p->addr,			\
+			   (unsigned long)p->addr +			\
+			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
+} while (0)
+
+
+#define kretprobe_blacklist_size 0
+
+void arch_remove_kprobe(struct kprobe *p);
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+	/* copy of the original instruction */
+	kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long old_SR;
+	unsigned long saved_SR;
+	unsigned long saved_epc;
+};
+
+#define MAX_JPROBES_STACK_SIZE 128
+#define MAX_JPROBES_STACK_ADDR \
+	(((unsigned long)current_thread_info()) + THREAD_SIZE - 32 - sizeof(struct pt_regs))
+
+#define MIN_JPROBES_STACK_SIZE(ADDR)					\
+	((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR)	\
+		? MAX_JPROBES_STACK_ADDR - (ADDR)			\
+		: MAX_JPROBES_STACK_SIZE)
+
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_old_SR;
+	unsigned long kprobe_saved_SR;
+	unsigned long kprobe_saved_epc;
+	unsigned long jprobe_saved_sp;
+	struct pt_regs jprobe_saved_regs;
+	u8 jprobes_stack[MAX_JPROBES_STACK_SIZE];
+	struct prev_kprobe prev_kprobe;
+};
+
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+				    unsigned long val, void *data);
+
+#endif /* _ASM_KPROBES_H */
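MIN_JPROBES_STACK_SIZE() clamps the jprobe stack snapshot so the memcpy() in setjmp_pre_handler() never reads past the usable top of the kernel stack. A userspace sketch of the same arithmetic, with made-up addresses and a hypothetical helper name min_jprobes_stack_size():

#include <stdio.h>

#define MAX_JPROBES_STACK_SIZE 128UL

static unsigned long min_jprobes_stack_size(unsigned long addr,
                                            unsigned long max_addr)
{
        /* same ternary as the kernel macro, with max_addr standing in
           for MAX_JPROBES_STACK_ADDR */
        return (addr + MAX_JPROBES_STACK_SIZE > max_addr)
                ? max_addr - addr
                : MAX_JPROBES_STACK_SIZE;
}

int main(void)
{
        unsigned long top = 0x1000;     /* pretend top-of-stack limit */

        printf("%lu\n", min_jprobes_stack_size(0x0e00, top)); /* 128: room to spare */
        printf("%lu\n", min_jprobes_stack_size(0x0fc0, top)); /* 64: clamped at top */
        return 0;
}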
arch/mips/kernel/Makefile (+1)
···
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
 obj-$(CONFIG_IRQ_GIC)		+= irq-gic.o
 
+obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
 obj-$(CONFIG_64BIT)		+= scall64-64.o
 obj-$(CONFIG_MIPS32_COMPAT)	+= linux32.o ptrace32.o signal32.o
arch/mips/kernel/kprobes.c (new file, +557)
···
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ *   Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/break.h>
+#include <asm/inst.h>
+
+static const union mips_instruction breakpoint_insn = {
+	.b_format = {
+		.opcode = spec_op,
+		.code = BRK_KPROBE_BP,
+		.func = break_op
+	}
+};
+
+static const union mips_instruction breakpoint2_insn = {
+	.b_format = {
+		.opcode = spec_op,
+		.code = BRK_KPROBE_SSTEPBP,
+		.func = break_op
+	}
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int __kprobes insn_has_delayslot(union mips_instruction insn)
+{
+	switch (insn.i_format.opcode) {
+
+		/*
+		 * This group contains:
+		 * jr and jalr are in r_format format.
+		 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jr_op:
+		case jalr_op:
+			break;
+		default:
+			goto insn_ok;
+		}
+
+		/*
+		 * This group contains:
+		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+		 */
+	case bcond_op:
+
+		/*
+		 * These are unconditional and in j_format.
+		 */
+	case jal_op:
+	case j_op:
+
+		/*
+		 * These are conditional and in i_format.
+		 */
+	case beq_op:
+	case beql_op:
+	case bne_op:
+	case bnel_op:
+	case blez_op:
+	case blezl_op:
+	case bgtz_op:
+	case bgtzl_op:
+
+		/*
+		 * These are the FPA/cp1 branch instructions.
+		 */
+	case cop1_op:
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+	case ldc2_op: /* This is bbit032 on Octeon */
+	case swc2_op: /* This is bbit1 on Octeon */
+	case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+		return 1;
+	default:
+		break;
+	}
+insn_ok:
+	return 0;
+}
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+	union mips_instruction insn;
+	union mips_instruction prev_insn;
+	int ret = 0;
+
+	prev_insn = p->addr[-1];
+	insn = p->addr[0];
+
+	if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
+		pr_notice("Kprobes for branch and jump instructions are not supported\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* insn: must be on special executable page on mips. */
+	p->ainsn.insn = get_insn_slot();
+	if (!p->ainsn.insn) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * In the kprobe->ainsn.insn[] array we store the original
+	 * instruction at index zero and a break trap instruction at
+	 * index one.
+	 */
+
+	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+	p->ainsn.insn[1] = breakpoint2_insn;
+	p->opcode = *p->addr;
+
+out:
+	return ret;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+	*p->addr = breakpoint_insn;
+	flush_insn_slot(p);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+	*p->addr = p->opcode;
+	flush_insn_slot(p);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+	free_insn_slot(p->ainsn.insn, 0);
+}
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+			       struct kprobe_ctlblk *kcb)
+{
+	__get_cpu_var(current_kprobe) = p;
+	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+	kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+	regs->cp0_status &= ~ST0_IE;
+
+	/* single step inline if the instruction is a break */
+	if (p->opcode.word == breakpoint_insn.word ||
+	    p->opcode.word == breakpoint2_insn.word)
+		regs->cp0_epc = (unsigned long)p->addr;
+	else
+		regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *p;
+	int ret = 0;
+	kprobe_opcode_t *addr;
+	struct kprobe_ctlblk *kcb;
+
+	addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
+
+	/* Check we're not actually recursing */
+	if (kprobe_running()) {
+		p = get_kprobe(addr);
+		if (p) {
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
+			    p->ainsn.insn->word == breakpoint_insn.word) {
+				regs->cp0_status &= ~ST0_IE;
+				regs->cp0_status |= kcb->kprobe_saved_SR;
+				goto no_kprobe;
+			}
+			/*
+			 * We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * We here save the original kprobes variables and
+			 * just single step on the instruction of the new probe
+			 * without calling any user handlers.
+			 */
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kprobes_inc_nmissed_count(p);
+			prepare_singlestep(p, regs);
+			kcb->kprobe_status = KPROBE_REENTER;
+			return 1;
+		} else {
+			if (addr->word != breakpoint_insn.word) {
+				/*
+				 * The breakpoint instruction was removed by
+				 * another cpu right after we hit, no further
+				 * handling of this interrupt is appropriate
+				 */
+				ret = 1;
+				goto no_kprobe;
+			}
+			p = __get_cpu_var(current_kprobe);
+			if (p->break_handler && p->break_handler(p, regs))
+				goto ss_probe;
+		}
+		goto no_kprobe;
+	}
+
+	p = get_kprobe(addr);
+	if (!p) {
+		if (addr->word != breakpoint_insn.word) {
+			/*
+			 * The breakpoint instruction was removed right
+			 * after we hit it. Another cpu has removed
+			 * either a probepoint or a debugger breakpoint
+			 * at this address. In either case, no further
+			 * handling of this interrupt is appropriate.
+			 */
+			ret = 1;
+		}
+		/* Not one of ours: let kernel handle it */
+		goto no_kprobe;
+	}
+
+	set_current_kprobe(p, regs, kcb);
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+	if (p->pre_handler && p->pre_handler(p, regs)) {
+		/* handler has already set things up, so skip ss setup */
+		return 1;
+	}
+
+ss_probe:
+	prepare_singlestep(p, regs);
+	kcb->kprobe_status = KPROBE_HIT_SS;
+	return 1;
+
+no_kprobe:
+	preempt_enable_no_resched();
+	return ret;
+
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "break 0"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void __kprobes resume_execution(struct kprobe *p,
+				       struct pt_regs *regs,
+				       struct kprobe_ctlblk *kcb)
+{
+	unsigned long orig_epc = kcb->kprobe_saved_epc;
+	regs->cp0_epc = orig_epc + 4;
+}
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
+		return 0;
+
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
+	}
+
+	resume_execution(cur, regs, kcb);
+
+	regs->cp0_status |= kcb->kprobe_saved_SR;
+
+	/* Restore back the original saved kprobes variables and continue. */
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
+		goto out;
+	}
+	reset_current_kprobe();
+out:
+	preempt_enable_no_resched();
+
+	return 1;
+}
+
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+		return 1;
+
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs, kcb);
+		regs->cp0_status |= kcb->kprobe_old_SR;
+
+		reset_current_kprobe();
+		preempt_enable_no_resched();
+	}
+	return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
+{
+
+	struct die_args *args = (struct die_args *)data;
+	int ret = NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BREAK:
+		if (kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+	case DIE_SSTEPBP:
+		if (post_kprobe_handler(args->regs))
+			ret = NOTIFY_STOP;
+		break;
+
+	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
+
+		if (kprobe_running()
+		    && kprobe_fault_handler(args->regs, args->trapnr))
+			ret = NOTIFY_STOP;
+		preempt_enable();
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	kcb->jprobe_saved_regs = *regs;
+	kcb->jprobe_saved_sp = regs->regs[29];
+
+	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
+	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+
+	regs->cp0_epc = (unsigned long)(jp->entry);
+
+	return 1;
+}
+
+/* Defined in the inline asm below. */
+void jprobe_return_end(void);
+
+void __kprobes jprobe_return(void)
+{
+	/* Assembler quirk necessitates this '0,code' business. */
+	asm volatile(
+		"break 0,%0\n\t"
+		".globl jprobe_return_end\n"
+		"jprobe_return_end:\n"
+		: : "n" (BRK_KPROBE_BP) : "memory");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (regs->cp0_epc >= (unsigned long)jprobe_return &&
+	    regs->cp0_epc <= (unsigned long)jprobe_return_end) {
+		*regs = kcb->jprobe_saved_regs;
+		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
+		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
+		preempt_enable_no_resched();
+
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Function return probe trampoline:
+ *	- init_kprobes() establishes a probepoint here
+ *	- When the probed function returns, this probe causes the
+ *	  handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+	asm volatile(
+		".set push\n\t"
+		/* Keep the assembler from reordering and placing JR here. */
+		".set noreorder\n\t"
+		"nop\n\t"
+		".global kretprobe_trampoline\n"
+		"kretprobe_trampoline:\n\t"
+		"nop\n\t"
+		".set pop"
+		: : : "memory");
+}
+
+void kretprobe_trampoline(void);
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+				      struct pt_regs *regs)
+{
+	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+
+	/* Replace the return addr with trampoline addr */
+	regs->regs[31] = (unsigned long)kretprobe_trampoline;
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+					      struct pt_regs *regs)
+{
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head, empty_rp;
+	struct hlist_node *node, *tmp;
+	unsigned long flags, orig_ret_address = 0;
+	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
+
+	INIT_HLIST_HEAD(&empty_rp);
+	kretprobe_hash_lock(current, &head, &flags);
+
+	/*
+	 * It is possible to have multiple instances associated with a given
+	 * task either because multiple functions in the call path
+	 * have a return probe installed on them, and/or more than one
+	 * return probe was registered for a target function.
+	 *
+	 * We can handle this because:
+	 *	- instances are always inserted at the head of the list
+	 *	- when multiple return probes are registered for the same
+	 *	  function, the first instance's ret_addr will point to the
+	 *	  real return address, and all the rest will point to
+	 *	  kretprobe_trampoline
+	 */
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
+		if (ri->rp && ri->rp->handler)
+			ri->rp->handler(ri, regs);
+
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri, &empty_rp);
+
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+	instruction_pointer(regs) = orig_ret_address;
+
+	reset_current_kprobe();
+	kretprobe_hash_unlock(current, &flags);
+	preempt_enable_no_resched();
+
+	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+		hlist_del(&ri->hlist);
+		kfree(ri);
+	}
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+		return 1;
+
+	return 0;
+}
+
+static struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+	return register_kprobe(&trampoline_p);
+}
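The patch adds no sample client, so for orientation only, here is a hedged sketch of a module exercising the new support through the generic kprobes API. The target symbol do_fork is purely illustrative; the MIPS-specific parts are reading regs->cp0_epc and the $v0 return value in regs->regs[2]:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Illustrative target; any kernel text symbol not adjacent to a
   branch/jump (see insn_has_delayslot() above) should work. */
static int pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit: epc = 0x%lx\n", regs->cp0_epc);
        return 0;       /* let the single-step machinery run */
}

static struct kprobe kp = {
        .symbol_name = "do_fork",
        .pre_handler = pre_handler,
};

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        pr_info("do_fork returned 0x%lx\n", regs->regs[2]);     /* $v0 */
        return 0;
}

static struct kretprobe rp = {
        .kp.symbol_name = "do_fork",
        .handler = ret_handler,
};

static int __init probes_init(void)
{
        int ret;

        ret = register_kprobe(&kp);
        if (ret)
                return ret;

        ret = register_kretprobe(&rp);
        if (ret)
                unregister_kprobe(&kp);

        return ret;
}

static void __exit probes_exit(void)
{
        unregister_kretprobe(&rp);
        unregister_kprobe(&kp);
}

module_init(probes_init);
module_exit(probes_exit);
MODULE_LICENSE("GPL");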
arch/mips/kernel/traps.c (+21 -1)
···
 #include <linux/ptrace.h>
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/notifier.h>
 #include <linux/kdb.h>
 
···
 	__show_regs((struct pt_regs *)regs);
 }
 
-void show_registers(const struct pt_regs *regs)
+void show_registers(struct pt_regs *regs)
 {
 	const int field = 2 * sizeof(unsigned long);
 
···
 	bcode = ((opcode >> 6) & ((1 << 20) - 1));
 	if (bcode >= (1 << 10))
 		bcode >>= 10;
+
+	/*
+	 * notify the kprobe handlers, if instruction is likely to
+	 * pertain to them.
+	 */
+	switch (bcode) {
+	case BRK_KPROBE_BP:
+		if (notify_die(DIE_BREAK, "debug", regs, bcode, 0, 0) == NOTIFY_STOP)
+			return;
+		else
+			break;
+	case BRK_KPROBE_SSTEPBP:
+		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, 0, 0) == NOTIFY_STOP)
+			return;
+		else
+			break;
+	default:
+		break;
+	}
 
 	do_trap_or_bp(regs, bcode, "Break");
 	return;
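The bcode normalization above exists because, assuming the usual gas behavior, a one-operand "break n" puts the code in the upper half of the 20-bit field, while the two-operand "break 0,%0" used by jprobe_return() keeps it in the lower half. Under that assumption, a quick userspace check (hypothetical helper decode_bcode()) shows both encodings of code 515 decode to the same value:

#include <stdio.h>
#include <stdint.h>

/* the do_bp() normalization from the hunk above */
static unsigned int decode_bcode(uint32_t opcode)
{
        unsigned int bcode = (opcode >> 6) & ((1 << 20) - 1);

        if (bcode >= (1 << 10))
                bcode >>= 10;
        return bcode;
}

int main(void)
{
        uint32_t two_operand = (515u << 6)  | 0x0d;     /* break 0, 515 */
        uint32_t one_operand = (515u << 16) | 0x0d;     /* break 515 */

        /* both print 515, so do_bp() can dispatch either form */
        printf("%u %u\n", decode_bcode(two_operand), decode_bcode(one_operand));
        return 0;
}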
arch/mips/mm/fault.c (+14 -1)
···
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/module.h>
+#include <linux/kprobes.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
···
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>	/* For VMALLOC_END */
+#include <linux/kdebug.h>
 
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
 	unsigned long address)
 {
 	struct vm_area_struct * vma = NULL;
···
 	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
 	       current->comm, current->pid, field, address, write,
 	       field, regs->cp0_epc);
+#endif
+
+#ifdef CONFIG_KPROBES
+	/*
+	 * This is to notify the fault handler of the kprobes. The
+	 * exception code is redundant as it is also carried in REGS,
+	 * but we pass it anyhow.
+	 */
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+		return;
+#endif
 
 	info.si_code = SEGV_MAPERR;
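The exception code handed to notify_die() above is the ExcCode field, bits 6..2 of the CP0 Cause register; on MIPS the usual page-fault causes are TLBL (ExcCode 2) and TLBS (ExcCode 3). A one-line sketch of the extraction, with a made-up Cause value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cause = 2 << 2;        /* pretend Cause with ExcCode = TLBL */

        printf("ExcCode = %u\n", (cause >> 2) & 0x1f);  /* prints 2 */
        return 0;
}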