Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kprobes: treewide: Make it harder to refer kretprobe_trampoline directly

Since there is now kretprobe_trampoline_addr() for referring to the
address of the kretprobe trampoline code, we don't need to access
kretprobe_trampoline directly.

Make it harder to refer to directly by renaming it to __kretprobe_trampoline().

Link: https://lkml.kernel.org/r/163163045446.489837.14510577516938803097.stgit@devnote2

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

authored by

Masami Hiramatsu and committed by
Steven Rostedt (VMware)
adf8a61a 96fed8ac

+76 -75
+1 -1
arch/arc/include/asm/kprobes.h
··· 46 46 }; 47 47 48 48 int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause); 49 - void kretprobe_trampoline(void); 49 + void __kretprobe_trampoline(void); 50 50 void trap_is_kprobe(unsigned long address, struct pt_regs *regs); 51 51 #else 52 52 #define trap_is_kprobe(address, regs)
+6 -5
arch/arc/kernel/kprobes.c
··· 363 363 364 364 static void __used kretprobe_trampoline_holder(void) 365 365 { 366 - __asm__ __volatile__(".global kretprobe_trampoline\n" 367 - "kretprobe_trampoline:\n" "nop\n"); 366 + __asm__ __volatile__(".global __kretprobe_trampoline\n" 367 + "__kretprobe_trampoline:\n" 368 + "nop\n"); 368 369 } 369 370 370 371 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, ··· 376 375 ri->fp = NULL; 377 376 378 377 /* Replace the return addr with trampoline addr */ 379 - regs->blink = (unsigned long)&kretprobe_trampoline; 378 + regs->blink = (unsigned long)&__kretprobe_trampoline; 380 379 } 381 380 382 381 static int __kprobes trampoline_probe_handler(struct kprobe *p, ··· 391 390 } 392 391 393 392 static struct kprobe trampoline_p = { 394 - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 393 + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, 395 394 .pre_handler = trampoline_probe_handler 396 395 }; 397 396 ··· 403 402 404 403 int __kprobes arch_trampoline_kprobe(struct kprobe *p) 405 404 { 406 - if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline) 405 + if (p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline) 407 406 return 1; 408 407 409 408 return 0;
+3 -3
arch/arm/probes/kprobes/core.c
··· 373 373 * for kretprobe handlers which should normally be interested in r0 only 374 374 * anyway. 375 375 */ 376 - void __naked __kprobes kretprobe_trampoline(void) 376 + void __naked __kprobes __kretprobe_trampoline(void) 377 377 { 378 378 __asm__ __volatile__ ( 379 379 "stmdb sp!, {r0 - r11} \n\t" ··· 389 389 : : : "memory"); 390 390 } 391 391 392 - /* Called from kretprobe_trampoline */ 392 + /* Called from __kretprobe_trampoline */ 393 393 static __used __kprobes void *trampoline_handler(struct pt_regs *regs) 394 394 { 395 395 return (void *)kretprobe_trampoline_handler(regs, (void *)regs->ARM_fp); ··· 402 402 ri->fp = (void *)regs->ARM_fp; 403 403 404 404 /* Replace the return addr with trampoline addr. */ 405 - regs->ARM_lr = (unsigned long)&kretprobe_trampoline; 405 + regs->ARM_lr = (unsigned long)&__kretprobe_trampoline; 406 406 } 407 407 408 408 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+1 -1
arch/arm64/include/asm/kprobes.h
··· 39 39 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); 40 40 int kprobe_exceptions_notify(struct notifier_block *self, 41 41 unsigned long val, void *data); 42 - void kretprobe_trampoline(void); 42 + void __kretprobe_trampoline(void); 43 43 void __kprobes *trampoline_probe_handler(struct pt_regs *regs); 44 44 45 45 #endif /* CONFIG_KPROBES */
+1 -1
arch/arm64/kernel/probes/kprobes.c
··· 411 411 ri->fp = (void *)kernel_stack_pointer(regs); 412 412 413 413 /* replace return addr (x30) with trampoline */ 414 - regs->regs[30] = (long)&kretprobe_trampoline; 414 + regs->regs[30] = (long)&__kretprobe_trampoline; 415 415 } 416 416 417 417 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+2 -2
arch/arm64/kernel/probes/kprobes_trampoline.S
··· 61 61 ldp x28, x29, [sp, #S_X28] 62 62 .endm 63 63 64 - SYM_CODE_START(kretprobe_trampoline) 64 + SYM_CODE_START(__kretprobe_trampoline) 65 65 sub sp, sp, #PT_REGS_SIZE 66 66 67 67 save_all_base_regs ··· 79 79 add sp, sp, #PT_REGS_SIZE 80 80 ret 81 81 82 - SYM_CODE_END(kretprobe_trampoline) 82 + SYM_CODE_END(__kretprobe_trampoline)
+1 -1
arch/csky/include/asm/kprobes.h
··· 41 41 int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); 42 42 int kprobe_breakpoint_handler(struct pt_regs *regs); 43 43 int kprobe_single_step_handler(struct pt_regs *regs); 44 - void kretprobe_trampoline(void); 44 + void __kretprobe_trampoline(void); 45 45 void __kprobes *trampoline_probe_handler(struct pt_regs *regs); 46 46 47 47 #endif /* CONFIG_KPROBES */
+1 -1
arch/csky/kernel/probes/kprobes.c
··· 394 394 { 395 395 ri->ret_addr = (kprobe_opcode_t *)regs->lr; 396 396 ri->fp = NULL; 397 - regs->lr = (unsigned long) &kretprobe_trampoline; 397 + regs->lr = (unsigned long) &__kretprobe_trampoline; 398 398 } 399 399 400 400 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+2 -2
arch/csky/kernel/probes/kprobes_trampoline.S
··· 4 4 5 5 #include <abi/entry.h> 6 6 7 - ENTRY(kretprobe_trampoline) 7 + ENTRY(__kretprobe_trampoline) 8 8 SAVE_REGS_FTRACE 9 9 10 10 mov a0, sp /* pt_regs */ ··· 16 16 17 17 RESTORE_REGS_FTRACE 18 18 rts 19 - ENDPROC(kretprobe_trampoline) 19 + ENDPROC(__kretprobe_trampoline)
+4 -4
arch/ia64/kernel/kprobes.c
··· 392 392 __this_cpu_write(current_kprobe, p); 393 393 } 394 394 395 - void kretprobe_trampoline(void) 395 + void __kretprobe_trampoline(void) 396 396 { 397 397 } 398 398 ··· 414 414 ri->fp = NULL; 415 415 416 416 /* Replace the return addr with trampoline addr */ 417 - regs->b0 = (unsigned long)dereference_function_descriptor(kretprobe_trampoline); 417 + regs->b0 = (unsigned long)dereference_function_descriptor(__kretprobe_trampoline); 418 418 } 419 419 420 420 /* Check the instruction in the slot is break */ ··· 897 897 int __init arch_init_kprobes(void) 898 898 { 899 899 trampoline_p.addr = 900 - dereference_function_descriptor(kretprobe_trampoline); 900 + dereference_function_descriptor(__kretprobe_trampoline); 901 901 return register_kprobe(&trampoline_p); 902 902 } 903 903 904 904 int __kprobes arch_trampoline_kprobe(struct kprobe *p) 905 905 { 906 906 if (p->addr == 907 - dereference_function_descriptor(kretprobe_trampoline)) 907 + dereference_function_descriptor(__kretprobe_trampoline)) 908 908 return 1; 909 909 910 910 return 0;
+6 -6
arch/mips/kernel/kprobes.c
··· 460 460 /* Keep the assembler from reordering and placing JR here. */ 461 461 ".set noreorder\n\t" 462 462 "nop\n\t" 463 - ".global kretprobe_trampoline\n" 464 - "kretprobe_trampoline:\n\t" 463 + ".global __kretprobe_trampoline\n" 464 + "__kretprobe_trampoline:\n\t" 465 465 "nop\n\t" 466 466 ".set pop" 467 467 : : : "memory"); 468 468 } 469 469 470 - void kretprobe_trampoline(void); 470 + void __kretprobe_trampoline(void); 471 471 472 472 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 473 473 struct pt_regs *regs) ··· 476 476 ri->fp = NULL; 477 477 478 478 /* Replace the return addr with trampoline addr */ 479 - regs->regs[31] = (unsigned long)kretprobe_trampoline; 479 + regs->regs[31] = (unsigned long)__kretprobe_trampoline; 480 480 } 481 481 482 482 /* ··· 496 496 497 497 int __kprobes arch_trampoline_kprobe(struct kprobe *p) 498 498 { 499 - if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) 499 + if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline) 500 500 return 1; 501 501 502 502 return 0; 503 503 } 504 504 505 505 static struct kprobe trampoline_p = { 506 - .addr = (kprobe_opcode_t *)kretprobe_trampoline, 506 + .addr = (kprobe_opcode_t *)__kretprobe_trampoline, 507 507 .pre_handler = trampoline_probe_handler 508 508 }; 509 509
+2 -2
arch/parisc/kernel/kprobes.c
··· 175 175 return 1; 176 176 } 177 177 178 - void kretprobe_trampoline(void) 178 + void __kretprobe_trampoline(void) 179 179 { 180 180 asm volatile("nop"); 181 181 asm volatile("nop"); ··· 217 217 int __init arch_init_kprobes(void) 218 218 { 219 219 trampoline_p.addr = (kprobe_opcode_t *) 220 - dereference_function_descriptor(kretprobe_trampoline); 220 + dereference_function_descriptor(__kretprobe_trampoline); 221 221 return register_kprobe(&trampoline_p); 222 222 }
+1 -1
arch/powerpc/include/asm/kprobes.h
··· 51 51 #define flush_insn_slot(p) do { } while (0) 52 52 #define kretprobe_blacklist_size 0 53 53 54 - void kretprobe_trampoline(void); 54 + void __kretprobe_trampoline(void); 55 55 extern void arch_remove_kprobe(struct kprobe *p); 56 56 57 57 /* Architecture specific copy of original instruction */
+8 -8
arch/powerpc/kernel/kprobes.c
··· 237 237 ri->fp = NULL; 238 238 239 239 /* Replace the return addr with trampoline addr */ 240 - regs->link = (unsigned long)kretprobe_trampoline; 240 + regs->link = (unsigned long)__kretprobe_trampoline; 241 241 } 242 242 NOKPROBE_SYMBOL(arch_prepare_kretprobe); 243 243 ··· 403 403 * - When the probed function returns, this probe 404 404 * causes the handlers to fire 405 405 */ 406 - asm(".global kretprobe_trampoline\n" 407 - ".type kretprobe_trampoline, @function\n" 408 - "kretprobe_trampoline:\n" 406 + asm(".global __kretprobe_trampoline\n" 407 + ".type __kretprobe_trampoline, @function\n" 408 + "__kretprobe_trampoline:\n" 409 409 "nop\n" 410 410 "blr\n" 411 - ".size kretprobe_trampoline, .-kretprobe_trampoline\n"); 411 + ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"); 412 412 413 413 /* 414 414 * Called when the probe at kretprobe trampoline is hit ··· 427 427 * as it is used to determine the return address from the trap. 428 428 * For (2), since nip is not honoured with optprobes, we instead setup 429 429 * the link register properly so that the subsequent 'blr' in 430 - * kretprobe_trampoline jumps back to the right instruction. 430 + * __kretprobe_trampoline jumps back to the right instruction. 431 431 * 432 432 * For nip, we should set the address to the previous instruction since 433 433 * we end up emulating it in kprobe_handler(), which increments the nip ··· 543 543 NOKPROBE_SYMBOL(kprobe_fault_handler); 544 544 545 545 static struct kprobe trampoline_p = { 546 - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 546 + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, 547 547 .pre_handler = trampoline_probe_handler 548 548 }; 549 549 ··· 554 554 555 555 int arch_trampoline_kprobe(struct kprobe *p) 556 556 { 557 - if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) 557 + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) 558 558 return 1; 559 559 560 560 return 0;
+1 -1
arch/powerpc/kernel/optprobes.c
··· 56 56 * has a 'nop' instruction, which can be emulated. 57 57 * So further checks can be skipped. 58 58 */ 59 - if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) 59 + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) 60 60 return addr + sizeof(kprobe_opcode_t); 61 61 62 62 /*
+1 -1
arch/powerpc/kernel/stacktrace.c
··· 155 155 * Mark stacktraces with kretprobed functions on them 156 156 * as unreliable. 157 157 */ 158 - if (ip == (unsigned long)kretprobe_trampoline) 158 + if (ip == (unsigned long)__kretprobe_trampoline) 159 159 return -EINVAL; 160 160 #endif 161 161
+1 -1
arch/riscv/include/asm/kprobes.h
··· 40 40 int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); 41 41 bool kprobe_breakpoint_handler(struct pt_regs *regs); 42 42 bool kprobe_single_step_handler(struct pt_regs *regs); 43 - void kretprobe_trampoline(void); 43 + void __kretprobe_trampoline(void); 44 44 void __kprobes *trampoline_probe_handler(struct pt_regs *regs); 45 45 46 46 #endif /* CONFIG_KPROBES */
+1 -1
arch/riscv/kernel/probes/kprobes.c
··· 355 355 { 356 356 ri->ret_addr = (kprobe_opcode_t *)regs->ra; 357 357 ri->fp = NULL; 358 - regs->ra = (unsigned long) &kretprobe_trampoline; 358 + regs->ra = (unsigned long) &__kretprobe_trampoline; 359 359 } 360 360 361 361 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+2 -2
arch/riscv/kernel/probes/kprobes_trampoline.S
··· 75 75 REG_L x31, PT_T6(sp) 76 76 .endm 77 77 78 - ENTRY(kretprobe_trampoline) 78 + ENTRY(__kretprobe_trampoline) 79 79 addi sp, sp, -(PT_SIZE_ON_STACK) 80 80 save_all_base_regs 81 81 ··· 90 90 addi sp, sp, PT_SIZE_ON_STACK 91 91 92 92 ret 93 - ENDPROC(kretprobe_trampoline) 93 + ENDPROC(__kretprobe_trampoline)
+1 -1
arch/s390/include/asm/kprobes.h
··· 70 70 }; 71 71 72 72 void arch_remove_kprobe(struct kprobe *p); 73 - void kretprobe_trampoline(void); 73 + void __kretprobe_trampoline(void); 74 74 75 75 int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 76 76 int kprobe_exceptions_notify(struct notifier_block *self,
+5 -5
arch/s390/kernel/kprobes.c
··· 242 242 ri->fp = NULL; 243 243 244 244 /* Replace the return addr with trampoline addr */ 245 - regs->gprs[14] = (unsigned long) &kretprobe_trampoline; 245 + regs->gprs[14] = (unsigned long) &__kretprobe_trampoline; 246 246 } 247 247 NOKPROBE_SYMBOL(arch_prepare_kretprobe); 248 248 ··· 334 334 */ 335 335 static void __used kretprobe_trampoline_holder(void) 336 336 { 337 - asm volatile(".global kretprobe_trampoline\n" 338 - "kretprobe_trampoline: bcr 0,0\n"); 337 + asm volatile(".global __kretprobe_trampoline\n" 338 + "__kretprobe_trampoline: bcr 0,0\n"); 339 339 } 340 340 341 341 /* ··· 509 509 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 510 510 511 511 static struct kprobe trampoline = { 512 - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 512 + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, 513 513 .pre_handler = trampoline_probe_handler 514 514 }; 515 515 ··· 520 520 521 521 int arch_trampoline_kprobe(struct kprobe *p) 522 522 { 523 - return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; 523 + return p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline; 524 524 } 525 525 NOKPROBE_SYMBOL(arch_trampoline_kprobe);
+1 -1
arch/s390/kernel/stacktrace.c
··· 46 46 * Mark stacktraces with kretprobed functions on them 47 47 * as unreliable. 48 48 */ 49 - if (state.ip == (unsigned long)kretprobe_trampoline) 49 + if (state.ip == (unsigned long)__kretprobe_trampoline) 50 50 return -EINVAL; 51 51 #endif 52 52
+1 -1
arch/sh/include/asm/kprobes.h
··· 26 26 struct kprobe; 27 27 28 28 void arch_remove_kprobe(struct kprobe *); 29 - void kretprobe_trampoline(void); 29 + void __kretprobe_trampoline(void); 30 30 31 31 /* Architecture specific copy of original instruction*/ 32 32 struct arch_specific_insn {
+5 -5
arch/sh/kernel/kprobes.c
··· 207 207 ri->fp = NULL; 208 208 209 209 /* Replace the return addr with trampoline addr */ 210 - regs->pr = (unsigned long)kretprobe_trampoline; 210 + regs->pr = (unsigned long)__kretprobe_trampoline; 211 211 } 212 212 213 213 static int __kprobes kprobe_handler(struct pt_regs *regs) ··· 293 293 */ 294 294 static void __used kretprobe_trampoline_holder(void) 295 295 { 296 - asm volatile (".globl kretprobe_trampoline\n" 297 - "kretprobe_trampoline:\n\t" 296 + asm volatile (".globl __kretprobe_trampoline\n" 297 + "__kretprobe_trampoline:\n\t" 298 298 "nop\n"); 299 299 } 300 300 301 301 /* 302 - * Called when we hit the probe point at kretprobe_trampoline 302 + * Called when we hit the probe point at __kretprobe_trampoline 303 303 */ 304 304 int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 305 305 { ··· 442 442 } 443 443 444 444 static struct kprobe trampoline_p = { 445 - .addr = (kprobe_opcode_t *)&kretprobe_trampoline, 445 + .addr = (kprobe_opcode_t *)&__kretprobe_trampoline, 446 446 .pre_handler = trampoline_probe_handler 447 447 }; 448 448
+1 -1
arch/sparc/include/asm/kprobes.h
··· 24 24 flushi(&(p)->ainsn.insn[1]); \ 25 25 } while (0) 26 26 27 - void kretprobe_trampoline(void); 27 + void __kretprobe_trampoline(void); 28 28 29 29 /* Architecture specific copy of original instruction*/ 30 30 struct arch_specific_insn {
+5 -5
arch/sparc/kernel/kprobes.c
··· 440 440 441 441 /* Replace the return addr with trampoline addr */ 442 442 regs->u_regs[UREG_RETPC] = 443 - ((unsigned long)kretprobe_trampoline) - 8; 443 + ((unsigned long)__kretprobe_trampoline) - 8; 444 444 } 445 445 446 446 /* ··· 465 465 466 466 static void __used kretprobe_trampoline_holder(void) 467 467 { 468 - asm volatile(".global kretprobe_trampoline\n" 469 - "kretprobe_trampoline:\n" 468 + asm volatile(".global __kretprobe_trampoline\n" 469 + "__kretprobe_trampoline:\n" 470 470 "\tnop\n" 471 471 "\tnop\n"); 472 472 } 473 473 static struct kprobe trampoline_p = { 474 - .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 474 + .addr = (kprobe_opcode_t *) &__kretprobe_trampoline, 475 475 .pre_handler = trampoline_probe_handler 476 476 }; 477 477 ··· 482 482 483 483 int __kprobes arch_trampoline_kprobe(struct kprobe *p) 484 484 { 485 - if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) 485 + if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline) 486 486 return 1; 487 487 488 488 return 0;
+9 -9
arch/x86/kernel/kprobes/core.c
··· 809 809 ri->fp = sara; 810 810 811 811 /* Replace the return addr with trampoline addr */ 812 - *sara = (unsigned long) &kretprobe_trampoline; 812 + *sara = (unsigned long) &__kretprobe_trampoline; 813 813 } 814 814 NOKPROBE_SYMBOL(arch_prepare_kretprobe); 815 815 ··· 1019 1019 */ 1020 1020 asm( 1021 1021 ".text\n" 1022 - ".global kretprobe_trampoline\n" 1023 - ".type kretprobe_trampoline, @function\n" 1024 - "kretprobe_trampoline:\n" 1022 + ".global __kretprobe_trampoline\n" 1023 + ".type __kretprobe_trampoline, @function\n" 1024 + "__kretprobe_trampoline:\n" 1025 1025 /* We don't bother saving the ss register */ 1026 1026 #ifdef CONFIG_X86_64 1027 1027 " pushq %rsp\n" ··· 1045 1045 " popfl\n" 1046 1046 #endif 1047 1047 " ret\n" 1048 - ".size kretprobe_trampoline, .-kretprobe_trampoline\n" 1048 + ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n" 1049 1049 ); 1050 - NOKPROBE_SYMBOL(kretprobe_trampoline); 1051 - STACK_FRAME_NON_STANDARD(kretprobe_trampoline); 1050 + NOKPROBE_SYMBOL(__kretprobe_trampoline); 1051 + STACK_FRAME_NON_STANDARD(__kretprobe_trampoline); 1052 1052 1053 1053 1054 1054 /* 1055 - * Called from kretprobe_trampoline 1055 + * Called from __kretprobe_trampoline 1056 1056 */ 1057 1057 __used __visible void *trampoline_handler(struct pt_regs *regs) 1058 1058 { ··· 1061 1061 #ifdef CONFIG_X86_32 1062 1062 regs->gs = 0; 1063 1063 #endif 1064 - regs->ip = (unsigned long)&kretprobe_trampoline; 1064 + regs->ip = (unsigned long)&__kretprobe_trampoline; 1065 1065 regs->orig_ax = ~0UL; 1066 1066 1067 1067 return (void *)kretprobe_trampoline_handler(regs, &regs->sp);
+2 -2
include/linux/kprobes.h
··· 188 188 struct pt_regs *regs); 189 189 extern int arch_trampoline_kprobe(struct kprobe *p); 190 190 191 - void kretprobe_trampoline(void); 191 + void __kretprobe_trampoline(void); 192 192 /* 193 193 * Since some architecture uses structured function pointer, 194 194 * use dereference_function_descriptor() to get real function address. 195 195 */ 196 196 static nokprobe_inline void *kretprobe_trampoline_addr(void) 197 197 { 198 - return dereference_kernel_function_descriptor(kretprobe_trampoline); 198 + return dereference_kernel_function_descriptor(__kretprobe_trampoline); 199 199 } 200 200 201 201 /* If the trampoline handler called from a kprobe, use this version */
+1 -1
kernel/trace/trace_output.c
··· 349 349 #ifdef CONFIG_KRETPROBES 350 350 static inline const char *kretprobed(const char *name) 351 351 { 352 - static const char tramp_name[] = "kretprobe_trampoline"; 352 + static const char tramp_name[] = "__kretprobe_trampoline"; 353 353 int size = sizeof(tramp_name); 354 354 355 355 if (strncmp(tramp_name, name, size) == 0)