kprobes: Propagate error from arm_kprobe_ftrace()

Improve error handling when arming ftrace-based kprobes. Specifically, if
we fail to arm an ftrace-based kprobe, register_kprobe()/enable_kprobe()
should report an error instead of success. Previously, this led to
confusing situations where register_kprobe() would return 0 indicating
success, but the kprobe would not be functional if ftrace registration
during the kprobe arming process had failed. We should therefore take any
error returned by ftrace into account and propagate it so that we
do not register/enable kprobes that cannot be armed. This can happen if,
for example, register_ftrace_function() finds an IPMODIFY conflict (since
kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict
is possible since livepatches also set the IPMODIFY flag for their ftrace_ops.
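
For illustration, a minimal sketch of what a caller now observes when
arming fails (hypothetical module code, not part of this patch; the
probed symbol is an arbitrary example):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static struct kprobe kp = {
		.symbol_name = "do_sys_open",	/* assumption: any ftrace-able symbol */
	};

	static int __init probe_init(void)
	{
		int ret = register_kprobe(&kp);

		/*
		 * Before this patch, ret could be 0 even though the ftrace
		 * arming step had failed (e.g. -EBUSY on an IPMODIFY
		 * conflict with a livepatch); with it, the error shows up
		 * here and the kprobe is not left half-registered.
		 */
		if (ret < 0)
			pr_err("register_kprobe failed: %d\n", ret);
		return ret;
	}

	static void __exit probe_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");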

arm_all_kprobes() keeps its current behavior and attempts to arm all
kprobes. It returns the last encountered error and gives a warning if
not all probes could be armed.
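
A user-visible consequence, sketched from userspace (assuming the standard
kprobes debugfs control file): re-enabling all kprobes now surfaces the
last arming error through write() instead of silently succeeding:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/*
		 * With this patch, write() fails with the error returned by
		 * arm_all_kprobes() if any probe could not be re-armed.
		 */
		if (write(fd, "1", 1) != 1)
			fprintf(stderr, "enabling kprobes failed: %s\n",
				strerror(errno));
		close(fd);
		return 0;
	}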

This patch is based on Petr Mladek's original patchset (patches 2 and 3)
back in 2015, which improved kprobes error handling, found here:

https://lkml.org/lkml/2015/2/26/452

However, further work on this was paused and the patches were not
upstreamed.

Based-on-patches-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Jessica Yu <jeyu@kernel.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joe Lawrence <joe.lawrence@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/20180109235124.30886-2-jeyu@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>


 kernel/kprobes.c | 102 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 76 insertions(+), 26 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
···
 }
 
 /* Caller must lock kprobe_mutex */
-static void arm_kprobe_ftrace(struct kprobe *p)
+static int arm_kprobe_ftrace(struct kprobe *p)
 {
-	int ret;
+	int ret = 0;
 
 	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
 				   (unsigned long)p->addr, 0, 0);
-	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
-	kprobe_ftrace_enabled++;
-	if (kprobe_ftrace_enabled == 1) {
-		ret = register_ftrace_function(&kprobe_ftrace_ops);
-		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
+	if (ret) {
+		pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
+		return ret;
 	}
+
+	if (kprobe_ftrace_enabled == 0) {
+		ret = register_ftrace_function(&kprobe_ftrace_ops);
+		if (ret) {
+			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
+			goto err_ftrace;
+		}
+	}
+
+	kprobe_ftrace_enabled++;
+	return ret;
+
+err_ftrace:
+	/*
+	 * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a
+	 * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental
+	 * empty filter_hash which would undesirably trace all functions.
+	 */
+	ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0);
+	return ret;
 }
 
 /* Caller must lock kprobe_mutex */
···
 }
 #else	/* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)	arch_prepare_kprobe(p)
-#define arm_kprobe_ftrace(p)	do {} while (0)
+#define arm_kprobe_ftrace(p)	(-ENODEV)
 #define disarm_kprobe_ftrace(p)	do {} while (0)
 #endif
 
 /* Arm a kprobe with text_mutex */
-static void arm_kprobe(struct kprobe *kp)
+static int arm_kprobe(struct kprobe *kp)
 {
-	if (unlikely(kprobe_ftrace(kp))) {
-		arm_kprobe_ftrace(kp);
-		return;
-	}
+	if (unlikely(kprobe_ftrace(kp)))
+		return arm_kprobe_ftrace(kp);
+
 	cpus_read_lock();
 	mutex_lock(&text_mutex);
 	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
+
+	return 0;
 }
 
 /* Disarm a kprobe with text_mutex */
···
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
+		if (!kprobes_all_disarmed) {
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			ret = arm_kprobe(ap);
+			if (ret) {
+				ap->flags |= KPROBE_FLAG_DISABLED;
+				list_del_rcu(&p->list);
+				synchronize_sched();
+			}
+		}
 	}
 	return ret;
 }
···
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arm_kprobe(p);
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_sched();
+			goto out;
+		}
+	}
 
 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);
···
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
-		arm_kprobe(p);
+		ret = arm_kprobe(p);
+		if (ret)
+			p->flags |= KPROBE_FLAG_DISABLED;
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
···
 	.release        = seq_release,
 };
 
-static void arm_all_kprobes(void)
+static int arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 	struct kprobe *p;
-	unsigned int i;
+	unsigned int i, total = 0, errors = 0;
+	int err, ret = 0;
 
 	mutex_lock(&kprobe_mutex);
···
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, head, hlist)
-			if (!kprobe_disabled(p))
-				arm_kprobe(p);
+		/* Arm all kprobes on a best-effort basis */
+		hlist_for_each_entry_rcu(p, head, hlist) {
+			if (!kprobe_disabled(p)) {
+				err = arm_kprobe(p);
+				if (err) {
+					errors++;
+					ret = err;
+				}
+				total++;
+			}
+		}
 	}
 
-	printk(KERN_INFO "Kprobes globally enabled\n");
+	if (errors)
+		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
+			errors, total);
+	else
+		pr_info("Kprobes globally enabled\n");
 
 already_enabled:
 	mutex_unlock(&kprobe_mutex);
-	return;
+	return ret;
 }
 
 static void disarm_all_kprobes(void)
···
 {
 	char buf[32];
 	size_t buf_size;
+	int ret = 0;
 
 	buf_size = min(count, (sizeof(buf)-1));
 	if (copy_from_user(buf, user_buf, buf_size))
···
 	case 'y':
 	case 'Y':
 	case '1':
-		arm_all_kprobes();
+		ret = arm_all_kprobes();
 		break;
 	case 'n':
 	case 'N':
···
 	default:
 		return -EINVAL;
 	}
+
+	if (ret)
+		return ret;
 
 	return count;
 }