kprobes: Propagate error from arm_kprobe_ftrace()

Improve error handling when arming ftrace-based kprobes. Specifically, if
we fail to arm a ftrace-based kprobe, register_kprobe()/enable_kprobe()
should report an error instead of success. Previously, this has led to
confusing situations where register_kprobe() would return 0 indicating
success, but the kprobe would not be functional if ftrace registration
during the kprobe arming process had failed. We should therefore take any
errors returned by ftrace into account and propagate the error so that we
do not register/enable kprobes that cannot be armed. This can happen if,
for example, register_ftrace_function() finds an IPMODIFY conflict (since
kprobe_ftrace_ops has this flag set) and returns an error. Such a conflict
is possible since livepatches also set the IPMODIFY flag for their ftrace_ops.

arm_all_kprobes() keeps its current behavior and attempts to arm all
kprobes. It returns the last encountered error and gives a warning if
not all probes could be armed.

This patch is based on Petr Mladek's original patchset (patches 2 and 3)
back in 2015, which improved kprobes error handling, found here:

https://lkml.org/lkml/2015/2/26/452

However, further work on this had been paused since then and the patches
were not upstreamed.

Based-on-patches-by: Petr Mladek <pmladek@suse.com>
Signed-off-by: Jessica Yu <jeyu@kernel.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S . Miller <davem@davemloft.net>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joe Lawrence <joe.lawrence@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/20180109235124.30886-2-jeyu@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by Jessica Yu and committed by Ingo Molnar 12310e34 3f9e6463

+76 -26
+76 -26
kernel/kprobes.c
··· 978 } 979 980 /* Caller must lock kprobe_mutex */ 981 - static void arm_kprobe_ftrace(struct kprobe *p) 982 { 983 - int ret; 984 985 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, 986 (unsigned long)p->addr, 0, 0); 987 - WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); 988 - kprobe_ftrace_enabled++; 989 - if (kprobe_ftrace_enabled == 1) { 990 - ret = register_ftrace_function(&kprobe_ftrace_ops); 991 - WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); 992 } 993 } 994 995 /* Caller must lock kprobe_mutex */ ··· 1026 } 1027 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1028 #define prepare_kprobe(p) arch_prepare_kprobe(p) 1029 - #define arm_kprobe_ftrace(p) do {} while (0) 1030 #define disarm_kprobe_ftrace(p) do {} while (0) 1031 #endif 1032 1033 /* Arm a kprobe with text_mutex */ 1034 - static void arm_kprobe(struct kprobe *kp) 1035 { 1036 - if (unlikely(kprobe_ftrace(kp))) { 1037 - arm_kprobe_ftrace(kp); 1038 - return; 1039 - } 1040 cpus_read_lock(); 1041 mutex_lock(&text_mutex); 1042 __arm_kprobe(kp); 1043 mutex_unlock(&text_mutex); 1044 cpus_read_unlock(); 1045 } 1046 1047 /* Disarm a kprobe with text_mutex */ ··· 1381 1382 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1383 ap->flags &= ~KPROBE_FLAG_DISABLED; 1384 - if (!kprobes_all_disarmed) 1385 /* Arm the breakpoint again. 
*/ 1386 - arm_kprobe(ap); 1387 } 1388 return ret; 1389 } ··· 1598 hlist_add_head_rcu(&p->hlist, 1599 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1600 1601 - if (!kprobes_all_disarmed && !kprobe_disabled(p)) 1602 - arm_kprobe(p); 1603 1604 /* Try to optimize kprobe */ 1605 try_to_optimize_kprobe(p); ··· 2147 2148 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2149 p->flags &= ~KPROBE_FLAG_DISABLED; 2150 - arm_kprobe(p); 2151 } 2152 out: 2153 mutex_unlock(&kprobe_mutex); ··· 2440 .release = seq_release, 2441 }; 2442 2443 - static void arm_all_kprobes(void) 2444 { 2445 struct hlist_head *head; 2446 struct kprobe *p; 2447 - unsigned int i; 2448 2449 mutex_lock(&kprobe_mutex); 2450 ··· 2462 /* Arming kprobes doesn't optimize kprobe itself */ 2463 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2464 head = &kprobe_table[i]; 2465 - hlist_for_each_entry_rcu(p, head, hlist) 2466 - if (!kprobe_disabled(p)) 2467 - arm_kprobe(p); 2468 } 2469 2470 - printk(KERN_INFO "Kprobes globally enabled\n"); 2471 2472 already_enabled: 2473 mutex_unlock(&kprobe_mutex); 2474 - return; 2475 } 2476 2477 static void disarm_all_kprobes(void) ··· 2540 { 2541 char buf[32]; 2542 size_t buf_size; 2543 2544 buf_size = min(count, (sizeof(buf)-1)); 2545 if (copy_from_user(buf, user_buf, buf_size)) ··· 2551 case 'y': 2552 case 'Y': 2553 case '1': 2554 - arm_all_kprobes(); 2555 break; 2556 case 'n': 2557 case 'N': ··· 2561 default: 2562 return -EINVAL; 2563 } 2564 2565 return count; 2566 }
··· 978 } 979 980 /* Caller must lock kprobe_mutex */ 981 + static int arm_kprobe_ftrace(struct kprobe *p) 982 { 983 + int ret = 0; 984 985 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, 986 (unsigned long)p->addr, 0, 0); 987 + if (ret) { 988 + pr_debug("Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); 989 + return ret; 990 } 991 + 992 + if (kprobe_ftrace_enabled == 0) { 993 + ret = register_ftrace_function(&kprobe_ftrace_ops); 994 + if (ret) { 995 + pr_debug("Failed to init kprobe-ftrace (%d)\n", ret); 996 + goto err_ftrace; 997 + } 998 + } 999 + 1000 + kprobe_ftrace_enabled++; 1001 + return ret; 1002 + 1003 + err_ftrace: 1004 + /* 1005 + * Note: Since kprobe_ftrace_ops has IPMODIFY set, and ftrace requires a 1006 + * non-empty filter_hash for IPMODIFY ops, we're safe from an accidental 1007 + * empty filter_hash which would undesirably trace all functions. 1008 + */ 1009 + ftrace_set_filter_ip(&kprobe_ftrace_ops, (unsigned long)p->addr, 1, 0); 1010 + return ret; 1011 } 1012 1013 /* Caller must lock kprobe_mutex */ ··· 1008 } 1009 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1010 #define prepare_kprobe(p) arch_prepare_kprobe(p) 1011 + #define arm_kprobe_ftrace(p) (-ENODEV) 1012 #define disarm_kprobe_ftrace(p) do {} while (0) 1013 #endif 1014 1015 /* Arm a kprobe with text_mutex */ 1016 + static int arm_kprobe(struct kprobe *kp) 1017 { 1018 + if (unlikely(kprobe_ftrace(kp))) 1019 + return arm_kprobe_ftrace(kp); 1020 + 1021 cpus_read_lock(); 1022 mutex_lock(&text_mutex); 1023 __arm_kprobe(kp); 1024 mutex_unlock(&text_mutex); 1025 cpus_read_unlock(); 1026 + 1027 + return 0; 1028 } 1029 1030 /* Disarm a kprobe with text_mutex */ ··· 1362 1363 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1364 ap->flags &= ~KPROBE_FLAG_DISABLED; 1365 + if (!kprobes_all_disarmed) { 1366 /* Arm the breakpoint again. 
*/ 1367 + ret = arm_kprobe(ap); 1368 + if (ret) { 1369 + ap->flags |= KPROBE_FLAG_DISABLED; 1370 + list_del_rcu(&p->list); 1371 + synchronize_sched(); 1372 + } 1373 + } 1374 } 1375 return ret; 1376 } ··· 1573 hlist_add_head_rcu(&p->hlist, 1574 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1575 1576 + if (!kprobes_all_disarmed && !kprobe_disabled(p)) { 1577 + ret = arm_kprobe(p); 1578 + if (ret) { 1579 + hlist_del_rcu(&p->hlist); 1580 + synchronize_sched(); 1581 + goto out; 1582 + } 1583 + } 1584 1585 /* Try to optimize kprobe */ 1586 try_to_optimize_kprobe(p); ··· 2116 2117 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2118 p->flags &= ~KPROBE_FLAG_DISABLED; 2119 + ret = arm_kprobe(p); 2120 + if (ret) 2121 + p->flags |= KPROBE_FLAG_DISABLED; 2122 } 2123 out: 2124 mutex_unlock(&kprobe_mutex); ··· 2407 .release = seq_release, 2408 }; 2409 2410 + static int arm_all_kprobes(void) 2411 { 2412 struct hlist_head *head; 2413 struct kprobe *p; 2414 + unsigned int i, total = 0, errors = 0; 2415 + int err, ret = 0; 2416 2417 mutex_lock(&kprobe_mutex); 2418 ··· 2428 /* Arming kprobes doesn't optimize kprobe itself */ 2429 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2430 head = &kprobe_table[i]; 2431 + /* Arm all kprobes on a best-effort basis */ 2432 + hlist_for_each_entry_rcu(p, head, hlist) { 2433 + if (!kprobe_disabled(p)) { 2434 + err = arm_kprobe(p); 2435 + if (err) { 2436 + errors++; 2437 + ret = err; 2438 + } 2439 + total++; 2440 + } 2441 + } 2442 } 2443 2444 + if (errors) 2445 + pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n", 2446 + errors, total); 2447 + else 2448 + pr_info("Kprobes globally enabled\n"); 2449 2450 already_enabled: 2451 mutex_unlock(&kprobe_mutex); 2452 + return ret; 2453 } 2454 2455 static void disarm_all_kprobes(void) ··· 2494 { 2495 char buf[32]; 2496 size_t buf_size; 2497 + int ret = 0; 2498 2499 buf_size = min(count, (sizeof(buf)-1)); 2500 if (copy_from_user(buf, user_buf, buf_size)) ··· 2504 case 'y': 2505 
case 'Y': 2506 case '1': 2507 + ret = arm_all_kprobes(); 2508 break; 2509 case 'n': 2510 case 'N': ··· 2514 default: 2515 return -EINVAL; 2516 } 2517 + 2518 + if (ret) 2519 + return ret; 2520 2521 return count; 2522 }