Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kprobes: support per-kprobe disabling

Add disable_kprobe() and enable_kprobe() to disable/enable kprobes
temporarily.

disable_kprobe() asynchronously disables the probe handlers of the specified
kprobe. So, after calling it, some handlers can still be called for a while.
enable_kprobe() enables specified kprobe.

aggr_pre_handler and aggr_post_handler check for disabled probes. On the
other hand, aggr_break_handler and aggr_fault_handler don't check it,
because these handlers are called while executing the pre or post handlers,
and they usually assist with error handling there.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Masami Hiramatsu and committed by
Linus Torvalds
de5bd88d e579abeb

+193 -35
+28 -6
Documentation/kprobes.txt
··· 212 212 is single-stepped, Kprobe calls kp->post_handler. If a fault 213 213 occurs during execution of kp->pre_handler or kp->post_handler, 214 214 or during single-stepping of the probed instruction, Kprobes calls 215 - kp->fault_handler. Any or all handlers can be NULL. 215 + kp->fault_handler. Any or all handlers can be NULL. If kp->flags 216 + is set KPROBE_FLAG_DISABLED, that kp will be registered but disabled, 217 + so, it's handlers aren't hit until calling enable_kprobe(kp). 216 218 217 219 NOTE: 218 220 1. With the introduction of the "symbol_name" field to struct kprobe, ··· 365 363 incorrect probes. However, other probes in the array are 366 364 unregistered correctly. 367 365 366 + 4.7 disable_kprobe 367 + 368 + #include <linux/kprobes.h> 369 + int disable_kprobe(struct kprobe *kp); 370 + 371 + Temporarily disables the specified kprobe. You can enable it again by using 372 + enable_kprobe(). You must specify the kprobe which has been registered. 373 + 374 + 4.8 enable_kprobe 375 + 376 + #include <linux/kprobes.h> 377 + int enable_kprobe(struct kprobe *kp); 378 + 379 + Enables kprobe which has been disabled by disable_kprobe(). You must specify 380 + the kprobe which has been registered. 381 + 368 382 5. Kprobes Features and Limitations 369 383 370 384 Kprobes allows multiple probes at the same address. Currently, ··· 518 500 is also specified. Following columns show probe status. If the probe is on 519 501 a virtual address that is no longer valid (module init sections, module 520 502 virtual addresses that correspond to modules that've been unloaded), 521 - such probes are marked with [GONE]. 503 + such probes are marked with [GONE]. If the probe is temporarily disabled, 504 + such probes are marked with [DISABLED]. 522 505 523 - /debug/kprobes/enabled: Turn kprobes ON/OFF 506 + /debug/kprobes/enabled: Turn kprobes ON/OFF forcibly. 524 507 525 - Provides a knob to globally turn registered kprobes ON or OFF. 
By default, 526 - all kprobes are enabled. By echoing "0" to this file, all registered probes 527 - will be disarmed, till such time a "1" is echoed to this file. 508 + Provides a knob to globally and forcibly turn registered kprobes ON or OFF. 509 + By default, all kprobes are enabled. By echoing "0" to this file, all 510 + registered probes will be disarmed, till such time a "1" is echoed to this 511 + file. Note that this knob just disarms and arms all kprobes and doesn't 512 + change each probe's disabling state. This means that disabled kprobes (marked 513 + [DISABLED]) will be not enabled if you turn ON all kprobes by this knob.
+22 -1
include/linux/kprobes.h
··· 112 112 /* copy of the original instruction */ 113 113 struct arch_specific_insn ainsn; 114 114 115 - /* Indicates various status flags. Protected by kprobe_mutex. */ 115 + /* 116 + * Indicates various status flags. 117 + * Protected by kprobe_mutex after this kprobe is registered. 118 + */ 116 119 u32 flags; 117 120 }; 118 121 119 122 /* Kprobe status flags */ 120 123 #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ 124 + #define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ 121 125 126 + /* Has this kprobe gone ? */ 122 127 static inline int kprobe_gone(struct kprobe *p) 123 128 { 124 129 return p->flags & KPROBE_FLAG_GONE; 125 130 } 126 131 132 + /* Is this kprobe disabled ? */ 133 + static inline int kprobe_disabled(struct kprobe *p) 134 + { 135 + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); 136 + } 127 137 /* 128 138 * Special probe type that uses setjmp-longjmp type tricks to resume 129 139 * execution at a specified entry with a matching prototype corresponding ··· 293 283 void kprobe_flush_task(struct task_struct *tk); 294 284 void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); 295 285 286 + int disable_kprobe(struct kprobe *kp); 287 + int enable_kprobe(struct kprobe *kp); 288 + 296 289 #else /* !CONFIG_KPROBES: */ 297 290 298 291 static inline int kprobes_built_in(void) ··· 361 348 } 362 349 static inline void kprobe_flush_task(struct task_struct *tk) 363 350 { 351 + } 352 + static inline int disable_kprobe(struct kprobe *kp) 353 + { 354 + return -ENOSYS; 355 + } 356 + static inline int enable_kprobe(struct kprobe *kp) 357 + { 358 + return -ENOSYS; 364 359 } 365 360 #endif /* CONFIG_KPROBES */ 366 361 #endif /* _LINUX_KPROBES_H */
+143 -28
kernel/kprobes.c
··· 328 328 struct kprobe *kp; 329 329 330 330 list_for_each_entry_rcu(kp, &p->list, list) { 331 - if (kp->pre_handler && !kprobe_gone(kp)) { 331 + if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 332 332 set_kprobe_instance(kp); 333 333 if (kp->pre_handler(kp, regs)) 334 334 return 1; ··· 344 344 struct kprobe *kp; 345 345 346 346 list_for_each_entry_rcu(kp, &p->list, list) { 347 - if (kp->post_handler && !kprobe_gone(kp)) { 347 + if (kp->post_handler && likely(!kprobe_disabled(kp))) { 348 348 set_kprobe_instance(kp); 349 349 kp->post_handler(kp, regs, flags); 350 350 reset_kprobe_instance(); ··· 523 523 */ 524 524 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) 525 525 { 526 + BUG_ON(kprobe_gone(ap) || kprobe_gone(p)); 526 527 if (p->break_handler) { 527 528 if (ap->break_handler) 528 529 return -EEXIST; ··· 533 532 list_add_rcu(&p->list, &ap->list); 534 533 if (p->post_handler && !ap->post_handler) 535 534 ap->post_handler = aggr_post_handler; 535 + 536 + if (kprobe_disabled(ap) && !kprobe_disabled(p)) { 537 + ap->flags &= ~KPROBE_FLAG_DISABLED; 538 + if (!kprobes_all_disarmed) 539 + /* Arm the breakpoint again. */ 540 + arch_arm_kprobe(ap); 541 + } 536 542 return 0; 537 543 } 538 544 ··· 600 592 * freed by unregister_kprobe. 601 593 */ 602 594 return ret; 603 - /* Clear gone flag to prevent allocating new slot again. */ 604 - ap->flags &= ~KPROBE_FLAG_GONE; 595 + 605 596 /* 606 - * If the old_p has gone, its breakpoint has been disarmed. 607 - * We have to arm it again after preparing real kprobes. 597 + * Clear gone flag to prevent allocating new slot again, and 598 + * set disabled flag because it is not armed yet. 
608 599 */ 609 - if (!kprobes_all_disarmed) 610 - arch_arm_kprobe(ap); 600 + ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 601 + | KPROBE_FLAG_DISABLED; 611 602 } 612 603 613 604 copy_kprobe(ap, p); 614 605 return add_new_kprobe(ap, p); 606 + } 607 + 608 + /* Try to disable aggr_kprobe, and return 1 if succeeded.*/ 609 + static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p) 610 + { 611 + struct kprobe *kp; 612 + 613 + list_for_each_entry_rcu(kp, &p->list, list) { 614 + if (!kprobe_disabled(kp)) 615 + /* 616 + * There is an active probe on the list. 617 + * We can't disable aggr_kprobe. 618 + */ 619 + return 0; 620 + } 621 + p->flags |= KPROBE_FLAG_DISABLED; 622 + return 1; 615 623 } 616 624 617 625 static int __kprobes in_kprobes_functions(unsigned long addr) ··· 688 664 return -EINVAL; 689 665 } 690 666 691 - p->flags = 0; 667 + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 668 + p->flags &= KPROBE_FLAG_DISABLED; 669 + 692 670 /* 693 671 * Check if are we probing a module. 694 672 */ ··· 735 709 hlist_add_head_rcu(&p->hlist, 736 710 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 737 711 738 - if (!kprobes_all_disarmed) 712 + if (!kprobes_all_disarmed && !kprobe_disabled(p)) 739 713 arch_arm_kprobe(p); 740 714 741 715 out_unlock_text: ··· 750 724 } 751 725 EXPORT_SYMBOL_GPL(register_kprobe); 752 726 727 + /* Check passed kprobe is valid and return kprobe in kprobe_table. */ 728 + static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p) 729 + { 730 + struct kprobe *old_p, *list_p; 731 + 732 + old_p = get_kprobe(p->addr); 733 + if (unlikely(!old_p)) 734 + return NULL; 735 + 736 + if (p != old_p) { 737 + list_for_each_entry_rcu(list_p, &old_p->list, list) 738 + if (list_p == p) 739 + /* kprobe p is a valid probe */ 740 + goto valid; 741 + return NULL; 742 + } 743 + valid: 744 + return old_p; 745 + } 746 + 753 747 /* 754 748 * Unregister a kprobe without a scheduler synchronization. 
755 749 */ ··· 777 731 { 778 732 struct kprobe *old_p, *list_p; 779 733 780 - old_p = get_kprobe(p->addr); 781 - if (unlikely(!old_p)) 734 + old_p = __get_valid_kprobe(p); 735 + if (old_p == NULL) 782 736 return -EINVAL; 783 737 784 - if (p != old_p) { 785 - list_for_each_entry_rcu(list_p, &old_p->list, list) 786 - if (list_p == p) 787 - /* kprobe p is a valid probe */ 788 - goto valid_p; 789 - return -EINVAL; 790 - } 791 - valid_p: 792 738 if (old_p == p || 793 739 (old_p->pre_handler == aggr_pre_handler && 794 740 list_is_singular(&old_p->list))) { ··· 789 751 * enabled and not gone - otherwise, the breakpoint would 790 752 * already have been removed. We save on flushing icache. 791 753 */ 792 - if (!kprobes_all_disarmed && !kprobe_gone(old_p)) { 754 + if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) { 793 755 mutex_lock(&text_mutex); 794 756 arch_disarm_kprobe(p); 795 757 mutex_unlock(&text_mutex); ··· 807 769 } 808 770 noclean: 809 771 list_del_rcu(&p->list); 772 + if (!kprobe_disabled(old_p)) { 773 + try_to_disable_aggr_kprobe(old_p); 774 + if (!kprobes_all_disarmed && kprobe_disabled(old_p)) 775 + arch_disarm_kprobe(old_p); 776 + } 810 777 } 811 778 return 0; 812 779 } ··· 1121 1078 static void __kprobes kill_kprobe(struct kprobe *p) 1122 1079 { 1123 1080 struct kprobe *kp; 1081 + 1124 1082 p->flags |= KPROBE_FLAG_GONE; 1125 1083 if (p->pre_handler == aggr_pre_handler) { 1126 1084 /* ··· 1263 1219 else 1264 1220 kprobe_type = "k"; 1265 1221 if (sym) 1266 - seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type, 1267 - sym, offset, (modname ? modname : " "), 1268 - (kprobe_gone(p) ? "[GONE]" : "")); 1222 + seq_printf(pi, "%p %s %s+0x%x %s %s%s\n", 1223 + p->addr, kprobe_type, sym, offset, 1224 + (modname ? modname : " "), 1225 + (kprobe_gone(p) ? "[GONE]" : ""), 1226 + ((kprobe_disabled(p) && !kprobe_gone(p)) ? 1227 + "[DISABLED]" : "")); 1269 1228 else 1270 - seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr, 1271 - (kprobe_gone(p) ? 
"[GONE]" : "")); 1229 + seq_printf(pi, "%p %s %p %s%s\n", 1230 + p->addr, kprobe_type, p->addr, 1231 + (kprobe_gone(p) ? "[GONE]" : ""), 1232 + ((kprobe_disabled(p) && !kprobe_gone(p)) ? 1233 + "[DISABLED]" : "")); 1272 1234 } 1273 1235 1274 1236 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) ··· 1339 1289 .release = seq_release, 1340 1290 }; 1341 1291 1292 + /* Disable one kprobe */ 1293 + int __kprobes disable_kprobe(struct kprobe *kp) 1294 + { 1295 + int ret = 0; 1296 + struct kprobe *p; 1297 + 1298 + mutex_lock(&kprobe_mutex); 1299 + 1300 + /* Check whether specified probe is valid. */ 1301 + p = __get_valid_kprobe(kp); 1302 + if (unlikely(p == NULL)) { 1303 + ret = -EINVAL; 1304 + goto out; 1305 + } 1306 + 1307 + /* If the probe is already disabled (or gone), just return */ 1308 + if (kprobe_disabled(kp)) 1309 + goto out; 1310 + 1311 + kp->flags |= KPROBE_FLAG_DISABLED; 1312 + if (p != kp) 1313 + /* When kp != p, p is always enabled. */ 1314 + try_to_disable_aggr_kprobe(p); 1315 + 1316 + if (!kprobes_all_disarmed && kprobe_disabled(p)) 1317 + arch_disarm_kprobe(p); 1318 + out: 1319 + mutex_unlock(&kprobe_mutex); 1320 + return ret; 1321 + } 1322 + EXPORT_SYMBOL_GPL(disable_kprobe); 1323 + 1324 + /* Enable one kprobe */ 1325 + int __kprobes enable_kprobe(struct kprobe *kp) 1326 + { 1327 + int ret = 0; 1328 + struct kprobe *p; 1329 + 1330 + mutex_lock(&kprobe_mutex); 1331 + 1332 + /* Check whether specified probe is valid. */ 1333 + p = __get_valid_kprobe(kp); 1334 + if (unlikely(p == NULL)) { 1335 + ret = -EINVAL; 1336 + goto out; 1337 + } 1338 + 1339 + if (kprobe_gone(kp)) { 1340 + /* This kprobe has gone, we couldn't enable it. 
*/ 1341 + ret = -EINVAL; 1342 + goto out; 1343 + } 1344 + 1345 + if (!kprobes_all_disarmed && kprobe_disabled(p)) 1346 + arch_arm_kprobe(p); 1347 + 1348 + p->flags &= ~KPROBE_FLAG_DISABLED; 1349 + if (p != kp) 1350 + kp->flags &= ~KPROBE_FLAG_DISABLED; 1351 + out: 1352 + mutex_unlock(&kprobe_mutex); 1353 + return ret; 1354 + } 1355 + EXPORT_SYMBOL_GPL(enable_kprobe); 1356 + 1342 1357 static void __kprobes arm_all_kprobes(void) 1343 1358 { 1344 1359 struct hlist_head *head; ··· 1421 1306 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1422 1307 head = &kprobe_table[i]; 1423 1308 hlist_for_each_entry_rcu(p, node, head, hlist) 1424 - if (!kprobe_gone(p)) 1309 + if (!kprobe_disabled(p)) 1425 1310 arch_arm_kprobe(p); 1426 1311 } 1427 1312 mutex_unlock(&text_mutex); ··· 1453 1338 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1454 1339 head = &kprobe_table[i]; 1455 1340 hlist_for_each_entry_rcu(p, node, head, hlist) { 1456 - if (!arch_trampoline_kprobe(p) && !kprobe_gone(p)) 1341 + if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) 1457 1342 arch_disarm_kprobe(p); 1458 1343 } 1459 1344 }