Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, kprobes: generalize and rename notify_page_fault() as kprobe_page_fault()

Architectures which support kprobes have very similar boilerplate around
calling kprobe_fault_handler(). Use a helper function in kprobes.h to
unify them, based on the x86 code.

This changes the behaviour for other architectures when preemption is
enabled. Previously, they would have disabled preemption while calling
the kprobe handler. However, if this fault had been raised from within a
kprobe handler, preemption would already have been disabled — so when we
observe preemption enabled, we know the fault was not due to a kprobe
handler and can simply return failure.

This behaviour was introduced in commit a980c0ef9f6d ("x86/kprobes:
Refactor kprobes_fault() like kprobe_exceptions_notify()").

[anshuman.khandual@arm.com: export kprobe_fault_handler()]
Link: http://lkml.kernel.org/r/1561133358-8876-1-git-send-email-anshuman.khandual@arm.com
Link: http://lkml.kernel.org/r/1560420444-25737-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Anshuman Khandual and committed by
Linus Torvalds
b98cca44 92bae787

+32 -156
+1 -23
arch/arm/mm/fault.c
··· 27 27 28 28 #ifdef CONFIG_MMU 29 29 30 - #ifdef CONFIG_KPROBES 31 - static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr) 32 - { 33 - int ret = 0; 34 - 35 - if (!user_mode(regs)) { 36 - /* kprobe_running() needs smp_processor_id() */ 37 - preempt_disable(); 38 - if (kprobe_running() && kprobe_fault_handler(regs, fsr)) 39 - ret = 1; 40 - preempt_enable(); 41 - } 42 - 43 - return ret; 44 - } 45 - #else 46 - static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr) 47 - { 48 - return 0; 49 - } 50 - #endif 51 - 52 30 /* 53 31 * This is useful to dump out the page tables associated with 54 32 * 'addr' in mm 'mm'. ··· 243 265 vm_fault_t fault; 244 266 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 245 267 246 - if (notify_page_fault(regs, fsr)) 268 + if (kprobe_page_fault(regs, fsr)) 247 269 return 0; 248 270 249 271 tsk = current;
+1 -23
arch/arm64/mm/fault.c
··· 59 59 return debug_fault_info + DBG_ESR_EVT(esr); 60 60 } 61 61 62 - #ifdef CONFIG_KPROBES 63 - static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) 64 - { 65 - int ret = 0; 66 - 67 - /* kprobe_running() needs smp_processor_id() */ 68 - if (!user_mode(regs)) { 69 - preempt_disable(); 70 - if (kprobe_running() && kprobe_fault_handler(regs, esr)) 71 - ret = 1; 72 - preempt_enable(); 73 - } 74 - 75 - return ret; 76 - } 77 - #else 78 - static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) 79 - { 80 - return 0; 81 - } 82 - #endif 83 - 84 62 static void data_abort_decode(unsigned int esr) 85 63 { 86 64 pr_alert("Data abort info:\n"); ··· 412 434 unsigned long vm_flags = VM_READ | VM_WRITE; 413 435 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 414 436 415 - if (notify_page_fault(regs, esr)) 437 + if (kprobe_page_fault(regs, esr)) 416 438 return 0; 417 439 418 440 /*
+1 -23
arch/ia64/mm/fault.c
··· 21 21 22 22 extern int die(char *, struct pt_regs *, long); 23 23 24 - #ifdef CONFIG_KPROBES 25 - static inline int notify_page_fault(struct pt_regs *regs, int trap) 26 - { 27 - int ret = 0; 28 - 29 - if (!user_mode(regs)) { 30 - /* kprobe_running() needs smp_processor_id() */ 31 - preempt_disable(); 32 - if (kprobe_running() && kprobe_fault_handler(regs, trap)) 33 - ret = 1; 34 - preempt_enable(); 35 - } 36 - 37 - return ret; 38 - } 39 - #else 40 - static inline int notify_page_fault(struct pt_regs *regs, int trap) 41 - { 42 - return 0; 43 - } 44 - #endif 45 - 46 24 /* 47 25 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment 48 26 * (inside region 5, on ia64) and that page is present. ··· 94 116 /* 95 117 * This is to handle the kprobes on user space access instructions 96 118 */ 97 - if (notify_page_fault(regs, TRAP_BRKPT)) 119 + if (kprobe_page_fault(regs, TRAP_BRKPT)) 98 120 return; 99 121 100 122 if (user_mode(regs))
+1
arch/mips/include/asm/kprobes.h
··· 41 41 #define kretprobe_blacklist_size 0 42 42 43 43 void arch_remove_kprobe(struct kprobe *p); 44 + int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 44 45 45 46 /* Architecture specific copy of original instruction*/ 46 47 struct arch_specific_insn {
+1 -1
arch/mips/kernel/kprobes.c
··· 398 398 return 1; 399 399 } 400 400 401 - static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 401 + int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 402 402 { 403 403 struct kprobe *cur = kprobe_running(); 404 404 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+2 -21
arch/powerpc/mm/fault.c
··· 42 42 #include <asm/debug.h> 43 43 #include <asm/kup.h> 44 44 45 - static inline bool notify_page_fault(struct pt_regs *regs) 46 - { 47 - bool ret = false; 48 - 49 - #ifdef CONFIG_KPROBES 50 - /* kprobe_running() needs smp_processor_id() */ 51 - if (!user_mode(regs)) { 52 - preempt_disable(); 53 - if (kprobe_running() && kprobe_fault_handler(regs, 11)) 54 - ret = true; 55 - preempt_enable(); 56 - } 57 - #endif /* CONFIG_KPROBES */ 58 - 59 - if (unlikely(debugger_fault_handler(regs))) 60 - ret = true; 61 - 62 - return ret; 63 - } 64 - 65 45 /* 66 46 * Check whether the instruction inst is a store using 67 47 * an update addressing form which will update r1. ··· 441 461 int is_write = page_fault_is_write(error_code); 442 462 vm_fault_t fault, major = 0; 443 463 bool must_retry = false; 464 + bool kprobe_fault = kprobe_page_fault(regs, 11); 444 465 445 - if (notify_page_fault(regs)) 466 + if (unlikely(debugger_fault_handler(regs) || kprobe_fault)) 446 467 return 0; 447 468 448 469 if (unlikely(page_fault_is_bad(error_code))) {
+1 -15
arch/s390/mm/fault.c
··· 67 67 } 68 68 early_initcall(fault_init); 69 69 70 - static inline int notify_page_fault(struct pt_regs *regs) 71 - { 72 - int ret = 0; 73 - 74 - /* kprobe_running() needs smp_processor_id() */ 75 - if (kprobes_built_in() && !user_mode(regs)) { 76 - preempt_disable(); 77 - if (kprobe_running() && kprobe_fault_handler(regs, 14)) 78 - ret = 1; 79 - preempt_enable(); 80 - } 81 - return ret; 82 - } 83 - 84 70 /* 85 71 * Find out which address space caused the exception. 86 72 */ ··· 398 412 */ 399 413 clear_pt_regs_flag(regs, PIF_PER_TRAP); 400 414 401 - if (notify_page_fault(regs)) 415 + if (kprobe_page_fault(regs, 14)) 402 416 return 0; 403 417 404 418 mm = tsk->mm;
+2 -16
arch/sh/mm/fault.c
··· 24 24 #include <asm/tlbflush.h> 25 25 #include <asm/traps.h> 26 26 27 - static inline int notify_page_fault(struct pt_regs *regs, int trap) 28 - { 29 - int ret = 0; 30 - 31 - if (kprobes_built_in() && !user_mode(regs)) { 32 - preempt_disable(); 33 - if (kprobe_running() && kprobe_fault_handler(regs, trap)) 34 - ret = 1; 35 - preempt_enable(); 36 - } 37 - 38 - return ret; 39 - } 40 - 41 27 static void 42 28 force_sig_info_fault(int si_signo, int si_code, unsigned long address) 43 29 { ··· 398 412 if (unlikely(fault_in_kernel_space(address))) { 399 413 if (vmalloc_fault(address) >= 0) 400 414 return; 401 - if (notify_page_fault(regs, vec)) 415 + if (kprobe_page_fault(regs, vec)) 402 416 return; 403 417 404 418 bad_area_nosemaphore(regs, error_code, address); 405 419 return; 406 420 } 407 421 408 - if (unlikely(notify_page_fault(regs, vec))) 422 + if (unlikely(kprobe_page_fault(regs, vec))) 409 423 return; 410 424 411 425 /* Only enable interrupts if they were on before the fault */
+1 -15
arch/sparc/mm/fault_64.c
··· 38 38 39 39 int show_unhandled_signals = 1; 40 40 41 - static inline __kprobes int notify_page_fault(struct pt_regs *regs) 42 - { 43 - int ret = 0; 44 - 45 - /* kprobe_running() needs smp_processor_id() */ 46 - if (kprobes_built_in() && !user_mode(regs)) { 47 - preempt_disable(); 48 - if (kprobe_running() && kprobe_fault_handler(regs, 0)) 49 - ret = 1; 50 - preempt_enable(); 51 - } 52 - return ret; 53 - } 54 - 55 41 static void __kprobes unhandled_fault(unsigned long address, 56 42 struct task_struct *tsk, 57 43 struct pt_regs *regs) ··· 271 285 272 286 fault_code = get_thread_fault_code(); 273 287 274 - if (notify_page_fault(regs)) 288 + if (kprobe_page_fault(regs, 0)) 275 289 goto exit_exception; 276 290 277 291 si_code = SEGV_MAPERR;
+2 -19
arch/x86/mm/fault.c
··· 46 46 return 0; 47 47 } 48 48 49 - static nokprobe_inline int kprobes_fault(struct pt_regs *regs) 50 - { 51 - if (!kprobes_built_in()) 52 - return 0; 53 - if (user_mode(regs)) 54 - return 0; 55 - /* 56 - * To be potentially processing a kprobe fault and to be allowed to call 57 - * kprobe_running(), we have to be non-preemptible. 58 - */ 59 - if (preemptible()) 60 - return 0; 61 - if (!kprobe_running()) 62 - return 0; 63 - return kprobe_fault_handler(regs, X86_TRAP_PF); 64 - } 65 - 66 49 /* 67 50 * Prefetch quirks: 68 51 * ··· 1265 1282 return; 1266 1283 1267 1284 /* kprobes don't want to hook the spurious faults: */ 1268 - if (kprobes_fault(regs)) 1285 + if (kprobe_page_fault(regs, X86_TRAP_PF)) 1269 1286 return; 1270 1287 1271 1288 /* ··· 1296 1313 mm = tsk->mm; 1297 1314 1298 1315 /* kprobes don't want to hook the spurious faults: */ 1299 - if (unlikely(kprobes_fault(regs))) 1316 + if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF))) 1300 1317 return; 1301 1318 1302 1319 /*
+19
include/linux/kprobes.h
··· 458 458 } 459 459 #endif 460 460 461 + /* Returns true if kprobes handled the fault */ 462 + static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs, 463 + unsigned int trap) 464 + { 465 + if (!kprobes_built_in()) 466 + return false; 467 + if (user_mode(regs)) 468 + return false; 469 + /* 470 + * To be potentially processing a kprobe fault and to be allowed 471 + * to call kprobe_running(), we have to be non-preemptible. 472 + */ 473 + if (preemptible()) 474 + return false; 475 + if (!kprobe_running()) 476 + return false; 477 + return kprobe_fault_handler(regs, trap); 478 + } 479 + 461 480 #endif /* _LINUX_KPROBES_H */