Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ring-buffer: use generic version of in_nmi

Impact: clean up

Now that a generic in_nmi is available, this patch removes the
special code in the ring_buffer and uses the generic in_nmi
version instead.

With this change, I was also able to rename the "arch_ftrace_nmi_enter"
back to "ftrace_nmi_enter" and remove the code from the ring buffer.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>

+15 -40
+2 -2
arch/x86/kernel/ftrace.c
··· 113 113 MCOUNT_INSN_SIZE); 114 114 } 115 115 116 - void arch_ftrace_nmi_enter(void) 116 + void ftrace_nmi_enter(void) 117 117 { 118 118 atomic_inc(&nmi_running); 119 119 /* Must have nmi_running seen before reading write flag */ ··· 124 124 } 125 125 } 126 126 127 - void arch_ftrace_nmi_exit(void) 127 + void ftrace_nmi_exit(void) 128 128 { 129 129 /* Finish all executions before clearing nmi_running */ 130 130 smp_wmb();
-8
include/linux/ftrace_irq.h
··· 3 3 4 4 5 5 #ifdef CONFIG_FTRACE_NMI_ENTER 6 - extern void arch_ftrace_nmi_enter(void); 7 - extern void arch_ftrace_nmi_exit(void); 8 - #else 9 - static inline void arch_ftrace_nmi_enter(void) { } 10 - static inline void arch_ftrace_nmi_exit(void) { } 11 - #endif 12 - 13 - #ifdef CONFIG_RING_BUFFER 14 6 extern void ftrace_nmi_enter(void); 15 7 extern void ftrace_nmi_exit(void); 16 8 #else
+13 -30
kernel/trace/ring_buffer.c
··· 8 8 #include <linux/spinlock.h> 9 9 #include <linux/debugfs.h> 10 10 #include <linux/uaccess.h> 11 + #include <linux/hardirq.h> 11 12 #include <linux/module.h> 12 13 #include <linux/percpu.h> 13 14 #include <linux/mutex.h> ··· 19 18 #include <linux/fs.h> 20 19 21 20 #include "trace.h" 22 - 23 - /* 24 - * Since the write to the buffer is still not fully lockless, 25 - * we must be careful with NMIs. The locks in the writers 26 - * are taken when a write crosses to a new page. The locks 27 - * protect against races with the readers (this will soon 28 - * be fixed with a lockless solution). 29 - * 30 - * Because we can not protect against NMIs, and we want to 31 - * keep traces reentrant, we need to manage what happens 32 - * when we are in an NMI. 33 - */ 34 - static DEFINE_PER_CPU(int, rb_in_nmi); 35 - 36 - void ftrace_nmi_enter(void) 37 - { 38 - __get_cpu_var(rb_in_nmi)++; 39 - /* call arch specific handler too */ 40 - arch_ftrace_nmi_enter(); 41 - } 42 - 43 - void ftrace_nmi_exit(void) 44 - { 45 - arch_ftrace_nmi_exit(); 46 - __get_cpu_var(rb_in_nmi)--; 47 - /* NMIs are not recursive */ 48 - WARN_ON_ONCE(__get_cpu_var(rb_in_nmi)); 49 - } 50 - 51 21 52 22 /* 53 23 * A fast way to enable or disable all ring buffers is to ··· 999 1027 1000 1028 local_irq_save(flags); 1001 1029 /* 1030 + * Since the write to the buffer is still not 1031 + * fully lockless, we must be careful with NMIs. 1032 + * The locks in the writers are taken when a write 1033 + * crosses to a new page. The locks protect against 1034 + * races with the readers (this will soon be fixed 1035 + * with a lockless solution). 1036 + * 1037 + * Because we can not protect against NMIs, and we 1038 + * want to keep traces reentrant, we need to manage 1039 + * what happens when we are in an NMI. 1040 + * 1002 1041 * NMIs can happen after we take the lock. 1003 1042 * If we are in an NMI, only take the lock 1004 1043 * if it is not already taken. Otherwise 1005 1044 * simply fail. 
1006 1045 */ 1007 - if (unlikely(__get_cpu_var(rb_in_nmi))) { 1046 + if (unlikely(in_nmi())) { 1008 1047 if (!__raw_spin_trylock(&cpu_buffer->lock)) 1009 1048 goto out_unlock; 1010 1049 } else