Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing: Centralize preemptirq tracepoints and unify their usage

This patch detaches the preemptirq tracepoints from the tracers and
keeps them separate.

Advantages:
* Lockdep and irqsoff event can now run in parallel since they no longer
have their own calls.

* This unifies the usecase of adding hooks to an irqsoff and irqson
event, and a preemptoff and preempton event.
3 users of the events exist:
- Lockdep
- irqsoff and preemptoff tracers
- irqs and preempt trace events

The unification cleans up several ifdefs and makes the code in preempt
tracer and irqsoff tracers simpler. It gets rid of all the horrific
ifdeferry around PROVE_LOCKING and makes configuration of the different
users of the tracepoints easier and more understandable. It also gets rid
of the time_* function calls from the lockdep hooks used to call into
the preemptirq tracer which is not needed anymore. The negative delta in
lines of code in this patch is quite large too.

In the patch we introduce a new CONFIG option PREEMPTIRQ_TRACEPOINTS
as a single point for registering probes onto the tracepoints. With
this, the web of config options for the preempt/irq toggle tracepoints
and their users becomes:

       PREEMPT_TRACER   PREEMPTIRQ_EVENTS   IRQSOFF_TRACER   PROVE_LOCKING
             |                 |       \          |              |
             \    (selects)    /        \         \  (selects)   /
              TRACE_PREEMPT_TOGGLE       ----> TRACE_IRQFLAGS
                      \                           /
                       \       (depends on)      /
                        PREEMPTIRQ_TRACEPOINTS

Other than the performance tests mentioned in the previous patch, I also
ran the locking API test suite. I verified that all test cases are
passing.

I also injected issues by not registering lockdep probes onto the
tracepoints and I see failures to confirm that the probes are indeed
working.

This series + lockdep probes not registered (just to inject errors):
[ 0.000000] hard-irqs-on + irq-safe-A/21: ok | ok | ok |
[ 0.000000] soft-irqs-on + irq-safe-A/21: ok | ok | ok |
[ 0.000000] sirq-safe-A => hirqs-on/12:FAILED|FAILED| ok |
[ 0.000000] sirq-safe-A => hirqs-on/21:FAILED|FAILED| ok |
[ 0.000000] hard-safe-A + irqs-on/12:FAILED|FAILED| ok |
[ 0.000000] soft-safe-A + irqs-on/12:FAILED|FAILED| ok |
[ 0.000000] hard-safe-A + irqs-on/21:FAILED|FAILED| ok |
[ 0.000000] soft-safe-A + irqs-on/21:FAILED|FAILED| ok |
[ 0.000000] hard-safe-A + unsafe-B #1/123: ok | ok | ok |
[ 0.000000] soft-safe-A + unsafe-B #1/123: ok | ok | ok |

With this series + lockdep probes registered, all locking tests pass:

[ 0.000000] hard-irqs-on + irq-safe-A/21: ok | ok | ok |
[ 0.000000] soft-irqs-on + irq-safe-A/21: ok | ok | ok |
[ 0.000000] sirq-safe-A => hirqs-on/12: ok | ok | ok |
[ 0.000000] sirq-safe-A => hirqs-on/21: ok | ok | ok |
[ 0.000000] hard-safe-A + irqs-on/12: ok | ok | ok |
[ 0.000000] soft-safe-A + irqs-on/12: ok | ok | ok |
[ 0.000000] hard-safe-A + irqs-on/21: ok | ok | ok |
[ 0.000000] soft-safe-A + irqs-on/21: ok | ok | ok |
[ 0.000000] hard-safe-A + unsafe-B #1/123: ok | ok | ok |
[ 0.000000] soft-safe-A + unsafe-B #1/123: ok | ok | ok |

Link: http://lkml.kernel.org/r/20180730222423.196630-4-joel@joelfernandes.org

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

authored by

Joel Fernandes (Google) and committed by
Steven Rostedt (VMware)
c3bc8fd6 e6753f23

+195 -229
+1 -10
include/linux/ftrace.h
··· 701 701 return CALLER_ADDR2; 702 702 } 703 703 704 - #ifdef CONFIG_IRQSOFF_TRACER 705 - extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 706 - extern void time_hardirqs_off(unsigned long a0, unsigned long a1); 707 - #else 708 - static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { } 709 - static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { } 710 - #endif 711 - 712 - #if defined(CONFIG_PREEMPT_TRACER) || \ 713 - (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS)) 704 + #ifdef CONFIG_TRACE_PREEMPT_TOGGLE 714 705 extern void trace_preempt_on(unsigned long a0, unsigned long a1); 715 706 extern void trace_preempt_off(unsigned long a0, unsigned long a1); 716 707 #else
+8 -3
include/linux/irqflags.h
··· 15 15 #include <linux/typecheck.h> 16 16 #include <asm/irqflags.h> 17 17 18 - #ifdef CONFIG_TRACE_IRQFLAGS 18 + /* Currently trace_softirqs_on/off is used only by lockdep */ 19 + #ifdef CONFIG_PROVE_LOCKING 19 20 extern void trace_softirqs_on(unsigned long ip); 20 21 extern void trace_softirqs_off(unsigned long ip); 22 + #else 23 + # define trace_softirqs_on(ip) do { } while (0) 24 + # define trace_softirqs_off(ip) do { } while (0) 25 + #endif 26 + 27 + #ifdef CONFIG_TRACE_IRQFLAGS 21 28 extern void trace_hardirqs_on(void); 22 29 extern void trace_hardirqs_off(void); 23 30 # define trace_hardirq_context(p) ((p)->hardirq_context) ··· 50 43 #else 51 44 # define trace_hardirqs_on() do { } while (0) 52 45 # define trace_hardirqs_off() do { } while (0) 53 - # define trace_softirqs_on(ip) do { } while (0) 54 - # define trace_softirqs_off(ip) do { } while (0) 55 46 # define trace_hardirq_context(p) 0 56 47 # define trace_softirq_context(p) 0 57 48 # define trace_hardirqs_enabled(p) 0
+5 -3
include/linux/lockdep.h
··· 266 266 /* 267 267 * Initialization, self-test and debugging-output methods: 268 268 */ 269 - extern void lockdep_info(void); 269 + extern void lockdep_init(void); 270 + extern void lockdep_init_early(void); 270 271 extern void lockdep_reset(void); 271 272 extern void lockdep_reset_lock(struct lockdep_map *lock); 272 273 extern void lockdep_free_key_range(void *start, unsigned long size); ··· 407 406 # define lock_downgrade(l, i) do { } while (0) 408 407 # define lock_set_class(l, n, k, s, i) do { } while (0) 409 408 # define lock_set_subclass(l, s, i) do { } while (0) 410 - # define lockdep_info() do { } while (0) 409 + # define lockdep_init() do { } while (0) 410 + # define lockdep_init_early() do { } while (0) 411 411 # define lockdep_init_map(lock, name, key, sub) \ 412 412 do { (void)(name); (void)(key); } while (0) 413 413 # define lockdep_set_class(lock, key) do { (void)(key); } while (0) ··· 534 532 535 533 #endif /* CONFIG_LOCKDEP */ 536 534 537 - #ifdef CONFIG_TRACE_IRQFLAGS 535 + #ifdef CONFIG_PROVE_LOCKING 538 536 extern void print_irqtrace_events(struct task_struct *curr); 539 537 #else 540 538 static inline void print_irqtrace_events(struct task_struct *curr)
+1 -1
include/linux/preempt.h
··· 150 150 */ 151 151 #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) 152 152 153 - #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 153 + #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) 154 154 extern void preempt_count_add(int val); 155 155 extern void preempt_count_sub(int val); 156 156 #define preempt_count_dec_and_test() \
+14 -9
include/trace/events/preemptirq.h
··· 1 - #ifdef CONFIG_PREEMPTIRQ_EVENTS 1 + #ifdef CONFIG_PREEMPTIRQ_TRACEPOINTS 2 2 3 3 #undef TRACE_SYSTEM 4 4 #define TRACE_SYSTEM preemptirq ··· 32 32 (void *)((unsigned long)(_stext) + __entry->parent_offs)) 33 33 ); 34 34 35 - #ifndef CONFIG_PROVE_LOCKING 35 + #ifdef CONFIG_TRACE_IRQFLAGS 36 36 DEFINE_EVENT(preemptirq_template, irq_disable, 37 37 TP_PROTO(unsigned long ip, unsigned long parent_ip), 38 38 TP_ARGS(ip, parent_ip)); ··· 40 40 DEFINE_EVENT(preemptirq_template, irq_enable, 41 41 TP_PROTO(unsigned long ip, unsigned long parent_ip), 42 42 TP_ARGS(ip, parent_ip)); 43 + #else 44 + #define trace_irq_enable(...) 45 + #define trace_irq_disable(...) 46 + #define trace_irq_enable_rcuidle(...) 47 + #define trace_irq_disable_rcuidle(...) 43 48 #endif 44 49 45 - #ifdef CONFIG_DEBUG_PREEMPT 50 + #ifdef CONFIG_TRACE_PREEMPT_TOGGLE 46 51 DEFINE_EVENT(preemptirq_template, preempt_disable, 47 52 TP_PROTO(unsigned long ip, unsigned long parent_ip), 48 53 TP_ARGS(ip, parent_ip)); ··· 55 50 DEFINE_EVENT(preemptirq_template, preempt_enable, 56 51 TP_PROTO(unsigned long ip, unsigned long parent_ip), 57 52 TP_ARGS(ip, parent_ip)); 53 + #else 54 + #define trace_preempt_enable(...) 55 + #define trace_preempt_disable(...) 56 + #define trace_preempt_enable_rcuidle(...) 57 + #define trace_preempt_disable_rcuidle(...) 58 58 #endif 59 59 60 60 #endif /* _TRACE_PREEMPTIRQ_H */ 61 61 62 62 #include <trace/define_trace.h> 63 63 64 - #endif /* !CONFIG_PREEMPTIRQ_EVENTS */ 65 - 66 - #if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING) 64 + #else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */ 67 65 #define trace_irq_enable(...) 68 66 #define trace_irq_disable(...) 69 67 #define trace_irq_enable_rcuidle(...) 70 68 #define trace_irq_disable_rcuidle(...) 71 - #endif 72 - 73 - #if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT) 74 69 #define trace_preempt_enable(...) 75 70 #define trace_preempt_disable(...) 76 71 #define trace_preempt_enable_rcuidle(...)
+4 -1
init/main.c
··· 648 648 profile_init(); 649 649 call_function_init(); 650 650 WARN(!irqs_disabled(), "Interrupts were enabled early\n"); 651 + 652 + lockdep_init_early(); 653 + 651 654 early_boot_irqs_disabled = false; 652 655 local_irq_enable(); 653 656 ··· 666 663 panic("Too many boot %s vars at `%s'", panic_later, 667 664 panic_param); 668 665 669 - lockdep_info(); 666 + lockdep_init(); 670 667 671 668 /* 672 669 * Need to run this when irqs are enabled, because it wants
+14 -21
kernel/locking/lockdep.c
··· 55 55 56 56 #include "lockdep_internals.h" 57 57 58 + #include <trace/events/preemptirq.h> 58 59 #define CREATE_TRACE_POINTS 59 60 #include <trace/events/lock.h> 60 61 ··· 2840 2839 debug_atomic_inc(hardirqs_on_events); 2841 2840 } 2842 2841 2843 - __visible void trace_hardirqs_on_caller(unsigned long ip) 2842 + static void lockdep_hardirqs_on(void *none, unsigned long ignore, 2843 + unsigned long ip) 2844 2844 { 2845 - time_hardirqs_on(CALLER_ADDR0, ip); 2846 - 2847 2845 if (unlikely(!debug_locks || current->lockdep_recursion)) 2848 2846 return; 2849 2847 ··· 2881 2881 __trace_hardirqs_on_caller(ip); 2882 2882 current->lockdep_recursion = 0; 2883 2883 } 2884 - EXPORT_SYMBOL(trace_hardirqs_on_caller); 2885 - 2886 - void trace_hardirqs_on(void) 2887 - { 2888 - trace_hardirqs_on_caller(CALLER_ADDR0); 2889 - } 2890 - EXPORT_SYMBOL(trace_hardirqs_on); 2891 2884 2892 2885 /* 2893 2886 * Hardirqs were disabled: 2894 2887 */ 2895 - __visible void trace_hardirqs_off_caller(unsigned long ip) 2888 + static void lockdep_hardirqs_off(void *none, unsigned long ignore, 2889 + unsigned long ip) 2896 2890 { 2897 2891 struct task_struct *curr = current; 2898 - 2899 - time_hardirqs_off(CALLER_ADDR0, ip); 2900 2892 2901 2893 if (unlikely(!debug_locks || current->lockdep_recursion)) 2902 2894 return; ··· 2911 2919 } else 2912 2920 debug_atomic_inc(redundant_hardirqs_off); 2913 2921 } 2914 - EXPORT_SYMBOL(trace_hardirqs_off_caller); 2915 - 2916 - void trace_hardirqs_off(void) 2917 - { 2918 - trace_hardirqs_off_caller(CALLER_ADDR0); 2919 - } 2920 - EXPORT_SYMBOL(trace_hardirqs_off); 2921 2922 2922 2923 /* 2923 2924 * Softirqs will be enabled: ··· 4315 4330 raw_local_irq_restore(flags); 4316 4331 } 4317 4332 4318 - void __init lockdep_info(void) 4333 + void __init lockdep_init_early(void) 4334 + { 4335 + #ifdef CONFIG_PROVE_LOCKING 4336 + register_trace_prio_irq_disable(lockdep_hardirqs_off, NULL, INT_MAX); 4337 + register_trace_prio_irq_enable(lockdep_hardirqs_on, NULL, INT_MIN); 
4338 + #endif 4339 + } 4340 + 4341 + void __init lockdep_init(void) 4319 4342 { 4320 4343 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); 4321 4344
+1 -1
kernel/sched/core.c
··· 3189 3189 #endif 3190 3190 3191 3191 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ 3192 - defined(CONFIG_PREEMPT_TRACER)) 3192 + defined(CONFIG_TRACE_PREEMPT_TOGGLE)) 3193 3193 /* 3194 3194 * If the value passed in is equal to the current preempt count 3195 3195 * then we just disabled preemption. Start timing the latency.
+17 -5
kernel/trace/Kconfig
··· 82 82 Allow the use of ring_buffer_swap_cpu. 83 83 Adds a very slight overhead to tracing when enabled. 84 84 85 + config PREEMPTIRQ_TRACEPOINTS 86 + bool 87 + depends on TRACE_PREEMPT_TOGGLE || TRACE_IRQFLAGS 88 + select TRACING 89 + default y 90 + help 91 + Create preempt/irq toggle tracepoints if needed, so that other parts 92 + of the kernel can use them to generate or add hooks to them. 93 + 85 94 # All tracer options should select GENERIC_TRACER. For those options that are 86 95 # enabled by all tracers (context switch and event tracer) they select TRACING. 87 96 # This allows those options to appear when no other tracer is selected. But the ··· 164 155 the return value. This is done by setting the current return 165 156 address on the current task structure into a stack of calls. 166 157 158 + config TRACE_PREEMPT_TOGGLE 159 + bool 160 + help 161 + Enables hooks which will be called when preemption is first disabled, 162 + and last enabled. 167 163 168 164 config PREEMPTIRQ_EVENTS 169 165 bool "Enable trace events for preempt and irq disable/enable" 170 166 select TRACE_IRQFLAGS 171 - depends on DEBUG_PREEMPT || !PROVE_LOCKING 172 - depends on TRACING 167 + select TRACE_PREEMPT_TOGGLE if PREEMPT 168 + select GENERIC_TRACER 173 169 default n 174 170 help 175 171 Enable tracing of disable and enable events for preemption and irqs. 176 - For tracing preempt disable/enable events, DEBUG_PREEMPT must be 177 - enabled. For tracing irq disable/enable events, PROVE_LOCKING must 178 - be disabled. 179 172 180 173 config IRQSOFF_TRACER 181 174 bool "Interrupts-off Latency Tracer" ··· 214 203 select RING_BUFFER_ALLOW_SWAP 215 204 select TRACER_SNAPSHOT 216 205 select TRACER_SNAPSHOT_PER_CPU_SWAP 206 + select TRACE_PREEMPT_TOGGLE 217 207 help 218 208 This option measures the time spent in preemption-off critical 219 209 sections, with microsecond accuracy.
+1 -1
kernel/trace/Makefile
··· 41 41 obj-$(CONFIG_PREEMPTIRQ_DELAY_TEST) += preemptirq_delay_test.o 42 42 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 43 43 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o 44 - obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o 44 + obj-$(CONFIG_PREEMPTIRQ_TRACEPOINTS) += trace_preemptirq.o 45 45 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 46 46 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 47 47 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+57 -174
kernel/trace/trace_irqsoff.c
··· 16 16 17 17 #include "trace.h" 18 18 19 - #define CREATE_TRACE_POINTS 20 19 #include <trace/events/preemptirq.h> 21 20 22 21 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER) ··· 449 450 } 450 451 EXPORT_SYMBOL_GPL(stop_critical_timings); 451 452 452 - #ifdef CONFIG_IRQSOFF_TRACER 453 - #ifdef CONFIG_PROVE_LOCKING 454 - void time_hardirqs_on(unsigned long a0, unsigned long a1) 455 - { 456 - if (!preempt_trace() && irq_trace()) 457 - stop_critical_timing(a0, a1); 458 - } 459 - 460 - void time_hardirqs_off(unsigned long a0, unsigned long a1) 461 - { 462 - if (!preempt_trace() && irq_trace()) 463 - start_critical_timing(a0, a1); 464 - } 465 - 466 - #else /* !CONFIG_PROVE_LOCKING */ 467 - 468 - /* 469 - * We are only interested in hardirq on/off events: 470 - */ 471 - static inline void tracer_hardirqs_on(void) 472 - { 473 - if (!preempt_trace() && irq_trace()) 474 - stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 475 - } 476 - 477 - static inline void tracer_hardirqs_off(void) 478 - { 479 - if (!preempt_trace() && irq_trace()) 480 - start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 481 - } 482 - 483 - static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) 484 - { 485 - if (!preempt_trace() && irq_trace()) 486 - stop_critical_timing(CALLER_ADDR0, caller_addr); 487 - } 488 - 489 - static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) 490 - { 491 - if (!preempt_trace() && irq_trace()) 492 - start_critical_timing(CALLER_ADDR0, caller_addr); 493 - } 494 - 495 - #endif /* CONFIG_PROVE_LOCKING */ 496 - #endif /* CONFIG_IRQSOFF_TRACER */ 497 - 498 - #ifdef CONFIG_PREEMPT_TRACER 499 - static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) 500 - { 501 - if (preempt_trace() && !irq_trace()) 502 - stop_critical_timing(a0, a1); 503 - } 504 - 505 - static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) 506 - { 507 - if (preempt_trace() && !irq_trace()) 508 - start_critical_timing(a0, 
a1); 509 - } 510 - #endif /* CONFIG_PREEMPT_TRACER */ 511 - 512 453 #ifdef CONFIG_FUNCTION_TRACER 513 454 static bool function_enabled; 514 455 ··· 598 659 } 599 660 600 661 #ifdef CONFIG_IRQSOFF_TRACER 662 + /* 663 + * We are only interested in hardirq on/off events: 664 + */ 665 + static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1) 666 + { 667 + if (!preempt_trace() && irq_trace()) 668 + stop_critical_timing(a0, a1); 669 + } 670 + 671 + static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1) 672 + { 673 + if (!preempt_trace() && irq_trace()) 674 + start_critical_timing(a0, a1); 675 + } 676 + 601 677 static int irqsoff_tracer_init(struct trace_array *tr) 602 678 { 603 679 trace_type = TRACER_IRQS_OFF; 604 680 681 + register_trace_irq_disable(tracer_hardirqs_off, NULL); 682 + register_trace_irq_enable(tracer_hardirqs_on, NULL); 605 683 return __irqsoff_tracer_init(tr); 606 684 } 607 685 608 686 static void irqsoff_tracer_reset(struct trace_array *tr) 609 687 { 688 + unregister_trace_irq_disable(tracer_hardirqs_off, NULL); 689 + unregister_trace_irq_enable(tracer_hardirqs_on, NULL); 610 690 __irqsoff_tracer_reset(tr); 611 691 } 612 692 ··· 648 690 .allow_instances = true, 649 691 .use_max_tr = true, 650 692 }; 651 - # define register_irqsoff(trace) register_tracer(&trace) 652 - #else 653 - # define register_irqsoff(trace) do { } while (0) 654 - #endif 693 + #endif /* CONFIG_IRQSOFF_TRACER */ 655 694 656 695 #ifdef CONFIG_PREEMPT_TRACER 696 + static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1) 697 + { 698 + if (preempt_trace() && !irq_trace()) 699 + stop_critical_timing(a0, a1); 700 + } 701 + 702 + static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1) 703 + { 704 + if (preempt_trace() && !irq_trace()) 705 + start_critical_timing(a0, a1); 706 + } 707 + 657 708 static int preemptoff_tracer_init(struct trace_array *tr) 658 709 { 659 710 trace_type = TRACER_PREEMPT_OFF; 
660 711 712 + register_trace_preempt_disable(tracer_preempt_off, NULL); 713 + register_trace_preempt_enable(tracer_preempt_on, NULL); 661 714 return __irqsoff_tracer_init(tr); 662 715 } 663 716 664 717 static void preemptoff_tracer_reset(struct trace_array *tr) 665 718 { 719 + unregister_trace_preempt_disable(tracer_preempt_off, NULL); 720 + unregister_trace_preempt_enable(tracer_preempt_on, NULL); 666 721 __irqsoff_tracer_reset(tr); 667 722 } 668 723 ··· 698 727 .allow_instances = true, 699 728 .use_max_tr = true, 700 729 }; 701 - # define register_preemptoff(trace) register_tracer(&trace) 702 - #else 703 - # define register_preemptoff(trace) do { } while (0) 704 - #endif 730 + #endif /* CONFIG_PREEMPT_TRACER */ 705 731 706 - #if defined(CONFIG_IRQSOFF_TRACER) && \ 707 - defined(CONFIG_PREEMPT_TRACER) 732 + #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER) 708 733 709 734 static int preemptirqsoff_tracer_init(struct trace_array *tr) 710 735 { 711 736 trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; 737 + 738 + register_trace_irq_disable(tracer_hardirqs_off, NULL); 739 + register_trace_irq_enable(tracer_hardirqs_on, NULL); 740 + register_trace_preempt_disable(tracer_preempt_off, NULL); 741 + register_trace_preempt_enable(tracer_preempt_on, NULL); 712 742 713 743 return __irqsoff_tracer_init(tr); 714 744 } 715 745 716 746 static void preemptirqsoff_tracer_reset(struct trace_array *tr) 717 747 { 748 + unregister_trace_irq_disable(tracer_hardirqs_off, NULL); 749 + unregister_trace_irq_enable(tracer_hardirqs_on, NULL); 750 + unregister_trace_preempt_disable(tracer_preempt_off, NULL); 751 + unregister_trace_preempt_enable(tracer_preempt_on, NULL); 752 + 718 753 __irqsoff_tracer_reset(tr); 719 754 } 720 755 ··· 743 766 .allow_instances = true, 744 767 .use_max_tr = true, 745 768 }; 746 - 747 - # define register_preemptirqsoff(trace) register_tracer(&trace) 748 - #else 749 - # define register_preemptirqsoff(trace) do { } while (0) 750 769 #endif 751 
770 752 771 __init static int init_irqsoff_tracer(void) 753 772 { 754 - register_irqsoff(irqsoff_tracer); 755 - register_preemptoff(preemptoff_tracer); 756 - register_preemptirqsoff(preemptirqsoff_tracer); 773 + #ifdef CONFIG_IRQSOFF_TRACER 774 + register_tracer(&irqsoff_tracer); 775 + #endif 776 + #ifdef CONFIG_PREEMPT_TRACER 777 + register_tracer(&preemptoff_tracer); 778 + #endif 779 + #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER) 780 + register_tracer(&preemptirqsoff_tracer); 781 + #endif 757 782 758 783 return 0; 759 784 } 760 785 core_initcall(init_irqsoff_tracer); 761 786 #endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */ 762 - 763 - #ifndef CONFIG_IRQSOFF_TRACER 764 - static inline void tracer_hardirqs_on(void) { } 765 - static inline void tracer_hardirqs_off(void) { } 766 - static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { } 767 - static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { } 768 - #endif 769 - 770 - #ifndef CONFIG_PREEMPT_TRACER 771 - static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { } 772 - static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { } 773 - #endif 774 - 775 - #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING) 776 - /* Per-cpu variable to prevent redundant calls when IRQs already off */ 777 - static DEFINE_PER_CPU(int, tracing_irq_cpu); 778 - 779 - void trace_hardirqs_on(void) 780 - { 781 - if (!this_cpu_read(tracing_irq_cpu)) 782 - return; 783 - 784 - trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1); 785 - tracer_hardirqs_on(); 786 - 787 - this_cpu_write(tracing_irq_cpu, 0); 788 - } 789 - EXPORT_SYMBOL(trace_hardirqs_on); 790 - 791 - void trace_hardirqs_off(void) 792 - { 793 - if (this_cpu_read(tracing_irq_cpu)) 794 - return; 795 - 796 - this_cpu_write(tracing_irq_cpu, 1); 797 - 798 - trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1); 799 - tracer_hardirqs_off(); 800 - } 801 - 
EXPORT_SYMBOL(trace_hardirqs_off); 802 - 803 - __visible void trace_hardirqs_on_caller(unsigned long caller_addr) 804 - { 805 - if (!this_cpu_read(tracing_irq_cpu)) 806 - return; 807 - 808 - trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr); 809 - tracer_hardirqs_on_caller(caller_addr); 810 - 811 - this_cpu_write(tracing_irq_cpu, 0); 812 - } 813 - EXPORT_SYMBOL(trace_hardirqs_on_caller); 814 - 815 - __visible void trace_hardirqs_off_caller(unsigned long caller_addr) 816 - { 817 - if (this_cpu_read(tracing_irq_cpu)) 818 - return; 819 - 820 - this_cpu_write(tracing_irq_cpu, 1); 821 - 822 - trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr); 823 - tracer_hardirqs_off_caller(caller_addr); 824 - } 825 - EXPORT_SYMBOL(trace_hardirqs_off_caller); 826 - 827 - /* 828 - * Stubs: 829 - */ 830 - 831 - void trace_softirqs_on(unsigned long ip) 832 - { 833 - } 834 - 835 - void trace_softirqs_off(unsigned long ip) 836 - { 837 - } 838 - 839 - inline void print_irqtrace_events(struct task_struct *curr) 840 - { 841 - } 842 - #endif 843 - 844 - #if defined(CONFIG_PREEMPT_TRACER) || \ 845 - (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS)) 846 - void trace_preempt_on(unsigned long a0, unsigned long a1) 847 - { 848 - trace_preempt_enable_rcuidle(a0, a1); 849 - tracer_preempt_on(a0, a1); 850 - } 851 - 852 - void trace_preempt_off(unsigned long a0, unsigned long a1) 853 - { 854 - trace_preempt_disable_rcuidle(a0, a1); 855 - tracer_preempt_off(a0, a1); 856 - } 857 - #endif
+72
kernel/trace/trace_preemptirq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * preemptoff and irqoff tracepoints 4 + * 5 + * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org> 6 + */ 7 + 8 + #include <linux/kallsyms.h> 9 + #include <linux/uaccess.h> 10 + #include <linux/module.h> 11 + #include <linux/ftrace.h> 12 + 13 + #define CREATE_TRACE_POINTS 14 + #include <trace/events/preemptirq.h> 15 + 16 + #ifdef CONFIG_TRACE_IRQFLAGS 17 + /* Per-cpu variable to prevent redundant calls when IRQs already off */ 18 + static DEFINE_PER_CPU(int, tracing_irq_cpu); 19 + 20 + void trace_hardirqs_on(void) 21 + { 22 + if (!this_cpu_read(tracing_irq_cpu)) 23 + return; 24 + 25 + trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1); 26 + this_cpu_write(tracing_irq_cpu, 0); 27 + } 28 + EXPORT_SYMBOL(trace_hardirqs_on); 29 + 30 + void trace_hardirqs_off(void) 31 + { 32 + if (this_cpu_read(tracing_irq_cpu)) 33 + return; 34 + 35 + this_cpu_write(tracing_irq_cpu, 1); 36 + trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1); 37 + } 38 + EXPORT_SYMBOL(trace_hardirqs_off); 39 + 40 + __visible void trace_hardirqs_on_caller(unsigned long caller_addr) 41 + { 42 + if (!this_cpu_read(tracing_irq_cpu)) 43 + return; 44 + 45 + trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr); 46 + this_cpu_write(tracing_irq_cpu, 0); 47 + } 48 + EXPORT_SYMBOL(trace_hardirqs_on_caller); 49 + 50 + __visible void trace_hardirqs_off_caller(unsigned long caller_addr) 51 + { 52 + if (this_cpu_read(tracing_irq_cpu)) 53 + return; 54 + 55 + this_cpu_write(tracing_irq_cpu, 1); 56 + trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr); 57 + } 58 + EXPORT_SYMBOL(trace_hardirqs_off_caller); 59 + #endif /* CONFIG_TRACE_IRQFLAGS */ 60 + 61 + #ifdef CONFIG_TRACE_PREEMPT_TOGGLE 62 + 63 + void trace_preempt_on(unsigned long a0, unsigned long a1) 64 + { 65 + trace_preempt_enable_rcuidle(a0, a1); 66 + } 67 + 68 + void trace_preempt_off(unsigned long a0, unsigned long a1) 69 + { 70 + trace_preempt_disable_rcuidle(a0, a1); 71 + } 72 + #endif