Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kthread: add kthread_work tracepoints

While migrating some code from wq to kthread_worker, I found that I missed
the execute_start/end tracepoints. So add similar tracepoints for
kthread_work. And for completeness, a queue_work tracepoint (although this
one differs slightly from the matching workqueue tracepoint).

Link: https://lkml.kernel.org/r/20201010180323.126634-1-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Cc: Rob Clark <robdclark@chromium.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Phil Auld <pauld@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Thara Gopinath <thara.gopinath@linaro.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Vincent Donnefort <vincent.donnefort@arm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Ilias Stamatis <stamatis.iliass@gmail.com>
Cc: Liang Chen <cl@rock-chips.com>
Cc: Ben Dooks <ben.dooks@codethink.co.uk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "J. Bruce Fields" <bfields@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Rob Clark and committed by
Linus Torvalds
f630c7c6 2c85ebc5

+93
+84
include/trace/events/sched.h
··· 5 5 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) 6 6 #define _TRACE_SCHED_H 7 7 8 + #include <linux/kthread.h> 8 9 #include <linux/sched/numa_balancing.h> 9 10 #include <linux/tracepoint.h> 10 11 #include <linux/binfmts.h> ··· 50 49 ), 51 50 52 51 TP_printk("ret=%d", __entry->ret) 52 + ); 53 + 54 + /** 55 + * sched_kthread_work_queue_work - called when a work gets queued 56 + * @worker: pointer to the kthread_worker 57 + * @work: pointer to struct kthread_work 58 + * 59 + * This event occurs when a work is queued immediately or once a 60 + * delayed work is actually queued (ie: once the delay has been 61 + * reached). 62 + */ 63 + TRACE_EVENT(sched_kthread_work_queue_work, 64 + 65 + TP_PROTO(struct kthread_worker *worker, 66 + struct kthread_work *work), 67 + 68 + TP_ARGS(worker, work), 69 + 70 + TP_STRUCT__entry( 71 + __field( void *, work ) 72 + __field( void *, function) 73 + __field( void *, worker) 74 + ), 75 + 76 + TP_fast_assign( 77 + __entry->work = work; 78 + __entry->function = work->func; 79 + __entry->worker = worker; 80 + ), 81 + 82 + TP_printk("work struct=%p function=%ps worker=%p", 83 + __entry->work, __entry->function, __entry->worker) 84 + ); 85 + 86 + /** 87 + * sched_kthread_work_execute_start - called immediately before the work callback 88 + * @work: pointer to struct kthread_work 89 + * 90 + * Allows to track kthread work execution. 
91 + */ 92 + TRACE_EVENT(sched_kthread_work_execute_start, 93 + 94 + TP_PROTO(struct kthread_work *work), 95 + 96 + TP_ARGS(work), 97 + 98 + TP_STRUCT__entry( 99 + __field( void *, work ) 100 + __field( void *, function) 101 + ), 102 + 103 + TP_fast_assign( 104 + __entry->work = work; 105 + __entry->function = work->func; 106 + ), 107 + 108 + TP_printk("work struct %p: function %ps", __entry->work, __entry->function) 109 + ); 110 + 111 + /** 112 + * sched_kthread_work_execute_end - called immediately after the work callback 113 + * @work: pointer to struct kthread_work 114 + * @function: pointer to worker function 115 + * 116 + * Allows to track kthread work execution. 117 + */ 118 + TRACE_EVENT(sched_kthread_work_execute_end, 119 + 120 + TP_PROTO(struct kthread_work *work, kthread_work_func_t function), 121 + 122 + TP_ARGS(work, function), 123 + 124 + TP_STRUCT__entry( 125 + __field( void *, work ) 126 + __field( void *, function) 127 + ), 128 + 129 + TP_fast_assign( 130 + __entry->work = work; 131 + __entry->function = function; 132 + ), 133 + 134 + TP_printk("work struct %p: function %ps", __entry->work, __entry->function) 53 135 ); 54 136 55 137 /*
+9
kernel/kthread.c
··· 704 704 raw_spin_unlock_irq(&worker->lock); 705 705 706 706 if (work) { 707 + kthread_work_func_t func = work->func; 707 708 __set_current_state(TASK_RUNNING); 709 + trace_sched_kthread_work_execute_start(work); 708 710 work->func(work); 711 + /* 712 + * Avoid dereferencing work after this point. The trace 713 + * event only cares about the address. 714 + */ 715 + trace_sched_kthread_work_execute_end(work, func); 709 716 } else if (!freezing(current)) 710 717 schedule(); 711 718 ··· 840 833 struct list_head *pos) 841 834 { 842 835 kthread_insert_work_sanity_check(worker, work); 836 + 837 + trace_sched_kthread_work_queue_work(worker, work); 843 838 844 839 list_add_tail(&work->node, pos); 845 840 work->worker = worker;