Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
25#define _GPU_SCHED_TRACE_H_
26
27#include <linux/stringify.h>
28#include <linux/types.h>
29#include <linux/tracepoint.h>
30
31#undef TRACE_SYSTEM
32#define TRACE_SYSTEM gpu_scheduler
33#define TRACE_INCLUDE_FILE gpu_scheduler_trace
34
35/**
36 * DOC: uAPI trace events
37 *
38 * ``drm_sched_job_queue``, ``drm_sched_job_run``, ``drm_sched_job_add_dep``,
39 * ``drm_sched_job_done`` and ``drm_sched_job_unschedulable`` are considered
40 * stable uAPI.
41 *
42 * Common trace events attributes:
43 *
44 * * ``dev`` - the dev_name() of the device running the job.
45 *
46 * * ``ring`` - the hardware ring running the job. Together with ``dev`` it
47 * uniquely identifies where the job is going to be executed.
48 *
49 * * ``fence`` - the &struct dma_fence.context and the &struct dma_fence.seqno of
50 * &struct drm_sched_fence.finished
51 *
 * All the events depend on drm_sched_job_arm() having been called already for
53 * the job because they use &struct drm_sched_job.sched or
54 * &struct drm_sched_job.s_fence.
55 */
56
/*
 * drm_sched_job - common event class for the per-job tracepoints
 * (drm_sched_job_queue / drm_sched_job_run).
 *
 * Recorded attributes:
 *  - dev:          dev_name() of the device the scheduler belongs to
 *  - ring:         the scheduler's name string
 *  - fence:        context:seqno of &struct drm_sched_fence.finished
 *  - job count:    number of jobs in the entity's spsc job_queue at
 *                  assign time
 *  - hw job count: the scheduler's credit_count at assign time
 *  - client_id:    drm_client_id stored in the scheduler fence
 *
 * Only valid after drm_sched_job_arm(): dereferences sched_job->sched
 * and sched_job->s_fence (see the DOC comment at the top of this file).
 */
DECLARE_EVENT_CLASS(drm_sched_job,
	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
	    TP_ARGS(sched_job, entity),
	    TP_STRUCT__entry(
			     __string(name, sched_job->sched->name)
			     __field(u32, job_count)
			     __field(int, hw_job_count)
			     __string(dev, dev_name(sched_job->sched->dev))
			     __field(u64, fence_context)
			     __field(u64, fence_seqno)
			     __field(u64, client_id)
			     ),

	    TP_fast_assign(
			   __assign_str(name);
			   /* Snapshot of the entity's queued-job backlog. */
			   __entry->job_count = spsc_queue_count(&entity->job_queue);
			   __entry->hw_job_count = atomic_read(
				   &sched_job->sched->credit_count);
			   __assign_str(dev);
			   __entry->fence_context = sched_job->s_fence->finished.context;
			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
			   __entry->client_id = sched_job->s_fence->drm_client_id;
			   ),
	    TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu",
		      __get_str(dev),
		      __entry->fence_context, __entry->fence_seqno, __get_str(name),
		      __entry->job_count, __entry->hw_job_count, __entry->client_id)
);
85
/*
 * drm_sched_job_queue - stable uAPI tracepoint (see DOC at top of file).
 * Uses the drm_sched_job event class; presumably emitted when the job is
 * queued to a scheduler entity — confirm at the call site.
 */
DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
	    TP_ARGS(sched_job, entity)
);
90
/*
 * drm_sched_job_run - stable uAPI tracepoint (see DOC at top of file).
 * Uses the drm_sched_job event class; presumably emitted when the job is
 * handed to the hardware for execution — confirm at the call site.
 */
DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
	    TP_ARGS(sched_job, entity)
);
95
/*
 * drm_sched_job_done - stable uAPI tracepoint (see DOC at top of file).
 *
 * Records the context:seqno of a scheduler fence's "finished" fence at
 * the point this event fires; the printk marks it as "signaled".
 */
TRACE_EVENT(drm_sched_job_done,
	    TP_PROTO(struct drm_sched_fence *fence),
	    TP_ARGS(fence),
	    TP_STRUCT__entry(
			     __field(u64, fence_context)
			     __field(u64, fence_seqno)
			     ),

	    TP_fast_assign(
			   __entry->fence_context = fence->finished.context;
			   __entry->fence_seqno = fence->finished.seqno;
			   ),
	    TP_printk("fence=%llu:%llu signaled",
		      __entry->fence_context, __entry->fence_seqno)
);
111
/*
 * drm_sched_job_add_dep - stable uAPI tracepoint (see DOC at top of file).
 *
 * Links a job (identified by its s_fence->finished context:seqno) to a
 * dependency dma_fence (ctx:seqno). Requires drm_sched_job_arm() to have
 * run, since it dereferences sched_job->s_fence.
 */
TRACE_EVENT(drm_sched_job_add_dep,
	TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
	TP_ARGS(sched_job, fence),
	TP_STRUCT__entry(
		    __field(u64, fence_context)
		    __field(u64, fence_seqno)
		    __field(u64, ctx)
		    __field(u64, seqno)
		    ),

	TP_fast_assign(
		    /* The job's own finished fence identity ... */
		    __entry->fence_context = sched_job->s_fence->finished.context;
		    __entry->fence_seqno = sched_job->s_fence->finished.seqno;
		    /* ... and the fence it now depends on. */
		    __entry->ctx = fence->context;
		    __entry->seqno = fence->seqno;
		    ),
	TP_printk("fence=%llu:%llu depends on fence=%llu:%llu",
		  __entry->fence_context, __entry->fence_seqno,
		  __entry->ctx, __entry->seqno)
);
132
/*
 * drm_sched_job_unschedulable - stable uAPI tracepoint (see DOC at top of
 * file).
 *
 * Same payload as drm_sched_job_add_dep: the job's s_fence->finished
 * context:seqno plus a dependency dma_fence ctx:seqno — but the printk
 * reports the dependency as "unsignalled", i.e. the reason the job cannot
 * be scheduled yet. Requires drm_sched_job_arm() to have run.
 */
TRACE_EVENT(drm_sched_job_unschedulable,
	    TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
	    TP_ARGS(sched_job, fence),
	    TP_STRUCT__entry(
			     __field(u64, fence_context)
			     __field(u64, fence_seqno)
			     __field(u64, ctx)
			     __field(u64, seqno)
			     ),

	    TP_fast_assign(
			   __entry->fence_context = sched_job->s_fence->finished.context;
			   __entry->fence_seqno = sched_job->s_fence->finished.seqno;
			   __entry->ctx = fence->context;
			   __entry->seqno = fence->seqno;
			   ),
	    TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu",
		      __entry->fence_context, __entry->fence_seqno,
		      __entry->ctx, __entry->seqno)
);
153
154#endif /* _GPU_SCHED_TRACE_H_ */
155
156/* This part must be outside protection */
157#undef TRACE_INCLUDE_PATH
158#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/scheduler
159#include <trace/define_trace.h>