Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright © 2022 Intel Corporation
4 */
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM xe
8
9#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
10#define _XE_TRACE_H_
11
12#include <linux/tracepoint.h>
13#include <linux/types.h>
14
15#include "xe_exec_queue_types.h"
16#include "xe_gpu_scheduler_types.h"
17#include "xe_gt_tlb_invalidation_types.h"
18#include "xe_gt_types.h"
19#include "xe_guc_exec_queue_types.h"
20#include "xe_sched_job.h"
21#include "xe_vm.h"
22
/*
 * Helpers that resolve the device-name string recorded in each event's
 * "dev" field. Each one reduces its argument to the underlying
 * struct xe_device and calls dev_name() on its DRM device.
 */
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
27
/*
 * xe_gt_tlb_invalidation_fence - event class for GT TLB invalidation fences.
 *
 * Records the device name, the fence pointer and the fence's seqno at the
 * moment the event fires.
 */
DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
		    TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
		    TP_ARGS(xe, fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(struct xe_gt_tlb_invalidation_fence *, fence)
			     __field(int, seqno)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->fence = fence;
			   __entry->seqno = fence->seqno;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%d",
			      __get_str(dev), __entry->fence, __entry->seqno)
);
47
/*
 * Instances of the xe_gt_tlb_invalidation_fence class, one per stage of a
 * fence's life cycle (create, work func, callback, send, receive, signal,
 * timeout). All share the class's entry layout and format.
 */
DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
	     xe_gt_tlb_invalidation_fence_work_func,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
	     TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
	     TP_ARGS(xe, fence)
);
83
/*
 * xe_exec_queue - event class for exec queue state transitions.
 *
 * Snapshots the queue's engine class, logical engine mask, GT id, width,
 * GuC id, current GuC state (read atomically from q->guc->state) and
 * flags.
 */
DECLARE_EVENT_CLASS(xe_exec_queue,
		    TP_PROTO(struct xe_exec_queue *q),
		    TP_ARGS(q),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(q))
			     __field(enum xe_engine_class, class)
			     __field(u32, logical_mask)
			     __field(u8, gt_id)
			     __field(u16, width)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->class = q->class;
			   __entry->logical_mask = q->logical_mask;
			   __entry->gt_id = q->gt->info.id;
			   __entry->width = q->width;
			   __entry->guc_id = q->guc->id;
			   __entry->guc_state = atomic_read(&q->guc->state);
			   __entry->flags = q->flags;
			   ),

		    TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
			      __get_str(dev), __entry->class, __entry->logical_mask,
			      __entry->gt_id, __entry->width, __entry->guc_id,
			      __entry->guc_state, __entry->flags)
);
115
/*
 * Instances of the xe_exec_queue class, one per exec queue life-cycle or
 * submission event. All share the class's entry layout and format.
 */
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

/*
 * NOTE(review): "supress" is misspelled, but the tracepoint name is
 * visible to userspace tooling; renaming it would break existing
 * consumers, so it is left as-is.
 */
DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	     TP_PROTO(struct xe_exec_queue *q),
	     TP_ARGS(q)
);
205
/*
 * xe_sched_job - event class for scheduler job events.
 *
 * Snapshots the job's scheduler seqno and LRC seqno, the owning queue's
 * GT id, GuC id, GuC state and flags, the job's fence pointer and its
 * error code (0 when job->fence is not set), and the batch address of
 * the first entry in job->ptrs.
 */
DECLARE_EVENT_CLASS(xe_sched_job,
		    TP_PROTO(struct xe_sched_job *job),
		    TP_ARGS(job),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(job->q))
			     __field(u32, seqno)
			     __field(u32, lrc_seqno)
			     __field(u8, gt_id)
			     __field(u16, guc_id)
			     __field(u32, guc_state)
			     __field(u32, flags)
			     __field(int, error)
			     __field(struct dma_fence *, fence)
			     __field(u64, batch_addr)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->seqno = xe_sched_job_seqno(job);
			   __entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
			   __entry->gt_id = job->q->gt->info.id;
			   __entry->guc_id = job->q->guc->id;
			   __entry->guc_state =
			   atomic_read(&job->q->guc->state);
			   __entry->flags = job->q->flags;
			   __entry->error = job->fence ? job->fence->error : 0;
			   __entry->fence = job->fence;
			   __entry->batch_addr = (u64)job->ptrs[0].batch_addr;
			   ),

		    TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
			      __get_str(dev), __entry->fence, __entry->seqno,
			      __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
			      __entry->batch_addr, __entry->guc_state,
			      __entry->flags, __entry->error)
);
243
/*
 * Instances of the xe_sched_job class, one per stage of a job's life
 * cycle (create, exec, run, free, timeout, error, ban).
 */
DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	     TP_PROTO(struct xe_sched_job *job),
	     TP_ARGS(job)
);
278
/*
 * xe_sched_msg - event class for scheduler messages.
 *
 * msg->private_data holds the target exec queue (cast throughout); the
 * event records the message opcode plus that queue's GuC id and GT id.
 */
DECLARE_EVENT_CLASS(xe_sched_msg,
		    TP_PROTO(struct xe_sched_msg *msg),
		    TP_ARGS(msg),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
			     __field(u32, opcode)
			     __field(u16, guc_id)
			     __field(u8, gt_id)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->opcode = msg->opcode;
			   __entry->guc_id =
			   ((struct xe_exec_queue *)msg->private_data)->guc->id;
			   __entry->gt_id =
			   ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
			   ),

		    TP_printk("dev=%s, gt=%u guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
			      __entry->opcode)
);
302
/*
 * Instances of the xe_sched_msg class: message queued (add) and message
 * processed (recv).
 */
DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	     TP_PROTO(struct xe_sched_msg *msg),
	     TP_ARGS(msg)
);
312
/*
 * xe_hw_fence - event class for hardware fences.
 *
 * Records the dma_fence context and seqno embedded in the xe_hw_fence,
 * plus the fence pointer itself.
 */
DECLARE_EVENT_CLASS(xe_hw_fence,
		    TP_PROTO(struct xe_hw_fence *fence),
		    TP_ARGS(fence),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(fence->xe))
			     __field(u64, ctx)
			     __field(u32, seqno)
			     __field(struct xe_hw_fence *, fence)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->ctx = fence->dma.context;
			   __entry->seqno = fence->dma.seqno;
			   __entry->fence = fence;
			   ),

		    TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
			      __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
);
334
/*
 * Instances of the xe_hw_fence class: fence creation, signaling, and
 * signaling attempts.
 */
DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	     TP_PROTO(struct xe_hw_fence *fence),
	     TP_ARGS(fence)
);
349
/*
 * xe_reg_rw - MMIO register access.
 *
 * @write distinguishes writes from reads; @len is the access size; @val
 * is printed as two 32-bit halves (low, high) so 64-bit accesses are
 * fully visible.
 */
TRACE_EVENT(xe_reg_rw,
	    TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),

	    TP_ARGS(mmio, write, reg, val, len),

	    TP_STRUCT__entry(
		     __string(dev, __dev_name_tile(mmio->tile))
		     __field(u64, val)
		     __field(u32, reg)
		     __field(u16, write)
		     __field(u16, len)
		     ),

	    TP_fast_assign(
		   __assign_str(dev);
		   __entry->val = val;
		   __entry->reg = reg;
		   __entry->write = write;
		   __entry->len = len;
		   ),

	    TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		      __get_str(dev), __entry->write ? "write" : "read",
		      __entry->reg, __entry->len,
		      (u32)(__entry->val & 0xffffffff),
		      (u32)(__entry->val >> 32))
);
377
/*
 * xe_pm_runtime - event class for runtime-PM entry points.
 *
 * @caller is the address of the calling function, printed symbolically
 * via %pS.
 */
DECLARE_EVENT_CLASS(xe_pm_runtime,
		    TP_PROTO(struct xe_device *xe, void *caller),
		    TP_ARGS(xe, caller),

		    TP_STRUCT__entry(
			     __string(dev, __dev_name_xe(xe))
			     __field(void *, caller)
			     ),

		    TP_fast_assign(
			   __assign_str(dev);
			   __entry->caller = caller;
			   ),

		    TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
);
394
/*
 * Instances of the xe_pm_runtime class, one per PM entry point
 * (get/put, system suspend/resume, runtime suspend/resume, ioctl get).
 */
DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
	     TP_PROTO(struct xe_device *xe, void *caller),
	     TP_ARGS(xe, caller)
);
429
/*
 * xe_eu_stall_data_read - EU stall sampling buffer read.
 *
 * Records the slice/subslice being read, the buffer read/write pointers,
 * the size of this read and the total read size. Unlike the other events
 * in this file, no device name is recorded.
 */
TRACE_EVENT(xe_eu_stall_data_read,
	    TP_PROTO(u8 slice, u8 subslice,
		     u32 read_ptr, u32 write_ptr,
		     size_t read_size, size_t total_size),
	    TP_ARGS(slice, subslice,
		    read_ptr, write_ptr,
		    read_size, total_size),

	    TP_STRUCT__entry(__field(u8, slice)
			     __field(u8, subslice)
			     __field(u32, read_ptr)
			     __field(u32, write_ptr)
			     __field(size_t, read_size)
			     __field(size_t, total_size)
			     ),

	    TP_fast_assign(__entry->slice = slice;
			   __entry->subslice = subslice;
			   __entry->read_ptr = read_ptr;
			   __entry->write_ptr = write_ptr;
			   __entry->read_size = read_size;
			   __entry->total_size = total_size;
			   ),

	    TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
		      __entry->slice, __entry->subslice,
		      __entry->read_ptr, __entry->write_ptr,
		      __entry->read_size, __entry->total_size)
);
459
460#endif
461
462/* This part must be outside protection */
463#undef TRACE_INCLUDE_PATH
464#undef TRACE_INCLUDE_FILE
465#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
466#define TRACE_INCLUDE_FILE xe_trace
467#include <trace/define_trace.h>