/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2022 Intel Corporation
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM xe

#if !defined(_XE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _XE_TRACE_H_

#include <linux/tracepoint.h>
#include <linux/types.h>

#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
#include "xe_tlb_inval_types.h"
#include "xe_vm.h"

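/*
 * Helpers that resolve the underlying struct device name (typically the
 * PCI address) from the various xe objects, so that every event below can
 * carry a "dev=" field identifying which GPU it came from.
 */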
#define __dev_name_xe(xe)	dev_name((xe)->drm.dev)
#define __dev_name_tile(tile)	__dev_name_xe(tile_to_xe((tile)))
#define __dev_name_gt(gt)	__dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q)	__dev_name_gt((q)->gt)

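/*
 * TLB invalidation fence events: one event each for when an invalidation
 * request is sent, its completion is received, the fence is signalled, and
 * when it times out. Like every event in this file, these are grouped
 * under the "xe" TRACE_SYSTEM and can be enabled through tracefs, e.g.
 * (assuming the standard tracefs mount point):
 *
 *   echo 1 > /sys/kernel/tracing/events/xe/xe_tlb_inval_fence_send/enable
 */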
DECLARE_EVENT_CLASS(xe_tlb_inval_fence,
	TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	TP_ARGS(xe, fence),

	TP_STRUCT__entry(
		__string(dev, __dev_name_xe(xe))
		__field(struct xe_tlb_inval_fence *, fence)
		__field(int, seqno)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->fence = fence;
		__entry->seqno = fence->seqno;
	),

	TP_printk("dev=%s, fence=%p, seqno=%d",
		  __get_str(dev), __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_send,
	TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_recv,
	TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_signal,
	TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	TP_ARGS(xe, fence)
);

DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_timeout,
	TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
	TP_ARGS(xe, fence)
);

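/*
 * Exec queue lifecycle events, covering creation through destruction as
 * well as GuC scheduling state transitions (register, enable, disable,
 * reset, and so on). Each event snapshots the queue's engine class,
 * logical mask, GT id, width, GuC id, GuC state and flags.
 */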
DECLARE_EVENT_CLASS(xe_exec_queue,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q),

	TP_STRUCT__entry(
		__string(dev, __dev_name_eq(q))
		__field(enum xe_engine_class, class)
		__field(u32, logical_mask)
		__field(u8, gt_id)
		__field(u16, width)
		__field(u16, guc_id)
		__field(u32, guc_state)
		__field(u32, flags)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->class = q->class;
		__entry->logical_mask = q->logical_mask;
		__entry->gt_id = q->gt->info.id;
		__entry->width = q->width;
		__entry->guc_id = q->guc->id;
		__entry->guc_state = atomic_read(&q->guc->state);
		__entry->flags = q->flags;
	),

	TP_printk("dev=%s, %d:0x%x, gt=%d, width=%d, guc_id=%d, guc_state=0x%x, flags=0x%x",
		  __get_str(dev), __entry->class, __entry->logical_mask,
		  __entry->gt_id, __entry->width, __entry->guc_id,
		  __entry->guc_state, __entry->flags)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_create,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_supress_resume,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_submit,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_enable,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_disable,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_scheduling_done,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_register,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_deregister_done,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_close,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_kill,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_cleanup_entity,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_destroy,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_reset,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_memory_cat_error,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_stop,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_resubmit,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

DEFINE_EVENT(xe_exec_queue, xe_exec_queue_lr_cleanup,
	TP_PROTO(struct xe_exec_queue *q),
	TP_ARGS(q)
);

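/*
 * Scheduler job events, tracking a job from creation through execution,
 * completion, error, or timeout. The address of the job's first batch
 * buffer is recorded so trace output can be correlated with GPU work.
 */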
DECLARE_EVENT_CLASS(xe_sched_job,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job),

	TP_STRUCT__entry(
		__string(dev, __dev_name_eq(job->q))
		__field(u32, seqno)
		__field(u32, lrc_seqno)
		__field(u8, gt_id)
		__field(u16, guc_id)
		__field(u32, guc_state)
		__field(u32, flags)
		__field(int, error)
		__field(struct dma_fence *, fence)
		__field(u64, batch_addr)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->seqno = xe_sched_job_seqno(job);
		__entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
		__entry->gt_id = job->q->gt->info.id;
		__entry->guc_id = job->q->guc->id;
		__entry->guc_state = atomic_read(&job->q->guc->state);
		__entry->flags = job->q->flags;
		__entry->error = job->fence ? job->fence->error : 0;
		__entry->fence = job->fence;
		__entry->batch_addr = (u64)job->ptrs[0].batch_addr;
	),

	TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
		  __get_str(dev), __entry->fence, __entry->seqno,
		  __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
		  __entry->batch_addr, __entry->guc_state,
		  __entry->flags, __entry->error)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_create,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_exec,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_run,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_free,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_timedout,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_set_error,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

DEFINE_EVENT(xe_sched_job, xe_sched_job_ban,
	TP_PROTO(struct xe_sched_job *job),
	TP_ARGS(job)
);

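/*
 * Scheduler message events; msg->private_data is assumed to point at the
 * owning xe_exec_queue, which is what the casts below rely on.
 */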
DECLARE_EVENT_CLASS(xe_sched_msg,
	TP_PROTO(struct xe_sched_msg *msg),
	TP_ARGS(msg),

	TP_STRUCT__entry(
		__string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
		__field(u32, opcode)
		__field(u16, guc_id)
		__field(u8, gt_id)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->opcode = msg->opcode;
		__entry->guc_id =
			((struct xe_exec_queue *)msg->private_data)->guc->id;
		__entry->gt_id =
			((struct xe_exec_queue *)msg->private_data)->gt->info.id;
	),

	TP_printk("dev=%s, gt=%u, guc_id=%d, opcode=%u",
		  __get_str(dev), __entry->gt_id, __entry->guc_id,
		  __entry->opcode)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_add,
	TP_PROTO(struct xe_sched_msg *msg),
	TP_ARGS(msg)
);

DEFINE_EVENT(xe_sched_msg, xe_sched_msg_recv,
	TP_PROTO(struct xe_sched_msg *msg),
	TP_ARGS(msg)
);

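/*
 * Hardware fence events, logging the dma-fence context and seqno so fence
 * lifecycles can be matched against other dma-fence tracing.
 */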
DECLARE_EVENT_CLASS(xe_hw_fence,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence),

	TP_STRUCT__entry(
		__string(dev, __dev_name_xe(fence->xe))
		__field(u64, ctx)
		__field(u32, seqno)
		__field(struct xe_hw_fence *, fence)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->ctx = fence->dma.context;
		__entry->seqno = fence->dma.seqno;
		__entry->fence = fence;
	),

	TP_printk("dev=%s, ctx=0x%016llx, fence=%p, seqno=%u",
		  __get_str(dev), __entry->ctx, __entry->fence, __entry->seqno)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_create,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_signal,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
	TP_PROTO(struct xe_hw_fence *fence),
	TP_ARGS(fence)
);

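/*
 * MMIO register access tracing. The 64-bit value is printed as two 32-bit
 * halves (low, high); for 32-bit accesses the high half is zero.
 */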
TRACE_EVENT(xe_reg_rw,
	TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),

	TP_ARGS(mmio, write, reg, val, len),

	TP_STRUCT__entry(
		__string(dev, __dev_name_tile(mmio->tile))
		__field(u64, val)
		__field(u32, reg)
		__field(u16, write)
		__field(u16, len)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->val = val;
		__entry->reg = reg;
		__entry->write = write;
		__entry->len = len;
	),

	TP_printk("dev=%s, %s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
		  __get_str(dev), __entry->write ? "write" : "read",
		  __entry->reg, __entry->len,
		  (u32)(__entry->val & 0xffffffff),
		  (u32)(__entry->val >> 32))
);

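/*
 * Runtime PM events. Each event records the caller's address, printed
 * with %pS so it resolves to a symbol name in the trace output.
 */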
DECLARE_EVENT_CLASS(xe_pm_runtime,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller),

	TP_STRUCT__entry(
		__string(dev, __dev_name_xe(xe))
		__field(void *, caller)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->caller = caller;
	),

	TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_resume,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
	TP_PROTO(struct xe_device *xe, void *caller),
	TP_ARGS(xe, caller)
);

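/*
 * EU stall sampling: records the buffer read/write pointers and sizes
 * each time stall data is read back from a given slice/subslice.
 */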
TRACE_EVENT(xe_eu_stall_data_read,
	TP_PROTO(u8 slice, u8 subslice,
		 u32 read_ptr, u32 write_ptr,
		 size_t read_size, size_t total_size),
	TP_ARGS(slice, subslice,
		read_ptr, write_ptr,
		read_size, total_size),

	TP_STRUCT__entry(
		__field(u8, slice)
		__field(u8, subslice)
		__field(u32, read_ptr)
		__field(u32, write_ptr)
		__field(size_t, read_size)
		__field(size_t, total_size)
	),

	TP_fast_assign(
		__entry->slice = slice;
		__entry->subslice = subslice;
		__entry->read_ptr = read_ptr;
		__entry->write_ptr = write_ptr;
		__entry->read_size = read_size;
		__entry->total_size = total_size;
	),

	TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
		  __entry->slice, __entry->subslice,
		  __entry->read_ptr, __entry->write_ptr,
		  __entry->read_size, __entry->total_size)
);

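/*
 * Emitted when an exec queue exceeds the per-queue limit on outstanding
 * jobs (max_cnt).
 */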
TRACE_EVENT(xe_exec_queue_reach_max_job_count,
	TP_PROTO(struct xe_exec_queue *q, int max_cnt),
	TP_ARGS(q, max_cnt),

	TP_STRUCT__entry(
		__string(dev, __dev_name_eq(q))
		__field(enum xe_engine_class, class)
		__field(u32, logical_mask)
		__field(u16, guc_id)
		__field(int, max_cnt)
	),

	TP_fast_assign(
		__assign_str(dev);
		__entry->class = q->class;
		__entry->logical_mask = q->logical_mask;
		__entry->guc_id = q->guc->id;
		__entry->max_cnt = max_cnt;
	),

	TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d",
		  __get_str(dev), __entry->max_cnt,
		  __entry->class, __entry->logical_mask, __entry->guc_id)
);

#endif

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
#define TRACE_INCLUDE_FILE xe_trace
#include <trace/define_trace.h>