/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915

#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_

#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>

#include <drm/drm_drv.h>

#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_irq.h"

/* object tracking */

TRACE_EVENT(i915_gem_object_create,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u64, size)
    ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->size = obj->base.size;
    ),

    TP_printk("obj=%p, size=0x%llx", __entry->obj, __entry->size)
);
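/*
 * Note on the TRACE_EVENT() anatomy used throughout this file:
 * TP_PROTO() declares the tracepoint's C arguments, TP_STRUCT__entry()
 * lays out the record written into the trace ring buffer,
 * TP_fast_assign() fills that record when the event fires, and
 * TP_printk() describes how the record is formatted when read back
 * from tracefs. Defining i915_gem_object_create above also generates a
 * trace_i915_gem_object_create() helper for the driver to call; the
 * same pattern repeats for every event below.
 */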

TRACE_EVENT(i915_gem_shrink,
    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
    TP_ARGS(i915, target, flags),

    TP_STRUCT__entry(
        __field(int, dev)
        __field(unsigned long, target)
        __field(unsigned, flags)
    ),

    TP_fast_assign(
        __entry->dev = i915->drm.primary->index;
        __entry->target = target;
        __entry->flags = flags;
    ),

    TP_printk("dev=%d, target=%lu, flags=%x",
        __entry->dev, __entry->target, __entry->flags)
);

TRACE_EVENT(i915_vma_bind,
    TP_PROTO(struct i915_vma *vma, unsigned flags),
    TP_ARGS(vma, flags),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(struct i915_address_space *, vm)
        __field(u64, offset)
        __field(u64, size)
        __field(unsigned, flags)
    ),

    TP_fast_assign(
        __entry->obj = vma->obj;
        __entry->vm = vma->vm;
        __entry->offset = vma->node.start;
        __entry->size = vma->node.size;
        __entry->flags = flags;
    ),

    TP_printk("obj=%p, offset=0x%016llx size=0x%llx%s vm=%p",
        __entry->obj, __entry->offset, __entry->size,
        __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
        __entry->vm)
);

TRACE_EVENT(i915_vma_unbind,
    TP_PROTO(struct i915_vma *vma),
    TP_ARGS(vma),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(struct i915_address_space *, vm)
        __field(u64, offset)
        __field(u64, size)
    ),

    TP_fast_assign(
        __entry->obj = vma->obj;
        __entry->vm = vma->vm;
        __entry->offset = vma->node.start;
        __entry->size = vma->node.size;
    ),

    TP_printk("obj=%p, offset=0x%016llx size=0x%llx vm=%p",
        __entry->obj, __entry->offset, __entry->size, __entry->vm)
);

TRACE_EVENT(i915_gem_object_pwrite,
    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
    TP_ARGS(obj, offset, len),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u64, offset)
        __field(u64, len)
    ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->offset = offset;
        __entry->len = len;
    ),

    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
        __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_pread,
    TP_PROTO(struct drm_i915_gem_object *obj, u64 offset, u64 len),
    TP_ARGS(obj, offset, len),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u64, offset)
        __field(u64, len)
    ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->offset = offset;
        __entry->len = len;
    ),

    TP_printk("obj=%p, offset=0x%llx, len=0x%llx",
        __entry->obj, __entry->offset, __entry->len)
);

TRACE_EVENT(i915_gem_object_fault,
    TP_PROTO(struct drm_i915_gem_object *obj, u64 index, bool gtt, bool write),
    TP_ARGS(obj, index, gtt, write),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
        __field(u64, index)
        __field(bool, gtt)
        __field(bool, write)
    ),

    TP_fast_assign(
        __entry->obj = obj;
        __entry->index = index;
        __entry->gtt = gtt;
        __entry->write = write;
    ),

    TP_printk("obj=%p, %s index=%llu %s",
        __entry->obj,
        __entry->gtt ? "GTT" : "CPU",
        __entry->index,
        __entry->write ? ", writable" : "")
);

DECLARE_EVENT_CLASS(i915_gem_object,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj),

    TP_STRUCT__entry(
        __field(struct drm_i915_gem_object *, obj)
    ),

    TP_fast_assign(
        __entry->obj = obj;
    ),

    TP_printk("obj=%p", __entry->obj)
);
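/*
 * DECLARE_EVENT_CLASS() defines a reusable template: each DEFINE_EVENT()
 * below instantiates one named tracepoint that shares the class's entry
 * layout, assignment, and print format, which is cheaper in code size
 * than expanding a full TRACE_EVENT() per event.
 */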

DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj)
);

DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
    TP_PROTO(struct drm_i915_gem_object *obj),
    TP_ARGS(obj)
);

TRACE_EVENT(i915_gem_evict,
    TP_PROTO(struct i915_address_space *vm, u64 size, u64 align, unsigned int flags),
    TP_ARGS(vm, size, align, flags),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(struct i915_address_space *, vm)
        __field(u64, size)
        __field(u64, align)
        __field(unsigned int, flags)
    ),

    TP_fast_assign(
        __entry->dev = vm->i915->drm.primary->index;
        __entry->vm = vm;
        __entry->size = size;
        __entry->align = align;
        __entry->flags = flags;
    ),

    TP_printk("dev=%d, vm=%p, size=0x%llx, align=0x%llx %s",
        __entry->dev, __entry->vm, __entry->size, __entry->align,
        __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);

TRACE_EVENT(i915_gem_evict_node,
    TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
    TP_ARGS(vm, node, flags),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(struct i915_address_space *, vm)
        __field(u64, start)
        __field(u64, size)
        __field(unsigned long, color)
        __field(unsigned int, flags)
    ),

    TP_fast_assign(
        __entry->dev = vm->i915->drm.primary->index;
        __entry->vm = vm;
        __entry->start = node->start;
        __entry->size = node->size;
        __entry->color = node->color;
        __entry->flags = flags;
    ),

    TP_printk("dev=%d, vm=%p, start=0x%llx size=0x%llx, color=0x%lx, flags=%x",
        __entry->dev, __entry->vm,
        __entry->start, __entry->size,
        __entry->color, __entry->flags)
);

TRACE_EVENT(i915_gem_evict_vm,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(struct i915_address_space *, vm)
    ),

    TP_fast_assign(
        __entry->dev = vm->i915->drm.primary->index;
        __entry->vm = vm;
    ),

    TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);

TRACE_EVENT(i915_request_queue,
    TP_PROTO(struct i915_request *rq, u32 flags),
    TP_ARGS(rq, flags),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u64, ctx)
        __field(u16, class)
        __field(u16, instance)
        __field(u32, seqno)
        __field(u32, flags)
    ),

    TP_fast_assign(
        __entry->dev = rq->engine->i915->drm.primary->index;
        __entry->class = rq->engine->uabi_class;
        __entry->instance = rq->engine->uabi_instance;
        __entry->ctx = rq->fence.context;
        __entry->seqno = rq->fence.seqno;
        __entry->flags = flags;
    ),

    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
        __entry->dev, __entry->class, __entry->instance,
        __entry->ctx, __entry->seqno, __entry->flags)
);

DECLARE_EVENT_CLASS(i915_request,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u64, ctx)
        __field(u16, class)
        __field(u16, instance)
        __field(u32, seqno)
        __field(u32, tail)
    ),

    TP_fast_assign(
        __entry->dev = rq->engine->i915->drm.primary->index;
        __entry->class = rq->engine->uabi_class;
        __entry->instance = rq->engine->uabi_instance;
        __entry->ctx = rq->fence.context;
        __entry->seqno = rq->fence.seqno;
        __entry->tail = rq->tail;
    ),

    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
        __entry->dev, __entry->class, __entry->instance,
        __entry->ctx, __entry->seqno, __entry->tail)
);

DEFINE_EVENT(i915_request, i915_request_add,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq)
);

#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
DEFINE_EVENT(i915_request, i915_request_guc_submit,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq)
);

DEFINE_EVENT(i915_request, i915_request_submit,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq)
);

DEFINE_EVENT(i915_request, i915_request_execute,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq)
);

TRACE_EVENT(i915_request_in,
    TP_PROTO(struct i915_request *rq, unsigned int port),
    TP_ARGS(rq, port),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u64, ctx)
        __field(u16, class)
        __field(u16, instance)
        __field(u32, seqno)
        __field(u32, port)
        __field(s32, prio)
    ),

    TP_fast_assign(
        __entry->dev = rq->engine->i915->drm.primary->index;
        __entry->class = rq->engine->uabi_class;
        __entry->instance = rq->engine->uabi_instance;
        __entry->ctx = rq->fence.context;
        __entry->seqno = rq->fence.seqno;
        __entry->prio = rq->sched.attr.priority;
        __entry->port = port;
    ),

    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
        __entry->dev, __entry->class, __entry->instance,
        __entry->ctx, __entry->seqno,
        __entry->prio, __entry->port)
);

TRACE_EVENT(i915_request_out,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u64, ctx)
        __field(u16, class)
        __field(u16, instance)
        __field(u32, seqno)
        __field(u32, completed)
    ),

    TP_fast_assign(
        __entry->dev = rq->engine->i915->drm.primary->index;
        __entry->class = rq->engine->uabi_class;
        __entry->instance = rq->engine->uabi_instance;
        __entry->ctx = rq->fence.context;
        __entry->seqno = rq->fence.seqno;
        __entry->completed = i915_request_completed(rq);
    ),

    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u",
        __entry->dev, __entry->class, __entry->instance,
        __entry->ctx, __entry->seqno, __entry->completed)
);

DECLARE_EVENT_CLASS(intel_context,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce),

    TP_STRUCT__entry(
        __field(u32, guc_id)
        __field(int, pin_count)
        __field(u32, sched_state)
        __field(u8, guc_prio)
    ),

    TP_fast_assign(
        __entry->guc_id = ce->guc_id.id;
        __entry->pin_count = atomic_read(&ce->pin_count);
        __entry->sched_state = ce->guc_state.sched_state;
        __entry->guc_prio = ce->guc_state.prio;
    ),

    TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
        __entry->guc_id, __entry->pin_count,
        __entry->sched_state,
        __entry->guc_prio)
);

DEFINE_EVENT(intel_context, intel_context_set_prio,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_reset,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_ban,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_register,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_deregister_done,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_enable,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_disable,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_sched_done,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_create,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_fence_release,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_free,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_pin,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

DEFINE_EVENT(intel_context, intel_context_do_unpin,
    TP_PROTO(struct intel_context *ce),
    TP_ARGS(ce)
);

#else
#if !defined(TRACE_HEADER_MULTI_READ)
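/*
 * Low-level tracepoints are compiled out: the empty inline stubs below
 * keep the trace_*() call sites building without scattering #ifdef
 * CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS checks through the driver.
 */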
static inline void
trace_i915_request_guc_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_submit(struct i915_request *rq)
{
}

static inline void
trace_i915_request_execute(struct i915_request *rq)
{
}

static inline void
trace_i915_request_in(struct i915_request *rq, unsigned int port)
{
}

static inline void
trace_i915_request_out(struct i915_request *rq)
{
}

static inline void
trace_intel_context_set_prio(struct intel_context *ce)
{
}

static inline void
trace_intel_context_reset(struct intel_context *ce)
{
}

static inline void
trace_intel_context_ban(struct intel_context *ce)
{
}

static inline void
trace_intel_context_register(struct intel_context *ce)
{
}

static inline void
trace_intel_context_deregister(struct intel_context *ce)
{
}

static inline void
trace_intel_context_deregister_done(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_enable(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_disable(struct intel_context *ce)
{
}

static inline void
trace_intel_context_sched_done(struct intel_context *ce)
{
}

static inline void
trace_intel_context_create(struct intel_context *ce)
{
}

static inline void
trace_intel_context_fence_release(struct intel_context *ce)
{
}

static inline void
trace_intel_context_free(struct intel_context *ce)
{
}

static inline void
trace_intel_context_steal_guc_id(struct intel_context *ce)
{
}

static inline void
trace_intel_context_do_pin(struct intel_context *ce)
{
}

static inline void
trace_intel_context_do_unpin(struct intel_context *ce)
{
}
#endif
#endif

DEFINE_EVENT(i915_request, i915_request_retire,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq)
);

TRACE_EVENT(i915_request_wait_begin,
    TP_PROTO(struct i915_request *rq, unsigned int flags),
    TP_ARGS(rq, flags),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(u64, ctx)
        __field(u16, class)
        __field(u16, instance)
        __field(u32, seqno)
        __field(unsigned int, flags)
    ),

    /* NB: the blocking information is racy since mutex_is_locked
     * doesn't check that the current thread holds the lock. The only
     * other option would be to pass the boolean information of whether
     * or not the class was blocking down through the stack which is
     * less desirable.
     */
    TP_fast_assign(
        __entry->dev = rq->engine->i915->drm.primary->index;
        __entry->class = rq->engine->uabi_class;
        __entry->instance = rq->engine->uabi_instance;
        __entry->ctx = rq->fence.context;
        __entry->seqno = rq->fence.seqno;
        __entry->flags = flags;
    ),

    TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x",
        __entry->dev, __entry->class, __entry->instance,
        __entry->ctx, __entry->seqno,
        __entry->flags)
);

DEFINE_EVENT(i915_request, i915_request_wait_end,
    TP_PROTO(struct i915_request *rq),
    TP_ARGS(rq)
);

TRACE_EVENT_CONDITION(i915_reg_rw,
    TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),

    TP_ARGS(write, reg, val, len, trace),

    TP_CONDITION(trace),

    TP_STRUCT__entry(
        __field(u64, val)
        __field(u32, reg)
        __field(u16, write)
        __field(u16, len)
    ),

    TP_fast_assign(
        __entry->val = (u64)val;
        __entry->reg = i915_mmio_reg_offset(reg);
        __entry->write = write;
        __entry->len = len;
    ),

    TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
        __entry->write ? "write" : "read",
        __entry->reg, __entry->len,
        (u32)(__entry->val & 0xffffffff),
        (u32)(__entry->val >> 32))
);
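/*
 * Note: TRACE_EVENT_CONDITION() only records the event when
 * TP_CONDITION() evaluates true, so register accesses passed with
 * trace=false are filtered out before any ring-buffer work is done.
 */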

TRACE_EVENT(intel_gpu_freq_change,
    TP_PROTO(u32 freq),
    TP_ARGS(freq),

    TP_STRUCT__entry(
        __field(u32, freq)
    ),

    TP_fast_assign(
        __entry->freq = freq;
    ),

    TP_printk("new_freq=%u", __entry->freq)
);

/**
 * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
 *
 * With full ppgtt enabled each process using drm will allocate at least one
 * translation table. With these traces it is possible to keep track of the
 * allocation and of the lifetime of the tables; this can be used during
 * testing/debug to verify that we are not leaking ppgtts.
 * These traces identify the ppgtt through the vm pointer, which is also printed
 * by the i915_vma_bind and i915_vma_unbind tracepoints.
 */
DECLARE_EVENT_CLASS(i915_ppgtt,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm),

    TP_STRUCT__entry(
        __field(struct i915_address_space *, vm)
        __field(u32, dev)
    ),

    TP_fast_assign(
        __entry->vm = vm;
        __entry->dev = vm->i915->drm.primary->index;
    ),

    TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
)

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm)
);

DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
    TP_PROTO(struct i915_address_space *vm),
    TP_ARGS(vm)
);
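/*
 * Example of using these events for leak hunting (illustrative shell
 * session, assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_create/enable
 *   echo 1 > /sys/kernel/tracing/events/i915/i915_ppgtt_release/enable
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * Every i915_ppgtt_create without a matching i915_ppgtt_release for the
 * same vm pointer is a candidate leak.
 */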

/**
 * DOC: i915_context_create and i915_context_free tracepoints
 *
 * These tracepoints are used to track creation and deletion of contexts.
 * If full ppgtt is enabled, they also print the address of the vm assigned to
 * the context.
 */
DECLARE_EVENT_CLASS(i915_context,
    TP_PROTO(struct i915_gem_context *ctx),
    TP_ARGS(ctx),

    TP_STRUCT__entry(
        __field(u32, dev)
        __field(struct i915_gem_context *, ctx)
        __field(struct i915_address_space *, vm)
    ),

    TP_fast_assign(
        __entry->dev = ctx->i915->drm.primary->index;
        __entry->ctx = ctx;
        __entry->vm = ctx->vm;
    ),

    TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
        __entry->dev, __entry->ctx, __entry->vm)
)

DEFINE_EVENT(i915_context, i915_context_create,
    TP_PROTO(struct i915_gem_context *ctx),
    TP_ARGS(ctx)
);

DEFINE_EVENT(i915_context, i915_context_free,
    TP_PROTO(struct i915_gem_context *ctx),
    TP_ARGS(ctx)
);
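/*
 * A sketch of the call sites (illustrative, not the literal driver
 * code): the events above expand to trace_i915_context_create() and
 * trace_i915_context_free() helpers, which the driver would invoke at
 * context creation/destruction, e.g.:
 *
 *   trace_i915_context_create(ctx);
 */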

#endif /* _I915_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#define TRACE_INCLUDE_FILE i915_trace
#include <trace/define_trace.h>