Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm: Get rid of drm_sched_job.id

Its only purpose was for trace events, but jobs can already be
uniquely identified using their fence.

The downside of using the fence is that it only becomes available
after 'drm_sched_job_arm' has been called. However, all trace events
that used job.id fire after that point, so they can safely switch to
using the fence instead.

Suggested-by: Tvrtko Ursulin <tursulin@igalia.com>
Signed-off-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Arvind Yadav <arvind.yadav@amd.com>
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://lore.kernel.org/r/20250526125505.2360-9-pierre-eric.pelloux-prayer@amd.com

authored by

Pierre-Eric Pelloux-Prayer and committed by
Philipp Stanner
4f7fa5fa f6743e6a

+14 -32
+6 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
··· 167 167 TP_PROTO(struct amdgpu_job *job), 168 168 TP_ARGS(job), 169 169 TP_STRUCT__entry( 170 - __field(uint64_t, sched_job_id) 171 170 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) 172 171 __field(unsigned int, context) 173 172 __field(unsigned int, seqno) ··· 176 177 ), 177 178 178 179 TP_fast_assign( 179 - __entry->sched_job_id = job->base.id; 180 180 __assign_str(timeline); 181 181 __entry->context = job->base.s_fence->finished.context; 182 182 __entry->seqno = job->base.s_fence->finished.seqno; 183 183 __assign_str(ring); 184 184 __entry->num_ibs = job->num_ibs; 185 185 ), 186 - TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", 187 - __entry->sched_job_id, __get_str(timeline), __entry->context, 186 + TP_printk("timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", 187 + __get_str(timeline), __entry->context, 188 188 __entry->seqno, __get_str(ring), __entry->num_ibs) 189 189 ); 190 190 ··· 191 193 TP_PROTO(struct amdgpu_job *job), 192 194 TP_ARGS(job), 193 195 TP_STRUCT__entry( 194 - __field(uint64_t, sched_job_id) 195 196 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) 196 197 __field(unsigned int, context) 197 198 __field(unsigned int, seqno) ··· 199 202 ), 200 203 201 204 TP_fast_assign( 202 - __entry->sched_job_id = job->base.id; 203 205 __assign_str(timeline); 204 206 __entry->context = job->base.s_fence->finished.context; 205 207 __entry->seqno = job->base.s_fence->finished.seqno; 206 208 __assign_str(ring); 207 209 __entry->num_ibs = job->num_ibs; 208 210 ), 209 - TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", 210 - __entry->sched_job_id, __get_str(timeline), __entry->context, 211 + TP_printk("timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", 212 + __get_str(timeline), __entry->context, 211 213 __entry->seqno, __get_str(ring), __entry->num_ibs) 212 214 ); 213 215 ··· 547 551 TP_ARGS(sched_job, fence), 548 552 TP_STRUCT__entry( 549 553 
__string(ring, sched_job->base.sched->name) 550 - __field(uint64_t, id) 551 554 __field(struct dma_fence *, fence) 552 555 __field(uint64_t, ctx) 553 556 __field(unsigned, seqno) ··· 554 559 555 560 TP_fast_assign( 556 561 __assign_str(ring); 557 - __entry->id = sched_job->base.id; 558 562 __entry->fence = fence; 559 563 __entry->ctx = fence->context; 560 564 __entry->seqno = fence->seqno; 561 565 ), 562 - TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", 563 - __get_str(ring), __entry->id, 566 + TP_printk("job ring=%s need pipe sync to fence=%p, context=%llu, seq=%u", 567 + __get_str(ring), 564 568 __entry->fence, __entry->ctx, 565 569 __entry->seqno) 566 570 );
+2 -4
drivers/gpu/drm/lima/lima_trace.h
··· 14 14 TP_PROTO(struct lima_sched_task *task), 15 15 TP_ARGS(task), 16 16 TP_STRUCT__entry( 17 - __field(uint64_t, task_id) 18 17 __field(unsigned int, context) 19 18 __field(unsigned int, seqno) 20 19 __string(pipe, task->base.sched->name) 21 20 ), 22 21 23 22 TP_fast_assign( 24 - __entry->task_id = task->base.id; 25 23 __entry->context = task->base.s_fence->finished.context; 26 24 __entry->seqno = task->base.s_fence->finished.seqno; 27 25 __assign_str(pipe); 28 26 ), 29 27 30 - TP_printk("task=%llu, context=%u seqno=%u pipe=%s", 31 - __entry->task_id, __entry->context, __entry->seqno, 28 + TP_printk("context=%u seqno=%u pipe=%s", 29 + __entry->context, __entry->seqno, 32 30 __get_str(pipe)) 33 31 ); 34 32
+6 -12
drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
··· 36 36 TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), 37 37 TP_ARGS(sched_job, entity), 38 38 TP_STRUCT__entry( 39 - __field(uint64_t, id) 40 39 __string(name, sched_job->sched->name) 41 40 __field(u32, job_count) 42 41 __field(int, hw_job_count) ··· 46 47 ), 47 48 48 49 TP_fast_assign( 49 - __entry->id = sched_job->id; 50 50 __assign_str(name); 51 51 __entry->job_count = spsc_queue_count(&entity->job_queue); 52 52 __entry->hw_job_count = atomic_read( ··· 55 57 __entry->fence_seqno = sched_job->s_fence->finished.seqno; 56 58 __entry->client_id = sched_job->s_fence->drm_client_id; 57 59 ), 58 - TP_printk("dev=%s, id=%llu, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu", 59 - __get_str(dev), __entry->id, 60 + TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu", 61 + __get_str(dev), 60 62 __entry->fence_context, __entry->fence_seqno, __get_str(name), 61 63 __entry->job_count, __entry->hw_job_count, __entry->client_id) 62 64 ); ··· 93 95 TP_STRUCT__entry( 94 96 __field(u64, fence_context) 95 97 __field(u64, fence_seqno) 96 - __field(u64, id) 97 98 __field(u64, ctx) 98 99 __field(u64, seqno) 99 100 ), ··· 100 103 TP_fast_assign( 101 104 __entry->fence_context = sched_job->s_fence->finished.context; 102 105 __entry->fence_seqno = sched_job->s_fence->finished.seqno; 103 - __entry->id = sched_job->id; 104 106 __entry->ctx = fence->context; 105 107 __entry->seqno = fence->seqno; 106 108 ), 107 - TP_printk("fence=%llu:%llu, id=%llu depends on fence=%llu:%llu", 108 - __entry->fence_context, __entry->fence_seqno, __entry->id, 109 + TP_printk("fence=%llu:%llu depends on fence=%llu:%llu", 110 + __entry->fence_context, __entry->fence_seqno, 109 111 __entry->ctx, __entry->seqno) 110 112 ); 111 113 ··· 114 118 TP_STRUCT__entry( 115 119 __field(u64, fence_context) 116 120 __field(u64, fence_seqno) 117 - __field(uint64_t, id) 118 121 __field(u64, ctx) 119 122 __field(u64, seqno) 120 123 
), ··· 121 126 TP_fast_assign( 122 127 __entry->fence_context = sched_job->s_fence->finished.context; 123 128 __entry->fence_seqno = sched_job->s_fence->finished.seqno; 124 - __entry->id = sched_job->id; 125 129 __entry->ctx = fence->context; 126 130 __entry->seqno = fence->seqno; 127 131 ), 128 - TP_printk("fence=%llu:%llu, id=%llu depends on unsignalled fence=%llu:%llu", 129 - __entry->fence_context, __entry->fence_seqno, __entry->id, 132 + TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu", 133 + __entry->fence_context, __entry->fence_seqno, 130 134 __entry->ctx, __entry->seqno) 131 135 ); 132 136
-1
drivers/gpu/drm/scheduler/sched_main.c
··· 855 855 856 856 job->sched = sched; 857 857 job->s_priority = entity->priority; 858 - job->id = atomic64_inc_return(&sched->job_id_count); 859 858 860 859 drm_sched_fence_init(job->s_fence, job->entity); 861 860 }
-3
include/drm/gpu_scheduler.h
··· 326 326 * @finish_cb: the callback for the finished fence. 327 327 * @credits: the number of credits this job contributes to the scheduler 328 328 * @work: Helper to reschedule job kill to different context. 329 - * @id: a unique id assigned to each job scheduled on the scheduler. 330 329 * @karma: increment on every hang caused by this job. If this exceeds the hang 331 330 * limit of the scheduler then the job is marked guilty and will not 332 331 * be scheduled further. ··· 338 339 * to schedule the job. 339 340 */ 340 341 struct drm_sched_job { 341 - u64 id; 342 - 343 342 /** 344 343 * @submit_ts: 345 344 *