Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/sched: Re-group and rename the entity run-queue lock

When writing to a drm_sched_entity's run-queue, writers are protected
through the lock drm_sched_entity.rq_lock. This naming, however,
frequently collides with the separate internal lock of struct
drm_sched_rq, resulting in uses like this:

spin_lock(&entity->rq_lock);
spin_lock(&entity->rq->lock);

Rename drm_sched_entity.rq_lock to improve readability. While at it,
re-order that struct's members to make it more obvious what the lock
protects.

v2:
* Rename some rq_lock straddlers in kerneldoc, improve commit text. (Philipp)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Luben Tuikov <ltuikov89@gmail.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Philipp Stanner <pstanner@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
[pstanner: Fix typo in docstring]
Signed-off-by: Philipp Stanner <pstanner@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241016122013.7857-5-tursulin@igalia.com

Authored by Tvrtko Ursulin and committed by Philipp Stanner.
f93126f5 a6f46283

+26 -25
+14 -14
drivers/gpu/drm/scheduler/sched_entity.c
··· 105 105 /* We start in an idle state. */ 106 106 complete_all(&entity->entity_idle); 107 107 108 - spin_lock_init(&entity->rq_lock); 108 + spin_lock_init(&entity->lock); 109 109 spsc_queue_init(&entity->job_queue); 110 110 111 111 atomic_set(&entity->fence_seq, 0); ··· 133 133 { 134 134 WARN_ON(!num_sched_list || !sched_list); 135 135 136 - spin_lock(&entity->rq_lock); 136 + spin_lock(&entity->lock); 137 137 entity->sched_list = sched_list; 138 138 entity->num_sched_list = num_sched_list; 139 - spin_unlock(&entity->rq_lock); 139 + spin_unlock(&entity->lock); 140 140 } 141 141 EXPORT_SYMBOL(drm_sched_entity_modify_sched); 142 142 ··· 244 244 if (!entity->rq) 245 245 return; 246 246 247 - spin_lock(&entity->rq_lock); 247 + spin_lock(&entity->lock); 248 248 entity->stopped = true; 249 249 drm_sched_rq_remove_entity(entity->rq, entity); 250 - spin_unlock(&entity->rq_lock); 250 + spin_unlock(&entity->lock); 251 251 252 252 /* Make sure this entity is not used by the scheduler at the moment */ 253 253 wait_for_completion(&entity->entity_idle); ··· 396 396 void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 397 397 enum drm_sched_priority priority) 398 398 { 399 - spin_lock(&entity->rq_lock); 399 + spin_lock(&entity->lock); 400 400 entity->priority = priority; 401 - spin_unlock(&entity->rq_lock); 401 + spin_unlock(&entity->lock); 402 402 } 403 403 EXPORT_SYMBOL(drm_sched_entity_set_priority); 404 404 ··· 515 515 516 516 next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue)); 517 517 if (next) { 518 - spin_lock(&entity->rq_lock); 518 + spin_lock(&entity->lock); 519 519 drm_sched_rq_update_fifo_locked(entity, 520 520 next->submit_ts); 521 - spin_unlock(&entity->rq_lock); 521 + spin_unlock(&entity->lock); 522 522 } 523 523 } 524 524 ··· 559 559 if (fence && !dma_fence_is_signaled(fence)) 560 560 return; 561 561 562 - spin_lock(&entity->rq_lock); 562 + spin_lock(&entity->lock); 563 563 sched = drm_sched_pick_best(entity->sched_list, 
entity->num_sched_list); 564 564 rq = sched ? sched->sched_rq[entity->priority] : NULL; 565 565 if (rq != entity->rq) { 566 566 drm_sched_rq_remove_entity(entity->rq, entity); 567 567 entity->rq = rq; 568 568 } 569 - spin_unlock(&entity->rq_lock); 569 + spin_unlock(&entity->lock); 570 570 571 571 if (entity->num_sched_list == 1) 572 572 entity->sched_list = NULL; ··· 605 605 struct drm_sched_rq *rq; 606 606 607 607 /* Add the entity to the run queue */ 608 - spin_lock(&entity->rq_lock); 608 + spin_lock(&entity->lock); 609 609 if (entity->stopped) { 610 - spin_unlock(&entity->rq_lock); 610 + spin_unlock(&entity->lock); 611 611 612 612 DRM_ERROR("Trying to push to a killed entity\n"); 613 613 return; ··· 621 621 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) 622 622 drm_sched_rq_update_fifo_locked(entity, submit_ts); 623 623 624 - spin_unlock(&entity->rq_lock); 624 + spin_unlock(&entity->lock); 625 625 626 626 drm_sched_wakeup(sched); 627 627 }
+1 -1
drivers/gpu/drm/scheduler/sched_main.c
··· 170 170 * for entity from within concurrent drm_sched_entity_select_rq and the 171 171 * other to update the rb tree structure. 172 172 */ 173 - lockdep_assert_held(&entity->rq_lock); 173 + lockdep_assert_held(&entity->lock); 174 174 175 175 spin_lock(&entity->rq->lock); 176 176
+11 -10
include/drm/gpu_scheduler.h
··· 97 97 struct list_head list; 98 98 99 99 /** 100 + * @lock: 101 + * 102 + * Lock protecting the run-queue (@rq) to which this entity belongs, 103 + * @priority and the list of schedulers (@sched_list, @num_sched_list). 104 + */ 105 + spinlock_t lock; 106 + 107 + /** 100 108 * @rq: 101 109 * 102 110 * Runqueue on which this entity is currently scheduled. 103 111 * 104 112 * FIXME: Locking is very unclear for this. Writers are protected by 105 - * @rq_lock, but readers are generally lockless and seem to just race 106 - * with not even a READ_ONCE. 113 + * @lock, but readers are generally lockless and seem to just race with 114 + * not even a READ_ONCE. 107 115 */ 108 116 struct drm_sched_rq *rq; 109 117 ··· 144 136 * @priority: 145 137 * 146 138 * Priority of the entity. This can be modified by calling 147 - * drm_sched_entity_set_priority(). Protected by &rq_lock. 139 + * drm_sched_entity_set_priority(). Protected by @lock. 148 140 */ 149 141 enum drm_sched_priority priority; 150 - 151 - /** 152 - * @rq_lock: 153 - * 154 - * Lock to modify the runqueue to which this entity belongs. 155 - */ 156 - spinlock_t rq_lock; 157 142 158 143 /** 159 144 * @job_queue: the list of jobs of this entity.