Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: refine the job naming for amdgpu_job and amdgpu_sched_job

Use consistent naming across functions.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: David Zhou <david1.zhou@amd.com>
Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>

Authored by Junwei Zhang; committed by Alex Deucher.
commit 4c7eb91c (parent bf60efd3)

+71 -69
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1275 1275 uint32_t num_ibs; 1276 1276 struct mutex job_lock; 1277 1277 struct amdgpu_user_fence uf; 1278 - int (*free_job)(struct amdgpu_job *sched_job); 1278 + int (*free_job)(struct amdgpu_job *job); 1279 1279 }; 1280 1280 1281 1281 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 778 778 return 0; 779 779 } 780 780 781 - static int amdgpu_cs_free_job(struct amdgpu_job *sched_job) 781 + static int amdgpu_cs_free_job(struct amdgpu_job *job) 782 782 { 783 783 int i; 784 - if (sched_job->ibs) 785 - for (i = 0; i < sched_job->num_ibs; i++) 786 - amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 787 - kfree(sched_job->ibs); 788 - if (sched_job->uf.bo) 789 - drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base); 784 + if (job->ibs) 785 + for (i = 0; i < job->num_ibs; i++) 786 + amdgpu_ib_free(job->adev, &job->ibs[i]); 787 + kfree(job->ibs); 788 + if (job->uf.bo) 789 + drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); 790 790 return 0; 791 791 } 792 792
+18 -18
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
··· 27 27 #include <drm/drmP.h> 28 28 #include "amdgpu.h" 29 29 30 - static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job) 30 + static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) 31 31 { 32 - struct amdgpu_job *sched_job = (struct amdgpu_job *)job; 33 - return amdgpu_sync_get_fence(&sched_job->ibs->sync); 32 + struct amdgpu_job *job = (struct amdgpu_job *)sched_job; 33 + return amdgpu_sync_get_fence(&job->ibs->sync); 34 34 } 35 35 36 - static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job) 36 + static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) 37 37 { 38 38 struct amdgpu_fence *fence = NULL; 39 - struct amdgpu_job *sched_job; 39 + struct amdgpu_job *job; 40 40 int r; 41 41 42 - if (!job) { 42 + if (!sched_job) { 43 43 DRM_ERROR("job is null\n"); 44 44 return NULL; 45 45 } 46 - sched_job = (struct amdgpu_job *)job; 47 - mutex_lock(&sched_job->job_lock); 48 - r = amdgpu_ib_schedule(sched_job->adev, 49 - sched_job->num_ibs, 50 - sched_job->ibs, 51 - sched_job->base.owner); 46 + job = (struct amdgpu_job *)sched_job; 47 + mutex_lock(&job->job_lock); 48 + r = amdgpu_ib_schedule(job->adev, 49 + job->num_ibs, 50 + job->ibs, 51 + job->base.owner); 52 52 if (r) { 53 53 DRM_ERROR("Error scheduling IBs (%d)\n", r); 54 54 goto err; 55 55 } 56 56 57 - fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); 57 + fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence); 58 58 59 59 err: 60 - if (sched_job->free_job) 61 - sched_job->free_job(sched_job); 60 + if (job->free_job) 61 + job->free_job(job); 62 62 63 - mutex_unlock(&sched_job->job_lock); 64 - fence_put(&sched_job->base.s_fence->base); 65 - kfree(sched_job); 63 + mutex_unlock(&job->job_lock); 64 + fence_put(&job->base.s_fence->base); 65 + kfree(job); 66 66 return fence ? &fence->base : NULL; 67 67 } 68 68
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 805 805 } 806 806 807 807 static int amdgpu_uvd_free_job( 808 - struct amdgpu_job *sched_job) 808 + struct amdgpu_job *job) 809 809 { 810 - amdgpu_ib_free(sched_job->adev, sched_job->ibs); 811 - kfree(sched_job->ibs); 810 + amdgpu_ib_free(job->adev, job->ibs); 811 + kfree(job->ibs); 812 812 return 0; 813 813 } 814 814
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 342 342 } 343 343 344 344 static int amdgpu_vce_free_job( 345 - struct amdgpu_job *sched_job) 345 + struct amdgpu_job *job) 346 346 { 347 - amdgpu_ib_free(sched_job->adev, sched_job->ibs); 348 - kfree(sched_job->ibs); 347 + amdgpu_ib_free(job->adev, job->ibs); 348 + kfree(job->ibs); 349 349 return 0; 350 350 } 351 351
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 316 316 } 317 317 } 318 318 319 - int amdgpu_vm_free_job(struct amdgpu_job *sched_job) 319 + int amdgpu_vm_free_job(struct amdgpu_job *job) 320 320 { 321 321 int i; 322 - for (i = 0; i < sched_job->num_ibs; i++) 323 - amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 324 - kfree(sched_job->ibs); 322 + for (i = 0; i < job->num_ibs; i++) 323 + amdgpu_ib_free(job->adev, &job->ibs[i]); 324 + kfree(job->ibs); 325 325 return 0; 326 326 } 327 327
+33 -31
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
··· 68 68 amd_sched_rq_select_job(struct amd_sched_rq *rq) 69 69 { 70 70 struct amd_sched_entity *entity; 71 - struct amd_sched_job *job; 71 + struct amd_sched_job *sched_job; 72 72 73 73 spin_lock(&rq->lock); 74 74 75 75 entity = rq->current_entity; 76 76 if (entity) { 77 77 list_for_each_entry_continue(entity, &rq->entities, list) { 78 - job = amd_sched_entity_pop_job(entity); 79 - if (job) { 78 + sched_job = amd_sched_entity_pop_job(entity); 79 + if (sched_job) { 80 80 rq->current_entity = entity; 81 81 spin_unlock(&rq->lock); 82 - return job; 82 + return sched_job; 83 83 } 84 84 } 85 85 } 86 86 87 87 list_for_each_entry(entity, &rq->entities, list) { 88 88 89 - job = amd_sched_entity_pop_job(entity); 90 - if (job) { 89 + sched_job = amd_sched_entity_pop_job(entity); 90 + if (sched_job) { 91 91 rq->current_entity = entity; 92 92 spin_unlock(&rq->lock); 93 - return job; 93 + return sched_job; 94 94 } 95 95 96 96 if (entity == rq->current_entity) ··· 208 208 amd_sched_entity_pop_job(struct amd_sched_entity *entity) 209 209 { 210 210 struct amd_gpu_scheduler *sched = entity->scheduler; 211 - struct amd_sched_job *job; 211 + struct amd_sched_job *sched_job; 212 212 213 213 if (ACCESS_ONCE(entity->dependency)) 214 214 return NULL; 215 215 216 - if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job))) 216 + if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) 217 217 return NULL; 218 218 219 - while ((entity->dependency = sched->ops->dependency(job))) { 219 + while ((entity->dependency = sched->ops->dependency(sched_job))) { 220 220 221 221 if (fence_add_callback(entity->dependency, &entity->cb, 222 222 amd_sched_entity_wakeup)) ··· 225 225 return NULL; 226 226 } 227 227 228 - return job; 228 + return sched_job; 229 229 } 230 230 231 231 /** 232 232 * Helper to submit a job to the job queue 233 233 * 234 - * @job The pointer to job required to submit 234 + * @sched_job The pointer to job required to submit 235 235 * 236 236 * Returns true if we 
could submit the job. 237 237 */ 238 - static bool amd_sched_entity_in(struct amd_sched_job *job) 238 + static bool amd_sched_entity_in(struct amd_sched_job *sched_job) 239 239 { 240 - struct amd_sched_entity *entity = job->s_entity; 240 + struct amd_sched_entity *entity = sched_job->s_entity; 241 241 bool added, first = false; 242 242 243 243 spin_lock(&entity->queue_lock); 244 - added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job); 244 + added = kfifo_in(&entity->job_queue, &sched_job, 245 + sizeof(sched_job)) == sizeof(sched_job); 245 246 246 - if (added && kfifo_len(&entity->job_queue) == sizeof(job)) 247 + if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job)) 247 248 first = true; 248 249 249 250 spin_unlock(&entity->queue_lock); 250 251 251 252 /* first job wakes up scheduler */ 252 253 if (first) 253 - amd_sched_wakeup(job->sched); 254 + amd_sched_wakeup(sched_job->sched); 254 255 255 256 return added; 256 257 } ··· 259 258 /** 260 259 * Submit a job to the job queue 261 260 * 262 - * @job The pointer to job required to submit 261 + * @sched_job The pointer to job required to submit 263 262 * 264 263 * Returns 0 for success, negative error code otherwise. 
265 264 */ ··· 305 304 static struct amd_sched_job * 306 305 amd_sched_select_job(struct amd_gpu_scheduler *sched) 307 306 { 308 - struct amd_sched_job *job; 307 + struct amd_sched_job *sched_job; 309 308 310 309 if (!amd_sched_ready(sched)) 311 310 return NULL; 312 311 313 312 /* Kernel run queue has higher priority than normal run queue*/ 314 - job = amd_sched_rq_select_job(&sched->kernel_rq); 315 - if (job == NULL) 316 - job = amd_sched_rq_select_job(&sched->sched_rq); 313 + sched_job = amd_sched_rq_select_job(&sched->kernel_rq); 314 + if (sched_job == NULL) 315 + sched_job = amd_sched_rq_select_job(&sched->sched_rq); 317 316 318 - return job; 317 + return sched_job; 319 318 } 320 319 321 320 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) ··· 341 340 while (!kthread_should_stop()) { 342 341 struct amd_sched_entity *entity; 343 342 struct amd_sched_fence *s_fence; 344 - struct amd_sched_job *job; 343 + struct amd_sched_job *sched_job; 345 344 struct fence *fence; 346 345 347 346 wait_event_interruptible(sched->wake_up_worker, 348 347 kthread_should_stop() || 349 - (job = amd_sched_select_job(sched))); 348 + (sched_job = amd_sched_select_job(sched))); 350 349 351 - if (!job) 350 + if (!sched_job) 352 351 continue; 353 352 354 - entity = job->s_entity; 355 - s_fence = job->s_fence; 353 + entity = sched_job->s_entity; 354 + s_fence = sched_job->s_fence; 356 355 atomic_inc(&sched->hw_rq_count); 357 - fence = sched->ops->run_job(job); 356 + fence = sched->ops->run_job(sched_job); 358 357 if (fence) { 359 358 r = fence_add_callback(fence, &s_fence->cb, 360 359 amd_sched_process_job); ··· 368 367 amd_sched_process_job(NULL, &s_fence->cb); 369 368 } 370 369 371 - count = kfifo_out(&entity->job_queue, &job, sizeof(job)); 372 - WARN_ON(count != sizeof(job)); 370 + count = kfifo_out(&entity->job_queue, &sched_job, 371 + sizeof(sched_job)); 372 + WARN_ON(count != sizeof(sched_job)); 373 373 wake_up(&sched->job_scheduled); 374 374 } 375 375 return 0;
+2 -2
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
··· 91 91 * these functions should be implemented in driver side 92 92 */ 93 93 struct amd_sched_backend_ops { 94 - struct fence *(*dependency)(struct amd_sched_job *job); 95 - struct fence *(*run_job)(struct amd_sched_job *job); 94 + struct fence *(*dependency)(struct amd_sched_job *sched_job); 95 + struct fence *(*run_job)(struct amd_sched_job *sched_job); 96 96 }; 97 97 98 98 /**