Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/sched: Use struct for drm_sched_init() params

drm_sched_init() has a great many parameters and upcoming new
functionality for the scheduler might add even more. Generally, the
great number of parameters reduces readability and has already caused
one misnaming, addressed in:

commit 6f1cacf4eba7 ("drm/nouveau: Improve variable name in
nouveau_sched_init()").

Introduce a new struct for the scheduler init parameters and port all
users.

Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Acked-by: Matthew Brost <matthew.brost@intel.com> # for Xe
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com> # for Panfrost and Panthor
Reviewed-by: Christian Gmeiner <cgmeiner@igalia.com> # for Etnaviv
Reviewed-by: Frank Binns <frank.binns@imgtec.com> # for Imagination
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com> # for Sched
Reviewed-by: Maíra Canal <mcanal@igalia.com> # for v3d
Reviewed-by: Danilo Krummrich <dakr@kernel.org>
Reviewed-by: Lizhi Hou <lizhi.hou@amd.com> # for amdxdna
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20250211111422.21235-2-phasta@kernel.org

+210 -154
+9 -3
drivers/accel/amdxdna/aie2_ctx.c
··· 516 516 { 517 517 struct amdxdna_client *client = hwctx->client; 518 518 struct amdxdna_dev *xdna = client->xdna; 519 + const struct drm_sched_init_args args = { 520 + .ops = &sched_ops, 521 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 522 + .credit_limit = HWCTX_MAX_CMDS, 523 + .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT), 524 + .name = hwctx->name, 525 + .dev = xdna->ddev.dev, 526 + }; 519 527 struct drm_gpu_scheduler *sched; 520 528 struct amdxdna_hwctx_priv *priv; 521 529 struct amdxdna_gem_obj *heap; ··· 581 573 might_lock(&priv->io_lock); 582 574 fs_reclaim_release(GFP_KERNEL); 583 575 584 - ret = drm_sched_init(sched, &sched_ops, NULL, DRM_SCHED_PRIORITY_COUNT, 585 - HWCTX_MAX_CMDS, 0, msecs_to_jiffies(HWCTX_MAX_TIMEOUT), 586 - NULL, NULL, hwctx->name, xdna->ddev.dev); 576 + ret = drm_sched_init(sched, &args); 587 577 if (ret) { 588 578 XDNA_ERR(xdna, "Failed to init DRM scheduler. ret %d", ret); 589 579 goto free_cmd_bufs;
+12 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2823 2823 2824 2824 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev) 2825 2825 { 2826 + struct drm_sched_init_args args = { 2827 + .ops = &amdgpu_sched_ops, 2828 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 2829 + .timeout_wq = adev->reset_domain->wq, 2830 + .dev = adev->dev, 2831 + }; 2826 2832 long timeout; 2827 2833 int r, i; 2828 2834 ··· 2854 2848 break; 2855 2849 } 2856 2850 2857 - r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, NULL, 2858 - DRM_SCHED_PRIORITY_COUNT, 2859 - ring->num_hw_submission, 0, 2860 - timeout, adev->reset_domain->wq, 2861 - ring->sched_score, ring->name, 2862 - adev->dev); 2851 + args.timeout = timeout; 2852 + args.credit_limit = ring->num_hw_submission; 2853 + args.score = ring->sched_score; 2854 + args.name = ring->name; 2855 + 2856 + r = drm_sched_init(&ring->sched, &args); 2863 2857 if (r) { 2864 2858 DRM_ERROR("Failed to create scheduler on ring %s.\n", 2865 2859 ring->name);
+10 -10
drivers/gpu/drm/etnaviv/etnaviv_sched.c
··· 144 144 145 145 int etnaviv_sched_init(struct etnaviv_gpu *gpu) 146 146 { 147 - int ret; 147 + const struct drm_sched_init_args args = { 148 + .ops = &etnaviv_sched_ops, 149 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 150 + .credit_limit = etnaviv_hw_jobs_limit, 151 + .hang_limit = etnaviv_job_hang_limit, 152 + .timeout = msecs_to_jiffies(500), 153 + .name = dev_name(gpu->dev), 154 + .dev = gpu->dev, 155 + }; 148 156 149 - ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL, 150 - DRM_SCHED_PRIORITY_COUNT, 151 - etnaviv_hw_jobs_limit, etnaviv_job_hang_limit, 152 - msecs_to_jiffies(500), NULL, NULL, 153 - dev_name(gpu->dev), gpu->dev); 154 - if (ret) 155 - return ret; 156 - 157 - return 0; 157 + return drm_sched_init(&gpu->sched, &args); 158 158 } 159 159 160 160 void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
+12 -6
drivers/gpu/drm/imagination/pvr_queue.c
··· 1210 1210 }, 1211 1211 }; 1212 1212 struct pvr_device *pvr_dev = ctx->pvr_dev; 1213 + const struct drm_sched_init_args sched_args = { 1214 + .ops = &pvr_queue_sched_ops, 1215 + .submit_wq = pvr_dev->sched_wq, 1216 + .num_rqs = 1, 1217 + .credit_limit = 64 * 1024, 1218 + .hang_limit = 1, 1219 + .timeout = msecs_to_jiffies(500), 1220 + .timeout_wq = pvr_dev->sched_wq, 1221 + .name = "pvr-queue", 1222 + .dev = pvr_dev->base.dev, 1223 + }; 1213 1224 struct drm_gpu_scheduler *sched; 1214 1225 struct pvr_queue *queue; 1215 1226 int ctx_state_size, err; ··· 1293 1282 1294 1283 queue->timeline_ufo.value = cpu_map; 1295 1284 1296 - err = drm_sched_init(&queue->scheduler, 1297 - &pvr_queue_sched_ops, 1298 - pvr_dev->sched_wq, 1, 64 * 1024, 1, 1299 - msecs_to_jiffies(500), 1300 - pvr_dev->sched_wq, NULL, "pvr-queue", 1301 - pvr_dev->base.dev); 1285 + err = drm_sched_init(&queue->scheduler, &sched_args); 1302 1286 if (err) 1303 1287 goto err_release_ufo; 1304 1288
+10 -6
drivers/gpu/drm/lima/lima_sched.c
··· 515 515 { 516 516 unsigned int timeout = lima_sched_timeout_ms > 0 ? 517 517 lima_sched_timeout_ms : 10000; 518 + const struct drm_sched_init_args args = { 519 + .ops = &lima_sched_ops, 520 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 521 + .credit_limit = 1, 522 + .hang_limit = lima_job_hang_limit, 523 + .timeout = msecs_to_jiffies(timeout), 524 + .name = name, 525 + .dev = pipe->ldev->dev, 526 + }; 518 527 519 528 pipe->fence_context = dma_fence_context_alloc(1); 520 529 spin_lock_init(&pipe->fence_lock); 521 530 522 531 INIT_WORK(&pipe->recover_work, lima_sched_recover_work); 523 532 524 - return drm_sched_init(&pipe->base, &lima_sched_ops, NULL, 525 - DRM_SCHED_PRIORITY_COUNT, 526 - 1, 527 - lima_job_hang_limit, 528 - msecs_to_jiffies(timeout), NULL, 529 - NULL, name, pipe->ldev->dev); 533 + return drm_sched_init(&pipe->base, &args); 530 534 } 531 535 532 536 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
+9 -8
drivers/gpu/drm/msm/msm_ringbuffer.c
··· 59 59 struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, 60 60 void *memptrs, uint64_t memptrs_iova) 61 61 { 62 + struct drm_sched_init_args args = { 63 + .ops = &msm_sched_ops, 64 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 65 + .credit_limit = num_hw_submissions, 66 + .timeout = MAX_SCHEDULE_TIMEOUT, 67 + .dev = gpu->dev->dev, 68 + }; 62 69 struct msm_ringbuffer *ring; 63 - long sched_timeout; 64 70 char name[32]; 65 71 int ret; 66 72 ··· 93 87 } 94 88 95 89 msm_gem_object_set_name(ring->bo, "ring%d", id); 90 + args.name = to_msm_bo(ring->bo)->name, 96 91 97 92 ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); 98 93 ring->next = ring->start; ··· 102 95 ring->memptrs = memptrs; 103 96 ring->memptrs_iova = memptrs_iova; 104 97 105 - /* currently managing hangcheck ourselves: */ 106 - sched_timeout = MAX_SCHEDULE_TIMEOUT; 107 - 108 - ret = drm_sched_init(&ring->sched, &msm_sched_ops, NULL, 109 - DRM_SCHED_PRIORITY_COUNT, 110 - num_hw_submissions, 0, sched_timeout, 111 - NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev); 98 + ret = drm_sched_init(&ring->sched, &args); 112 99 if (ret) { 113 100 goto fail; 114 101 }
+11 -5
drivers/gpu/drm/nouveau/nouveau_sched.c
··· 404 404 { 405 405 struct drm_gpu_scheduler *drm_sched = &sched->base; 406 406 struct drm_sched_entity *entity = &sched->entity; 407 - const long timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS); 407 + struct drm_sched_init_args args = { 408 + .ops = &nouveau_sched_ops, 409 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 410 + .credit_limit = credit_limit, 411 + .timeout = msecs_to_jiffies(NOUVEAU_SCHED_JOB_TIMEOUT_MS), 412 + .name = "nouveau_sched", 413 + .dev = drm->dev->dev 414 + }; 408 415 int ret; 409 416 410 417 if (!wq) { ··· 423 416 sched->wq = wq; 424 417 } 425 418 426 - ret = drm_sched_init(drm_sched, &nouveau_sched_ops, wq, 427 - NOUVEAU_SCHED_PRIORITY_COUNT, 428 - credit_limit, 0, timeout, 429 - NULL, NULL, "nouveau_sched", drm->dev->dev); 419 + args.submit_wq = wq, 420 + 421 + ret = drm_sched_init(drm_sched, &args); 430 422 if (ret) 431 423 goto fail_wq; 432 424
+11 -9
drivers/gpu/drm/panfrost/panfrost_job.c
··· 836 836 837 837 int panfrost_job_init(struct panfrost_device *pfdev) 838 838 { 839 + struct drm_sched_init_args args = { 840 + .ops = &panfrost_sched_ops, 841 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 842 + .credit_limit = 2, 843 + .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), 844 + .timeout_wq = pfdev->reset.wq, 845 + .name = "pan_js", 846 + .dev = pfdev->dev, 847 + }; 839 848 struct panfrost_job_slot *js; 840 - unsigned int nentries = 2; 841 849 int ret, j; 842 850 843 851 /* All GPUs have two entries per queue, but without jobchain ··· 853 845 * so let's just advertise one entry in that case. 854 846 */ 855 847 if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) 856 - nentries = 1; 848 + args.credit_limit = 1; 857 849 858 850 pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL); 859 851 if (!js) ··· 883 875 for (j = 0; j < NUM_JOB_SLOTS; j++) { 884 876 js->queue[j].fence_context = dma_fence_context_alloc(1); 885 877 886 - ret = drm_sched_init(&js->queue[j].sched, 887 - &panfrost_sched_ops, NULL, 888 - DRM_SCHED_PRIORITY_COUNT, 889 - nentries, 0, 890 - msecs_to_jiffies(JOB_TIMEOUT_MS), 891 - pfdev->reset.wq, 892 - NULL, "pan_js", pfdev->dev); 878 + ret = drm_sched_init(&js->queue[j].sched, &args); 893 879 if (ret) { 894 880 dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret); 895 881 goto err_sched;
+11 -5
drivers/gpu/drm/panthor/panthor_mmu.c
··· 2311 2311 u64 full_va_range = 1ull << va_bits; 2312 2312 struct drm_gem_object *dummy_gem; 2313 2313 struct drm_gpu_scheduler *sched; 2314 + const struct drm_sched_init_args sched_args = { 2315 + .ops = &panthor_vm_bind_ops, 2316 + .submit_wq = ptdev->mmu->vm.wq, 2317 + .num_rqs = 1, 2318 + .credit_limit = 1, 2319 + /* Bind operations are synchronous for now, no timeout needed. */ 2320 + .timeout = MAX_SCHEDULE_TIMEOUT, 2321 + .name = "panthor-vm-bind", 2322 + .dev = ptdev->base.dev, 2323 + }; 2314 2324 struct io_pgtable_cfg pgtbl_cfg; 2315 2325 u64 mair, min_va, va_range; 2316 2326 struct panthor_vm *vm; ··· 2378 2368 goto err_mm_takedown; 2379 2369 } 2380 2370 2381 - /* Bind operations are synchronous for now, no timeout needed. */ 2382 - ret = drm_sched_init(&vm->sched, &panthor_vm_bind_ops, ptdev->mmu->vm.wq, 2383 - 1, 1, 0, 2384 - MAX_SCHEDULE_TIMEOUT, NULL, NULL, 2385 - "panthor-vm-bind", ptdev->base.dev); 2371 + ret = drm_sched_init(&vm->sched, &sched_args); 2386 2372 if (ret) 2387 2373 goto err_free_io_pgtable; 2388 2374
+17 -11
drivers/gpu/drm/panthor/panthor_sched.c
··· 3289 3289 group_create_queue(struct panthor_group *group, 3290 3290 const struct drm_panthor_queue_create *args) 3291 3291 { 3292 + const struct drm_sched_init_args sched_args = { 3293 + .ops = &panthor_queue_sched_ops, 3294 + .submit_wq = group->ptdev->scheduler->wq, 3295 + .num_rqs = 1, 3296 + /* 3297 + * The credit limit argument tells us the total number of 3298 + * instructions across all CS slots in the ringbuffer, with 3299 + * some jobs requiring twice as many as others, depending on 3300 + * their profiling status. 3301 + */ 3302 + .credit_limit = args->ringbuf_size / sizeof(u64), 3303 + .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), 3304 + .timeout_wq = group->ptdev->reset.wq, 3305 + .name = "panthor-queue", 3306 + .dev = group->ptdev->base.dev, 3307 + }; 3292 3308 struct drm_gpu_scheduler *drm_sched; 3293 3309 struct panthor_queue *queue; 3294 3310 int ret; ··· 3375 3359 if (ret) 3376 3360 goto err_free_queue; 3377 3361 3378 - /* 3379 - * Credit limit argument tells us the total number of instructions 3380 - * across all CS slots in the ringbuffer, with some jobs requiring 3381 - * twice as many as others, depending on their profiling status. 3382 - */ 3383 - ret = drm_sched_init(&queue->scheduler, &panthor_queue_sched_ops, 3384 - group->ptdev->scheduler->wq, 1, 3385 - args->ringbuf_size / sizeof(u64), 3386 - 0, msecs_to_jiffies(JOB_TIMEOUT_MS), 3387 - group->ptdev->reset.wq, 3388 - NULL, "panthor-queue", group->ptdev->base.dev); 3362 + ret = drm_sched_init(&queue->scheduler, &sched_args); 3389 3363 if (ret) 3390 3364 goto err_free_queue; 3391 3365
+17 -33
drivers/gpu/drm/scheduler/sched_main.c
··· 1244 1244 * drm_sched_init - Init a gpu scheduler instance 1245 1245 * 1246 1246 * @sched: scheduler instance 1247 - * @ops: backend operations for this scheduler 1248 - * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is 1249 - * allocated and used 1250 - * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT 1251 - * @credit_limit: the number of credits this scheduler can hold from all jobs 1252 - * @hang_limit: number of times to allow a job to hang before dropping it 1253 - * @timeout: timeout value in jiffies for the scheduler 1254 - * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is 1255 - * used 1256 - * @score: optional score atomic shared with other schedulers 1257 - * @name: name used for debugging 1258 - * @dev: target &struct device 1247 + * @args: scheduler initialization arguments 1259 1248 * 1260 1249 * Return 0 on success, otherwise error code. 1261 1250 */ 1262 - int drm_sched_init(struct drm_gpu_scheduler *sched, 1263 - const struct drm_sched_backend_ops *ops, 1264 - struct workqueue_struct *submit_wq, 1265 - u32 num_rqs, u32 credit_limit, unsigned int hang_limit, 1266 - long timeout, struct workqueue_struct *timeout_wq, 1267 - atomic_t *score, const char *name, struct device *dev) 1251 + int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args) 1268 1252 { 1269 1253 int i; 1270 1254 1271 - sched->ops = ops; 1272 - sched->credit_limit = credit_limit; 1273 - sched->name = name; 1274 - sched->timeout = timeout; 1275 - sched->timeout_wq = timeout_wq ? : system_wq; 1276 - sched->hang_limit = hang_limit; 1277 - sched->score = score ? score : &sched->_score; 1278 - sched->dev = dev; 1255 + sched->ops = args->ops; 1256 + sched->credit_limit = args->credit_limit; 1257 + sched->name = args->name; 1258 + sched->timeout = args->timeout; 1259 + sched->hang_limit = args->hang_limit; 1260 + sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq; 1261 + sched->score = args->score ? args->score : &sched->_score; 1262 + sched->dev = args->dev; 1279 1263 1280 - if (num_rqs > DRM_SCHED_PRIORITY_COUNT) { 1264 + if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) { 1281 1265 /* This is a gross violation--tell drivers what the problem is. 1282 1266 */ 1283 1267 drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n", ··· 1276 1292 return 0; 1277 1293 } 1278 1294 1279 - if (submit_wq) { 1280 - sched->submit_wq = submit_wq; 1295 + if (args->submit_wq) { 1296 + sched->submit_wq = args->submit_wq; 1281 1297 sched->own_submit_wq = false; 1282 1298 } else { 1283 1299 #ifdef CONFIG_LOCKDEP 1284 - sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 1300 + sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name, 1285 1301 WQ_MEM_RECLAIM, 1286 1302 &drm_sched_lockdep_map); 1287 1303 #else 1288 - sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 1304 + sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM); 1289 1305 #endif 1290 1306 if (!sched->submit_wq) 1291 1307 return -ENOMEM; ··· 1293 1309 sched->own_submit_wq = true; 1294 1310 } 1295 1311 1296 - sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq), 1312 + sched->sched_rq = kmalloc_array(args->num_rqs, sizeof(*sched->sched_rq), 1297 1313 GFP_KERNEL | __GFP_ZERO); 1298 1314 if (!sched->sched_rq) 1299 1315 goto Out_check_own; 1300 1316 sched->num_rqs = num_rqs; 1301 1317 for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) { 1302 1318 sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL); 1303 1319 if (!sched->sched_rq[i])
+26 -39
drivers/gpu/drm/v3d/v3d_sched.c
··· 820 820 .free_job = v3d_cpu_job_free 821 821 }; 822 822 823 + static int 824 + v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops, 825 + enum v3d_queue queue, const char *name) 826 + { 827 + struct drm_sched_init_args args = { 828 + .num_rqs = DRM_SCHED_PRIORITY_COUNT, 829 + .credit_limit = 1, 830 + .timeout = msecs_to_jiffies(500), 831 + .dev = v3d->drm.dev, 832 + }; 833 + 834 + args.ops = ops; 835 + args.name = name; 836 + 837 + return drm_sched_init(&v3d->queue[queue].sched, &args); 838 + } 839 + 823 840 int 824 841 v3d_sched_init(struct v3d_dev *v3d) 825 842 { 826 - int hw_jobs_limit = 1; 827 - int job_hang_limit = 0; 828 - int hang_limit_ms = 500; 829 843 int ret; 830 844 831 - ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, 832 - &v3d_bin_sched_ops, NULL, 833 - DRM_SCHED_PRIORITY_COUNT, 834 - hw_jobs_limit, job_hang_limit, 835 - msecs_to_jiffies(hang_limit_ms), NULL, 836 - NULL, "v3d_bin", v3d->drm.dev); 845 + ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin"); 837 846 if (ret) 838 847 return ret; 839 848 840 - ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, 841 - &v3d_render_sched_ops, NULL, 842 - DRM_SCHED_PRIORITY_COUNT, 843 - hw_jobs_limit, job_hang_limit, 844 - msecs_to_jiffies(hang_limit_ms), NULL, 845 - NULL, "v3d_render", v3d->drm.dev); 849 + ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER, 850 + "v3d_render"); 846 851 if (ret) 847 852 goto fail; 848 853 849 - ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, 850 - &v3d_tfu_sched_ops, NULL, 851 - DRM_SCHED_PRIORITY_COUNT, 852 - hw_jobs_limit, job_hang_limit, 853 - msecs_to_jiffies(hang_limit_ms), NULL, 854 - NULL, "v3d_tfu", v3d->drm.dev); 854 + ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu"); 855 855 if (ret) 856 856 goto fail; 857 857 858 858 if (v3d_has_csd(v3d)) { 859 - ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, 860 - &v3d_csd_sched_ops, NULL, 861 - DRM_SCHED_PRIORITY_COUNT, 862 - hw_jobs_limit, job_hang_limit, 863 - msecs_to_jiffies(hang_limit_ms), NULL, 864 - NULL, "v3d_csd", v3d->drm.dev); 859 + ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD, 860 + "v3d_csd"); 865 861 if (ret) 866 862 goto fail; 867 863 868 - ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, 869 - &v3d_cache_clean_sched_ops, NULL, 870 - DRM_SCHED_PRIORITY_COUNT, 871 - hw_jobs_limit, job_hang_limit, 872 - msecs_to_jiffies(hang_limit_ms), NULL, 873 - NULL, "v3d_cache_clean", v3d->drm.dev); 864 + ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops, 865 + V3D_CACHE_CLEAN, "v3d_cache_clean"); 874 866 if (ret) 875 867 goto fail; 876 868 } 877 869 878 - ret = drm_sched_init(&v3d->queue[V3D_CPU].sched, 879 - &v3d_cpu_sched_ops, NULL, 880 - DRM_SCHED_PRIORITY_COUNT, 881 - 1, job_hang_limit, 882 - msecs_to_jiffies(hang_limit_ms), NULL, 883 - NULL, "v3d_cpu", v3d->drm.dev); 870 + ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu"); 884 871 if (ret) 885 872 goto fail; 886 873
+10 -5
drivers/gpu/drm/xe/xe_execlist.c
··· 336 336 static int execlist_exec_queue_init(struct xe_exec_queue *q) 337 337 { 338 338 struct drm_gpu_scheduler *sched; 339 + const struct drm_sched_init_args args = { 340 + .ops = &drm_sched_ops, 341 + .num_rqs = 1, 342 + .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 343 + .hang_limit = XE_SCHED_HANG_LIMIT, 344 + .timeout = XE_SCHED_JOB_TIMEOUT, 345 + .name = q->hwe->name, 346 + .dev = gt_to_xe(q->gt)->drm.dev, 347 + }; 339 348 struct xe_execlist_exec_queue *exl; 340 349 struct xe_device *xe = gt_to_xe(q->gt); 341 350 int err; ··· 359 350 360 351 exl->q = q; 361 352 362 - err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1, 363 - q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 364 - XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT, 365 - NULL, NULL, q->hwe->name, 366 - gt_to_xe(q->gt)->drm.dev); 353 + err = drm_sched_init(&exl->sched, &args); 367 354 if (err) 368 355 goto err_free; 369 356
+14 -3
drivers/gpu/drm/xe/xe_gpu_scheduler.c
··· 63 63 atomic_t *score, const char *name, 64 64 struct device *dev) 65 65 { 66 + const struct drm_sched_init_args args = { 67 + .ops = ops, 68 + .submit_wq = submit_wq, 69 + .num_rqs = 1, 70 + .credit_limit = hw_submission, 71 + .hang_limit = hang_limit, 72 + .timeout = timeout, 73 + .timeout_wq = timeout_wq, 74 + .score = score, 75 + .name = name, 76 + .dev = dev, 77 + }; 78 + 66 79 sched->ops = xe_ops; 67 80 INIT_LIST_HEAD(&sched->msgs); 68 81 INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work); 69 82 70 - return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission, 71 - hang_limit, timeout, timeout_wq, score, name, 72 - dev); 83 + return drm_sched_init(&sched->base, &args); 73 84 } 74 85 75 86 void xe_sched_fini(struct xe_gpu_scheduler *sched)
+31 -5
include/drm/gpu_scheduler.h
··· 540 540 struct device *dev; 541 541 }; 542 542 543 + /** 544 + * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler 545 + * 546 + * @ops: backend operations provided by the driver 547 + * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is 548 + * allocated and used. 549 + * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT, 550 + * as there's usually one run-queue per priority, but may be less. 551 + * @credit_limit: the number of credits this scheduler can hold from all jobs 552 + * @hang_limit: number of times to allow a job to hang before dropping it. 553 + * This mechanism is DEPRECATED. Set it to 0. 554 + * @timeout: timeout value in jiffies for submitted jobs. 555 + * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used. 556 + * @score: score atomic shared with other schedulers. May be NULL. 557 + * @name: name (typically the driver's name). Used for debugging 558 + * @dev: associated device. Used for debugging 559 + */ 560 + struct drm_sched_init_args { 561 + const struct drm_sched_backend_ops *ops; 562 + struct workqueue_struct *submit_wq; 563 + struct workqueue_struct *timeout_wq; 564 + u32 num_rqs; 565 + u32 credit_limit; 566 + unsigned int hang_limit; 567 + long timeout; 568 + atomic_t *score; 569 + const char *name; 570 + struct device *dev; 571 + }; 572 + 543 573 int drm_sched_init(struct drm_gpu_scheduler *sched, 544 - const struct drm_sched_backend_ops *ops, 545 - struct workqueue_struct *submit_wq, 546 - u32 num_rqs, u32 credit_limit, unsigned int hang_limit, 547 - long timeout, struct workqueue_struct *timeout_wq, 548 - atomic_t *score, const char *name, struct device *dev); 574 + const struct drm_sched_init_args *args); 549 575 550 576 void drm_sched_fini(struct drm_gpu_scheduler *sched); 551 577 int drm_sched_job_init(struct drm_sched_job *job,