Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/exec: Pass in initial # of objects

In cases where the number of objects is known ahead of time, it is silly
to do the table resize dance.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Christian König <christian.koenig@amd.com>
Patchwork: https://patchwork.freedesktop.org/patch/568338/
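
A hypothetical usage sketch (not part of the patch; the helper name, "objs" and "count" are made up, only the drm_exec_* calls come from this series): a caller that already knows how many GEM objects it is about to lock can now pass that count to drm_exec_init() so the objects table is sized correctly from the start, while passing 0 keeps the previous one-page default.

static int lock_known_objects(struct drm_gem_object **objs, unsigned int count)
{
        struct drm_exec exec;
        int ret = 0;

        /*
         * Size the objects table for 'count' entries up front; it would
         * still grow on demand if more objects were added later.
         */
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, count);
        drm_exec_until_all_locked(&exec) {
                ret = drm_exec_prepare_array(&exec, objs, count, 1);
                drm_exec_retry_on_contention(&exec);
                if (ret)
                        break;
        }
        drm_exec_fini(&exec);
        return ret;
}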

13 files changed, +37 -30
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1137,7 +1137,7 @@
 
        ctx->n_vms = 1;
        ctx->sync = &mem->sync;
-       drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&ctx->exec) {
                ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
                drm_exec_retry_on_contention(&ctx->exec);
@@ -1176,7 +1176,7 @@
        int ret;
 
        ctx->sync = &mem->sync;
-       drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&ctx->exec) {
                ctx->n_vms = 0;
                list_for_each_entry(entry, &mem->attachments, list) {
@@ -2552,7 +2552,7 @@
 
        amdgpu_sync_create(&sync);
 
-       drm_exec_init(&exec, 0);
+       drm_exec_init(&exec, 0, 0);
        /* Reserve all BOs and page tables for validation */
        drm_exec_until_all_locked(&exec) {
                /* Reserve all the page directories */
@@ -2793,7 +2793,7 @@
 
        mutex_lock(&process_info->lock);
 
-       drm_exec_init(&exec, 0);
+       drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
                list_for_each_entry(peer_vm, &process_info->vm_list_head,
                                    vm_list_node) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -66,7 +66,7 @@
 
        amdgpu_sync_create(&p->sync);
        drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-                     DRM_EXEC_IGNORE_DUPLICATES);
+                     DRM_EXEC_IGNORE_DUPLICATES, 0);
        return 0;
 }
 
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -70,7 +70,7 @@
        struct drm_exec exec;
        int r;
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                r = amdgpu_vm_lock_pd(vm, &exec, 0);
                if (likely(!r))
@@ -110,7 +110,7 @@
        struct drm_exec exec;
        int r;
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                r = amdgpu_vm_lock_pd(vm, &exec, 0);
                if (likely(!r))
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -203,7 +203,7 @@
        struct drm_exec exec;
        long r;
 
-       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
                drm_exec_retry_on_contention(&exec);
@@ -739,7 +739,7 @@
        }
 
        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-                     DRM_EXEC_IGNORE_DUPLICATES);
+                     DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
                if (gobj) {
                        r = drm_exec_lock_obj(&exec, gobj);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1122,7 +1122,7 @@
 
        amdgpu_sync_create(&sync);
 
-       drm_exec_init(&exec, 0);
+       drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_lock_obj(&exec,
                                      &ctx_data->meta_data_obj->tbo.base);
@@ -1193,7 +1193,7 @@
        struct drm_exec exec;
        long r;
 
-       drm_exec_init(&exec, 0);
+       drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_lock_obj(&exec,
                                      &ctx_data->meta_data_obj->tbo.base);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -86,7 +86,7 @@
 
        amdgpu_sync_create(&sync);
 
-       drm_exec_init(&exec, 0);
+       drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_lock_obj(&exec, &bo->tbo.base);
                drm_exec_retry_on_contention(&exec);
@@ -149,7 +149,7 @@
        struct drm_exec exec;
        long r;
 
-       drm_exec_init(&exec, 0);
+       drm_exec_init(&exec, 0, 0);
        drm_exec_until_all_locked(&exec) {
                r = drm_exec_lock_obj(&exec, &bo->tbo.base);
                drm_exec_retry_on_contention(&exec);
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1529,7 +1529,7 @@
        uint32_t gpuidx;
        int r;
 
-       drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
+       drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0, 0);
        drm_exec_until_all_locked(&ctx->exec) {
                for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
                        pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
+10 -3
drivers/gpu/drm/drm_exec.c
@@ -69,16 +69,23 @@
  * drm_exec_init - initialize a drm_exec object
  * @exec: the drm_exec object to initialize
  * @flags: controls locking behavior, see DRM_EXEC_* defines
+ * @nr: the initial # of objects
  *
  * Initialize the object and make sure that we can track locked objects.
+ *
+ * If nr is non-zero then it is used as the initial objects table size.
+ * In either case, the table will grow (be re-allocated) on demand.
  */
-void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
 {
+       if (!nr)
+               nr = PAGE_SIZE / sizeof(void *);
+
        exec->flags = flags;
-       exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);
 
        /* If allocation here fails, just delay that till the first use */
-       exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+       exec->max_objects = exec->objects ? nr : 0;
        exec->num_objects = 0;
        exec->contended = DRM_EXEC_DUMMY;
        exec->prelocked = NULL;
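
For reference, the fallback path above keeps the previous table capacity for all the call sites mechanically converted in this patch; the arithmetic below assumes the common 64-bit configuration with 4 KiB pages (other configurations scale accordingly):

/* Passing nr == 0, as every converted caller does, keeps the old sizing: */
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
/*
 * -> nr = PAGE_SIZE / sizeof(void *) = 4096 / 8 = 512 initial slots,
 *    so kvmalloc_array(512, 8, GFP_KERNEL) allocates one page, the same
 *    capacity as the old kmalloc(PAGE_SIZE, GFP_KERNEL) table.
 */
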
+2 -2
drivers/gpu/drm/drm_gpuvm.c
@@ -1250,7 +1250,7 @@
        unsigned int num_fences = vm_exec->num_fences;
        int ret;
 
-       drm_exec_init(exec, vm_exec->flags);
+       drm_exec_init(exec, vm_exec->flags, 0);
 
        drm_exec_until_all_locked(exec) {
                ret = drm_gpuvm_prepare_vm(gpuvm, exec, num_fences);
@@ -1341,7 +1341,7 @@
        struct drm_exec *exec = &vm_exec->exec;
        int ret;
 
-       drm_exec_init(exec, vm_exec->flags);
+       drm_exec_init(exec, vm_exec->flags, 0);
 
        drm_exec_until_all_locked(exec) {
                ret = drm_gpuvm_prepare_range(gpuvm, exec, addr, range,
+1 -1
drivers/gpu/drm/imagination/pvr_job.c
@@ -746,7 +746,7 @@
        if (err)
                goto out_job_data_cleanup;
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0);
 
        xa_init_flags(&signal_array, XA_FLAGS_ALLOC);
 
+1 -1
drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1347,7 +1347,7 @@
                }
        }
 
-       drm_exec_init(exec, vme->flags);
+       drm_exec_init(exec, vme->flags, 0);
        drm_exec_until_all_locked(exec) {
                ret = bind_lock_validate(job, exec, vme->num_fences);
                drm_exec_retry_on_contention(exec);
+8 -8
drivers/gpu/drm/tests/drm_exec_test.c
@@ -46,7 +46,7 @@
 {
        struct drm_exec exec;
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_fini(&exec);
        KUNIT_SUCCEED(test);
 }
@@ -60,7 +60,7 @@
 
        drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                ret = drm_exec_lock_obj(&exec, &gobj);
                drm_exec_retry_on_contention(&exec);
@@ -80,7 +80,7 @@
 
        drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                ret = drm_exec_lock_obj(&exec, &gobj);
                drm_exec_retry_on_contention(&exec);
@@ -107,7 +107,7 @@
 
        drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
                ret = drm_exec_lock_obj(&exec, &gobj);
                drm_exec_retry_on_contention(&exec);
@@ -134,7 +134,7 @@
 
        drm_gem_private_object_init(priv->drm, &gobj, PAGE_SIZE);
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                ret = drm_exec_prepare_obj(&exec, &gobj, 1);
                drm_exec_retry_on_contention(&exec);
@@ -159,7 +159,7 @@
        drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
        drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec)
                ret = drm_exec_prepare_array(&exec, array, ARRAY_SIZE(array),
                                             1);
@@ -174,14 +174,14 @@
 {
        struct drm_exec exec;
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec)
        {
                break;
        }
        drm_exec_fini(&exec);
 
-       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec)
        {
                break;
+1 -1
include/drm/drm_exec.h
@@ -135,7 +135,7 @@
        return !!exec->contended;
 }
 
-void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr);
 void drm_exec_fini(struct drm_exec *exec);
 bool drm_exec_cleanup(struct drm_exec *exec);
 int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);