
drm/amdgpu: cleanup scheduler job initialization v2

Init the DRM scheduler base class while allocating the job.

This makes the whole handling much cleaner.
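
For callers this folds the former two-step pattern (allocate the job, then
call drm_sched_job_init() by hand) into a single call. A minimal sketch of
the caller-side change, using the parameter names from the new prototypes
below; the snippet is illustrative and not taken verbatim from any one
caller:

    /* before: scheduler base class initialized separately */
    r = amdgpu_job_alloc(adev, num_ibs, &job, vm);
    if (r)
            return r;
    r = drm_sched_job_init(&job->base, entity, owner);

    /* after: entity and owner are handed over at allocation time;
     * passing a NULL entity keeps the old "not yet initialized" behavior
     */
    r = amdgpu_job_alloc(adev, vm, entity, owner, num_ibs, &job);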

v2: fix coding style

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-7-christian.koenig@amd.com

+135 -150
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
···
                 goto err;
         }

-        ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+        ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
         if (ret)
                 goto err;

+2 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
···
                 return -EINVAL;

         for (i = 0; i < p->gang_size; ++i) {
-                ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
-                if (ret)
-                        goto free_all_kdata;
-
-                ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
-                                         &fpriv->vm);
+                ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+                                       num_ibs[i], &p->jobs[i]);
                 if (ret)
                         goto free_all_kdata;
         }
+23 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
···
         return DRM_GPU_SCHED_STAT_NOMINAL;
 }

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-                     struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                     struct drm_sched_entity *entity, void *owner,
+                     unsigned int num_ibs, struct amdgpu_job **job)
 {
         if (num_ibs == 0)
                 return -EINVAL;
···
         (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
         (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

-        return 0;
+        if (!entity)
+                return 0;
+
+        return drm_sched_job_init(&(*job)->base, entity, owner);
 }

-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             enum amdgpu_ib_pool_type pool_type,
-                             struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+                             struct drm_sched_entity *entity, void *owner,
+                             size_t size, enum amdgpu_ib_pool_type pool_type,
+                             struct amdgpu_job **job)
 {
         int r;

-        r = amdgpu_job_alloc(adev, 1, job, NULL);
+        r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
         if (r)
                 return r;

         (*job)->num_ibs = 1;
         r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
-        if (r)
+        if (r) {
+                if (entity)
+                        drm_sched_job_cleanup(&(*job)->base);
                 kfree(*job);
+        }

         return r;
 }
···

 void amdgpu_job_free(struct amdgpu_job *job)
 {
+        if (job->base.entity)
+                drm_sched_job_cleanup(&job->base);
+
         amdgpu_job_free_resources(job);
         amdgpu_sync_free(&job->sync);
         amdgpu_sync_free(&job->sched_sync);
···
         dma_fence_put(&job->hw_fence);
 }

-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-                      void *owner, struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
 {
-        int r;
-
-        if (!f)
-                return -EINVAL;
-
-        r = drm_sched_job_init(&job->base, entity, owner);
-        if (r)
-                return r;
+        struct dma_fence *f;

         drm_sched_job_arm(&job->base);
-
-        *f = dma_fence_get(&job->base.s_fence->finished);
+        f = dma_fence_get(&job->base.s_fence->finished);
         amdgpu_job_free_resources(job);
         drm_sched_entity_push_job(&job->base);

-        return 0;
+        return f;
 }

 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+8 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
···
         return to_amdgpu_ring(job->base.entity->rq->sched);
 }

-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-                     struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                             enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                     struct drm_sched_entity *entity, void *owner,
+                     unsigned int num_ibs, struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+                             struct drm_sched_entity *entity, void *owner,
+                             size_t size, enum amdgpu_ib_pool_type pool_type,
+                             struct amdgpu_job **job);
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
                               struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
                                 struct amdgpu_job *leader);
 void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-                      void *owner, struct dma_fence **f);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                              struct dma_fence **fence);

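
On the submission side the new amdgpu_job_submit() above can no longer fail:
it arms the scheduler job and returns the finished fence directly, and
amdgpu_job_free() now also calls drm_sched_job_cleanup() when an entity was
set. A hedged sketch of the resulting caller pattern (the explicit wait is
illustrative; many callers just drop the fence reference):

    fence = amdgpu_job_submit(job);
    /* no error path needed any more; the old
     * "if (r) { amdgpu_job_free(job); goto error; }" dance goes away
     */
    dma_fence_wait(fence, false);
    dma_fence_put(fence);
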
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
···
         const unsigned ib_size_dw = 16;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

         ib = &job->ibs[0];

-        ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+        ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
+                             PACKETJ_TYPE0);
         ib->ptr[1] = 0xDEADBEEF;
         for (i = 2; i < 16; i += 2) {
                 ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+21 -35
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
···
         struct amdgpu_device *adev = ring->adev;
         unsigned offset, num_pages, num_dw, num_bytes;
         uint64_t src_addr, dst_addr;
-        struct dma_fence *fence;
         struct amdgpu_job *job;
         void *cpu_addr;
         uint64_t flags;
···
         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
         num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;

-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4 + num_bytes,
                                      AMDGPU_IB_POOL_DELAYED, &job);
         if (r)
                 return r;
···
                 }
         }

-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_free;
-
-        dma_fence_put(fence);
-
-        return r;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
+        dma_fence_put(amdgpu_job_submit(job));
+        return 0;
 }

 /**
···
 }

 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
-                                         unsigned long offset, void *buf, int len, int write)
+                                         unsigned long offset, void *buf,
+                                         int len, int write)
 {
         struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
···
         memcpy(adev->mman.sdma_access_ptr, buf, len);

         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+                                     &job);
         if (r)
                 goto out;

         amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
-        src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+        src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+                   src_mm.start;
         dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
         if (write)
                 swap(src_addr, dst_addr);

-        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+        amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+                                PAGE_SIZE, false);

         amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
         WARN_ON(job->ibs[0].length_dw > num_dw);

-        r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r) {
-                amdgpu_job_free(job);
-                goto out;
-        }
+        fence = amdgpu_job_submit(job);

         if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
                 r = -ETIMEDOUT;
···
                                 AMDGPU_IB_POOL_DELAYED;
         int r;

-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4, pool, job);
         if (r)
                 return r;

···
         if (direct_submit)
                 r = amdgpu_job_submit_direct(job, ring, fence);
         else
-                r = amdgpu_job_submit(job, &adev->mman.entity,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+                *fence = amdgpu_job_submit(job);
         if (r)
                 goto error_free;

···

         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
         WARN_ON(job->ibs[0].length_dw > num_dw);
-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-        if (r)
-                goto error_free;
-
+        *fence = amdgpu_job_submit(job);
         return 0;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
 }

 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+4 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
···
         unsigned offset_idx = 0;
         unsigned offset[3] = { UVD_BASE_SI, 0, 0 };

-        r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     64, direct ? AMDGPU_IB_POOL_DIRECT :
                                      AMDGPU_IB_POOL_DELAYED, &job);
         if (r)
                 return r;
···
                 if (r)
                         goto err_free;

-                r = amdgpu_job_submit(job, &adev->uvd.entity,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-                if (r)
-                        goto err_free;
+                f = amdgpu_job_submit(job);
         }

         amdgpu_bo_reserve(bo, true);
+8 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
···
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 return r;

···
         struct dma_fence *f = NULL;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     ib_size_dw * 4,
                                      direct ? AMDGPU_IB_POOL_DIRECT :
                                      AMDGPU_IB_POOL_DELAYED, &job);
         if (r)
···
         if (direct)
                 r = amdgpu_job_submit_direct(job, ring, &f);
         else
-                r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+                f = amdgpu_job_submit(job);
         if (r)
                 goto err;

+13 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
···
                                    struct amdgpu_ib *ib_msg,
                                    struct dma_fence **fence)
 {
+        u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
         struct amdgpu_device *adev = ring->adev;
         struct dma_fence *f = NULL;
         struct amdgpu_job *job;
         struct amdgpu_ib *ib;
-        uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
         int i, r;

-        r = amdgpu_job_alloc_with_ib(adev, 64,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     64, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 goto err;
···
         if (sq)
                 ib_size_dw += 8;

-        r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 goto err;

···
         if (sq)
                 ib_size_dw += 8;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 return r;

···
         if (sq)
                 ib_size_dw += 8;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+                                     &job);
         if (r)
                 return r;

+31 -30
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
···
         return r;
 }

+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+                                    unsigned int count)
+{
+        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+                                                     : AMDGPU_IB_POOL_DELAYED;
+        struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+                                                       : &p->vm->delayed;
+        unsigned int ndw;
+        int r;
+
+        /* estimate how many dw we need */
+        ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+        if (p->pages_addr)
+                ndw += count * 2;
+        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+        r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+                                     ndw * 4, pool, &p->job);
+        if (r)
+                return r;
+
+        p->num_dw_left = ndw;
+        return 0;
+}
+
 /**
  * amdgpu_vm_sdma_prepare - prepare SDMA command submission
  *
···
                                    struct dma_resv *resv,
                                    enum amdgpu_sync_mode sync_mode)
 {
-        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-                                                     : AMDGPU_IB_POOL_DELAYED;
-        unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
         int r;

-        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+        r = amdgpu_vm_sdma_alloc_job(p, 0);
         if (r)
                 return r;
-
-        p->num_dw_left = ndw;

         if (!resv)
                 return 0;
···
                                   struct dma_fence **fence)
 {
         struct amdgpu_ib *ib = p->job->ibs;
-        struct drm_sched_entity *entity;
         struct amdgpu_ring *ring;
         struct dma_fence *f;
-        int r;

-        entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
-        ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+        ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+                            sched);

         WARN_ON(ib->length_dw == 0);
         amdgpu_ring_pad_ib(ring, ib);
         WARN_ON(ib->length_dw > p->num_dw_left);
-        r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
-        if (r)
-                goto error;
+        f = amdgpu_job_submit(p->job);

         if (p->unlocked) {
                 struct dma_fence *tmp = dma_fence_get(f);
···
         swap(*fence, f);
         dma_fence_put(f);
         return 0;
-
-error:
-        amdgpu_job_free(p->job);
-        return r;
 }

 /**
···
                                   uint64_t flags)
 {
         struct amdgpu_bo *bo = &vmbo->bo;
-        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-                                                     : AMDGPU_IB_POOL_DELAYED;
         struct dma_resv_iter cursor;
         unsigned int i, ndw, nptes;
         struct dma_fence *fence;
···
                         if (r)
                                 return r;

-                        /* estimate how many dw we need */
-                        ndw = 32;
-                        if (p->pages_addr)
-                                ndw += count * 2;
-                        ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
-                        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
-                        r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
-                                                     &p->job);
+                        r = amdgpu_vm_sdma_alloc_job(p, count);
                         if (r)
                                 return r;
-
-                        p->num_dw_left = ndw;
                 }

                 if (!p->pages_addr) {
+4 -8
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
···
          * translation. Avoid this by doing the invalidation from the SDMA
          * itself.
          */
-        r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+        r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
                                      &job);
         if (r)
                 goto error_alloc;
···
         job->vm_needs_flush = true;
         job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
         amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_submit;
+        fence = amdgpu_job_submit(job);

         mutex_unlock(&adev->mman.gtt_window_lock);

···
         dma_fence_put(fence);

         return;
-
-error_submit:
-        amdgpu_job_free(job);

 error_alloc:
         mutex_unlock(&adev->mman.gtt_window_lock);
+4 -4
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
···
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

···
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

+6 -6
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
···
  *
  * Open up a stream for HW test
  */
-static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
                                        struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
 {
···
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

···
  *
  * Close up a stream for HW test or if userspace failed to do so
  */
-static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
                                         struct amdgpu_bo *bo,
                                         struct dma_fence **fence)
 {
···
         uint64_t addr;
         int i, r;

-        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-                                     AMDGPU_IB_POOL_DIRECT, &job);
+        r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+                                     AMDGPU_IB_POOL_DIRECT, &job);
         if (r)
                 return r;

+6 -11
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
···
         num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
         num_bytes = npages * 8;

-        r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-                                     AMDGPU_IB_POOL_DELAYED, &job);
+        r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+                                     AMDGPU_FENCE_OWNER_UNDEFINED,
+                                     num_dw * 4 + num_bytes,
+                                     AMDGPU_IB_POOL_DELAYED,
+                                     &job);
         if (r)
                 return r;

···
         cpu_addr = &job->ibs[0].ptr[num_dw];

         amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-        r = amdgpu_job_submit(job, &adev->mman.entity,
-                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-        if (r)
-                goto error_free;
-
+        fence = amdgpu_job_submit(job);
         dma_fence_put(fence);

         return r;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
 }
