Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: move the context from the IBs into the job

We only have one context for all IBs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
92f25098 f153d286

+24 -17
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 743 743 struct amdgpu_user_fence *user; 744 744 unsigned vm_id; 745 745 uint64_t vm_pd_addr; 746 - uint64_t ctx; 747 746 uint32_t gds_base, gds_size; 748 747 uint32_t gws_base, gws_size; 749 748 uint32_t oa_base, oa_size; ··· 1261 1262 struct fence *fence; /* the hw fence */ 1262 1263 uint32_t num_ibs; 1263 1264 void *owner; 1265 + uint64_t ctx; 1264 1266 struct amdgpu_user_fence uf; 1265 1267 }; 1266 1268 #define to_amdgpu_job(sched_job) \
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 741 741 742 742 ib->length_dw = chunk_ib->ib_bytes / 4; 743 743 ib->flags = chunk_ib->flags; 744 - ib->ctx = parser->ctx->rings[ring->idx].entity.fence_context; 745 744 j++; 746 745 } 747 746 ··· 839 840 union drm_amdgpu_cs *cs) 840 841 { 841 842 struct amdgpu_ring *ring = p->job->ring; 843 + struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; 842 844 struct fence *fence; 843 845 struct amdgpu_job *job; 844 846 int r; ··· 848 848 p->job = NULL; 849 849 850 850 r = amd_sched_job_init(&job->base, &ring->sched, 851 - &p->ctx->rings[ring->idx].entity, 852 - amdgpu_job_timeout_func, 853 - amdgpu_job_free_func, 854 - p->filp, &fence); 851 + entity, amdgpu_job_timeout_func, 852 + amdgpu_job_free_func, 853 + p->filp, &fence); 855 854 if (r) { 856 855 amdgpu_job_free(job); 857 856 return r; 858 857 } 859 858 860 859 job->owner = p->filp; 860 + job->ctx = entity->fence_context; 861 861 p->fence = fence_get(fence); 862 862 cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); 863 863 job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+15 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 121 121 { 122 122 struct amdgpu_device *adev = ring->adev; 123 123 struct amdgpu_ib *ib = &ibs[0]; 124 - struct fence *hwf; 125 - struct amdgpu_vm *vm = NULL; 126 - unsigned i, patch_offset = ~0; 127 124 bool skip_preamble, need_ctx_switch; 125 + unsigned patch_offset = ~0; 126 + struct amdgpu_vm *vm; 127 + struct fence *hwf; 128 + uint64_t ctx; 128 129 130 + unsigned i; 129 131 int r = 0; 130 132 131 133 if (num_ibs == 0) 132 134 return -EINVAL; 133 135 134 - if (job) /* for domain0 job like ring test, ibs->job is not assigned */ 136 + /* ring tests don't use a job */ 137 + if (job) { 135 138 vm = job->vm; 139 + ctx = job->ctx; 140 + } else { 141 + vm = NULL; 142 + ctx = 0; 143 + } 136 144 137 145 if (!ring->ready) { 138 146 dev_err(adev->dev, "couldn't schedule ib\n"); ··· 178 170 /* always set cond_exec_polling to CONTINUE */ 179 171 *ring->cond_exe_cpu_addr = 1; 180 172 181 - skip_preamble = ring->current_ctx == ib->ctx; 182 - need_ctx_switch = ring->current_ctx != ib->ctx; 173 + skip_preamble = ring->current_ctx == ctx; 174 + need_ctx_switch = ring->current_ctx != ctx; 183 175 for (i = 0; i < num_ibs; ++i) { 184 176 ib = &ibs[i]; 185 177 ··· 217 209 if (patch_offset != ~0 && ring->funcs->patch_cond_exec) 218 210 amdgpu_ring_patch_cond_exec(ring, patch_offset); 219 211 220 - ring->current_ctx = ibs->ctx; 212 + ring->current_ctx = ctx; 221 213 amdgpu_ring_commit(ring); 222 214 return 0; 223 215 }
+3 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 122 122 return -EINVAL; 123 123 124 124 r = amd_sched_job_init(&job->base, &ring->sched, 125 - entity, 126 - amdgpu_job_timeout_func, 127 - amdgpu_job_free_func, 128 - owner, &fence); 125 + entity, amdgpu_job_timeout_func, 126 + amdgpu_job_free_func, owner, &fence); 129 127 if (r) 130 128 return r; 131 129 132 130 job->owner = owner; 131 + job->ctx = entity->fence_context; 133 132 *f = fence_get(fence); 134 133 amd_sched_entity_push_job(&job->base); 135 134