
drm/amd: add scheduler fence implementation (v2)

The scheduler fence is based on the kernel fence framework.

v2: squash in Christian's build fix

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>

Authored by Chunming Zhou, committed by Alex Deucher
commit f556cb0c (parent 4af9f07c)

+202 -38
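The practical payoff of backing each scheduled job with a struct fence is interoperability: once amd_sched_push_job() hands back an amd_sched_fence, any kernel code can block on job completion through the generic fence API instead of polling the entity's sequence counters. A minimal sketch, assuming the 4.2-era <linux/fence.h> interface (the helper name and timeout are illustrative, not part of this patch):

    #include <linux/errno.h>
    #include <linux/fence.h>
    #include <linux/jiffies.h>

    /* Illustrative helper: wait up to one second for a scheduler fence. */
    static int example_wait_on_job(struct fence *f)
    {
    	long r;

    	/* Interruptible wait; the timeout value here is arbitrary. */
    	r = fence_wait_timeout(f, true, msecs_to_jiffies(1000));
    	if (r == 0)
    		return -ETIMEDOUT;	/* not signaled in time */
    	if (r < 0)
    		return r;		/* interrupted by a signal */

    	fence_put(f);			/* drop the caller's reference */
    	return 0;
    }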
drivers/gpu/drm/amd/amdgpu/Makefile (+1)

 # GPU scheduler
 amdgpu-y += \
 	../scheduler/gpu_scheduler.o \
+	../scheduler/sched_fence.o \
 	amdgpu_sched.o
 
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
drivers/gpu/drm/amd/amdgpu/amdgpu.h (+1)

 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 	int (*free_job)(struct amdgpu_cs_parser *sched_job);
+	struct amd_sched_fence *s_fence;
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c (+15 -6)

 	if (amdgpu_enable_scheduler && parser->num_ibs) {
 		struct amdgpu_ring * ring =
 			amdgpu_cs_parser_get_ring(adev, parser);
-		parser->ibs[parser->num_ibs - 1].sequence = atomic64_inc_return(
-			&parser->ctx->rings[ring->idx].entity.last_queued_v_seq);
 		if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
 			r = amdgpu_cs_parser_prepare_job(parser);
 			if (r)
···
 		parser->ring = ring;
 		parser->run_job = amdgpu_cs_parser_run_job;
 		parser->free_job = amdgpu_cs_parser_free_job;
-		amd_sched_push_job(ring->scheduler,
-			&parser->ctx->rings[ring->idx].entity,
-			parser);
-		cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
+		mutex_lock(&parser->job_lock);
+		r = amd_sched_push_job(ring->scheduler,
+				       &parser->ctx->rings[ring->idx].entity,
+				       parser,
+				       &parser->s_fence);
+		if (r) {
+			mutex_unlock(&parser->job_lock);
+			goto out;
+		}
+		parser->ibs[parser->num_ibs - 1].sequence =
+			amdgpu_ctx_add_fence(parser->ctx, ring,
+					     &parser->s_fence->base,
+					     parser->s_fence->v_seq);
+		cs->out.handle = parser->s_fence->v_seq;
+		mutex_unlock(&parser->job_lock);
 		up_read(&adev->exclusive_lock);
 		return 0;
 	}
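The mutex_lock(&parser->job_lock) above is load-bearing: once amd_sched_push_job() queues the job, the scheduler may run and signal it at any moment, so the fence must be published via amdgpu_ctx_add_fence() before that window opens. A toy sketch of the same publish-under-lock pattern (all types and the helper are hypothetical, only the locking idea is from the patch):

    #include <linux/fence.h>
    #include <linux/mutex.h>

    struct toy_job { struct fence *fence; };
    struct toy_ctx {
    	struct mutex job_lock;
    	struct fence *last_fence;	/* handle visible to waiters */
    };

    /* Queue the job and publish its fence under one lock, so a job that
     * completes immediately cannot signal before its handle exists. */
    static void toy_submit(struct toy_ctx *ctx, struct toy_job *job)
    {
    	mutex_lock(&ctx->job_lock);
    	/* ...hand job to the scheduler here; it may start running now... */
    	ctx->last_fence = job->fence;
    	mutex_unlock(&ctx->job_lock);
    }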
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c (-10)

 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	struct fence *fence;
 	uint64_t queued_seq;
-	int r;
-
-	if (amdgpu_enable_scheduler) {
-		r = amd_sched_wait_emit(&cring->entity,
-					seq,
-					false,
-					-1);
-		if (r)
-			return NULL;
-	}
 
 	spin_lock(&ctx->ring_lock);
 	if (amdgpu_enable_scheduler)
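With a real fence recorded per submission, amdgpu_ctx_get_fence() no longer needs the amd_sched_wait_emit() busy-wait removed above; the stored fence can be waited on directly. A sketch of the consumer side, assuming this era's amdgpu internals (the helper name is illustrative, error handling simplified):

    #include <linux/err.h>
    #include <linux/fence.h>
    #include "amdgpu.h"

    /* Illustrative: resolve a CS handle back to its fence and block on it. */
    static long example_wait_cs(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
    			    uint64_t handle)
    {
    	struct fence *f;
    	long r;

    	/* handle is the v_seq stored by amdgpu_ctx_add_fence() above */
    	f = amdgpu_ctx_get_fence(ctx, ring, handle);
    	if (IS_ERR_OR_NULL(f))
    		return f ? PTR_ERR(f) : 0;	/* invalid, or already retired */

    	r = fence_wait(f, true);	/* interruptible */
    	fence_put(f);
    	return r;
    }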
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c (+1 -1)

 
 	sequence = amdgpu_enable_scheduler ? ib->sequence : 0;
 
-	if (ib->ctx)
+	if (!amdgpu_enable_scheduler && ib->ctx)
 		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
 						    &ib->fence->base,
 						    sequence);
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c (+17 -17)

 {
 	int r = 0;
 	if (amdgpu_enable_scheduler) {
-		uint64_t v_seq;
 		struct amdgpu_cs_parser *sched_job =
 			amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
 						ibs, num_ibs);
···
 			return -ENOMEM;
 		}
 		sched_job->free_job = free_job;
-		v_seq = atomic64_inc_return(&adev->kernel_ctx.rings[ring->idx].entity.last_queued_v_seq);
-		ibs[num_ibs - 1].sequence = v_seq;
-		amd_sched_push_job(ring->scheduler,
-				   &adev->kernel_ctx.rings[ring->idx].entity,
-				   sched_job);
-		r = amd_sched_wait_emit(
-			&adev->kernel_ctx.rings[ring->idx].entity,
-			v_seq,
-			false,
-			-1);
-		if (r)
-			WARN(true, "emit timeout\n");
-	} else
+		mutex_lock(&sched_job->job_lock);
+		r = amd_sched_push_job(ring->scheduler,
+				       &adev->kernel_ctx.rings[ring->idx].entity,
+				       sched_job, &sched_job->s_fence);
+		if (r) {
+			mutex_unlock(&sched_job->job_lock);
+			kfree(sched_job);
+			return r;
+		}
+		ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
+		*f = &sched_job->s_fence->base;
+		mutex_unlock(&sched_job->job_lock);
+	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
-	if (r)
-		return r;
-	*f = &ibs[num_ibs - 1].fence->base;
+		if (r)
+			return r;
+		*f = &ibs[num_ibs - 1].fence->base;
+	}
 	return 0;
 }
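For kernel-internal submissions the contract changes the same way: instead of amd_sched_wait_emit() with a WARN on timeout, the helper now returns the scheduler fence through *f and lets the caller decide how to synchronize. A hedged sketch of a caller (the free-job callback is a stub, reference handling is elided, and the helper's exact signature at this point in the series is an assumption):

    #include <linux/fence.h>
    #include "amdgpu.h"

    static int example_free_job(struct amdgpu_cs_parser *sched_job)
    {
    	/* a real callback would free the parser's IBs here */
    	return 0;
    }

    /* Illustrative: submit IBs from kernel context and wait for completion. */
    static int example_submit_and_sync(struct amdgpu_device *adev,
    				   struct amdgpu_ring *ring,
    				   struct amdgpu_ib *ibs, unsigned num_ibs)
    {
    	struct fence *f = NULL;
    	long r;

    	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ibs, num_ibs,
    						 example_free_job,
    						 AMDGPU_FENCE_OWNER_UNDEFINED,
    						 &f);
    	if (r)
    		return r;

    	/* uninterruptible wait is fine for short kernel-internal jobs;
    	 * fence reference ownership rules vary across this series */
    	r = fence_wait(f, false);
    	return r;
    }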
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c (+23 -3)

 			  uint32_t jobs)
 {
 	uint64_t seq_ring = 0;
+	char name[20];
 
 	if (!(sched && entity && rq))
 		return -EINVAL;
···
 	entity->scheduler = sched;
 	init_waitqueue_head(&entity->wait_queue);
 	init_waitqueue_head(&entity->wait_emit);
+	entity->fence_context = fence_context_alloc(1);
+	snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
+	memcpy(entity->name, name, 20);
+	INIT_LIST_HEAD(&entity->fence_list);
 	if(kfifo_alloc(&entity->job_queue,
 		       jobs * sizeof(void *),
 		       GFP_KERNEL))
···
 	spin_lock_init(&entity->queue_lock);
 	atomic64_set(&entity->last_emitted_v_seq, seq_ring);
 	atomic64_set(&entity->last_queued_v_seq, seq_ring);
+	atomic64_set(&entity->last_signaled_v_seq, seq_ring);
 
 	/* Add the entity to the run queue */
 	mutex_lock(&rq->lock);
···
  */
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *c_entity,
-		       void *data)
+		       void *data,
+		       struct amd_sched_fence **fence)
 {
-	struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job),
-					    GFP_KERNEL);
+	struct amd_sched_job *job;
+
+	if (!fence)
+		return -EINVAL;
+	job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
 	if (!job)
 		return -ENOMEM;
 	job->sched = sched;
 	job->s_entity = c_entity;
 	job->data = data;
+	*fence = amd_sched_fence_create(c_entity);
+	if ((*fence) == NULL) {
+		kfree(job);
+		return -EINVAL;
+	}
+	job->s_fence = *fence;
 	while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
 				   &c_entity->queue_lock) != sizeof(void *)) {
 		/**
···
 	unsigned long flags;
 
 	sched = sched_job->sched;
+	atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
+		     sched_job->s_fence->v_seq);
+	amd_sched_fence_signal(sched_job->s_fence);
 	spin_lock_irqsave(&sched->queue_lock, flags);
 	list_del(&sched_job->list);
 	atomic64_dec(&sched->hw_rq_count);
 	spin_unlock_irqrestore(&sched->queue_lock, flags);
 
 	sched->ops->process_job(sched, sched_job);
+	fence_put(&sched_job->s_fence->base);
 	kfree(sched_job);
 	wake_up_interruptible(&sched->wait_queue);
 }
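amd_sched_process_job() above is itself a fence callback on the hardware fence; it bumps last_signaled_v_seq, signals the scheduler fence, and drops the job's reference. Other code can chain work to the same completion point with fence_add_callback() instead of sleeping. A small sketch, assuming the 4.2-era fence API (the callback and helper names are illustrative):

    #include <linux/fence.h>
    #include <linux/printk.h>

    /* Runs in fence-signaling (possibly IRQ) context when the job retires. */
    static void example_job_done(struct fence *f, struct fence_cb *cb)
    {
    	pr_info("job on timeline %s retired\n",
    		f->ops->get_timeline_name(f));
    }

    /* Illustrative: chain work to a scheduler fence without blocking. */
    static int example_chain(struct fence *f, struct fence_cb *cb)
    {
    	int r = fence_add_callback(f, cb, example_job_done);

    	/* -ENOENT just means the fence already signaled */
    	return (r == -ENOENT) ? 0 : r;
    }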
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h (+32 -1)

 	/* the virtual_seq is unique per context per ring */
 	atomic64_t last_queued_v_seq;
 	atomic64_t last_emitted_v_seq;
+	atomic64_t last_signaled_v_seq;
 	/* the job_queue maintains the jobs submitted by clients */
 	struct kfifo job_queue;
 	spinlock_t queue_lock;
···
 	wait_queue_head_t wait_queue;
 	wait_queue_head_t wait_emit;
 	bool is_pending;
+	uint64_t fence_context;
+	struct list_head fence_list;
+	char name[20];
 };
 
 /**
···
 	int (*check_entity_status)(struct amd_sched_entity *entity);
 };
 
+struct amd_sched_fence {
+	struct fence base;
+	struct fence_cb cb;
+	struct list_head list;
+	struct amd_sched_entity *entity;
+	uint64_t v_seq;
+	spinlock_t lock;
+};
+
 struct amd_sched_job {
 	struct list_head list;
 	struct fence_cb cb;
 	struct amd_gpu_scheduler *sched;
 	struct amd_sched_entity *s_entity;
 	void *data;
+	struct amd_sched_fence *s_fence;
 };
+
+extern const struct fence_ops amd_sched_fence_ops;
+static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
+{
+	struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+
+	if (__f->base.ops == &amd_sched_fence_ops)
+		return __f;
+
+	return NULL;
+}
 
 /**
  * Define the backend operations called by the scheduler,
···
 int amd_sched_push_job(struct amd_gpu_scheduler *sched,
 		       struct amd_sched_entity *c_entity,
-		       void *data);
+		       void *data,
+		       struct amd_sched_fence **fence);
 
 int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
 			uint64_t seq,
···
 void amd_sched_emit(struct amd_sched_entity *c_entity, uint64_t seq);
 
 uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity);
+
+struct amd_sched_fence *amd_sched_fence_create(
+	struct amd_sched_entity *s_entity);
+void amd_sched_fence_signal(struct amd_sched_fence *fence);
+
 
 #endif
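The to_amd_sched_fence() helper is the usual container_of downcast, made safe by checking base.ops against amd_sched_fence_ops, so code handed an arbitrary struct fence * can tell scheduler fences apart from hardware fences. For example (a toy helper, not part of the patch):

    /* Illustrative: recover the virtual sequence number from a generic
     * fence, or 0 if it is not a scheduler fence at all. */
    static uint64_t example_fence_seq(struct fence *f)
    {
    	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

    	/* NULL means f belongs to some other fence implementation */
    	return s_fence ? s_fence->v_seq : 0;
    }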
drivers/gpu/drm/amd/scheduler/sched_fence.c (new file, +112)

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

static void amd_sched_fence_wait_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *fence =
		container_of(cb, struct amd_sched_fence, cb);
	list_del_init(&fence->list);
	fence_put(&fence->base);
}

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity)
{
	struct amd_sched_fence *fence;

	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;
	fence->v_seq = atomic64_inc_return(&s_entity->last_queued_v_seq);
	fence->entity = s_entity;
	spin_lock_init(&fence->lock);
	fence_init(&fence->base, &amd_sched_fence_ops,
		   &fence->lock,
		   s_entity->fence_context,
		   fence->v_seq);
	fence_get(&fence->base);
	list_add_tail(&fence->list, &s_entity->fence_list);
	if (fence_add_callback(&fence->base, &fence->cb,
			       amd_sched_fence_wait_cb)) {
		fence_put(&fence->base);
		kfree(fence);
		return NULL;
	}
	return fence;
}

bool amd_sched_check_ts(struct amd_sched_entity *s_entity, uint64_t v_seq)
{
	return atomic64_read(&s_entity->last_signaled_v_seq) >= v_seq;
}

void amd_sched_fence_signal(struct amd_sched_fence *fence)
{
	if (amd_sched_check_ts(fence->entity, fence->v_seq)) {
		int ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");
	} else
		WARN(true, "fence process mismatch with job!\n");
}

static const char *amd_sched_fence_get_driver_name(struct fence *fence)
{
	return "amd_sched";
}

static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);
	return (const char *)fence->entity->name;
}

static bool amd_sched_fence_enable_signaling(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);

	return !amd_sched_check_ts(fence->entity, fence->v_seq);
}

static bool amd_sched_fence_is_signaled(struct fence *f)
{
	struct amd_sched_fence *fence = to_amd_sched_fence(f);

	return amd_sched_check_ts(fence->entity, fence->v_seq);
}

const struct fence_ops amd_sched_fence_ops = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
	.enable_signaling = amd_sched_fence_enable_signaling,
	.signaled = amd_sched_fence_is_signaled,
	.wait = fence_default_wait,
	.release = NULL,
};
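The ops table ties everything together: .signaled polls the entity's last_signaled_v_seq through amd_sched_check_ts(), .enable_signaling returns false once that test already passes (telling the fence core to signal immediately), and .wait falls through to fence_default_wait. The result is that generic fence helpers work unmodified on scheduler fences. A minimal sketch, assuming the 4.2-era API (the helper name is illustrative):

    #include <linux/fence.h>
    #include "gpu_scheduler.h"

    /* Illustrative: both calls route through amd_sched_fence_ops. */
    static bool example_poll_or_wait(struct amd_sched_fence *fence, bool block)
    {
    	if (!block)
    		return fence_is_signaled(&fence->base);	/* .signaled */

    	/* fence_default_wait via .wait; fence_wait() returns 0 on success */
    	return fence_wait(&fence->base, false) == 0;
    }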