/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

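/* Slab cache for scheduler fences, refcounted and shared by all scheduler instances */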
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

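/* Add an entity to the tail of a run queue, under the run queue lock */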
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

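/* Remove an entity from a run queue; reset the round-robin position if it pointed here */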
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next job from a specified run queue with round-robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 if succeeded, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs before tearing the entity down.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

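/* Callback for a dependency fence: clear it, drop the reference and wake up the scheduler */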
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

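/*
 * Peek at the next job of an entity without removing it from the queue.
 * Returns NULL if the entity has no job queued or if the job still has an
 * unsignaled dependency.
 */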
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

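	/*
	 * Walk the job's dependencies; on the first fence that is still
	 * outstanding, install a wakeup callback and bail out.
	 */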
	while ((entity->dependency = sched->ops->dependency(sched_job))) {

		if (entity->dependency->context == entity->fence_context) {
			/* We can ignore fences from ourselves */
			fence_put(entity->dependency);
			continue;
		}

		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Blocks until there is room in the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

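/*
 * Completion callback for a HW fence: signal the scheduler fence, cancel the
 * timeout handler and wake up the scheduler worker.
 */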
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

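/* Delayed work handler: a scheduled fence did not signal within the timeout */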
static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR(" fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

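/*
 * The scheduler main loop: pick the next job, hand it to the HW and wait for
 * the result. Runs as a dedicated kernel thread, one per scheduler instance.
 */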
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;

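		/* Arm the timeout handler before handing the job to the HW */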
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission The max number of HW submissions that can be in flight
 * @timeout Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}