// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2018 Broadcom */

/**
 * DOC: Broadcom V3D scheduling
 *
 * The shared DRM GPU scheduler is used to coordinate submitting jobs
 * to the hardware. Each DRM fd (roughly a client process) gets its
 * own scheduler entity, which will process jobs in order. The GPU
 * scheduler will schedule the clients with a FIFO scheduling algorithm.
 *
 * For simplicity, and in order to keep latency low for interactive
 * jobs when bulk background jobs are queued up, we submit a new job
 * to the HW only when it has completed the last one, instead of
 * filling up the CT[01]Q FIFOs with jobs. Similarly, we use
 * drm_sched_job_add_dependency() to manage the dependency between bin
 * and render, instead of having the clients submit jobs using the HW's
 * semaphores to interlock between them.
 */

#include <linux/sched/clock.h>
#include <linux/kthread.h>

#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CSD_CFG012_WG_COUNT_SHIFT 16

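/*
 * Each queue-specific job struct embeds a struct v3d_job, which in turn
 * embeds the drm_sched_job, so the casting helpers below recover the
 * driver-side structs from the scheduler's job pointer with
 * container_of().
 */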
static struct v3d_job *
to_v3d_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_job, base);
}

static struct v3d_bin_job *
to_bin_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_bin_job, base.base);
}

static struct v3d_render_job *
to_render_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_render_job, base.base);
}

static struct v3d_tfu_job *
to_tfu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_tfu_job, base.base);
}

static struct v3d_csd_job *
to_csd_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_csd_job, base.base);
}

static struct v3d_cpu_job *
to_cpu_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct v3d_cpu_job, base.base);
}

static void
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	v3d_job_cleanup(job);
}

void
v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
			      unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++)
			drm_syncobj_put(query_info->queries[i].syncobj);

		kvfree(query_info->queries);
	}
}

void
v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
				unsigned int count)
{
	if (query_info->queries) {
		unsigned int i;

		for (i = 0; i < count; i++) {
			drm_syncobj_put(query_info->queries[i].syncobj);
			kvfree(query_info->queries[i].kperfmon_ids);
		}

		kvfree(query_info->queries);
	}
}

static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);

	v3d_timestamp_query_info_free(&job->timestamp_query,
				      job->timestamp_query.count);

	v3d_performance_query_info_free(&job->performance_query,
					job->performance_query.count);

	v3d_job_cleanup(&job->base);
}

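/*
 * A global perfmon, if one has been installed, takes precedence over the
 * job's own perfmon. Switching stops the currently active perfmon
 * (capturing its counter values) before starting the new one.
 */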
static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
	struct v3d_perfmon *perfmon = v3d->global_perfmon;

	if (!perfmon)
		perfmon = job->perfmon;

	if (perfmon == v3d->active_perfmon)
		return;

	v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

	if (perfmon)
		v3d_perfmon_start(v3d, perfmon);
}

static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_file_priv *file = job->file_priv;
	struct v3d_stats *global_stats = &v3d->queue[queue].stats;
	struct v3d_stats *local_stats = &file->stats[queue];
	u64 now = local_clock();
	unsigned long flags;

	/*
	 * We only need to disable local interrupts to appease lockdep,
	 * which otherwise would think v3d_job_start_stats() vs
	 * v3d_stats_update() has an unsafe in-irq vs no-irq-off usage
	 * problem. This is a false positive because all the locks are per
	 * queue and stats type, and all jobs are serialised completely one
	 * at a time. More specifically:
	 *
	 * 1. Locks for GPU queues are updated from interrupt handlers under
	 *    a spin lock and started here with preemption disabled.
	 *
	 * 2. Locks for CPU queues are updated from the worker with
	 *    preemption disabled and equally started here with preemption
	 *    disabled.
	 *
	 * Therefore both are consistent.
	 *
	 * 3. Because the next job can only be queued after the previous one
	 *    has been signaled, and locks are per queue, there is also no
	 *    scope for the start part to race with the update part.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_save(flags);
	else
		preempt_disable();

	write_seqcount_begin(&local_stats->lock);
	local_stats->start_ns = now;
	write_seqcount_end(&local_stats->lock);

	write_seqcount_begin(&global_stats->lock);
	global_stats->start_ns = now;
	write_seqcount_end(&global_stats->lock);

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_restore(flags);
	else
		preempt_enable();
}

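/*
 * Readers sample these stats with a seqcount retry loop (e.g. for the
 * DRM fdinfo output), so the write side only needs to mark its critical
 * sections; no sleeping lock is involved.
 */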
static void
v3d_stats_update(struct v3d_stats *stats, u64 now)
{
	write_seqcount_begin(&stats->lock);
	stats->enabled_ns += now - stats->start_ns;
	stats->jobs_completed++;
	stats->start_ns = 0;
	write_seqcount_end(&stats->lock);
}

void
v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q)
{
	struct v3d_dev *v3d = job->v3d;
	struct v3d_queue_state *queue = &v3d->queue[q];
	struct v3d_stats *global_stats = &queue->stats;
	u64 now = local_clock();
	unsigned long flags;

	/* See comment in v3d_job_start_stats() */
	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_save(flags);
	else
		preempt_disable();

	/* Don't update the local stats if the file context has already closed */
	spin_lock(&queue->queue_lock);
	if (job->file_priv)
		v3d_stats_update(&job->file_priv->stats[q], now);
	spin_unlock(&queue->queue_lock);

	v3d_stats_update(global_stats, now);

	if (IS_ENABLED(CONFIG_LOCKDEP))
		local_irq_restore(flags);
	else
		preempt_enable();
}

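/*
 * ->run_job() for the binner queue: called by the DRM scheduler once all
 * of a job's dependencies have signalled. It programs the hardware and
 * returns the fence that the IRQ handler will signal on completion, or
 * NULL if the job was cancelled or could not be started.
 */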
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	unsigned long irqflags;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		spin_lock_irqsave(&queue->queue_lock, irqflags);
		queue->active_job = NULL;
		spin_unlock_irqrestore(&queue->queue_lock, irqflags);
		return NULL;
	}

	/* Lock required around bin_job update vs
	 * v3d_overflow_mem_work().
	 */
	spin_lock_irqsave(&queue->queue_lock, irqflags);
	queue->active_job = &job->base;
	/* Clear out the overflow allocation, so we don't
	 * reuse the overflow attached to a previous job.
	 */
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
	spin_unlock_irqrestore(&queue->queue_lock, irqflags);

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_BIN);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_BIN);
	v3d_switch_perfmon(v3d, &job->base);

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	if (job->qma) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
		V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
	}
	if (job->qts) {
		V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
			       V3D_CLE_CT0QTS_ENABLE |
			       job->qts);
	}
	V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

	return fence;
}

static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->queue[V3D_RENDER].active_job = NULL;
		return NULL;
	}

	v3d->queue[V3D_RENDER].active_job = &job->base;

	/* Can we avoid this flush? We need to be careful of
	 * scheduling, though -- imagine job0 rendering to texture and
	 * job1 reading, and them being executed as bin0, bin1,
	 * render0, render1, so that render1's flush at bin time
	 * wasn't enough.
	 */
	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_RENDER);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno,
			    job->start, job->end);

	v3d_job_start_stats(&job->base, V3D_RENDER);
	v3d_switch_perfmon(v3d, &job->base);

	/* XXX: Set the QCFG */

	/* Set the current and end address of the control list.
	 * Writing the end register is what starts the job.
	 */
	V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
	V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);

	return fence;
}

static struct dma_fence *
v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_tfu_job *job = to_tfu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->queue[V3D_TFU].active_job = NULL;
		return NULL;
	}

	v3d->queue[V3D_TFU].active_job = &job->base;

	fence = v3d_fence_create(v3d, V3D_TFU);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_TFU);

	V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia);
	V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis);
	V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica);
	V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua);
	V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa);
	if (v3d->ver >= V3D_GEN_71)
		V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc);
	V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios);
	V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]);
	if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) {
		V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]);
		V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]);
		V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]);
	}
	/* ICFG kicks off the job. */
	V3D_WRITE(V3D_TFU_ICFG(v3d->ver), job->args.icfg | V3D_TFU_ICFG_IOC);

	return fence;
}

static struct dma_fence *
v3d_csd_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	struct drm_device *dev = &v3d->drm;
	struct dma_fence *fence;
	int i, csd_cfg0_reg;

	if (unlikely(job->base.base.s_fence->finished.error)) {
		v3d->queue[V3D_CSD].active_job = NULL;
		return NULL;
	}

	v3d->queue[V3D_CSD].active_job = &job->base;

	v3d_invalidate_caches(v3d);

	fence = v3d_fence_create(v3d, V3D_CSD);
	if (IS_ERR(fence))
		return NULL;

	if (job->base.irq_fence)
		dma_fence_put(job->base.irq_fence);
	job->base.irq_fence = dma_fence_get(fence);

	trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno);

	v3d_job_start_stats(&job->base, V3D_CSD);
	v3d_switch_perfmon(v3d, &job->base);

	csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver);
	for (i = 1; i <= 6; i++)
		V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]);

	/* Although V3D 7.1 has an eighth configuration register, we are not
	 * using it. Therefore, make sure it remains unused.
	 *
	 * XXX: Set the CFG7 register
	 */
	if (v3d->ver >= V3D_GEN_71)
		V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0);

	/* CFG0 write kicks off the job. */
	V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]);

	return fence;
}

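/*
 * Indirect CSD dispatch: the workgroup counts live in a GPU buffer
 * written by an earlier job, so this CPU job reads them at run time,
 * patches the queued CSD job's config values, and optionally rewrites
 * the uniforms that the shader uses to read the dispatch size.
 */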
static void
v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job)
{
	struct v3d_indirect_csd_info *indirect_csd = &job->indirect_csd;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect);
	struct drm_v3d_submit_csd *args = &indirect_csd->job->args;
	struct v3d_dev *v3d = job->base.v3d;
	u32 num_batches, *wg_counts;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(indirect);

	wg_counts = (uint32_t *)(bo->vaddr + indirect_csd->offset);

	if (wg_counts[0] == 0 || wg_counts[1] == 0 || wg_counts[2] == 0)
		goto out;

	args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT;
	args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT;

	num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) *
		      (wg_counts[0] * wg_counts[1] * wg_counts[2]);

	/* V3D 7.1.6 and later don't subtract 1 from the number of batches */
	if (v3d->ver < V3D_GEN_71 || (v3d->ver == V3D_GEN_71 && v3d->rev < 6))
		args->cfg[4] = num_batches - 1;
	else
		args->cfg[4] = num_batches;

	WARN_ON(args->cfg[4] == ~0);

	for (int i = 0; i < 3; i++) {
		/* 0xffffffff indicates that the uniform rewrite is not needed */
		if (indirect_csd->wg_uniform_offsets[i] != 0xffffffff) {
			u32 uniform_idx = indirect_csd->wg_uniform_offsets[i];

			((uint32_t *)indirect->vaddr)[uniform_idx] = wg_counts[i];
		}
	}

out:
	v3d_put_bo_vaddr(indirect);
	v3d_put_bo_vaddr(bo);
}

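/*
 * Timestamp queries are serviced on the CPU: the first query in the job
 * gets the current CPU time and the rest are zeroed, and each query's
 * syncobj is pointed at the job's done fence so that waiters can tell
 * when the value is valid.
 */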
static void
v3d_timestamp_query(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + timestamp_query->queries[i].offset;
		*((u64 *)value_addr) = i == 0 ? ktime_get_ns() : 0ull;

		drm_syncobj_replace_fence(timestamp_query->queries[i].syncobj,
					  job->base.done_fence);
	}

	v3d_put_bo_vaddr(bo);
}

static void
v3d_reset_timestamp_queries(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	u8 *value_addr;

	v3d_get_bo_vaddr(bo);

	for (int i = 0; i < timestamp_query->count; i++) {
		value_addr = ((u8 *)bo->vaddr) + queries[i].offset;
		*((u64 *)value_addr) = 0;

		drm_syncobj_replace_fence(queries[i].syncobj, NULL);
	}

	v3d_put_bo_vaddr(bo);
}

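/*
 * Query results can be copied back to userspace in either a 32-bit or a
 * 64-bit layout, selected per copy job by copy->do_64bit; these helpers
 * keep the two stores type-safe.
 */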
static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value)
{
	dst[idx] = value;
}

static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value)
{
	dst[idx] = value;
}

static void
write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value)
{
	if (do_64bit)
		write_to_buffer_64(dst, idx, value);
	else
		write_to_buffer_32(dst, idx, value);
}

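/*
 * Copy timestamp query results into the destination BO. A result is
 * written only if the query's fence has signalled or partial copies were
 * requested; an availability word can optionally be written after each
 * result, and the destination advances by copy->stride per query.
 */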
static void
v3d_copy_query_results(struct v3d_cpu_job *job)
{
	struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
	struct v3d_timestamp_query *queries = timestamp_query->queries;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct v3d_bo *timestamp = to_v3d_bo(job->base.bo[1]);
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct dma_fence *fence;
	u8 *query_addr;
	bool available, write_result;
	u8 *data;
	int i;

	v3d_get_bo_vaddr(bo);
	v3d_get_bo_vaddr(timestamp);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (i = 0; i < timestamp_query->count; i++) {
		fence = drm_syncobj_fence_get(queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result) {
			query_addr = ((u8 *)timestamp->vaddr) + queries[i].offset;
			write_to_buffer(data, 0, copy->do_64bit, *((u64 *)query_addr));
		}

		if (copy->availability_bit)
			write_to_buffer(data, 1, copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(timestamp);
	v3d_put_bo_vaddr(bo);
}

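/*
 * Resetting a performance query stops each of its kperfmons without
 * capturing values, zeroes the stored counter values, and detaches any
 * fence from the query's syncobj.
 */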
static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file_priv;
	struct v3d_dev *v3d = job->base.v3d;
	struct v3d_perfmon *perfmon;

	for (int i = 0; i < performance_query->count; i++) {
		for (int j = 0; j < performance_query->nperfmons; j++) {
			perfmon = v3d_perfmon_find(v3d_priv,
						   performance_query->queries[i].kperfmon_ids[j]);
			if (!perfmon) {
				DRM_DEBUG("Failed to find perfmon.");
				continue;
			}

			v3d_perfmon_stop(v3d, perfmon, false);

			memset(perfmon->values, 0, perfmon->ncounters * sizeof(u64));

			v3d_perfmon_put(perfmon);
		}

		drm_syncobj_replace_fence(performance_query->queries[i].syncobj, NULL);
	}
}

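/*
 * Write one query's performance counter values into the destination
 * buffer. Each of the query's kperfmons is stopped with its values
 * captured, and its counters land in a fixed-size slot of
 * DRM_V3D_MAX_PERF_COUNTERS entries, in either 32-bit or 64-bit form.
 */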
static void
v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
				   unsigned int query)
{
	struct v3d_performance_query_info *performance_query =
						&job->performance_query;
	struct v3d_file_priv *v3d_priv = job->base.file_priv;
	struct v3d_performance_query *perf_query =
						&performance_query->queries[query];
	struct v3d_dev *v3d = job->base.v3d;
	unsigned int i, j, offset;

	for (i = 0, offset = 0;
	     i < performance_query->nperfmons;
	     i++, offset += DRM_V3D_MAX_PERF_COUNTERS) {
		struct v3d_perfmon *perfmon;

		perfmon = v3d_perfmon_find(v3d_priv,
					   perf_query->kperfmon_ids[i]);
		if (!perfmon) {
			DRM_DEBUG("Failed to find perfmon.");
			continue;
		}

		v3d_perfmon_stop(v3d, perfmon, true);

		if (job->copy.do_64bit) {
			for (j = 0; j < perfmon->ncounters; j++)
				write_to_buffer_64(data, offset + j,
						   perfmon->values[j]);
		} else {
			for (j = 0; j < perfmon->ncounters; j++)
				write_to_buffer_32(data, offset + j,
						   perfmon->values[j]);
		}

		v3d_perfmon_put(perfmon);
	}
}

static void
v3d_copy_performance_query(struct v3d_cpu_job *job)
{
	struct v3d_performance_query_info *performance_query = &job->performance_query;
	struct v3d_copy_query_results_info *copy = &job->copy;
	struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]);
	struct dma_fence *fence;
	bool available, write_result;
	u8 *data;

	v3d_get_bo_vaddr(bo);

	data = ((u8 *)bo->vaddr) + copy->offset;

	for (int i = 0; i < performance_query->count; i++) {
		fence = drm_syncobj_fence_get(performance_query->queries[i].syncobj);
		available = fence ? dma_fence_is_signaled(fence) : false;

		write_result = available || copy->do_partial;
		if (write_result)
			v3d_write_performance_query_result(job, data, i);

		if (copy->availability_bit)
			write_to_buffer(data, performance_query->ncounters,
					copy->do_64bit, available ? 1u : 0u);

		data += copy->stride;

		dma_fence_put(fence);
	}

	v3d_put_bo_vaddr(bo);
}

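/*
 * Dispatch table for the CPU queue, indexed by job->job_type from the
 * submit ioctl; v3d_cpu_job_run() bounds-checks the type before using it.
 */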
static const v3d_cpu_job_fn cpu_job_function[] = {
	[V3D_CPU_JOB_TYPE_INDIRECT_CSD] = v3d_rewrite_csd_job_wg_counts_from_indirect,
	[V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY] = v3d_timestamp_query,
	[V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY] = v3d_reset_timestamp_queries,
	[V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY] = v3d_copy_query_results,
	[V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY] = v3d_reset_performance_queries,
	[V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY] = v3d_copy_performance_query,
};

static struct dma_fence *
v3d_cpu_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_cpu_job *job = to_cpu_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;

	if (job->job_type >= ARRAY_SIZE(cpu_job_function)) {
		DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type);
		return NULL;
	}

	v3d_job_start_stats(&job->base, V3D_CPU);
	trace_v3d_cpu_job_begin(&v3d->drm, job->job_type);

	cpu_job_function[job->job_type](job);

	trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
	v3d_job_update_stats(&job->base, V3D_CPU);

	/* Synchronous operation, so no fence to wait on. */
	return NULL;
}

static struct dma_fence *
v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;

	v3d_job_start_stats(job, V3D_CACHE_CLEAN);

	v3d_clean_caches(v3d);

	v3d_job_update_stats(job, V3D_CACHE_CLEAN);

	/* Synchronous operation, so no fence to wait on. */
	return NULL;
}

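/*
 * Common timeout path: stop every scheduler, reset the whole GPU,
 * resubmit the unfinished jobs and restart the schedulers, bumping the
 * global and per-fd reset counters along the way.
 */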
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job,
			  enum v3d_queue q)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_file_priv *v3d_priv = job->file_priv;
	unsigned long irqflags;
	enum v3d_queue i;

	mutex_lock(&v3d->reset_lock);

	/* block scheduler */
	for (i = 0; i < V3D_MAX_QUEUES; i++)
		drm_sched_stop(&v3d->queue[i].sched, sched_job);

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	v3d_reset(v3d);

	v3d->reset_counter++;
	spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
	if (v3d_priv)
		v3d_priv->reset_counter++;
	spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		drm_sched_resubmit_jobs(&v3d->queue[i].sched);

	/* Unblock schedulers and restart their jobs. */
	for (i = 0; i < V3D_MAX_QUEUES; i++)
		drm_sched_start(&v3d->queue[i].sched, 0);

	mutex_unlock(&v3d->reset_lock);

	return DRM_GPU_SCHED_STAT_RESET;
}

static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
		    u32 *timedout_ctca, u32 *timedout_ctra)
{
	struct v3d_job *job = to_v3d_job(sched_job);
	struct v3d_dev *v3d = job->v3d;
	u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
	u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));

	/* If the current address or return address have changed, then the GPU
	 * has probably made progress and we should delay the reset. This
	 * could fail if the GPU got in an infinite loop in the CL, but that
	 * is pretty unlikely outside of an i-g-t testcase.
	 */
	if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
		*timedout_ctca = ctca;
		*timedout_ctra = ctra;

		return DRM_GPU_SCHED_STAT_NO_HANG;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
}

static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_bin_job *job = to_bin_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_BIN,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_render_job *job = to_render_job(sched_job);

	return v3d_cl_job_timedout(sched_job, V3D_RENDER,
				   &job->timedout_ctca, &job->timedout_ctra);
}

static enum drm_gpu_sched_stat
v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_job *job = to_v3d_job(sched_job);

	return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU);
}

static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
	struct v3d_csd_job *job = to_csd_job(sched_job);
	struct v3d_dev *v3d = job->base.v3d;
	u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));

	/* If we've made progress, skip reset, add the job to the pending
	 * list, and let the timer get rearmed.
	 */
	if (job->timedout_batches != batches) {
		job->timedout_batches = batches;

		return DRM_GPU_SCHED_STAT_NO_HANG;
	}

	return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD);
}

static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
	.run_job = v3d_bin_job_run,
	.timedout_job = v3d_bin_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_render_sched_ops = {
	.run_job = v3d_render_job_run,
	.timedout_job = v3d_render_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
	.run_job = v3d_tfu_job_run,
	.timedout_job = v3d_tfu_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
	.run_job = v3d_csd_job_run,
	.timedout_job = v3d_csd_job_timedout,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
	.run_job = v3d_cache_clean_job_run,
	.free_job = v3d_sched_job_free,
};

static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
	.run_job = v3d_cpu_job_run,
	.free_job = v3d_cpu_job_free,
};

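/*
 * All queues use a credit limit of 1, which is what enforces the
 * one-job-on-the-hardware-at-a-time policy described in the DOC comment
 * at the top of this file. Jobs that don't complete within the 500 ms
 * timeout hit the ->timedout_job() hooks above.
 */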
static int
v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops,
		     enum v3d_queue queue, const char *name)
{
	struct drm_sched_init_args args = {
		.num_rqs = DRM_SCHED_PRIORITY_COUNT,
		.credit_limit = 1,
		.timeout = msecs_to_jiffies(500),
		.dev = v3d->drm.dev,
	};

	args.ops = ops;
	args.name = name;

	return drm_sched_init(&v3d->queue[queue].sched, &args);
}

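/*
 * Create one scheduler per queue. The CSD and cache-clean queues only
 * exist on hardware with a compute shader dispatcher; on failure, any
 * schedulers already created are torn down via v3d_sched_fini().
 */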
int
v3d_sched_init(struct v3d_dev *v3d)
{
	int ret;

	ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin");
	if (ret)
		return ret;

	ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER,
				   "v3d_render");
	if (ret)
		goto fail;

	ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu");
	if (ret)
		goto fail;

	if (v3d_has_csd(v3d)) {
		ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD,
					   "v3d_csd");
		if (ret)
			goto fail;

		ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops,
					   V3D_CACHE_CLEAN, "v3d_cache_clean");
		if (ret)
			goto fail;
	}

	ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu");
	if (ret)
		goto fail;

	return 0;

fail:
	v3d_sched_fini(v3d);
	return ret;
}

void
v3d_sched_fini(struct v3d_dev *v3d)
{
	enum v3d_queue q;

	for (q = 0; q < V3D_MAX_QUEUES; q++) {
		if (v3d->queue[q].sched.ready)
			drm_sched_fini(&v3d->queue[q].sched);
	}
}