// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock_types.h>
#include <linux/workqueue.h>

#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>

#include "v3d_performance_counters.h"

#include "uapi/drm/v3d_drm.h"

struct clk;
struct platform_device;
struct reset_control;

#define GMP_GRANULARITY (128 * 1024)

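/* MMU pages are 4 KiB: page size = 1 << V3D_MMU_PAGE_SHIFT. */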
#define V3D_MMU_PAGE_SHIFT 12

#define V3D_MAX_QUEUES (V3D_CPU + 1)

static inline char *v3d_queue_to_string(enum v3d_queue queue)
{
	switch (queue) {
	case V3D_BIN: return "bin";
	case V3D_RENDER: return "render";
	case V3D_TFU: return "tfu";
	case V3D_CSD: return "csd";
	case V3D_CACHE_CLEAN: return "cache_clean";
	case V3D_CPU: return "cpu";
	}
	return "UNKNOWN";
}

struct v3d_stats {
	u64 start_ns;
	u64 enabled_ns;
	u64 jobs_completed;

	/*
	 * This seqcount protects access to the GPU stats variables. It is
	 * needed because IRQs can fire and update the stats while they are
	 * being read.
	 */
	seqcount_t lock;
};
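
/*
 * A minimal reader sketch for the seqcount pattern described above,
 * assuming the writer side wraps its updates in write_seqcount_begin()/
 * write_seqcount_end(). v3d_stats_read_enabled_ns() is a hypothetical
 * helper, not part of the driver; the real consumer is v3d_get_stats().
 */
static inline u64 v3d_stats_read_enabled_ns(struct v3d_stats *stats)
{
	unsigned int seq;
	u64 enabled_ns;

	do {
		/* Retry if an IRQ updated the stats while we were reading. */
		seq = read_seqcount_begin(&stats->lock);
		enabled_ns = stats->enabled_ns;
	} while (read_seqcount_retry(&stats->lock, seq));

	return enabled_ns;
}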

struct v3d_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;

	/* Stores the GPU stats for this queue in the global context. */
	struct v3d_stats stats;
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct v3d_perfmon {
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Protects perfmon stop, as it can be invoked from multiple places. */
	struct mutex lock;

	/* Number of counters activated in this perfmon instance
	 * (at most DRM_V3D_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 counters[DRM_V3D_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the
	 * HW perf counter values every time the perfmon is attached
	 * to a GPU job. This way, perfmon users don't have to
	 * retrieve the results after each job if they want to track
	 * events covering several submissions. Note that counter
	 * values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 values[] __counted_by(ncounters);
};
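
/*
 * Sketch of the refcounting rule described above: the last reference drop
 * destroys the perfmon. example_perfmon_put() is an illustrative name; the
 * driver's actual teardown lives in v3d_perfmon_put() in v3d_perfmon.c.
 */
static inline void example_perfmon_put(struct v3d_perfmon *perfmon)
{
	if (perfmon && refcount_dec_and_test(&perfmon->refcnt))
		kfree(perfmon);
}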

struct v3d_dev {
	struct drm_device drm;

	/* Short representation (e.g. 33, 41) of the V3D tech version */
	int ver;

	/* Short representation (e.g. 5, 6) of the V3D tech revision */
	int rev;

	bool single_irq_line;

	/* Different revisions of V3D have a different total number of
	 * performance counters.
	 */
	unsigned int max_counters;

	void __iomem *hub_regs;
	void __iomem *core_regs[3];
	void __iomem *bridge_regs;
	void __iomem *gca_regs;
	struct clk *clk;
	struct reset_control *reset;

	/* Virtual and DMA addresses of the single shared page table. */
	volatile u32 *pt;
	dma_addr_t pt_paddr;

	/* Virtual and DMA addresses of the MMU's scratch page. When
	 * a read or write is invalid in the MMU, it will be
	 * redirected here.
	 */
	void *mmu_scratch;
	dma_addr_t mmu_scratch_paddr;
	/* Virtual address bits from V3D to the MMU. */
	int va_width;

	/* Number of V3D cores. */
	u32 cores;

	/* Allocator managing the address space. All units are in
	 * number of pages.
	 */
	struct drm_mm mm;
	spinlock_t mm_lock;

	struct work_struct overflow_mem_work;

	struct v3d_bin_job *bin_job;
	struct v3d_render_job *render_job;
	struct v3d_tfu_job *tfu_job;
	struct v3d_csd_job *csd_job;
	struct v3d_cpu_job *cpu_job;

	struct v3d_queue_state queue[V3D_MAX_QUEUES];

	/* Spinlock used to synchronize the overflow memory
	 * management against bin job submission.
	 */
	spinlock_t job_lock;

	/* Used to track the active perfmon if any. */
	struct v3d_perfmon *active_perfmon;

	/* Protects bo_stats */
	struct mutex bo_lock;

	/* Lock taken when resetting the GPU, to keep multiple
	 * processes from trying to park the scheduler threads and
	 * reset at once.
	 */
	struct mutex reset_lock;

	/* Lock taken when creating and pushing the GPU scheduler
	 * jobs, to keep the sched-fence seqnos in order.
	 */
	struct mutex sched_lock;

	/* Lock taken during a cache clean and when initiating an L2
	 * flush, to keep L2 flushes from interfering with the
	 * synchronous L2 cleans.
	 */
	struct mutex cache_clean_lock;

	struct {
		u32 num_allocated;
		u32 pages_allocated;
	} bo_stats;
};

static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
	return container_of(dev, struct v3d_dev, drm);
}

static inline bool
v3d_has_csd(struct v3d_dev *v3d)
{
	return v3d->ver >= 41;
}

#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)

/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
	struct v3d_dev *v3d;

	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;

	struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];

	/* Stores the GPU stats for a specific queue for this fd. */
	struct v3d_stats stats[V3D_MAX_QUEUES];
};
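
/*
 * Sketch of an ID lookup against the per-fd perfmon idr above: take the
 * idr mutex, look the handle up, and grab a reference before dropping the
 * lock. example_perfmon_find() is an illustrative name; the driver's real
 * lookup is v3d_perfmon_find() in v3d_perfmon.c.
 */
static inline struct v3d_perfmon *
example_perfmon_find(struct v3d_file_priv *v3d_priv, int id)
{
	struct v3d_perfmon *perfmon;

	mutex_lock(&v3d_priv->perfmon.lock);
	perfmon = idr_find(&v3d_priv->perfmon.idr, id);
	if (perfmon)
		refcount_inc(&perfmon->refcnt);
	mutex_unlock(&v3d_priv->perfmon.lock);

	return perfmon;
}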

struct v3d_bo {
	struct drm_gem_shmem_object base;

	struct drm_mm_node node;

	/* List entry for the BO's position in
	 * v3d_render_job->unref_list
	 */
	struct list_head unref_head;

	void *vaddr;
};
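
/*
 * Sketch of an allocation from the page-granular drm_mm range manager in
 * struct v3d_dev, placing a BO's MMU node under mm_lock. This mirrors the
 * shape of what the driver does at BO creation time (see v3d_bo.c);
 * example_alloc_va() is an illustrative name only.
 */
static inline int example_alloc_va(struct v3d_dev *v3d, struct v3d_bo *bo,
				   size_t size)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&v3d->mm_lock, flags);
	/* Units are pages, hence the V3D_MMU_PAGE_SHIFT conversions. */
	ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
					 size >> V3D_MMU_PAGE_SHIFT,
					 GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT,
					 0, 0);
	spin_unlock_irqrestore(&v3d->mm_lock, flags);

	return ret;
}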

static inline struct v3d_bo *
to_v3d_bo(struct drm_gem_object *bo)
{
	return (struct v3d_bo *)bo;
}

struct v3d_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* v3d seqno for signaled() test */
	u64 seqno;
	enum v3d_queue queue;
};

static inline struct v3d_fence *
to_v3d_fence(struct dma_fence *fence)
{
	return (struct v3d_fence *)fence;
}

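/* Register accessors. Note that these macros expect a local variable named
 * "v3d" (a struct v3d_dev *) to be in scope at the call site.
 */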
#define V3D_READ(offset) readl(v3d->hub_regs + offset)
#define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset)

#define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset)
#define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset)

#define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset)
#define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset)

#define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset)
#define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset)

struct v3d_job {
	struct drm_sched_job base;

	struct kref refcount;

	struct v3d_dev *v3d;

	/* This is the array of BOs that were looked up at the start
	 * of submission.
	 */
	struct drm_gem_object **bo;
	u32 bo_count;

	/* v3d fence to be signaled by IRQ handler when the job is complete. */
	struct dma_fence *irq_fence;

	/* scheduler fence for when the job is considered complete and
	 * the BO reservations can be released.
	 */
	struct dma_fence *done_fence;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct v3d_perfmon *perfmon;

	/* The drm_file of the process that submitted the job, which can be
	 * used to collect per-process GPU usage stats.
	 */
	struct drm_file *file;

	/* Callback for the freeing of the job on refcount going to 0. */
	void (*free)(struct kref *ref);
};
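
/*
 * Sketch of how the free callback above is driven: dropping the last
 * reference invokes job->free on the embedded kref. This matches the shape
 * of the driver's v3d_job_put(); example_job_put() is an illustrative name.
 */
static inline void example_job_put(struct v3d_job *job)
{
	kref_put(&job->refcount, job->free);
}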

struct v3d_bin_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* Corresponding render job, for attaching our overflow memory. */
	struct v3d_render_job *render;

	/* Submitted tile memory allocation start/size, tile state. */
	u32 qma, qms, qts;
};

struct v3d_render_job {
	struct v3d_job base;

	/* GPU virtual addresses of the start/end of the CL job. */
	u32 start, end;

	u32 timedout_ctca, timedout_ctra;

	/* List of overflow BOs used in the job that need to be
	 * released once the job is complete.
	 */
	struct list_head unref_list;
};

struct v3d_tfu_job {
	struct v3d_job base;

	struct drm_v3d_submit_tfu args;
};

struct v3d_csd_job {
	struct v3d_job base;

	u32 timedout_batches;

	struct drm_v3d_submit_csd args;
};

enum v3d_cpu_job_type {
	V3D_CPU_JOB_TYPE_INDIRECT_CSD = 1,
	V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY,
	V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY,
	V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY,
	V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY,
	V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY,
};

struct v3d_timestamp_query {
	/* Offset of this query in the timestamp BO for its value. */
	u32 offset;

	/* Syncobj that indicates the timestamp availability */
	struct drm_syncobj *syncobj;
};

/* Number of perfmons required to handle all supported performance counters */
#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_MAX_COUNTERS, \
				      DRM_V3D_MAX_PERF_COUNTERS)

struct v3d_performance_query {
	/* Performance monitor IDs for this query */
	u32 kperfmon_ids[V3D_MAX_PERFMONS];

	/* Syncobj that indicates the query availability */
	struct drm_syncobj *syncobj;
};

struct v3d_indirect_csd_info {
	/* Indirect CSD */
	struct v3d_csd_job *job;

	/* Clean cache job associated to the Indirect CSD job */
	struct v3d_job *clean_job;

	/* Offset within the BO where the workgroup counts are stored */
	u32 offset;

	/* Workgroup size */
	u32 wg_size;

	/* Indices of the uniforms with the workgroup dispatch counts
	 * in the uniform stream.
	 */
	u32 wg_uniform_offsets[3];

	/* Indirect BO */
	struct drm_gem_object *indirect;

	/* Context of the Indirect CSD job */
	struct ww_acquire_ctx acquire_ctx;
};

struct v3d_timestamp_query_info {
	struct v3d_timestamp_query *queries;

	u32 count;
};

struct v3d_performance_query_info {
	struct v3d_performance_query *queries;

	/* Number of performance queries */
	u32 count;

	/* Number of performance monitors related to that query pool */
	u32 nperfmons;

	/* Number of performance counters related to that query pool */
	u32 ncounters;
};

struct v3d_copy_query_results_info {
	/* Whether results should be written to the buffer as 64-bit or
	 * 32-bit values.
	 */
	bool do_64bit;

	/* Whether the buffer may be written even if the query is not
	 * available yet.
	 */
	bool do_partial;

	/* Whether the availability bit should be written to the buffer. */
	bool availability_bit;

	/* Offset of the copy buffer in the BO */
	u32 offset;

	/* Stride of the copy buffer in the BO */
	u32 stride;
};

struct v3d_cpu_job {
	struct v3d_job base;

	enum v3d_cpu_job_type job_type;

	struct v3d_indirect_csd_info indirect_csd;

	struct v3d_timestamp_query_info timestamp_query;

	struct v3d_copy_query_results_info copy;

	struct v3d_performance_query_info performance_query;
};

typedef void (*v3d_cpu_job_fn)(struct v3d_cpu_job *);

struct v3d_submit_outsync {
	struct drm_syncobj *syncobj;
};

struct v3d_submit_ext {
	u32 flags;
	u32 wait_stage;

	u32 in_sync_count;
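	/* Userspace pointer to an array of input syncobj handles. */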
	u64 in_syncs;

	u32 out_sync_count;
	struct v3d_submit_outsync *out_syncs;
};

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since
 * the timeout could be due to preemption or similar, and we may never have
 * had a chance to check the condition before it expired.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})

#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
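
/*
 * Hypothetical usage sketch for wait_for(): poll a status register until a
 * ready bit is set, giving up after 100 ms. V3D_EXAMPLE_STATUS and its
 * ready bit are made-up names for illustration; real callers poll actual
 * V3D registers through the accessors above.
 */
#define V3D_EXAMPLE_STATUS 0x00000 /* hypothetical register offset */

static inline int example_wait_ready(struct v3d_dev *v3d)
{
	/* Returns 0 once the bit is observed, or -ETIMEDOUT after 100 ms. */
	return wait_for(V3D_READ(V3D_EXAMPLE_STATUS) & BIT(0), 100);
}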

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if ((NSEC_PER_SEC % HZ) != 0 &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
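
/*
 * Illustrative sketch: clamp a userspace-supplied nanosecond timeout into a
 * jiffies value that is safe against overflow, then wait on a fence with it.
 * example_wait_fence() is a hypothetical helper; the shape follows the
 * driver's BO wait path.
 */
static inline long example_wait_fence(struct dma_fence *fence, u64 timeout_ns)
{
	unsigned long timeout = nsecs_to_jiffies_timeout(timeout_ns);

	/* Interruptible wait: returns remaining jiffies on success, 0 on
	 * timeout, or a negative error code.
	 */
	return dma_fence_wait_timeout(fence, true, timeout);
}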

/* v3d_bo.c */
struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
			     size_t size);
void v3d_get_bo_vaddr(struct v3d_bo *bo);
void v3d_put_bo_vaddr(struct v3d_bo *bo);
int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);

/* v3d_debugfs.c */
void v3d_debugfs_init(struct drm_minor *minor);

/* v3d_drv.c */
void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
		   u64 *active_runtime, u64 *jobs_completed);

/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);

/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_clean_caches(struct v3d_dev *v3d);

/* v3d_submit.c */
void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);

/* v3d_irq.c */
int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);

/* v3d_mmu.c */
int v3d_mmu_set_page_table(struct v3d_dev *v3d);
void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);

/* v3d_sched.c */
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);

/* v3d_perfmon.c */
void v3d_perfmon_get(struct v3d_perfmon *perfmon);
void v3d_perfmon_put(struct v3d_perfmon *perfmon);
void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon);
void v3d_perfmon_stop(struct v3d_dev *v3d, struct v3d_perfmon *perfmon,
		      bool capture);
struct v3d_perfmon *v3d_perfmon_find(struct v3d_file_priv *v3d_priv, int id);
void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv);
void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv);
int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);

/* v3d_sysfs.c */
int v3d_sysfs_init(struct device *dev);
void v3d_sysfs_destroy(struct device *dev);