/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
struct msm_file_private;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *  + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t *value, uint32_t *len);
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 uint32_t param, uint64_t value, uint32_t len);
	int (*hw_init)(struct msm_gpu *gpu);

	/**
	 * @ucode_load: Optional hook to upload fw to GEM objs
	 */
	int (*ucode_load)(struct msm_gpu *gpu);

	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
		     struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	/* note: gpu_busy() can assume that we have been pm_resumed */
	u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	/* note: gpu_set_freq() can assume that we have been pm_resumed */
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
			     bool suspended);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);

	/**
	 * progress: Has the GPU made progress?
	 *
	 * Return true if GPU position in cmdstream has advanced (or changed)
	 * since the last call.  To avoid false negatives, this should account
	 * for cmdstream that is buffered in any FIFO upstream of the CP fw.
	 */
	bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};
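
/*
 * As a rough sketch (the names below are hypothetical, not defined in this
 * header), a GPU backend typically fills in a static table of these hooks
 * and hands it to msm_gpu_init():
 *
 *	static const struct msm_gpu_funcs foo_gpu_funcs = {
 *		.hw_init = foo_hw_init,
 *		.pm_suspend = foo_pm_suspend,
 *		.pm_resume = foo_pm_resume,
 *		.submit = foo_submit,
 *		.flush = foo_flush,
 *		.irq = foo_irq,
 *		.active_ring = foo_active_ring,
 *		.recover = foo_recover,
 *		.destroy = foo_destroy,
 *	};
 */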

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/** lock: lock for "suspended", "busy_cycles", and "time" */
	struct mutex lock;

	/**
	 * idle_freq:
	 *
	 * Shadow frequency used while the GPU is idle.  From the PoV of
	 * the devfreq governor, we are continuing to sample busyness and
	 * adjust frequency while the GPU is idle, but we use this shadow
	 * value as the GPU is actually clamped to minimum frequency while
	 * it is inactive.
	 */
	unsigned long idle_freq;

	/**
	 * boost_freq:
	 *
	 * A PM QoS constraint to boost min freq for a period of time
	 * until the boost expires.
	 */
	struct dev_pm_qos_request boost_freq;

	/**
	 * busy_cycles: Last busy counter value, for calculating elapsed busy
	 * cycles since last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_work:
	 *
	 * Used to delay clamping to idle freq on active->idle transition.
	 */
	struct msm_hrtimer_work idle_work;

	/**
	 * boost_work:
	 *
	 * Used to reset the boost_freq constraint after the boost period
	 * has elapsed.
	 */
	struct msm_hrtimer_work boost_work;

	/** suspended: tracks if we're suspended */
	bool suspended;
};
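
/*
 * Putting the pieces above together (see also msm_devfreq_active(),
 * msm_devfreq_idle() and msm_devfreq_boost() declared later in this header):
 * on an active->idle transition, idle_work delays clamping the GPU to its
 * minimum frequency, with idle_freq standing in for the "real" frequency
 * from the governor's point of view, while boost_freq is a PM QoS min-freq
 * request that boost_work drops again once the boost period has elapsed.
 */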

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/**
	 * sysprof_active:
	 *
	 * The count of contexts that have enabled system profiling.
	 */
	refcount_t sysprof_active;

	/**
	 * cur_ctx_seqno:
	 *
	 * The ctx->seqno value of the last context to submit rendering,
	 * and the one with current pgtables installed (for generations
	 * that support per-context pgtables).  Tracked by seqno rather
	 * than pointer value to avoid dangling pointers, and cases where
	 * a ctx can be freed and a new one created with the same address.
	 */
	int cur_ctx_seqno;

	/**
	 * lock:
	 *
	 * General lock for serializing all the gpu things.
	 *
	 * TODO move to per-ring locking where feasible (ie. submit/retire
	 * path, etc)
	 */
	struct mutex lock;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/**
	 * global_faults: number of GPU hangs not attributed to a particular
	 * address space
	 */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/** retire_event: notified when submits are retired. */
	wait_queue_head_t retire_event;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	if (!adreno_smmu)
		return NULL;

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		 AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
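
/*
 * With the sizes above, the BUFSZ field works out to ilog2(SZ_32K / 8) == 12
 * and the BLKSZ field to ilog2(32 / 8) == 2.
 */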
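/*
 * Returns true if any ring still has submitted work that the GPU has not
 * yet retired, ie. the last submitted fence on the ring is ahead of the
 * last fence the GPU has written back to memptrs.
 */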
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter..  but msm_gpu base class
 * will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};

/*
 * The number of priority levels provided by drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
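/*
 * Assuming the drm_sched_priority enum at the time of writing (MIN, NORMAL,
 * HIGH, with KERNEL above them), this works out to 3 priority levels per ring.
 */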

/**
 * struct msm_file_private - per-drm_file context
 *
 * @queuelock:    synchronizes access to submitqueues list
 * @submitqueues: list of &msm_gpu_submitqueue created by userspace
 * @queueid:      counter incremented each time a submitqueue is created,
 *                used to assign &msm_gpu_submitqueue.id
 * @aspace:       the per-process GPU address-space
 * @ref:          reference count
 * @seqno:        unique per process seqno
 */
struct msm_file_private {
	rwlock_t queuelock;
	struct list_head submitqueues;
	int queueid;
	struct msm_gem_address_space *aspace;
	struct kref ref;
	int seqno;

	/**
	 * sysprof:
	 *
	 * The value of MSM_PARAM_SYSPROF set by userspace.  This is
	 * intended to be used by system profiling tools like Mesa's
	 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
	 *
	 * Setting a value of 1 will preserve performance counters across
	 * context switches.  Setting a value of 2 will in addition
	 * suppress suspend.  (Performance counters lose state across
	 * power collapse, which is undesirable for profiling in some
	 * cases.)
	 *
	 * The value automatically reverts to zero when the drm device
	 * file is closed.
	 */
	int sysprof;

	/**
	 * comm: Overridden task comm, see MSM_PARAM_COMM
	 *
	 * Accessed under msm_gpu::lock
	 */
	char *comm;

	/**
	 * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
	 *
	 * Accessed under msm_gpu::lock
	 */
	char *cmdline;

	/**
	 * elapsed_ns:
	 *
	 * The total (cumulative) elapsed time GPU was busy with rendering
	 * from this context in ns.
	 */
	uint64_t elapsed_ns;

	/**
	 * cycles:
	 *
	 * The total (cumulative) GPU cycles elapsed attributed to this
	 * context.
	 */
	uint64_t cycles;

	/**
	 * entities:
	 *
	 * Table of per-priority-level sched entities used by submitqueues
	 * associated with this &drm_file.  Because some userspace apps
	 * make assumptions about rendering from multiple gl contexts
	 * (of the same priority) within the process happening in FIFO
	 * order without requiring any fencing beyond MakeCurrent(), we
	 * create at most one &drm_sched_entity per-process per-priority-
	 * level.
	 */
	struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
};

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}
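
/*
 * Worked example (assuming NR_SCHED_PRIORITIES == 3): userspace prio 0, the
 * highest, maps to ring_nr 0 and sched_prio 2; userspace prio 4 maps to
 * ring_nr 1 and sched_prio 1, and is rejected with -EINVAL on a GPU with
 * fewer than two rings.
 */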

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:         userspace id for the submitqueue, unique within the drm_file
 * @flags:      userspace flags for the submitqueue, specified at creation
 *              (currently unused)
 * @ring_nr:    the ringbuffer used by this submitqueue, which is determined
 *              by the submitqueue's priority
 * @faults:     the number of GPU hangs associated with this submitqueue
 * @last_fence: the sequence number of the last allocated fence (for error
 *              checking)
 * @ctx:        the per-drm_file context associated with the submitqueue (ie.
 *              which set of pgtables do jobs submitted to the submitqueue
 *              use)
 * @node:       node in the context's list of submitqueues
 * @fence_idr:  maps fence-id to dma_fence for userspace visible fence
 *              seqno, protected by submitqueue lock
 * @idr_lock:   for serializing access to fence_idr
 * @lock:       submitqueue lock for serializing submits on a queue
 * @ref:        reference count
 * @entity:     the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	uint32_t last_fence;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct spinlock idr_lock;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity *entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
	char name[32];
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

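/*
 * Register accessors.  'reg' is a dword index into the GPU's MMIO region,
 * so it is shifted left by 2 to form a byte offset.
 */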
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
{
	u64 val;

	/*
	 * Why not a readq here? Two reasons: 1) many of the LO registers are
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * of a history of putting registers where they fit, especially in
	 * spins. The longer a GPU family goes the higher the chance that
	 * we'll get burned.  We could do a series of validity checks if we
	 * wanted to, but really is a readq() that much better? Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is latched
	 * when the lo is read, so make sure to read the lo first to trigger
	 * that
	 */
	val = (u64) msm_readl(gpu->mmio + (reg << 2));
	val |= ((u64) msm_readl(gpu->mmio + ((reg + 1) << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
	/* Why not a writeq here? Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (reg << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 struct drm_printer *p);

int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id);
int msm_submitqueue_create(struct drm_device *drm,
		struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id);
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);

void msm_submitqueue_destroy(struct kref *kref);

int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof);
void __msm_file_private_destroy(struct kref *kref);

static inline void msm_file_private_put(struct msm_file_private *ctx)
{
	kref_put(&ctx->ref, __msm_file_private_destroy);
}

static inline struct msm_file_private *msm_file_private_get(
	struct msm_file_private *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->lock);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->lock);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
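
/*
 * Illustrative use (the flag combination here is an example, not mandated by
 * this header): a kernel-owned buffer such as a ringbuffer can be allocated
 * with check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY), so MSM_BO_MAP_PRIV
 * is only added on hardware that has hw_apriv set.
 */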

#endif /* __MSM_GPU_H__ */