/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;

struct msm_gpu_config {
	const char *ioname;
	unsigned int nr_rings;
};
/* With the hardware seen to date, we can have:
 * + zero, one, or two z180 2d cores
 * + a3xx or a2xx 3d core, which share a common CP (the firmware
 *   for the CP seems to implement some different PM4 packet types
 *   but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 * + msm_gpu
 *   + adreno_gpu
 *     + a3xx_gpu
 *     + a2xx_gpu
 *   + z180_gpu
 */
struct msm_gpu_funcs {
	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
	int (*hw_init)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	irqreturn_t (*irq)(struct msm_gpu *gpu);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
			struct drm_printer *p);
	/* for generation specific debugfs: */
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
	struct msm_gem_address_space *(*create_address_space)
		(struct msm_gpu *gpu, struct platform_device *pdev);
	struct msm_gem_address_space *(*create_private_address_space)
		(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
};

/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * busy_cycles:
	 *
	 * Used by the implementation of gpu->gpu_busy() to track the last
	 * busy counter value, for calculating elapsed busy cycles since
	 * the last sampling period.
	 */
	u64 busy_cycles;

	/** time: Time of last sampling period. */
	ktime_t time;

	/** idle_time: Time of last transition to idle. */
	ktime_t idle_time;

	/**
	 * idle_freq:
	 *
	 * Shadow frequency used while the GPU is idle.  From the PoV of
	 * the devfreq governor, we are continuing to sample busyness and
	 * adjust frequency while the GPU is idle, but we use this shadow
	 * value as the GPU is actually clamped to minimum frequency while
	 * it is inactive.
	 */
	unsigned long idle_freq;
};

struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/**
	 * active_submits:
	 *
	 * The number of submitted but not yet retired submits, used to
	 * determine transitions between active and idle.
	 *
	 * Protected by active_lock
	 */
	int active_submits;

	/** active_lock: protects active_submits and idle/active transitions */
	struct mutex active_lock;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* number of GPU hangs (for all contexts) */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	struct msm_gpu_state *crashstate;

	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};

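/*
 * The GPU platform device's drvdata points at the embedded adreno_smmu_priv
 * (shared with the SMMU implementation), so the msm_gpu can be recovered
 * from a struct device with container_of():
 */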
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);

	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}

/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))

static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

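		/* more recently submitted (seqno) than completed (fence)? */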
		if (ring->seqno > ring->memptrs->fence)
			return true;
	}

	return false;
}

/* Perf-Counters:
 * The select_reg and select_val are there for the benefit of the child
 * class that actually enables the perf counter; the msm_gpu base class
 * handles sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;
	uint32_t sample_reg;
	uint32_t select_val;
	const char *name;
};
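
/*
 * For example, a child class might describe one hw counter like this
 * (register/select names illustrative, in the style of the a3xx code):
 *
 *	static const struct msm_gpu_perfcntr perfcntrs[] = {
 *		{ REG_A3XX_SP_PERFCOUNTER6_SELECT,
 *		  REG_A3XX_RBBM_PERFCTR_SP_6_LO,
 *		  SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
 *	};
 */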

/*
 * The number of priority levels provided by the drm gpu scheduler.  The
 * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
 * cases, so we don't use it (no need for kernel generated jobs).
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)

/**
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 *
 * @gpu:        the gpu instance
 * @prio:       the userspace priority level
 * @ring_nr:    [out] the ringbuffer the userspace priority maps to
 * @sched_prio: [out] the gpu scheduler priority level which the userspace
 *              priority maps to
 *
 * With drm/scheduler providing its own level of prioritization, our total
 * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
 * Each ring is associated with its own scheduler instance.  However, our
 * UABI is that lower numerical values are higher priority.  So mapping the
 * single userspace priority level into ring_nr and sched_prio takes some
 * care.  The userspace provided priority (when a submitqueue is created)
 * is mapped to ring nr and scheduler priority as such:
 *
 *   ring_nr    = userspace_prio / NR_SCHED_PRIORITIES
 *   sched_prio = NR_SCHED_PRIORITIES -
 *                (userspace_prio % NR_SCHED_PRIORITIES) - 1
 *
 * This allows generations without preemption (nr_rings==1) to have some
 * amount of prioritization, and provides more priority levels for gens
 * that do have preemption.
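 *
 * For example (hypothetical values, assuming NR_SCHED_PRIORITIES == 3 and
 * nr_rings == 2): userspace prio 0 maps to ring_nr=0/sched_prio=2 (the
 * highest), prio 3 maps to ring_nr=1/sched_prio=2, and prio 5 maps to
 * ring_nr=1/sched_prio=0 (the lowest).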
 */
static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
		unsigned *ring_nr, enum drm_sched_priority *sched_prio)
{
	unsigned rn, sp;

	rn = div_u64_rem(prio, NR_SCHED_PRIORITIES, &sp);

	/* invert sched priority to map to higher-numeric-is-higher-
	 * priority convention
	 */
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;

	*ring_nr = rn;
	*sched_prio = sp;

	return 0;
}

/**
 * struct msm_gpu_submitqueue - Userspace created context.
 *
 * A submitqueue is associated with a gl context or vk queue (or equiv)
 * in userspace.
 *
 * @id:        userspace id for the submitqueue, unique within the drm_file
 * @flags:     userspace flags for the submitqueue, specified at creation
 *             (currently unused)
 * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
 *             by the submitqueue's priority
 * @faults:    the number of GPU hangs associated with this submitqueue
 * @ctx:       the per-drm_file context associated with the submitqueue (ie.
 *             which set of pgtables do jobs submitted via this submitqueue
 *             use)
 * @node:      node in the context's list of submitqueues
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 *             seqno, protected by submitqueue lock
 * @lock:      submitqueue lock
 * @ref:       reference count
 * @entity:    the submit job-queue
 */
struct msm_gpu_submitqueue {
	int id;
	u32 flags;
	u32 ring_nr;
	int faults;
	struct msm_file_private *ctx;
	struct list_head node;
	struct idr fence_idr;
	struct mutex lock;
	struct kref ref;
	struct drm_sched_entity entity;
};

struct msm_gpu_state_bo {
	u64 iova;
	size_t size;
	void *data;
	bool encoded;
};

struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;

	struct {
		u64 iova;
		u32 fence;
		u32 seqno;
		u32 rptr;
		u32 wptr;
		void *data;
		int data_size;
		bool encoded;
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;

	u32 rbbm_status;

	char *comm;
	char *cmd;

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;
};

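/*
 * Register accessors: 'reg' is a dword offset into the GPU's mmio region,
 * hence the << 2 to convert it to a byte offset:
 */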
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
	msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
	return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
	msm_rmw(gpu->mmio + (reg << 2), mask, or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
	u64 val;

	/*
	 * Why not a readq here?  Two reasons: 1) many of the LO registers
	 * are not quad word aligned and 2) the GPU hardware designers have
	 * a bit of a history of putting registers where they fit, especially
	 * in spins.  The longer a GPU family lives, the higher the chance
	 * that we'll get burned.  We could do a series of validity checks
	 * if we wanted to, but really, is a readq() that much better?  Nah.
	 */

	/*
	 * For some lo/hi registers (like perfcounters), the hi value is
	 * latched when the lo is read, so make sure to read the lo first
	 * to trigger that.
	 */
	val = (u64) msm_readl(gpu->mmio + (lo << 2));
	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

	return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
	/* Why not a writeq here?  Read the screed above */
	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config);

struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
{
	if (queue)
		kref_put(&queue->ref, msm_submitqueue_destroy);
}

static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = NULL;

	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;
	}

	mutex_unlock(&gpu->dev->struct_mutex);

	return state;
}

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
	mutex_lock(&gpu->dev->struct_mutex);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;
	}

	mutex_unlock(&gpu->dev->struct_mutex);
}

/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
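
/*
 * Illustrative use, as when allocating a kernel-owned buffer (exact call
 * sites vary; names here follow msm_gem):
 *
 *	ring->start = msm_gem_kernel_new(gpu->dev, size,
 *			check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
 *			gpu->aspace, &ring->bo, &ring->iova);
 */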

#endif /* __MSM_GPU_H__ */