// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/fault-inject.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
#include "disp/msm_disp_snapshot.h"

/*
 * GPU Snapshot:
 */

struct msm_gpu_show_priv {
	struct msm_gpu_state *state;
	struct drm_device *dev;
};

static int msm_gpu_show(struct seq_file *m, void *arg)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct msm_gpu_show_priv *show_priv = m->private;
	struct msm_drm_private *priv = show_priv->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret;

	ret = mutex_lock_interruptible(&gpu->lock);
	if (ret)
		return ret;

	drm_printf(&p, "%s Status:\n", gpu->name);
	gpu->funcs->show(gpu, show_priv->state, &p);

	mutex_unlock(&gpu->lock);

	return 0;
}

static int msm_gpu_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct msm_gpu_show_priv *show_priv = m->private;
	struct msm_drm_private *priv = show_priv->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	mutex_lock(&gpu->lock);
	gpu->funcs->gpu_state_put(show_priv->state);
	mutex_unlock(&gpu->lock);

	kfree(show_priv);

	return single_release(inode, file);
}

static int msm_gpu_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_show_priv *show_priv;
	int ret;

	if (!gpu || !gpu->funcs->gpu_state_get)
		return -ENODEV;

	show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
	if (!show_priv)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&gpu->lock);
	if (ret)
		goto free_priv;

	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_hw_init(gpu);
	show_priv->state = gpu->funcs->gpu_state_get(gpu);
	pm_runtime_put_sync(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);

	if (IS_ERR(show_priv->state)) {
		ret = PTR_ERR(show_priv->state);
		goto free_priv;
	}

	show_priv->dev = dev;

	ret = single_open(file, msm_gpu_show, show_priv);
	if (ret)
		goto free_priv;

	return 0;

free_priv:
	kfree(show_priv);
	return ret;
}

static const struct file_operations msm_gpu_fops = {
	.owner = THIS_MODULE,
	.open = msm_gpu_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = msm_gpu_release,
};
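
/*
 * Reading the "gpu" file captures a fresh GPU state snapshot at open time
 * (the device is powered up just for the capture), prints it through the
 * per-GPU ->show() callback, and drops the snapshot reference on release.
 * From userspace this is typically just:
 *
 *	cat /sys/kernel/debug/dri/<minor>/gpu
 */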

#ifdef CONFIG_DRM_MSM_KMS
static int msm_fb_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (dev->fb_helper && dev->fb_helper->fb) {
		seq_puts(m, "fbcon ");
		fbdev_fb = dev->fb_helper->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_puts(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static struct drm_info_list msm_kms_debugfs_list[] = {
	{ "fb", msm_fb_show },
};

/*
 * Display Snapshot:
 */

static int msm_kms_show(struct seq_file *m, void *arg)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct msm_disp_state *state = m->private;

	msm_disp_state_print(state, &p);

	return 0;
}

static int msm_kms_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct msm_disp_state *state = m->private;

	msm_disp_state_free(state);

	return single_release(inode, file);
}

static int msm_kms_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_disp_state *state;
	int ret;

	if (!priv->kms)
		return -ENODEV;

	ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
	if (ret)
		return ret;

	state = msm_disp_snapshot_state_sync(priv->kms);

	mutex_unlock(&priv->kms->dump_mutex);

	if (IS_ERR(state))
		return PTR_ERR(state);

	ret = single_open(file, msm_kms_show, state);
	if (ret) {
		msm_disp_state_free(state);
		return ret;
	}

	return 0;
}

static const struct file_operations msm_kms_fops = {
	.owner = THIS_MODULE,
	.open = msm_kms_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = msm_kms_release,
};
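
/*
 * The "kms" file works like the "gpu" file: opening it synchronously captures
 * a display snapshot, the show callback prints it, and the snapshot is freed
 * again on release.
 */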

static void msm_debugfs_kms_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	drm_debugfs_create_files(msm_kms_debugfs_list,
				 ARRAY_SIZE(msm_kms_debugfs_list),
				 minor->debugfs_root, minor);
	debugfs_create_file("kms", 0400, minor->debugfs_root,
			    dev, &msm_kms_fops);

	if (priv->kms->funcs->debugfs_init)
		priv->kms->funcs->debugfs_init(priv->kms, minor);
}
#else /* ! CONFIG_DRM_MSM_KMS */
static void msm_debugfs_kms_init(struct drm_minor *minor)
{
}
#endif

/*
 * Other debugfs:
 */

static unsigned long last_shrink_freed;

static int
shrink_get(void *data, u64 *val)
{
	*val = last_shrink_freed;

	return 0;
}

static int
shrink_set(void *data, u64 val)
{
	struct drm_device *dev = data;

	last_shrink_freed = msm_gem_shrinker_shrink(dev, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
			 shrink_get, shrink_set,
			 "0x%08llx\n");
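
/*
 * Writing a value to the "shrink" file runs the GEM shrinker right away with
 * that value as the scan target; reading the file back returns how much the
 * last forced pass managed to reclaim.
 */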

/*
 * Return the number of microseconds to wait until stall-on-fault is
 * re-enabled. If 0 then it is already enabled or will be re-enabled on the
 * next submit (unless there's a leftover devcoredump). This is useful for
 * kernel tests that intentionally produce a fault and check the devcoredump,
 * so that they can wait until the cooldown period is over.
 */

static int
stall_reenable_time_get(void *data, u64 *val)
{
	struct msm_drm_private *priv = data;
	unsigned long irq_flags;

	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);

	if (priv->stall_enabled)
		*val = 0;
	else
		*val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0);

	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops,
			 stall_reenable_time_get, NULL,
			 "%lld\n");

static int msm_gem_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&priv->obj_lock);
	if (ret)
		return ret;

	msm_gem_describe_objects(&priv->objects, m);

	mutex_unlock(&priv->obj_lock);

	return 0;
}

static int msm_mm_show(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);

	return 0;
}

static struct drm_info_list msm_debugfs_list[] = {
	{ "gem", msm_gem_show },
	{ "mm", msm_mm_show },
};
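
/*
 * Entries that depend on the GPU having been probed ("rd" cmdstream logging
 * and "perf" counter sampling) are installed later, on both the primary and
 * render minors, via msm_debugfs_late_init().
 */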

static int late_init_minor(struct drm_minor *minor)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int ret;

	if (!minor)
		return 0;

	dev = minor->dev;
	priv = dev->dev_private;

	if (!priv->gpu_pdev)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}

int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;

	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	return ret;
}
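
/*
 * Per-GPU debugfs entries: the "gpu" state snapshot file, hangcheck and
 * error-IRQ controls, the stall-on-fault re-enable timer, and the devfreq
 * tunables grouped under a "devfreq" directory.
 */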

static void msm_debugfs_gpu_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct dentry *gpu_devfreq;

	debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
			    dev, &msm_gpu_fops);

	debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
			   &priv->hangcheck_period);

	debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
			    &priv->disable_err_irq);

	debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root,
			    priv, &stall_reenable_time_fops);

	gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);

	debugfs_create_bool("idle_clamp", 0600, gpu_devfreq,
			    &priv->gpu_clamp_to_idle);

	debugfs_create_u32("upthreshold", 0600, gpu_devfreq,
			   &priv->gpu_devfreq_config.upthreshold);

	debugfs_create_u32("downdifferential", 0600, gpu_devfreq,
			   &priv->gpu_devfreq_config.downdifferential);
}

void msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	drm_debugfs_create_files(msm_debugfs_list,
				 ARRAY_SIZE(msm_debugfs_list),
				 minor->debugfs_root, minor);

	if (priv->gpu_pdev)
		msm_debugfs_gpu_init(minor);

	if (priv->kms)
		msm_debugfs_kms_init(minor);

	debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
			    dev, &shrink_fops);

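	/*
	 * Fault injection points for GEM object allocation and iova mapping;
	 * the standard fault-injection knobs (probability, interval, times,
	 * ...) show up under the directories created here.
	 */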
	fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
				  &fail_gem_alloc);
	fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
				  &fail_gem_iova);
}
#endif