// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

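/*
 * devfreq ->target callback: map the requested frequency onto the
 * nearest supported OPP and program it, either via the target's own
 * gpu_set_freq() hook (if it manages its own clocks, e.g. through a
 * GMU) or by setting the core clock rate directly.
 */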
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
		u32 flags)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));

	if (gpu->funcs->gpu_set_freq)
		gpu->funcs->gpu_set_freq(gpu, opp);
	else
		clk_set_rate(gpu->core_clk, *freq);

	dev_pm_opp_put(opp);

	return 0;
}

static int msm_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *status)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);
	ktime_t time;

	if (gpu->funcs->gpu_get_freq)
		status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
	else
		status->current_frequency = clk_get_rate(gpu->core_clk);

	status->busy_time = gpu->funcs->gpu_busy(gpu);

	time = ktime_get();
	status->total_time = ktime_us_delta(time, gpu->devfreq.time);
	gpu->devfreq.time = time;

	return 0;
}

static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	if (gpu->funcs->gpu_get_freq)
		*freq = gpu->funcs->gpu_get_freq(gpu);
	else
		*freq = clk_get_rate(gpu->core_clk);

	return 0;
}

static struct devfreq_dev_profile msm_devfreq_profile = {
	.polling_ms = 10,
	.target = msm_devfreq_target,
	.get_dev_status = msm_devfreq_get_dev_status,
	.get_cur_freq = msm_devfreq_get_cur_freq,
};

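/*
 * Register the GPU with devfreq and, through it, the thermal framework.
 * Failures here are deliberately non-fatal: the GPU still works without
 * frequency scaling or a cooling device, just less efficiently.
 */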
static void msm_devfreq_init(struct msm_gpu *gpu)
{
	/* We need the gpu_busy() hook for load tracking to do devfreq */
	if (!gpu->funcs->gpu_busy)
		return;

	msm_devfreq_profile.initial_freq = gpu->fast_rate;

	/*
	 * Don't set the freq_table or max_state and let devfreq build the
	 * table from the OPP table.  After a deferred probe these may have
	 * been left at non-zero values, so reset them before creating the
	 * devfreq device.
	 */
	msm_devfreq_profile.freq_table = NULL;
	msm_devfreq_profile.max_state = 0;

	gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
			&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);

	if (IS_ERR(gpu->devfreq.devfreq)) {
		DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
		gpu->devfreq.devfreq = NULL;
		return;
	}

	devfreq_suspend_device(gpu->devfreq.devfreq);

	gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
			gpu->devfreq.devfreq);
	if (IS_ERR(gpu->cooling)) {
		DRM_DEV_ERROR(&gpu->pdev->dev,
				"Couldn't register GPU cooling device\n");
		gpu->cooling = NULL;
	}
}

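/*
 * Power rail / clock / AXI bring-up helpers.  Enable order in
 * msm_gpu_pm_resume() is rails -> clocks -> AXI; msm_gpu_pm_suspend()
 * tears them down in the reverse order.
 */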
static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate.  On older targets the
	 * clock speed had to be non-zero to avoid problems.  On newer targets
	 * this will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
{
	gpu->devfreq.busy_cycles = 0;
	gpu->devfreq.time = ktime_get();

	devfreq_resume_device(gpu->devfreq.devfreq);
}

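/*
 * Power up the GPU: rails, then clocks, then the AXI interface, and
 * resume devfreq accounting.  Actual hw re-init is deferred until
 * msm_gpu_hw_init(); here we only mark it as needed.  Typically reached
 * via runtime PM, e.g. (illustrative only):
 *
 *	pm_runtime_get_sync(&gpu->pdev->dev);
 *	... touch the hw ...
 *	pm_runtime_put_sync(&gpu->pdev->dev);
 */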
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_gpu_resume_devfreq(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	devfreq_suspend_device(gpu->devfreq.devfreq);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}

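/*
 * (Re)initialize the hardware if a resume has happened since the last
 * hw_init.  The IRQ is masked across ->hw_init() so the handler cannot
 * run against partially initialized hardware.  Caller must hold
 * struct_mutex.
 */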
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

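/*
 * Signal the fences of all submits on @ring up to and including @fence.
 * ring->submits is kept in seqno order, so we can stop at the first
 * submit that is still beyond the completed fence.
 */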
static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		uint32_t fence)
{
	struct msm_gem_submit *submit;

	spin_lock(&ring->submit_lock);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno > fence)
			break;

		msm_update_fence(submit->ring->fctx,
			submit->fence->seqno);
	}
	spin_unlock(&ring->submit_lock);
}

#ifdef CONFIG_DEV_COREDUMP
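/*
 * devcoredump read callback: format the saved crash state through a
 * drm_coredump_printer.  Userspace may read the dump in chunks, so this
 * can be called repeatedly with an advancing @offset.
 */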
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct msm_gem_object *obj, u64 iova, u32 flags)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	/* Always record the size and iova of the bo */
	state_bo->size = obj->base.size;
	state_bo->iova = iova;

	/* Only store data for non imported buffer objects marked for read */
	if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
		void *ptr;

		state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		msm_gem_lock(&obj->base);
		ptr = msm_gem_get_vaddr_active(&obj->base);
		msm_gem_unlock(&obj->base);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->base.size);
		msm_gem_put_vaddr(&obj->base);
	}
out:
	state->nr_bos++;
}

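/*
 * Capture GPU state for a devcoredump.  Only one crash state is kept at
 * a time: until the previous dump is read (and freed) by userspace,
 * subsequent hangs are not captured.
 */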
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);

	if (submit) {
		int i, nr = 0;

		/* count # of buffers to dump: */
		for (i = 0; i < submit->nr_bos; i++)
			if (should_dump(submit, i))
				nr++;
		/* always dump cmd bo's, but don't double count them: */
		for (i = 0; i < submit->nr_cmds; i++)
			if (!should_dump(submit, submit->cmd[i].idx))
				nr++;

		state->bos = kcalloc(nr,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; i < submit->nr_bos; i++) {
			if (should_dump(submit, i)) {
				msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
					submit->bos[i].iova, submit->bos[i].flags);
			}
		}

		for (i = 0; state->bos && i < submit->nr_cmds; i++) {
			int idx = submit->cmd[i].idx;

			if (!should_dump(submit, submit->cmd[i].idx)) {
				msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
					submit->bos[idx].iova, submit->bos[idx].flags);
			}
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;

	spin_lock(&ring->submit_lock);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock(&ring->submit_lock);
			return submit;
		}
	}
	spin_unlock(&ring->submit_lock);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

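/*
 * Hang recovery, run from the worker kthread: identify and dump the
 * offending submit, capture crash state, fast-forward fences past the
 * hung submit, reset the GPU, and replay any submits that were still
 * queued behind it.
 */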
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&dev->struct_mutex);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		struct task_struct *task;

		/* Increment the fault counts */
		gpu->global_faults++;
		submit->queue->faults++;

		task = get_pid_task(submit->pid, PIDTYPE_PID);
		if (task) {
			comm = kstrdup(task->comm, GFP_KERNEL);
			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
			put_task_struct(task);
		}

		/* msm_rd_dump_submit() needs bo locked to dump: */
		for (i = 0; i < submit->nr_bos; i++)
			msm_gem_lock(&submit->bos[i].obj->base);

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}

		for (i = 0; i < submit->nr_bos; i++)
			msm_gem_unlock(&submit->bos[i].obj->base);
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			fence++;

		update_fences(gpu, ring, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];

			spin_lock(&ring->submit_lock);
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit);
			spin_unlock(&ring->submit_lock);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

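/*
 * Periodic hangcheck: if the completed fence has not advanced since the
 * last check while submits are still outstanding, assume a lockup and
 * queue recover_work.
 */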
static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
	} else if (fence < ring->seqno) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, ring->seqno);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (ring->seqno > ring->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	kthread_queue_work(gpu->worker, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

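/*
 * Userspace-driven perfcntr sampling.  The expected calling sequence
 * (illustrative sketch) is:
 *
 *	msm_gpu_perfcntr_start(gpu);
 *	...
 *	ret = msm_gpu_perfcntr_sample(gpu, &active, &total, ncntrs, cntrs);
 *	...
 *	msm_gpu_perfcntr_stop(gpu);
 *
 * start() holds a runtime PM reference until stop(), and sample()
 * returns -EINVAL if sampling hasn't been started.
 */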
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

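/*
 * Retire a completed submit: compute timing stats from the always-on
 * counter samples the CP wrote into the memptrs, drop the per-bo
 * references and iova pins taken at submit time, and release the
 * runtime PM reference.
 */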
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0;
	int i;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2 MHz alwayson ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
		do_div(clock, elapsed);
	}

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		msm_gem_active_put(obj);
		msm_gem_unpin_iova_locked(obj, submit->aspace);
		msm_gem_unlock(obj);
		drm_gem_object_put(obj);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);

	spin_lock(&ring->submit_lock);
	list_del(&submit->node);
	spin_unlock(&ring->submit_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;

			spin_lock(&ring->submit_lock);
			submit = list_first_entry_or_null(&ring->submits,
					struct msm_gem_submit, node);
			spin_unlock(&ring->submit_lock);

			/*
			 * If no submit, we are done.  If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/*
 * Add bo's to gpu's ring, and kick gpu.  Caller must hold struct_mutex;
 * each bo gets a reference and an iova pin that are held until the
 * submit is retired, and the submit's fence is attached to each bo's
 * reservation object so other users can wait on it.
 */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = ++ring->seqno;

	msm_rd_dump_submit(priv->rd, submit, NULL);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		struct drm_gem_object *drm_obj = &msm_obj->base;
		uint64_t iova;

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_get(&msm_obj->base);
		msm_gem_get_and_pin_iova_locked(&msm_obj->base, submit->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_shared_fence(drm_obj->resv, submit->fence);

		msm_gem_active_get(drm_obj, gpu);
	}

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock(&ring->submit_lock);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock(&ring->submit_lock);

	gpu->funcs->submit(gpu, submit);
	priv->lastctx = submit->queue->ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

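/*
 * Get all clocks from DT with devm_clk_bulk_get_all(), then look up the
 * "core" and "rbbmtimer" clocks which are also managed individually.
 * Note that finding zero clocks (ret == 0) is treated as success.
 */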
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
	struct msm_gem_address_space *aspace = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_address_space) {
		aspace = gpu->funcs->create_private_address_space(gpu);
		if (!IS_ERR(aspace))
			aspace->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(aspace))
		aspace = msm_gem_address_space_get(gpu->aspace);

	return aspace;
}

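/*
 * One-time setup of a GPU instance: worker kthread, hangcheck timer,
 * register mapping, IRQ, clocks, regulators, devfreq, address space,
 * the shared memptrs bo, and the ringbuffer(s).  On error, anything
 * already set up is torn down via the fail: path.
 */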
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_create_worker(0, "%s-worker", gpu->name);
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	INIT_LIST_HEAD(&gpu->active_list);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

	if (gpu->aspace == NULL)
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

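/*
 * Tear down in roughly the reverse order of msm_gpu_init().  The GPU is
 * expected to be idle here; active objects still on active_list would
 * indicate a bug.
 */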
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
		msm_gem_address_space_put(gpu->aspace);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	devfreq_cooling_unregister(gpu->cooling);
}
1063}