/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

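/*
 * Illustrative sketch of the sequence-number bookkeeping used below
 * (assuming num_hw_submission = 2, i.e. 4 fence slots and
 * num_fences_mask = 3):
 *
 *	seq:	1  2  3  4  5  ...
 *	slot:	1  2  3  0  1  ...	(slot = seq & num_fences_mask)
 *
 * A fence with sequence number seq lives in fences[seq & num_fences_mask],
 * so emitting seq 5 must first wait for the fence in slot 1 (seq 1) to
 * signal before the slot can be reused.
 */
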
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev->ddev->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}

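/*
 * Usage sketch for amdgpu_fence_emit() (illustrative only, assuming a
 * valid initialized ring; the real callers are the command-submission
 * and job paths):
 *
 *	struct dma_fence *f;
 *
 *	r = amdgpu_fence_emit(ring, &f, 0);
 *	if (r)
 *		return r;
 *	r = dma_fence_wait(f, false);	// false: non-interruptible wait
 *	dma_fence_put(f);		// drop the reference emit gave us
 */
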
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling fences.
 * Returns 0 on success, -EINVAL if @s is NULL.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;
	int r;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
	} while (last_seq != seq);

	return true;
}

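/*
 * amdgpu_fence_process() is typically invoked from the per-IP fence
 * interrupt handlers and from the fallback timer below. A hardware
 * fence IRQ handler reduces to something like this hypothetical
 * sketch (names are illustrative, not a real handler):
 *
 *	static int example_fence_irq(struct amdgpu_device *adev,
 *				     struct amdgpu_irq_src *source,
 *				     struct amdgpu_iv_entry *entry)
 *	{
 *		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
 *		return 0;
 *	}
 */
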
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the fence value reaches @wait_seq or the timeout
 * expires (all asics).
 * Returns the remaining time if the wait completed in time, 0 otherwise.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
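
/*
 * Usage sketch for the polling pair above (illustrative only, assuming
 * a valid ring; real callers are paths that cannot sleep, e.g. KIQ
 * register access under SR-IOV):
 *
 *	uint32_t seq;
 *	signed long left;
 *
 *	r = amdgpu_fence_emit_polling(ring, &seq);
 *	if (r)
 *		return r;
 *	left = amdgpu_fence_wait_polling(ring, seq, timeout);
 *	if (left == 0)
 *		return -ETIMEDOUT;	// fence value never reached seq
 */
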
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

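/*
 * A worked example of the wrap-safe arithmetic above: with
 * last_seq = 0xfffffffe and sync_seq = 0x00000001 (the 32-bit counter
 * has wrapped), emitted = 0x100000000 - 0xfffffffe + 0x1 = 0x3, so
 * three fences are outstanding even though sync_seq < last_seq
 * numerically.
 */
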
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
		      "0x%016llx, cpu addr 0x%p\n", ring->name,
		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 on success, error code on failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	struct amdgpu_device *adev = ring->adev;
	long timeout;
	int r;

	if (!adev)
		return -EINVAL;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		switch (ring->funcs->type) {
		case AMDGPU_RING_TYPE_GFX:
			timeout = adev->gfx_timeout;
			break;
		case AMDGPU_RING_TYPE_COMPUTE:
			timeout = adev->compute_timeout;
			break;
		case AMDGPU_RING_TYPE_SDMA:
			timeout = adev->sdma_timeout;
			break;
		default:
			timeout = adev->video_timeout;
			break;
		}

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

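/*
 * Illustrative ring-setup sketch (assumptions: a hypothetical ring with
 * 32 hardware submission slots and an already-known irq_src/irq_type;
 * the real call sites are the per-IP ring init paths):
 *
 *	r = amdgpu_fence_driver_init_ring(ring, 32);	// 32 is a power of two
 *	if (r)
 *		return r;
 *	...
 *	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 */
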
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be signaled
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and enables
 * signalling on the fence; it arms the fallback timer so the fence
 * is still checked even if the fence interrupt is never delivered.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);

	kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * @m: output seq_file
 * @data: unused
 *
 * Manually trigger a gpu reset & recover.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return 0;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}
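
/*
 * Once registered, the fence state can be inspected from userspace,
 * e.g. (assuming debugfs is mounted at /sys/kernel/debug and the GPU
 * is DRM minor 0):
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_fence_info
 *
 * Reading amdgpu_gpu_recover from the same directory triggers a manual
 * GPU reset and recovery.
 */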