/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to identify the IP block
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

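	/* program the semaphore wait/signal timeout limits */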
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to identify the IP block
 *
 * Stop the UVD block, mark the ring as not ready anymore
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
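	/* the cache window offsets below are shifted right by 3, which
	 * suggests the hardware takes them in 8-byte units
	 */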
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			UVD_SUVD_CGC_GATE__SIT_MASK |
			UVD_SUVD_CGC_GATE__SMP_MASK |
			UVD_SUVD_CGC_GATE__SCM_MASK |
			UVD_SUVD_CGC_GATE__SDB_MASK |
			UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

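		/* the VCPU firmware appears to report that it is up by
		 * setting bit 1 of UVD_STATUS (the value 2 tested below);
		 * poll for it before giving up and resetting the VCPU
		 */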
		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

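	/* write the fence: the sequence number goes into the context ID,
	 * the destination address via GPCOM DATA0/DATA1, then command 0
	 */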
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

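	/* follow up with a trap command (2) so that an interrupt is
	 * raised once the fence value has been written
	 */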
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
	amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

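	/* seed the register with a sentinel, then let the ring overwrite it */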
	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: VMID the indirect buffer belongs to
 * @ctx_switch: unused for UVD
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vm_id);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	uint32_t reg;

	if (vm_id < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;

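	/* point the per-VMID page table base register at the new page
	 * directory (command 0x8 appears to be a register write)
	 */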
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, pd_addr >> 12);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

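	/* request a TLB flush for this VMID */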
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 1 << vm_id);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);

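	/* wait for the flush to complete: poll VM_INVALIDATE_REQUEST,
	 * masked by GP_SCRATCH8, until the bit for this VMID clears
	 * (command 0xC appears to be a register wait)
	 */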
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vm_id); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

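	/* stall until the fence value at addr reaches seq: mask in
	 * GP_SCRATCH8, compare value in GP_SCRATCH9 (command 0xE
	 * appears to be a memory wait)
	 */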
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.nop = PACKET0(mmUVD_NO_OP, 0),
	.support_64bit_ptrs = false,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		2 + /* uvd_v6_0_ring_emit_hdp_flush */
		2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		20 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};