/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0		0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1		0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2		0x8618

#define VCE_V3_0_FW_SIZE	(384 * 1024)
#define VCE_V3_0_STACK_SIZE	(64 * 1024)
#define VCE_V3_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))

static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v3_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v3_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v3_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}

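/**
 * vce_v3_0_override_vce_clock_gating - toggle the clock gating override
 *
 * @adev: amdgpu_device pointer
 * @override: enable/disable the override
 *
 * Sets or clears the CGTT override bit in VCE_RB_ARB_CTRL so the
 * clock gating registers can be reprogrammed by software.
 */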
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(mmVCE_RB_ARB_CTRL, data);
}

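/**
 * vce_v3_0_set_vce_sw_clock_gating - program VCE sw clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: true to force the clocks off, false to force them on
 *
 * Forces the VCE UENC and DMA clocks either on or into dynamic/off
 * mode; the clock gating override is asserted for the duration of
 * the update and released again at the end.
 */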
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 tmp, data;

	/* Set Override to disable Clock Gating */
	vce_v3_0_override_vce_clock_gating(adev, true);

	if (!gated) {
		/* Force CLOCK ON for VCE_CLOCK_GATING_B,
		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
		 * VREG can be FORCE ON or set to Dynamic, but can't be OFF
		 */
		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
		data |= 0x1ff;
		data &= ~0xef0000;
		if (tmp != data)
			WREG32(mmVCE_CLOCK_GATING_B, data);

		/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
		 * {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
		 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0x3ff000;
		data &= ~0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING, data);

		/* set VCE_UENC_CLOCK_GATING_2 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x2;
		data &= ~0x00010000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data |= 0x37f;
		if (tmp != data)
			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		if (tmp != data)
			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	} else {
		/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
		 * {*, *_FORCE_OFF} = {*, 1}
		 * set VREG to Dynamic, as it can't be OFF
		 */
		tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
		data &= ~0x80010;
		data |= 0xe70008;
		if (tmp != data)
			WREG32(mmVCE_CLOCK_GATING_B, data);

		/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
		 * Force CLOCK OFF takes precedence over Force CLOCK ON setting.
		 * {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
		 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
		data |= 0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING, data);

		/* Set VCE_UENC_CLOCK_GATING_2 */
		tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
		data |= 0x10000;
		if (tmp != data)
			WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

		/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
		tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		data &= ~0xffc00000;
		if (tmp != data)
			WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

		/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
		tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		if (tmp != data)
			WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
	}

	vce_v3_0_override_vce_clock_gating(adev, false);
}

/**
 * vce_v3_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int idx, i, j, r;

	mutex_lock(&adev->grbm_idx_mutex);
	for (idx = 0; idx < 2; ++idx) {

		if (adev->vce.harvest_config & (1 << idx))
			continue;

		if (idx == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				 GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		vce_v3_0_mc_resume(adev, idx);

		/* set BUSY flag */
		WREG32_P(mmVCE_STATUS, 1, ~1);
		if (adev->asic_type >= CHIP_STONEY)
			WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
		else
			WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
				 ~VCE_VCPU_CNTL__CLK_EN_MASK);

		WREG32_P(mmVCE_SOFT_RESET,
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		mdelay(100);

		WREG32_P(mmVCE_SOFT_RESET, 0,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

		for (i = 0; i < 10; ++i) {
			uint32_t status;
			for (j = 0; j < 100; ++j) {
				status = RREG32(mmVCE_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
			WREG32_P(mmVCE_SOFT_RESET,
				 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
				 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			WREG32_P(mmVCE_SOFT_RESET, 0,
				 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
			mdelay(10);
			r = -1;
		}

		/* clear BUSY flag */
		WREG32_P(mmVCE_STATUS, 0, ~1);

		/* Set Clock-Gating off */
		if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
			vce_v3_0_set_vce_sw_clock_gating(adev, false);

		if (r) {
			DRM_ERROR("VCE not responding, giving up!!!\n");
			mutex_unlock(&adev->grbm_idx_mutex);
			return r;
		}
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS	0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT		27
#define VCE_HARVEST_FUSE_MACRO__MASK		0x18000000

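/**
 * vce_v3_0_get_harvest_config - read the VCE harvest configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the mask of harvested (disabled) VCE instances, based on
 * the ASIC type and the harvest fuses.
 */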
static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
	if ((adev->asic_type == CHIP_FIJI) ||
	    (adev->asic_type == CHIP_STONEY) ||
	    (adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11))
		return AMDGPU_VCE_HARVEST_VCE1;

	/* Tonga and CZ are dual or single pipe */
	if (adev->flags & AMD_IS_APU)
		tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
		       VCE_HARVEST_FUSE_MACRO__MASK) >>
			VCE_HARVEST_FUSE_MACRO__SHIFT;
	else
		tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
		       CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
			CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

	switch (tmp) {
	case 1:
		return AMDGPU_VCE_HARVEST_VCE0;
	case 2:
		return AMDGPU_VCE_HARVEST_VCE1;
	case 3:
		return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
	default:
		return 0;
	}
}

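/**
 * vce_v3_0_early_init - early VCE init
 *
 * @handle: amdgpu_device pointer
 *
 * Determine the harvest configuration and set up the ring and irq
 * callbacks; fails with -ENOENT if both instances are harvested.
 */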
static int vce_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

	if ((adev->vce.harvest_config &
	     (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
	    (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
		return -ENOENT;

	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}

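/**
 * vce_v3_0_sw_init - sw init for VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the VCE interrupt source, allocate the common VCE state
 * (firmware, plus stack and data for both instances) and initialize
 * the two VCE rings.
 */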
static int vce_v3_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
		(VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return r;
}

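/**
 * vce_v3_0_sw_fini - sw fini for VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend VCE and tear down the common VCE state and rings.
 */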
static int vce_v3_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}

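/**
 * vce_v3_0_hw_init - start and test VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Start the VCE block and run a ring test on both rings, marking
 * them ready on success.
 */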
static int vce_v3_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_start(adev);
	if (r)
		return r;

	adev->vce.ring[0].ready = false;
	adev->vce.ring[1].ready = false;

	for (i = 0; i < 2; i++) {
		r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
		if (r)
			return r;
		else
			adev->vce.ring[i].ready = true;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

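/**
 * vce_v3_0_hw_fini - stop the VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Currently a no-op, nothing is torn down here.
 */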
static int vce_v3_0_hw_fini(void *handle)
{
	return 0;
}

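/**
 * vce_v3_0_suspend - suspend VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the hardware and suspend the common VCE state.
 */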
static int vce_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}

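/**
 * vce_v3_0_resume - resume VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the common VCE state and re-start the block.
 */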
static int vce_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

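/**
 * vce_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @idx: VCE instance to program
 *
 * Let the VCE memory controller know where the firmware, stack and
 * data regions of the given instance are located, and enable the
 * system interrupt.
 */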
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
{
	uint32_t offset, size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);
	if (adev->asic_type >= CHIP_STONEY) {
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
	} else
		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V3_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	if (idx == 0) {
		offset += size;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	} else {
		offset += size + VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE;
		size = VCE_V3_0_STACK_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
		offset += size;
		size = VCE_V3_0_DATA_SIZE;
		WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0xfffffff);
		WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
	}

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}

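/**
 * vce_v3_0_is_idle - check VCE idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if the busy bits of all non-harvested VCE instances
 * are clear in SRBM_STATUS2.
 */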
static bool vce_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}

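/**
 * vce_v3_0_wait_for_idle - wait for VCE to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Polls until the VCE block is idle, or returns -ETIMEDOUT.
 */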
static int vce_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v3_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}

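/**
 * vce_v3_0_soft_reset - soft reset VCE block
 *
 * @handle: amdgpu_device pointer
 *
 * Asserts SRBM soft reset for the non-harvested VCE instances,
 * then restarts the block.
 */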
static int vce_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK;

	WREG32_P(mmSRBM_SOFT_RESET, mask,
		 ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK |
		   SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK));
	mdelay(5);

	return vce_v3_0_start(adev);
}

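/**
 * vce_v3_0_set_interrupt_state - enable/disable the VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @type: irq type
 * @state: requested interrupt state
 *
 * Enables or disables the VCE system trap interrupt.
 */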
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

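/**
 * vce_v3_0_process_interrupt - handle a VCE interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Acknowledges the trap interrupt and processes the fences of the
 * signalled ring.
 */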
static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	WREG32_P(mmVCE_SYS_INT_STATUS,
		 VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
		 ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);

	switch (entry->src_data) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

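/**
 * vce_v3_0_set_clockgating_state - set VCE clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * Programs the clock on/off delays and the sw clock gating of each
 * non-harvested VCE instance.
 */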
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		if (i == 0)
			WREG32_P(mmGRBM_GFX_INDEX, 0,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
		else
			WREG32_P(mmGRBM_GFX_INDEX,
				 GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
				 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_CLOCK_GATING_A, data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(mmVCE_UENC_CLOCK_GATING);
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(mmVCE_UENC_CLOCK_GATING, data);
		}

		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

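/**
 * vce_v3_0_set_powergating_state - set VCE powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: powergating state (gate or ungate)
 *
 * Re-initializes the block on ungate; the actual gating is done by
 * the dpm code via the SMC (see the comment in the function body).
 */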
static int vce_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
		return 0;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v3_0_stop()? */
		return 0;
	else
		return vce_v3_0_start(adev);
}

const struct amd_ip_funcs vce_v3_0_ip_funcs = {
	.name = "vce_v3_0",
	.early_init = vce_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v3_0_sw_init,
	.sw_fini = vce_v3_0_sw_fini,
	.hw_init = vce_v3_0_hw_init,
	.hw_fini = vce_v3_0_hw_fini,
	.suspend = vce_v3_0_suspend,
	.resume = vce_v3_0_resume,
	.is_idle = vce_v3_0_is_idle,
	.wait_for_idle = vce_v3_0_wait_for_idle,
	.soft_reset = vce_v3_0_soft_reset,
	.set_clockgating_state = vce_v3_0_set_clockgating_state,
	.set_powergating_state = vce_v3_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.get_rptr = vce_v3_0_ring_get_rptr,
	.get_wptr = vce_v3_0_ring_get_wptr,
	.set_wptr = vce_v3_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};

static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = {
	.set = vce_v3_0_set_interrupt_state,
	.process = vce_v3_0_process_interrupt,
};

static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v3_0_irq_funcs;
}