/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200
#define VCN1_AON_SOC_ADDRESS_3_0				0x48000

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x1e2
static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);

/**
 * vcn_v2_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (amdgpu_sriov_vf(adev))
		adev->vcn.inst[0].num_enc_rings = 1;
	else
		adev->vcn.inst[0].num_enc_rings = 2;

	adev->vcn.inst->set_pg_state = vcn_v2_0_set_pg_state;
	vcn_v2_0_set_dec_ring_funcs(adev);
	vcn_v2_0_set_enc_ring_funcs(adev);
	vcn_v2_0_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev, 0);
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_fw_shared *fw_shared;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
			      &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
				      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev, 0);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev, 0);

	r = amdgpu_vcn_resume(adev, 0);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;

	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	ring->vm_hub = AMDGPU_MMHUB0(0);

	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.inst[0].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
	adev->vcn.inst[0].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
	adev->vcn.inst[0].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
	adev->vcn.inst[0].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
	adev->vcn.inst[0].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
	adev->vcn.inst[0].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

	adev->vcn.inst[0].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
	adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.inst[0].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
	adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.inst[0].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
	adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.inst[0].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
	adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.inst[0].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
	adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		ring->use_doorbell = true;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		if (!amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
	adev->vcn.inst[0].reset = vcn_v2_0_reset;

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

	if (amdgpu_vcnfw_log)
		amdgpu_vcn_fwlog_init(adev->vcn.inst);

	r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
	if (r)
		return r;

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	int r, idx;
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		fw_shared->present_flag_0 = 0;
		drm_dev_exit(idx);
	}

	amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev, 0);
	if (r)
		return r;

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	amdgpu_vcn_sw_fini(adev, 0);

	return 0;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					     ring->doorbell_index, 0);

	if (amdgpu_sriov_vf(adev))
		vcn_v2_0_start_sriov(adev);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	/* Disable vcn decode for sriov */
	if (amdgpu_sriov_vf(adev))
		ring->sched.ready = false;

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_vcn_inst *vinst = adev->vcn.inst;

	cancel_delayed_work_sync(&vinst->idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (vinst->cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
		vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = vcn_v2_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(ip_block->adev, 0);

	return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vcn_resume(ip_block->adev, 0);
	if (r)
		return r;

	r = vcn_v2_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
	uint32_t offset;

	if (amdgpu_sriov_vf(adev))
		return;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));

	WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

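/**
 * vcn_v2_0_mc_resume_dpg_mode - memory controller programming in DPG mode
 *
 * @vinst: Pointer to the VCN instance structure
 * @indirect: whether to stage the register writes in the DPG SRAM
 *
 * Program the firmware, stack, context and non-cache windows while the
 * block is in dynamic power gating mode.
 */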
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data;

	if (amdgpu_sriov_vf(adev))
		return;

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		  | UVD_CGC_GATE__UDEC_MASK
		  | UVD_CGC_GATE__MPEG2_MASK
		  | UVD_CGC_GATE__REGS_MASK
		  | UVD_CGC_GATE__RBC_MASK
		  | UVD_CGC_GATE__LMI_MC_MASK
		  | UVD_CGC_GATE__LMI_UMC_MASK
		  | UVD_CGC_GATE__IDCT_MASK
		  | UVD_CGC_GATE__MPRD_MASK
		  | UVD_CGC_GATE__MPC_MASK
		  | UVD_CGC_GATE__LBSI_MASK
		  | UVD_CGC_GATE__LRBBM_MASK
		  | UVD_CGC_GATE__UDEC_RE_MASK
		  | UVD_CGC_GATE__UDEC_CM_MASK
		  | UVD_CGC_GATE__UDEC_IT_MASK
		  | UVD_CGC_GATE__UDEC_DB_MASK
		  | UVD_CGC_GATE__UDEC_MP_MASK
		  | UVD_CGC_GATE__WCB_MASK
		  | UVD_CGC_GATE__VCPU_MASK
		  | UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		  | UVD_CGC_CTRL__SYS_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_MODE_MASK
		  | UVD_CGC_CTRL__MPEG2_MODE_MASK
		  | UVD_CGC_CTRL__REGS_MODE_MASK
		  | UVD_CGC_CTRL__RBC_MODE_MASK
		  | UVD_CGC_CTRL__LMI_MC_MODE_MASK
		  | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		  | UVD_CGC_CTRL__IDCT_MODE_MASK
		  | UVD_CGC_CTRL__MPRD_MODE_MASK
		  | UVD_CGC_CTRL__MPC_MODE_MASK
		  | UVD_CGC_CTRL__LBSI_MODE_MASK
		  | UVD_CGC_CTRL__LRBBM_MODE_MASK
		  | UVD_CGC_CTRL__WCB_MODE_MASK
		  | UVD_CGC_CTRL__VCPU_MODE_MASK
		  | UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		 | UVD_SUVD_CGC_GATE__SIT_MASK
		 | UVD_SUVD_CGC_GATE__SMP_MASK
		 | UVD_SUVD_CGC_GATE__SCM_MASK
		 | UVD_SUVD_CGC_GATE__SDB_MASK
		 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
		 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
		 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
		 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SCLR_MASK
		 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
		 | UVD_SUVD_CGC_GATE__ENT_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		 | UVD_SUVD_CGC_GATE__SITE_MASK
		 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

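/**
 * vcn_v2_0_clock_gating_dpg_mode - program sw clock gating in DPG mode
 *
 * @vinst: Pointer to the VCN instance structure
 * @sram_sel: SRAM select flag forwarded to the DPG register write macro
 * @indirect: whether to stage the register writes in the DPG SRAM
 *
 * Set up software-controlled clock gating for the UVD and SUVD blocks
 * while running in dynamic power gating mode.
 */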
static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
					   uint8_t sram_sel, uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		      UVD_CGC_CTRL__SYS_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MODE_MASK |
		      UVD_CGC_CTRL__MPEG2_MODE_MASK |
		      UVD_CGC_CTRL__REGS_MODE_MASK |
		      UVD_CGC_CTRL__RBC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		      UVD_CGC_CTRL__IDCT_MODE_MASK |
		      UVD_CGC_CTRL__MPRD_MODE_MASK |
		      UVD_CGC_CTRL__MPC_MODE_MASK |
		      UVD_CGC_CTRL__LBSI_MODE_MASK |
		      UVD_CGC_CTRL__LRBBM_MODE_MASK |
		      UVD_CGC_CTRL__WCB_MODE_MASK |
		      UVD_CGC_CTRL__VCPU_MODE_MASK |
		      UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		 | UVD_CGC_CTRL__SYS_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_MODE_MASK
		 | UVD_CGC_CTRL__MPEG2_MODE_MASK
		 | UVD_CGC_CTRL__REGS_MODE_MASK
		 | UVD_CGC_CTRL__RBC_MODE_MASK
		 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
		 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		 | UVD_CGC_CTRL__IDCT_MODE_MASK
		 | UVD_CGC_CTRL__MPRD_MODE_MASK
		 | UVD_CGC_CTRL__MPC_MODE_MASK
		 | UVD_CGC_CTRL__LBSI_MODE_MASK
		 | UVD_CGC_CTRL__LRBBM_MODE_MASK
		 | UVD_CGC_CTRL__WCB_MODE_MASK
		 | UVD_CGC_CTRL__VCPU_MODE_MASK
		 | UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

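/**
 * vcn_v2_0_disable_static_power_gating - power up the VCN tiles
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Program UVD_PGFSM_CONFIG to power the VCN tiles back on and wait on
 * UVD_PGFSM_STATUS to confirm the power-on state.
 */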
static void vcn_v2_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
				   UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS,
	 * UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

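/**
 * vcn_v2_0_enable_static_power_gating - power down the VCN tiles
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * When static power gating is supported, flag the tiles as off and
 * program UVD_PGFSM_CONFIG to power-gate the VCN tiles.
 */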
static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t data = 0;

	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
	}
}

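/**
 * vcn_v2_0_start_dpg_mode - start VCN block in dynamic power gating mode
 *
 * @vinst: Pointer to the VCN instance structure
 * @indirect: whether to stage the register writes in the DPG SRAM
 *
 * Bring up the VCPU, program the memory controller and the ring buffers,
 * optionally staging the register writes in SRAM for PSP to apply.
 */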
static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	int ret;

	vcn_v2_0_enable_static_power_gating(vinst);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_0_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
	       UVD_LMI_CTRL__REQ_MODE_MASK |
	       UVD_LMI_CTRL__CRC_RESET_MASK |
	       UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
	       UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
	       UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
	       (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
	       0x00100000L);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_0_mc_resume_dpg_mode(vinst, indirect);

	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* release VCPU reset to boot */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_LMI_CTRL2),
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
		UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect) {
		ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
		if (ret) {
			dev_err(adev->dev, "vcn sram load failed %d\n", ret);
			return ret;
		}
	}

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

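/**
 * vcn_v2_0_start - start VCN block
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Set up the clocks, memory controller and ring buffers, then boot the
 * VCPU and wait for it to respond.
 */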
static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, 0);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v2_0_start_dpg_mode(vinst, adev->vcn.inst->indirect_sram);

	vcn_v2_0_disable_static_power_gating(vinst);

	/* set uvd status busy */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v2_0_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v2_0_mc_resume(vinst);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
	fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

	fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
	fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

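/**
 * vcn_v2_0_stop_dpg_mode - stop VCN block in dynamic power gating mode
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Unpause DPG, wait for the ring read and write pointers to match, then
 * disable dynamic power gating mode.
 */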
static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

	vcn_v2_0_pause_dpg_mode(vinst, &state);
	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(UVD, 0, mmUVD_STATUS);

	return 0;
}

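/**
 * vcn_v2_0_stop - stop VCN block
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Wait for the block to go idle, reset the VCPU and LMI, and re-enable
 * clock and power gating.
 */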
static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	uint32_t tmp;
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		r = vcn_v2_0_stop_dpg_mode(vinst);
		if (r)
			return r;
		goto power_off;
	}

	/* wait for uvd idle */
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		return r;

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__READ_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* stall UMC channel */
	tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
	if (r)
		return r;

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* reset LMI UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	/* reset LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* clear status */
	WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

	vcn_v2_0_enable_clock_gating(vinst);
	vcn_v2_0_enable_static_power_gating(vinst);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, 0, mmUVD_STATUS);

power_off:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, 0);

	return 0;
}

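/**
 * vcn_v2_0_pause_dpg_mode - pause or unpause dynamic power gating
 *
 * @vinst: Pointer to the VCN instance structure
 * @new_state: requested DPG pause state
 *
 * Pause or unpause the firmware-based DPG state, restoring the encode
 * and decode ring pointers on a pause transition.
 */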
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			   (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
						      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst->ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
					 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

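/**
 * vcn_v2_0_reset - reset VCN block
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Stop and restart the VCN block to recover it.
 */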
static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
{
	int r;

	r = vcn_v2_0_stop(vinst);
	if (r)
		return r;
	return vcn_v2_0_start(vinst);
}

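/**
 * vcn_v2_0_is_idle - check VCN block idle status
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Returns true if the VCN block reports idle.
 */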
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

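/**
 * vcn_v2_0_wait_for_idle - wait for VCN block to become idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Poll UVD_STATUS until the block reports idle or the wait times out.
 */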
static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				 UVD_STATUS__IDLE);

	return ret;
}

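/**
 * vcn_v2_0_set_clockgating_state - set VCN clock gating state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 * @state: clock gating state to set
 *
 * Enable hardware clock gating when the block is idle, or disable it in
 * favor of software-controlled gating.
 */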
static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v2_0_is_idle(ip_block))
			return -EBUSY;
		vcn_v2_0_enable_clock_gating(&adev->vcn.inst[0]);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v2_0_disable_clock_gating(&adev->vcn.inst[0]);
	}
	return 0;
}

1418/**
1419 * vcn_v2_0_dec_ring_get_rptr - get read pointer
1420 *
1421 * @ring: amdgpu_ring pointer
1422 *
1423 * Returns the current hardware read pointer
1424 */
1425static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1426{
1427 struct amdgpu_device *adev = ring->adev;
1428
1429 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
1430}
1431
1432/**
1433 * vcn_v2_0_dec_ring_get_wptr - get write pointer
1434 *
1435 * @ring: amdgpu_ring pointer
1436 *
1437 * Returns the current hardware write pointer
1438 */
1439static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1440{
1441 struct amdgpu_device *adev = ring->adev;
1442
1443 if (ring->use_doorbell)
1444 return *ring->wptr_cpu_addr;
1445 else
1446 return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
1447}
1448
1449/**
1450 * vcn_v2_0_dec_ring_set_wptr - set write pointer
1451 *
1452 * @ring: amdgpu_ring pointer
1453 *
1454 * Commits the write pointer to the hardware
1455 */
1456static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1457{
1458 struct amdgpu_device *adev = ring->adev;
1459
1460 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
1461 WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
1462 lower_32_bits(ring->wptr) | 0x80000000);
1463
1464 if (ring->use_doorbell) {
1465 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1466 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1467 } else {
1468 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1469 }
1470}
1471
1472/**
1473 * vcn_v2_0_dec_ring_insert_start - insert a start command
1474 *
1475 * @ring: amdgpu_ring pointer
1476 *
1477 * Write a start command to the ring.
1478 */
1479void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
1480{
1481 struct amdgpu_device *adev = ring->adev;
1482
1483 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
1484 amdgpu_ring_write(ring, 0);
1485 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
1486 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
1487}
1488
1489/**
1490 * vcn_v2_0_dec_ring_insert_end - insert a end command
1491 *
1492 * @ring: amdgpu_ring pointer
1493 *
1494 * Write a end command to the ring.
1495 */
1496void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
1497{
1498 struct amdgpu_device *adev = ring->adev;
1499
1500 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[0].internal.cmd, 0));
1501 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
1502}
1503
1504/**
1505 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
1506 *
1507 * @ring: amdgpu_ring pointer
1508 * @count: the number of NOP packets to insert
1509 *
1510 * Write a nop command to the ring.
1511 */
1512void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
1513{
1514 struct amdgpu_device *adev = ring->adev;
1515 int i;
1516
1517 WARN_ON(ring->wptr % 2 || count % 2);
1518
1519 for (i = 0; i < count / 2; i++) {
1520 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.nop, 0));
1521 amdgpu_ring_write(ring, 0);
1522 }
1523}
1524
1525/**
1526 * vcn_v2_0_dec_ring_emit_fence - emit an fence & trap command
1527 *
1528 * @ring: amdgpu_ring pointer
1529 * @addr: address
1530 * @seq: sequence number
1531 * @flags: fence related flags
1532 *
1533 * Write a fence and a trap command to the ring.
1534 */
1535void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
1536 unsigned flags)
1537{
1538 struct amdgpu_device *adev = ring->adev;
1539
1540 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
1541 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.context_id, 0));
1542 amdgpu_ring_write(ring, seq);
1543
1544 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
1545 amdgpu_ring_write(ring, addr & 0xffffffff);
1546
1547 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
1548 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
1549
1550 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
1551 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));
1552
1553 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
1554 amdgpu_ring_write(ring, 0);
1555
1556 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
1557 amdgpu_ring_write(ring, 0);
1558
1559 amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
1560
1561 amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
1562}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_vmid, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_low, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_high, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_size, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
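
/*
 * Four register/value pairs, eight dwords in total, matching the
 * .emit_ib_size = 8 advertised in vcn_v2_0_dec_ring_vm_funcs: the IB's
 * VMID, its 64-bit GPU address split across the BAR low/high registers,
 * and its length in dwords.
 */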

void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.gp_scratch8, 0));
	amdgpu_ring_write(ring, mask);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}
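
/*
 * REG_READ_COND_WAIT takes the register's byte address in data0 (hence
 * reg << 2 to convert the dword offset), the expected value in data1 and
 * the compare mask in GP_SCRATCH8; the engine is then expected to stall
 * command processing until the masked register read matches the value.
 */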

void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}
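
/*
 * amdgpu_gmc_emit_flush_gpu_tlb() emits the page-table-base update and
 * TLB flush on this ring and returns the value it wrote for the page
 * table base; the reg_wait afterwards keeps the decoder from fetching
 * through the new mapping before that register write has landed.
 */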

void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
				 uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
	amdgpu_ring_write(ring, reg << 2);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
	amdgpu_ring_write(ring, val);

	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));

	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}
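
/*
 * Same data0/data1/cmd convention as the reg_wait above, but with the
 * WRITE_REG command: data0 carries the register byte address and data1
 * the value, so the VCPU performs the register write at the right point
 * in the command stream.
 */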

/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}
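
/*
 * With doorbells enabled the new write pointer is mirrored to the CPU
 * copy first and then rung through WDOORBELL32(), so the engine sees a
 * consistent value whichever path it samples; otherwise the driver falls
 * back to a plain MMIO write of UVD_RB_WPTR or UVD_RB_WPTR2.
 */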

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				  u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
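
/*
 * The encode ring uses self-describing commands rather than PACKET0
 * register writes, so a fence costs only five dwords (FENCE, address
 * low/high, sequence number, TRAP), which is the "5 + 5" term in the
 * enc ring's emit_frame_size.
 */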

void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
			       struct amdgpu_job *job,
			       struct amdgpu_ib *ib,
			       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
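
/*
 * Five dwords (IB command, VMID, IB address low/high, length), matching
 * the .emit_ib_size = 5 advertised in vcn_v2_0_enc_ring_vm_funcs.
 */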

void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
				     unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 4);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
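
/*
 * The test seeds SCRATCH9 with 0xCAFEDEAD over MMIO, then submits a
 * four-dword packet that makes the decoder itself write 0xDEADBEEF to
 * the same register; polling over MMIO until the magic value appears
 * (or usec_timeout expires) proves the engine is fetching and executing
 * ring packets. SR-IOV VFs skip the test, as direct register access is
 * restricted under virtualization.
 */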

static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = vinst->adev;

	if (amdgpu_sriov_vf(adev)) {
		vinst->cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_0_stop(vinst);
	else
		ret = vcn_v2_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v2_0_init_header *header;
	uint32_t size;
	int i;

	header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
	size = header->header_size + header->vcn_table_size;

	/* 1, write the GPU mc address of the memory descriptor to the
	 * mmsch_vf_ctx_addr_lo/hi registers
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	adev->vcn.inst->ring_dec.wptr = 0;
	adev->vcn.inst->ring_dec.wptr_old = 0;
	vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
		adev->vcn.inst->ring_enc[i].wptr = 0;
		adev->vcn.inst->ring_enc[i].wptr_old = 0;
		vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
	}

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		DRM_ERROR("failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n",
			  data);
		return -EBUSY;
	}

	return 0;
}
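
/*
 * The handshake above writes 0x10000001 to MMSCH_VF_MAILBOX_HOST to kick
 * off initialization and then polls MMSCH_VF_MAILBOX_RESP for up to
 * 1000 * 10us, expecting the scheduler to answer with the 0x10000002
 * pattern; anything else after the timeout is logged and reported as
 * -EBUSY.
 */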

static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
	int r;
	uint32_t tmp;
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
	struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
	struct mmsch_v2_0_cmd_end end = { {0} };
	struct mmsch_v2_0_init_header *header;
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	uint8_t i = 0;

	header = (struct mmsch_v2_0_init_header *)init_table;
	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

		header->vcn_table_offset = header->header_size;

		init_table += header->vcn_table_offset;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);

		MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
			0xFFFFFFFF, 0x00000004);

		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
			offset = 0;
		} else {
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst->gpu_addr));
			offset = size;
		}

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
			size);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst->gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		for (r = 0; r < adev->vcn.inst[0].num_enc_rings; ++r) {
			ring = &adev->vcn.inst->ring_enc[r];
			ring->wptr = 0;
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
				lower_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
				upper_32_bits(ring->gpu_addr));
			MMSCH_V2_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
				ring->ring_size / 4);
		}

		ring = &adev->vcn.inst->ring_dec;
		ring->wptr = 0;
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));
		/* force RBC into idle state */
		tmp = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V2_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		tmp = sizeof(struct mmsch_v2_0_cmd_end);
		memcpy((void *)init_table, &end, tmp);
		table_size += (tmp / 4);
		header->vcn_table_size = table_size;
	}
	return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
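
/*
 * Under SR-IOV the VF cannot program most VCN registers directly, so the
 * start sequence is expressed as an MMSCH init table: a list of direct
 * write and read-modify-write commands built in adev->virt.mm_table and
 * handed to the MM scheduler, which replays them on the VF's behalf in
 * vcn_v2_0_start_mmsch().
 */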

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
	.name = "vcn_v2_0",
	.early_init = vcn_v2_0_early_init,
	.sw_init = vcn_v2_0_sw_init,
	.sw_fini = vcn_v2_0_sw_fini,
	.hw_init = vcn_v2_0_hw_init,
	.hw_fini = vcn_v2_0_hw_fini,
	.suspend = vcn_v2_0_suspend,
	.resume = vcn_v2_0_resume,
	.is_idle = vcn_v2_0_is_idle,
	.wait_for_idle = vcn_v2_0_wait_for_idle,
	.set_clockgating_state = vcn_v2_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = amdgpu_vcn_dump_ip_state,
	.print_ip_state = amdgpu_vcn_print_ip_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v2_0_dec_ring_get_rptr,
	.get_wptr = vcn_v2_0_dec_ring_get_wptr,
	.set_wptr = vcn_v2_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = amdgpu_vcn_ring_reset,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_0_enc_ring_get_rptr,
	.get_wptr = vcn_v2_0_enc_ring_get_wptr,
	.set_wptr = vcn_v2_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = amdgpu_vcn_ring_reset,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
	.set = vcn_v2_0_set_interrupt_state,
	.process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 1;
	adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v2_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v2_0_ip_funcs,
};