/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_0.h"

#include <drm/drm_drv.h>

static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
};

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
		enum amd_powergating_state state);
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
		struct dpg_pause_state *new_state);
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		/* re-use enc ring as unified ring */
		adev->vcn.inst[i].num_enc_rings = 1;

	vcn_v5_0_0_set_unified_ring_funcs(adev);
	vcn_v5_0_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].set_pg_state = vcn_v5_0_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}

	return 0;
}

void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
{
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t *ptr;

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (!ptr) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}
}

/**
 * vcn_v5_0_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn5_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
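		/*
		 * Doorbell slots are 32-bit wide while vcn_ring0_1 counts
		 * 64-bit doorbells, hence the << 1. Each instance then owns
		 * an 8-slot window (see vcn_v5_0_0_hw_init()), with the
		 * unified ring placed at slot 2 of its window.
		 */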
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;

		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v5_0_0_pause_dpg_mode;
	}

	/* TODO: Add queue reset mask when FW fully supports it */
	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);

	vcn_v5_0_0_alloc_ip_dump(adev);

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v5_0_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

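	/*
	 * Clear the firmware shared-memory flags only while the device is
	 * still accessible; drm_dev_enter() fails after a hot-unplug, in
	 * which case the backing BO can no longer be written anyway.
	 */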
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn5_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;
	}

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);

	return 0;
}

/**
 * vcn_v5_0_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
			((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v5_0_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

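		/*
		 * On bare metal, make sure the engine ends up power gated:
		 * force the gate state if DPG is enabled, or if the instance
		 * is not gated yet and the firmware still reports a non-idle
		 * UVD_STATUS.
		 */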
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v5_0_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v5_0_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return r;
}

/**
 * vcn_v5_0_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v5_0_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v5_0_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

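	/*
	 * With PSP firmware loading the VCPU ucode lives in the TMR, so the
	 * window is pointed at the TMR address with a zero offset; otherwise
	 * it is pointed at the driver's firmware BO, whose payload starts
	 * AMDGPU_UVD_FIRMWARE_OFFSET bytes in (note the >> 3 scaling when
	 * the offset register is written).
	 */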
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)));
}

/**
 * vcn_v5_0_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets in dpg mode
 */
static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
		bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

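	/*
	 * Same window layout as vcn_v5_0_0_mc_resume() - firmware, stack,
	 * context, then the non-cached fw_shared region - but every write
	 * may be staged for indirect (SRAM) programming.
	 */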
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v5_0_0_disable_static_power_gating - disable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Disable static power gating for VCN block
 */
static void vcn_v5_0_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		data = 1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}

/**
 * vcn_v5_0_0_enable_static_power_gating - enable VCN static power gating
 *
 * @vinst: VCN instance
 *
 * Enable static power gating for VCN block
 */
static void vcn_v5_0_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);

		data = 2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT;
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
}

/**
 * vcn_v5_0_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}

#if 0
/**
 * vcn_v5_0_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @vinst: VCN instance
 * @sram_sel: sram select
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v5_0_0_disable_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
		uint8_t sram_sel, uint8_t indirect)
{
}
#endif

/**
 * vcn_v5_0_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
}

/**
 * vcn_v5_0_0_start_dpg_mode - VCN start with dpg mode
 *
 * @vinst: VCN instance
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
		bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

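	/*
	 * In indirect mode the DPG register writes below do not hit MMIO
	 * directly: they are staged into the DPG scratch SRAM buffer and
	 * handed to the PSP in one batch via amdgpu_vcn_psp_update_sram()
	 * once setup is complete.
	 */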
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	vcn_v5_0_0_mc_resume_dpg_mode(vinst, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}

/**
 * vcn_v5_0_0_start - VCN start
 *
 * @vinst: VCN instance
 *
 * Start VCN block
 */
static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

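	/* DPG-capable parts boot through the dynamic power gating path */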
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v5_0_0_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);

	/* disable VCN power gating */
	vcn_v5_0_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* setup regUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	vcn_v5_0_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__BLK_RST_MASK);

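	/*
	 * Firmware boot handshake: poll UVD_STATUS for the VCPU report bit
	 * (0x2), retrying up to 10 times with a VCPU block reset pulse in
	 * between; on emulation the reset pulse is skipped and we just keep
	 * polling.
	 */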
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, regUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
			if (amdgpu_emu_mode == 1)
				msleep(1);
		}

		if (amdgpu_emu_mode == 1) {
			r = -1;
			if (status & 2) {
				r = 0;
				break;
			}
		} else {
			r = 0;
			if (status & 2)
				break;

			dev_err(adev->dev,
				"VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}
	}

	if (r) {
		dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
		~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	ring = &adev->vcn.inst[i].ring_enc[0];
	WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	return 0;
}

/**
 * vcn_v5_0_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @vinst: VCN instance
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
	uint32_t tmp;

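	/* unpause DPG first so the firmware can quiesce before power-off */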
	vcn_v5_0_0_pause_dpg_mode(vinst, &state);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}

/**
 * vcn_v5_0_0_stop - VCN stop
 *
 * @vinst: VCN instance
 *
 * Stop VCN block
 */
static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_vcn5_fw_shared *fw_shared;
	uint32_t tmp;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		vcn_v5_0_0_stop_dpg_mode(vinst);
		r = 0;
		goto done;
	}

	/* wait for vcn idle */
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
	if (r)
		goto done;

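	/* make sure all outstanding LMI transactions have drained */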
	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* disable LMI UMC channel */
	tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
	tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
	if (r)
		goto done;

	/* block VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
		UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
		~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* reset VCPU */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
		UVD_VCPU_CNTL__BLK_RST_MASK,
		~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
		~(UVD_VCPU_CNTL__CLK_EN_MASK));

	/* apply soft reset */
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
	tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
	tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

	/* clear status */
	WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

	/* enable VCN power gating */
	vcn_v5_0_0_enable_static_power_gating(vinst);

done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, false, i);

	return r;
}

/**
 * vcn_v5_0_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @vinst: VCN instance
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v5_0_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
		struct dpg_pause_state *new_state)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v5_0_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

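	/*
	 * set_wptr mirrors the write pointer to system memory alongside the
	 * doorbell write, so prefer the shadow over an MMIO read.
	 */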
	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

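/*
 * The unified ring reuses the VCN 2.0 encode packet emitters; only the
 * rptr/wptr handling is VCN 5.0 specific.
 */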
static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v5_0_0_unified_ring_get_rptr,
	.get_wptr = vcn_v5_0_0_unified_ring_get_wptr,
	.set_wptr = vcn_v5_0_0_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v5_0_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
	}
}

/**
 * vcn_v5_0_0_is_idle - check VCN block is idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block structure
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v5_0_0_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v5_0_0_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
		enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

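		/* gating is only legal once the engine reports idle */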
		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v5_0_0_enable_clock_gating(vinst);
		} else {
			vcn_v5_0_0_disable_clock_gating(vinst);
		}
	}

	return 0;
}

static int vcn_v5_0_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
		enum amd_powergating_state state)
{
	int ret = 0;

	if (state == vinst->cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v5_0_0_stop(vinst);
	else
		ret = vcn_v5_0_0_start(vinst);

	if (!ret)
		vinst->cur_state = state;

	return ret;
}

/**
 * vcn_v5_0_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_5_0__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_0_irq_funcs = {
	.process = vcn_v5_0_0_process_interrupt,
};

/**
 * vcn_v5_0_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v5_0_0_irq_funcs;
	}
}

void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
		struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
	uint32_t inst_off, is_powered;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * reg_count;
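		/*
		 * Entry 0 is UVD_POWER_STATUS; a masked value of 1 means the
		 * instance was power gated when the snapshot was taken.
		 */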
		is_powered = (adev->vcn.ip_dump[inst_off] &
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
					adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}

void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	uint32_t inst_off;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * reg_count;
		/* mmUVD_POWER_STATUS is always readable and is first element of the array */
		adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
		is_powered = (adev->vcn.ip_dump[inst_off] &
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;

		if (is_powered)
			for (j = 1; j < reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
	}
}

static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
	.name = "vcn_v5_0_0",
	.early_init = vcn_v5_0_0_early_init,
	.sw_init = vcn_v5_0_0_sw_init,
	.sw_fini = vcn_v5_0_0_sw_fini,
	.hw_init = vcn_v5_0_0_hw_init,
	.hw_fini = vcn_v5_0_0_hw_fini,
	.suspend = vcn_v5_0_0_suspend,
	.resume = vcn_v5_0_0_resume,
	.is_idle = vcn_v5_0_0_is_idle,
	.wait_for_idle = vcn_v5_0_0_wait_for_idle,
	.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
	.set_powergating_state = vcn_set_powergating_state,
	.dump_ip_state = vcn_v5_0_0_dump_ip_state,
	.print_ip_state = vcn_v5_0_0_print_ip_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v5_0_0_ip_funcs,
};