/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

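/*
 * The common DPG register helpers refer to the LMA registers by their legacy
 * mm-prefixed names; alias them to the reg-prefixed names from the 4.0.3
 * headers so those helpers can be used unchanged.
 */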
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
		int inst_idx, bool indirect);

/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);
	vcn_v4_0_3_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/* VCN UNIFIED TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		vcn_inst = GET_INST(VCN, i);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
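		/*
		 * Each physical VCN instance owns a block of 9 doorbell
		 * slots above the VCN doorbell base; the unified ring uses
		 * the first slot of its instance's block.
		 */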
		ring->doorbell_index =
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
			9 * vcn_inst;
		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = true;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		r = amdgpu_vcn_ras_sw_init(adev);
		if (r) {
			dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
			return r;
		}
	}

	return 0;
}

/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

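	/*
	 * Only touch the fw_shared CPU mapping while the DRM device is
	 * still attached; after unplug the backing buffer may be gone.
	 */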
	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);
		ring = &adev->vcn.inst[i].ring_enc[0];

		if (ring->use_doorbell) {
			adev->nbio.funcs->vcn_doorbell_range(
				adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					9 * vcn_inst,
				adev->vcn.inst[i].aid_id);

			WREG32_SOC15(
				VCN, GET_INST(VCN, ring->me),
				regVCN_RB1_DB_CTRL,
				ring->doorbell_index
						<< VCN_RB1_DB_CTRL__OFFSET__SHIFT |
					VCN_RB1_DB_CTRL__EN_MASK);

			/* Read DB_CTRL to flush the write DB_CTRL command. */
			RREG32_SOC15(
				VCN, GET_INST(VCN, ring->me),
				regVCN_RB1_DB_CTRL);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
			     (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
				     "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
		vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vcn_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_3_hw_init(adev);

	return r;
}

/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
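	/*
	 * VCPU address-space layout programmed below: cache window 0 holds
	 * the firmware image (or points at the PSP-loaded TMR copy), window
	 * 1 the stack placed right after the image, window 2 the context
	 * after the stack, and the non-cache window maps the fw_shared
	 * buffer.
	 */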

	vcn_inst = GET_INST(VCN, inst_idx);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_lo));
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst,
			     regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
		     AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
		     AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
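	/* wait until all UVD_CGC_GATE bits read back as zero (ungated) */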
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
				int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	int vcn_inst;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, inst_idx);
	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

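	/*
	 * In indirect mode the register writes below are not issued to the
	 * hardware directly: they are staged in the per-instance DPG SRAM
	 * buffer and handed to the PSP in one batch via
	 * psp_update_vcn_sram() further down.
	 */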
	if (indirect) {
		DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
			inst_idx, adev->vcn.inst[inst_idx].aid_id);
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
				(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
		WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
			adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_3_enable_ras(adev, inst_idx, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
		     upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
		     ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	return 0;
}

/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	int i, j, k, r, vcn_inst;
	uint32_t tmp;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		vcn_inst = GET_INST(VCN, i);
		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
		      UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_3_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__CLK_EN_MASK,
			 ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
			     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
			     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
			     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
			     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_3_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

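		/*
		 * Poll UVD_STATUS for the firmware's VCPU report; each of
		 * the 10 attempts waits up to 1s (100 * 10ms) and, on
		 * timeout, pulses VCPU block reset before retrying.
		 */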
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, vcn_inst,
						      regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_DEV_ERROR(adev->dev,
				"VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
			 UVD_MASTINT_EN__VCPU_EN_MASK,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
			     upper_32_bits(ring->gpu_addr));

		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
			     ring->ring_size / sizeof(uint32_t));

		/* resetting ring, fw should not check RB ring */
		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
		fw_shared->sq.queue_mode &=
			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
	}
	return 0;
}

/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
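	/* once rptr has caught up the ring is drained and gating is safe */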

	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	return 0;
}

/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	int i, r = 0, vcn_inst;
	uint32_t tmp;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
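		/*
		 * The hold-off flag should keep the firmware from initiating
		 * DPG power transitions while the block is being torn down.
		 */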

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_3_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
				       UVD_STATUS__IDLE, 0x7);
		if (r)
			goto Done;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__READ_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* stall UMC channel */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
			 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* put VCPU into reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* reset LMI UMC/LMI/VCPU */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* clear VCN status */
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_3_enable_clock_gating(adev, i);
	}
Done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
				     struct dpg_pause_state *new_state)
{
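	/*
	 * Nothing to reprogram on pause for this IP version; the stub only
	 * satisfies the pause_dpg_mode hook installed in sw_init.
	 */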

	return 0;
}

/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
				    regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		vcn_inst = GET_INST(VCN, i);
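		/* map the logical instance to the AID (die) it lives on */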
		adev->vcn.inst[i].aid_id =
			vcn_inst / adev->vcn.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}

/**
 * vcn_v4_0_3_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
			UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (enable) {
			if (RREG32_SOC15(VCN, GET_INST(VCN, i),
					 regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_3_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_3_stop(adev);
	else
		ret = vcn_v4_0_3_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int type,
					  enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

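	/*
	 * The IV entry carries the node id of the reporting die; translate
	 * it to a physical AID index, then find the VCN instance living on
	 * that AID.
	 */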
	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
		if (adev->vcn.inst[inst].aid_id == i)
			break;

	if (inst >= adev->vcn.num_vcn_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown VCN instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
	.set = vcn_v4_0_3_set_interrupt_state,
	.process = vcn_v4_0_3_process_interrupt,
};

/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

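	/*
	 * All instances funnel through the irq source of instance 0; one
	 * interrupt type is accounted per instance.
	 */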
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		adev->vcn.inst->irq.num_types++;
	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}

static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
	.name = "vcn_v4_0_3",
	.early_init = vcn_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_3_sw_init,
	.sw_fini = vcn_v4_0_3_sw_fini,
	.hw_init = vcn_v4_0_3_hw_init,
	.hw_fini = vcn_v4_0_3_hw_fini,
	.suspend = vcn_v4_0_3_suspend,
	.resume = vcn_v4_0_3_resume,
	.is_idle = vcn_v4_0_3_is_idle,
	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &vcn_v4_0_3_ip_funcs,
};

static const struct amdgpu_ras_err_status_reg_entry vcn_v4_0_3_ue_reg_list[] = {
	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDD, regVCN_UE_ERR_STATUS_HI_VIDD),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDD"},
	{AMDGPU_RAS_REG_ENTRY(VCN, 0, regVCN_UE_ERR_STATUS_LO_VIDV, regVCN_UE_ERR_STATUS_HI_VIDV),
	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "VIDV"},
};

static void vcn_v4_0_3_inst_query_ras_error_count(struct amdgpu_device *adev,
						  uint32_t vcn_inst,
						  void *ras_err_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;

	/* vcn v4_0_3 only supports querying uncorrectable errors */
	amdgpu_ras_inst_query_ras_error_count(adev,
					      vcn_v4_0_3_ue_reg_list,
					      ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
					      NULL, 0, GET_INST(VCN, vcn_inst),
					      AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					      &err_data->ue_count);
}

static void vcn_v4_0_3_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_err_status)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		dev_warn(adev->dev, "VCN RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		vcn_v4_0_3_inst_query_ras_error_count(adev, i, ras_err_status);
}

static void vcn_v4_0_3_inst_reset_ras_error_count(struct amdgpu_device *adev,
						  uint32_t vcn_inst)
{
	amdgpu_ras_inst_reset_ras_error_count(adev,
					      vcn_v4_0_3_ue_reg_list,
					      ARRAY_SIZE(vcn_v4_0_3_ue_reg_list),
					      GET_INST(VCN, vcn_inst));
}

static void vcn_v4_0_3_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
		dev_warn(adev->dev, "VCN RAS is not supported\n");
		return;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		vcn_v4_0_3_inst_reset_ras_error_count(adev, i);
}

static const struct amdgpu_ras_block_hw_ops vcn_v4_0_3_ras_hw_ops = {
	.query_ras_error_count = vcn_v4_0_3_query_ras_error_count,
	.reset_ras_error_count = vcn_v4_0_3_reset_ras_error_count,
};

static struct amdgpu_vcn_ras vcn_v4_0_3_ras = {
	.ras_block = {
		.hw_ops = &vcn_v4_0_3_ras_hw_ops,
	},
};

static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev)
{
	adev->vcn.ras = &vcn_v4_0_3_ras;
}

static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
				  int inst_idx, bool indirect)
{
	uint32_t tmp;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}