/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v3_0.h"
#include "vcn_sw_ring.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#include <drm/drm_drv.h>

#define VCN_VID_SOC_ADDRESS_2_0		0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48200
#define VCN1_AON_SOC_ADDRESS_3_0	0x48000

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET		0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET		0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET		0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET		0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET			0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET		0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET			0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET		0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET	0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET	0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET		0x25c
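
/*
 * Editor's note: the *_INTERNAL_OFFSET values above appear to be
 * firmware-internal DWORD register indices, used when registers are
 * programmed indirectly (e.g. via decode ring packets or DPG mode),
 * as opposed to the SOC15 MMIO offsets stored in the "external"
 * fields during vcn_v3_0_sw_init() below.
 */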

#define VCN_INSTANCES_SIENNA_CICHLID	2
#define DEC_SW_RING_ENABLED		FALSE

#define RDECODE_MSG_CREATE		0x00000000
#define RDECODE_MESSAGE_CREATE		0x00000001

static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
	SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};

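/* IH client IDs used to route interrupts, indexed by VCN instance */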
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
				 enum amd_powergating_state state);
static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
				   struct dpg_pause_state *new_state);
static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst);

static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v3_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		adev->vcn.harvest_config = 0;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			adev->vcn.inst[i].num_enc_rings = 1;

	} else {
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(3, 0, 33))
				adev->vcn.inst[i].num_enc_rings = 0;
			else
				adev->vcn.inst[i].num_enc_rings = 2;
		}
	}

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state;

		r = amdgpu_vcn_early_init(adev, i);
		if (r)
			return r;
	}
	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	int vcn_doorbell_index = 0;
	uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
	uint32_t *ptr;
	struct amdgpu_device *adev = ip_block->adev;

	/*
	 * Note: doorbell assignment is fixed for SRIOV multiple VCN engines
	 * Formula:
	 * vcn_db_base  = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
	 * dec_ring_i   = vcn_db_base + i * (adev->vcn.num_enc_rings + 1)
	 * enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j
	 */
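	/*
	 * Worked example (from the formula above): with num_enc_rings == 1
	 * and vcn_ring0_1 == D, vcn_db_base == 2 * D, so instance 0 gets
	 * dec = 2 * D and enc0 = 2 * D + 1, and instance 1 gets
	 * dec = 2 * D + 2 and enc0 = 2 * D + 3.
	 */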
	if (amdgpu_sriov_vf(adev)) {
		vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1;
		/* get DWORD offset */
		vcn_doorbell_index = vcn_doorbell_index << 1;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_vcn_sw_init(adev, i);
		if (r)
			return r;

		amdgpu_vcn_setup_ucode(adev, i);

		r = amdgpu_vcn_resume(adev, i);
		if (r)
			return r;

		adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				      VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		if (amdgpu_sriov_vf(adev)) {
			ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1);
		} else {
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		}
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);

			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
					      j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			if (amdgpu_sriov_vf(adev)) {
				ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j;
			} else {
				ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			}
			ring->vm_hub = AMDGPU_MMHUB0(0);
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     hw_prio, &adev->vcn.inst[i].sched_score);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_VERSION_INFO_FLAG);
		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2))
			fw_shared->smu_interface_info.smu_interface_type = 2;
		else if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			 IP_VERSION(3, 1, 1))
			fw_shared->smu_interface_info.smu_interface_type = 1;

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
			adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode;
		adev->vcn.inst[i].reset = vcn_v3_0_reset;
	}

	adev->vcn.supported_reset =
		amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
	if (!amdgpu_sriov_vf(adev))
		adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	/* Allocate memory for VCN IP Dump buffer */
	ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
	if (ptr == NULL) {
		DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
		adev->vcn.ip_dump = NULL;
	} else {
		adev->vcn.ip_dump = ptr;
	}

	r = amdgpu_vcn_sysfs_reset_mask_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sw_ring.is_enabled = false;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	amdgpu_vcn_sysfs_reset_mask_fini(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(adev, i);
		if (r)
			return r;

		r = amdgpu_vcn_sw_fini(adev, i);
		if (r)
			return r;
	}

	kfree(adev->vcn.ip_dump);
	return 0;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int i, j, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v3_0_start_sriov(adev);
		if (r)
			return r;

		/* initialize VCN dec and enc ring buffers */
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
				ring->sched.ready = false;
				ring->no_scheduler = true;
				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
			} else {
				ring->wptr = 0;
				ring->wptr_old = 0;
				vcn_v3_0_dec_ring_set_wptr(ring);
				ring->sched.ready = true;
			}

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
					ring->sched.ready = false;
					ring->no_scheduler = true;
					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
				} else {
					ring->wptr = 0;
					ring->wptr_old = 0;
					vcn_v3_0_enc_ring_set_wptr(ring);
					ring->sched.ready = true;
				}
			}
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
							     ring->doorbell_index, i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;

			for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
				ring = &adev->vcn.inst[i].ring_enc[j];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					return r;
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		cancel_delayed_work_sync(&vinst->idle_work);

		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (vinst->cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, mmUVD_STATUS))) {
				vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	r = vcn_v3_0_hw_fini(ip_block);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_suspend(ip_block->adev, i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		r = amdgpu_vcn_resume(ip_block->adev, i);
		if (r)
			return r;
	}

	r = vcn_v3_0_hw_init(ip_block);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @vinst: VCN instance
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
		     AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}

static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
					bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS,
				   UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);
}

static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v3_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Disable clock gating for VCN block
 */
static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data);

	SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__IME_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK
		| UVD_SUVD_CGC_GATE__EFC_MASK
		| UVD_SUVD_CGC_GATE__SAOE_MASK
		| UVD_SUVD_CGC_GATE__SRE_AV1_MASK
		| UVD_SUVD_CGC_GATE__FBC_PCLK_MASK
		| UVD_SUVD_CGC_GATE__FBC_CCLK_MASK
		| UVD_SUVD_CGC_GATE__SCM_AV1_MASK
		| UVD_SUVD_CGC_GATE__SMPA_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2);
	data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK
		| UVD_SUVD_CGC_GATE2__MPBE1_MASK
		| UVD_SUVD_CGC_GATE2__SIT_AV1_MASK
		| UVD_SUVD_CGC_GATE2__SDB_AV1_MASK
		| UVD_SUVD_CGC_GATE2__MPC1_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
					   uint8_t sram_sel,
					   uint8_t indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v3_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Enable clock gating for VCN block
 */
static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst = vinst->inst;
	uint32_t data;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__EFC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK
		| UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK
		| UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK);
	WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
}

static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
	struct amdgpu_device *adev = vinst->adev;
	int inst_idx = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v3_0_mc_resume_dpg_mode(vinst, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	/* Reset FW shared memory RBC WPTR/RPTR */
	fw_shared->rb.rptr = 0;
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);

	/* resetting done, fw can check RB ring */
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);

	return 0;
}

static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;
	volatile struct amdgpu_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int j, k, r;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_vcn(adev, true, i);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram);

	/* disable VCN power gating */
	vcn_v3_0_disable_static_power_gating(vinst);

	/* set VCN status busy */
	tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

	/* SW clock gating */
	vcn_v3_0_disable_clock_gating(vinst);

	/* enable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* disable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable LMI MC and UMC channels */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

	/* setup mmUVD_LMI_CTRL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
	WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

	/* setup mmUVD_MPC_CNTL */
	tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

	/* setup UVD_MPC_SET_MUXA0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	/* setup UVD_MPC_SET_MUXB0 */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	/* setup mmUVD_MPC_SET_MUX */
	WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v3_0_mc_resume(vinst);

	/* VCN global tiling registers */
	WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);

	/* unblock VCPU register access */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
		 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

	/* release VCPU reset to boot */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

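	/* poll UVD_STATUS for the VCPU-booted bit (bit 1); on timeout, pulse
	 * the VCPU block reset and retry, up to 10 attempts
	 */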
	for (j = 0; j < 10; ++j) {
		uint32_t status;

		for (k = 0; k < 100; ++k) {
			status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
		return r;
	}

	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* clear the busy bit of VCN_STATUS */
	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
		 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

	ring = &adev->vcn.inst[i].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

	fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
	fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
	ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));
	fw_shared->rb.wptr = lower_32_bits(ring->wptr);
	fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
	    IP_VERSION(3, 0, 33)) {
		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
	}

	/* Keeping one read-back to ensure all register writes are done,
	 * otherwise it may introduce race conditions.
	 */
	RREG32_SOC15(VCN, i, mmUVD_STATUS);

	return 0;
}

static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
{
	int i, j;
	struct amdgpu_ring *ring;
	uint64_t cache_addr;
	uint64_t rb_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;

	struct mmsch_v3_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v3_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v3_0_cmd_end end = { {0} };
	struct mmsch_v3_0_init_header header;

	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2;
	for (i = 0; i < MMSCH_V3_0_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
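	/* the init header occupies the first total_size dwords of the table;
	 * each instance's command table is packed immediately after it
	 */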
1429 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
1430 if (adev->vcn.harvest_config & (1 << i))
1431 continue;
1432
1433 table_size = 0;
1434
1435 MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
1436 mmUVD_STATUS),
1437 ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
1438
1439 cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4);
1440
1441 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1442 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1443 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1444 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
1445 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1446 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1447 adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
1448 offset = 0;
1449 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1450 mmUVD_VCPU_CACHE_OFFSET0),
1451 0);
1452 } else {
1453 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1454 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
1455 lower_32_bits(adev->vcn.inst[i].gpu_addr));
1456 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1457 mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
1458 upper_32_bits(adev->vcn.inst[i].gpu_addr));
1459 offset = cache_size;
1460 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1461 mmUVD_VCPU_CACHE_OFFSET0),
1462 AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
1463 }
1464
1465 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1466 mmUVD_VCPU_CACHE_SIZE0),
1467 cache_size);
1468
1469 cache_addr = adev->vcn.inst[i].gpu_addr + offset;
1470 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1471 mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
1472 lower_32_bits(cache_addr));
1473 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1474 mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
1475 upper_32_bits(cache_addr));
1476 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1477 mmUVD_VCPU_CACHE_OFFSET1),
1478 0);
1479 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1480 mmUVD_VCPU_CACHE_SIZE1),
1481 AMDGPU_VCN_STACK_SIZE);
1482
1483 cache_addr = adev->vcn.inst[i].gpu_addr + offset +
1484 AMDGPU_VCN_STACK_SIZE;
1485 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1486 mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
1487 lower_32_bits(cache_addr));
1488 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1489 mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
1490 upper_32_bits(cache_addr));
1491 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1492 mmUVD_VCPU_CACHE_OFFSET2),
1493 0);
1494 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1495 mmUVD_VCPU_CACHE_SIZE2),
1496 AMDGPU_VCN_CONTEXT_SIZE);
1497
1498 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
1499 ring = &adev->vcn.inst[i].ring_enc[j];
1500 ring->wptr = 0;
1501 rb_addr = ring->gpu_addr;
1502 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1503 mmUVD_RB_BASE_LO),
1504 lower_32_bits(rb_addr));
1505 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1506 mmUVD_RB_BASE_HI),
1507 upper_32_bits(rb_addr));
1508 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1509 mmUVD_RB_SIZE),
1510 ring->ring_size / 4);
1511 }
1512
1513 ring = &adev->vcn.inst[i].ring_dec;
1514 ring->wptr = 0;
1515 rb_addr = ring->gpu_addr;
1516 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1517 mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
1518 lower_32_bits(rb_addr));
1519 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1520 mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
1521 upper_32_bits(rb_addr));
1522 /* force RBC into idle state */
1523 tmp = order_base_2(ring->ring_size);
1524 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
1525 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
1526 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
1527 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
1528 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
1529 MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
1530 mmUVD_RBC_RB_CNTL),
1531 tmp);
1532
1533 /* add end packet */
1534 MMSCH_V3_0_INSERT_END();
1535
1536 /* record this instance's status, table offset and size in the header */
1537 header.inst[i].init_status = 0;
1538 header.inst[i].table_offset = header.total_size;
1539 header.inst[i].table_size = table_size;
1540 header.total_size += table_size;
1541 }
1542
1543 /* Update init table header in memory */
1544 size = sizeof(struct mmsch_v3_0_init_header);
1545 table_loc = (uint32_t *)table->cpu_addr;
1546 memcpy((void *)table_loc, &header, size);
1547
1548 /* message MMSCH (in VCN[0]) to initialize this client
1549 * 1, write the GPU mc addr of the memory descriptor location
1550 * to the mmsch_vf_ctx_addr_lo/hi registers
1551 */
1552 ctx_addr = table->gpu_addr;
1553 WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
1554 WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));
1555
1556 /* 2, update vmid of descriptor */
1557 tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
1558 tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
1559 /* use domain0 for MM scheduler */
1560 tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
1561 WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp);
1562
1563 /* 3, notify mmsch about the size of this descriptor */
1564 size = header.total_size;
1565 WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
1566
1567 /* 4, set resp to zero */
1568 WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
1569
1570 /* 5, kick off the initialization and wait until
1571 * MMSCH_VF_MAILBOX_RESP reads back the expected ack value
1572 */
1573 param = 0x10000001;
1574 WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param);
1575 tmp = 0;
1576 timeout = 1000;
1577 resp = 0;
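/* MMSCH acknowledges by writing the request value + 1 to the mailbox */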
1578 expected = param + 1;
1579 while (resp != expected) {
1580 resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
1581 if (resp == expected)
1582 break;
1583
1584 udelay(10);
1585 tmp = tmp + 10;
1586 if (tmp >= timeout) {
1587 DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"
1588 " waiting for mmMMSCH_VF_MAILBOX_RESP "
1589 "(expected=0x%08x, readback=0x%08x)\n",
1590 tmp, expected, resp);
1591 return -EBUSY;
1592 }
1593 }
1594
1595 return 0;
1596}
1597
1598static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
1599{
1600 struct amdgpu_device *adev = vinst->adev;
1601 int inst_idx = vinst->inst;
1602 struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
1603 uint32_t tmp;
1604
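/* unpause DPG first so outstanding work can drain before gating */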
1605 vcn_v3_0_pause_dpg_mode(vinst, &state);
1606
1607 /* Wait for power status to be 1 */
1608 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1609 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1610
1611 /* wait for read ptr to be equal to write ptr */
1612 tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
1613 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1614
1615 tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
1616 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
1617
1618 tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
1619 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
1620
1621 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
1622 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1623
1624 /* disable dynamic power gating mode */
1625 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
1626 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1627
1628 /* Keep one read-back to ensure all register writes are done;
1629 * otherwise we may introduce race conditions.
1630 */
1631 RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS);
1632
1633 return 0;
1634}
1635
1636static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst)
1637{
1638 struct amdgpu_device *adev = vinst->adev;
1639 int i = vinst->inst;
1640 uint32_t tmp;
1641 int r = 0;
1642
1643 if (adev->vcn.harvest_config & (1 << i))
1644 return 0;
1645
1646 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1647 r = vcn_v3_0_stop_dpg_mode(vinst);
1648 goto done;
1649 }
1650
1651 /* wait for vcn idle */
1652 r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
1653 if (r)
1654 goto done;
1655
1656 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1657 UVD_LMI_STATUS__READ_CLEAN_MASK |
1658 UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1659 UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1660 r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1661 if (r)
1662 goto done;
1663
1664 /* disable LMI UMC channel */
1665 tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
1666 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1667 WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
1668 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
1669 UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1670 r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
1671 if (r)
1672 goto done;
1673
1674 /* block VCPU register access */
1675 WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
1676 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1677 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1678
1679 /* reset VCPU */
1680 WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
1681 UVD_VCPU_CNTL__BLK_RST_MASK,
1682 ~UVD_VCPU_CNTL__BLK_RST_MASK);
1683
1684 /* disable VCPU clock */
1685 WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
1686 ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1687
1688 /* apply soft reset */
1689 tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
1690 tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1691 WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
1692 tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
1693 tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1694 WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
1695
1696 /* clear status */
1697 WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
1698
1699 /* apply HW clock gating */
1700 vcn_v3_0_enable_clock_gating(vinst);
1701
1702 /* enable VCN power gating */
1703 vcn_v3_0_enable_static_power_gating(vinst);
1704
1705 /* Keep one read-back to ensure all register writes are done;
1706 * otherwise we may introduce race conditions.
1707 */
1708 RREG32_SOC15(VCN, i, mmUVD_STATUS);
1709
1710done:
1711 if (adev->pm.dpm_enabled)
1712 amdgpu_dpm_enable_vcn(adev, false, i);
1713
1714 return r;
1715}
1716
1717static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
1718 struct dpg_pause_state *new_state)
1719{
1720 struct amdgpu_device *adev = vinst->adev;
1721 int inst_idx = vinst->inst;
1722 volatile struct amdgpu_fw_shared *fw_shared;
1723 struct amdgpu_ring *ring;
1724 uint32_t reg_data = 0;
1725 int ret_code;
1726
1727 /* pause/unpause if state is changed */
1728 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1729 DRM_DEBUG("dpg pause state changed %d -> %d\n",
1730 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1731 reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
1732 (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1733
1734 if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
1735 ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
1736 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1737
1738 if (!ret_code) {
1739 /* pause DPG */
1740 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1741 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1742
1743 /* wait for ACK */
1744 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
1745 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
1746 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
1747
1748 /* Stall DPG before WPTR/RPTR reset */
1749 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1750 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
1751 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1752
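/* VCN 3.0.33 has no encode rings, so skip the ring restore */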
1753 if (amdgpu_ip_version(adev, UVD_HWIP, 0) !=
1754 IP_VERSION(3, 0, 33)) {
1755 /* Restore: re-program the encode rings while DPG is stalled */
1756 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1757 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
1758 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1759 ring->wptr = 0;
1760 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
1761 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
1762 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
1763 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
1764 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
1765 fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1766
1767 fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
1768 ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1769 ring->wptr = 0;
1770 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
1771 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
1772 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
1773 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
1774 WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
1775 fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
1776
1777 /* restore wptr/rptr with pointers saved in FW shared memory */
1778 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
1779 WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
1780 }
1781
1782 /* Unstall DPG */
1783 WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
1784 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
1785
1786 SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
1787 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1788 }
1789 } else {
1790 /* unpause dpg, no need to wait */
1791 reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
1792 WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
1793 }
1794 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1795 }
1796
1797 return 0;
1798}
1799
1800/**
1801 * vcn_v3_0_dec_ring_get_rptr - get read pointer
1802 *
1803 * @ring: amdgpu_ring pointer
1804 *
1805 * Returns the current hardware read pointer
1806 */
1807static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
1808{
1809 struct amdgpu_device *adev = ring->adev;
1810
1811 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
1812}
1813
1814/**
1815 * vcn_v3_0_dec_ring_get_wptr - get write pointer
1816 *
1817 * @ring: amdgpu_ring pointer
1818 *
1819 * Returns the current hardware write pointer
1820 */
1821static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
1822{
1823 struct amdgpu_device *adev = ring->adev;
1824
1825 if (ring->use_doorbell)
1826 return *ring->wptr_cpu_addr;
1827 else
1828 return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
1829}
1830
1831/**
1832 * vcn_v3_0_dec_ring_set_wptr - set write pointer
1833 *
1834 * @ring: amdgpu_ring pointer
1835 *
1836 * Commits the write pointer to the hardware
1837 */
1838static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
1839{
1840 struct amdgpu_device *adev = ring->adev;
1841 volatile struct amdgpu_fw_shared *fw_shared;
1842
1843 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1844 /* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */
1845 fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
1846 fw_shared->rb.wptr = lower_32_bits(ring->wptr);
1847 WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
1848 lower_32_bits(ring->wptr));
1849 }
1850
1851 if (ring->use_doorbell) {
1852 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1853 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1854 } else {
1855 WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
1856 }
1857}
1858
1859static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
1860 .type = AMDGPU_RING_TYPE_VCN_DEC,
1861 .align_mask = 0x3f,
1862 .nop = VCN_DEC_SW_CMD_NO_OP,
1863 .secure_submission_supported = true,
1864 .get_rptr = vcn_v3_0_dec_ring_get_rptr,
1865 .get_wptr = vcn_v3_0_dec_ring_get_wptr,
1866 .set_wptr = vcn_v3_0_dec_ring_set_wptr,
1867 .emit_frame_size =
1868 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1869 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1870 VCN_SW_RING_EMIT_FRAME_SIZE,
1871 .emit_ib_size = 5, /* vcn_dec_sw_ring_emit_ib */
1872 .emit_ib = vcn_dec_sw_ring_emit_ib,
1873 .emit_fence = vcn_dec_sw_ring_emit_fence,
1874 .emit_vm_flush = vcn_dec_sw_ring_emit_vm_flush,
1875 .test_ring = amdgpu_vcn_dec_sw_ring_test_ring,
1876 .test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */
1877 .insert_nop = amdgpu_ring_insert_nop,
1878 .insert_end = vcn_dec_sw_ring_insert_end,
1879 .pad_ib = amdgpu_ring_generic_pad_ib,
1880 .begin_use = amdgpu_vcn_ring_begin_use,
1881 .end_use = amdgpu_vcn_ring_end_use,
1882 .emit_wreg = vcn_dec_sw_ring_emit_wreg,
1883 .emit_reg_wait = vcn_dec_sw_ring_emit_reg_wait,
1884 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1885};
1886
1887static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
1888 struct amdgpu_job *job)
1889{
1890 struct drm_gpu_scheduler **scheds;
1891
1892 /* The create msg must be in the first IB submitted */
1893 if (atomic_read(&job->base.entity->fence_seq))
1894 return -EINVAL;
1895
1896 /* if VCN0 is harvested, we can't support AV1 */
1897 if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
1898 return -EINVAL;
1899
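/* limit the job to the first instance's decode scheduler */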
1900 scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
1901 [AMDGPU_RING_PRIO_DEFAULT].sched;
1902 drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
1903 return 0;
1904}
1905
1906static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
1907 uint64_t addr)
1908{
1909 struct ttm_operation_ctx ctx = { false, false };
1910 struct amdgpu_bo_va_mapping *map;
1911 uint32_t *msg, num_buffers;
1912 struct amdgpu_bo *bo;
1913 uint64_t start, end;
1914 unsigned int i;
1915 void *ptr;
1916 int r;
1917
1918 addr &= AMDGPU_GMC_HOLE_MASK;
1919 r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
1920 if (r) {
1921 DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
1922 return r;
1923 }
1924
1925 start = map->start * AMDGPU_GPU_PAGE_SIZE;
1926 end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
1927 if (addr & 0x7) {
1928 DRM_ERROR("VCN messages must be 8 byte aligned!\n");
1929 return -EINVAL;
1930 }
1931
1932 bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1933 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
1934 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1935 if (r) {
1936 DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
1937 return r;
1938 }
1939
1940 r = amdgpu_bo_kmap(bo, &ptr);
1941 if (r) {
1942 DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
1943 return r;
1944 }
1945
1946 msg = ptr + addr - start;
1947
1948 /* Check length */
1949 if (msg[1] > end - addr) {
1950 r = -EINVAL;
1951 goto out;
1952 }
1953
1954 if (msg[3] != RDECODE_MSG_CREATE)
1955 goto out;
1956
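/* walk the buffer messages; each entry is four dwords long */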
1957 num_buffers = msg[2];
1958 for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
1959 uint32_t offset, size, *create;
1960
1961 if (msg[0] != RDECODE_MESSAGE_CREATE)
1962 continue;
1963
1964 offset = msg[1];
1965 size = msg[2];
1966
1967 if (offset + size > end) {
1968 r = -EINVAL;
1969 goto out;
1970 }
1971
1972 create = ptr + addr + offset - start;
1973
1974 /* H264, HEVC and VP9 can run on any instance */
1975 if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
1976 continue;
1977
1978 r = vcn_v3_0_limit_sched(p, job);
1979 if (r)
1980 goto out;
1981 }
1982
1983out:
1984 amdgpu_bo_kunmap(bo);
1985 return r;
1986}
1987
1988static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
1989 struct amdgpu_job *job,
1990 struct amdgpu_ib *ib)
1991{
1992 struct amdgpu_ring *ring = amdgpu_job_ring(job);
1993 uint32_t msg_lo = 0, msg_hi = 0;
1994 unsigned i;
1995 int r;
1996
1997 /* The first instance can decode anything */
1998 if (!ring->me)
1999 return 0;
2000
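/* Scan the IB for the DATA0/DATA1/CMD register-write sequence that
 * carries the decode message address, then validate that message.
 */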
2001 for (i = 0; i < ib->length_dw; i += 2) {
2002 uint32_t reg = amdgpu_ib_get_value(ib, i);
2003 uint32_t val = amdgpu_ib_get_value(ib, i + 1);
2004
2005 if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) {
2006 msg_lo = val;
2007 } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) {
2008 msg_hi = val;
2009 } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) &&
2010 val == 0) {
2011 r = vcn_v3_0_dec_msg(p, job,
2012 ((u64)msg_hi) << 32 | msg_lo);
2013 if (r)
2014 return r;
2015 }
2016 }
2017 return 0;
2018}
2019
2020static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
2021 .type = AMDGPU_RING_TYPE_VCN_DEC,
2022 .align_mask = 0xf,
2023 .secure_submission_supported = true,
2024 .get_rptr = vcn_v3_0_dec_ring_get_rptr,
2025 .get_wptr = vcn_v3_0_dec_ring_get_wptr,
2026 .set_wptr = vcn_v3_0_dec_ring_set_wptr,
2027 .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
2028 .emit_frame_size =
2029 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
2030 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
2031 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
2032 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
2033 6,
2034 .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
2035 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
2036 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
2037 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
2038 .test_ring = vcn_v2_0_dec_ring_test_ring,
2039 .test_ib = amdgpu_vcn_dec_ring_test_ib,
2040 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
2041 .insert_start = vcn_v2_0_dec_ring_insert_start,
2042 .insert_end = vcn_v2_0_dec_ring_insert_end,
2043 .pad_ib = amdgpu_ring_generic_pad_ib,
2044 .begin_use = amdgpu_vcn_ring_begin_use,
2045 .end_use = amdgpu_vcn_ring_end_use,
2046 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
2047 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
2048 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2049 .reset = amdgpu_vcn_ring_reset,
2050};
2051
2052/**
2053 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
2054 *
2055 * @ring: amdgpu_ring pointer
2056 *
2057 * Returns the current hardware enc read pointer
2058 */
2059static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
2060{
2061 struct amdgpu_device *adev = ring->adev;
2062
2063 if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
2064 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
2065 else
2066 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
2067}
2068
2069/**
2070 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
2071 *
2072 * @ring: amdgpu_ring pointer
2073 *
2074 * Returns the current hardware enc write pointer
2075 */
2076static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
2077{
2078 struct amdgpu_device *adev = ring->adev;
2079
2080 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
2081 if (ring->use_doorbell)
2082 return *ring->wptr_cpu_addr;
2083 else
2084 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
2085 } else {
2086 if (ring->use_doorbell)
2087 return *ring->wptr_cpu_addr;
2088 else
2089 return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
2090 }
2091}
2092
2093/**
2094 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
2095 *
2096 * @ring: amdgpu_ring pointer
2097 *
2098 * Commits the enc write pointer to the hardware
2099 */
2100static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2101{
2102 struct amdgpu_device *adev = ring->adev;
2103
2104 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
2105 if (ring->use_doorbell) {
2106 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
2107 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2108 } else {
2109 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
2110 }
2111 } else {
2112 if (ring->use_doorbell) {
2113 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
2114 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
2115 } else {
2116 WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
2117 }
2118 }
2119}
2120
2121static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
2122 .type = AMDGPU_RING_TYPE_VCN_ENC,
2123 .align_mask = 0x3f,
2124 .nop = VCN_ENC_CMD_NO_OP,
2125 .get_rptr = vcn_v3_0_enc_ring_get_rptr,
2126 .get_wptr = vcn_v3_0_enc_ring_get_wptr,
2127 .set_wptr = vcn_v3_0_enc_ring_set_wptr,
2128 .emit_frame_size =
2129 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2130 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
2131 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
2132 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
2133 1, /* vcn_v2_0_enc_ring_insert_end */
2134 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
2135 .emit_ib = vcn_v2_0_enc_ring_emit_ib,
2136 .emit_fence = vcn_v2_0_enc_ring_emit_fence,
2137 .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
2138 .test_ring = amdgpu_vcn_enc_ring_test_ring,
2139 .test_ib = amdgpu_vcn_enc_ring_test_ib,
2140 .insert_nop = amdgpu_ring_insert_nop,
2141 .insert_end = vcn_v2_0_enc_ring_insert_end,
2142 .pad_ib = amdgpu_ring_generic_pad_ib,
2143 .begin_use = amdgpu_vcn_ring_begin_use,
2144 .end_use = amdgpu_vcn_ring_end_use,
2145 .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
2146 .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
2147 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2148 .reset = amdgpu_vcn_ring_reset,
2149};
2150
2151static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
2152{
2153 int i;
2154
2155 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2156 if (adev->vcn.harvest_config & (1 << i))
2157 continue;
2158
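/* select software-ring or register-based decode ring callbacks
 * depending on DEC_SW_RING_ENABLED
 */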
2159 if (!DEC_SW_RING_ENABLED)
2160 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
2161 else
2162 adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs;
2163 adev->vcn.inst[i].ring_dec.me = i;
2164 }
2165}
2166
2167static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2168{
2169 int i, j;
2170
2171 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2172 if (adev->vcn.harvest_config & (1 << i))
2173 continue;
2174
2175 for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) {
2176 adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
2177 adev->vcn.inst[i].ring_enc[j].me = i;
2178 }
2179 }
2180}
2181
2182static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
2183{
2184 int r;
2185
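/* full stop/start cycle: re-gate clocks and power before restarting */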
2186 r = vcn_v3_0_stop(vinst);
2187 if (r)
2188 return r;
2189 vcn_v3_0_enable_clock_gating(vinst);
2190 vcn_v3_0_enable_static_power_gating(vinst);
2191 return vcn_v3_0_start(vinst);
2192}
2193
2194static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
2195{
2196 struct amdgpu_device *adev = ip_block->adev;
2197 int i, ret = 1;
2198
2199 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2200 if (adev->vcn.harvest_config & (1 << i))
2201 continue;
2202
2203 ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
2204 }
2205
2206 return ret;
2207}
2208
2209static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
2210{
2211 struct amdgpu_device *adev = ip_block->adev;
2212 int i, ret = 0;
2213
2214 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2215 if (adev->vcn.harvest_config & (1 << i))
2216 continue;
2217
2218 ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
2219 UVD_STATUS__IDLE);
2220 if (ret)
2221 return ret;
2222 }
2223
2224 return ret;
2225}
2226
2227static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2228 enum amd_clockgating_state state)
2229{
2230 struct amdgpu_device *adev = ip_block->adev;
2231 bool enable = state == AMD_CG_STATE_GATE;
2232 int i;
2233
2234 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2235 struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
2236 if (adev->vcn.harvest_config & (1 << i))
2237 continue;
2238
2239 if (enable) {
2240 if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE)
2241 return -EBUSY;
2242 vcn_v3_0_enable_clock_gating(vinst);
2243 } else {
2244 vcn_v3_0_disable_clock_gating(vinst);
2245 }
2246 }
2247
2248 return 0;
2249}
2250
2251static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
2252 enum amd_powergating_state state)
2253{
2254 struct amdgpu_device *adev = vinst->adev;
2255 int ret = 0;
2256
2257 /* For SRIOV, the guest should not control VCN power-gating;
2258 * MMSCH FW controls both power-gating and clock-gating, so the
2259 * guest should avoid touching CGC and PG.
2260 */
2261 if (amdgpu_sriov_vf(adev)) {
2262 vinst->cur_state = AMD_PG_STATE_UNGATE;
2263 return 0;
2264 }
2265
2266 if (state == vinst->cur_state)
2267 return 0;
2268
2269 if (state == AMD_PG_STATE_GATE)
2270 ret = vcn_v3_0_stop(vinst);
2271 else
2272 ret = vcn_v3_0_start(vinst);
2273
2274 if (!ret)
2275 vinst->cur_state = state;
2276
2277 return ret;
2278}
2279
2280static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
2281 struct amdgpu_irq_src *source,
2282 unsigned type,
2283 enum amdgpu_interrupt_state state)
2284{
2285 return 0;
2286}
2287
2288static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
2289 struct amdgpu_irq_src *source,
2290 struct amdgpu_iv_entry *entry)
2291{
2292 uint32_t ip_instance;
2293
2294 switch (entry->client_id) {
2295 case SOC15_IH_CLIENTID_VCN:
2296 ip_instance = 0;
2297 break;
2298 case SOC15_IH_CLIENTID_VCN1:
2299 ip_instance = 1;
2300 break;
2301 default:
2302 DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
2303 return 0;
2304 }
2305
2306 DRM_DEBUG("IH: VCN TRAP\n");
2307
2308 switch (entry->src_id) {
2309 case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
2310 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
2311 break;
2312 case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
2313 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
2314 break;
2315 case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
2316 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
2317 break;
2318 default:
2319 DRM_ERROR("Unhandled interrupt: %d %d\n",
2320 entry->src_id, entry->src_data[0]);
2321 break;
2322 }
2323
2324 return 0;
2325}
2326
2327static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
2328 .set = vcn_v3_0_set_interrupt_state,
2329 .process = vcn_v3_0_process_interrupt,
2330};
2331
2332static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
2333{
2334 int i;
2335
2336 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
2337 if (adev->vcn.harvest_config & (1 << i))
2338 continue;
2339
2340 adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
2341 adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
2342 }
2343}
2344
2345static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
2346{
2347 struct amdgpu_device *adev = ip_block->adev;
2348 int i, j;
2349 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
2350 uint32_t inst_off;
2351 bool is_powered;
2352
2353 if (!adev->vcn.ip_dump)
2354 return;
2355
2356 drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
2357 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2358 if (adev->vcn.harvest_config & (1 << i)) {
2359 drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
2360 continue;
2361 }
2362
2363 inst_off = i * reg_count;
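/* a saved power status of 1 means the instance is power gated */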
2364 is_powered = (adev->vcn.ip_dump[inst_off] &
2365 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
2366
2367 if (is_powered) {
2368 drm_printf(p, "\nActive Instance:VCN%d\n", i);
2369 for (j = 0; j < reg_count; j++)
2370 drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
2371 adev->vcn.ip_dump[inst_off + j]);
2372 } else {
2373 drm_printf(p, "\nInactive Instance:VCN%d\n", i);
2374 }
2375 }
2376}
2377
2378static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
2379{
2380 struct amdgpu_device *adev = ip_block->adev;
2381 int i, j;
2382 bool is_powered;
2383 uint32_t inst_off;
2384 uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
2385
2386 if (!adev->vcn.ip_dump)
2387 return;
2388
2389 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2390 if (adev->vcn.harvest_config & (1 << i))
2391 continue;
2392
2393 inst_off = i * reg_count;
2394 /* mmUVD_POWER_STATUS is always readable and is the first element of the array */
2395 adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
2396 is_powered = (adev->vcn.ip_dump[inst_off] &
2397 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
2398
2399 if (is_powered)
2400 for (j = 1; j < reg_count; j++)
2401 adev->vcn.ip_dump[inst_off + j] =
2402 RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
2403 }
2404}
2405
2406static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
2407 .name = "vcn_v3_0",
2408 .early_init = vcn_v3_0_early_init,
2409 .sw_init = vcn_v3_0_sw_init,
2410 .sw_fini = vcn_v3_0_sw_fini,
2411 .hw_init = vcn_v3_0_hw_init,
2412 .hw_fini = vcn_v3_0_hw_fini,
2413 .suspend = vcn_v3_0_suspend,
2414 .resume = vcn_v3_0_resume,
2415 .is_idle = vcn_v3_0_is_idle,
2416 .wait_for_idle = vcn_v3_0_wait_for_idle,
2417 .set_clockgating_state = vcn_v3_0_set_clockgating_state,
2418 .set_powergating_state = vcn_set_powergating_state,
2419 .dump_ip_state = vcn_v3_0_dump_ip_state,
2420 .print_ip_state = vcn_v3_0_print_ip_state,
2421};
2422
2423const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
2424 .type = AMD_IP_BLOCK_TYPE_VCN,
2425 .major = 3,
2426 .minor = 0,
2427 .rev = 0,
2428 .funcs = &vcn_v3_0_ip_funcs,
2429};