/*
 * Copyright 2016-2024 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2	"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0	"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2	"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_3	"amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4	"amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5	"amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6	"amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1	"amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0	"amdgpu/vcn_5_0_0.bin"
#define FIRMWARE_VCN5_0_1	"amdgpu/vcn_5_0_1.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);

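/**
 * amdgpu_vcn_early_init - request VCN firmware
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Decode the ucode prefix for the VCN IP block and request the firmware
 * image for instance @i. When per-instance firmware is not in use, all
 * instances share the image requested for instance 0.
 *
 * Returns 0 on success, a negative error code on failure.
 */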
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
	char ucode_prefix[25];
	int r;

	adev->vcn.inst[i].adev = adev;
	adev->vcn.inst[i].inst = i;
	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));

	if (i != 0 && adev->vcn.per_inst_fw) {
		r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_%d.bin", ucode_prefix, i);
		if (r)
			amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		if (!adev->vcn.inst[0].fw) {
			r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
						 AMDGPU_UCODE_REQUIRED,
						 "amdgpu/%s.bin", ucode_prefix);
			if (r)
				amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		} else {
			r = 0;
		}
		adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
	}

	return r;
}

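/**
 * amdgpu_vcn_sw_init - software init for a VCN instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Parse the firmware header, apply platform quirks, and allocate the
 * VCPU buffer, including room for the firmware shared memory area and
 * the optional firmware log. Also allocates the DPG scratch buffer
 * when indirect SRAM mode is used.
 *
 * Returns 0 on success, a negative error code on failure.
 */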
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
	unsigned long bo_size;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int r;

	mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
	mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
		adev->vcn.inst[i].indirect_sram = true;

	/*
	 * Some Steam Deck BIOS versions are incompatible with the
	 * indirect SRAM mode, leading to amdgpu being unable to get
	 * properly probed (and even potentially crashing the kernel).
	 * Hence, check for these versions here - notice this is
	 * restricted to Vangogh (the Deck's APU).
	 */
	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);

		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
				 !strncmp("F7A0114", bios_ver, 7))) {
			adev->vcn.inst[i].indirect_sram = false;
			dev_info(adev->dev,
				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
		}
	}

	/* from vcn4 and above, only unified queue is used */
	adev->vcn.inst[i].using_unified_queue =
		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);

	hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
	adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the
	 * new naming convention. In the old naming convention this field is
	 * part of the minor version and DRM_DISABLED_FLAG. Since the latest
	 * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
	 * naming convention, this field has always been zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		dev_info(adev->dev,
			 "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 i, enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
			 i, version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vcn.inst[i].vcpu_bo,
				    &adev->vcn.inst[i].gpu_addr,
				    &adev->vcn.inst[i].cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
			bo_size - fw_shared_size;
	adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
			bo_size - fw_shared_size;

	adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

	if (amdgpu_vcnfw_log) {
		adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.log_offset = log_offset;
	}

	if (adev->vcn.inst[i].indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->vcn.inst[i].dpg_sram_bo,
					    &adev->vcn.inst[i].dpg_sram_gpu_addr,
					    &adev->vcn.inst[i].dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
			return r;
		}
	}

	return 0;
}

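/**
 * amdgpu_vcn_sw_fini - software fini for a VCN instance
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Free the buffers allocated in amdgpu_vcn_sw_init(), tear down the
 * decode and encode rings, and release the firmware image.
 */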
void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
	int j;

	if (adev->vcn.harvest_config & (1 << i))
		return;

	amdgpu_bo_free_kernel(
		&adev->vcn.inst[i].dpg_sram_bo,
		&adev->vcn.inst[i].dpg_sram_gpu_addr,
		(void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);

	kvfree(adev->vcn.inst[i].saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
			      &adev->vcn.inst[i].gpu_addr,
			      (void **)&adev->vcn.inst[i].cpu_addr);

	amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);

	if (adev->vcn.per_inst_fw) {
		amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		adev->vcn.inst[i].fw = NULL;
	}

	if (adev->vcn.reg_list)
		amdgpu_vcn_reg_dump_fini(adev);

	mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
	mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
}

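/**
 * amdgpu_vcn_is_disabled_vcn - check whether a queue type is disabled
 * @adev: amdgpu_device pointer
 * @type: queue type of the ring
 * @vcn_instance: VCN instance to check
 *
 * Returns true if the given queue type is disabled in the instance's
 * vcn_config fuses, false otherwise.
 */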
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
		ret = true;

	return ret;
}

static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.inst[i].saved_bo)
		return -ENOMEM;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
		drm_dev_exit(idx);
	}

	return 0;
}

int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
{
	int ret, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
		if (ret)
			return ret;
	}

	return 0;
}

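/**
 * amdgpu_vcn_suspend - save VCPU buffer state before suspend
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Save the VCPU buffer to system memory, except during a RAS error
 * interrupt or a PCIe link reset, where the buffer is known to be
 * corrupted and will be re-initialized in amdgpu_vcn_resume().
 *
 * Returns 0 on success, a negative error code on failure.
 */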
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	/*
	 * err_event_athub and dpc recovery will corrupt VCPU buffer, so we need
	 * to restore fw data and clear buffer in amdgpu_vcn_resume()
	 */
	if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
		return 0;

	return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}

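/**
 * amdgpu_vcn_resume - restore VCPU buffer state after resume
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * Restore the VCPU buffer from the copy saved at suspend time. If no
 * saved copy exists, reload the firmware image (for non-PSP loading)
 * and clear the remainder of the buffer.
 *
 * Returns 0 on success, a negative error code on failure.
 */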
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	if (adev->vcn.inst[i].saved_bo != NULL) {
		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			drm_dev_exit(idx);
		}
		kvfree(adev->vcn.inst[i].saved_bo);
		adev->vcn.inst[i].saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned int offset;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(adev->vcn.inst[i].cpu_addr,
					    adev->vcn.inst[i].fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				drm_dev_exit(idx);
			}
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

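/*
 * Workload profile helpers: take/release the VIDEO power profile around
 * VCN activity. amdgpu_vcn_get_profile() enables the profile if it is
 * not already active; amdgpu_vcn_put_profile() disables it once every
 * instance is power gated again. Both are serialized by
 * workload_profile_mutex.
 */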
void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
{
	int r;

	mutex_lock(&adev->vcn.workload_profile_mutex);

	if (adev->vcn.workload_profile_active) {
		mutex_unlock(&adev->vcn.workload_profile_mutex);
		return;
	}
	r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
					    true);
	if (r)
		dev_warn(adev->dev,
			 "(%d) failed to enable video power profile mode\n", r);
	else
		adev->vcn.workload_profile_active = true;
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
{
	bool pg = true;
	int r, i;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
			pg = false;
			break;
		}
	}

	if (pg) {
		r = amdgpu_dpm_switch_power_profile(adev,
						    PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev,
				 "(%d) failed to disable video power profile mode\n",
				 r);
		else
			adev->vcn.workload_profile_active = false;
	}

	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

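/*
 * Idle worker: counts the fences still outstanding on the decode and
 * encode rings of this instance, adjusts the DPG pause state (pre-VCN4
 * only) and power gates the instance once nothing is pending; otherwise
 * it re-arms itself with VCN_IDLE_TIMEOUT.
 */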
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_vcn_inst *vcn_inst =
		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vcn_inst->adev;
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i = vcn_inst->inst, j;

	if (adev->vcn.harvest_config & (1 << i))
		return;

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !adev->vcn.inst[i].using_unified_queue) {
		struct dpg_pause_state new_state;

		if (fence[i] ||
		    unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
	}

	fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
	fences += fence[i];

	if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
		mutex_lock(&vcn_inst->vcn_pg_lock);
		vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
		mutex_unlock(&vcn_inst->vcn_pg_lock);
		amdgpu_vcn_put_profile(adev);
	} else {
		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
	}
}

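/**
 * amdgpu_vcn_ring_begin_use - power up the instance before ring use
 * @ring: ring about to be used
 *
 * Cancel the pending idle work, ungate the instance and update the DPG
 * pause state (pre-VCN4 only), then switch to the video workload profile.
 */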
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];

	atomic_inc(&vcn_inst->total_submission_cnt);

	cancel_delayed_work_sync(&vcn_inst->idle_work);

	mutex_lock(&vcn_inst->vcn_pg_lock);
	vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !vcn_inst->using_unified_queue) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < vcn_inst->num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);

			if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
	}
	mutex_unlock(&vcn_inst->vcn_pg_lock);
	amdgpu_vcn_get_profile(adev);
}

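/**
 * amdgpu_vcn_ring_end_use - mark the end of a ring submission
 * @ring: ring that was used
 *
 * Drop the submission counters taken in amdgpu_vcn_ring_begin_use() and
 * schedule the idle work that will eventually gate the instance again.
 */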
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
	    !adev->vcn.inst[ring->me].using_unified_queue)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
			      VCN_IDLE_TIMEOUT);
}

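/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring test
 * @ring: ring to test
 *
 * Write a token to the scratch register through the ring and poll the
 * register until it reads back. Skipped under SR-IOV, where direct
 * register access is not supported.
 *
 * Returns 0 on success, a negative error code on failure.
 */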
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     64, AMDGPU_IB_POOL_DIRECT,
				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						   uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;

	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	/* single queue headers */
	if (adev->vcn.inst[ring->me].using_unified_queue) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
				+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

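/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring test
 * @ring: ring to test
 *
 * Submit a VCN_ENC_CMD_END packet and poll the read pointer until it
 * advances past the recorded value. Skipped under SR-IOV.
 *
 * Returns 0 on success, a negative error code on failure.
 */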
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(&ib, fence);
	dma_fence_put(fence);

	return r;
}

int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	long r;

	if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
		if (r)
			goto error;
	}

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

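/**
 * amdgpu_vcn_setup_ucode - register VCN firmware with the PSP loader
 * @adev: amdgpu_device pointer
 * @i: VCN instance index
 *
 * When firmware is loaded through PSP, record the firmware image of
 * instance @i in the global ucode table and account for its size.
 */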
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		if (adev->vcn.harvest_config & (1 << i))
			return;

		if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
		     amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
		    && (i > 0))
			return;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		/* currently only support 2 FW instances */
		if (i >= 2) {
			dev_info(adev->dev, "More than 2 VCN FW instances!\n");
			return;
		}
		idx = AMDGPU_UCODE_ID_VCN + i;
		adev->firmware.ucode[idx].ucode_id = idx;
		adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
	}
}

/*
 * debugfs for mapping vcn firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min_t(size_t, size, available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
						     + vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ih_data.head = *ras_if;
		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
		else
			dev_warn(adev->dev,
				 "No ras_poison_handler interface in SRIOV for VCN!\n");
	}

	return 0;
}

int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i) ||
			    !adev->vcn.inst[i].ras_poison_irq.funcs)
				continue;

			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
			if (r)
				goto late_fini;
		}
	}
	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_vcn_ras *ras;

	if (!adev->vcn.ras)
		return 0;

	ras = adev->vcn.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register vcn ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "vcn");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->vcn.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;

	return 0;
}

int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = (ucode_id ? ucode_id :
			     (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
					 AMDGPU_UCODE_ID_VCN0_RAM)),
		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
			       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
}

static DEVICE_ATTR(vcn_reset_mask, 0444,
		   amdgpu_get_vcn_reset_mask, NULL);

int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->vcn.num_vcn_inst) {
		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
		if (r)
			return r;
	}

	return r;
}

void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (adev->dev->kobj.sd) {
		if (adev->vcn.num_vcn_inst)
			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
	}
}

/*
 * debugfs to enable/disable vcn job submission to specific core or
 * instance. It is created only if the queue type is unified.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
	if ((val & mask) == 0)
		return -EINVAL;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (val & (1ULL << i))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;
	}
	/* publish sched.ready flag update effective immediately across smp */
	smp_rmb();
	return 0;
}

static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (ring->sched.ready)
			mask |= 1ULL << i;
	}
	*val = mask;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
			 amdgpu_debugfs_vcn_sched_mask_get,
			 amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
#endif

void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
		return;
	sprintf(name, "amdgpu_vcn_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}

/**
 * vcn_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
			      enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		ret |= vinst->set_pg_state(vinst, state);
	}

	return ret;
}

/**
 * amdgpu_vcn_reset_engine - Reset a specific VCN engine
 * @adev: Pointer to the AMDGPU device
 * @instance_id: VCN engine instance to reset
 *
 * Returns: 0 on success, or a negative error code on failure.
 */
static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
				   uint32_t instance_id)
{
	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
	int r, i;

	mutex_lock(&vinst->engine_reset_mutex);
	/* Stop the scheduler's work queue for the dec and enc rings if they are running.
	 * This ensures that no new tasks are submitted to the queues while
	 * the reset is in progress.
	 */
	drm_sched_wqueue_stop(&vinst->ring_dec.sched);
	for (i = 0; i < vinst->num_enc_rings; i++)
		drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);

	/* Perform the VCN reset for the specified instance */
	r = vinst->reset(vinst);
	if (r)
		goto unlock;
	r = amdgpu_ring_test_ring(&vinst->ring_dec);
	if (r)
		goto unlock;
	for (i = 0; i < vinst->num_enc_rings; i++) {
		r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
		if (r)
			goto unlock;
	}
	amdgpu_fence_driver_force_completion(&vinst->ring_dec);
	for (i = 0; i < vinst->num_enc_rings; i++)
		amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);

	/* Restart the scheduler's work queue for the dec and enc rings
	 * if they were stopped by this function. This allows new tasks
	 * to be submitted to the queues after the reset is complete.
	 */
	drm_sched_wqueue_start(&vinst->ring_dec.sched);
	for (i = 0; i < vinst->num_enc_rings; i++)
		drm_sched_wqueue_start(&vinst->ring_enc[i].sched);

unlock:
	mutex_unlock(&vinst->engine_reset_mutex);

	return r;
}

/**
 * amdgpu_vcn_ring_reset - Reset a VCN ring
 * @ring: ring to reset
 * @vmid: vmid of guilty job
 * @timedout_fence: fence of timed out job
 *
 * This helper is for VCN blocks without unified queues because
 * resetting the engine resets all queues in that case. With
 * unified queues we have one queue per engine.
 * Returns: 0 on success, or a negative error code on failure.
 */
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
			  unsigned int vmid,
			  struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		return -EINVAL;

	return amdgpu_vcn_reset_engine(adev, ring->me);
}

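/**
 * amdgpu_vcn_reg_dump_init - allocate the register dump buffer
 * @adev: amdgpu_device pointer
 * @reg: register list to capture on dump
 * @count: number of entries in @reg
 *
 * Allocate one dump slot per instance per register and remember the
 * list for amdgpu_vcn_dump_ip_state()/amdgpu_vcn_print_ip_state().
 *
 * Returns 0 on success, -ENOMEM if the buffer cannot be allocated.
 */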
int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
			     const struct amdgpu_hwip_reg_entry *reg, u32 count)
{
	adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
				    sizeof(uint32_t), GFP_KERNEL);
	if (!adev->vcn.ip_dump)
		return -ENOMEM;
	adev->vcn.reg_list = reg;
	adev->vcn.reg_count = count;

	return 0;
}

static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
{
	kfree(adev->vcn.ip_dump);
	adev->vcn.ip_dump = NULL;
	adev->vcn.reg_list = NULL;
	adev->vcn.reg_count = 0;
}

void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	u32 inst_off;

	if (!adev->vcn.ip_dump)
		return;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		inst_off = i * adev->vcn.reg_count;
		/* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
		adev->vcn.ip_dump[inst_off] =
			RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
			     UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;

		if (is_powered)
			for (j = 1; j < adev->vcn.reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] =
					RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
	}
}

void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i, j;
	bool is_powered;
	u32 inst_off;

	if (!adev->vcn.ip_dump)
		return;

	drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i)) {
			drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
			continue;
		}

		inst_off = i * adev->vcn.reg_count;
		is_powered = (adev->vcn.ip_dump[inst_off] &
			      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
			     UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;

		if (is_powered) {
			drm_printf(p, "\nActive Instance:VCN%d\n", i);
			for (j = 0; j < adev->vcn.reg_count; j++)
				drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
					   adev->vcn.ip_dump[inst_off + j]);
		} else {
			drm_printf(p, "\nInactive Instance:VCN%d\n", i);
		}
	}
}