
drm/amdgpu: Add VCN_5_0_1 support

Add VCN support for VCN 5.0.1.

v2: rebase, squash in fixes (Alex)

Signed-off-by: Sonny Jiang <sonjiang@amd.com>
Acked-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Sonny Jiang and committed by Alex Deucher
346492f3 c406fca4

5 files changed, 1161 insertions(+), 5 deletions(-)

drivers/gpu/drm/amd/amdgpu/Makefile (+2, -1)
@@ -1,5 +1,5 @@
 #
-# Copyright 2017 Advanced Micro Devices, Inc.
+# Copyright 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
 #
 # Permission is hereby granted, free of charge, to any person obtaining a
 # copy of this software and associated documentation files (the "Software"),
@@ -200,6 +200,7 @@
     vcn_v4_0_3.o \
     vcn_v4_0_5.o \
     vcn_v5_0_0.o \
+    vcn_v5_0_1.o \
     amdgpu_jpeg.o \
     jpeg_v1_0.o \
     jpeg_v2_0.o \
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c (+5, -2)
@@ -1031,7 +1031,8 @@
     struct amdgpu_device *adev = ring->adev;
     long r;

-    if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) {
+    if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
+        (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
         r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
         if (r)
             goto error;
@@ -1083,7 +1082,9 @@
             ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

         if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
-                IP_VERSION(4, 0, 3))
+                IP_VERSION(4, 0, 3) ||
+            amdgpu_ip_version(adev, UVD_HWIP, 0) ==
+                IP_VERSION(5, 0, 1))
             break;
     }
 }
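Both hunks above gate on the same two IP versions, 4.0.3 and 5.0.1. If more versions join this pair, a small predicate would keep the two call sites in sync; a minimal sketch, using only helpers already present in this file (the function name itself is hypothetical, not part of this patch):

/* Hypothetical helper (not in this patch): the IP versions that
 * amdgpu_vcn.c special-cases in both hunks of this change. */
static bool amdgpu_vcn_is_unified_queue_version(struct amdgpu_device *adev)
{
    uint32_t ver = amdgpu_ip_version(adev, UVD_HWIP, 0);

    return ver == IP_VERSION(4, 0, 3) || ver == IP_VERSION(5, 0, 1);
}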
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h (+12, -2)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -163,19 +163,29 @@
 #define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
     ({ \
         uint32_t internal_reg_offset, addr; \
-        bool video_range, aon_range; \
+        bool video_range, video1_range, aon_range, aon1_range; \
  \
         addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
         addr <<= 2; \
         video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
                 ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+        video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) && \
+                ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600))))); \
         aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
                 ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+        aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) && \
+                ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600))))); \
         if (video_range) \
             internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
                 (VCN_VID_IP_ADDRESS)); \
         else if (aon_range) \
             internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
+                (VCN_AON_IP_ADDRESS)); \
+        else if (video1_range) \
+            internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) + \
+                (VCN_VID_IP_ADDRESS)); \
+        else if (aon1_range) \
+            internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) + \
                 (VCN_AON_IP_ADDRESS)); \
         else \
             internal_reg_offset = (0xFFFFF & addr); \
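The macro now recognizes four SoC address windows instead of two: the original VID/AON windows plus the VCN1 windows added for the second instance. For readability, here is the same range-selection logic as a standalone sketch; the constants mirror those added in vcn_v5_0_1.h further down, and `addr` is the byte address, i.e. the register offset after the macro's `addr <<= 2` (the macro itself also folds in the reg_offset lookup inside a statement expression):

#include <stdint.h>

#define VCN_VID_SOC_ADDRESS   0x1FC00
#define VCN_AON_SOC_ADDRESS   0x1F800
#define VCN1_VID_SOC_ADDRESS  0x48300
#define VCN1_AON_SOC_ADDRESS  0x48000
#define VCN_VID_IP_ADDRESS    0x0
#define VCN_AON_IP_ADDRESS    0x30000

static uint32_t soc24_dpg_internal_offset(uint32_t addr)
{
    addr &= 0xFFFFF;  /* low 20 bits, as in the macro */

    if (addr >= VCN_VID_SOC_ADDRESS && addr < VCN_VID_SOC_ADDRESS + 0x2600)
        return addr - VCN_VID_SOC_ADDRESS + VCN_VID_IP_ADDRESS;   /* VID window */
    if (addr >= VCN_AON_SOC_ADDRESS && addr < VCN_AON_SOC_ADDRESS + 0x600)
        return addr - VCN_AON_SOC_ADDRESS + VCN_AON_IP_ADDRESS;   /* AON window */
    if (addr >= VCN1_VID_SOC_ADDRESS && addr < VCN1_VID_SOC_ADDRESS + 0x2600)
        return addr - VCN1_VID_SOC_ADDRESS + VCN_VID_IP_ADDRESS;  /* VCN1 VID window */
    if (addr >= VCN1_AON_SOC_ADDRESS && addr < VCN1_AON_SOC_ADDRESS + 0x600)
        return addr - VCN1_AON_SOC_ADDRESS + VCN_AON_IP_ADDRESS;  /* VCN1 AON window */
    return addr;  /* outside all windows: pass through */
}

Both VCN1 windows remap to the same internal VID/AON IP addresses as the first instance, which is the point of the change: the DPG offset math works unmodified for the second instance's register ranges.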
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c (new file, +1105 lines)
/*
 * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_5_0_0_offset.h"
#include "vcn/vcn_5_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
#include "vcn_v5_0_1.h"

#include <drm/drm_drv.h>

static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
        enum amd_powergating_state state);
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v5_0_1_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;

    /* re-use enc ring as unified ring */
    adev->vcn.num_enc_rings = 1;

    vcn_v5_0_1_set_unified_ring_funcs(adev);
    vcn_v5_0_1_set_irq_funcs(adev);

    return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v5_0_1_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and sw initialization
 */
static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;
    struct amdgpu_ring *ring;
    int i, r, vcn_inst;

    r = amdgpu_vcn_sw_init(adev);
    if (r)
        return r;

    amdgpu_vcn_setup_ucode(adev);

    r = amdgpu_vcn_resume(adev);
    if (r)
        return r;

    /* VCN UNIFIED TRAP */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
        VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
    if (r)
        return r;

    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
        volatile struct amdgpu_vcn5_fw_shared *fw_shared;

        vcn_inst = GET_INST(VCN, i);

        ring = &adev->vcn.inst[i].ring_enc[0];
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * vcn_inst;

        ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
        sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);

        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
                AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score);
        if (r)
            return r;

        fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
        fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
        fw_shared->sq.is_enabled = true;

        if (amdgpu_vcnfw_log)
            amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
    }

    return 0;
}

/**
 * vcn_v5_0_1_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;
    int i, r, idx;

    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
            volatile struct amdgpu_vcn4_fw_shared *fw_shared;

            fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
            fw_shared->present_flag_0 = 0;
            fw_shared->sq.is_enabled = 0;
        }

        drm_dev_exit(idx);
    }

    r = amdgpu_vcn_suspend(adev);
    if (r)
        return r;

    r = amdgpu_vcn_sw_fini(adev);

    return r;
}

/**
 * vcn_v5_0_1_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;
    struct amdgpu_ring *ring;
    int i, r, vcn_inst;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        vcn_inst = GET_INST(VCN, i);
        ring = &adev->vcn.inst[i].ring_enc[0];

        if (ring->use_doorbell)
            adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
                 9 * vcn_inst),
                adev->vcn.inst[i].aid_id);

        r = amdgpu_ring_test_helper(ring);
        if (r)
            return r;
    }

    return 0;
}

/**
 * vcn_v5_0_1_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;

    cancel_delayed_work_sync(&adev->vcn.idle_work);

    return 0;
}

/**
 * vcn_v5_0_1_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;
    int r;

    r = vcn_v5_0_1_hw_fini(ip_block);
    if (r)
        return r;

    r = amdgpu_vcn_suspend(adev);

    return r;
}

/**
 * vcn_v5_0_1_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;
    int r;

    r = amdgpu_vcn_resume(adev);
    if (r)
        return r;

    r = vcn_v5_0_1_hw_init(ip_block);

    return r;
}

/**
 * vcn_v5_0_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
{
    uint32_t offset, size, vcn_inst;
    const struct common_firmware_header *hdr;

    hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data;
    size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

    vcn_inst = GET_INST(VCN, inst);
    /* cache window 0: fw */
    if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
        offset = 0;
    } else {
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
            lower_32_bits(adev->vcn.inst[inst].gpu_addr));
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
            upper_32_bits(adev->vcn.inst[inst].gpu_addr));
        offset = size;
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
            AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
    }
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

    /* cache window 1: stack */
    WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
        lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
    WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
        upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

    /* cache window 2: context */
    WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
        lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
    WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
        upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

    /* non-cache window */
    WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
        lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
    WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
        upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
    WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
        AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
    uint32_t offset, size;
    const struct common_firmware_header *hdr;

    hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data;
    size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

    /* cache window 0: fw */
    if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
        if (!indirect) {
            WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
                    inst_idx].tmr_mc_addr_lo), 0, indirect);
            WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
                    inst_idx].tmr_mc_addr_hi), 0, indirect);
            WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
        } else {
            WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
            WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
            WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
        }
        offset = 0;
    } else {
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
            lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
            upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
        offset = size;
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
            AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
    }

    if (!indirect)
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
    else
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

    /* cache window 1: stack */
    if (!indirect) {
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
            lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
            upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
    } else {
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
            VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
    }
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

    /* cache window 2: context */
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
            AMDGPU_VCN_STACK_SIZE), 0, indirect);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
            AMDGPU_VCN_STACK_SIZE), 0, indirect);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

    /* non-cache window */
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
        lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
        upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
        AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

    /* VCN global tiling registers */
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
}

/**
 * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
}

/**
 * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
    volatile struct amdgpu_vcn4_fw_shared *fw_shared =
        adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
    struct amdgpu_ring *ring;
    int vcn_inst;
    uint32_t tmp;

    vcn_inst = GET_INST(VCN, inst_idx);

    /* disable register anti-hang mechanism */
    WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
         ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

    /* enable dynamic power gating mode */
    tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
    tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
    WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

    if (indirect) {
        adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
            (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
        /* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
        WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF,
            adev->vcn.inst[inst_idx].aid_id, 0, true);
    }

    /* enable VCPU clock */
    tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
    tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

    /* disable master interrupt */
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

    /* setup regUVD_LMI_CTRL */
    tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
        UVD_LMI_CTRL__REQ_MODE_MASK |
        UVD_LMI_CTRL__CRC_RESET_MASK |
        UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
        UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
        UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
        (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
        0x00100000L);
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

    vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);

    tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
    tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

    /* enable LMI MC and UMC channels */
    tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

    /* enable master interrupt */
    WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
        VCN, 0, regUVD_MASTINT_EN),
        UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

    if (indirect)
        amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);

    ring = &adev->vcn.inst[inst_idx].ring_enc[0];

    WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
    WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
    WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

    tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
    tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
    WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
    fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
    WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
    WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

    tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
    WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
    ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

    tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
    tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
    WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
    fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

    WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
        ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
        VCN_RB1_DB_CTRL__EN_MASK);
    /* Read DB_CTRL to flush the write DB_CTRL command. */
    RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);

    return 0;
}

/**
 * vcn_v5_0_1_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v5_0_1_start(struct amdgpu_device *adev)
{
    volatile struct amdgpu_vcn4_fw_shared *fw_shared;
    struct amdgpu_ring *ring;
    uint32_t tmp;
    int i, j, k, r, vcn_inst;

    if (adev->pm.dpm_enabled)
        amdgpu_dpm_enable_uvd(adev, true);

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
            r = vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
            continue;
        }

        vcn_inst = GET_INST(VCN, i);

        /* set VCN status busy */
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
        WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

        /* enable VCPU clock */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
             UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

        /* disable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
             ~UVD_MASTINT_EN__VCPU_EN_MASK);

        /* enable LMI MC and UMC channels */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
             ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
        tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
        tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
        WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

        /* setup regUVD_LMI_CTRL */
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp |
            UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
            UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
            UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
            UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

        vcn_v5_0_1_mc_resume(adev, i);

        /* VCN global tiling registers */
        WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
            adev->gfx.config.gb_addr_config);

        /* unblock VCPU register access */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
             ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

        /* release VCPU reset to boot */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
             ~UVD_VCPU_CNTL__BLK_RST_MASK);

        for (j = 0; j < 10; ++j) {
            uint32_t status;

            for (k = 0; k < 100; ++k) {
                status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS);
                if (status & 2)
                    break;
                mdelay(100);
                if (amdgpu_emu_mode == 1)
                    msleep(20);
            }

            if (amdgpu_emu_mode == 1) {
                r = -1;
                if (status & 2) {
                    r = 0;
                    break;
                }
            } else {
                r = 0;
                if (status & 2)
                    break;

                dev_err(adev->dev,
                    "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
                     UVD_VCPU_CNTL__BLK_RST_MASK,
                     ~UVD_VCPU_CNTL__BLK_RST_MASK);
                mdelay(10);
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
                     ~UVD_VCPU_CNTL__BLK_RST_MASK);

                mdelay(10);
                r = -1;
            }
        }

        if (r) {
            dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
            return r;
        }

        /* enable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
             UVD_MASTINT_EN__VCPU_EN_MASK,
             ~UVD_MASTINT_EN__VCPU_EN_MASK);

        /* clear the busy bit of VCN_STATUS */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
             ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        ring = &adev->vcn.inst[i].ring_enc[0];

        WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL,
            ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
            VCN_RB1_DB_CTRL__EN_MASK);

        /* Read DB_CTRL to flush the write DB_CTRL command. */
        RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL);

        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4);

        tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
        tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
        WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
        fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR);
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp);
        ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

        tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
        tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
        WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
        fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
    }

    return 0;
}

/**
 * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
    uint32_t tmp;
    int vcn_inst;

    vcn_inst = GET_INST(VCN, inst_idx);

    /* Wait for power status to be 1 */
    SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
        UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

    /* wait for read ptr to be equal to write ptr */
    tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
    SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

    /* disable dynamic power gating mode */
    WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
         ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}

/**
 * vcn_v5_0_1_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v5_0_1_stop(struct amdgpu_device *adev)
{
    volatile struct amdgpu_vcn4_fw_shared *fw_shared;
    uint32_t tmp;
    int i, r = 0, vcn_inst;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        vcn_inst = GET_INST(VCN, i);

        fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
        fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
            vcn_v5_0_1_stop_dpg_mode(adev, i);
            continue;
        }

        /* wait for vcn idle */
        r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
        if (r)
            return r;

        tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
              UVD_LMI_STATUS__READ_CLEAN_MASK |
              UVD_LMI_STATUS__WRITE_CLEAN_MASK |
              UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
        r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
        if (r)
            return r;

        /* disable LMI UMC channel */
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
        tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
        tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
              UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
        r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp);
        if (r)
            return r;

        /* block VCPU register access */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
             UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
             ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

        /* reset VCPU */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
             UVD_VCPU_CNTL__BLK_RST_MASK,
             ~UVD_VCPU_CNTL__BLK_RST_MASK);

        /* disable VCPU clock */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
             ~(UVD_VCPU_CNTL__CLK_EN_MASK));

        /* apply soft reset */
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
        tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
        WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
        tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
        WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

        /* clear status */
        WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
    }

    if (adev->pm.dpm_enabled)
        amdgpu_dpm_enable_uvd(adev, false);

    return 0;
}

/**
 * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
        DRM_ERROR("wrong ring id is identified in %s", __func__);

    return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
        DRM_ERROR("wrong ring id is identified in %s", __func__);

    if (ring->use_doorbell)
        return *ring->wptr_cpu_addr;
    else
        return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR);
}

/**
 * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
    struct amdgpu_device *adev = ring->adev;

    if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
        DRM_ERROR("wrong ring id is identified in %s", __func__);

    if (ring->use_doorbell) {
        *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
    } else {
        WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
            lower_32_bits(ring->wptr));
    }
}

static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
    .type = AMDGPU_RING_TYPE_VCN_ENC,
    .align_mask = 0x3f,
    .nop = VCN_ENC_CMD_NO_OP,
    .get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
    .get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
    .set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
    .emit_frame_size =
        SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
        4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
        5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
        1, /* vcn_v2_0_enc_ring_insert_end */
    .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
    .emit_ib = vcn_v2_0_enc_ring_emit_ib,
    .emit_fence = vcn_v2_0_enc_ring_emit_fence,
    .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
    .test_ring = amdgpu_vcn_enc_ring_test_ring,
    .test_ib = amdgpu_vcn_unified_ring_test_ib,
    .insert_nop = amdgpu_ring_insert_nop,
    .insert_end = vcn_v2_0_enc_ring_insert_end,
    .pad_ib = amdgpu_ring_generic_pad_ib,
    .begin_use = amdgpu_vcn_ring_begin_use,
    .end_use = amdgpu_vcn_ring_end_use,
    .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
    .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
    .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev)
{
    int i, vcn_inst;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs;
        adev->vcn.inst[i].ring_enc[0].me = i;
        vcn_inst = GET_INST(VCN, i);
        adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid;
    }
}

/**
 * vcn_v5_0_1_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v5_0_1_is_idle(void *handle)
{
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
    int i, ret = 1;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
        ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE);

    return ret;
}

/**
 * vcn_v5_0_1_wait_for_idle - wait for VCN block idle
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Wait for VCN block idle
 */
static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
    struct amdgpu_device *adev = ip_block->adev;
    int i, ret = 0;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE,
            UVD_STATUS__IDLE);
        if (ret)
            return ret;
    }

    return ret;
}

/**
 * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
        enum amd_clockgating_state state)
{
    struct amdgpu_device *adev = ip_block->adev;
    bool enable = state == AMD_CG_STATE_GATE;
    int i;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
        if (enable) {
            if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
                return -EBUSY;
            vcn_v5_0_1_enable_clock_gating(adev, i);
        } else {
            vcn_v5_0_1_disable_clock_gating(adev, i);
        }
    }

    return 0;
}

/**
 * vcn_v5_0_1_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v5_0_1_set_powergating_state(struct amdgpu_ip_block *ip_block,
        enum amd_powergating_state state)
{
    struct amdgpu_device *adev = ip_block->adev;
    int ret;

    if (state == adev->vcn.cur_state)
        return 0;

    if (state == AMD_PG_STATE_GATE)
        ret = vcn_v5_0_1_stop(adev);
    else
        ret = vcn_v5_0_1_start(adev);

    if (!ret)
        adev->vcn.cur_state = state;

    return ret;
}

/**
 * vcn_v5_0_1_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
    struct amdgpu_iv_entry *entry)
{
    uint32_t i, inst;

    i = node_id_to_phys_map[entry->node_id];

    DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

    for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
        if (adev->vcn.inst[inst].aid_id == i)
            break;
    if (inst >= adev->vcn.num_vcn_inst) {
        dev_WARN_ONCE(adev->dev, 1,
            "Interrupt received for unknown VCN instance %d",
            entry->node_id);
        return 0;
    }

    switch (entry->src_id) {
    case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
        amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
        break;
    default:
        DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
            entry->src_id, entry->src_data[0]);
        break;
    }

    return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = {
    .process = vcn_v5_0_1_process_interrupt,
};

/**
 * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
{
    int i;

    for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
        adev->vcn.inst->irq.num_types++;
    adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs;
}

static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
    .name = "vcn_v5_0_1",
    .early_init = vcn_v5_0_1_early_init,
    .late_init = NULL,
    .sw_init = vcn_v5_0_1_sw_init,
    .sw_fini = vcn_v5_0_1_sw_fini,
    .hw_init = vcn_v5_0_1_hw_init,
    .hw_fini = vcn_v5_0_1_hw_fini,
    .suspend = vcn_v5_0_1_suspend,
    .resume = vcn_v5_0_1_resume,
    .is_idle = vcn_v5_0_1_is_idle,
    .wait_for_idle = vcn_v5_0_1_wait_for_idle,
    .check_soft_reset = NULL,
    .pre_soft_reset = NULL,
    .soft_reset = NULL,
    .post_soft_reset = NULL,
    .set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
    .set_powergating_state = vcn_v5_0_1_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
    .type = AMD_IP_BLOCK_TYPE_VCN,
    .major = 5,
    .minor = 0,
    .rev = 1,
    .funcs = &vcn_v5_0_1_ip_funcs,
};
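This file defines vcn_v5_0_1_ip_block, but no hunk in this commit wires it into IP discovery; presumably a companion change handles that. A hedged sketch of what such a registration typically looks like in amdgpu (in the style of the existing VCN cases in amdgpu_discovery.c; not part of this diff):

    /* Hedged sketch, not part of this commit: registering the new
     * block in the VCN switch of amdgpu_discovery.c. */
    case IP_VERSION(5, 0, 1):
        amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
        break;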
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h (new file, +37 lines)
/*
 * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __VCN_v5_0_1_H__
#define __VCN_v5_0_1_H__

#define VCN_VID_SOC_ADDRESS   0x1FC00
#define VCN_AON_SOC_ADDRESS   0x1F800
#define VCN1_VID_SOC_ADDRESS  0x48300
#define VCN1_AON_SOC_ADDRESS  0x48000

#define VCN_VID_IP_ADDRESS    0x0
#define VCN_AON_IP_ADDRESS    0x30000

extern const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block;

#endif /* __VCN_v5_0_1_H__ */