Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdgpu: add VCN3.0 support for Sienna_Cichlid

With basic IP block functions and ring functions

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: James Zhu <James.Zhu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Leo Liu, committed by Alex Deucher (cf14826c 25fc0564)

4 files changed, 1002 insertions(+)

drivers/gpu/drm/amd/amdgpu/Makefile (+1)
```diff
 	vcn_v1_0.o \
 	vcn_v2_0.o \
 	vcn_v2_5.o \
+	vcn_v3_0.o \
 	amdgpu_jpeg.o \
 	jpeg_v1_0.o \
 	jpeg_v2_0.o \
```
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h (+1)
```diff
 enum engine_status_constants {
 	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
 	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
+	UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0 = 0x2A2A8AA0,
 	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
 	UVD_STATUS__UVD_BUSY = 0x00000004,
 	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
```
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c (new file, +971)
```c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 	0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN_INSTANCES_SIENNA_CICHLID				2

static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v3_0_set_powergating_state(void *handle,
			enum amd_powergating_state state);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v3_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->asic_type == CHIP_SIENNA_CICHLID) {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}

		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;
	} else
		adev->vcn.num_vcn_inst = 1;

	adev->vcn.num_enc_rings = 2;

	vcn_v3_0_set_dec_ring_funcs(adev);
	vcn_v3_0_set_enc_ring_funcs(adev);
	vcn_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * vcn_v3_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

		if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
			adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		DRM_INFO("PSP loading VCN firmware\n");
	}

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP);

		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_dec;
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
		sprintf(ring->name, "vcn_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT);
		if (r)
			return r;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			/* VCN ENC TRAP */
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
			if (r)
				return r;

			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->use_doorbell = true;
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
			sprintf(ring->name, "vcn_enc_%d.%d", i, j);
			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * vcn_v3_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v3_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v3_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_dec;

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully.\n");

	return r;
}

/**
 * vcn_v3_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_dec;

		if (RREG32_SOC15(VCN, i, mmUVD_STATUS))
			vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);

		ring->sched.ready = false;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			ring = &adev->vcn.inst[i].ring_enc[j];
			ring->sched.ready = false;
		}
	}

	return 0;
}

/**
 * vcn_v3_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v3_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v3_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v3_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v3_0_hw_init(adev);

	return r;
}

/**
 * vcn_v3_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		/* No signed header for now from firmware
		WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		*/
		WREG32_SOC15(UVD, inst, mmUVD_VCPU_CACHE_OFFSET0, 0);
	}
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
}

static int vcn_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v3_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

static int vcn_v3_0_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* wait for vcn idle */
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
```
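The stop path above leans on SOC15_WAIT_ON_RREG, which in this kernel era is a statement-style macro that reports a timeout through its last argument rather than a return value; that is why the code checks r after each call. A rough, illustrative sketch of the polling semantics it implements (this is not the exact kernel macro, just the pattern the callers assume):

```c
/* Illustrative only: poll a register until (value & mask) == expected,
 * reporting -ETIMEDOUT through *ret on timeout. The iteration budget and
 * delay granularity here are assumptions, not the macro's real tuning. */
static void poll_reg_sketch(struct amdgpu_device *adev, uint32_t reg_offset,
			    uint32_t expected, uint32_t mask, int *ret)
{
	uint32_t timeout = 100000;	/* assumed poll budget */
	uint32_t tmp;

	*ret = 0;
	for (;;) {
		tmp = RREG32(reg_offset);
		if ((tmp & mask) == expected)
			break;
		if (--timeout == 0) {
			*ret = -ETIMEDOUT;
			break;
		}
		udelay(1);
	}
}
```

The vcn_v3_0.c listing continues with the ring helper functions.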
```c
/**
 * vcn_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
	.set_wptr = vcn_v3_0_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v3_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v3_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return adev->wb.wb[ring->wptr_offs];
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v3_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v3_0_enc_ring_get_rptr,
	.get_wptr = vcn_v3_0_enc_ring_get_wptr,
	.set_wptr = vcn_v3_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
			adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
			adev->vcn.inst[i].ring_enc[j].me = i;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
	}
}

static bool vcn_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v3_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE, ret);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int vcn_v3_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v3_0_stop(adev);
	else
		ret = vcn_v3_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = {
	.set = vcn_v3_0_set_interrupt_state,
	.process = vcn_v3_0_process_interrupt,
};

static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
	.name = "vcn_v3_0",
	.early_init = vcn_v3_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v3_0_sw_init,
	.sw_fini = vcn_v3_0_sw_fini,
	.hw_init = vcn_v3_0_hw_init,
	.hw_fini = vcn_v3_0_hw_fini,
	.suspend = vcn_v3_0_suspend,
	.resume = vcn_v3_0_resume,
	.is_idle = vcn_v3_0_is_idle,
	.wait_for_idle = vcn_v3_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
	.set_powergating_state = vcn_v3_0_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v3_0_ip_funcs,
};
```
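Note that nothing in this patch registers vcn_v3_0_ip_block with the device; exporting the amdgpu_ip_block_version is only half the wiring. A companion change in the SoC setup code (nv.c for Navi-family parts) would be expected to add the block during IP block setup. A hedged sketch of what that registration typically looks like; the function name below and the exact placement among the other IP blocks are assumptions, not part of this diff:

```c
/* Hedged sketch, not part of this patch: SoC setup code is expected to
 * register the VCN 3.0 IP block for Sienna Cichlid, roughly like this.
 * example_set_ip_blocks() is a hypothetical stand-in for the real
 * per-ASIC setup function in nv.c. */
static int example_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_SIENNA_CICHLID:
		/* common, gmc, ih, psp, smu, display, gfx, sdma blocks
		 * would be added first in the real setup path */
		amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
```

With the block registered, the amd_ip_funcs callbacks above (early_init through set_powergating_state) are driven by the generic amdgpu IP block state machine, so VCN 3.0 needs no dedicated init path of its own.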
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.h (new file, +29)
```c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __VCN_V3_0_H__
#define __VCN_V3_0_H__

extern const struct amdgpu_ip_block_version vcn_v3_0_ip_block;

#endif /* __VCN_V3_0_H__ */
```
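One detail worth calling out from sw_init: each VCN instance claims a contiguous group of doorbell slots starting at (adev->doorbell_index.vcn.vcn_ring0_1 << 1), with the decode ring at slot 0 of its instance's group of 8 and the encode rings at slots 2 and 3. A small illustrative calculation of the resulting layout; the base value below is hypothetical and only the formulas are taken from the diff:

```c
/* Illustrative only: doorbell slots implied by the formulas in
 * vcn_v3_0_sw_init(). Compile standalone; the base index is a
 * hypothetical example, not a value asserted by this patch. */
#include <stdio.h>

int main(void)
{
	unsigned int base = 0x28 << 1;	/* hypothetical vcn_ring0_1 << 1 */
	unsigned int i, j;

	for (i = 0; i < 2; i++) {	/* two Sienna Cichlid instances */
		printf("vcn_dec_%u   -> doorbell %u\n", i, base + 8 * i);
		for (j = 0; j < 2; j++)	/* two encode rings per instance */
			printf("vcn_enc_%u.%u -> doorbell %u\n",
			       i, j, base + 2 + j + 8 * i);
	}
	return 0;
}
```

The stride of 8 leaves unused slots within each instance's group, which keeps the two instances' doorbell ranges disjoint so that nbio vcn_doorbell_range() in hw_init can program one contiguous range per instance.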