Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu/vcn: Use offsets local to VCN/JPEG in VF

For VCN/JPEG 4.0.3, use only the local addressing scheme.

- Mask bits higher than the AID0 range

v2:
retain the mmhub case, which still uses the master XCC offsets

Signed-off-by: Jane Jian <Jane.Jian@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Jane Jian and committed by
Alex Deucher
caaf5762 49cfaebe

+60 -5
+17 -2
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
··· 32 32 #include "vcn/vcn_4_0_3_sh_mask.h" 33 33 #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h" 34 34 35 + #define NORMALIZE_JPEG_REG_OFFSET(offset) \ 36 + (offset & 0x1FFFF) 37 + 35 38 enum jpeg_engin_status { 36 39 UVD_PGFSM_STATUS__UVDJ_PWR_ON = 0, 37 40 UVD_PGFSM_STATUS__UVDJ_PWR_OFF = 2, ··· 827 824 void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 828 825 uint32_t val, uint32_t mask) 829 826 { 830 - uint32_t reg_offset = (reg << 2); 827 + uint32_t reg_offset; 828 + 829 + /* For VF, only local offsets should be used */ 830 + if (amdgpu_sriov_vf(ring->adev)) 831 + reg = NORMALIZE_JPEG_REG_OFFSET(reg); 832 + 833 + reg_offset = (reg << 2); 831 834 832 835 amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET, 833 836 0, 0, PACKETJ_TYPE0)); ··· 874 865 875 866 void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) 876 867 { 877 - uint32_t reg_offset = (reg << 2); 868 + uint32_t reg_offset; 869 + 870 + /* For VF, only local offsets should be used */ 871 + if (amdgpu_sriov_vf(ring->adev)) 872 + reg = NORMALIZE_JPEG_REG_OFFSET(reg); 873 + 874 + reg_offset = (reg << 2); 878 875 879 876 amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 880 877 0, 0, PACKETJ_TYPE0));
+43 -3
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
··· 45 45 #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 46 46 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 47 47 48 + #define NORMALIZE_VCN_REG_OFFSET(offset) \ 49 + (offset & 0x1FFFF) 50 + 48 51 static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev); 49 52 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev); 50 53 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev); ··· 1378 1375 regUVD_RB_WPTR); 1379 1376 } 1380 1377 1378 + static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, 1379 + uint32_t val, uint32_t mask) 1380 + { 1381 + /* For VF, only local offsets should be used */ 1382 + if (amdgpu_sriov_vf(ring->adev)) 1383 + reg = NORMALIZE_VCN_REG_OFFSET(reg); 1384 + 1385 + amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); 1386 + amdgpu_ring_write(ring, reg << 2); 1387 + amdgpu_ring_write(ring, mask); 1388 + amdgpu_ring_write(ring, val); 1389 + } 1390 + 1391 + static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) 1392 + { 1393 + /* For VF, only local offsets should be used */ 1394 + if (amdgpu_sriov_vf(ring->adev)) 1395 + reg = NORMALIZE_VCN_REG_OFFSET(reg); 1396 + 1397 + amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); 1398 + amdgpu_ring_write(ring, reg << 2); 1399 + amdgpu_ring_write(ring, val); 1400 + } 1401 + 1402 + static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, 1403 + unsigned int vmid, uint64_t pd_addr) 1404 + { 1405 + struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; 1406 + 1407 + pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1408 + 1409 + /* wait for reg writes */ 1410 + vcn_v4_0_3_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + 1411 + vmid * hub->ctx_addr_distance, 1412 + lower_32_bits(pd_addr), 0xffffffff); 1413 + } 1414 + 1381 1415 static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring) 1382 1416 { 1383 1417 /* VCN engine access for HDP flush doesn't work when RRMT is enabled. 
··· 1461 1421 .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ 1462 1422 .emit_ib = vcn_v2_0_enc_ring_emit_ib, 1463 1423 .emit_fence = vcn_v2_0_enc_ring_emit_fence, 1464 - .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, 1424 + .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush, 1465 1425 .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush, 1466 1426 .test_ring = amdgpu_vcn_enc_ring_test_ring, 1467 1427 .test_ib = amdgpu_vcn_unified_ring_test_ib, ··· 1470 1430 .pad_ib = amdgpu_ring_generic_pad_ib, 1471 1431 .begin_use = amdgpu_vcn_ring_begin_use, 1472 1432 .end_use = amdgpu_vcn_ring_end_use, 1473 - .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, 1474 - .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, 1433 + .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg, 1434 + .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait, 1475 1435 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, 1476 1436 }; 1477 1437