Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Use function for IP version check

Use an inline function for version check. Gives more flexibility to
handle any format changes.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Lijo Lazar and committed by Alex Deucher.
4e8303cf addd7aef

+714 -593
+3 -3
drivers/gpu/drm/amd/amdgpu/aldebaran.c
··· 35 35 { 36 36 struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; 37 37 38 - if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && 38 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) && 39 39 adev->gmc.xgmi.connected_to_cpu)) 40 40 return true; 41 41 ··· 154 154 if (reset_device_list == NULL) 155 155 return -EINVAL; 156 156 157 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && 157 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) && 158 158 reset_context->hive == NULL) { 159 159 /* Wrong context, return error */ 160 160 return -EINVAL; ··· 335 335 if (reset_device_list == NULL) 336 336 return -EINVAL; 337 337 338 - if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] == 338 + if (amdgpu_ip_version(reset_context->reset_req_dev, MP1_HWIP, 0) == 339 339 IP_VERSION(13, 0, 2) && 340 340 reset_context->hive == NULL) { 341 341 /* Wrong context, return error */
+6
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1105 1105 bool debug_disable_soft_recovery; 1106 1106 }; 1107 1107 1108 + static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, 1109 + uint8_t ip, uint8_t inst) 1110 + { 1111 + return adev->ip_versions[ip][inst]; 1112 + } 1113 + 1108 1114 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) 1109 1115 { 1110 1116 return container_of(ddev, struct amdgpu_device, ddev);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 707 707 /* Temporary workaround to fix issues observed in some 708 708 * compute applications when GFXOFF is enabled on GFX11. 709 709 */ 710 - if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) { 710 + if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11) { 711 711 pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled"); 712 712 amdgpu_gfx_off_ctrl(adev, idle); 713 713 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
··· 658 658 KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH | 659 659 KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION; 660 660 661 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 4)) 661 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 4)) 662 662 *trap_mask_supported |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START | 663 663 KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END; 664 664
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
··· 677 677 int i; 678 678 uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL)); 679 679 680 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) 680 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) 681 681 data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID, 682 682 stall ? 1 << vmid : 0); 683 683 else
+17 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1003 1003 1004 1004 amdgpu_asic_pre_asic_init(adev); 1005 1005 1006 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) || 1007 - adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) { 1006 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) || 1007 + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { 1008 1008 amdgpu_psp_wait_for_bootloader(adev); 1009 1009 ret = amdgpu_atomfirmware_asic_init(adev, true); 1010 1010 return ret; ··· 2845 2845 { 2846 2846 int i, r; 2847 2847 2848 - if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) 2848 + if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) 2849 2849 return; 2850 2850 2851 2851 for (i = 0; i < adev->num_ip_blocks; i++) { ··· 3098 3098 3099 3099 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ 3100 3100 if (adev->in_s0ix && 3101 - (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) && 3102 - (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA)) 3101 + (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= 3102 + IP_VERSION(5, 0, 0)) && 3103 + (adev->ip_blocks[i].version->type == 3104 + AMD_IP_BLOCK_TYPE_SDMA)) 3103 3105 continue; 3104 3106 3105 3107 /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot. ··· 3592 3590 adev->gfx.mcbp = true; 3593 3591 else if (amdgpu_mcbp == 0) 3594 3592 adev->gfx.mcbp = false; 3595 - else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) && 3596 - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) && 3593 + else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) && 3594 + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) && 3597 3595 adev->gfx.num_gfx_rings) 3598 3596 adev->gfx.mcbp = true; 3599 3597 ··· 3813 3811 * internal path natively support atomics, set have_atomics_support to true. 
3814 3812 */ 3815 3813 } else if ((adev->flags & AMD_IS_APU) && 3816 - (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) { 3814 + (amdgpu_ip_version(adev, GC_HWIP, 0) > 3815 + IP_VERSION(9, 0, 0))) { 3817 3816 adev->have_atomics_support = true; 3818 3817 } else { 3819 3818 adev->have_atomics_support = ··· 5447 5444 adev->asic_reset_res = r; 5448 5445 5449 5446 /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */ 5450 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 5451 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) 5447 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == 5448 + IP_VERSION(9, 4, 2) || 5449 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) 5452 5450 amdgpu_ras_resume(adev); 5453 5451 } else { 5454 5452 r = amdgpu_do_asic_reset(device_list_handle, reset_context); ··· 5474 5470 drm_sched_start(&ring->sched, true); 5475 5471 } 5476 5472 5477 - if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) 5473 + if (adev->enable_mes && 5474 + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3)) 5478 5475 amdgpu_mes_self_test(tmp_adev); 5479 5476 5480 5477 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) ··· 6152 6147 return true; 6153 6148 default: 6154 6149 /* IP discovery */ 6155 - if (!adev->ip_versions[DCE_HWIP][0] || 6150 + if (!amdgpu_ip_version(adev, DCE_HWIP, 0) || 6156 6151 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) 6157 6152 return false; 6158 6153 return true;
+45 -47
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
··· 311 311 * So far, apply this quirk only on those Navy Flounder boards which 312 312 * have a bad harvest table of VCN config. 313 313 */ 314 - if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && 315 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { 314 + if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) && 315 + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) { 316 316 switch (adev->pdev->revision) { 317 317 case 0xC1: 318 318 case 0xC2: ··· 1363 1363 * so read harvest bit per IP data structure to set 1364 1364 * harvest configuration. 1365 1365 */ 1366 - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) && 1367 - adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) { 1366 + if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) && 1367 + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) { 1368 1368 if ((adev->pdev->device == 0x731E && 1369 1369 (adev->pdev->revision == 0xC6 || 1370 1370 adev->pdev->revision == 0xC7)) || ··· 1607 1607 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) 1608 1608 { 1609 1609 /* what IP to use for this? 
*/ 1610 - switch (adev->ip_versions[GC_HWIP][0]) { 1610 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1611 1611 case IP_VERSION(9, 0, 1): 1612 1612 case IP_VERSION(9, 1, 0): 1613 1613 case IP_VERSION(9, 2, 1): ··· 1645 1645 default: 1646 1646 dev_err(adev->dev, 1647 1647 "Failed to add common ip block(GC_HWIP:0x%x)\n", 1648 - adev->ip_versions[GC_HWIP][0]); 1648 + amdgpu_ip_version(adev, GC_HWIP, 0)); 1649 1649 return -EINVAL; 1650 1650 } 1651 1651 return 0; ··· 1654 1654 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) 1655 1655 { 1656 1656 /* use GC or MMHUB IP version */ 1657 - switch (adev->ip_versions[GC_HWIP][0]) { 1657 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1658 1658 case IP_VERSION(9, 0, 1): 1659 1659 case IP_VERSION(9, 1, 0): 1660 1660 case IP_VERSION(9, 2, 1): ··· 1690 1690 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block); 1691 1691 break; 1692 1692 default: 1693 - dev_err(adev->dev, 1694 - "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 1695 - adev->ip_versions[GC_HWIP][0]); 1693 + dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n", 1694 + amdgpu_ip_version(adev, GC_HWIP, 0)); 1696 1695 return -EINVAL; 1697 1696 } 1698 1697 return 0; ··· 1699 1700 1700 1701 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) 1701 1702 { 1702 - switch (adev->ip_versions[OSSSYS_HWIP][0]) { 1703 + switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { 1703 1704 case IP_VERSION(4, 0, 0): 1704 1705 case IP_VERSION(4, 0, 1): 1705 1706 case IP_VERSION(4, 1, 0): ··· 1732 1733 default: 1733 1734 dev_err(adev->dev, 1734 1735 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", 1735 - adev->ip_versions[OSSSYS_HWIP][0]); 1736 + amdgpu_ip_version(adev, OSSSYS_HWIP, 0)); 1736 1737 return -EINVAL; 1737 1738 } 1738 1739 return 0; ··· 1740 1741 1741 1742 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) 1742 1743 { 1743 - switch (adev->ip_versions[MP0_HWIP][0]) { 1744 + switch 
(amdgpu_ip_version(adev, MP0_HWIP, 0)) { 1744 1745 case IP_VERSION(9, 0, 0): 1745 1746 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 1746 1747 break; ··· 1786 1787 default: 1787 1788 dev_err(adev->dev, 1788 1789 "Failed to add psp ip block(MP0_HWIP:0x%x)\n", 1789 - adev->ip_versions[MP0_HWIP][0]); 1790 + amdgpu_ip_version(adev, MP0_HWIP, 0)); 1790 1791 return -EINVAL; 1791 1792 } 1792 1793 return 0; ··· 1794 1795 1795 1796 static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) 1796 1797 { 1797 - switch (adev->ip_versions[MP1_HWIP][0]) { 1798 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1798 1799 case IP_VERSION(9, 0, 0): 1799 1800 case IP_VERSION(10, 0, 0): 1800 1801 case IP_VERSION(10, 0, 1): ··· 1835 1836 default: 1836 1837 dev_err(adev->dev, 1837 1838 "Failed to add smu ip block(MP1_HWIP:0x%x)\n", 1838 - adev->ip_versions[MP1_HWIP][0]); 1839 + amdgpu_ip_version(adev, MP1_HWIP, 0)); 1839 1840 return -EINVAL; 1840 1841 } 1841 1842 return 0; ··· 1860 1861 return 0; 1861 1862 1862 1863 #if defined(CONFIG_DRM_AMD_DC) 1863 - if (adev->ip_versions[DCE_HWIP][0]) { 1864 - switch (adev->ip_versions[DCE_HWIP][0]) { 1864 + if (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1865 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1865 1866 case IP_VERSION(1, 0, 0): 1866 1867 case IP_VERSION(1, 0, 1): 1867 1868 case IP_VERSION(2, 0, 2): ··· 1887 1888 default: 1888 1889 dev_err(adev->dev, 1889 1890 "Failed to add dm ip block(DCE_HWIP:0x%x)\n", 1890 - adev->ip_versions[DCE_HWIP][0]); 1891 + amdgpu_ip_version(adev, DCE_HWIP, 0)); 1891 1892 return -EINVAL; 1892 1893 } 1893 - } else if (adev->ip_versions[DCI_HWIP][0]) { 1894 - switch (adev->ip_versions[DCI_HWIP][0]) { 1894 + } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 1895 + switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) { 1895 1896 case IP_VERSION(12, 0, 0): 1896 1897 case IP_VERSION(12, 0, 1): 1897 1898 case IP_VERSION(12, 1, 0): ··· 1903 1904 default: 1904 1905 dev_err(adev->dev, 1905 1906 "Failed 
to add dm ip block(DCI_HWIP:0x%x)\n", 1906 - adev->ip_versions[DCI_HWIP][0]); 1907 + amdgpu_ip_version(adev, DCI_HWIP, 0)); 1907 1908 return -EINVAL; 1908 1909 } 1909 1910 } ··· 1913 1914 1914 1915 static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) 1915 1916 { 1916 - switch (adev->ip_versions[GC_HWIP][0]) { 1917 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1917 1918 case IP_VERSION(9, 0, 1): 1918 1919 case IP_VERSION(9, 1, 0): 1919 1920 case IP_VERSION(9, 2, 1): ··· 1953 1954 amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block); 1954 1955 break; 1955 1956 default: 1956 - dev_err(adev->dev, 1957 - "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 1958 - adev->ip_versions[GC_HWIP][0]); 1957 + dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n", 1958 + amdgpu_ip_version(adev, GC_HWIP, 0)); 1959 1959 return -EINVAL; 1960 1960 } 1961 1961 return 0; ··· 1962 1964 1963 1965 static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) 1964 1966 { 1965 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1967 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1966 1968 case IP_VERSION(4, 0, 0): 1967 1969 case IP_VERSION(4, 0, 1): 1968 1970 case IP_VERSION(4, 1, 0): ··· 2002 2004 default: 2003 2005 dev_err(adev->dev, 2004 2006 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", 2005 - adev->ip_versions[SDMA0_HWIP][0]); 2007 + amdgpu_ip_version(adev, SDMA0_HWIP, 0)); 2006 2008 return -EINVAL; 2007 2009 } 2008 2010 return 0; ··· 2010 2012 2011 2013 static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) 2012 2014 { 2013 - if (adev->ip_versions[VCE_HWIP][0]) { 2014 - switch (adev->ip_versions[UVD_HWIP][0]) { 2015 + if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2016 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2015 2017 case IP_VERSION(7, 0, 0): 2016 2018 case IP_VERSION(7, 2, 0): 2017 2019 /* UVD is not supported on vega20 SR-IOV */ ··· 2021 2023 default: 2022 2024 dev_err(adev->dev, 2023 2025 "Failed to add uvd v7 ip 
block(UVD_HWIP:0x%x)\n", 2024 - adev->ip_versions[UVD_HWIP][0]); 2026 + amdgpu_ip_version(adev, UVD_HWIP, 0)); 2025 2027 return -EINVAL; 2026 2028 } 2027 - switch (adev->ip_versions[VCE_HWIP][0]) { 2029 + switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 2028 2030 case IP_VERSION(4, 0, 0): 2029 2031 case IP_VERSION(4, 1, 0): 2030 2032 /* VCE is not supported on vega20 SR-IOV */ ··· 2034 2036 default: 2035 2037 dev_err(adev->dev, 2036 2038 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", 2037 - adev->ip_versions[VCE_HWIP][0]); 2039 + amdgpu_ip_version(adev, VCE_HWIP, 0)); 2038 2040 return -EINVAL; 2039 2041 } 2040 2042 } else { 2041 - switch (adev->ip_versions[UVD_HWIP][0]) { 2043 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 2042 2044 case IP_VERSION(1, 0, 0): 2043 2045 case IP_VERSION(1, 0, 1): 2044 2046 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); ··· 2089 2091 default: 2090 2092 dev_err(adev->dev, 2091 2093 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", 2092 - adev->ip_versions[UVD_HWIP][0]); 2094 + amdgpu_ip_version(adev, UVD_HWIP, 0)); 2093 2095 return -EINVAL; 2094 2096 } 2095 2097 } ··· 2098 2100 2099 2101 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) 2100 2102 { 2101 - switch (adev->ip_versions[GC_HWIP][0]) { 2103 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2102 2104 case IP_VERSION(10, 1, 10): 2103 2105 case IP_VERSION(10, 1, 1): 2104 2106 case IP_VERSION(10, 1, 2): ··· 2136 2138 2137 2139 static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev) 2138 2140 { 2139 - switch (adev->ip_versions[GC_HWIP][0]) { 2141 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2140 2142 case IP_VERSION(9, 4, 3): 2141 2143 aqua_vanjaram_init_soc_config(adev); 2142 2144 break; ··· 2147 2149 2148 2150 static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev) 2149 2151 { 2150 - switch (adev->ip_versions[VPE_HWIP][0]) { 2152 + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 2151 2153 case 
IP_VERSION(6, 1, 0): 2152 2154 amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block); 2153 2155 break; ··· 2160 2162 2161 2163 static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) 2162 2164 { 2163 - switch (adev->ip_versions[VCN_HWIP][0]) { 2165 + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 2164 2166 case IP_VERSION(4, 0, 5): 2165 2167 if (amdgpu_umsch_mm & 0x1) { 2166 2168 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block); ··· 2354 2356 amdgpu_discovery_init_soc_config(adev); 2355 2357 amdgpu_discovery_sysfs_init(adev); 2356 2358 2357 - switch (adev->ip_versions[GC_HWIP][0]) { 2359 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2358 2360 case IP_VERSION(9, 0, 1): 2359 2361 case IP_VERSION(9, 2, 1): 2360 2362 case IP_VERSION(9, 4, 0): ··· 2408 2410 return -EINVAL; 2409 2411 } 2410 2412 2411 - switch (adev->ip_versions[GC_HWIP][0]) { 2413 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2412 2414 case IP_VERSION(9, 1, 0): 2413 2415 case IP_VERSION(9, 2, 2): 2414 2416 case IP_VERSION(9, 3, 0): ··· 2427 2429 break; 2428 2430 } 2429 2431 2430 - if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) 2432 + if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0)) 2431 2433 adev->gmc.xgmi.supported = true; 2432 2434 2433 2435 /* set NBIO version */ 2434 - switch (adev->ip_versions[NBIO_HWIP][0]) { 2436 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 2435 2437 case IP_VERSION(6, 1, 0): 2436 2438 case IP_VERSION(6, 2, 0): 2437 2439 adev->nbio.funcs = &nbio_v6_1_funcs; ··· 2493 2495 break; 2494 2496 } 2495 2497 2496 - switch (adev->ip_versions[HDP_HWIP][0]) { 2498 + switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { 2497 2499 case IP_VERSION(4, 0, 0): 2498 2500 case IP_VERSION(4, 0, 1): 2499 2501 case IP_VERSION(4, 1, 0): ··· 2525 2527 break; 2526 2528 } 2527 2529 2528 - switch (adev->ip_versions[DF_HWIP][0]) { 2530 + switch (amdgpu_ip_version(adev, DF_HWIP, 0)) { 2529 2531 case IP_VERSION(3, 6, 0): 2530 2532 case 
IP_VERSION(3, 6, 1): 2531 2533 case IP_VERSION(3, 6, 2): ··· 2545 2547 break; 2546 2548 } 2547 2549 2548 - switch (adev->ip_versions[SMUIO_HWIP][0]) { 2550 + switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) { 2549 2551 case IP_VERSION(9, 0, 0): 2550 2552 case IP_VERSION(9, 0, 1): 2551 2553 case IP_VERSION(10, 0, 0): ··· 2588 2590 break; 2589 2591 } 2590 2592 2591 - switch (adev->ip_versions[LSDMA_HWIP][0]) { 2593 + switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { 2592 2594 case IP_VERSION(6, 0, 0): 2593 2595 case IP_VERSION(6, 0, 1): 2594 2596 case IP_VERSION(6, 0, 2):
+18 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 766 766 return -EINVAL; 767 767 } 768 768 769 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) 769 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) 770 770 version = AMD_FMT_MOD_TILE_VER_GFX11; 771 - else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 771 + else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= 772 + IP_VERSION(10, 3, 0)) 772 773 version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS; 773 - else if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0)) 774 + else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= 775 + IP_VERSION(10, 0, 0)) 774 776 version = AMD_FMT_MOD_TILE_VER_GFX10; 775 777 else 776 778 version = AMD_FMT_MOD_TILE_VER_GFX9; ··· 781 779 case 0: /* Z microtiling */ 782 780 return -EINVAL; 783 781 case 1: /* S microtiling */ 784 - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) { 782 + if (amdgpu_ip_version(adev, GC_HWIP, 0) < 783 + IP_VERSION(11, 0, 0)) { 785 784 if (!has_xor) 786 785 version = AMD_FMT_MOD_TILE_VER_GFX9; 787 786 } 788 787 break; 789 788 case 2: 790 - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0)) { 789 + if (amdgpu_ip_version(adev, GC_HWIP, 0) < 790 + IP_VERSION(11, 0, 0)) { 791 791 if (!has_xor && afb->base.format->cpp[0] != 4) 792 792 version = AMD_FMT_MOD_TILE_VER_GFX9; 793 793 } ··· 842 838 u64 render_dcc_offset; 843 839 844 840 /* Enable constant encode on RAVEN2 and later. */ 845 - bool dcc_constant_encode = (adev->asic_type > CHIP_RAVEN || 846 - (adev->asic_type == CHIP_RAVEN && 847 - adev->external_rev_id >= 0x81)) && 848 - adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0); 841 + bool dcc_constant_encode = 842 + (adev->asic_type > CHIP_RAVEN || 843 + (adev->asic_type == CHIP_RAVEN && 844 + adev->external_rev_id >= 0x81)) && 845 + amdgpu_ip_version(adev, GC_HWIP, 0) < 846 + IP_VERSION(11, 0, 0); 849 847 850 848 int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B : 851 849 dcc_i128b ? 
AMD_FMT_MOD_DCC_BLOCK_128B : ··· 884 878 if (adev->family >= AMDGPU_FAMILY_NV) { 885 879 int extra_pipe = 0; 886 880 887 - if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) && 881 + if ((amdgpu_ip_version(adev, GC_HWIP, 882 + 0) >= 883 + IP_VERSION(10, 3, 0)) && 888 884 pipes == packers && pipes > 1) 889 885 extra_pipe = 1; 890 886
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2274 2274 pm_runtime_forbid(dev->dev); 2275 2275 } 2276 2276 2277 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && 2277 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) && 2278 2278 !amdgpu_sriov_vf(adev)) { 2279 2279 bool need_to_reset_gpu = false; 2280 2280
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 570 570 switch (ring->funcs->type) { 571 571 case AMDGPU_RING_TYPE_SDMA: 572 572 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ 573 - if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) 573 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= 574 + IP_VERSION(5, 0, 0)) 574 575 is_gfx_power_domain = true; 575 576 break; 576 577 case AMDGPU_RING_TYPE_GFX:
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 158 158 return amdgpu_compute_multipipe == 1; 159 159 } 160 160 161 - if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) 161 + if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0)) 162 162 return true; 163 163 164 164 /* FIXME: spreading the queues across pipes causes perf regressions ··· 385 385 u32 domain = AMDGPU_GEM_DOMAIN_GTT; 386 386 387 387 /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */ 388 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0)) 388 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0)) 389 389 domain |= AMDGPU_GEM_DOMAIN_VRAM; 390 390 391 391 /* create MQD for KIQ */
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 588 588 */ 589 589 void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) 590 590 { 591 - switch (adev->ip_versions[GC_HWIP][0]) { 591 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 592 592 /* RAVEN */ 593 593 case IP_VERSION(9, 2, 2): 594 594 case IP_VERSION(9, 1, 0): ··· 652 652 void amdgpu_gmc_noretry_set(struct amdgpu_device *adev) 653 653 { 654 654 struct amdgpu_gmc *gmc = &adev->gmc; 655 - uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 655 + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 656 656 bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) || 657 657 gc_ver == IP_VERSION(9, 3, 0) || 658 658 gc_ver == IP_VERSION(9, 4, 0) ||
+8 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 501 501 if (adev->asic_type >= CHIP_VEGA10) { 502 502 switch (type) { 503 503 case AMD_IP_BLOCK_TYPE_GFX: 504 - result->ip_discovery_version = adev->ip_versions[GC_HWIP][0]; 504 + result->ip_discovery_version = 505 + amdgpu_ip_version(adev, GC_HWIP, 0); 505 506 break; 506 507 case AMD_IP_BLOCK_TYPE_SDMA: 507 - result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0]; 508 + result->ip_discovery_version = 509 + amdgpu_ip_version(adev, SDMA0_HWIP, 0); 508 510 break; 509 511 case AMD_IP_BLOCK_TYPE_UVD: 510 512 case AMD_IP_BLOCK_TYPE_VCN: 511 513 case AMD_IP_BLOCK_TYPE_JPEG: 512 - result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0]; 514 + result->ip_discovery_version = 515 + amdgpu_ip_version(adev, UVD_HWIP, 0); 513 516 break; 514 517 case AMD_IP_BLOCK_TYPE_VCE: 515 - result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0]; 518 + result->ip_discovery_version = 519 + amdgpu_ip_version(adev, VCE_HWIP, 0); 516 520 break; 517 521 default: 518 522 result->ip_discovery_version = 0;
+7 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
··· 132 132 adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe; 133 133 134 134 for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) { 135 - if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0)) 135 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < 136 + IP_VERSION(6, 0, 0)) 136 137 adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc; 137 138 /* zero sdma_hqd_mask for non-existent engine */ 138 139 else if (adev->sdma.num_instances == 1) ··· 1336 1335 1337 1336 for (i = 0; i < ARRAY_SIZE(queue_types); i++) { 1338 1337 /* On GFX v10.3, fw hasn't supported to map sdma queue. */ 1339 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) && 1340 - adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) && 1338 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= 1339 + IP_VERSION(10, 3, 0) && 1340 + amdgpu_ip_version(adev, GC_HWIP, 0) < 1341 + IP_VERSION(11, 0, 0) && 1341 1342 queue_types[i][0] == AMDGPU_RING_TYPE_SDMA) 1342 1343 continue; 1343 1344 ··· 1400 1397 1401 1398 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, 1402 1399 sizeof(ucode_prefix)); 1403 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) { 1400 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) { 1404 1401 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", 1405 1402 ucode_prefix, 1406 1403 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
+27 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 100 100 return; 101 101 } 102 102 103 - switch (adev->ip_versions[MP0_HWIP][0]) { 103 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 104 104 case IP_VERSION(11, 0, 0): 105 105 case IP_VERSION(11, 0, 4): 106 106 case IP_VERSION(11, 0, 5): ··· 128 128 129 129 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 130 130 131 - switch (adev->ip_versions[MP0_HWIP][0]) { 131 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 132 132 case IP_VERSION(9, 0, 0): 133 133 case IP_VERSION(11, 0, 7): 134 134 case IP_VERSION(11, 0, 9): ··· 162 162 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 163 163 struct psp_context *psp = &adev->psp; 164 164 165 - switch (adev->ip_versions[MP0_HWIP][0]) { 165 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 166 166 case IP_VERSION(9, 0, 0): 167 167 psp_v3_1_set_psp_funcs(psp); 168 168 psp->autoload_supported = false; ··· 334 334 bool ret = false; 335 335 int i; 336 336 337 - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) 337 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) 338 338 return false; 339 339 340 340 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; ··· 413 413 414 414 adev->psp.xgmi_context.supports_extended_data = 415 415 !adev->gmc.xgmi.connected_to_cpu && 416 - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2); 416 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2); 417 417 418 418 memset(&scpm_entry, 0, sizeof(scpm_entry)); 419 419 if ((psp_get_runtime_db_entry(adev, ··· 773 773 774 774 static bool psp_boottime_tmr(struct psp_context *psp) 775 775 { 776 - switch (psp->adev->ip_versions[MP0_HWIP][0]) { 776 + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) { 777 777 case IP_VERSION(13, 0, 6): 778 778 return true; 779 779 default: ··· 828 828 829 829 static bool psp_skip_tmr(struct psp_context *psp) 830 830 { 831 - switch (psp->adev->ip_versions[MP0_HWIP][0]) { 831 + switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 
0)) { 832 832 case IP_VERSION(11, 0, 9): 833 833 case IP_VERSION(11, 0, 7): 834 834 case IP_VERSION(13, 0, 2): ··· 1215 1215 struct amdgpu_device *adev = psp->adev; 1216 1216 1217 1217 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ 1218 - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || 1219 - (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && 1218 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 1219 + (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 1220 1220 adev->gmc.xgmi.connected_to_cpu)) 1221 1221 return 0; 1222 1222 ··· 1313 1313 1314 1314 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) 1315 1315 { 1316 - return (psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && 1316 + return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1317 + IP_VERSION(13, 0, 2) && 1317 1318 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) || 1318 - psp->adev->ip_versions[MP0_HWIP][0] >= IP_VERSION(13, 0, 6); 1319 + amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= 1320 + IP_VERSION(13, 0, 6); 1319 1321 } 1320 1322 1321 1323 /* ··· 1426 1424 if (psp_xgmi_peer_link_info_supported(psp)) { 1427 1425 struct ta_xgmi_cmd_get_peer_link_info_output *link_info_output; 1428 1426 bool requires_reflection = 1429 - (psp->xgmi_context.supports_extended_data && get_extended_data) || 1430 - psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6); 1427 + (psp->xgmi_context.supports_extended_data && 1428 + get_extended_data) || 1429 + amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == 1430 + IP_VERSION(13, 0, 6); 1431 1431 1432 1432 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS; 1433 1433 ··· 2521 2517 if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) 2522 2518 return 0; 2523 2519 2524 - if ((amdgpu_in_reset(adev) && 2525 - ras && adev->ras_enabled && 2526 - (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || 2527 - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) 
{ 2520 + if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && 2521 + (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) || 2522 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) { 2528 2523 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); 2529 2524 if (ret) 2530 2525 DRM_WARN("Failed to set MP1 state prepare for reload\n"); ··· 2606 2603 continue; 2607 2604 2608 2605 if (psp->autoload_supported && 2609 - (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) || 2610 - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) || 2611 - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) && 2606 + (amdgpu_ip_version(adev, MP0_HWIP, 0) == 2607 + IP_VERSION(11, 0, 7) || 2608 + amdgpu_ip_version(adev, MP0_HWIP, 0) == 2609 + IP_VERSION(11, 0, 11) || 2610 + amdgpu_ip_version(adev, MP0_HWIP, 0) == 2611 + IP_VERSION(11, 0, 12)) && 2612 2612 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || 2613 2613 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || 2614 2614 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) ··· 3152 3146 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); 3153 3147 3154 3148 if (adev->gmc.xgmi.connected_to_cpu || 3155 - (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) { 3149 + (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) { 3156 3150 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); 3157 3151 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); 3158 3152
+22 -16
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 201 201 return -EINVAL; 202 202 203 203 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ 204 - if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 205 - obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 204 + if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && 205 + amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { 206 206 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 207 207 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 208 208 } ··· 611 611 if (amdgpu_ras_query_error_status(obj->adev, &info)) 612 612 return -EINVAL; 613 613 614 - if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 615 - obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 614 + if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && 615 + amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { 616 616 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 617 617 dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); 618 618 } ··· 1208 1208 1209 1209 /* some hardware/IP supports read to clear 1210 1210 * no need to explictly reset the err status after the query call */ 1211 - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1212 - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { 1211 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) && 1212 + amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { 1213 1213 if (amdgpu_ras_reset_error_status(adev, query_info->head.block)) 1214 1214 dev_warn(adev->dev, 1215 1215 "Failed to reset error counter and error status\n"); ··· 1905 1905 * should be removed until smu fix handle ecc_info table. 
1906 1906 */ 1907 1907 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) && 1908 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2))) 1908 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == 1909 + IP_VERSION(13, 0, 2))) 1909 1910 continue; 1910 1911 1911 1912 amdgpu_ras_query_error_status(adev, &info); 1912 1913 1913 - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && 1914 - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) && 1915 - adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) { 1914 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != 1915 + IP_VERSION(11, 0, 2) && 1916 + amdgpu_ip_version(adev, MP0_HWIP, 0) != 1917 + IP_VERSION(11, 0, 4) && 1918 + amdgpu_ip_version(adev, MP0_HWIP, 0) != 1919 + IP_VERSION(13, 0, 0)) { 1916 1920 if (amdgpu_ras_reset_error_status(adev, info.head.block)) 1917 1921 dev_warn(adev->dev, "Failed to reset error counter and error status"); 1918 1922 } ··· 2404 2400 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) 2405 2401 { 2406 2402 if (amdgpu_sriov_vf(adev)) { 2407 - switch (adev->ip_versions[MP0_HWIP][0]) { 2403 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2408 2404 case IP_VERSION(13, 0, 2): 2409 2405 case IP_VERSION(13, 0, 6): 2410 2406 return true; ··· 2414 2410 } 2415 2411 2416 2412 if (adev->asic_type == CHIP_IP_DISCOVERY) { 2417 - switch (adev->ip_versions[MP0_HWIP][0]) { 2413 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 2418 2414 case IP_VERSION(13, 0, 0): 2419 2415 case IP_VERSION(13, 0, 6): 2420 2416 case IP_VERSION(13, 0, 10): ··· 2488 2484 /* VCN/JPEG RAS can be supported on both bare metal and 2489 2485 * SRIOV environment 2490 2486 */ 2491 - if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) || 2492 - adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0)) 2487 + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == 2488 + IP_VERSION(2, 6, 0) || 2489 + amdgpu_ip_version(adev, VCN_HWIP, 0) == 2490 + IP_VERSION(4, 0, 0)) 2493 2491 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | 
2494 2492 1 << AMDGPU_RAS_BLOCK__JPEG); 2495 2493 else ··· 2525 2519 * Disable ras feature for aqua vanjaram 2526 2520 * by default on apu platform. 2527 2521 */ 2528 - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6) && 2522 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) && 2529 2523 adev->gmc.is_app_apu) 2530 2524 adev->ras_enabled = amdgpu_ras_enable != 1 ? 0 : 2531 2525 adev->ras_hw_enabled & amdgpu_ras_mask; ··· 2640 2634 /* initialize nbio ras function ahead of any other 2641 2635 * ras functions so hardware fatal error interrupt 2642 2636 * can be enabled as early as possible */ 2643 - switch (adev->ip_versions[NBIO_HWIP][0]) { 2637 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 2644 2638 case IP_VERSION(7, 4, 0): 2645 2639 case IP_VERSION(7, 4, 1): 2646 2640 case IP_VERSION(7, 4, 4):
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
··· 153 153 154 154 static bool __is_ras_eeprom_supported(struct amdgpu_device *adev) 155 155 { 156 - switch (adev->ip_versions[MP1_HWIP][0]) { 156 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 157 157 case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */ 158 158 case IP_VERSION(11, 0, 7): /* Sienna cichlid */ 159 159 case IP_VERSION(13, 0, 0): ··· 191 191 return true; 192 192 } 193 193 194 - switch (adev->ip_versions[MP1_HWIP][0]) { 194 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 195 195 case IP_VERSION(11, 0, 2): 196 196 /* VEGA20 and ARCTURUS */ 197 197 if (adev->asic_type == CHIP_VEGA20)
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
··· 30 30 { 31 31 int ret = 0; 32 32 33 - switch (adev->ip_versions[MP1_HWIP][0]) { 33 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 34 34 case IP_VERSION(13, 0, 2): 35 35 case IP_VERSION(13, 0, 6): 36 36 ret = aldebaran_reset_init(adev); ··· 52 52 { 53 53 int ret = 0; 54 54 55 - switch (adev->ip_versions[MP1_HWIP][0]) { 55 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 56 56 case IP_VERSION(13, 0, 2): 57 57 case IP_VERSION(13, 0, 6): 58 58 ret = aldebaran_reset_fini(adev);
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
··· 251 251 else { 252 252 /* Use a single copy per SDMA firmware type. PSP uses the same instance for all 253 253 * groups of SDMAs */ 254 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2) && 255 - adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && 254 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 255 + 0) == 256 + IP_VERSION(4, 4, 2) && 257 + adev->firmware.load_type == 258 + AMDGPU_FW_LOAD_PSP && 256 259 adev->sdma.num_inst_per_aid == i) { 257 260 break; 258 261 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1727 1727 reserve_size = 1728 1728 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev); 1729 1729 1730 - if (!adev->bios && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 1730 + if (!adev->bios && 1731 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 1731 1732 reserve_size = max(reserve_size, (uint32_t)280 << 20); 1732 1733 else if (!reserve_size) 1733 1734 reserve_size = DISCOVERY_TMR_OFFSET;
+8 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
··· 1119 1119 static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type) 1120 1120 { 1121 1121 if (block_type == MP0_HWIP) { 1122 - switch (adev->ip_versions[MP0_HWIP][0]) { 1122 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 1123 1123 case IP_VERSION(9, 0, 0): 1124 1124 switch (adev->asic_type) { 1125 1125 case CHIP_VEGA10: ··· 1170 1170 return "yellow_carp"; 1171 1171 } 1172 1172 } else if (block_type == MP1_HWIP) { 1173 - switch (adev->ip_versions[MP1_HWIP][0]) { 1173 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1174 1174 case IP_VERSION(9, 0, 0): 1175 1175 case IP_VERSION(10, 0, 0): 1176 1176 case IP_VERSION(10, 0, 1): ··· 1196 1196 return "aldebaran_smc"; 1197 1197 } 1198 1198 } else if (block_type == SDMA0_HWIP) { 1199 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1199 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1200 1200 case IP_VERSION(4, 0, 0): 1201 1201 return "vega10_sdma"; 1202 1202 case IP_VERSION(4, 0, 1): ··· 1240 1240 return "vangogh_sdma"; 1241 1241 } 1242 1242 } else if (block_type == UVD_HWIP) { 1243 - switch (adev->ip_versions[UVD_HWIP][0]) { 1243 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 1244 1244 case IP_VERSION(1, 0, 0): 1245 1245 case IP_VERSION(1, 0, 1): 1246 1246 if (adev->apu_flags & AMD_APU_IS_RAVEN2) ··· 1265 1265 case IP_VERSION(3, 0, 0): 1266 1266 case IP_VERSION(3, 0, 64): 1267 1267 case IP_VERSION(3, 0, 192): 1268 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) 1268 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == 1269 + IP_VERSION(10, 3, 0)) 1269 1270 return "sienna_cichlid_vcn"; 1270 1271 return "navy_flounder_vcn"; 1271 1272 case IP_VERSION(3, 0, 2): ··· 1279 1278 return "yellow_carp_vcn"; 1280 1279 } 1281 1280 } else if (block_type == GC_HWIP) { 1282 - switch (adev->ip_versions[GC_HWIP][0]) { 1281 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1283 1282 case IP_VERSION(9, 0, 1): 1284 1283 return "vega10"; 1285 1284 case IP_VERSION(9, 2, 1): ··· 1332 1331 int maj, 
min, rev; 1333 1332 char *ip_name; 1334 1333 const char *legacy; 1335 - uint32_t version = adev->ip_versions[block_type][0]; 1334 + uint32_t version = amdgpu_ip_version(adev, block_type, 0); 1336 1335 1337 1336 legacy = amdgpu_ucode_legacy_naming(adev, block_type); 1338 1337 if (legacy) {
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
··· 28 28 struct ras_err_data *err_data, uint64_t err_addr, 29 29 uint32_t ch_inst, uint32_t umc_inst) 30 30 { 31 - switch (adev->ip_versions[UMC_HWIP][0]) { 31 + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 32 32 case IP_VERSION(6, 7, 0): 33 33 umc_v6_7_convert_error_address(adev, 34 34 err_data, err_addr, ch_inst, umc_inst);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
··· 580 580 const char *fw_name = NULL; 581 581 int r; 582 582 583 - switch (adev->ip_versions[VCN_HWIP][0]) { 583 + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 584 584 case IP_VERSION(4, 0, 5): 585 585 fw_name = "amdgpu/umsch_mm_4_0_0.bin"; 586 586 break; ··· 757 757 { 758 758 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 759 759 760 - switch (adev->ip_versions[VCN_HWIP][0]) { 760 + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 761 761 case IP_VERSION(4, 0, 5): 762 762 umsch_mm_v4_0_set_funcs(&adev->umsch_mm); 763 763 break;
+6 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 126 126 * Hence, check for these versions here - notice this is 127 127 * restricted to Vangogh (Deck's APU). 128 128 */ 129 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) { 129 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) { 130 130 const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION); 131 131 132 132 if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) || ··· 171 171 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 172 172 bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); 173 173 174 - if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) { 174 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) { 175 175 fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)); 176 176 log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log); 177 177 } else { ··· 267 267 struct amdgpu_device *adev = ring->adev; 268 268 bool ret = false; 269 269 270 - if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0)) 270 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) 271 271 ret = true; 272 272 273 273 return ret; ··· 998 998 struct amdgpu_device *adev = ring->adev; 999 999 long r; 1000 1000 1001 - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(4, 0, 3)) { 1001 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) { 1002 1002 r = amdgpu_vcn_enc_ring_test_ib(ring, timeout); 1003 1003 if (r) 1004 1004 goto error; ··· 1048 1048 adev->firmware.fw_size += 1049 1049 ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); 1050 1050 1051 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(4, 0, 3)) 1051 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == 1052 + IP_VERSION(4, 0, 3)) 1052 1053 break; 1053 1054 } 1054 1055 dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 837 837 838 838 void amdgpu_virt_post_reset(struct amdgpu_device *adev) 839 839 { 840 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) { 840 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) { 841 841 /* force set to GFXOFF state after reset, 842 842 * to avoid some invalid operation before GC enable 843 843 */ ··· 847 847 848 848 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id) 849 849 { 850 - switch (adev->ip_versions[MP0_HWIP][0]) { 850 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 851 851 case IP_VERSION(13, 0, 0): 852 852 /* no vf autoload, white list */ 853 853 if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 885 885 * heavy-weight flush TLB unconditionally. 886 886 */ 887 887 flush_tlb |= adev->gmc.xgmi.num_physical_nodes && 888 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0); 888 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0); 889 889 890 890 /* 891 891 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB 892 892 */ 893 - flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0); 893 + flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0); 894 894 895 895 memset(&params, 0, sizeof(params)); 896 896 params.adev = adev;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
··· 123 123 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 124 124 struct amdgpu_vpe *vpe = &adev->vpe; 125 125 126 - switch (adev->ip_versions[VPE_HWIP][0]) { 126 + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { 127 127 case IP_VERSION(6, 1, 0): 128 128 vpe_v6_1_set_funcs(vpe); 129 129 break;
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 948 948 uint32_t field_array_size = 0; 949 949 950 950 if (is_xgmi_pcs) { 951 - if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { 951 + if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == 952 + IP_VERSION(6, 1, 0)) { 952 953 pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0]; 953 954 field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields); 954 955 } else {
+1 -1
drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
··· 68 68 if (amdgpu_sriov_vf(adev)) 69 69 return 0; 70 70 71 - switch (adev->ip_versions[ATHUB_HWIP][0]) { 71 + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { 72 72 case IP_VERSION(9, 0, 0): 73 73 case IP_VERSION(9, 1, 0): 74 74 case IP_VERSION(9, 2, 0):
+1 -1
drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
··· 77 77 if (amdgpu_sriov_vf(adev)) 78 78 return 0; 79 79 80 - switch (adev->ip_versions[ATHUB_HWIP][0]) { 80 + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { 81 81 case IP_VERSION(1, 3, 1): 82 82 case IP_VERSION(2, 0, 0): 83 83 case IP_VERSION(2, 0, 2):
+1 -1
drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
··· 70 70 if (amdgpu_sriov_vf(adev)) 71 71 return 0; 72 72 73 - switch (adev->ip_versions[ATHUB_HWIP][0]) { 73 + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { 74 74 case IP_VERSION(2, 1, 0): 75 75 case IP_VERSION(2, 1, 1): 76 76 case IP_VERSION(2, 1, 2):
+3 -3
drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
··· 36 36 { 37 37 uint32_t data; 38 38 39 - switch (adev->ip_versions[ATHUB_HWIP][0]) { 39 + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { 40 40 case IP_VERSION(3, 0, 1): 41 41 data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1); 42 42 break; ··· 49 49 50 50 static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data) 51 51 { 52 - switch (adev->ip_versions[ATHUB_HWIP][0]) { 52 + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { 53 53 case IP_VERSION(3, 0, 1): 54 54 WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data); 55 55 break; ··· 99 99 if (amdgpu_sriov_vf(adev)) 100 100 return 0; 101 101 102 - switch (adev->ip_versions[ATHUB_HWIP][0]) { 102 + switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) { 103 103 case IP_VERSION(3, 0, 0): 104 104 case IP_VERSION(3, 0, 1): 105 105 case IP_VERSION(3, 0, 2):
+54 -44
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 3627 3627 3628 3628 static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) 3629 3629 { 3630 - switch (adev->ip_versions[GC_HWIP][0]) { 3630 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3631 3631 case IP_VERSION(10, 1, 10): 3632 3632 soc15_program_register_sequence(adev, 3633 3633 golden_settings_gc_rlc_spm_10_0_nv10, ··· 3650 3650 3651 3651 static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) 3652 3652 { 3653 - switch (adev->ip_versions[GC_HWIP][0]) { 3653 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3654 3654 case IP_VERSION(10, 1, 10): 3655 3655 soc15_program_register_sequence(adev, 3656 3656 golden_settings_gc_10_1, ··· 3891 3891 { 3892 3892 adev->gfx.cp_fw_write_wait = false; 3893 3893 3894 - switch (adev->ip_versions[GC_HWIP][0]) { 3894 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3895 3895 case IP_VERSION(10, 1, 10): 3896 3896 case IP_VERSION(10, 1, 2): 3897 3897 case IP_VERSION(10, 1, 1): ··· 3942 3942 3943 3943 static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) 3944 3944 { 3945 - switch (adev->ip_versions[GC_HWIP][0]) { 3945 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3946 3946 case IP_VERSION(10, 1, 10): 3947 3947 if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) 3948 3948 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; ··· 3964 3964 3965 3965 DRM_DEBUG("\n"); 3966 3966 3967 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) && 3968 - (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00))) 3967 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 1) && 3968 + (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00))) 3969 3969 wks = "_wks"; 3970 3970 amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); 3971 3971 ··· 4144 4144 reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3); 4145 4145 reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL); 4146 4146 reg_access_ctrl->grbm_idx = 
SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX); 4147 - switch (adev->ip_versions[GC_HWIP][0]) { 4147 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4148 4148 case IP_VERSION(10, 3, 0): 4149 4149 reg_access_ctrl->spare_int = 4150 4150 SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); ··· 4358 4358 { 4359 4359 u32 gb_addr_config; 4360 4360 4361 - switch (adev->ip_versions[GC_HWIP][0]) { 4361 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4362 4362 case IP_VERSION(10, 1, 10): 4363 4363 case IP_VERSION(10, 1, 1): 4364 4364 case IP_VERSION(10, 1, 2): ··· 4491 4491 struct amdgpu_kiq *kiq; 4492 4492 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4493 4493 4494 - switch (adev->ip_versions[GC_HWIP][0]) { 4494 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4495 4495 case IP_VERSION(10, 1, 10): 4496 4496 case IP_VERSION(10, 1, 1): 4497 4497 case IP_VERSION(10, 1, 2): ··· 4749 4749 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 4750 4750 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 4751 4751 bitmap = i * adev->gfx.config.max_sh_per_se + j; 4752 - if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || 4753 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) || 4754 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) && 4752 + if (((amdgpu_ip_version(adev, GC_HWIP, 0) == 4753 + IP_VERSION(10, 3, 0)) || 4754 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 4755 + IP_VERSION(10, 3, 3)) || 4756 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 4757 + IP_VERSION(10, 3, 6))) && 4755 4758 ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) 4756 4759 continue; 4757 4760 gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0); ··· 4782 4779 /* for ASICs that integrates GFX v10.3 4783 4780 * pa_sc_tile_steering_override should be set to 0 4784 4781 */ 4785 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 4782 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 4786 4783 return 0; 4787 4784 4788 4785 /* init num_sc 
*/ ··· 4963 4960 /* TCCs are global (not instanced). */ 4964 4961 uint32_t tcc_disable; 4965 4962 4966 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { 4963 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) { 4967 4964 tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) | 4968 4965 RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3); 4969 4966 } else { ··· 5040 5037 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 5041 5038 5042 5039 /* csib */ 5043 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { 5040 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) { 5044 5041 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, 5045 5042 adev->gfx.rlc.clear_state_gpu_addr >> 32); 5046 5043 WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, ··· 5669 5666 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); 5670 5667 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); 5671 5668 5672 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) 5669 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) 5673 5670 WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); 5674 5671 else 5675 5672 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); ··· 6060 6057 } 6061 6058 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); 6062 6059 } 6063 - switch (adev->ip_versions[GC_HWIP][0]) { 6060 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 6064 6061 case IP_VERSION(10, 3, 0): 6065 6062 case IP_VERSION(10, 3, 2): 6066 6063 case IP_VERSION(10, 3, 1): ··· 6193 6190 static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 6194 6191 { 6195 6192 if (enable) { 6196 - switch (adev->ip_versions[GC_HWIP][0]) { 6193 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 6197 6194 case IP_VERSION(10, 3, 0): 6198 6195 case IP_VERSION(10, 3, 2): 6199 6196 case IP_VERSION(10, 3, 1): ··· 6209 6206 break; 6210 6207 } 6211 6208 } else { 6212 - switch (adev->ip_versions[GC_HWIP][0]) { 6209 + switch 
(amdgpu_ip_version(adev, GC_HWIP, 0)) { 6213 6210 case IP_VERSION(10, 3, 0): 6214 6211 case IP_VERSION(10, 3, 2): 6215 6212 case IP_VERSION(10, 3, 1): ··· 6309 6306 struct amdgpu_device *adev = ring->adev; 6310 6307 6311 6308 /* tell RLC which is KIQ queue */ 6312 - switch (adev->ip_versions[GC_HWIP][0]) { 6309 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 6313 6310 case IP_VERSION(10, 3, 0): 6314 6311 case IP_VERSION(10, 3, 2): 6315 6312 case IP_VERSION(10, 3, 1): ··· 6920 6917 * check if mmVGT_ESGS_RING_SIZE_UMD 6921 6918 * has been remapped to mmVGT_ESGS_RING_SIZE 6922 6919 */ 6923 - switch (adev->ip_versions[GC_HWIP][0]) { 6920 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 6924 6921 case IP_VERSION(10, 3, 0): 6925 6922 case IP_VERSION(10, 3, 2): 6926 6923 case IP_VERSION(10, 3, 4): ··· 6969 6966 */ 6970 6967 WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); 6971 6968 6972 - switch (adev->ip_versions[GC_HWIP][0]) { 6969 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 6973 6970 case IP_VERSION(10, 3, 0): 6974 6971 case IP_VERSION(10, 3, 2): 6975 6972 case IP_VERSION(10, 3, 1): ··· 7142 7139 * init golden registers and rlc resume may override some registers, 7143 7140 * reconfig them here 7144 7141 */ 7145 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10) || 7146 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) || 7147 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) 7142 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 10) || 7143 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 1) || 7144 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) 7148 7145 gfx_v10_0_tcp_harvest(adev); 7149 7146 7150 7147 r = gfx_v10_0_cp_resume(adev); 7151 7148 if (r) 7152 7149 return r; 7153 7150 7154 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) 7151 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 7155 7152 gfx_v10_3_program_pbb_mode(adev); 7156 7153 7157 - if (adev->ip_versions[GC_HWIP][0] 
>= IP_VERSION(10, 3, 0)) 7154 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 7158 7155 gfx_v10_3_set_power_brake_sequence(adev); 7159 7156 7160 7157 return r; ··· 7258 7255 7259 7256 /* GRBM_STATUS2 */ 7260 7257 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); 7261 - switch (adev->ip_versions[GC_HWIP][0]) { 7258 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7262 7259 case IP_VERSION(10, 3, 0): 7263 7260 case IP_VERSION(10, 3, 2): 7264 7261 case IP_VERSION(10, 3, 1): ··· 7315 7312 { 7316 7313 uint64_t clock, clock_lo, clock_hi, hi_check; 7317 7314 7318 - switch (adev->ip_versions[GC_HWIP][0]) { 7315 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7319 7316 case IP_VERSION(10, 3, 1): 7320 7317 case IP_VERSION(10, 3, 3): 7321 7318 case IP_VERSION(10, 3, 7): ··· 7402 7399 7403 7400 adev->gfx.funcs = &gfx_v10_0_gfx_funcs; 7404 7401 7405 - switch (adev->ip_versions[GC_HWIP][0]) { 7402 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7406 7403 case IP_VERSION(10, 1, 10): 7407 7404 case IP_VERSION(10, 1, 1): 7408 7405 case IP_VERSION(10, 1, 2): ··· 7473 7470 data = RLC_SAFE_MODE__CMD_MASK; 7474 7471 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); 7475 7472 7476 - switch (adev->ip_versions[GC_HWIP][0]) { 7473 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7477 7474 case IP_VERSION(10, 3, 0): 7478 7475 case IP_VERSION(10, 3, 2): 7479 7476 case IP_VERSION(10, 3, 1): ··· 7511 7508 uint32_t data; 7512 7509 7513 7510 data = RLC_SAFE_MODE__CMD_MASK; 7514 - switch (adev->ip_versions[GC_HWIP][0]) { 7511 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7515 7512 case IP_VERSION(10, 3, 0): 7516 7513 case IP_VERSION(10, 3, 2): 7517 7514 case IP_VERSION(10, 3, 1): ··· 7822 7819 mmCGTS_SA1_QUAD1_SM_CTRL_REG 7823 7820 }; 7824 7821 7825 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { 7822 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 2)) { 7826 7823 for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) { 7827 7824 reg_idx = 
adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] + 7828 7825 tcp_ctrl_regs_nv12[i]; ··· 7867 7864 /* === CGCG + CGLS === */ 7868 7865 gfx_v10_0_update_coarse_grain_clock_gating(adev, enable); 7869 7866 7870 - if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) || 7871 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) || 7872 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))) 7867 + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == 7868 + IP_VERSION(10, 1, 10)) || 7869 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 7870 + IP_VERSION(10, 1, 1)) || 7871 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 7872 + IP_VERSION(10, 1, 2))) 7873 7873 gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev); 7874 7874 } else { 7875 7875 /* CGCG/CGLS should be disabled before MGCG/MGLS ··· 7972 7966 * Power/performance team will optimize it and might give a new value later. 7973 7967 */ 7974 7968 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 7975 - switch (adev->ip_versions[GC_HWIP][0]) { 7969 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7976 7970 case IP_VERSION(10, 3, 1): 7977 7971 case IP_VERSION(10, 3, 3): 7978 7972 case IP_VERSION(10, 3, 6): ··· 8033 8027 if (amdgpu_sriov_vf(adev)) 8034 8028 return 0; 8035 8029 8036 - switch (adev->ip_versions[GC_HWIP][0]) { 8030 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 8037 8031 case IP_VERSION(10, 1, 10): 8038 8032 case IP_VERSION(10, 1, 1): 8039 8033 case IP_VERSION(10, 1, 2): ··· 8070 8064 if (amdgpu_sriov_vf(adev)) 8071 8065 return 0; 8072 8066 8073 - switch (adev->ip_versions[GC_HWIP][0]) { 8067 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 8074 8068 case IP_VERSION(10, 1, 10): 8075 8069 case IP_VERSION(10, 1, 1): 8076 8070 case IP_VERSION(10, 1, 2): ··· 9317 9311 9318 9312 static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) 9319 9313 { 9320 - switch (adev->ip_versions[GC_HWIP][0]) { 9314 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 9321 9315 case 
IP_VERSION(10, 1, 10): 9322 9316 case IP_VERSION(10, 1, 1): 9323 9317 case IP_VERSION(10, 1, 3): ··· 9434 9428 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 9435 9429 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 9436 9430 bitmap = i * adev->gfx.config.max_sh_per_se + j; 9437 - if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || 9438 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) || 9439 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6)) || 9440 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 7))) && 9431 + if (((amdgpu_ip_version(adev, GC_HWIP, 0) == 9432 + IP_VERSION(10, 3, 0)) || 9433 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 9434 + IP_VERSION(10, 3, 3)) || 9435 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 9436 + IP_VERSION(10, 3, 6)) || 9437 + (amdgpu_ip_version(adev, GC_HWIP, 0) == 9438 + IP_VERSION(10, 3, 7))) && 9441 9439 ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) 9442 9440 continue; 9443 9441 mask = 1;
+14 -13
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 288 288 289 289 static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev) 290 290 { 291 - switch (adev->ip_versions[GC_HWIP][0]) { 291 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 292 292 case IP_VERSION(11, 0, 1): 293 293 case IP_VERSION(11, 0, 4): 294 294 soc15_program_register_sequence(adev, ··· 493 493 494 494 static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) 495 495 { 496 - switch (adev->ip_versions[GC_HWIP][0]) { 496 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 497 497 case IP_VERSION(11, 0, 0): 498 498 case IP_VERSION(11, 0, 2): 499 499 case IP_VERSION(11, 0, 3): ··· 884 884 885 885 static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev) 886 886 { 887 - 888 - switch (adev->ip_versions[GC_HWIP][0]) { 887 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 889 888 case IP_VERSION(11, 0, 0): 890 889 case IP_VERSION(11, 0, 2): 891 890 adev->gfx.config.max_hw_contexts = 8; ··· 1331 1332 1332 1333 adev->gfxhub.funcs->init(adev); 1333 1334 1334 - switch (adev->ip_versions[GC_HWIP][0]) { 1335 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1335 1336 case IP_VERSION(11, 0, 0): 1336 1337 case IP_VERSION(11, 0, 2): 1337 1338 case IP_VERSION(11, 0, 3): ··· 1363 1364 } 1364 1365 1365 1366 /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */ 1366 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) && 1367 - amdgpu_sriov_is_pp_one_vf(adev)) 1367 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3) && 1368 + amdgpu_sriov_is_pp_one_vf(adev)) 1368 1369 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG; 1369 1370 1370 1371 /* EOP Event */ ··· 2591 2592 for (i = 0; i < adev->usec_timeout; i++) { 2592 2593 cp_status = RREG32_SOC15(GC, 0, regCP_STAT); 2593 2594 2594 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) || 2595 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4) || 2596 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 5, 0)) 2595 + if (amdgpu_ip_version(adev, 
GC_HWIP, 0) == 2596 + IP_VERSION(11, 0, 1) || 2597 + amdgpu_ip_version(adev, GC_HWIP, 0) == 2598 + IP_VERSION(11, 0, 4) || 2599 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 5, 0)) 2597 2600 bootload_status = RREG32_SOC15(GC, 0, 2598 2601 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1); 2599 2602 else ··· 5026 5025 5027 5026 // Program RLC_PG_DELAY3 for CGPG hysteresis 5028 5027 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 5029 - switch (adev->ip_versions[GC_HWIP][0]) { 5028 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5030 5029 case IP_VERSION(11, 0, 1): 5031 5030 case IP_VERSION(11, 0, 4): 5032 5031 WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1); ··· 5055 5054 if (amdgpu_sriov_vf(adev)) 5056 5055 return 0; 5057 5056 5058 - switch (adev->ip_versions[GC_HWIP][0]) { 5057 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5059 5058 case IP_VERSION(11, 0, 0): 5060 5059 case IP_VERSION(11, 0, 2): 5061 5060 case IP_VERSION(11, 0, 3): ··· 5087 5086 if (amdgpu_sriov_vf(adev)) 5088 5087 return 0; 5089 5088 5090 - switch (adev->ip_versions[GC_HWIP][0]) { 5089 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5091 5090 case IP_VERSION(11, 0, 0): 5092 5091 case IP_VERSION(11, 0, 1): 5093 5092 case IP_VERSION(11, 0, 2):
+43 -40
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 895 895 896 896 static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) 897 897 { 898 - switch (adev->ip_versions[GC_HWIP][0]) { 898 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 899 899 case IP_VERSION(9, 0, 1): 900 900 soc15_program_register_sequence(adev, 901 901 golden_settings_gc_9_0, ··· 951 951 break; 952 952 } 953 953 954 - if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && 955 - (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))) 954 + if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && 955 + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))) 956 956 soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, 957 957 (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); 958 958 } ··· 1095 1095 adev->gfx.me_fw_write_wait = false; 1096 1096 adev->gfx.mec_fw_write_wait = false; 1097 1097 1098 - if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && 1098 + if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && 1099 1099 ((adev->gfx.mec_fw_version < 0x000001a5) || 1100 - (adev->gfx.mec_feature_version < 46) || 1101 - (adev->gfx.pfp_fw_version < 0x000000b7) || 1102 - (adev->gfx.pfp_feature_version < 46))) 1100 + (adev->gfx.mec_feature_version < 46) || 1101 + (adev->gfx.pfp_fw_version < 0x000000b7) || 1102 + (adev->gfx.pfp_feature_version < 46))) 1103 1103 DRM_WARN_ONCE("CP firmware version too old, please update!"); 1104 1104 1105 - switch (adev->ip_versions[GC_HWIP][0]) { 1105 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1106 1106 case IP_VERSION(9, 0, 1): 1107 1107 if ((adev->gfx.me_fw_version >= 0x0000009c) && 1108 1108 (adev->gfx.me_feature_version >= 42) && ··· 1202 1202 1203 1203 static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev) 1204 1204 { 1205 - if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) && 1205 + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) && 1206 1206 (adev->gfx.me_fw_version >= 0x000000a5) && 1207 
1207 (adev->gfx.me_feature_version >= 52)) 1208 1208 return true; ··· 1215 1215 if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) 1216 1216 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1217 1217 1218 - switch (adev->ip_versions[GC_HWIP][0]) { 1218 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1219 1219 case IP_VERSION(9, 0, 1): 1220 1220 case IP_VERSION(9, 2, 1): 1221 1221 case IP_VERSION(9, 4, 0): ··· 1326 1326 1327 1327 static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) 1328 1328 { 1329 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 1330 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || 1331 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) 1329 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || 1330 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || 1331 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) 1332 1332 return false; 1333 1333 1334 1334 return true; ··· 1485 1485 1486 1486 if (adev->flags & AMD_IS_APU) 1487 1487 always_on_cu_num = 4; 1488 - else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1)) 1488 + else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1)) 1489 1489 always_on_cu_num = 8; 1490 1490 else 1491 1491 always_on_cu_num = 12; ··· 1836 1836 u32 gb_addr_config; 1837 1837 int err; 1838 1838 1839 - switch (adev->ip_versions[GC_HWIP][0]) { 1839 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1840 1840 case IP_VERSION(9, 0, 1): 1841 1841 adev->gfx.config.max_hw_contexts = 8; 1842 1842 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; ··· 2002 2002 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2003 2003 unsigned int hw_prio; 2004 2004 2005 - switch (adev->ip_versions[GC_HWIP][0]) { 2005 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2006 2006 case IP_VERSION(9, 0, 1): 2007 2007 case IP_VERSION(9, 2, 1): 2008 2008 case IP_VERSION(9, 4, 0): ··· 2363 2363 { 2364 2364 uint32_t tmp; 2365 2365 2366 - switch 
(adev->ip_versions[GC_HWIP][0]) { 2366 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2367 2367 case IP_VERSION(9, 4, 1): 2368 2368 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG); 2369 2369 tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT, ··· 2700 2700 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ 2701 2701 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); 2702 2702 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); 2703 - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0)) 2703 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0)) 2704 2704 pwr_10_0_gfxip_control_over_cgpg(adev, true); 2705 2705 } 2706 2706 } ··· 2812 2812 * And it's needed by gfxoff feature. 2813 2813 */ 2814 2814 if (adev->gfx.rlc.is_rlc_v2_1) { 2815 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) || 2815 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == 2816 + IP_VERSION(9, 2, 1) || 2816 2817 (adev->apu_flags & AMD_APU_IS_RAVEN2)) 2817 2818 gfx_v9_1_init_rlc_save_restore_list(adev); 2818 2819 gfx_v9_0_enable_save_restore_machine(adev); ··· 2926 2925 return r; 2927 2926 } 2928 2927 2929 - switch (adev->ip_versions[GC_HWIP][0]) { 2928 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2930 2929 case IP_VERSION(9, 2, 2): 2931 2930 case IP_VERSION(9, 1, 0): 2932 2931 gfx_v9_0_init_lbpw(adev); ··· 3714 3713 { 3715 3714 u32 tmp; 3716 3715 3717 - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) && 3718 - adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)) 3716 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) && 3717 + amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) 3719 3718 return; 3720 3719 3721 3720 tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG); ··· 3755 3754 if (r) 3756 3755 return r; 3757 3756 3758 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 3757 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) 3759 3758 gfx_v9_4_2_set_power_brake_sequence(adev); 3760 
3759 3761 3760 return r; ··· 3803 3802 3804 3803 /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ 3805 3804 if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || 3806 - (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { 3805 + (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) { 3807 3806 dev_dbg(adev->dev, "Skipping RLC halt\n"); 3808 3807 return 0; 3809 3808 } ··· 3987 3986 { 3988 3987 uint64_t clock, clock_lo, clock_hi, hi_check; 3989 3988 3990 - switch (adev->ip_versions[GC_HWIP][0]) { 3989 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 3991 3990 case IP_VERSION(9, 3, 0): 3992 3991 preempt_disable(); 3993 3992 clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); ··· 4006 4005 default: 4007 4006 amdgpu_gfx_off_ctrl(adev, false); 4008 4007 mutex_lock(&adev->gfx.gpu_clock_mutex); 4009 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { 4008 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == 4009 + IP_VERSION(9, 0, 1) && 4010 + amdgpu_sriov_runtime(adev)) { 4010 4011 clock = gfx_v9_0_kiq_read_clock(adev); 4011 4012 } else { 4012 4013 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); ··· 4360 4357 if (!ring->sched.ready) 4361 4358 return 0; 4362 4359 4363 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { 4360 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) { 4364 4361 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; 4365 4362 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); 4366 4363 vgpr_init_regs_ptr = vgpr_init_regs_arcturus; ··· 4512 4509 4513 4510 adev->gfx.funcs = &gfx_v9_0_gfx_funcs; 4514 4511 4515 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || 4516 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 4512 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || 4513 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) 4517 4514 adev->gfx.num_gfx_rings = 
0; 4518 4515 else 4519 4516 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; ··· 4551 4548 } 4552 4549 4553 4550 /* requires IBs so do in late init after IB pool is initialized */ 4554 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 4551 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) 4555 4552 r = gfx_v9_4_2_do_edc_gpr_workarounds(adev); 4556 4553 else 4557 4554 r = gfx_v9_0_do_edc_gpr_workarounds(adev); ··· 4583 4580 if (r) 4584 4581 return r; 4585 4582 4586 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 4583 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) 4587 4584 gfx_v9_4_2_debug_trap_config_init(adev, 4588 4585 adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID); 4589 4586 else ··· 4679 4676 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 4680 4677 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 4681 4678 4682 - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) 4679 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1)) 4683 4680 data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; 4684 4681 4685 4682 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | ··· 4713 4710 /* 1 - MGCG_OVERRIDE */ 4714 4711 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 4715 4712 4716 - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) 4713 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1)) 4717 4714 data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; 4718 4715 4719 4716 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | ··· 4819 4816 /* enable cgcg FSM(0x0000363F) */ 4820 4817 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 4821 4818 4822 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) 4819 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) 4823 4820 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | 4824 4821 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; 4825 4822 else ··· 4954 4951 struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; 4955 4952 bool enable = (state == AMD_PG_STATE_GATE); 4956 4953 4957 - switch (adev->ip_versions[GC_HWIP][0]) { 4954 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4958 4955 case IP_VERSION(9, 2, 2): 4959 4956 case IP_VERSION(9, 1, 0): 4960 4957 case IP_VERSION(9, 3, 0): ··· 5001 4998 if (amdgpu_sriov_vf(adev)) 5002 4999 return 0; 5003 5000 5004 - switch (adev->ip_versions[GC_HWIP][0]) { 5001 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 5005 5002 case IP_VERSION(9, 0, 1): 5006 5003 case IP_VERSION(9, 2, 1): 5007 5004 case IP_VERSION(9, 4, 0): ··· 5051 5048 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 5052 5049 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 5053 5050 5054 - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) { 5051 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) { 5055 5052 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 5056 5053 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); 5057 5054 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) ··· 7090 7087 7091 7088 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) 7092 7089 { 7093 - switch (adev->ip_versions[GC_HWIP][0]) { 7090 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7094 7091 case IP_VERSION(9, 0, 1): 7095 7092 case IP_VERSION(9, 2, 1): 7096 7093 case IP_VERSION(9, 4, 0): ··· 7109 7106 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) 7110 7107 { 7111 7108 /* init asci gds info */ 7112 - switch (adev->ip_versions[GC_HWIP][0]) { 7109 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7113 7110 case IP_VERSION(9, 0, 1): 7114 7111 case IP_VERSION(9, 2, 1): 7115 7112 case IP_VERSION(9, 4, 0): ··· 7131 7128 break; 7132 7129 } 7133 7130 7134 - switch (adev->ip_versions[GC_HWIP][0]) { 7131 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 7135 7132 case IP_VERSION(9, 0, 1): 7136 7133 case IP_VERSION(9, 4, 0): 7137 7134 adev->gds.gds_compute_max_wave_id = 0x7ff;
+4 -4
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 682 682 adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs; 683 683 adev->gfx.ras = &gfx_v9_4_3_ras; 684 684 685 - switch (adev->ip_versions[GC_HWIP][0]) { 685 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 686 686 case IP_VERSION(9, 4, 3): 687 687 adev->gfx.config.max_hw_contexts = 8; 688 688 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; ··· 2430 2430 return 0; 2431 2431 2432 2432 num_xcc = NUM_XCC(adev->gfx.xcc_mask); 2433 - switch (adev->ip_versions[GC_HWIP][0]) { 2433 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2434 2434 case IP_VERSION(9, 4, 3): 2435 2435 for (i = 0; i < num_xcc; i++) 2436 2436 gfx_v9_4_3_xcc_update_gfx_clock_gating( ··· 4231 4231 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) 4232 4232 { 4233 4233 /* init asci gds info */ 4234 - switch (adev->ip_versions[GC_HWIP][0]) { 4234 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4235 4235 case IP_VERSION(9, 4, 3): 4236 4236 /* 9.4.3 removed all the GDS internal memory, 4237 4237 * only support GWS opcode in kernel, like barrier ··· 4243 4243 break; 4244 4244 } 4245 4245 4246 - switch (adev->ip_versions[GC_HWIP][0]) { 4246 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 4247 4247 case IP_VERSION(9, 4, 3): 4248 4248 /* deprecated for 9.4.3, no usage at all */ 4249 4249 adev->gds.gds_compute_max_wave_id = 0;
+8 -5
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
··· 356 356 * the SQ per-process. 357 357 * Retry faults need to be enabled for that to work. 358 358 */ 359 - tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 360 - RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 361 - !adev->gmc.noretry || 362 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 363 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)); 359 + tmp = REG_SET_FIELD( 360 + tmp, VM_CONTEXT1_CNTL, 361 + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 362 + !adev->gmc.noretry || 363 + amdgpu_ip_version(adev, GC_HWIP, 0) == 364 + IP_VERSION(9, 4, 2) || 365 + amdgpu_ip_version(adev, GC_HWIP, 0) == 366 + IP_VERSION(9, 4, 3)); 364 367 WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL, 365 368 i * hub->ctx_distance, tmp); 366 369 WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
+2 -2
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
··· 510 510 u32 max_num_physical_nodes = 0; 511 511 u32 max_physical_node_id = 0; 512 512 513 - switch (adev->ip_versions[XGMI_HWIP][0]) { 513 + switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) { 514 514 case IP_VERSION(4, 8, 0): 515 515 max_num_physical_nodes = 4; 516 516 max_physical_node_id = 3; ··· 548 548 adev->gfx.config.max_sh_per_se * 549 549 adev->gfx.config.max_shader_engines); 550 550 551 - switch (adev->ip_versions[GC_HWIP][0]) { 551 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 552 552 case IP_VERSION(10, 3, 1): 553 553 case IP_VERSION(10, 3, 3): 554 554 /* Get SA disabled bitmap from eFuse setting */
+15 -13
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 145 145 * the new fast GRBM interface. 146 146 */ 147 147 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && 148 - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) 148 + (amdgpu_ip_version(adev, GC_HWIP, 0) < 149 + IP_VERSION(10, 3, 0))) 149 150 RREG32(hub->vm_l2_pro_fault_status); 150 151 151 152 status = RREG32(hub->vm_l2_pro_fault_status); ··· 279 278 * to avoid a false ACK due to the new fast GRBM interface. 280 279 */ 281 280 if ((vmhub == AMDGPU_GFXHUB(0)) && 282 - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) 281 + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 3, 0))) 283 282 RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + 284 283 hub->eng_distance * eng, hub_ip); 285 284 ··· 681 680 682 681 static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) 683 682 { 684 - switch (adev->ip_versions[UMC_HWIP][0]) { 683 + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 685 684 case IP_VERSION(8, 7, 0): 686 685 adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; 687 686 adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; ··· 698 697 699 698 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) 700 699 { 701 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 700 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 702 701 case IP_VERSION(2, 3, 0): 703 702 case IP_VERSION(2, 4, 0): 704 703 case IP_VERSION(2, 4, 1): ··· 712 711 713 712 static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) 714 713 { 715 - switch (adev->ip_versions[GC_HWIP][0]) { 714 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 716 715 case IP_VERSION(10, 3, 0): 717 716 case IP_VERSION(10, 3, 2): 718 717 case IP_VERSION(10, 3, 1): ··· 826 825 827 826 /* set the gart size */ 828 827 if (amdgpu_gart_size == -1) { 829 - switch (adev->ip_versions[GC_HWIP][0]) { 828 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 830 829 default: 831 830 adev->gmc.gart_size = 512ULL << 20; 832 831 break; ··· 893 892 adev->gmc.vram_vendor = vram_vendor; 894 893 } 
895 894 896 - switch (adev->ip_versions[GC_HWIP][0]) { 895 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 897 896 case IP_VERSION(10, 3, 0): 898 897 adev->gmc.mall_size = 128 * 1024 * 1024; 899 898 break; ··· 911 910 break; 912 911 } 913 912 914 - switch (adev->ip_versions[GC_HWIP][0]) { 913 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 915 914 case IP_VERSION(10, 1, 10): 916 915 case IP_VERSION(10, 1, 1): 917 916 case IP_VERSION(10, 1, 2): ··· 1196 1195 * is a new problem observed at DF 3.0.3, however with the same suspend sequence not 1197 1196 * seen any issue on the DF 3.0.2 series platform. 1198 1197 */ 1199 - if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) { 1198 + if (adev->in_s0ix && 1199 + amdgpu_ip_version(adev, DF_HWIP, 0) > IP_VERSION(3, 0, 2)) { 1200 1200 dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n"); 1201 1201 return 0; 1202 1202 } ··· 1206 1204 if (r) 1207 1205 return r; 1208 1206 1209 - if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) 1207 + if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0)) 1210 1208 return athub_v2_1_set_clockgating(adev, state); 1211 1209 else 1212 1210 return athub_v2_0_set_clockgating(adev, state); ··· 1216 1214 { 1217 1215 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1218 1216 1219 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) || 1220 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4)) 1217 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 3) || 1218 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 1, 4)) 1221 1219 return; 1222 1220 1223 1221 adev->mmhub.funcs->get_clockgating(adev, flags); 1224 1222 1225 - if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) 1223 + if (amdgpu_ip_version(adev, ATHUB_HWIP, 0) >= IP_VERSION(2, 1, 0)) 1226 1224 athub_v2_1_get_clockgating(adev, flags); 1227 1225 else 1228 1226 athub_v2_0_get_clockgating(adev, flags);
+4 -4
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 588 588 589 589 static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) 590 590 { 591 - switch (adev->ip_versions[UMC_HWIP][0]) { 591 + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 592 592 case IP_VERSION(8, 10, 0): 593 593 adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM; 594 594 adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM; ··· 611 611 612 612 static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev) 613 613 { 614 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 614 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 615 615 case IP_VERSION(3, 0, 1): 616 616 adev->mmhub.funcs = &mmhub_v3_0_1_funcs; 617 617 break; ··· 629 629 630 630 static void gmc_v11_0_set_gfxhub_funcs(struct amdgpu_device *adev) 631 631 { 632 - switch (adev->ip_versions[GC_HWIP][0]) { 632 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 633 633 case IP_VERSION(11, 0, 3): 634 634 adev->gfxhub.funcs = &gfxhub_v3_0_3_funcs; 635 635 break; ··· 782 782 adev->gmc.vram_type = vram_type; 783 783 adev->gmc.vram_vendor = vram_vendor; 784 784 785 - switch (adev->ip_versions[GC_HWIP][0]) { 785 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 786 786 case IP_VERSION(11, 0, 0): 787 787 case IP_VERSION(11, 0, 1): 788 788 case IP_VERSION(11, 0, 2):
+52 -44
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 640 640 addr, entry->client_id, 641 641 soc15_ih_clientid_name[entry->client_id]); 642 642 643 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 643 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 644 644 dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", 645 645 node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, 646 646 node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : ""); ··· 654 654 * the new fast GRBM interface. 655 655 */ 656 656 if ((entry->vmid_src == AMDGPU_GFXHUB(0)) && 657 - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) 657 + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) 658 658 RREG32(hub->vm_l2_pro_fault_status); 659 659 660 660 status = RREG32(hub->vm_l2_pro_fault_status); ··· 671 671 gfxhub_client_ids[cid], 672 672 cid); 673 673 } else { 674 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 674 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 675 675 case IP_VERSION(9, 0, 0): 676 676 mmhub_cid = mmhub_client_ids_vega10[cid][rw]; 677 677 break; ··· 772 772 static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, 773 773 uint32_t vmhub) 774 774 { 775 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 776 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 775 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || 776 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 777 777 return false; 778 778 779 779 return ((vmhub == AMDGPU_MMHUB0(0) || ··· 824 824 825 825 hub = &adev->vmhub[vmhub]; 826 826 if (adev->gmc.xgmi.num_physical_nodes && 827 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { 827 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0)) { 828 828 /* Vega20+XGMI caches PTEs in TC and TLB. Add a 829 829 * heavy-weight TLB flush (type 2), which flushes 830 830 * both. 
Due to a race condition with concurrent ··· 834 834 inv_req = gmc_v9_0_get_invalidate_req(vmid, 2); 835 835 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); 836 836 } else if (flush_type == 2 && 837 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && 837 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && 838 838 adev->rev_id == 0) { 839 839 inv_req = gmc_v9_0_get_invalidate_req(vmid, 0); 840 840 inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type); ··· 896 896 * GRBM interface. 897 897 */ 898 898 if ((vmhub == AMDGPU_GFXHUB(0)) && 899 - (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) 899 + (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))) 900 900 RREG32_NO_KIQ(hub->vm_inv_eng0_req + 901 901 hub->eng_distance * eng); 902 902 ··· 969 969 * still need a second TLB flush after this. 970 970 */ 971 971 bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && 972 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); 972 + amdgpu_ip_version(adev, GC_HWIP, 0) == 973 + IP_VERSION(9, 4, 0)); 973 974 /* 2 dwords flush + 8 dwords fence */ 974 975 unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8; 975 976 ··· 985 984 pasid, 2, all_hub); 986 985 987 986 if (flush_type == 2 && 988 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) && 987 + amdgpu_ip_version(adev, GC_HWIP, 0) == 988 + IP_VERSION(9, 4, 3) && 989 989 adev->rev_id == 0) 990 990 kiq->pmf->kiq_invalidate_tlbs(ring, 991 991 pasid, 0, all_hub); ··· 1194 1192 bool snoop = false; 1195 1193 bool is_local; 1196 1194 1197 - switch (adev->ip_versions[GC_HWIP][0]) { 1195 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1198 1196 case IP_VERSION(9, 4, 1): 1199 1197 case IP_VERSION(9, 4, 2): 1200 1198 if (is_vram) { ··· 1208 1206 /* FIXME: is this still needed? Or does 1209 1207 * amdgpu_ttm_tt_pde_flags already handle this? 
1210 1208 */ 1211 - if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 1212 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) && 1209 + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == 1210 + IP_VERSION(9, 4, 2) || 1211 + amdgpu_ip_version(adev, GC_HWIP, 0) == 1212 + IP_VERSION(9, 4, 3)) && 1213 1213 adev->gmc.xgmi.connected_to_cpu) 1214 1214 snoop = true; 1215 1215 } else { ··· 1320 1316 /* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system 1321 1317 * memory can use more efficient MTYPEs. 1322 1318 */ 1323 - if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) 1319 + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3)) 1324 1320 return; 1325 1321 1326 1322 /* Only direct-mapped memory allows us to determine the NUMA node from ··· 1389 1385 } else { 1390 1386 u32 viewport; 1391 1387 1392 - switch (adev->ip_versions[DCE_HWIP][0]) { 1388 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1393 1389 case IP_VERSION(1, 0, 0): 1394 1390 case IP_VERSION(1, 0, 1): 1395 1391 viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); ··· 1460 1456 1461 1457 static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) 1462 1458 { 1463 - switch (adev->ip_versions[UMC_HWIP][0]) { 1459 + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 1464 1460 case IP_VERSION(6, 0, 0): 1465 1461 adev->umc.funcs = &umc_v6_0_funcs; 1466 1462 break; ··· 1514 1510 1515 1511 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) 1516 1512 { 1517 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 1513 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 1518 1514 case IP_VERSION(9, 4, 1): 1519 1515 adev->mmhub.funcs = &mmhub_v9_4_funcs; 1520 1516 break; ··· 1532 1528 1533 1529 static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) 1534 1530 { 1535 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 1531 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 1536 1532 case IP_VERSION(9, 4, 0): 1537 1533 adev->mmhub.ras = &mmhub_v1_0_ras; 
1538 1534 break; ··· 1553 1549 1554 1550 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) 1555 1551 { 1556 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 1552 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 1557 1553 adev->gfxhub.funcs = &gfxhub_v1_2_funcs; 1558 1554 else 1559 1555 adev->gfxhub.funcs = &gfxhub_v1_0_funcs; ··· 1569 1565 struct amdgpu_mca *mca = &adev->mca; 1570 1566 1571 1567 /* is UMC the right IP to check for MCA? Maybe DF? */ 1572 - switch (adev->ip_versions[UMC_HWIP][0]) { 1568 + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { 1573 1569 case IP_VERSION(6, 7, 0): 1574 1570 if (!adev->gmc.xgmi.connected_to_cpu) { 1575 1571 mca->mp0.ras = &mca_v3_0_mp0_ras; ··· 1596 1592 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined 1597 1593 * in their IP discovery tables 1598 1594 */ 1599 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) || 1600 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || 1601 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 1595 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) || 1596 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || 1597 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 1602 1598 adev->gmc.xgmi.supported = true; 1603 1599 1604 - if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { 1600 + if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) { 1605 1601 adev->gmc.xgmi.supported = true; 1606 1602 adev->gmc.xgmi.connected_to_cpu = 1607 1603 adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); 1608 1604 } 1609 1605 1610 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { 1606 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) { 1611 1607 enum amdgpu_pkg_type pkg_type = 1612 1608 adev->smuio.funcs->get_pkg_type(adev); 1613 1609 /* On GFXIP 9.4.3. APU, there is no physical VRAM domain present ··· 1656 1652 * writes, while disables HBM ECC for vega10. 
1657 1653 */ 1658 1654 if (!amdgpu_sriov_vf(adev) && 1659 - (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { 1655 + (amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(6, 0, 0))) { 1660 1656 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { 1661 1657 if (adev->df.funcs && 1662 1658 adev->df.funcs->enable_ecc_force_par_wr_rmw) ··· 1764 1760 1765 1761 /* set the gart size */ 1766 1762 if (amdgpu_gart_size == -1) { 1767 - switch (adev->ip_versions[GC_HWIP][0]) { 1763 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1768 1764 case IP_VERSION(9, 0, 1): /* all engines support GPUVM */ 1769 1765 case IP_VERSION(9, 2, 1): /* all engines support GPUVM */ 1770 1766 case IP_VERSION(9, 4, 0): ··· 1843 1839 */ 1844 1840 static void gmc_v9_0_save_registers(struct amdgpu_device *adev) 1845 1841 { 1846 - if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 1847 - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) 1842 + if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 1843 + (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) 1848 1844 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); 1849 1845 } 1850 1846 ··· 2039 2035 2040 2036 spin_lock_init(&adev->gmc.invalidate_lock); 2041 2037 2042 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { 2038 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) { 2043 2039 gmc_v9_4_3_init_vram_info(adev); 2044 2040 } else if (!adev->bios) { 2045 2041 if (adev->flags & AMD_IS_APU) { ··· 2079 2075 adev->gmc.vram_type = vram_type; 2080 2076 adev->gmc.vram_vendor = vram_vendor; 2081 2077 } 2082 - switch (adev->ip_versions[GC_HWIP][0]) { 2078 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 2083 2079 case IP_VERSION(9, 1, 0): 2084 2080 case IP_VERSION(9, 2, 2): 2085 2081 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); ··· 2112 2108 amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); 2113 2109 else 2114 2110 amdgpu_vm_adjust_size(adev, 
256 * 1024, 9, 3, 48); 2115 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) 2111 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2)) 2116 2112 adev->gmc.translate_further = adev->vm_manager.num_level > 1; 2117 2113 break; 2118 2114 case IP_VERSION(9, 4, 1): ··· 2144 2140 if (r) 2145 2141 return r; 2146 2142 2147 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { 2143 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) { 2148 2144 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, 2149 2145 &adev->gmc.vm_fault); 2150 2146 if (r) ··· 2173 2169 */ 2174 2170 adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ 2175 2171 2176 - dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44; 2172 + dma_addr_bits = amdgpu_ip_version(adev, GC_HWIP, 0) >= 2173 + IP_VERSION(9, 4, 2) ? 2174 + 48 : 2175 + 44; 2177 2176 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits)); 2178 2177 if (r) { 2179 2178 dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n"); ··· 2190 2183 2191 2184 amdgpu_gmc_get_vbios_allocations(adev); 2192 2185 2193 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { 2186 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) { 2194 2187 r = gmc_v9_0_init_mem_ranges(adev); 2195 2188 if (r) 2196 2189 return r; ··· 2216 2209 * for video processing. 2217 2210 */ 2218 2211 adev->vm_manager.first_kfd_vmid = 2219 - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || 2220 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || 2221 - adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8; 2212 + (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || 2213 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || 2214 + amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) ? 
2215 + 3 : 2216 + 8; 2222 2217 2223 2218 amdgpu_vm_manager_init(adev); 2224 2219 ··· 2230 2221 if (r) 2231 2222 return r; 2232 2223 2233 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 2224 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 2234 2225 amdgpu_gmc_sysfs_init(adev); 2235 2226 2236 2227 return 0; ··· 2240 2231 { 2241 2232 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2242 2233 2243 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) 2234 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) 2244 2235 amdgpu_gmc_sysfs_fini(adev); 2245 2236 adev->gmc.num_mem_partitions = 0; 2246 2237 kfree(adev->gmc.mem_partitions); ··· 2262 2253 2263 2254 static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) 2264 2255 { 2265 - 2266 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 2256 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 2267 2257 case IP_VERSION(9, 0, 0): 2268 2258 if (amdgpu_sriov_vf(adev)) 2269 2259 break; ··· 2296 2288 */ 2297 2289 void gmc_v9_0_restore_registers(struct amdgpu_device *adev) 2298 2290 { 2299 - if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 2300 - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { 2291 + if ((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 2292 + (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) { 2301 2293 WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); 2302 2294 WARN_ON(adev->gmc.sdpif_register != 2303 2295 RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
+9 -9
drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
··· 49 49 static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, 50 50 struct amdgpu_ring *ring) 51 51 { 52 - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) || 53 - adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2)) 52 + if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0) || 53 + amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2)) 54 54 return; 55 55 56 56 if (!ring || !ring->funcs->emit_wreg) ··· 80 80 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) 81 81 return; 82 82 83 - if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0)) 83 + if (amdgpu_ip_version(adev, HDP_HWIP, 0) >= IP_VERSION(4, 4, 0)) 84 84 WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0); 85 85 else 86 86 /*read back hdp ras counter to reset it to 0 */ ··· 92 92 { 93 93 uint32_t def, data; 94 94 95 - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) || 96 - adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) || 97 - adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) || 98 - adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) { 95 + if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 0, 0) || 96 + amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 0, 1) || 97 + amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 1, 1) || 98 + amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 1, 0)) { 99 99 def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); 100 100 101 101 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) ··· 137 137 138 138 static void hdp_v4_0_init_registers(struct amdgpu_device *adev) 139 139 { 140 - switch (adev->ip_versions[HDP_HWIP][0]) { 140 + switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) { 141 141 case IP_VERSION(4, 2, 1): 142 142 WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); 143 143 break; ··· 147 147 148 148 WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); 149 149 150 - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0)) 150 + if (amdgpu_ip_version(adev, 
HDP_HWIP, 0) == IP_VERSION(4, 4, 0)) 151 151 WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, READ_BUFFER_WATERMARK, 2); 152 152 153 153 WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+3 -3
drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c
··· 51 51 AMD_CG_SUPPORT_HDP_SD))) 52 52 return; 53 53 54 - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(6, 1, 0)) 54 + if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0)) 55 55 hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1); 56 56 else 57 57 hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL); ··· 61 61 * forced on IPH & RC clock */ 62 62 hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, 63 63 RC_MEM_CLK_SOFT_OVERRIDE, 1); 64 - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(6, 1, 0)) 64 + if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0)) 65 65 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1, hdp_clk_cntl); 66 66 else 67 67 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl); ··· 126 126 /* disable IPH & RC clock override after clock/power mode changing */ 127 127 hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL, 128 128 RC_MEM_CLK_SOFT_OVERRIDE, 0); 129 - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(6, 1, 0)) 129 + if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(6, 1, 0)) 130 130 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL_V6_1, hdp_clk_cntl); 131 131 else 132 132 WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
+1 -1
drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
··· 353 353 354 354 WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2); 355 355 356 - switch (adev->ip_versions[GC_HWIP][0]) { 356 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 357 357 case IP_VERSION(11, 0, 0): 358 358 program_imu_rlc_ram(adev, imu_rlc_ram_golden_11, 359 359 (const u32)ARRAY_SIZE(imu_rlc_ram_golden_11));
+2 -2
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
··· 128 128 129 129 ring = adev->jpeg.inst[i].ring_dec; 130 130 ring->use_doorbell = true; 131 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) 131 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0)) 132 132 ring->vm_hub = AMDGPU_MMHUB1(0); 133 133 else 134 134 ring->vm_hub = AMDGPU_MMHUB0(0); ··· 822 822 823 823 static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev) 824 824 { 825 - switch (adev->ip_versions[JPEG_HWIP][0]) { 825 + switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) { 826 826 case IP_VERSION(2, 6, 0): 827 827 adev->jpeg.ras = &jpeg_v2_6_ras; 828 828 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
··· 52 52 53 53 u32 harvest; 54 54 55 - switch (adev->ip_versions[UVD_HWIP][0]) { 55 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 56 56 case IP_VERSION(3, 1, 1): 57 57 case IP_VERSION(3, 1, 2): 58 58 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 831 831 832 832 static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev) 833 833 { 834 - switch (adev->ip_versions[JPEG_HWIP][0]) { 834 + switch (amdgpu_ip_version(adev, JPEG_HWIP, 0)) { 835 835 case IP_VERSION(4, 0, 0): 836 836 adev->jpeg.ras = &jpeg_v4_0_ras; 837 837 break;
+5 -5
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
··· 558 558 WREG32_SOC15(GC, 0, mmCP_MES_MDBOUND_LO, 0x3FFFF); 559 559 560 560 /* invalidate ICACHE */ 561 - switch (adev->ip_versions[GC_HWIP][0]) { 561 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 562 562 case IP_VERSION(10, 3, 0): 563 563 data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid); 564 564 break; ··· 568 568 } 569 569 data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); 570 570 data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); 571 - switch (adev->ip_versions[GC_HWIP][0]) { 571 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 572 572 case IP_VERSION(10, 3, 0): 573 573 WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data); 574 574 break; ··· 578 578 } 579 579 580 580 /* prime the ICACHE. */ 581 - switch (adev->ip_versions[GC_HWIP][0]) { 581 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 582 582 case IP_VERSION(10, 3, 0): 583 583 data = RREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid); 584 584 break; ··· 587 587 break; 588 588 } 589 589 data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); 590 - switch (adev->ip_versions[GC_HWIP][0]) { 590 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 591 591 case IP_VERSION(10, 3, 0): 592 592 WREG32_SOC15(GC, 0, mmCP_MES_IC_OP_CNTL_Sienna_Cichlid, data); 593 593 break; ··· 995 995 struct amdgpu_device *adev = ring->adev; 996 996 997 997 /* tell RLC which is KIQ queue */ 998 - switch (adev->ip_versions[GC_HWIP][0]) { 998 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 999 999 case IP_VERSION(10, 3, 0): 1000 1000 case IP_VERSION(10, 3, 2): 1001 1001 case IP_VERSION(10, 3, 1):
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 1316 1316 1317 1317 /* it's only intended for use in mes_self_test case, not for s0ix and reset */ 1318 1318 if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend && 1319 - (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) 1319 + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))) 1320 1320 amdgpu_mes_self_test(adev); 1321 1321 1322 1322 return 0;
+6 -6
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 151 151 dev_err(adev->dev, 152 152 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 153 153 status); 154 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 154 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 155 155 case IP_VERSION(2, 0, 0): 156 156 case IP_VERSION(2, 0, 2): 157 157 mmhub_cid = mmhub_client_ids_navi1x[cid][rw]; ··· 568 568 if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) 569 569 return; 570 570 571 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 571 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 572 572 case IP_VERSION(2, 1, 0): 573 573 case IP_VERSION(2, 1, 1): 574 574 case IP_VERSION(2, 1, 2): ··· 601 601 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); 602 602 } 603 603 604 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 604 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 605 605 case IP_VERSION(2, 1, 0): 606 606 case IP_VERSION(2, 1, 1): 607 607 case IP_VERSION(2, 1, 2): ··· 625 625 if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) 626 626 return; 627 627 628 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 628 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 629 629 case IP_VERSION(2, 1, 0): 630 630 case IP_VERSION(2, 1, 1): 631 631 case IP_VERSION(2, 1, 2): ··· 651 651 if (amdgpu_sriov_vf(adev)) 652 652 return 0; 653 653 654 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 654 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 655 655 case IP_VERSION(2, 0, 0): 656 656 case IP_VERSION(2, 0, 2): 657 657 case IP_VERSION(2, 1, 0): ··· 676 676 if (amdgpu_sriov_vf(adev)) 677 677 *flags = 0; 678 678 679 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 679 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 680 680 case IP_VERSION(2, 1, 0): 681 681 case IP_VERSION(2, 1, 1): 682 682 case IP_VERSION(2, 1, 2):
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
··· 90 90 dev_err(adev->dev, 91 91 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 92 92 status); 93 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 93 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 94 94 case IP_VERSION(2, 3, 0): 95 95 case IP_VERSION(2, 4, 0): 96 96 case IP_VERSION(2, 4, 1):
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
··· 107 107 dev_err(adev->dev, 108 108 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 109 109 status); 110 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 110 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 111 111 case IP_VERSION(3, 0, 0): 112 112 case IP_VERSION(3, 0, 1): 113 113 mmhub_cid = mmhub_client_ids_v3_0_0[cid][rw];
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
··· 108 108 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 109 109 status); 110 110 111 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 111 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 112 112 case IP_VERSION(3, 0, 1): 113 113 mmhub_cid = mmhub_client_ids_v3_0_1[cid][rw]; 114 114 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
··· 96 96 "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", 97 97 status); 98 98 99 - switch (adev->ip_versions[MMHUB_HWIP][0]) { 99 + switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { 100 100 case IP_VERSION(3, 3, 0): 101 101 mmhub_cid = mmhub_client_ids_v3_3[cid][rw]; 102 102 break;
+2 -2
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
··· 107 107 { 108 108 u32 ih_cntl, ih_rb_cntl; 109 109 110 - if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3)) 110 + if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) < IP_VERSION(5, 0, 3)) 111 111 return; 112 112 113 113 ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2); ··· 330 330 331 331 if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { 332 332 if (ih[0]->use_bus_addr) { 333 - switch (adev->ip_versions[OSSSYS_HWIP][0]) { 333 + switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) { 334 334 case IP_VERSION(5, 0, 3): 335 335 case IP_VERSION(5, 2, 0): 336 336 case IP_VERSION(5, 2, 1):
+1 -1
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
··· 536 536 { 537 537 uint32_t reg, reg_data; 538 538 539 - if (adev->ip_versions[NBIO_HWIP][0] != IP_VERSION(3, 3, 0)) 539 + if (amdgpu_ip_version(adev, NBIO_HWIP, 0) != IP_VERSION(3, 3, 0)) 540 540 return; 541 541 542 542 reg = RREG32_SOC15(NBIO, 0, mmBIF_RB_CNTL);
+3 -3
drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
··· 338 338 339 339 static void nbio_v4_3_init_registers(struct amdgpu_device *adev) 340 340 { 341 - if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) { 341 + if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(4, 3, 0)) { 342 342 uint32_t data; 343 343 344 344 data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2); ··· 392 392 #ifdef CONFIG_PCIEASPM 393 393 uint32_t def, data; 394 394 395 - if (!(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 4, 0)) && 396 - !(adev->ip_versions[PCIE_HWIP][0] == IP_VERSION(7, 6, 0))) 395 + if (!(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 4, 0)) && 396 + !(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 6, 0))) 397 397 return; 398 398 399 399 def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL);
+5 -5
drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
··· 59 59 { 60 60 u32 tmp; 61 61 62 - switch (adev->ip_versions[NBIO_HWIP][0]) { 62 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 63 63 case IP_VERSION(7, 2, 1): 64 64 case IP_VERSION(7, 3, 0): 65 65 case IP_VERSION(7, 5, 0): ··· 78 78 79 79 static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable) 80 80 { 81 - switch (adev->ip_versions[NBIO_HWIP][0]) { 81 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 82 82 case IP_VERSION(7, 2, 1): 83 83 case IP_VERSION(7, 3, 0): 84 84 case IP_VERSION(7, 5, 0): ··· 262 262 { 263 263 uint32_t def, data; 264 264 265 - switch (adev->ip_versions[NBIO_HWIP][0]) { 265 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 266 266 case IP_VERSION(7, 2, 1): 267 267 case IP_VERSION(7, 3, 0): 268 268 case IP_VERSION(7, 5, 0): ··· 369 369 static void nbio_v7_2_init_registers(struct amdgpu_device *adev) 370 370 { 371 371 uint32_t def, data; 372 - switch (adev->ip_versions[NBIO_HWIP][0]) { 372 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 373 373 case IP_VERSION(7, 2, 1): 374 374 case IP_VERSION(7, 3, 0): 375 375 case IP_VERSION(7, 5, 0): ··· 394 394 break; 395 395 } 396 396 397 - switch (adev->ip_versions[NBIO_HWIP][0]) { 397 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 398 398 case IP_VERSION(7, 3, 0): 399 399 case IP_VERSION(7, 5, 1): 400 400 data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+2 -2
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
··· 347 347 adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, 348 348 mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; 349 349 350 - if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4) && 350 + if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4) && 351 351 !amdgpu_sriov_vf(adev)) { 352 352 baco_cntl = RREG32_SOC15(NBIO, 0, mmBACO_CNTL); 353 353 if (baco_cntl & ··· 702 702 #ifdef CONFIG_PCIEASPM 703 703 uint32_t def, data; 704 704 705 - if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) 705 + if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(7, 4, 4)) 706 706 return; 707 707 708 708 def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+4 -4
drivers/gpu/drm/amd/amdgpu/nv.c
··· 214 214 if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) 215 215 return -EINVAL; 216 216 217 - switch (adev->ip_versions[UVD_HWIP][0]) { 217 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 218 218 case IP_VERSION(3, 0, 0): 219 219 case IP_VERSION(3, 0, 64): 220 220 case IP_VERSION(3, 0, 192): ··· 453 453 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", 454 454 amdgpu_reset_method); 455 455 456 - switch (adev->ip_versions[MP1_HWIP][0]) { 456 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 457 457 case IP_VERSION(11, 5, 0): 458 458 case IP_VERSION(13, 0, 1): 459 459 case IP_VERSION(13, 0, 3): ··· 669 669 /* TODO: split the GC and PG flags based on the relevant IP version for which 670 670 * they are relevant. 671 671 */ 672 - switch (adev->ip_versions[GC_HWIP][0]) { 672 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 673 673 case IP_VERSION(10, 1, 10): 674 674 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 675 675 AMD_CG_SUPPORT_GFX_CGCG | ··· 1073 1073 if (amdgpu_sriov_vf(adev)) 1074 1074 return 0; 1075 1075 1076 - switch (adev->ip_versions[NBIO_HWIP][0]) { 1076 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 1077 1077 case IP_VERSION(2, 3, 0): 1078 1078 case IP_VERSION(2, 3, 1): 1079 1079 case IP_VERSION(2, 3, 2):
+4 -3
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
··· 58 58 return err; 59 59 60 60 err = psp_init_ta_microcode(psp, ucode_prefix); 61 - if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) && 62 - (adev->pdev->revision == 0xa1) && 63 - (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) { 61 + if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0)) && 62 + (adev->pdev->revision == 0xa1) && 63 + (psp->securedisplay_context.context.bin_desc.fw_version >= 64 + 0x27000008)) { 64 65 adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0; 65 66 } 66 67 return err;
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 95 95 96 96 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 97 97 98 - switch (adev->ip_versions[MP0_HWIP][0]) { 98 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 99 99 case IP_VERSION(11, 0, 2): 100 100 case IP_VERSION(11, 0, 4): 101 101 err = psp_init_sos_microcode(psp, ucode_prefix);
+3 -3
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
··· 79 79 80 80 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 81 81 82 - switch (adev->ip_versions[MP0_HWIP][0]) { 82 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 83 83 case IP_VERSION(13, 0, 2): 84 84 err = psp_init_sos_microcode(psp, ucode_prefix); 85 85 if (err) ··· 181 181 { 182 182 struct amdgpu_device *adev = psp->adev; 183 183 184 - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) { 184 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { 185 185 psp_v13_0_wait_for_vmbx_ready(psp); 186 186 187 187 return psp_v13_0_wait_for_bootloader(psp); ··· 728 728 { 729 729 struct amdgpu_device *adev = psp->adev; 730 730 731 - if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 10)) { 731 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 10)) { 732 732 uint32_t reg_data; 733 733 /* MP1 fatal error: trigger PSP dram read to unhalt PSP 734 734 * during MP1 triggered sync flood.
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
··· 40 40 41 41 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix)); 42 42 43 - switch (adev->ip_versions[MP0_HWIP][0]) { 43 + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { 44 44 case IP_VERSION(13, 0, 4): 45 45 err = psp_init_toc_microcode(psp, ucode_prefix); 46 46 if (err)
+29 -19
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 469 469 470 470 static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) 471 471 { 472 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 472 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 473 473 case IP_VERSION(4, 0, 0): 474 474 soc15_program_register_sequence(adev, 475 475 golden_settings_sdma_4, ··· 539 539 * The only chips with SDMAv4 and ULV are VG10 and VG20. 540 540 * Server SKUs take a different hysteresis setting from other SKUs. 541 541 */ 542 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 542 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 543 543 case IP_VERSION(4, 0, 0): 544 544 if (adev->pdev->device == 0x6860) 545 545 break; ··· 578 578 int ret, i; 579 579 580 580 for (i = 0; i < adev->sdma.num_instances; i++) { 581 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || 582 - adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) { 581 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 582 + IP_VERSION(4, 2, 2) || 583 + amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 584 + IP_VERSION(4, 4, 0)) { 583 585 /* Acturus & Aldebaran will leverage the same FW memory 584 586 for every SDMA instance */ 585 587 ret = amdgpu_sdma_init_microcode(adev, 0, true); ··· 980 978 * Arcturus for the moment and firmware version 14 981 979 * and above. 
982 980 */ 983 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && 981 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 982 + IP_VERSION(4, 2, 2) && 984 983 adev->sdma.instance[i].fw_version >= 14) 985 984 WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable); 986 985 /* Extend page fault timeout to avoid interrupt storm */ ··· 1258 1255 if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA)) 1259 1256 return; 1260 1257 1261 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1258 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1262 1259 case IP_VERSION(4, 1, 0): 1263 1260 case IP_VERSION(4, 1, 1): 1264 1261 case IP_VERSION(4, 1, 2): ··· 1701 1698 { 1702 1699 uint fw_version = adev->sdma.instance[0].fw_version; 1703 1700 1704 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1701 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1705 1702 case IP_VERSION(4, 0, 0): 1706 1703 return fw_version >= 430; 1707 1704 case IP_VERSION(4, 0, 1): ··· 1726 1723 } 1727 1724 1728 1725 /* TODO: Page queue breaks driver reload under SRIOV */ 1729 - if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) && 1726 + if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 0, 0)) && 1730 1727 amdgpu_sriov_vf((adev))) 1731 1728 adev->sdma.has_page_queue = false; 1732 1729 else if (sdma_v4_0_fw_support_paging_queue(adev)) ··· 1826 1823 * On Arcturus, SDMA instance 5~7 has a different vmhub 1827 1824 * type(AMDGPU_MMHUB1). 
1828 1825 */ 1829 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) 1826 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 1827 + IP_VERSION(4, 2, 2) && 1828 + i >= 5) 1830 1829 ring->vm_hub = AMDGPU_MMHUB1(0); 1831 1830 else 1832 1831 ring->vm_hub = AMDGPU_MMHUB0(0); ··· 1848 1843 /* paging queue use same doorbell index/routing as gfx queue 1849 1844 * with 0x400 (4096 dwords) offset on second doorbell page 1850 1845 */ 1851 - if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) && 1852 - adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) { 1846 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >= 1847 + IP_VERSION(4, 0, 0) && 1848 + amdgpu_ip_version(adev, SDMA0_HWIP, 0) < 1849 + IP_VERSION(4, 2, 0)) { 1853 1850 ring->doorbell_index = 1854 1851 adev->doorbell_index.sdma_engine[i] << 1; 1855 1852 ring->doorbell_index += 0x400; ··· 1863 1856 (adev->doorbell_index.sdma_engine[i] + 1) << 1; 1864 1857 } 1865 1858 1866 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) 1859 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 1860 + IP_VERSION(4, 2, 2) && 1861 + i >= 5) 1867 1862 ring->vm_hub = AMDGPU_MMHUB1(0); 1868 1863 else 1869 1864 ring->vm_hub = AMDGPU_MMHUB0(0); ··· 1899 1890 amdgpu_ring_fini(&adev->sdma.instance[i].page); 1900 1891 } 1901 1892 1902 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || 1903 - adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) 1893 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 2, 2) || 1894 + amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 0)) 1904 1895 amdgpu_sdma_destroy_inst_ctx(adev, true); 1905 1896 else 1906 1897 amdgpu_sdma_destroy_inst_ctx(adev, false); ··· 2045 2036 amdgpu_fence_process(&adev->sdma.instance[instance].ring); 2046 2037 break; 2047 2038 case 1: 2048 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0)) 2039 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 2040 + IP_VERSION(4, 2, 0)) 2049 2041 
amdgpu_fence_process(&adev->sdma.instance[instance].page); 2050 2042 break; 2051 2043 case 2: 2052 2044 /* XXX compute */ 2053 2045 break; 2054 2046 case 3: 2055 - if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0)) 2047 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) != 2048 + IP_VERSION(4, 2, 0)) 2056 2049 amdgpu_fence_process(&adev->sdma.instance[instance].page); 2057 2050 break; 2058 2051 } ··· 2270 2259 { 2271 2260 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2272 2261 2273 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 2262 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 2274 2263 case IP_VERSION(4, 1, 0): 2275 2264 case IP_VERSION(4, 1, 1): 2276 2265 case IP_VERSION(4, 1, 2): ··· 2633 2622 2634 2623 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) 2635 2624 { 2636 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 2625 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 2637 2626 case IP_VERSION(4, 2, 0): 2638 2627 case IP_VERSION(4, 2, 2): 2639 2628 adev->sdma.ras = &sdma_v4_0_ras; ··· 2644 2633 default: 2645 2634 break; 2646 2635 } 2647 - 2648 2636 } 2649 2637 2650 2638 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
+4 -3
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
··· 132 132 int ret, i; 133 133 134 134 for (i = 0; i < adev->sdma.num_instances; i++) { 135 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) { 135 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 136 + IP_VERSION(4, 4, 2)) { 136 137 ret = amdgpu_sdma_init_microcode(adev, 0, true); 137 138 break; 138 139 } else { ··· 1232 1231 1233 1232 static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) 1234 1233 { 1235 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1234 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1236 1235 case IP_VERSION(4, 4, 2): 1237 1236 return false; 1238 1237 default: ··· 1402 1401 amdgpu_ring_fini(&adev->sdma.instance[i].page); 1403 1402 } 1404 1403 1405 - if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) 1404 + if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2)) 1406 1405 amdgpu_sdma_destroy_inst_ctx(adev, true); 1407 1406 else 1408 1407 amdgpu_sdma_destroy_inst_ctx(adev, false);
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 184 184 185 185 static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) 186 186 { 187 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 187 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 188 188 case IP_VERSION(5, 0, 0): 189 189 soc15_program_register_sequence(adev, 190 190 golden_settings_sdma_5, ··· 1697 1697 if (amdgpu_sriov_vf(adev)) 1698 1698 return 0; 1699 1699 1700 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1700 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1701 1701 case IP_VERSION(5, 0, 0): 1702 1702 case IP_VERSION(5, 0, 2): 1703 1703 case IP_VERSION(5, 0, 5):
+5 -4
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 1510 1510 static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev, 1511 1511 int i) 1512 1512 { 1513 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1513 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1514 1514 case IP_VERSION(5, 2, 1): 1515 1515 if (adev->sdma.instance[i].fw_version < 70) 1516 1516 return false; ··· 1575 1575 int i; 1576 1576 1577 1577 for (i = 0; i < adev->sdma.num_instances; i++) { 1578 - 1579 - if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1)) 1578 + if (adev->sdma.instance[i].fw_version < 70 && 1579 + amdgpu_ip_version(adev, SDMA0_HWIP, 0) == 1580 + IP_VERSION(5, 2, 1)) 1580 1581 adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS; 1581 1582 1582 1583 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { ··· 1606 1605 if (amdgpu_sriov_vf(adev)) 1607 1606 return 0; 1608 1607 1609 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1608 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1610 1609 case IP_VERSION(5, 2, 0): 1611 1610 case IP_VERSION(5, 2, 2): 1612 1611 case IP_VERSION(5, 2, 1):
+1 -2
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
··· 1246 1246 1247 1247 static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev) 1248 1248 { 1249 - switch (adev->ip_versions[SDMA0_HWIP][0]) { 1249 + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { 1250 1250 case IP_VERSION(6, 0, 3): 1251 1251 adev->sdma.ras = &sdma_v6_0_3_ras; 1252 1252 break; 1253 1253 default: 1254 1254 break; 1255 1255 } 1256 - 1257 1256 } 1258 1257 1259 1258 static int sdma_v6_0_early_init(void *handle)
+1 -1
drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c
··· 36 36 #if 0 37 37 struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; 38 38 39 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) && 39 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7) && 40 40 adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev)) 41 41 return true; 42 42 #endif
+13 -14
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 174 174 static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, 175 175 const struct amdgpu_video_codecs **codecs) 176 176 { 177 - if (adev->ip_versions[VCE_HWIP][0]) { 178 - switch (adev->ip_versions[VCE_HWIP][0]) { 177 + if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 178 + switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { 179 179 case IP_VERSION(4, 0, 0): 180 180 case IP_VERSION(4, 1, 0): 181 181 if (encode) ··· 187 187 return -EINVAL; 188 188 } 189 189 } else { 190 - switch (adev->ip_versions[UVD_HWIP][0]) { 190 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 191 191 case IP_VERSION(1, 0, 0): 192 192 case IP_VERSION(1, 0, 1): 193 193 if (encode) ··· 324 324 { 325 325 u32 reference_clock = adev->clock.spll.reference_freq; 326 326 327 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || 328 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) || 329 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6)) 327 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) || 328 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) || 329 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) 330 330 return 10000; 331 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || 332 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) 331 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) || 332 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1)) 333 333 return reference_clock / 4; 334 334 335 335 return reference_clock; ··· 523 523 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", 524 524 amdgpu_reset_method); 525 525 526 - switch (adev->ip_versions[MP1_HWIP][0]) { 526 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 527 527 case IP_VERSION(10, 0, 0): 528 528 case IP_VERSION(10, 0, 1): 529 529 case IP_VERSION(12, 0, 0): ··· 599 599 600 600 static bool soc15_supports_baco(struct amdgpu_device *adev) 601 601 { 602 - switch 
(adev->ip_versions[MP1_HWIP][0]) { 602 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 603 603 case IP_VERSION(9, 0, 0): 604 604 case IP_VERSION(11, 0, 2): 605 605 if (adev->asic_type == CHIP_VEGA20) { ··· 938 938 /* TODO: split the GC and PG flags based on the relevant IP version for which 939 939 * they are relevant. 940 940 */ 941 - switch (adev->ip_versions[GC_HWIP][0]) { 941 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 942 942 case IP_VERSION(9, 0, 1): 943 943 adev->asic_funcs = &soc15_asic_funcs; 944 944 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | ··· 1367 1367 if (amdgpu_sriov_vf(adev)) 1368 1368 return 0; 1369 1369 1370 - switch (adev->ip_versions[NBIO_HWIP][0]) { 1370 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 1371 1371 case IP_VERSION(6, 1, 0): 1372 1372 case IP_VERSION(6, 2, 0): 1373 1373 case IP_VERSION(7, 4, 0): ··· 1423 1423 1424 1424 adev->hdp.funcs->get_clock_gating_state(adev, flags); 1425 1425 1426 - if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) { 1427 - 1426 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) { 1428 1427 /* AMD_CG_SUPPORT_DRM_MGCG */ 1429 1428 data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); 1430 1429 if (!(data & 0x01000000))
+6 -6
drivers/gpu/drm/amd/amdgpu/soc21.c
··· 153 153 if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) 154 154 return -EINVAL; 155 155 156 - switch (adev->ip_versions[UVD_HWIP][0]) { 156 + switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { 157 157 case IP_VERSION(4, 0, 0): 158 158 case IP_VERSION(4, 0, 2): 159 159 case IP_VERSION(4, 0, 4): ··· 374 374 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", 375 375 amdgpu_reset_method); 376 376 377 - switch (adev->ip_versions[MP1_HWIP][0]) { 377 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 378 378 case IP_VERSION(13, 0, 0): 379 379 case IP_VERSION(13, 0, 7): 380 380 case IP_VERSION(13, 0, 10): ··· 448 448 449 449 static bool soc21_need_full_reset(struct amdgpu_device *adev) 450 450 { 451 - switch (adev->ip_versions[GC_HWIP][0]) { 451 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 452 452 case IP_VERSION(11, 0, 0): 453 453 return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); 454 454 case IP_VERSION(11, 0, 2): ··· 577 577 578 578 adev->rev_id = amdgpu_device_get_rev_id(adev); 579 579 adev->external_rev_id = 0xff; 580 - switch (adev->ip_versions[GC_HWIP][0]) { 580 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 581 581 case IP_VERSION(11, 0, 0): 582 582 adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG | 583 583 AMD_CG_SUPPORT_GFX_CGLS | ··· 843 843 { 844 844 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 845 845 846 - switch (adev->ip_versions[NBIO_HWIP][0]) { 846 + switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { 847 847 case IP_VERSION(4, 3, 0): 848 848 case IP_VERSION(4, 3, 1): 849 849 case IP_VERSION(7, 7, 0): ··· 865 865 { 866 866 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 867 867 868 - switch (adev->ip_versions[LSDMA_HWIP][0]) { 868 + switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) { 869 869 case IP_VERSION(6, 0, 0): 870 870 case IP_VERSION(6, 0, 2): 871 871 adev->lsdma.funcs->update_memory_power_gating(adev,
+5 -4
drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
··· 273 273 274 274 memcpy(set_hw_resources.mmhub_base, adev->reg_offset[MMHUB_HWIP][0], 275 275 sizeof(uint32_t) * 5); 276 - set_hw_resources.mmhub_version = adev->ip_versions[MMHUB_HWIP][0]; 276 + set_hw_resources.mmhub_version = amdgpu_ip_version(adev, MMHUB_HWIP, 0); 277 277 278 278 memcpy(set_hw_resources.osssys_base, adev->reg_offset[OSSSYS_HWIP][0], 279 279 sizeof(uint32_t) * 5); 280 - set_hw_resources.osssys_version = adev->ip_versions[OSSSYS_HWIP][0]; 280 + set_hw_resources.osssys_version = 281 + amdgpu_ip_version(adev, OSSSYS_HWIP, 0); 281 282 282 - set_hw_resources.vcn_version = adev->ip_versions[VCN_HWIP][0]; 283 - set_hw_resources.vpe_version = adev->ip_versions[VPE_HWIP][0]; 283 + set_hw_resources.vcn_version = amdgpu_ip_version(adev, VCN_HWIP, 0); 284 + set_hw_resources.vpe_version = amdgpu_ip_version(adev, VPE_HWIP, 0); 284 285 285 286 set_hw_resources.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr; 286 287 set_hw_resources.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;
+5 -4
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 187 187 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 188 188 (amdgpu_sriov_vf(adev) ? 2*j : 8*j); 189 189 190 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) 190 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0)) 191 191 ring->vm_hub = AMDGPU_MMHUB1(0); 192 192 else 193 193 ring->vm_hub = AMDGPU_MMHUB0(0); ··· 207 207 ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 208 208 (amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j)); 209 209 210 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) 210 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == 211 + IP_VERSION(2, 5, 0)) 211 212 ring->vm_hub = AMDGPU_MMHUB1(0); 212 213 else 213 214 ring->vm_hub = AMDGPU_MMHUB0(0); ··· 795 794 { 796 795 uint32_t tmp; 797 796 798 - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(2, 6, 0)) 797 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0)) 799 798 return; 800 799 801 800 tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK | ··· 1986 1985 1987 1986 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev) 1988 1987 { 1989 - switch (adev->ip_versions[VCN_HWIP][0]) { 1988 + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 1990 1989 case IP_VERSION(2, 6, 0): 1991 1990 adev->vcn.ras = &vcn_v2_6_ras; 1992 1991 break;
+9 -5
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
··· 100 100 /* both instances are harvested, disable the block */ 101 101 return -ENOENT; 102 102 103 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33)) 103 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == 104 + IP_VERSION(3, 0, 33)) 104 105 adev->vcn.num_enc_rings = 0; 105 106 else 106 107 adev->vcn.num_enc_rings = 2; ··· 228 227 cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); 229 228 fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED); 230 229 fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG; 231 - if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2)) 230 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2)) 232 231 fw_shared->smu_interface_info.smu_interface_type = 2; 233 - else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1)) 232 + else if (amdgpu_ip_version(adev, UVD_HWIP, 0) == 233 + IP_VERSION(3, 1, 1)) 234 234 fw_shared->smu_interface_info.smu_interface_type = 1; 235 235 236 236 if (amdgpu_vcnfw_log) ··· 1257 1255 fw_shared->rb.wptr = lower_32_bits(ring->wptr); 1258 1256 fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); 1259 1257 1260 - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { 1258 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != 1259 + IP_VERSION(3, 0, 33)) { 1261 1260 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); 1262 1261 ring = &adev->vcn.inst[i].ring_enc[0]; 1263 1262 WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); ··· 1631 1628 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, 1632 1629 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); 1633 1630 1634 - if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { 1631 + if (amdgpu_ip_version(adev, UVD_HWIP, 0) != 1632 + IP_VERSION(3, 0, 33)) { 1635 1633 /* Restore */ 1636 1634 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1637 1635 fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+4 -3
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
··· 169 169 fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ? 170 170 AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU; 171 171 172 - if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) { 172 + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == 173 + IP_VERSION(4, 0, 2)) { 173 174 fw_shared->present_flag_0 |= AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT; 174 175 fw_shared->drm_key_wa.method = 175 176 AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING; ··· 1850 1849 if (adev->vcn.harvest_config & (1 << i)) 1851 1850 continue; 1852 1851 1853 - if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 2)) 1852 + if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 2)) 1854 1853 vcn_v4_0_unified_ring_vm_funcs.secure_submission_supported = true; 1855 1854 1856 1855 adev->vcn.inst[i].ring_enc[0].funcs = ··· 2157 2156 2158 2157 static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev) 2159 2158 { 2160 - switch (adev->ip_versions[VCN_HWIP][0]) { 2159 + switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { 2161 2160 case IP_VERSION(4, 0, 0): 2162 2161 adev->vcn.ras = &vcn_v4_0_ras; 2163 2162 break;
+7 -7
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
··· 291 291 292 292 adev->nbio.funcs->ih_control(adev); 293 293 294 - if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 2, 1)) && 294 + if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 2, 1)) && 295 295 adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { 296 296 ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); 297 297 if (adev->irq.ih.use_bus_addr) { ··· 304 304 /* psp firmware won't program IH_CHICKEN for aldebaran 305 305 * driver needs to program it properly according to 306 306 * MC_SPACE type in IH_RB_CNTL */ 307 - if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) || 308 - (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) { 307 + if ((amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0)) || 308 + (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) { 309 309 ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN); 310 310 if (adev->irq.ih.use_bus_addr) { 311 311 ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, ··· 334 334 vega20_setup_retry_doorbell(adev->irq.retry_cam_doorbell_index)); 335 335 336 336 /* Enable IH Retry CAM */ 337 - if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0) || 338 - adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2)) 337 + if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 0) || 338 + amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2)) 339 339 WREG32_FIELD15(OSSSYS, 0, IH_RETRY_INT_CAM_CNTL_ALDEBARAN, 340 340 ENABLE, 1); 341 341 else ··· 537 537 return r; 538 538 539 539 if ((adev->flags & AMD_IS_APU) && 540 - (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) 540 + (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(4, 4, 2))) 541 541 use_bus_addr = false; 542 542 543 543 r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr); ··· 554 554 adev->irq.ih1.use_doorbell = true; 555 555 adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; 556 556 557 - if 
(adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) { 557 + if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) != IP_VERSION(4, 4, 2)) { 558 558 r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); 559 559 if (r) 560 560 return r;
+7 -5
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 65 65 66 66 static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) 67 67 { 68 - uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; 68 + uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0); 69 69 70 70 switch (sdma_version) { 71 71 case IP_VERSION(4, 0, 0):/* VEGA10 */ ··· 282 282 f2g = &gfx_v8_kfd2kgd; 283 283 break; 284 284 default: 285 - switch (adev->ip_versions[GC_HWIP][0]) { 285 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 286 286 /* Vega 10 */ 287 287 case IP_VERSION(9, 0, 1): 288 288 gfx_target_version = 90000; ··· 427 427 } 428 428 429 429 if (!f2g) { 430 - if (adev->ip_versions[GC_HWIP][0]) 431 - dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", 432 - adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); 430 + if (amdgpu_ip_version(adev, GC_HWIP, 0)) 431 + dev_err(kfd_device, 432 + "GC IP %06x %s not supported in kfd\n", 433 + amdgpu_ip_version(adev, GC_HWIP, 0), 434 + vf ? "VF" : ""); 433 435 else 434 436 dev_err(kfd_device, "%s %s not supported in kfd\n", 435 437 amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 1001 1001 void *r; 1002 1002 1003 1003 /* Page migration works on gfx9 or newer */ 1004 - if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1)) 1004 + if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1)) 1005 1005 return -EINVAL; 1006 1006 1007 1007 if (adev->gmc.is_app_apu)
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
··· 205 205 206 206 static inline bool pm_use_ext_eng(struct kfd_dev *dev) 207 207 { 208 - return dev->adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 2, 0); 208 + return amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) >= 209 + IP_VERSION(5, 2, 0); 209 210 } 210 211 211 212 static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 202 202 cache_policy_noncoherent 203 203 }; 204 204 205 - #define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0]) 205 + #define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0)) 206 206 #define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1))) 207 207 #define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\ 208 208 ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1196 1196 if (domain == SVM_RANGE_VRAM_DOMAIN) 1197 1197 bo_node = prange->svm_bo->node; 1198 1198 1199 - switch (node->adev->ip_versions[GC_HWIP][0]) { 1199 + switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) { 1200 1200 case IP_VERSION(9, 4, 1): 1201 1201 if (domain == SVM_RANGE_VRAM_DOMAIN) { 1202 1202 if (bo_node == node) {
+17 -17
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1173 1173 for (i = 0; i < fb_info->num_fb; ++i) 1174 1174 hw_params.fb[i] = &fb_info->fb[i]; 1175 1175 1176 - switch (adev->ip_versions[DCE_HWIP][0]) { 1176 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1177 1177 case IP_VERSION(3, 1, 3): 1178 1178 case IP_VERSION(3, 1, 4): 1179 1179 case IP_VERSION(3, 5, 0): ··· 1606 1606 1607 1607 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 1608 1608 1609 - switch (adev->ip_versions[DCE_HWIP][0]) { 1609 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1610 1610 case IP_VERSION(2, 1, 0): 1611 1611 switch (adev->dm.dmcub_fw_version) { 1612 1612 case 0: /* development */ ··· 1631 1631 init_data.flags.gpu_vm_support = true; 1632 1632 break; 1633 1633 default: 1634 - switch (adev->ip_versions[DCE_HWIP][0]) { 1634 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 1635 1635 case IP_VERSION(1, 0, 0): 1636 1636 case IP_VERSION(1, 0, 1): 1637 1637 /* enable S/G on PCO and RV2 */ ··· 2015 2015 return 0; 2016 2016 break; 2017 2017 default: 2018 - switch (adev->ip_versions[DCE_HWIP][0]) { 2018 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2019 2019 case IP_VERSION(2, 0, 2): 2020 2020 case IP_VERSION(2, 0, 3): 2021 2021 case IP_VERSION(2, 0, 0): ··· 2105 2105 enum dmub_status status; 2106 2106 int r; 2107 2107 2108 - switch (adev->ip_versions[DCE_HWIP][0]) { 2108 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2109 2109 case IP_VERSION(2, 1, 0): 2110 2110 dmub_asic = DMUB_ASIC_DCN21; 2111 2111 break; ··· 2477 2477 * therefore, this function apply to navi10/12/14 but not Renoir 2478 2478 * * 2479 2479 */ 2480 - switch (adev->ip_versions[DCE_HWIP][0]) { 2480 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 2481 2481 case IP_VERSION(2, 0, 2): 2482 2482 case IP_VERSION(2, 0, 0): 2483 2483 break; ··· 4429 4429 } 4430 4430 4431 4431 /* Use Outbox interrupt */ 4432 - switch (adev->ip_versions[DCE_HWIP][0]) { 4432 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4433 4433 case IP_VERSION(3, 0, 0): 4434 4434 case IP_VERSION(3, 
1, 2): 4435 4435 case IP_VERSION(3, 1, 3): ··· 4447 4447 break; 4448 4448 default: 4449 4449 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4450 - adev->ip_versions[DCE_HWIP][0]); 4450 + amdgpu_ip_version(adev, DCE_HWIP, 0)); 4451 4451 } 4452 4452 4453 4453 /* Determine whether to enable PSR support by default. */ 4454 4454 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4455 - switch (adev->ip_versions[DCE_HWIP][0]) { 4455 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4456 4456 case IP_VERSION(3, 1, 2): 4457 4457 case IP_VERSION(3, 1, 3): 4458 4458 case IP_VERSION(3, 1, 4): ··· 4470 4470 } 4471 4471 4472 4472 if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { 4473 - switch (adev->ip_versions[DCE_HWIP][0]) { 4473 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4474 4474 case IP_VERSION(3, 1, 4): 4475 4475 case IP_VERSION(3, 1, 5): 4476 4476 case IP_VERSION(3, 1, 6): ··· 4585 4585 } 4586 4586 break; 4587 4587 default: 4588 - switch (adev->ip_versions[DCE_HWIP][0]) { 4588 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4589 4589 case IP_VERSION(1, 0, 0): 4590 4590 case IP_VERSION(1, 0, 1): 4591 4591 case IP_VERSION(2, 0, 2): ··· 4611 4611 break; 4612 4612 default: 4613 4613 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 4614 - adev->ip_versions[DCE_HWIP][0]); 4614 + amdgpu_ip_version(adev, DCE_HWIP, 0)); 4615 4615 goto fail; 4616 4616 } 4617 4617 break; ··· 4694 4694 char *fw_name_dmub; 4695 4695 int r; 4696 4696 4697 - switch (adev->ip_versions[DCE_HWIP][0]) { 4697 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4698 4698 case IP_VERSION(2, 1, 0): 4699 4699 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 4700 4700 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 4701 4701 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 4702 4702 break; 4703 4703 case IP_VERSION(3, 0, 0): 4704 - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) 4704 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0)) 4705 4705 fw_name_dmub = 
FIRMWARE_SIENNA_CICHLID_DMUB; 4706 4706 else 4707 4707 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; ··· 4831 4831 break; 4832 4832 default: 4833 4833 4834 - switch (adev->ip_versions[DCE_HWIP][0]) { 4834 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 4835 4835 case IP_VERSION(2, 0, 2): 4836 4836 case IP_VERSION(3, 0, 0): 4837 4837 adev->mode_info.num_crtc = 6; ··· 4868 4868 break; 4869 4869 default: 4870 4870 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 4871 - adev->ip_versions[DCE_HWIP][0]); 4871 + amdgpu_ip_version(adev, DCE_HWIP, 0)); 4872 4872 return -EINVAL; 4873 4873 } 4874 4874 break; ··· 11002 11002 */ 11003 11003 bool check_seamless_boot_capability(struct amdgpu_device *adev) 11004 11004 { 11005 - switch (adev->ip_versions[DCE_HWIP][0]) { 11005 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 11006 11006 case IP_VERSION(3, 0, 1): 11007 11007 if (!adev->mman.keep_stolen_vga_memory) 11008 11008 return true;
+5 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 226 226 tiling_info->gfx9.num_rb_per_se = 227 227 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 228 228 tiling_info->gfx9.shaderEnable = 1; 229 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 229 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 230 230 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 231 231 } 232 232 ··· 669 669 case AMDGPU_FAMILY_YC: 670 670 case AMDGPU_FAMILY_GC_10_3_6: 671 671 case AMDGPU_FAMILY_GC_10_3_7: 672 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 672 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) 673 673 add_gfx10_3_modifiers(adev, mods, &size, &capacity); 674 674 else 675 675 add_gfx10_1_modifiers(adev, mods, &size, &capacity); ··· 1069 1069 * is to gesture the YouTube Android app into full screen 1070 1070 * on ChromeOS. 1071 1071 */ 1072 - if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 1073 - (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && 1072 + if (((amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 0)) || 1073 + (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(1, 0, 1))) && 1074 1074 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && 1075 1075 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) 1076 1076 return -EINVAL; ··· 1509 1509 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 1510 1510 supported_rotations); 1511 1511 1512 - if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) && 1512 + if (amdgpu_ip_version(dm->adev, DCE_HWIP, 0) > IP_VERSION(3, 0, 1) && 1513 1513 plane->type != DRM_PLANE_TYPE_CURSOR) 1514 1514 drm_plane_enable_fb_damage_clips(plane); 1515 1515
+6 -6
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 2024 2024 uint32_t mask, enum amdgpu_device_attr_states *states) 2025 2025 { 2026 2026 struct device_attribute *dev_attr = &attr->dev_attr; 2027 - uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0]; 2028 - uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 2027 + uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); 2028 + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 2029 2029 const char *attr_name = dev_attr->attr.name; 2030 2030 2031 2031 if (!(attr->flags & mask)) { ··· 2917 2917 char *buf) 2918 2918 { 2919 2919 struct amdgpu_device *adev = dev_get_drvdata(dev); 2920 - uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 2920 + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 2921 2921 2922 2922 if (gc_ver == IP_VERSION(10, 3, 1)) 2923 2923 return sysfs_emit(buf, "%s\n", ··· 3205 3205 struct device *dev = kobj_to_dev(kobj); 3206 3206 struct amdgpu_device *adev = dev_get_drvdata(dev); 3207 3207 umode_t effective_mode = attr->mode; 3208 - uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 3208 + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 3209 3209 uint32_t tmp; 3210 3210 3211 3211 /* under multi-vf mode, the hwmon attributes are all not supported */ ··· 4158 4158 4159 4159 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 4160 4160 { 4161 - uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0]; 4162 - uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 4161 + uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); 4162 + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 4163 4163 uint32_t value; 4164 4164 uint64_t value64 = 0; 4165 4165 uint32_t query = 0;
+12 -12
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 485 485 if (adev->asic_type == CHIP_VEGA20) 486 486 return false; 487 487 488 - if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0)) 488 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) 489 489 return true; 490 490 491 491 return false; ··· 603 603 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 604 604 smu->od_enabled = true; 605 605 606 - switch (adev->ip_versions[MP1_HWIP][0]) { 606 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 607 607 case IP_VERSION(11, 0, 0): 608 608 case IP_VERSION(11, 0, 5): 609 609 case IP_VERSION(11, 0, 9): ··· 775 775 } 776 776 } 777 777 778 - if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || 779 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) 778 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) || 779 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3))) 780 780 return 0; 781 781 782 782 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { ··· 1259 1259 uint64_t features_supported; 1260 1260 int ret = 0; 1261 1261 1262 - switch (adev->ip_versions[MP1_HWIP][0]) { 1262 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1263 1263 case IP_VERSION(11, 0, 7): 1264 1264 case IP_VERSION(11, 0, 11): 1265 1265 case IP_VERSION(11, 5, 0): ··· 1449 1449 int ret = 0; 1450 1450 1451 1451 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1452 - if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) { 1452 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) { 1453 1453 if (smu->ppt_funcs->load_microcode) { 1454 1454 ret = smu->ppt_funcs->load_microcode(smu); 1455 1455 if (ret) ··· 1549 1549 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) 1550 1550 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues. 
1551 1551 */ 1552 - switch (adev->ip_versions[MP1_HWIP][0]) { 1552 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1553 1553 case IP_VERSION(13, 0, 0): 1554 1554 case IP_VERSION(13, 0, 7): 1555 1555 case IP_VERSION(13, 0, 10): ··· 1570 1570 * properly. 1571 1571 */ 1572 1572 if (smu->uploading_custom_pp_table) { 1573 - switch (adev->ip_versions[MP1_HWIP][0]) { 1573 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1574 1574 case IP_VERSION(11, 0, 0): 1575 1575 case IP_VERSION(11, 0, 5): 1576 1576 case IP_VERSION(11, 0, 9): ··· 1590 1590 * on BACO in. Driver involvement is unnecessary. 1591 1591 */ 1592 1592 if (use_baco) { 1593 - switch (adev->ip_versions[MP1_HWIP][0]) { 1593 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1594 1594 case IP_VERSION(11, 0, 7): 1595 1595 case IP_VERSION(11, 0, 0): 1596 1596 case IP_VERSION(11, 0, 5): ··· 1607 1607 * for gpu reset and S0i3 cases. Driver involvement is unnecessary. 1608 1608 */ 1609 1609 if (amdgpu_in_reset(adev) || adev->in_s0ix) { 1610 - switch (adev->ip_versions[MP1_HWIP][0]) { 1610 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1611 1611 case IP_VERSION(13, 0, 4): 1612 1612 case IP_VERSION(13, 0, 11): 1613 1613 return 0; ··· 1634 1634 } 1635 1635 } 1636 1636 1637 - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && 1637 + if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) && 1638 1638 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) 1639 1639 adev->gfx.rlc.funcs->stop(adev); 1640 1640 ··· 2391 2391 } else { 2392 2392 switch (limit_level) { 2393 2393 case SMU_PPT_LIMIT_CURRENT: 2394 - switch (adev->ip_versions[MP1_HWIP][0]) { 2394 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2395 2395 case IP_VERSION(13, 0, 2): 2396 2396 case IP_VERSION(11, 0, 7): 2397 2397 case IP_VERSION(11, 0, 11):
+29 -17
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 345 345 346 346 /* DPM UCLK enablement should be skipped for navi10 A0 secure board */ 347 347 if (!(is_asic_secure(smu) && 348 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && 349 - (adev->rev_id == 0)) && 348 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) && 349 + (adev->rev_id == 0)) && 350 350 (adev->pm.pp_feature & PP_MCLK_DPM_MASK)) 351 351 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) 352 352 | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT) ··· 354 354 355 355 /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */ 356 356 if (is_asic_secure(smu) && 357 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && 357 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) && 358 358 (adev->rev_id == 0)) 359 359 *(uint64_t *)feature_mask &= 360 360 ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); ··· 916 916 return ret; 917 917 } 918 918 919 - switch (adev->ip_versions[MP1_HWIP][0]) { 919 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 920 920 case IP_VERSION(11, 0, 9): 921 921 if (smu_version > 0x00341C00) 922 922 ret = navi12_get_smu_metrics_data(smu, member, value); ··· 926 926 case IP_VERSION(11, 0, 0): 927 927 case IP_VERSION(11, 0, 5): 928 928 default: 929 - if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) || 930 - ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) 929 + if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == 930 + IP_VERSION(11, 0, 5)) && 931 + smu_version > 0x00351F00) || 932 + ((amdgpu_ip_version(adev, MP1_HWIP, 0) == 933 + IP_VERSION(11, 0, 0)) && 934 + smu_version > 0x002A3B00)) 931 935 ret = navi10_get_smu_metrics_data(smu, member, value); 932 936 else 933 937 ret = navi10_get_legacy_smu_metrics_data(smu, member, value); ··· 1716 1712 uint32_t sclk_freq; 1717 1713 1718 1714 pstate_table->gfxclk_pstate.min = gfx_table->min; 1719 - switch (adev->ip_versions[MP1_HWIP][0]) { 1715 + switch 
(amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1720 1716 case IP_VERSION(11, 0, 0): 1721 1717 switch (adev->pdev->revision) { 1722 1718 case 0xf0: /* XTX */ ··· 2758 2754 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) 2759 2755 return false; 2760 2756 2761 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) || 2762 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) 2757 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0) || 2758 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5)) 2763 2759 return true; 2764 2760 2765 2761 return false; ··· 2867 2863 * - PPSMC_MSG_SetDriverDummyTableDramAddrLow 2868 2864 * - PPSMC_MSG_GetUMCFWWA 2869 2865 */ 2870 - if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) || 2871 - ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) { 2866 + if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 0)) && 2867 + (pmfw_version >= 0x2a3500)) || 2868 + ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 5)) && 2869 + (pmfw_version >= 0x351D00))) { 2872 2870 ret = smu_cmn_send_smc_msg_with_param(smu, 2873 2871 SMU_MSG_GET_UMC_FW_WA, 2874 2872 0, ··· 2889 2883 return 0; 2890 2884 2891 2885 if (umc_fw_disable_cdr) { 2892 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) 2886 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == 2887 + IP_VERSION(11, 0, 0)) 2893 2888 return navi10_umc_hybrid_cdr_workaround(smu); 2894 2889 } else { 2895 2890 return navi10_set_dummy_pstates_table_location(smu); 2896 2891 } 2897 2892 } else { 2898 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) 2893 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == 2894 + IP_VERSION(11, 0, 0)) 2899 2895 return navi10_umc_hybrid_cdr_workaround(smu); 2900 2896 } 2901 2897 ··· 3364 3356 return ret; 3365 3357 } 3366 3358 3367 - switch (adev->ip_versions[MP1_HWIP][0]) { 3359 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 3368 
3360 case IP_VERSION(11, 0, 9): 3369 3361 if (smu_version > 0x00341C00) 3370 3362 ret = navi12_get_gpu_metrics(smu, table); ··· 3374 3366 case IP_VERSION(11, 0, 0): 3375 3367 case IP_VERSION(11, 0, 5): 3376 3368 default: 3377 - if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) || 3378 - ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) 3369 + if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == 3370 + IP_VERSION(11, 0, 5)) && 3371 + smu_version > 0x00351F00) || 3372 + ((amdgpu_ip_version(adev, MP1_HWIP, 0) == 3373 + IP_VERSION(11, 0, 0)) && 3374 + smu_version > 0x002A3B00)) 3379 3375 ret = navi10_get_gpu_metrics(smu, table); 3380 3376 else 3381 3377 ret = navi10_get_legacy_gpu_metrics(smu, table); ··· 3397 3385 uint32_t param = 0; 3398 3386 3399 3387 /* Navi12 does not support this */ 3400 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) 3388 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9)) 3401 3389 return 0; 3402 3390 3403 3391 /*
+37 -26
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 73 73 74 74 #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 75 75 76 - #define GET_PPTABLE_MEMBER(field, member) do {\ 77 - if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\ 78 - (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\ 79 - else\ 80 - (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\ 81 - } while(0) 76 + #define GET_PPTABLE_MEMBER(field, member) \ 77 + do { \ 78 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == \ 79 + IP_VERSION(11, 0, 13)) \ 80 + (*member) = (smu->smu_table.driver_pptable + \ 81 + offsetof(PPTable_beige_goby_t, field)); \ 82 + else \ 83 + (*member) = (smu->smu_table.driver_pptable + \ 84 + offsetof(PPTable_t, field)); \ 85 + } while (0) 82 86 83 87 /* STB FIFO depth is in 64bit units */ 84 88 #define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8 ··· 95 91 96 92 static int get_table_size(struct smu_context *smu) 97 93 { 98 - if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) 94 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13)) 99 95 return sizeof(PPTable_beige_goby_t); 100 96 else 101 97 return sizeof(PPTable_t); ··· 313 309 } 314 310 315 311 if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) && 316 - (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) && 312 + (amdgpu_ip_version(adev, MP1_HWIP, 0) > IP_VERSION(11, 0, 7)) && 317 313 !(adev->flags & AMD_IS_APU)) 318 314 *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT); 319 315 ··· 438 434 PPTable_beige_goby_t *ppt_beige_goby; 439 435 PPTable_t *ppt; 440 436 441 - if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) 437 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13)) 442 438 ppt_beige_goby = smu->smu_table.driver_pptable; 443 439 else 444 440 ppt = smu->smu_table.driver_pptable; ··· 451 447 if (ret) 452 448 return ret; 453 449 454 - if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) 450 + if (amdgpu_ip_version(smu->adev, 
MP1_HWIP, 0) == IP_VERSION(11, 0, 13)) 455 451 smu_memcpy_trailing(ppt_beige_goby, I2cControllers, BoardReserved, 456 452 smc_dpm_table, I2cControllers); 457 453 else ··· 729 725 uint32_t apu_percent = 0; 730 726 uint32_t dgpu_percent = 0; 731 727 732 - switch (smu->adev->ip_versions[MP1_HWIP][0]) { 728 + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { 733 729 case IP_VERSION(11, 0, 7): 734 730 if (smu->smc_fw_version >= 0x3A4900) 735 731 use_metrics_v3 = true; ··· 1389 1385 * and onwards SMU firmwares. 1390 1386 */ 1391 1387 smu_cmn_get_smc_version(smu, NULL, &smu_version); 1392 - if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && 1393 - (smu_version < 0x003a2900)) 1388 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == 1389 + IP_VERSION(11, 0, 7)) && 1390 + (smu_version < 0x003a2900)) 1394 1391 break; 1395 1392 1396 1393 size += sysfs_emit_at(buf, size, "OD_VDDGFX_OFFSET:\n"); ··· 1499 1494 pstate_table->socclk_pstate.min = soc_table->min; 1500 1495 pstate_table->socclk_pstate.peak = soc_table->max; 1501 1496 1502 - switch (adev->ip_versions[MP1_HWIP][0]) { 1497 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1503 1498 case IP_VERSION(11, 0, 7): 1504 1499 case IP_VERSION(11, 0, 11): 1505 1500 pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; ··· 1950 1945 *size = 4; 1951 1946 break; 1952 1947 case AMDGPU_PP_SENSOR_SS_APU_SHARE: 1953 - if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) { 1948 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) != 1949 + IP_VERSION(11, 0, 7)) { 1954 1950 ret = sienna_cichlid_get_smu_metrics_data(smu, 1955 1951 METRICS_SS_APU_SHARE, (uint32_t *)data); 1956 1952 *size = 4; ··· 1960 1954 } 1961 1955 break; 1962 1956 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 1963 - if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) { 1957 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) != 1958 + IP_VERSION(11, 0, 7)) { 1964 1959 ret = sienna_cichlid_get_smu_metrics_data(smu, 1965 1960 
METRICS_SS_DGPU_SHARE, (uint32_t *)data); 1966 1961 *size = 4; ··· 1985 1978 1986 1979 /* Only supported as of version 0.58.83.0 and only on Sienna Cichlid */ 1987 1980 if (smu->smc_fw_version < 0x3A5300 || 1988 - smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) 1981 + amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(11, 0, 7)) 1989 1982 return; 1990 1983 1991 1984 if (sienna_cichlid_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32)) ··· 2155 2148 od_table->UclkFmax); 2156 2149 2157 2150 smu_cmn_get_smc_version(smu, NULL, &smu_version); 2158 - if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && 2159 - (smu_version < 0x003a2900))) 2151 + if (!((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)) && 2152 + (smu_version < 0x003a2900))) 2160 2153 dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset); 2161 2154 } 2162 2155 ··· 2388 2381 * and onwards SMU firmwares. 2389 2382 */ 2390 2383 smu_cmn_get_smc_version(smu, NULL, &smu_version); 2391 - if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && 2392 - (smu_version < 0x003a2900)) { 2384 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == 2385 + IP_VERSION(11, 0, 7)) && 2386 + (smu_version < 0x003a2900)) { 2393 2387 dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported " 2394 2388 "only by 58.41.0 and onwards SMU firmwares!\n"); 2395 2389 return -EOPNOTSUPP; ··· 3113 3105 PPTable_t *pptable = table_context->driver_pptable; 3114 3106 int i; 3115 3107 3116 - if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) { 3108 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == 3109 + IP_VERSION(11, 0, 13)) { 3117 3110 beige_goby_dump_pptable(smu); 3118 3111 return; 3119 3112 } ··· 3919 3910 uint16_t average_gfx_activity; 3920 3911 int ret = 0; 3921 3912 3922 - switch (smu->adev->ip_versions[MP1_HWIP][0]) { 3913 + switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) { 3923 3914 case IP_VERSION(11, 0, 7): 3924 3915 if 
(smu->smc_fw_version >= 0x3A4900) 3925 3916 use_metrics_v3 = true; ··· 4035 4026 gpu_metrics->current_fan_speed = use_metrics_v3 ? metrics_v3->CurrFanSpeed : 4036 4027 use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed; 4037 4028 4038 - if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) || 4039 - ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) { 4029 + if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)) && 4030 + smu->smc_fw_version > 0x003A1E00) || 4031 + ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11)) && 4032 + smu->smc_fw_version > 0x00410400)) { 4040 4033 gpu_metrics->pcie_link_width = use_metrics_v3 ? metrics_v3->PcieWidth : 4041 4034 use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth; 4042 4035 gpu_metrics->pcie_link_speed = link_speed[use_metrics_v3 ? metrics_v3->PcieRate : ··· 4264 4253 table->gfx_activity_average_tau = 10; 4265 4254 table->mem_activity_average_tau = 10; 4266 4255 table->socket_power_average_tau = 100; 4267 - if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7)) 4256 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(11, 0, 7)) 4268 4257 table->apu_socket_power_average_tau = 100; 4269 4258 4270 4259 return 0;
+17 -15
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 101 101 struct amdgpu_firmware_info *ucode = NULL; 102 102 103 103 if (amdgpu_sriov_vf(adev) && 104 - ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) || 105 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)))) 104 + ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9)) || 105 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7)))) 106 106 return 0; 107 107 108 108 amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); ··· 213 213 if (smu->is_apu) 214 214 adev->pm.fw_version = smu_version; 215 215 216 - switch (adev->ip_versions[MP1_HWIP][0]) { 216 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 217 217 case IP_VERSION(11, 0, 0): 218 218 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; 219 219 break; ··· 246 246 break; 247 247 default: 248 248 dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", 249 - adev->ip_versions[MP1_HWIP][0]); 249 + amdgpu_ip_version(adev, MP1_HWIP, 0)); 250 250 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; 251 251 break; 252 252 } ··· 474 474 { 475 475 struct amdgpu_device *adev = smu->adev; 476 476 struct smu_power_context *smu_power = &smu->smu_power; 477 - size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ? 478 - sizeof(struct smu_11_5_power_context) : 479 - sizeof(struct smu_11_0_power_context); 477 + size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) == 478 + IP_VERSION(11, 5, 0) ? 
479 + sizeof(struct smu_11_5_power_context) : 480 + sizeof(struct smu_11_0_power_context); 480 481 481 482 smu_power->power_context = kzalloc(size, GFP_KERNEL); 482 483 if (!smu_power->power_context) ··· 732 731 /* Navy_Flounder/Dimgrey_Cavefish do not support to change 733 732 * display num currently 734 733 */ 735 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) || 736 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) || 737 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) || 738 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) 734 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) || 735 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) || 736 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) || 737 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13)) 739 738 return 0; 740 739 741 740 return smu_cmn_send_smc_msg_with_param(smu, ··· 1104 1103 int ret = 0; 1105 1104 struct amdgpu_device *adev = smu->adev; 1106 1105 1107 - switch (adev->ip_versions[MP1_HWIP][0]) { 1106 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1108 1107 case IP_VERSION(11, 0, 0): 1109 1108 case IP_VERSION(11, 0, 5): 1110 1109 case IP_VERSION(11, 0, 9): ··· 1592 1591 return 0; 1593 1592 1594 1593 if (state == SMU_BACO_STATE_ENTER) { 1595 - switch (adev->ip_versions[MP1_HWIP][0]) { 1594 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1596 1595 case IP_VERSION(11, 0, 7): 1597 1596 case IP_VERSION(11, 0, 11): 1598 1597 case IP_VERSION(11, 0, 12): ··· 1611 1610 default: 1612 1611 if (!ras || !adev->ras_enabled || 1613 1612 adev->gmc.xgmi.pending_reset) { 1614 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { 1613 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == 1614 + IP_VERSION(11, 0, 2)) { 1615 1615 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); 1616 1616 data |= 0x80000000; 1617 1617 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data); ··· 1896 1894 * Separate MCLK and SOCCLK soft 
min/max settings are not allowed 1897 1895 * on Arcturus. 1898 1896 */ 1899 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { 1897 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { 1900 1898 mclk_min = mclk_max = 0; 1901 1899 socclk_min = socclk_max = 0; 1902 1900 }
+6 -2
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 1198 1198 *value = metrics->AverageUvdActivity / 100; 1199 1199 break; 1200 1200 case METRICS_CURR_SOCKETPOWER: 1201 - if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) || 1202 - ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200))) 1201 + if (((amdgpu_ip_version(adev, MP1_HWIP, 0) == 1202 + IP_VERSION(12, 0, 1)) && 1203 + (adev->pm.fw_version >= 0x40000f)) || 1204 + ((amdgpu_ip_version(adev, MP1_HWIP, 0) == 1205 + IP_VERSION(12, 0, 0)) && 1206 + (adev->pm.fw_version >= 0x373200))) 1203 1207 *value = metrics->CurrentSocketPower << 8; 1204 1208 else 1205 1209 *value = (metrics->CurrentSocketPower << 8) / 1000;
+9 -9
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 196 196 if (!adev->scpm_enabled) 197 197 return 0; 198 198 199 - if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) || 200 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) || 201 - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))) 199 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 7)) || 200 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0)) || 201 + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10))) 202 202 return 0; 203 203 204 204 /* override pptable_id from driver parameter */ ··· 234 234 struct amdgpu_device *adev = smu->adev; 235 235 uint32_t mp1_fw_flags; 236 236 237 - switch (adev->ip_versions[MP1_HWIP][0]) { 237 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 238 238 case IP_VERSION(13, 0, 4): 239 239 case IP_VERSION(13, 0, 11): 240 240 mp1_fw_flags = RREG32_PCIE(MP1_Public | ··· 269 269 smu_minor = (smu_version >> 8) & 0xff; 270 270 smu_debug = (smu_version >> 0) & 0xff; 271 271 if (smu->is_apu || 272 - adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 6)) 272 + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) 273 273 adev->pm.fw_version = smu_version; 274 274 275 275 /* only for dGPU w/ SMU13*/ ··· 802 802 int ret = 0; 803 803 struct amdgpu_device *adev = smu->adev; 804 804 805 - switch (adev->ip_versions[MP1_HWIP][0]) { 805 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 806 806 case IP_VERSION(13, 0, 0): 807 807 case IP_VERSION(13, 0, 1): 808 808 case IP_VERSION(13, 0, 3): ··· 1780 1780 * Unset those settings for SMU 13.0.2. As soft limits settings 1781 1781 * for those clock domains are not supported. 
1782 1782 */ 1783 - if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) { 1783 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) { 1784 1784 mclk_min = mclk_max = 0; 1785 1785 socclk_min = socclk_max = 0; 1786 1786 vclk_min = vclk_max = 0; ··· 1927 1927 1928 1928 ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value); 1929 1929 /* SMU v13.0.2 FW returns 0 based max level, increment by one for it */ 1930 - if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value)) 1930 + if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value)) 1931 1931 ++(*value); 1932 1932 1933 1933 return ret; ··· 1987 1987 return ret; 1988 1988 } 1989 1989 1990 - if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) { 1990 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) { 1991 1991 ret = smu_v13_0_get_fine_grained_status(smu, 1992 1992 clk_type, 1993 1993 &single_dpm_table->is_fine_grained);
+4 -4
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2754 2754 uint32_t param; 2755 2755 struct amdgpu_device *adev = smu->adev; 2756 2756 2757 - switch (adev->ip_versions[MP1_HWIP][0]) { 2757 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 2758 2758 case IP_VERSION(13, 0, 0): 2759 2759 /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */ 2760 2760 smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param); ··· 2787 2787 int ret; 2788 2788 struct amdgpu_device *adev = smu->adev; 2789 2789 2790 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) 2790 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)) 2791 2791 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL); 2792 2792 else 2793 2793 return -EOPNOTSUPP; ··· 2799 2799 { 2800 2800 struct amdgpu_device *adev = smu->adev; 2801 2801 2802 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) 2802 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)) 2803 2803 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, 2804 2804 FEATURE_PWR_GFX, NULL); 2805 2805 else ··· 2863 2863 if (ret) 2864 2864 return -EOPNOTSUPP; 2865 2865 2866 - if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) && 2866 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10)) && 2867 2867 (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION)) 2868 2868 return ret; 2869 2869 else
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
··· 1144 1144 smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION; 1145 1145 smu->is_apu = true; 1146 1146 1147 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4)) 1147 + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 4)) 1148 1148 smu_v13_0_4_set_smu_mailbox_registers(smu); 1149 1149 else 1150 1150 smu_v13_0_set_smu_mailbox_registers(smu);
+9 -9
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 1024 1024 switch (clk_type) { 1025 1025 case SMU_GFXCLK: 1026 1026 case SMU_SCLK: 1027 - if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) 1027 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8)) 1028 1028 clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK; 1029 - if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || 1030 - (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) 1029 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) || 1030 + (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3)) 1031 1031 clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK; 1032 1032 break; 1033 1033 case SMU_SOCCLK: 1034 - if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) 1034 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8)) 1035 1035 clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK; 1036 - if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || 1037 - (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) 1036 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) || 1037 + (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3)) 1038 1038 clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK; 1039 1039 break; 1040 1040 case SMU_FCLK: 1041 - if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8)) 1041 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 8)) 1042 1042 clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK; 1043 - if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) || 1044 - (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3)) 1043 + if ((amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 1) || 1044 + (amdgpu_ip_version(adev, MP1_HWIP, 0)) == IP_VERSION(13, 0, 3)) 1045 1045 clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK; 1046 1046 break; 1047 1047 default: