Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd: Use newly added interrupt source defs for VI v3.

v2: Rebase
v3: Use defines for CP_SQ and CP_ECC_ERROR interrupts.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Andrey Grodzovsky; committed by Alex Deucher.
091aec0b 530e7a66

+46 -26
+4 -2
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 41 41 #include "gmc/gmc_8_1_d.h" 42 42 #include "gmc/gmc_8_1_sh_mask.h" 43 43 44 + #include "ivsrcid/ivsrcid_vislands30.h" 45 + 44 46 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); 45 47 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); 46 48 ··· 2739 2737 return r; 2740 2738 } 2741 2739 2742 - for (i = 8; i < 20; i += 2) { 2740 + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { 2743 2741 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2744 2742 if (r) 2745 2743 return r; 2746 2744 } 2747 2745 2748 2746 /* HPD hotplug */ 2749 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2747 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 2750 2748 if (r) 2751 2749 return r; 2752 2750
+4 -2
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 41 41 #include "gmc/gmc_8_1_d.h" 42 42 #include "gmc/gmc_8_1_sh_mask.h" 43 43 44 + #include "ivsrcid/ivsrcid_vislands30.h" 45 + 44 46 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); 45 47 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); 46 48 ··· 2860 2858 return r; 2861 2859 } 2862 2860 2863 - for (i = 8; i < 20; i += 2) { 2861 + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { 2864 2862 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2865 2863 if (r) 2866 2864 return r; 2867 2865 } 2868 2866 2869 2867 /* HPD hotplug */ 2870 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2868 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 2871 2869 if (r) 2872 2870 return r; 2873 2871
+2 -1
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
··· 36 36 #include "dce_v10_0.h" 37 37 #include "dce_v11_0.h" 38 38 #include "dce_virtual.h" 39 + #include "ivsrcid/ivsrcid_vislands30.h" 39 40 40 41 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 41 42 ··· 379 378 int r, i; 380 379 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 381 380 382 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq); 381 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq); 383 382 if (r) 384 383 return r; 385 384
+8 -6
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 51 51 52 52 #include "smu/smu_7_1_3_d.h" 53 53 54 + #include "ivsrcid/ivsrcid_vislands30.h" 55 + 54 56 #define GFX8_NUM_GFX_RINGS 1 55 57 #define GFX8_MEC_HPD_SIZE 2048 56 58 ··· 2049 2047 adev->gfx.mec.num_queue_per_pipe = 8; 2050 2048 2051 2049 /* KIQ event */ 2052 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq); 2050 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq); 2053 2051 if (r) 2054 2052 return r; 2055 2053 2056 2054 /* EOP Event */ 2057 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); 2055 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq); 2058 2056 if (r) 2059 2057 return r; 2060 2058 2061 2059 /* Privileged reg */ 2062 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, 2060 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT, 2063 2061 &adev->gfx.priv_reg_irq); 2064 2062 if (r) 2065 2063 return r; 2066 2064 2067 2065 /* Privileged inst */ 2068 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, 2066 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT, 2069 2067 &adev->gfx.priv_inst_irq); 2070 2068 if (r) 2071 2069 return r; 2072 2070 2073 2071 /* Add CP EDC/ECC irq */ 2074 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 197, 2072 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR, 2075 2073 &adev->gfx.cp_ecc_error_irq); 2076 2074 if (r) 2077 2075 return r; 2078 2076 2079 2077 /* SQ interrupts. */ 2080 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 239, 2078 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG, 2081 2079 &adev->gfx.sq_irq); 2082 2080 if (r) { 2083 2081 DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+4 -2
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 43 43 44 44 #include "amdgpu_atombios.h" 45 45 46 + #include "ivsrcid/ivsrcid_vislands30.h" 47 + 46 48 static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); 47 49 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 48 50 static int gmc_v7_0_wait_for_idle(void *handle); ··· 998 996 adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); 999 997 } 1000 998 1001 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); 999 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); 1002 1000 if (r) 1003 1001 return r; 1004 1002 1005 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); 1003 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); 1006 1004 if (r) 1007 1005 return r; 1008 1006
+4 -2
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 44 44 45 45 #include "amdgpu_atombios.h" 46 46 47 + #include "ivsrcid/ivsrcid_vislands30.h" 48 + 47 49 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev); 48 50 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 49 51 static int gmc_v8_0_wait_for_idle(void *handle); ··· 1102 1100 adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp); 1103 1101 } 1104 1102 1105 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); 1103 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); 1106 1104 if (r) 1107 1105 return r; 1108 1106 1109 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); 1107 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); 1110 1108 if (r) 1111 1109 return r; 1112 1110
+4 -2
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 44 44 45 45 #include "iceland_sdma_pkt_open.h" 46 46 47 + #include "ivsrcid/ivsrcid_vislands30.h" 48 + 47 49 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev); 48 50 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev); 49 51 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev); ··· 898 896 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 899 897 900 898 /* SDMA trap event */ 901 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, 899 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, 902 900 &adev->sdma.trap_irq); 903 901 if (r) 904 902 return r; ··· 910 908 return r; 911 909 912 910 /* SDMA Privileged inst */ 913 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247, 911 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, 914 912 &adev->sdma.illegal_inst_irq); 915 913 if (r) 916 914 return r;
+4 -2
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
··· 44 44 45 45 #include "tonga_sdma_pkt_open.h" 46 46 47 + #include "ivsrcid/ivsrcid_vislands30.h" 48 + 47 49 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev); 48 50 static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev); 49 51 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev); ··· 1177 1175 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1178 1176 1179 1177 /* SDMA trap event */ 1180 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, 1178 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, 1181 1179 &adev->sdma.trap_irq); 1182 1180 if (r) 1183 1181 return r; ··· 1189 1187 return r; 1190 1188 1191 1189 /* SDMA Privileged inst */ 1192 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247, 1190 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, 1193 1191 &adev->sdma.illegal_inst_irq); 1194 1192 if (r) 1195 1193 return r;
+2 -1
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 35 35 #include "vi.h" 36 36 #include "smu/smu_7_1_2_d.h" 37 37 #include "smu/smu_7_1_2_sh_mask.h" 38 + #include "ivsrcid/ivsrcid_vislands30.h" 38 39 39 40 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); 40 41 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); ··· 105 104 int r; 106 105 107 106 /* UVD TRAP */ 108 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); 107 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); 109 108 if (r) 110 109 return r; 111 110
+3 -2
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 36 36 #include "bif/bif_5_1_d.h" 37 37 #include "gmc/gmc_8_1_d.h" 38 38 #include "vi.h" 39 + #include "ivsrcid/ivsrcid_vislands30.h" 39 40 40 41 /* Polaris10/11/12 firmware version */ 41 42 #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8)) ··· 401 400 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 402 401 403 402 /* UVD TRAP */ 404 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); 403 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); 405 404 if (r) 406 405 return r; 407 406 408 407 /* UVD ENC TRAP */ 409 408 if (uvd_v6_0_enc_support(adev)) { 410 409 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 411 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq); 410 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq); 412 411 if (r) 413 412 return r; 414 413 }
+2 -1
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 39 39 #include "smu/smu_7_1_2_sh_mask.h" 40 40 #include "gca/gfx_8_0_d.h" 41 41 #include "gca/gfx_8_0_sh_mask.h" 42 + #include "ivsrcid/ivsrcid_vislands30.h" 42 43 43 44 44 45 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 ··· 423 422 int r, i; 424 423 425 424 /* VCE */ 426 - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq); 425 + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq); 427 426 if (r) 428 427 return r; 429 428
+5 -3
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 48 48 #include "processpptables.h" 49 49 #include "pp_thermal.h" 50 50 51 + #include "ivsrcid/ivsrcid_vislands30.h" 52 + 51 53 #define MC_CG_ARB_FREQ_F0 0x0a 52 54 #define MC_CG_ARB_FREQ_F1 0x0b 53 55 #define MC_CG_ARB_FREQ_F2 0x0c ··· 4107 4105 4108 4106 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4109 4107 AMDGPU_IH_CLIENTID_LEGACY, 4110 - 230, 4108 + VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, 4111 4109 source); 4112 4110 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4113 4111 AMDGPU_IH_CLIENTID_LEGACY, 4114 - 231, 4112 + VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, 4115 4113 source); 4116 4114 4117 4115 /* Register CTF(GPIO_19) interrupt */ 4118 4116 amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), 4119 4117 AMDGPU_IH_CLIENTID_LEGACY, 4120 - 83, 4118 + VISLANDS30_IV_SRCID_GPIO_19, 4121 4119 source); 4122 4120 4123 4121 return 0;