Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: update the handle ptr in is_idle

Update the void *handle parameter to a struct amdgpu_ip_block
pointer for all is_idle function pointers.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Sunil Khatri and committed by
Alex Deucher
7dc34054 cb0de06d

+148 -148
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 579 579 return 0; 580 580 } 581 581 582 - static bool acp_is_idle(void *handle) 582 + static bool acp_is_idle(struct amdgpu_ip_block *ip_block) 583 583 { 584 584 return true; 585 585 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
··· 124 124 return 0; 125 125 } 126 126 127 - static bool isp_is_idle(void *handle) 127 + static bool isp_is_idle(struct amdgpu_ip_block *ip_block) 128 128 { 129 129 return true; 130 130 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 627 627 return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev)); 628 628 } 629 629 630 - static bool amdgpu_vkms_is_idle(void *handle) 630 + static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block) 631 631 { 632 632 return true; 633 633 }
+1 -1
drivers/gpu/drm/amd/amdgpu/cik.c
··· 2148 2148 return cik_common_hw_init(ip_block); 2149 2149 } 2150 2150 2151 - static bool cik_common_is_idle(void *handle) 2151 + static bool cik_common_is_idle(struct amdgpu_ip_block *ip_block) 2152 2152 { 2153 2153 return true; 2154 2154 }
+2 -2
drivers/gpu/drm/amd/amdgpu/cik_ih.c
··· 345 345 return cik_ih_hw_init(ip_block); 346 346 } 347 347 348 - static bool cik_ih_is_idle(void *handle) 348 + static bool cik_ih_is_idle(struct amdgpu_ip_block *ip_block) 349 349 { 350 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 350 + struct amdgpu_device *adev = ip_block->adev; 351 351 u32 tmp = RREG32(mmSRBM_STATUS); 352 352 353 353 if (tmp & SRBM_STATUS__IH_BUSY_MASK)
+2 -2
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
··· 1025 1025 return cik_sdma_hw_init(ip_block); 1026 1026 } 1027 1027 1028 - static bool cik_sdma_is_idle(void *handle) 1028 + static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block) 1029 1029 { 1030 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1030 + struct amdgpu_device *adev = ip_block->adev; 1031 1031 u32 tmp = RREG32(mmSRBM_STATUS2); 1032 1032 1033 1033 if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+2 -2
drivers/gpu/drm/amd/amdgpu/cz_ih.c
··· 341 341 return cz_ih_hw_init(ip_block); 342 342 } 343 343 344 - static bool cz_ih_is_idle(void *handle) 344 + static bool cz_ih_is_idle(struct amdgpu_ip_block *ip_block) 345 345 { 346 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 346 + struct amdgpu_device *adev = ip_block->adev; 347 347 u32 tmp = RREG32(mmSRBM_STATUS); 348 348 349 349 if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 2970 2970 return amdgpu_display_resume_helper(adev); 2971 2971 } 2972 2972 2973 - static bool dce_v10_0_is_idle(void *handle) 2973 + static bool dce_v10_0_is_idle(struct amdgpu_ip_block *ip_block) 2974 2974 { 2975 2975 return true; 2976 2976 }
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 3108 3108 return amdgpu_display_resume_helper(adev); 3109 3109 } 3110 3110 3111 - static bool dce_v11_0_is_idle(void *handle) 3111 + static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block) 3112 3112 { 3113 3113 return true; 3114 3114 }
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 2865 2865 return amdgpu_display_resume_helper(adev); 2866 2866 } 2867 2867 2868 - static bool dce_v6_0_is_idle(void *handle) 2868 + static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 2869 2869 { 2870 2870 return true; 2871 2871 }
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 2887 2887 return amdgpu_display_resume_helper(adev); 2888 2888 } 2889 2889 2890 - static bool dce_v8_0_is_idle(void *handle) 2890 + static bool dce_v8_0_is_idle(struct amdgpu_ip_block *ip_block) 2891 2891 { 2892 2892 return true; 2893 2893 }
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 7583 7583 return gfx_v10_0_hw_init(ip_block); 7584 7584 } 7585 7585 7586 - static bool gfx_v10_0_is_idle(void *handle) 7586 + static bool gfx_v10_0_is_idle(struct amdgpu_ip_block *ip_block) 7587 7587 { 7588 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7588 + struct amdgpu_device *adev = ip_block->adev; 7589 7589 7590 7590 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS), 7591 7591 GRBM_STATUS, GUI_ACTIVE))
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 4787 4787 return gfx_v11_0_hw_init(ip_block); 4788 4788 } 4789 4789 4790 - static bool gfx_v11_0_is_idle(void *handle) 4790 + static bool gfx_v11_0_is_idle(struct amdgpu_ip_block *ip_block) 4791 4791 { 4792 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4792 + struct amdgpu_device *adev = ip_block->adev; 4793 4793 4794 4794 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 4795 4795 GRBM_STATUS, GUI_ACTIVE))
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 3695 3695 return gfx_v12_0_hw_init(ip_block); 3696 3696 } 3697 3697 3698 - static bool gfx_v12_0_is_idle(void *handle) 3698 + static bool gfx_v12_0_is_idle(struct amdgpu_ip_block *ip_block) 3699 3699 { 3700 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3700 + struct amdgpu_device *adev = ip_block->adev; 3701 3701 3702 3702 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS), 3703 3703 GRBM_STATUS, GUI_ACTIVE))
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 3167 3167 return gfx_v6_0_hw_init(ip_block); 3168 3168 } 3169 3169 3170 - static bool gfx_v6_0_is_idle(void *handle) 3170 + static bool gfx_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 3171 3171 { 3172 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3172 + struct amdgpu_device *adev = ip_block->adev; 3173 3173 3174 3174 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) 3175 3175 return false; ··· 3183 3183 struct amdgpu_device *adev = ip_block->adev; 3184 3184 3185 3185 for (i = 0; i < adev->usec_timeout; i++) { 3186 - if (gfx_v6_0_is_idle(adev)) 3186 + if (gfx_v6_0_is_idle(ip_block)) 3187 3187 return 0; 3188 3188 udelay(1); 3189 3189 }
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 4515 4515 return gfx_v7_0_hw_init(ip_block); 4516 4516 } 4517 4517 4518 - static bool gfx_v7_0_is_idle(void *handle) 4518 + static bool gfx_v7_0_is_idle(struct amdgpu_ip_block *ip_block) 4519 4519 { 4520 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4520 + struct amdgpu_device *adev = ip_block->adev; 4521 4521 4522 4522 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) 4523 4523 return false;
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 4851 4851 return r; 4852 4852 } 4853 4853 4854 - static bool gfx_v8_0_is_idle(void *handle) 4854 + static bool gfx_v8_0_is_idle(struct amdgpu_ip_block *ip_block) 4855 4855 { 4856 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4856 + struct amdgpu_device *adev = ip_block->adev; 4857 4857 4858 4858 if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE) 4859 4859 || RREG32(mmGRBM_STATUS2) != 0x8) ··· 4892 4892 struct amdgpu_device *adev = ip_block->adev; 4893 4893 4894 4894 for (i = 0; i < adev->usec_timeout; i++) { 4895 - if (gfx_v8_0_is_idle(adev)) 4895 + if (gfx_v8_0_is_idle(ip_block)) 4896 4896 return 0; 4897 4897 4898 4898 udelay(1);
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 4110 4110 return gfx_v9_0_hw_init(ip_block); 4111 4111 } 4112 4112 4113 - static bool gfx_v9_0_is_idle(void *handle) 4113 + static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block) 4114 4114 { 4115 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4115 + struct amdgpu_device *adev = ip_block->adev; 4116 4116 4117 4117 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS), 4118 4118 GRBM_STATUS, GUI_ACTIVE)) ··· 4127 4127 struct amdgpu_device *adev = ip_block->adev; 4128 4128 4129 4129 for (i = 0; i < adev->usec_timeout; i++) { 4130 - if (gfx_v9_0_is_idle(adev)) 4130 + if (gfx_v9_0_is_idle(ip_block)) 4131 4131 return 0; 4132 4132 udelay(1); 4133 4133 }
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 2408 2408 return gfx_v9_4_3_hw_init(ip_block); 2409 2409 } 2410 2410 2411 - static bool gfx_v9_4_3_is_idle(void *handle) 2411 + static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block) 2412 2412 { 2413 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2413 + struct amdgpu_device *adev = ip_block->adev; 2414 2414 int i, num_xcc; 2415 2415 2416 2416 num_xcc = NUM_XCC(adev->gfx.xcc_mask); ··· 2428 2428 struct amdgpu_device *adev = ip_block->adev; 2429 2429 2430 2430 for (i = 0; i < adev->usec_timeout; i++) { 2431 - if (gfx_v9_4_3_is_idle(adev)) 2431 + if (gfx_v9_4_3_is_idle(ip_block)) 2432 2432 return 0; 2433 2433 udelay(1); 2434 2434 }
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 1076 1076 return 0; 1077 1077 } 1078 1078 1079 - static bool gmc_v10_0_is_idle(void *handle) 1079 + static bool gmc_v10_0_is_idle(struct amdgpu_ip_block *ip_block) 1080 1080 { 1081 1081 /* MC is always ready in GMC v10.*/ 1082 1082 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 987 987 return 0; 988 988 } 989 989 990 - static bool gmc_v11_0_is_idle(void *handle) 990 + static bool gmc_v11_0_is_idle(struct amdgpu_ip_block *ip_block) 991 991 { 992 992 /* MC is always ready in GMC v11.*/ 993 993 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 984 984 return 0; 985 985 } 986 986 987 - static bool gmc_v12_0_is_idle(void *handle) 987 + static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block) 988 988 { 989 989 /* MC is always ready in GMC v11.*/ 990 990 return true;
+3 -3
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
··· 957 957 return 0; 958 958 } 959 959 960 - static bool gmc_v6_0_is_idle(void *handle) 960 + static bool gmc_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 961 961 { 962 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 962 + struct amdgpu_device *adev = ip_block->adev; 963 963 964 964 u32 tmp = RREG32(mmSRBM_STATUS); 965 965 ··· 976 976 struct amdgpu_device *adev = ip_block->adev; 977 977 978 978 for (i = 0; i < adev->usec_timeout; i++) { 979 - if (gmc_v6_0_is_idle(adev)) 979 + if (gmc_v6_0_is_idle(ip_block)) 980 980 return 0; 981 981 udelay(1); 982 982 }
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 1142 1142 return 0; 1143 1143 } 1144 1144 1145 - static bool gmc_v7_0_is_idle(void *handle) 1145 + static bool gmc_v7_0_is_idle(struct amdgpu_ip_block *ip_block) 1146 1146 { 1147 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1147 + struct amdgpu_device *adev = ip_block->adev; 1148 1148 u32 tmp = RREG32(mmSRBM_STATUS); 1149 1149 1150 1150 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+2 -2
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
··· 1263 1263 return 0; 1264 1264 } 1265 1265 1266 - static bool gmc_v8_0_is_idle(void *handle) 1266 + static bool gmc_v8_0_is_idle(struct amdgpu_ip_block *ip_block) 1267 1267 { 1268 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1268 + struct amdgpu_device *adev = ip_block->adev; 1269 1269 u32 tmp = RREG32(mmSRBM_STATUS); 1270 1270 1271 1271 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 2543 2543 return 0; 2544 2544 } 2545 2545 2546 - static bool gmc_v9_0_is_idle(void *handle) 2546 + static bool gmc_v9_0_is_idle(struct amdgpu_ip_block *ip_block) 2547 2547 { 2548 2548 /* MC is always ready in GMC v9.*/ 2549 2549 return true;
+2 -2
drivers/gpu/drm/amd/amdgpu/iceland_ih.c
··· 335 335 return iceland_ih_hw_init(ip_block); 336 336 } 337 337 338 - static bool iceland_ih_is_idle(void *handle) 338 + static bool iceland_ih_is_idle(struct amdgpu_ip_block *ip_block) 339 339 { 340 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 340 + struct amdgpu_device *adev = ip_block->adev; 341 341 u32 tmp = RREG32(mmSRBM_STATUS); 342 342 343 343 if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+1 -1
drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
··· 652 652 return ih_v6_0_hw_init(ip_block); 653 653 } 654 654 655 - static bool ih_v6_0_is_idle(void *handle) 655 + static bool ih_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 656 656 { 657 657 /* todo */ 658 658 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
··· 631 631 return ih_v6_1_hw_init(ip_block); 632 632 } 633 633 634 - static bool ih_v6_1_is_idle(void *handle) 634 + static bool ih_v6_1_is_idle(struct amdgpu_ip_block *ip_block) 635 635 { 636 636 /* todo */ 637 637 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
··· 621 621 return ih_v7_0_hw_init(ip_block); 622 622 } 623 623 624 - static bool ih_v7_0_is_idle(void *handle) 624 + static bool ih_v7_0_is_idle(struct amdgpu_ip_block *ip_block) 625 625 { 626 626 /* todo */ 627 627 return true;
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
··· 680 680 } 681 681 } 682 682 683 - static bool jpeg_v2_0_is_idle(void *handle) 683 + static bool jpeg_v2_0_is_idle(struct amdgpu_ip_block *ip_block) 684 684 { 685 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 685 + struct amdgpu_device *adev = ip_block->adev; 686 686 687 687 return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) & 688 688 UVD_JRBC_STATUS__RB_JOB_DONE_MASK) == ··· 707 707 bool enable = (state == AMD_CG_STATE_GATE); 708 708 709 709 if (enable) { 710 - if (!jpeg_v2_0_is_idle(adev)) 710 + if (!jpeg_v2_0_is_idle(ip_block)) 711 711 return -EBUSY; 712 712 jpeg_v2_0_enable_clock_gating(adev); 713 713 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
··· 515 515 amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14))); 516 516 } 517 517 518 - static bool jpeg_v2_5_is_idle(void *handle) 518 + static bool jpeg_v2_5_is_idle(struct amdgpu_ip_block *ip_block) 519 519 { 520 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 520 + struct amdgpu_device *adev = ip_block->adev; 521 521 int i, ret = 1; 522 522 523 523 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { ··· 563 563 continue; 564 564 565 565 if (enable) { 566 - if (!jpeg_v2_5_is_idle(adev)) 566 + if (!jpeg_v2_5_is_idle(ip_block)) 567 567 return -EBUSY; 568 568 jpeg_v2_5_enable_clock_gating(adev, i); 569 569 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
··· 470 470 } 471 471 } 472 472 473 - static bool jpeg_v3_0_is_idle(void *handle) 473 + static bool jpeg_v3_0_is_idle(struct amdgpu_ip_block *ip_block) 474 474 { 475 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 475 + struct amdgpu_device *adev = ip_block->adev; 476 476 int ret = 1; 477 477 478 478 ret &= (((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) & ··· 498 498 bool enable = state == AMD_CG_STATE_GATE; 499 499 500 500 if (enable) { 501 - if (!jpeg_v3_0_is_idle(adev)) 501 + if (!jpeg_v3_0_is_idle(ip_block)) 502 502 return -EBUSY; 503 503 jpeg_v3_0_enable_clock_gating(adev); 504 504 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 630 630 } 631 631 } 632 632 633 - static bool jpeg_v4_0_is_idle(void *handle) 633 + static bool jpeg_v4_0_is_idle(struct amdgpu_ip_block *ip_block) 634 634 { 635 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 635 + struct amdgpu_device *adev = ip_block->adev; 636 636 int ret = 1; 637 637 638 638 ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) & ··· 658 658 bool enable = state == AMD_CG_STATE_GATE; 659 659 660 660 if (enable) { 661 - if (!jpeg_v4_0_is_idle(adev)) 661 + if (!jpeg_v4_0_is_idle(ip_block)) 662 662 return -EBUSY; 663 663 jpeg_v4_0_enable_clock_gating(adev); 664 664 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
··· 960 960 } 961 961 } 962 962 963 - static bool jpeg_v4_0_3_is_idle(void *handle) 963 + static bool jpeg_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block) 964 964 { 965 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 965 + struct amdgpu_device *adev = ip_block->adev; 966 966 bool ret = false; 967 967 int i, j; 968 968 ··· 1004 1004 1005 1005 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 1006 1006 if (enable) { 1007 - if (!jpeg_v4_0_3_is_idle(adev)) 1007 + if (!jpeg_v4_0_3_is_idle(ip_block)) 1008 1008 return -EBUSY; 1009 1009 jpeg_v4_0_3_enable_clock_gating(adev, i); 1010 1010 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
··· 648 648 } 649 649 } 650 650 651 - static bool jpeg_v4_0_5_is_idle(void *handle) 651 + static bool jpeg_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block) 652 652 { 653 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 653 + struct amdgpu_device *adev = ip_block->adev; 654 654 int i, ret = 1; 655 655 656 656 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { ··· 693 693 continue; 694 694 695 695 if (enable) { 696 - if (!jpeg_v4_0_5_is_idle(adev)) 696 + if (!jpeg_v4_0_5_is_idle(ip_block)) 697 697 return -EBUSY; 698 698 699 699 jpeg_v4_0_5_enable_clock_gating(adev, i);
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
··· 559 559 } 560 560 } 561 561 562 - static bool jpeg_v5_0_0_is_idle(void *handle) 562 + static bool jpeg_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block) 563 563 { 564 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 564 + struct amdgpu_device *adev = ip_block->adev; 565 565 int ret = 1; 566 566 567 567 ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) & ··· 587 587 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 588 588 589 589 if (enable) { 590 - if (!jpeg_v5_0_0_is_idle(adev)) 590 + if (!jpeg_v5_0_0_is_idle(ip_block)) 591 591 return -EBUSY; 592 592 jpeg_v5_0_0_enable_clock_gating(adev); 593 593 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
··· 516 516 } 517 517 } 518 518 519 - static bool jpeg_v5_0_1_is_idle(void *handle) 519 + static bool jpeg_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block) 520 520 { 521 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 521 + struct amdgpu_device *adev = ip_block->adev; 522 522 bool ret = false; 523 523 int i, j; 524 524 ··· 567 567 return 0; 568 568 569 569 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 570 - if (!jpeg_v5_0_1_is_idle(adev)) 570 + if (!jpeg_v5_0_1_is_idle(ip_block)) 571 571 return -EBUSY; 572 572 } 573 573
+1 -1
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
··· 625 625 return navi10_ih_hw_init(ip_block); 626 626 } 627 627 628 - static bool navi10_ih_is_idle(void *handle) 628 + static bool navi10_ih_is_idle(struct amdgpu_ip_block *ip_block) 629 629 { 630 630 /* todo */ 631 631 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/nv.c
··· 1035 1035 return nv_common_hw_init(ip_block); 1036 1036 } 1037 1037 1038 - static bool nv_common_is_idle(void *handle) 1038 + static bool nv_common_is_idle(struct amdgpu_ip_block *ip_block) 1039 1039 { 1040 1040 return true; 1041 1041 }
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 911 911 return sdma_v2_4_hw_init(ip_block); 912 912 } 913 913 914 - static bool sdma_v2_4_is_idle(void *handle) 914 + static bool sdma_v2_4_is_idle(struct amdgpu_ip_block *ip_block) 915 915 { 916 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 916 + struct amdgpu_device *adev = ip_block->adev; 917 917 u32 tmp = RREG32(mmSRBM_STATUS2); 918 918 919 919 if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
··· 1200 1200 return sdma_v3_0_hw_init(ip_block); 1201 1201 } 1202 1202 1203 - static bool sdma_v3_0_is_idle(void *handle) 1203 + static bool sdma_v3_0_is_idle(struct amdgpu_ip_block *ip_block) 1204 1204 { 1205 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1205 + struct amdgpu_device *adev = ip_block->adev; 1206 1206 u32 tmp = RREG32(mmSRBM_STATUS2); 1207 1207 1208 1208 if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 2015 2015 return sdma_v4_0_hw_init(ip_block); 2016 2016 } 2017 2017 2018 - static bool sdma_v4_0_is_idle(void *handle) 2018 + static bool sdma_v4_0_is_idle(struct amdgpu_ip_block *ip_block) 2019 2019 { 2020 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2020 + struct amdgpu_device *adev = ip_block->adev; 2021 2021 u32 i; 2022 2022 2023 2023 for (i = 0; i < adev->sdma.num_instances; i++) {
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
··· 1561 1561 return sdma_v4_4_2_hw_init(ip_block); 1562 1562 } 1563 1563 1564 - static bool sdma_v4_4_2_is_idle(void *handle) 1564 + static bool sdma_v4_4_2_is_idle(struct amdgpu_ip_block *ip_block) 1565 1565 { 1566 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1566 + struct amdgpu_device *adev = ip_block->adev; 1567 1567 u32 i; 1568 1568 1569 1569 for (i = 0; i < adev->sdma.num_instances; i++) {
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 1530 1530 return sdma_v5_0_hw_init(ip_block); 1531 1531 } 1532 1532 1533 - static bool sdma_v5_0_is_idle(void *handle) 1533 + static bool sdma_v5_0_is_idle(struct amdgpu_ip_block *ip_block) 1534 1534 { 1535 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1535 + struct amdgpu_device *adev = ip_block->adev; 1536 1536 u32 i; 1537 1537 1538 1538 for (i = 0; i < adev->sdma.num_instances; i++) {
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 1435 1435 return sdma_v5_2_hw_init(ip_block); 1436 1436 } 1437 1437 1438 - static bool sdma_v5_2_is_idle(void *handle) 1438 + static bool sdma_v5_2_is_idle(struct amdgpu_ip_block *ip_block) 1439 1439 { 1440 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1440 + struct amdgpu_device *adev = ip_block->adev; 1441 1441 u32 i; 1442 1442 1443 1443 for (i = 0; i < adev->sdma.num_instances; i++) {
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
··· 1429 1429 return sdma_v6_0_hw_init(ip_block); 1430 1430 } 1431 1431 1432 - static bool sdma_v6_0_is_idle(void *handle) 1432 + static bool sdma_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 1433 1433 { 1434 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1434 + struct amdgpu_device *adev = ip_block->adev; 1435 1435 u32 i; 1436 1436 1437 1437 for (i = 0; i < adev->sdma.num_instances; i++) {
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
··· 1430 1430 return sdma_v7_0_hw_init(ip_block); 1431 1431 } 1432 1432 1433 - static bool sdma_v7_0_is_idle(void *handle) 1433 + static bool sdma_v7_0_is_idle(struct amdgpu_ip_block *ip_block) 1434 1434 { 1435 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1435 + struct amdgpu_device *adev = ip_block->adev; 1436 1436 u32 i; 1437 1437 1438 1438 for (i = 0; i < adev->sdma.num_instances; i++) {
+1 -1
drivers/gpu/drm/amd/amdgpu/si.c
··· 2644 2644 return si_common_hw_init(ip_block); 2645 2645 } 2646 2646 2647 - static bool si_common_is_idle(void *handle) 2647 + static bool si_common_is_idle(struct amdgpu_ip_block *ip_block) 2648 2648 { 2649 2649 return true; 2650 2650 }
+3 -3
drivers/gpu/drm/amd/amdgpu/si_dma.c
··· 541 541 return si_dma_hw_init(ip_block); 542 542 } 543 543 544 - static bool si_dma_is_idle(void *handle) 544 + static bool si_dma_is_idle(struct amdgpu_ip_block *ip_block) 545 545 { 546 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 546 + struct amdgpu_device *adev = ip_block->adev; 547 547 548 548 u32 tmp = RREG32(SRBM_STATUS2); 549 549 ··· 559 559 struct amdgpu_device *adev = ip_block->adev; 560 560 561 561 for (i = 0; i < adev->usec_timeout; i++) { 562 - if (si_dma_is_idle(adev)) 562 + if (si_dma_is_idle(ip_block)) 563 563 return 0; 564 564 udelay(1); 565 565 }
+3 -3
drivers/gpu/drm/amd/amdgpu/si_ih.c
··· 210 210 return si_ih_hw_init(ip_block); 211 211 } 212 212 213 - static bool si_ih_is_idle(void *handle) 213 + static bool si_ih_is_idle(struct amdgpu_ip_block *ip_block) 214 214 { 215 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 215 + struct amdgpu_device *adev = ip_block->adev; 216 216 u32 tmp = RREG32(SRBM_STATUS); 217 217 218 218 if (tmp & SRBM_STATUS__IH_BUSY_MASK) ··· 227 227 struct amdgpu_device *adev = ip_block->adev; 228 228 229 229 for (i = 0; i < adev->usec_timeout; i++) { 230 - if (si_ih_is_idle(adev)) 230 + if (si_ih_is_idle(ip_block)) 231 231 return 0; 232 232 udelay(1); 233 233 }
+1 -1
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 1360 1360 return soc15_common_hw_init(ip_block); 1361 1361 } 1362 1362 1363 - static bool soc15_common_is_idle(void *handle) 1363 + static bool soc15_common_is_idle(struct amdgpu_ip_block *ip_block) 1364 1364 { 1365 1365 return true; 1366 1366 }
+1 -1
drivers/gpu/drm/amd/amdgpu/soc21.c
··· 952 952 return soc21_common_hw_init(ip_block); 953 953 } 954 954 955 - static bool soc21_common_is_idle(void *handle) 955 + static bool soc21_common_is_idle(struct amdgpu_ip_block *ip_block) 956 956 { 957 957 return true; 958 958 }
+1 -1
drivers/gpu/drm/amd/amdgpu/soc24.c
··· 531 531 return soc24_common_hw_init(ip_block); 532 532 } 533 533 534 - static bool soc24_common_is_idle(void *handle) 534 + static bool soc24_common_is_idle(struct amdgpu_ip_block *ip_block) 535 535 { 536 536 return true; 537 537 }
+2 -2
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
··· 353 353 return tonga_ih_hw_init(ip_block); 354 354 } 355 355 356 - static bool tonga_ih_is_idle(void *handle) 356 + static bool tonga_ih_is_idle(struct amdgpu_ip_block *ip_block) 357 357 { 358 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 358 + struct amdgpu_device *adev = ip_block->adev; 359 359 u32 tmp = RREG32(mmSRBM_STATUS); 360 360 361 361 if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
··· 758 758 return uvd_v3_1_hw_init(ip_block); 759 759 } 760 760 761 - static bool uvd_v3_1_is_idle(void *handle) 761 + static bool uvd_v3_1_is_idle(struct amdgpu_ip_block *ip_block) 762 762 { 763 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 763 + struct amdgpu_device *adev = ip_block->adev; 764 764 765 765 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); 766 766 }
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 658 658 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); 659 659 } 660 660 661 - static bool uvd_v4_2_is_idle(void *handle) 661 + static bool uvd_v4_2_is_idle(struct amdgpu_ip_block *ip_block) 662 662 { 663 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 663 + struct amdgpu_device *adev = ip_block->adev; 664 664 665 665 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); 666 666 }
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 580 580 } 581 581 } 582 582 583 - static bool uvd_v5_0_is_idle(void *handle) 583 + static bool uvd_v5_0_is_idle(struct amdgpu_ip_block *ip_block) 584 584 { 585 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 585 + struct amdgpu_device *adev = ip_block->adev; 586 586 587 587 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); 588 588 }
+3 -3
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 1143 1143 amdgpu_ring_write(ring, vmid); 1144 1144 } 1145 1145 1146 - static bool uvd_v6_0_is_idle(void *handle) 1146 + static bool uvd_v6_0_is_idle(struct amdgpu_ip_block *ip_block) 1147 1147 { 1148 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1148 + struct amdgpu_device *adev = ip_block->adev; 1149 1149 1150 1150 return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); 1151 1151 } ··· 1156 1156 struct amdgpu_device *adev = ip_block->adev; 1157 1157 1158 1158 for (i = 0; i < adev->usec_timeout; i++) { 1159 - if (uvd_v6_0_is_idle(adev)) 1159 + if (uvd_v6_0_is_idle(ip_block)) 1160 1160 return 0; 1161 1161 } 1162 1162 return -ETIMEDOUT;
+3 -3
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
··· 201 201 WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); 202 202 } 203 203 204 - static bool vce_v2_0_is_idle(void *handle) 204 + static bool vce_v2_0_is_idle(struct amdgpu_ip_block *ip_block) 205 205 { 206 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 206 + struct amdgpu_device *adev = ip_block->adev; 207 207 208 208 return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); 209 209 } ··· 214 214 unsigned i; 215 215 216 216 for (i = 0; i < adev->usec_timeout; i++) { 217 - if (vce_v2_0_is_idle(adev)) 217 + if (vce_v2_0_is_idle(ip_block)) 218 218 return 0; 219 219 } 220 220 return -ETIMEDOUT;
+3 -3
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 597 597 WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); 598 598 } 599 599 600 - static bool vce_v3_0_is_idle(void *handle) 600 + static bool vce_v3_0_is_idle(struct amdgpu_ip_block *ip_block) 601 601 { 602 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 602 + struct amdgpu_device *adev = ip_block->adev; 603 603 u32 mask = 0; 604 604 605 605 mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK; ··· 614 614 struct amdgpu_device *adev = ip_block->adev; 615 615 616 616 for (i = 0; i < adev->usec_timeout; i++) 617 - if (vce_v3_0_is_idle(adev)) 617 + if (vce_v3_0_is_idle(ip_block)) 618 618 return 0; 619 619 620 620 return -ETIMEDOUT;
+3 -3
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 1377 1377 return 0; 1378 1378 } 1379 1379 1380 - static bool vcn_v1_0_is_idle(void *handle) 1380 + static bool vcn_v1_0_is_idle(struct amdgpu_ip_block *ip_block) 1381 1381 { 1382 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1382 + struct amdgpu_device *adev = ip_block->adev; 1383 1383 1384 1384 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); 1385 1385 } ··· 1403 1403 1404 1404 if (enable) { 1405 1405 /* wait for STATUS to clear */ 1406 - if (!vcn_v1_0_is_idle(adev)) 1406 + if (!vcn_v1_0_is_idle(ip_block)) 1407 1407 return -EBUSY; 1408 1408 vcn_v1_0_enable_clock_gating(adev); 1409 1409 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
··· 1317 1317 return 0; 1318 1318 } 1319 1319 1320 - static bool vcn_v2_0_is_idle(void *handle) 1320 + static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block) 1321 1321 { 1322 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1322 + struct amdgpu_device *adev = ip_block->adev; 1323 1323 1324 1324 return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); 1325 1325 } ··· 1346 1346 1347 1347 if (enable) { 1348 1348 /* wait for STATUS to clear */ 1349 - if (!vcn_v2_0_is_idle(adev)) 1349 + if (!vcn_v2_0_is_idle(ip_block)) 1350 1350 return -EBUSY; 1351 1351 vcn_v2_0_enable_clock_gating(adev); 1352 1352 } else {
+3 -3
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 1750 1750 } 1751 1751 } 1752 1752 1753 - static bool vcn_v2_5_is_idle(void *handle) 1753 + static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block) 1754 1754 { 1755 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1755 + struct amdgpu_device *adev = ip_block->adev; 1756 1756 int i, ret = 1; 1757 1757 1758 1758 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { ··· 1794 1794 1795 1795 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 1796 1796 if (enable) { 1797 - if (!vcn_v2_5_is_idle(adev)) 1797 + if (!vcn_v2_5_is_idle(ip_block)) 1798 1798 return -EBUSY; 1799 1799 vcn_v2_5_enable_clock_gating(adev, i); 1800 1800 } else {
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
··· 2103 2103 } 2104 2104 } 2105 2105 2106 - static bool vcn_v3_0_is_idle(void *handle) 2106 + static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block) 2107 2107 { 2108 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2108 + struct amdgpu_device *adev = ip_block->adev; 2109 2109 int i, ret = 1; 2110 2110 2111 2111 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
··· 1968 1968 * 1969 1969 * Check whether VCN block is idle 1970 1970 */ 1971 - static bool vcn_v4_0_is_idle(void *handle) 1971 + static bool vcn_v4_0_is_idle(struct amdgpu_ip_block *ip_block) 1972 1972 { 1973 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1973 + struct amdgpu_device *adev = ip_block->adev; 1974 1974 int i, ret = 1; 1975 1975 1976 1976 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
··· 1579 1579 * 1580 1580 * Check whether VCN block is idle 1581 1581 */ 1582 - static bool vcn_v4_0_3_is_idle(void *handle) 1582 + static bool vcn_v4_0_3_is_idle(struct amdgpu_ip_block *ip_block) 1583 1583 { 1584 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1584 + struct amdgpu_device *adev = ip_block->adev; 1585 1585 int i, ret = 1; 1586 1586 1587 1587 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
··· 1456 1456 * 1457 1457 * Check whether VCN block is idle 1458 1458 */ 1459 - static bool vcn_v4_0_5_is_idle(void *handle) 1459 + static bool vcn_v4_0_5_is_idle(struct amdgpu_ip_block *ip_block) 1460 1460 { 1461 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1461 + struct amdgpu_device *adev = ip_block->adev; 1462 1462 int i, ret = 1; 1463 1463 1464 1464 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
··· 1196 1196 * 1197 1197 * Check whether VCN block is idle 1198 1198 */ 1199 - static bool vcn_v5_0_0_is_idle(void *handle) 1199 + static bool vcn_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block) 1200 1200 { 1201 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1201 + struct amdgpu_device *adev = ip_block->adev; 1202 1202 int i, ret = 1; 1203 1203 1204 1204 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
··· 931 931 * 932 932 * Check whether VCN block is idle 933 933 */ 934 - static bool vcn_v5_0_1_is_idle(void *handle) 934 + static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block) 935 935 { 936 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 936 + struct amdgpu_device *adev = ip_block->adev; 937 937 int i, ret = 1; 938 938 939 939 for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
+1 -1
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
··· 555 555 return vega10_ih_hw_init(ip_block); 556 556 } 557 557 558 - static bool vega10_ih_is_idle(void *handle) 558 + static bool vega10_ih_is_idle(struct amdgpu_ip_block *ip_block) 559 559 { 560 560 /* todo */ 561 561 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
··· 651 651 return vega20_ih_hw_init(ip_block); 652 652 } 653 653 654 - static bool vega20_ih_is_idle(void *handle) 654 + static bool vega20_ih_is_idle(struct amdgpu_ip_block *ip_block) 655 655 { 656 656 /* todo */ 657 657 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/vi.c
··· 1736 1736 return vi_common_hw_init(ip_block); 1737 1737 } 1738 1738 1739 - static bool vi_common_is_idle(void *handle) 1739 + static bool vi_common_is_idle(struct amdgpu_ip_block *ip_block) 1740 1740 { 1741 1741 return true; 1742 1742 }
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 321 321 return 0; 322 322 } 323 323 324 - static bool dm_is_idle(void *handle) 324 + static bool dm_is_idle(struct amdgpu_ip_block *ip_block) 325 325 { 326 326 /* XXX todo */ 327 327 return true;
+1 -1
drivers/gpu/drm/amd/include/amd_shared.h
··· 405 405 int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); 406 406 int (*suspend)(struct amdgpu_ip_block *ip_block); 407 407 int (*resume)(struct amdgpu_ip_block *ip_block); 408 - bool (*is_idle)(void *handle); 408 + bool (*is_idle)(struct amdgpu_ip_block *ip_block); 409 409 int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); 410 410 bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); 411 411 int (*pre_soft_reset)(struct amdgpu_ip_block *ip_block);
+1 -1
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
··· 3094 3094 return 0; 3095 3095 } 3096 3096 3097 - static bool kv_dpm_is_idle(void *handle) 3097 + static bool kv_dpm_is_idle(struct amdgpu_ip_block *ip_block) 3098 3098 { 3099 3099 return true; 3100 3100 }
+1 -1
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 7838 7838 return 0; 7839 7839 } 7840 7840 7841 - static bool si_dpm_is_idle(void *handle) 7841 + static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block) 7842 7842 { 7843 7843 /* XXX */ 7844 7844 return true;
+1 -1
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 239 239 } 240 240 241 241 242 - static bool pp_is_idle(void *handle) 242 + static bool pp_is_idle(struct amdgpu_ip_block *ip_block) 243 243 { 244 244 return false; 245 245 }