Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/powerplay: unified interfaces for message issuing and response checking

This can avoid potential race conditions between them.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Evan Quan and committed by
Alex Deucher
a0ec2256 5964f3fe

+890 -591
+101 -64
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
··· 76 76 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); 77 77 return -EINVAL; 78 78 } 79 - smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); 79 + smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL); 80 80 81 81 return 0; 82 82 } ··· 209 209 smu10_data->deep_sleep_dcefclk = clock; 210 210 smum_send_msg_to_smc_with_parameter(hwmgr, 211 211 PPSMC_MSG_SetMinDeepSleepDcefclk, 212 - smu10_data->deep_sleep_dcefclk); 212 + smu10_data->deep_sleep_dcefclk, 213 + NULL); 213 214 } 214 215 return 0; 215 216 } ··· 224 223 smu10_data->dcf_actual_hard_min_freq = clock; 225 224 smum_send_msg_to_smc_with_parameter(hwmgr, 226 225 PPSMC_MSG_SetHardMinDcefclkByFreq, 227 - smu10_data->dcf_actual_hard_min_freq); 226 + smu10_data->dcf_actual_hard_min_freq, 227 + NULL); 228 228 } 229 229 return 0; 230 230 } ··· 239 237 smu10_data->f_actual_hard_min_freq = clock; 240 238 smum_send_msg_to_smc_with_parameter(hwmgr, 241 239 PPSMC_MSG_SetHardMinFclkByFreq, 242 - smu10_data->f_actual_hard_min_freq); 240 + smu10_data->f_actual_hard_min_freq, 241 + NULL); 243 242 } 244 243 return 0; 245 244 } ··· 253 250 smu10_data->num_active_display = count; 254 251 smum_send_msg_to_smc_with_parameter(hwmgr, 255 252 PPSMC_MSG_SetDisplayCount, 256 - smu10_data->num_active_display); 253 + smu10_data->num_active_display, 254 + NULL); 257 255 } 258 256 259 257 return 0; ··· 277 273 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) 278 274 return smum_send_msg_to_smc_with_parameter(hwmgr, 279 275 PPSMC_MSG_SetGfxCGPG, 280 - true); 276 + true, 277 + NULL); 281 278 else 282 279 return 0; 283 280 } ··· 324 319 struct amdgpu_device *adev = hwmgr->adev; 325 320 326 321 if (adev->pm.pp_feature & PP_GFXOFF_MASK) { 327 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); 322 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL); 328 323 329 324 /* confirm gfx is back to "on" state */ 330 325 while (!smu10_is_gfx_on(hwmgr)) ··· 344 339 struct amdgpu_device *adev = hwmgr->adev; 345 340 
346 341 if (adev->pm.pp_feature & PP_GFXOFF_MASK) 347 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff); 342 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL); 348 343 349 344 return 0; 350 345 } ··· 479 474 smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk, 480 475 ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]); 481 476 482 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency); 483 - result = smum_get_argument(hwmgr); 477 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result); 484 478 smu10_data->gfx_min_freq_limit = result / 10 * 1000; 485 479 486 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency); 487 - result = smum_get_argument(hwmgr); 480 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result); 488 481 smu10_data->gfx_max_freq_limit = result / 10 * 1000; 489 482 490 483 return 0; ··· 586 583 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 587 584 smum_send_msg_to_smc_with_parameter(hwmgr, 588 585 PPSMC_MSG_SetHardMinGfxClk, 589 - data->gfx_max_freq_limit/100); 586 + data->gfx_max_freq_limit/100, 587 + NULL); 590 588 smum_send_msg_to_smc_with_parameter(hwmgr, 591 589 PPSMC_MSG_SetHardMinFclkByFreq, 592 - SMU10_UMD_PSTATE_PEAK_FCLK); 590 + SMU10_UMD_PSTATE_PEAK_FCLK, 591 + NULL); 593 592 smum_send_msg_to_smc_with_parameter(hwmgr, 594 593 PPSMC_MSG_SetHardMinSocclkByFreq, 595 - SMU10_UMD_PSTATE_PEAK_SOCCLK); 594 + SMU10_UMD_PSTATE_PEAK_SOCCLK, 595 + NULL); 596 596 smum_send_msg_to_smc_with_parameter(hwmgr, 597 597 PPSMC_MSG_SetHardMinVcn, 598 - SMU10_UMD_PSTATE_VCE); 598 + SMU10_UMD_PSTATE_VCE, 599 + NULL); 599 600 600 601 smum_send_msg_to_smc_with_parameter(hwmgr, 601 602 PPSMC_MSG_SetSoftMaxGfxClk, 602 - data->gfx_max_freq_limit/100); 603 + data->gfx_max_freq_limit/100, 604 + NULL); 603 605 smum_send_msg_to_smc_with_parameter(hwmgr, 604 606 PPSMC_MSG_SetSoftMaxFclkByFreq, 605 - SMU10_UMD_PSTATE_PEAK_FCLK); 607 + SMU10_UMD_PSTATE_PEAK_FCLK, 608 + NULL); 606 609 
smum_send_msg_to_smc_with_parameter(hwmgr, 607 610 PPSMC_MSG_SetSoftMaxSocclkByFreq, 608 - SMU10_UMD_PSTATE_PEAK_SOCCLK); 611 + SMU10_UMD_PSTATE_PEAK_SOCCLK, 612 + NULL); 609 613 smum_send_msg_to_smc_with_parameter(hwmgr, 610 614 PPSMC_MSG_SetSoftMaxVcn, 611 - SMU10_UMD_PSTATE_VCE); 615 + SMU10_UMD_PSTATE_VCE, 616 + NULL); 612 617 break; 613 618 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 614 619 smum_send_msg_to_smc_with_parameter(hwmgr, 615 620 PPSMC_MSG_SetHardMinGfxClk, 616 - min_sclk); 621 + min_sclk, 622 + NULL); 617 623 smum_send_msg_to_smc_with_parameter(hwmgr, 618 624 PPSMC_MSG_SetSoftMaxGfxClk, 619 - min_sclk); 625 + min_sclk, 626 + NULL); 620 627 break; 621 628 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 622 629 smum_send_msg_to_smc_with_parameter(hwmgr, 623 630 PPSMC_MSG_SetHardMinFclkByFreq, 624 - min_mclk); 631 + min_mclk, 632 + NULL); 625 633 smum_send_msg_to_smc_with_parameter(hwmgr, 626 634 PPSMC_MSG_SetSoftMaxFclkByFreq, 627 - min_mclk); 635 + min_mclk, 636 + NULL); 628 637 break; 629 638 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 630 639 smum_send_msg_to_smc_with_parameter(hwmgr, 631 640 PPSMC_MSG_SetHardMinGfxClk, 632 - SMU10_UMD_PSTATE_GFXCLK); 641 + SMU10_UMD_PSTATE_GFXCLK, 642 + NULL); 633 643 smum_send_msg_to_smc_with_parameter(hwmgr, 634 644 PPSMC_MSG_SetHardMinFclkByFreq, 635 - SMU10_UMD_PSTATE_FCLK); 645 + SMU10_UMD_PSTATE_FCLK, 646 + NULL); 636 647 smum_send_msg_to_smc_with_parameter(hwmgr, 637 648 PPSMC_MSG_SetHardMinSocclkByFreq, 638 - SMU10_UMD_PSTATE_SOCCLK); 649 + SMU10_UMD_PSTATE_SOCCLK, 650 + NULL); 639 651 smum_send_msg_to_smc_with_parameter(hwmgr, 640 652 PPSMC_MSG_SetHardMinVcn, 641 - SMU10_UMD_PSTATE_VCE); 653 + SMU10_UMD_PSTATE_VCE, 654 + NULL); 642 655 643 656 smum_send_msg_to_smc_with_parameter(hwmgr, 644 657 PPSMC_MSG_SetSoftMaxGfxClk, 645 - SMU10_UMD_PSTATE_GFXCLK); 658 + SMU10_UMD_PSTATE_GFXCLK, 659 + NULL); 646 660 smum_send_msg_to_smc_with_parameter(hwmgr, 647 661 PPSMC_MSG_SetSoftMaxFclkByFreq, 648 - 
SMU10_UMD_PSTATE_FCLK); 662 + SMU10_UMD_PSTATE_FCLK, 663 + NULL); 649 664 smum_send_msg_to_smc_with_parameter(hwmgr, 650 665 PPSMC_MSG_SetSoftMaxSocclkByFreq, 651 - SMU10_UMD_PSTATE_SOCCLK); 666 + SMU10_UMD_PSTATE_SOCCLK, 667 + NULL); 652 668 smum_send_msg_to_smc_with_parameter(hwmgr, 653 669 PPSMC_MSG_SetSoftMaxVcn, 654 - SMU10_UMD_PSTATE_VCE); 670 + SMU10_UMD_PSTATE_VCE, 671 + NULL); 655 672 break; 656 673 case AMD_DPM_FORCED_LEVEL_AUTO: 657 674 smum_send_msg_to_smc_with_parameter(hwmgr, 658 675 PPSMC_MSG_SetHardMinGfxClk, 659 - min_sclk); 676 + min_sclk, 677 + NULL); 660 678 smum_send_msg_to_smc_with_parameter(hwmgr, 661 679 PPSMC_MSG_SetHardMinFclkByFreq, 662 680 hwmgr->display_config->num_display > 3 ? 663 681 SMU10_UMD_PSTATE_PEAK_FCLK : 664 - min_mclk); 682 + min_mclk, 683 + NULL); 665 684 666 685 smum_send_msg_to_smc_with_parameter(hwmgr, 667 686 PPSMC_MSG_SetHardMinSocclkByFreq, 668 - SMU10_UMD_PSTATE_MIN_SOCCLK); 687 + SMU10_UMD_PSTATE_MIN_SOCCLK, 688 + NULL); 669 689 smum_send_msg_to_smc_with_parameter(hwmgr, 670 690 PPSMC_MSG_SetHardMinVcn, 671 - SMU10_UMD_PSTATE_MIN_VCE); 691 + SMU10_UMD_PSTATE_MIN_VCE, 692 + NULL); 672 693 673 694 smum_send_msg_to_smc_with_parameter(hwmgr, 674 695 PPSMC_MSG_SetSoftMaxGfxClk, 675 - data->gfx_max_freq_limit/100); 696 + data->gfx_max_freq_limit/100, 697 + NULL); 676 698 smum_send_msg_to_smc_with_parameter(hwmgr, 677 699 PPSMC_MSG_SetSoftMaxFclkByFreq, 678 - SMU10_UMD_PSTATE_PEAK_FCLK); 700 + SMU10_UMD_PSTATE_PEAK_FCLK, 701 + NULL); 679 702 smum_send_msg_to_smc_with_parameter(hwmgr, 680 703 PPSMC_MSG_SetSoftMaxSocclkByFreq, 681 - SMU10_UMD_PSTATE_PEAK_SOCCLK); 704 + SMU10_UMD_PSTATE_PEAK_SOCCLK, 705 + NULL); 682 706 smum_send_msg_to_smc_with_parameter(hwmgr, 683 707 PPSMC_MSG_SetSoftMaxVcn, 684 - SMU10_UMD_PSTATE_VCE); 708 + SMU10_UMD_PSTATE_VCE, 709 + NULL); 685 710 break; 686 711 case AMD_DPM_FORCED_LEVEL_LOW: 687 712 smum_send_msg_to_smc_with_parameter(hwmgr, 688 713 PPSMC_MSG_SetHardMinGfxClk, 689 - 
data->gfx_min_freq_limit/100); 714 + data->gfx_min_freq_limit/100, 715 + NULL); 690 716 smum_send_msg_to_smc_with_parameter(hwmgr, 691 717 PPSMC_MSG_SetSoftMaxGfxClk, 692 - data->gfx_min_freq_limit/100); 718 + data->gfx_min_freq_limit/100, 719 + NULL); 693 720 smum_send_msg_to_smc_with_parameter(hwmgr, 694 721 PPSMC_MSG_SetHardMinFclkByFreq, 695 - min_mclk); 722 + min_mclk, 723 + NULL); 696 724 smum_send_msg_to_smc_with_parameter(hwmgr, 697 725 PPSMC_MSG_SetSoftMaxFclkByFreq, 698 - min_mclk); 726 + min_mclk, 727 + NULL); 699 728 break; 700 729 case AMD_DPM_FORCED_LEVEL_MANUAL: 701 730 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: ··· 879 844 PPSMC_MSG_SetHardMinGfxClk, 880 845 low == 2 ? data->gfx_max_freq_limit/100 : 881 846 low == 1 ? SMU10_UMD_PSTATE_GFXCLK : 882 - data->gfx_min_freq_limit/100); 847 + data->gfx_min_freq_limit/100, 848 + NULL); 883 849 884 850 smum_send_msg_to_smc_with_parameter(hwmgr, 885 851 PPSMC_MSG_SetSoftMaxGfxClk, 886 852 high == 0 ? data->gfx_min_freq_limit/100 : 887 853 high == 1 ? 
SMU10_UMD_PSTATE_GFXCLK : 888 - data->gfx_max_freq_limit/100); 854 + data->gfx_max_freq_limit/100, 855 + NULL); 889 856 break; 890 857 891 858 case PP_MCLK: ··· 896 859 897 860 smum_send_msg_to_smc_with_parameter(hwmgr, 898 861 PPSMC_MSG_SetHardMinFclkByFreq, 899 - mclk_table->entries[low].clk/100); 862 + mclk_table->entries[low].clk/100, 863 + NULL); 900 864 901 865 smum_send_msg_to_smc_with_parameter(hwmgr, 902 866 PPSMC_MSG_SetSoftMaxFclkByFreq, 903 - mclk_table->entries[high].clk/100); 867 + mclk_table->entries[high].clk/100, 868 + NULL); 904 869 break; 905 870 906 871 case PP_PCIE: ··· 922 883 923 884 switch (type) { 924 885 case PP_SCLK: 925 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); 926 - now = smum_get_argument(hwmgr); 886 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); 927 887 928 888 /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ 929 889 if (now == data->gfx_max_freq_limit/100) ··· 943 905 i == 2 ? "*" : ""); 944 906 break; 945 907 case PP_MCLK: 946 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); 947 - now = smum_get_argument(hwmgr); 908 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); 948 909 949 910 for (i = 0; i < mclk_table->count; i++) 950 911 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 1154 1117 1155 1118 switch (idx) { 1156 1119 case AMDGPU_PP_SENSOR_GFX_SCLK: 1157 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); 1158 - sclk = smum_get_argument(hwmgr); 1120 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk); 1159 1121 /* in units of 10KHZ */ 1160 1122 *((uint32_t *)value) = sclk * 100; 1161 1123 *size = 4; 1162 1124 break; 1163 1125 case AMDGPU_PP_SENSOR_GFX_MCLK: 1164 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); 1165 - mclk = smum_get_argument(hwmgr); 1126 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk); 1166 1127 /* in units of 10KHZ */ 1167 1128 *((uint32_t *)value) = mclk * 100; 1168 1129 
*size = 4; ··· 1196 1161 static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) 1197 1162 { 1198 1163 1199 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister); 1164 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL); 1200 1165 } 1201 1166 1202 1167 static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr) 1203 1168 { 1204 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); 1169 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL); 1205 1170 } 1206 1171 1207 1172 static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) 1208 1173 { 1209 1174 if (gate) 1210 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma); 1175 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL); 1211 1176 else 1212 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma); 1177 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL); 1213 1178 } 1214 1179 1215 1180 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) ··· 1221 1186 AMD_IP_BLOCK_TYPE_VCN, 1222 1187 AMD_PG_STATE_GATE); 1223 1188 smum_send_msg_to_smc_with_parameter(hwmgr, 1224 - PPSMC_MSG_PowerDownVcn, 0); 1189 + PPSMC_MSG_PowerDownVcn, 0, NULL); 1225 1190 smu10_data->vcn_power_gated = true; 1226 1191 } else { 1227 1192 smum_send_msg_to_smc_with_parameter(hwmgr, 1228 - PPSMC_MSG_PowerUpVcn, 0); 1193 + PPSMC_MSG_PowerUpVcn, 0, NULL); 1229 1194 amdgpu_device_ip_set_powergating_state(hwmgr->adev, 1230 1195 AMD_IP_BLOCK_TYPE_VCN, 1231 1196 AMD_PG_STATE_UNGATE); ··· 1334 1299 hwmgr->gfxoff_state_changed_by_workload = true; 1335 1300 } 1336 1301 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify, 1337 - 1 << workload_type); 1302 + 1 << workload_type, 1303 + NULL); 1338 1304 if (!result) 1339 1305 hwmgr->power_profile_mode = input[size]; 1340 1306 if (workload_type && hwmgr->gfxoff_state_changed_by_workload) { ··· 1350 1314 { 1351 1315 return 
smum_send_msg_to_smc_with_parameter(hwmgr, 1352 1316 PPSMC_MSG_DeviceDriverReset, 1353 - mode); 1317 + mode, 1318 + NULL); 1354 1319 } 1355 1320 1356 1321 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
+34 -27
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
··· 29 29 { 30 30 return smum_send_msg_to_smc(hwmgr, enable ? 31 31 PPSMC_MSG_UVDDPM_Enable : 32 - PPSMC_MSG_UVDDPM_Disable); 32 + PPSMC_MSG_UVDDPM_Disable, 33 + NULL); 33 34 } 34 35 35 36 static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) 36 37 { 37 38 return smum_send_msg_to_smc(hwmgr, enable ? 38 39 PPSMC_MSG_VCEDPM_Enable : 39 - PPSMC_MSG_VCEDPM_Disable); 40 + PPSMC_MSG_VCEDPM_Disable, 41 + NULL); 40 42 } 41 43 42 44 static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) ··· 59 57 { 60 58 if (phm_cf_want_uvd_power_gating(hwmgr)) 61 59 return smum_send_msg_to_smc(hwmgr, 62 - PPSMC_MSG_UVDPowerOFF); 60 + PPSMC_MSG_UVDPowerOFF, 61 + NULL); 63 62 return 0; 64 63 } 65 64 ··· 70 67 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 71 68 PHM_PlatformCaps_UVDDynamicPowerGating)) { 72 69 return smum_send_msg_to_smc_with_parameter(hwmgr, 73 - PPSMC_MSG_UVDPowerON, 1); 70 + PPSMC_MSG_UVDPowerON, 1, NULL); 74 71 } else { 75 72 return smum_send_msg_to_smc_with_parameter(hwmgr, 76 - PPSMC_MSG_UVDPowerON, 0); 73 + PPSMC_MSG_UVDPowerON, 0, NULL); 77 74 } 78 75 } 79 76 ··· 84 81 { 85 82 if (phm_cf_want_vce_power_gating(hwmgr)) 86 83 return smum_send_msg_to_smc(hwmgr, 87 - PPSMC_MSG_VCEPowerOFF); 84 + PPSMC_MSG_VCEPowerOFF, 85 + NULL); 88 86 return 0; 89 87 } 90 88 ··· 93 89 { 94 90 if (phm_cf_want_vce_power_gating(hwmgr)) 95 91 return smum_send_msg_to_smc(hwmgr, 96 - PPSMC_MSG_VCEPowerON); 92 + PPSMC_MSG_VCEPowerON, 93 + NULL); 97 94 return 0; 98 95 } 99 96 ··· 186 181 value = CG_GFX_CGCG_MASK; 187 182 188 183 if (smum_send_msg_to_smc_with_parameter( 189 - hwmgr, msg, value)) 184 + hwmgr, msg, value, NULL)) 190 185 return -EINVAL; 191 186 } 192 187 if (PP_STATE_SUPPORT_LS & *msg_id) { ··· 196 191 value = CG_GFX_CGLS_MASK; 197 192 198 193 if (smum_send_msg_to_smc_with_parameter( 199 - hwmgr, msg, value)) 194 + hwmgr, msg, value, NULL)) 200 195 return -EINVAL; 201 196 } 202 197 break; ··· 209 204 value = CG_GFX_3DCG_MASK; 210 205 
211 206 if (smum_send_msg_to_smc_with_parameter( 212 - hwmgr, msg, value)) 207 + hwmgr, msg, value, NULL)) 213 208 return -EINVAL; 214 209 } 215 210 ··· 220 215 value = CG_GFX_3DLS_MASK; 221 216 222 217 if (smum_send_msg_to_smc_with_parameter( 223 - hwmgr, msg, value)) 218 + hwmgr, msg, value, NULL)) 224 219 return -EINVAL; 225 220 } 226 221 break; ··· 233 228 value = CG_GFX_RLC_LS_MASK; 234 229 235 230 if (smum_send_msg_to_smc_with_parameter( 236 - hwmgr, msg, value)) 231 + hwmgr, msg, value, NULL)) 237 232 return -EINVAL; 238 233 } 239 234 break; ··· 246 241 value = CG_GFX_CP_LS_MASK; 247 242 248 243 if (smum_send_msg_to_smc_with_parameter( 249 - hwmgr, msg, value)) 244 + hwmgr, msg, value, NULL)) 250 245 return -EINVAL; 251 246 } 252 247 break; ··· 260 255 CG_GFX_OTHERS_MGCG_MASK); 261 256 262 257 if (smum_send_msg_to_smc_with_parameter( 263 - hwmgr, msg, value)) 258 + hwmgr, msg, value, NULL)) 264 259 return -EINVAL; 265 260 } 266 261 break; ··· 280 275 value = CG_SYS_BIF_MGCG_MASK; 281 276 282 277 if (smum_send_msg_to_smc_with_parameter( 283 - hwmgr, msg, value)) 278 + hwmgr, msg, value, NULL)) 284 279 return -EINVAL; 285 280 } 286 281 if (PP_STATE_SUPPORT_LS & *msg_id) { ··· 290 285 value = CG_SYS_BIF_MGLS_MASK; 291 286 292 287 if (smum_send_msg_to_smc_with_parameter( 293 - hwmgr, msg, value)) 288 + hwmgr, msg, value, NULL)) 294 289 return -EINVAL; 295 290 } 296 291 break; ··· 303 298 value = CG_SYS_MC_MGCG_MASK; 304 299 305 300 if (smum_send_msg_to_smc_with_parameter( 306 - hwmgr, msg, value)) 301 + hwmgr, msg, value, NULL)) 307 302 return -EINVAL; 308 303 } 309 304 ··· 314 309 value = CG_SYS_MC_MGLS_MASK; 315 310 316 311 if (smum_send_msg_to_smc_with_parameter( 317 - hwmgr, msg, value)) 312 + hwmgr, msg, value, NULL)) 318 313 return -EINVAL; 319 314 } 320 315 break; ··· 327 322 value = CG_SYS_DRM_MGCG_MASK; 328 323 329 324 if (smum_send_msg_to_smc_with_parameter( 330 - hwmgr, msg, value)) 325 + hwmgr, msg, value, NULL)) 331 326 return -EINVAL; 332 327 } 333 
328 if (PP_STATE_SUPPORT_LS & *msg_id) { ··· 337 332 value = CG_SYS_DRM_MGLS_MASK; 338 333 339 334 if (smum_send_msg_to_smc_with_parameter( 340 - hwmgr, msg, value)) 335 + hwmgr, msg, value, NULL)) 341 336 return -EINVAL; 342 337 } 343 338 break; ··· 350 345 value = CG_SYS_HDP_MGCG_MASK; 351 346 352 347 if (smum_send_msg_to_smc_with_parameter( 353 - hwmgr, msg, value)) 348 + hwmgr, msg, value, NULL)) 354 349 return -EINVAL; 355 350 } 356 351 ··· 361 356 value = CG_SYS_HDP_MGLS_MASK; 362 357 363 358 if (smum_send_msg_to_smc_with_parameter( 364 - hwmgr, msg, value)) 359 + hwmgr, msg, value, NULL)) 365 360 return -EINVAL; 366 361 } 367 362 break; ··· 374 369 value = CG_SYS_SDMA_MGCG_MASK; 375 370 376 371 if (smum_send_msg_to_smc_with_parameter( 377 - hwmgr, msg, value)) 372 + hwmgr, msg, value, NULL)) 378 373 return -EINVAL; 379 374 } 380 375 ··· 385 380 value = CG_SYS_SDMA_MGLS_MASK; 386 381 387 382 if (smum_send_msg_to_smc_with_parameter( 388 - hwmgr, msg, value)) 383 + hwmgr, msg, value, NULL)) 389 384 return -EINVAL; 390 385 } 391 386 break; ··· 398 393 value = CG_SYS_ROM_MASK; 399 394 400 395 if (smum_send_msg_to_smc_with_parameter( 401 - hwmgr, msg, value)) 396 + hwmgr, msg, value, NULL)) 402 397 return -EINVAL; 403 398 } 404 399 break; ··· 428 423 if (enable) 429 424 return smum_send_msg_to_smc_with_parameter(hwmgr, 430 425 PPSMC_MSG_GFX_CU_PG_ENABLE, 431 - adev->gfx.cu_info.number); 426 + adev->gfx.cu_info.number, 427 + NULL); 432 428 else 433 429 return smum_send_msg_to_smc(hwmgr, 434 - PPSMC_MSG_GFX_CU_PG_DISABLE); 430 + PPSMC_MSG_GFX_CU_PG_DISABLE, 431 + NULL); 435 432 }
+79 -56
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 186 186 } 187 187 188 188 if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) 189 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable); 189 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL); 190 190 191 191 return 0; 192 192 } ··· 493 493 494 494 static int smu7_reset_to_default(struct pp_hwmgr *hwmgr) 495 495 { 496 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults); 496 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL); 497 497 } 498 498 499 499 /** ··· 979 979 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 980 980 PHM_PlatformCaps_RegulatorHot)) 981 981 return smum_send_msg_to_smc(hwmgr, 982 - PPSMC_MSG_EnableVRHotGPIOInterrupt); 982 + PPSMC_MSG_EnableVRHotGPIOInterrupt, 983 + NULL); 983 984 984 985 return 0; 985 986 } ··· 997 996 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 998 997 999 998 if (data->ulv_supported) 1000 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV); 999 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL); 1001 1000 1002 1001 return 0; 1003 1002 } ··· 1007 1006 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 1008 1007 1009 1008 if (data->ulv_supported) 1010 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV); 1009 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL); 1011 1010 1012 1011 return 0; 1013 1012 } ··· 1016 1015 { 1017 1016 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1018 1017 PHM_PlatformCaps_SclkDeepSleep)) { 1019 - if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON)) 1018 + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL)) 1020 1019 PP_ASSERT_WITH_CODE(false, 1021 1020 "Attempt to enable Master Deep Sleep switch failed!", 1022 1021 return -EINVAL); 1023 1022 } else { 1024 1023 if (smum_send_msg_to_smc(hwmgr, 1025 - PPSMC_MSG_MASTER_DeepSleep_OFF)) { 1024 + PPSMC_MSG_MASTER_DeepSleep_OFF, 1025 + NULL)) { 1026 1026 
PP_ASSERT_WITH_CODE(false, 1027 1027 "Attempt to disable Master Deep Sleep switch failed!", 1028 1028 return -EINVAL); ··· 1038 1036 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1039 1037 PHM_PlatformCaps_SclkDeepSleep)) { 1040 1038 if (smum_send_msg_to_smc(hwmgr, 1041 - PPSMC_MSG_MASTER_DeepSleep_OFF)) { 1039 + PPSMC_MSG_MASTER_DeepSleep_OFF, 1040 + NULL)) { 1042 1041 PP_ASSERT_WITH_CODE(false, 1043 1042 "Attempt to disable Master Deep Sleep switch failed!", 1044 1043 return -EINVAL); ··· 1092 1089 smu7_disable_sclk_vce_handshake(hwmgr); 1093 1090 1094 1091 PP_ASSERT_WITH_CODE( 1095 - (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), 1092 + (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)), 1096 1093 "Failed to enable SCLK DPM during DPM Start Function!", 1097 1094 return -EINVAL); 1098 1095 } ··· 1104 1101 1105 1102 PP_ASSERT_WITH_CODE( 1106 1103 (0 == smum_send_msg_to_smc(hwmgr, 1107 - PPSMC_MSG_MCLKDPM_Enable)), 1104 + PPSMC_MSG_MCLKDPM_Enable, 1105 + NULL)), 1108 1106 "Failed to enable MCLK DPM during DPM Start Function!", 1109 1107 return -EINVAL); 1110 1108 ··· 1176 1172 if (0 == data->pcie_dpm_key_disabled) { 1177 1173 PP_ASSERT_WITH_CODE( 1178 1174 (0 == smum_send_msg_to_smc(hwmgr, 1179 - PPSMC_MSG_PCIeDPM_Enable)), 1175 + PPSMC_MSG_PCIeDPM_Enable, 1176 + NULL)), 1180 1177 "Failed to enable pcie DPM during DPM Start Function!", 1181 1178 return -EINVAL); 1182 1179 } ··· 1185 1180 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1186 1181 PHM_PlatformCaps_Falcon_QuickTransition)) { 1187 1182 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr, 1188 - PPSMC_MSG_EnableACDCGPIOInterrupt)), 1183 + PPSMC_MSG_EnableACDCGPIOInterrupt, 1184 + NULL)), 1189 1185 "Failed to enable AC DC GPIO Interrupt!", 1190 1186 ); 1191 1187 } ··· 1203 1197 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 1204 1198 "Trying to disable SCLK DPM when DPM is disabled", 1205 1199 return 0); 1206 - smum_send_msg_to_smc(hwmgr, 
PPSMC_MSG_DPM_Disable); 1200 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL); 1207 1201 } 1208 1202 1209 1203 /* disable MCLK dpm */ ··· 1211 1205 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), 1212 1206 "Trying to disable MCLK DPM when DPM is disabled", 1213 1207 return 0); 1214 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable); 1208 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL); 1215 1209 } 1216 1210 1217 1211 return 0; ··· 1232 1226 if (!data->pcie_dpm_key_disabled) { 1233 1227 PP_ASSERT_WITH_CODE( 1234 1228 (smum_send_msg_to_smc(hwmgr, 1235 - PPSMC_MSG_PCIeDPM_Disable) == 0), 1229 + PPSMC_MSG_PCIeDPM_Disable, 1230 + NULL) == 0), 1236 1231 "Failed to disable pcie DPM during DPM Stop Function!", 1237 1232 return -EINVAL); 1238 1233 } ··· 1244 1237 "Trying to disable voltage DPM when DPM is disabled", 1245 1238 return 0); 1246 1239 1247 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable); 1240 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL); 1248 1241 1249 1242 return 0; 1250 1243 } ··· 1395 1388 PP_ASSERT_WITH_CODE((0 == tmp_result), 1396 1389 "Failed to enable VR hot GPIO interrupt!", result = tmp_result); 1397 1390 1398 - smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay); 1391 + smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL); 1399 1392 1400 1393 tmp_result = smu7_enable_sclk_control(hwmgr); 1401 1394 PP_ASSERT_WITH_CODE((0 == tmp_result), ··· 1453 1446 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, 1454 1447 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { 1455 1448 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( 1456 - hwmgr, PPSMC_MSG_EnableAvfs), 1449 + hwmgr, PPSMC_MSG_EnableAvfs, NULL), 1457 1450 "Failed to enable AVFS!", 1458 1451 return -EINVAL); 1459 1452 } 1460 1453 } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, 1461 1454 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { 1462 1455 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( 1463 - hwmgr, PPSMC_MSG_DisableAvfs), 
1456 + hwmgr, PPSMC_MSG_DisableAvfs, NULL), 1464 1457 "Failed to disable AVFS!", 1465 1458 return -EINVAL); 1466 1459 } ··· 2616 2609 2617 2610 if (level) 2618 2611 smum_send_msg_to_smc_with_parameter(hwmgr, 2619 - PPSMC_MSG_PCIeDPM_ForceLevel, level); 2612 + PPSMC_MSG_PCIeDPM_ForceLevel, level, 2613 + NULL); 2620 2614 } 2621 2615 } 2622 2616 ··· 2631 2623 if (level) 2632 2624 smum_send_msg_to_smc_with_parameter(hwmgr, 2633 2625 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2634 - (1 << level)); 2626 + (1 << level), 2627 + NULL); 2635 2628 } 2636 2629 } 2637 2630 ··· 2646 2637 if (level) 2647 2638 smum_send_msg_to_smc_with_parameter(hwmgr, 2648 2639 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2649 - (1 << level)); 2640 + (1 << level), 2641 + NULL); 2650 2642 } 2651 2643 } 2652 2644 ··· 2666 2656 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) 2667 2657 smum_send_msg_to_smc_with_parameter(hwmgr, 2668 2658 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2669 - data->dpm_level_enable_mask.sclk_dpm_enable_mask); 2659 + data->dpm_level_enable_mask.sclk_dpm_enable_mask, 2660 + NULL); 2670 2661 } 2671 2662 2672 2663 if (!data->mclk_dpm_key_disabled) { 2673 2664 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) 2674 2665 smum_send_msg_to_smc_with_parameter(hwmgr, 2675 2666 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2676 - data->dpm_level_enable_mask.mclk_dpm_enable_mask); 2667 + data->dpm_level_enable_mask.mclk_dpm_enable_mask, 2668 + NULL); 2677 2669 } 2678 2670 2679 2671 return 0; ··· 2690 2678 2691 2679 if (!data->pcie_dpm_key_disabled) { 2692 2680 smum_send_msg_to_smc(hwmgr, 2693 - PPSMC_MSG_PCIeDPM_UnForceLevel); 2681 + PPSMC_MSG_PCIeDPM_UnForceLevel, 2682 + NULL); 2694 2683 } 2695 2684 2696 2685 return smu7_upload_dpm_level_enable_mask(hwmgr); ··· 2709 2696 data->dpm_level_enable_mask.sclk_dpm_enable_mask); 2710 2697 smum_send_msg_to_smc_with_parameter(hwmgr, 2711 2698 PPSMC_MSG_SCLKDPM_SetEnabledMask, 2712 - (1 << level)); 2699 + (1 << level), 2700 + NULL); 2713 2701 2714 2702 } 2715 2703 ··· 2720 
2706 data->dpm_level_enable_mask.mclk_dpm_enable_mask); 2721 2707 smum_send_msg_to_smc_with_parameter(hwmgr, 2722 2708 PPSMC_MSG_MCLKDPM_SetEnabledMask, 2723 - (1 << level)); 2709 + (1 << level), 2710 + NULL); 2724 2711 } 2725 2712 } 2726 2713 ··· 2731 2716 data->dpm_level_enable_mask.pcie_dpm_enable_mask); 2732 2717 smum_send_msg_to_smc_with_parameter(hwmgr, 2733 2718 PPSMC_MSG_PCIeDPM_ForceLevel, 2734 - (level)); 2719 + (level), 2720 + NULL); 2735 2721 } 2736 2722 } 2737 2723 ··· 3511 3495 (adev->asic_type != CHIP_BONAIRE) && 3512 3496 (adev->asic_type != CHIP_FIJI) && 3513 3497 (adev->asic_type != CHIP_TONGA)) { 3514 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); 3515 - tmp = smum_get_argument(hwmgr); 3498 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp); 3516 3499 *query = tmp; 3517 3500 3518 3501 if (tmp != 0) 3519 3502 return 0; 3520 3503 } 3521 3504 3522 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); 3505 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL); 3523 3506 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 3524 3507 ixSMU_PM_STATUS_95, 0); 3525 3508 3526 3509 for (i = 0; i < 10; i++) { 3527 3510 msleep(500); 3528 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); 3511 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL); 3529 3512 tmp = cgs_read_ind_register(hwmgr->device, 3530 3513 CGS_IND_REG__SMC, 3531 3514 ixSMU_PM_STATUS_95); ··· 3549 3534 3550 3535 switch (idx) { 3551 3536 case AMDGPU_PP_SENSOR_GFX_SCLK: 3552 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); 3553 - sclk = smum_get_argument(hwmgr); 3537 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk); 3554 3538 *((uint32_t *)value) = sclk; 3555 3539 *size = 4; 3556 3540 return 0; 3557 3541 case AMDGPU_PP_SENSOR_GFX_MCLK: 3558 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); 3559 - mclk = smum_get_argument(hwmgr); 3542 + 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk); 3560 3543 *((uint32_t *)value) = mclk; 3561 3544 *size = 4; 3562 3545 return 0; ··· 3743 3730 "Trying to freeze SCLK DPM when DPM is disabled", 3744 3731 ); 3745 3732 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3746 - PPSMC_MSG_SCLKDPM_FreezeLevel), 3733 + PPSMC_MSG_SCLKDPM_FreezeLevel, 3734 + NULL), 3747 3735 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", 3748 3736 return -EINVAL); 3749 3737 } ··· 3756 3742 "Trying to freeze MCLK DPM when DPM is disabled", 3757 3743 ); 3758 3744 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3759 - PPSMC_MSG_MCLKDPM_FreezeLevel), 3745 + PPSMC_MSG_MCLKDPM_FreezeLevel, 3746 + NULL), 3760 3747 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", 3761 3748 return -EINVAL); 3762 3749 } ··· 3896 3881 "Trying to Unfreeze SCLK DPM when DPM is disabled", 3897 3882 ); 3898 3883 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3899 - PPSMC_MSG_SCLKDPM_UnfreezeLevel), 3884 + PPSMC_MSG_SCLKDPM_UnfreezeLevel, 3885 + NULL), 3900 3886 "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", 3901 3887 return -EINVAL); 3902 3888 } ··· 3909 3893 "Trying to Unfreeze MCLK DPM when DPM is disabled", 3910 3894 ); 3911 3895 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, 3912 - PPSMC_MSG_MCLKDPM_UnfreezeLevel), 3896 + PPSMC_MSG_MCLKDPM_UnfreezeLevel, 3897 + NULL), 3913 3898 "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", 3914 3899 return -EINVAL); 3915 3900 } ··· 3963 3946 if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { 3964 3947 if (hwmgr->chip_id == CHIP_VEGAM) 3965 3948 smum_send_msg_to_smc_with_parameter(hwmgr, 3966 - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2); 3949 + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2, 3950 + NULL); 3967 3951 else 3968 3952 smum_send_msg_to_smc_with_parameter(hwmgr, 3969 - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); 
3953 + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2, 3954 + NULL); 3970 3955 } 3971 - return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; 3956 + return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL; 3972 3957 } 3973 3958 3974 3959 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) ··· 4056 4037 advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; 4057 4038 4058 4039 return smum_send_msg_to_smc_with_parameter(hwmgr, 4059 - PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); 4040 + PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm, 4041 + NULL); 4060 4042 } 4061 4043 4062 4044 static int ··· 4065 4045 { 4066 4046 PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; 4067 4047 4068 - return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1; 4048 + return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ? 0 : -1; 4069 4049 } 4070 4050 4071 4051 static int ··· 4149 4129 advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; 4150 4130 4151 4131 return smum_send_msg_to_smc_with_parameter(hwmgr, 4152 - PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); 4132 + PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm, 4133 + NULL); 4153 4134 } 4154 4135 4155 4136 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { ··· 4280 4259 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4281 4260 (hwmgr->chip_id == CHIP_POLARIS11) || 4282 4261 (hwmgr->chip_id == CHIP_POLARIS12)) 4283 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC); 4262 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL); 4284 4263 } else { 4285 4264 data->mem_latency_high = 330; 4286 4265 data->mem_latency_low = 330; 4287 4266 if ((hwmgr->chip_id == CHIP_POLARIS10) || 4288 4267 (hwmgr->chip_id == CHIP_POLARIS11) || 4289 4268 (hwmgr->chip_id == CHIP_POLARIS12)) 4290 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC); 4269 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL); 4291 4270 } 4292 4271 4293 
4272 return 0; ··· 4431 4410 if (!data->sclk_dpm_key_disabled) 4432 4411 smum_send_msg_to_smc_with_parameter(hwmgr, 4433 4412 PPSMC_MSG_SCLKDPM_SetEnabledMask, 4434 - data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); 4413 + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask, 4414 + NULL); 4435 4415 break; 4436 4416 case PP_MCLK: 4437 4417 if (!data->mclk_dpm_key_disabled) 4438 4418 smum_send_msg_to_smc_with_parameter(hwmgr, 4439 4419 PPSMC_MSG_MCLKDPM_SetEnabledMask, 4440 - data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); 4420 + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask, 4421 + NULL); 4441 4422 break; 4442 4423 case PP_PCIE: 4443 4424 { ··· 4447 4424 4448 4425 if (!data->pcie_dpm_key_disabled) { 4449 4426 if (fls(tmp) != ffs(tmp)) 4450 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel); 4427 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel, 4428 + NULL); 4451 4429 else 4452 4430 smum_send_msg_to_smc_with_parameter(hwmgr, 4453 4431 PPSMC_MSG_PCIeDPM_ForceLevel, 4454 - fls(tmp) - 1); 4432 + fls(tmp) - 1, 4433 + NULL); 4455 4434 } 4456 4435 break; 4457 4436 } ··· 4479 4454 4480 4455 switch (type) { 4481 4456 case PP_SCLK: 4482 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); 4483 - clock = smum_get_argument(hwmgr); 4457 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); 4484 4458 4485 4459 for (i = 0; i < sclk_table->count; i++) { 4486 4460 if (clock > sclk_table->dpm_levels[i].value) ··· 4494 4470 (i == now) ? "*" : ""); 4495 4471 break; 4496 4472 case PP_MCLK: 4497 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); 4498 - clock = smum_get_argument(hwmgr); 4473 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock); 4499 4474 4500 4475 for (i = 0; i < mclk_table->count; i++) { 4501 4476 if (clock > mclk_table->dpm_levels[i].value)
+28 -12
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
··· 887 887 didt_block |= block_en << TCP_Enable_SHIFT; 888 888 889 889 if (enable) 890 - result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block); 890 + result = smum_send_msg_to_smc_with_parameter(hwmgr, 891 + PPSMC_MSG_Didt_Block_Function, 892 + didt_block, 893 + NULL); 891 894 892 895 return result; 893 896 } ··· 1012 1009 1013 1010 if (hwmgr->chip_id == CHIP_POLARIS11) { 1014 1011 result = smum_send_msg_to_smc(hwmgr, 1015 - (uint16_t)(PPSMC_MSG_EnableDpmDidt)); 1012 + (uint16_t)(PPSMC_MSG_EnableDpmDidt), 1013 + NULL); 1016 1014 PP_ASSERT_WITH_CODE((0 == result), 1017 1015 "Failed to enable DPM DIDT.", goto error); 1018 1016 } ··· 1046 1042 goto error); 1047 1043 if (hwmgr->chip_id == CHIP_POLARIS11) { 1048 1044 result = smum_send_msg_to_smc(hwmgr, 1049 - (uint16_t)(PPSMC_MSG_DisableDpmDidt)); 1045 + (uint16_t)(PPSMC_MSG_DisableDpmDidt), 1046 + NULL); 1050 1047 PP_ASSERT_WITH_CODE((0 == result), 1051 1048 "Failed to disable DPM DIDT.", goto error); 1052 1049 } ··· 1068 1063 if (PP_CAP(PHM_PlatformCaps_CAC)) { 1069 1064 int smc_result; 1070 1065 smc_result = smum_send_msg_to_smc(hwmgr, 1071 - (uint16_t)(PPSMC_MSG_EnableCac)); 1066 + (uint16_t)(PPSMC_MSG_EnableCac), 1067 + NULL); 1072 1068 PP_ASSERT_WITH_CODE((0 == smc_result), 1073 1069 "Failed to enable CAC in SMC.", result = -1); 1074 1070 ··· 1085 1079 1086 1080 if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) { 1087 1081 int smc_result = smum_send_msg_to_smc(hwmgr, 1088 - (uint16_t)(PPSMC_MSG_DisableCac)); 1082 + (uint16_t)(PPSMC_MSG_DisableCac), 1083 + NULL); 1089 1084 PP_ASSERT_WITH_CODE((smc_result == 0), 1090 1085 "Failed to disable CAC in SMC.", result = -1); 1091 1086 ··· 1102 1095 if (data->power_containment_features & 1103 1096 POWERCONTAINMENT_FEATURE_PkgPwrLimit) 1104 1097 return smum_send_msg_to_smc_with_parameter(hwmgr, 1105 - PPSMC_MSG_PkgPwrSetLimit, n<<8); 1098 + PPSMC_MSG_PkgPwrSetLimit, 1099 + n<<8, 1100 + NULL); 1106 1101 return 0; 1107 1102 } 
1108 1103 ··· 1112 1103 uint32_t target_tdp) 1113 1104 { 1114 1105 return smum_send_msg_to_smc_with_parameter(hwmgr, 1115 - PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); 1106 + PPSMC_MSG_OverDriveSetTargetTdp, 1107 + target_tdp, 1108 + NULL); 1116 1109 } 1117 1110 1118 1111 int smu7_enable_power_containment(struct pp_hwmgr *hwmgr) ··· 1135 1124 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { 1136 1125 if (data->enable_tdc_limit_feature) { 1137 1126 smc_result = smum_send_msg_to_smc(hwmgr, 1138 - (uint16_t)(PPSMC_MSG_TDCLimitEnable)); 1127 + (uint16_t)(PPSMC_MSG_TDCLimitEnable), 1128 + NULL); 1139 1129 PP_ASSERT_WITH_CODE((0 == smc_result), 1140 1130 "Failed to enable TDCLimit in SMC.", result = -1;); 1141 1131 if (0 == smc_result) ··· 1146 1134 1147 1135 if (data->enable_pkg_pwr_tracking_feature) { 1148 1136 smc_result = smum_send_msg_to_smc(hwmgr, 1149 - (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); 1137 + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable), 1138 + NULL); 1150 1139 PP_ASSERT_WITH_CODE((0 == smc_result), 1151 1140 "Failed to enable PkgPwrTracking in SMC.", result = -1;); 1152 1141 if (0 == smc_result) { ··· 1176 1163 if (data->power_containment_features & 1177 1164 POWERCONTAINMENT_FEATURE_TDCLimit) { 1178 1165 smc_result = smum_send_msg_to_smc(hwmgr, 1179 - (uint16_t)(PPSMC_MSG_TDCLimitDisable)); 1166 + (uint16_t)(PPSMC_MSG_TDCLimitDisable), 1167 + NULL); 1180 1168 PP_ASSERT_WITH_CODE((smc_result == 0), 1181 1169 "Failed to disable TDCLimit in SMC.", 1182 1170 result = smc_result); ··· 1186 1172 if (data->power_containment_features & 1187 1173 POWERCONTAINMENT_FEATURE_DTE) { 1188 1174 smc_result = smum_send_msg_to_smc(hwmgr, 1189 - (uint16_t)(PPSMC_MSG_DisableDTE)); 1175 + (uint16_t)(PPSMC_MSG_DisableDTE), 1176 + NULL); 1190 1177 PP_ASSERT_WITH_CODE((smc_result == 0), 1191 1178 "Failed to disable DTE in SMC.", 1192 1179 result = smc_result); ··· 1196 1181 if (data->power_containment_features & 1197 1182 POWERCONTAINMENT_FEATURE_PkgPwrLimit) { 1198 1183 
smc_result = smum_send_msg_to_smc(hwmgr, 1199 - (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); 1184 + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable), 1185 + NULL); 1200 1186 PP_ASSERT_WITH_CODE((smc_result == 0), 1201 1187 "Failed to disable PkgPwrTracking in SMC.", 1202 1188 result = smc_result);
+7 -6
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
··· 152 152 153 153 if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) { 154 154 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl, 155 - FAN_CONTROL_FUZZY); 155 + FAN_CONTROL_FUZZY, NULL); 156 156 157 157 if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM)) 158 158 hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr, ··· 165 165 166 166 } else { 167 167 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl, 168 - FAN_CONTROL_TABLE); 168 + FAN_CONTROL_TABLE, NULL); 169 169 } 170 170 171 171 if (!result && hwmgr->thermal_controller. ··· 173 173 result = smum_send_msg_to_smc_with_parameter(hwmgr, 174 174 PPSMC_MSG_SetFanTemperatureTarget, 175 175 hwmgr->thermal_controller. 176 - advanceFanControlParameters.ucTargetTemperature); 176 + advanceFanControlParameters.ucTargetTemperature, 177 + NULL); 177 178 hwmgr->fan_ctrl_enabled = true; 178 179 179 180 return result; ··· 184 183 int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) 185 184 { 186 185 hwmgr->fan_ctrl_enabled = false; 187 - return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl); 186 + return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL); 188 187 } 189 188 190 189 /** ··· 373 372 CG_THERMAL_INT, THERM_INT_MASK, alert); 374 373 375 374 /* send message to SMU to enable internal thermal interrupts */ 376 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable); 375 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL); 377 376 } 378 377 379 378 /** ··· 391 390 CG_THERMAL_INT, THERM_INT_MASK, alert); 392 391 393 392 /* send message to SMU to disable internal thermal interrupts */ 394 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable); 393 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL); 395 394 } 396 395 397 396 /**
+102 -55
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
··· 162 162 struct smu8_hwmgr *data = hwmgr->backend; 163 163 164 164 if (data->max_sclk_level == 0) { 165 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel); 166 - data->max_sclk_level = smum_get_argument(hwmgr) + 1; 165 + smum_send_msg_to_smc(hwmgr, 166 + PPSMC_MSG_GetMaxSclkLevel, 167 + &data->max_sclk_level); 168 + data->max_sclk_level += 1; 167 169 } 168 170 169 171 return data->max_sclk_level; ··· 582 580 struct smu8_hwmgr *data = hwmgr->backend; 583 581 struct phm_uvd_clock_voltage_dependency_table *table = 584 582 hwmgr->dyn_state.uvd_clock_voltage_dependency_table; 585 - unsigned long clock = 0, level; 583 + unsigned long clock = 0; 584 + uint32_t level; 586 585 587 586 if (NULL == table || table->count <= 0) 588 587 return -EINVAL; ··· 591 588 data->uvd_dpm.soft_min_clk = 0; 592 589 data->uvd_dpm.hard_min_clk = 0; 593 590 594 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel); 595 - level = smum_get_argument(hwmgr); 591 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level); 596 592 597 593 if (level < table->count) 598 594 clock = table->entries[level].vclk; ··· 609 607 struct smu8_hwmgr *data = hwmgr->backend; 610 608 struct phm_vce_clock_voltage_dependency_table *table = 611 609 hwmgr->dyn_state.vce_clock_voltage_dependency_table; 612 - unsigned long clock = 0, level; 610 + unsigned long clock = 0; 611 + uint32_t level; 613 612 614 613 if (NULL == table || table->count <= 0) 615 614 return -EINVAL; ··· 618 615 data->vce_dpm.soft_min_clk = 0; 619 616 data->vce_dpm.hard_min_clk = 0; 620 617 621 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel); 622 - level = smum_get_argument(hwmgr); 618 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level); 623 619 624 620 if (level < table->count) 625 621 clock = table->entries[level].ecclk; ··· 636 634 struct smu8_hwmgr *data = hwmgr->backend; 637 635 struct phm_acp_clock_voltage_dependency_table *table = 638 636 hwmgr->dyn_state.acp_clock_voltage_dependency_table; 639 - unsigned 
long clock = 0, level; 637 + unsigned long clock = 0; 638 + uint32_t level; 640 639 641 640 if (NULL == table || table->count <= 0) 642 641 return -EINVAL; ··· 645 642 data->acp_dpm.soft_min_clk = 0; 646 643 data->acp_dpm.hard_min_clk = 0; 647 644 648 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel); 649 - level = smum_get_argument(hwmgr); 645 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level); 650 646 651 647 if (level < table->count) 652 648 clock = table->entries[level].acpclk; ··· 667 665 #ifdef CONFIG_DRM_AMD_ACP 668 666 data->acp_power_gated = false; 669 667 #else 670 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF); 668 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL); 671 669 data->acp_power_gated = true; 672 670 #endif 673 671 ··· 710 708 PPSMC_MSG_SetSclkHardMin, 711 709 smu8_get_sclk_level(hwmgr, 712 710 data->sclk_dpm.hard_min_clk, 713 - PPSMC_MSG_SetSclkHardMin)); 711 + PPSMC_MSG_SetSclkHardMin), 712 + NULL); 714 713 } 715 714 716 715 clock = data->sclk_dpm.soft_min_clk; ··· 734 731 PPSMC_MSG_SetSclkSoftMin, 735 732 smu8_get_sclk_level(hwmgr, 736 733 data->sclk_dpm.soft_min_clk, 737 - PPSMC_MSG_SetSclkSoftMin)); 734 + PPSMC_MSG_SetSclkSoftMin), 735 + NULL); 738 736 } 739 737 740 738 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, ··· 746 742 PPSMC_MSG_SetSclkSoftMax, 747 743 smu8_get_sclk_level(hwmgr, 748 744 data->sclk_dpm.soft_max_clk, 749 - PPSMC_MSG_SetSclkSoftMax)); 745 + PPSMC_MSG_SetSclkSoftMax), 746 + NULL); 750 747 } 751 748 752 749 return 0; ··· 765 760 766 761 smum_send_msg_to_smc_with_parameter(hwmgr, 767 762 PPSMC_MSG_SetMinDeepSleepSclk, 768 - clks); 763 + clks, 764 + NULL); 769 765 } 770 766 771 767 return 0; ··· 779 773 780 774 smum_send_msg_to_smc_with_parameter(hwmgr, 781 775 PPSMC_MSG_SetWatermarkFrequency, 782 - data->sclk_dpm.soft_max_clk); 776 + data->sclk_dpm.soft_max_clk, 777 + NULL); 783 778 784 779 return 0; 785 780 } ··· 795 788 796 789 return 
smum_send_msg_to_smc_with_parameter(hwmgr, 797 790 PPSMC_MSG_EnableLowMemoryPstate, 798 - (lock ? 1 : 0)); 791 + (lock ? 1 : 0), 792 + NULL); 799 793 } else { 800 794 PP_DBG_LOG("disable Low Memory PState.\n"); 801 795 802 796 return smum_send_msg_to_smc_with_parameter(hwmgr, 803 797 PPSMC_MSG_DisableLowMemoryPstate, 804 - (lock ? 1 : 0)); 798 + (lock ? 1 : 0), 799 + NULL); 805 800 } 806 801 } 807 802 ··· 823 814 ret = smum_send_msg_to_smc_with_parameter( 824 815 hwmgr, 825 816 PPSMC_MSG_DisableAllSmuFeatures, 826 - dpm_features); 817 + dpm_features, 818 + NULL); 827 819 if (ret == 0) 828 820 data->is_nb_dpm_enabled = false; 829 821 } ··· 845 835 ret = smum_send_msg_to_smc_with_parameter( 846 836 hwmgr, 847 837 PPSMC_MSG_EnableAllSmuFeatures, 848 - dpm_features); 838 + dpm_features, 839 + NULL); 849 840 if (ret == 0) 850 841 data->is_nb_dpm_enabled = true; 851 842 } ··· 964 953 965 954 return smum_send_msg_to_smc_with_parameter(hwmgr, 966 955 PPSMC_MSG_EnableAllSmuFeatures, 967 - SCLK_DPM_MASK); 956 + SCLK_DPM_MASK, 957 + NULL); 968 958 } 969 959 970 960 static int smu8_stop_dpm(struct pp_hwmgr *hwmgr) ··· 979 967 data->dpm_flags &= ~DPMFlags_SCLK_Enabled; 980 968 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 981 969 PPSMC_MSG_DisableAllSmuFeatures, 982 - dpm_features); 970 + dpm_features, 971 + NULL); 983 972 } 984 973 return ret; 985 974 } ··· 996 983 PPSMC_MSG_SetSclkSoftMin, 997 984 smu8_get_sclk_level(hwmgr, 998 985 data->sclk_dpm.soft_min_clk, 999 - PPSMC_MSG_SetSclkSoftMin)); 986 + PPSMC_MSG_SetSclkSoftMin), 987 + NULL); 1000 988 1001 989 smum_send_msg_to_smc_with_parameter(hwmgr, 1002 990 PPSMC_MSG_SetSclkSoftMax, 1003 991 smu8_get_sclk_level(hwmgr, 1004 992 data->sclk_dpm.soft_max_clk, 1005 - PPSMC_MSG_SetSclkSoftMax)); 993 + PPSMC_MSG_SetSclkSoftMax), 994 + NULL); 1006 995 1007 996 return 0; 1008 997 } ··· 1142 1127 PPSMC_MSG_SetSclkSoftMin, 1143 1128 smu8_get_sclk_level(hwmgr, 1144 1129 data->sclk_dpm.soft_max_clk, 1145 - PPSMC_MSG_SetSclkSoftMin)); 
1130 + PPSMC_MSG_SetSclkSoftMin), 1131 + NULL); 1146 1132 1147 1133 smum_send_msg_to_smc_with_parameter(hwmgr, 1148 1134 PPSMC_MSG_SetSclkSoftMax, 1149 1135 smu8_get_sclk_level(hwmgr, 1150 1136 data->sclk_dpm.soft_max_clk, 1151 - PPSMC_MSG_SetSclkSoftMax)); 1137 + PPSMC_MSG_SetSclkSoftMax), 1138 + NULL); 1152 1139 1153 1140 return 0; 1154 1141 } ··· 1184 1167 PPSMC_MSG_SetSclkSoftMin, 1185 1168 smu8_get_sclk_level(hwmgr, 1186 1169 data->sclk_dpm.soft_min_clk, 1187 - PPSMC_MSG_SetSclkSoftMin)); 1170 + PPSMC_MSG_SetSclkSoftMin), 1171 + NULL); 1188 1172 1189 1173 smum_send_msg_to_smc_with_parameter(hwmgr, 1190 1174 PPSMC_MSG_SetSclkSoftMax, 1191 1175 smu8_get_sclk_level(hwmgr, 1192 1176 data->sclk_dpm.soft_max_clk, 1193 - PPSMC_MSG_SetSclkSoftMax)); 1177 + PPSMC_MSG_SetSclkSoftMax), 1178 + NULL); 1194 1179 1195 1180 return 0; 1196 1181 } ··· 1205 1186 PPSMC_MSG_SetSclkSoftMax, 1206 1187 smu8_get_sclk_level(hwmgr, 1207 1188 data->sclk_dpm.soft_min_clk, 1208 - PPSMC_MSG_SetSclkSoftMax)); 1189 + PPSMC_MSG_SetSclkSoftMax), 1190 + NULL); 1209 1191 1210 1192 smum_send_msg_to_smc_with_parameter(hwmgr, 1211 1193 PPSMC_MSG_SetSclkSoftMin, 1212 1194 smu8_get_sclk_level(hwmgr, 1213 1195 data->sclk_dpm.soft_min_clk, 1214 - PPSMC_MSG_SetSclkSoftMin)); 1196 + PPSMC_MSG_SetSclkSoftMin), 1197 + NULL); 1215 1198 1216 1199 return 0; 1217 1200 } ··· 1248 1227 static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) 1249 1228 { 1250 1229 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) 1251 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); 1230 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL); 1252 1231 return 0; 1253 1232 } 1254 1233 ··· 1258 1237 return smum_send_msg_to_smc_with_parameter( 1259 1238 hwmgr, 1260 1239 PPSMC_MSG_UVDPowerON, 1261 - PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0); 1240 + PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 
1 : 0, 1241 + NULL); 1262 1242 } 1263 1243 1264 1244 return 0; ··· 1281 1259 PPSMC_MSG_SetEclkHardMin, 1282 1260 smu8_get_eclk_level(hwmgr, 1283 1261 data->vce_dpm.hard_min_clk, 1284 - PPSMC_MSG_SetEclkHardMin)); 1262 + PPSMC_MSG_SetEclkHardMin), 1263 + NULL); 1285 1264 } else { 1286 1265 1287 1266 smum_send_msg_to_smc_with_parameter(hwmgr, 1288 - PPSMC_MSG_SetEclkHardMin, 0); 1267 + PPSMC_MSG_SetEclkHardMin, 1268 + 0, 1269 + NULL); 1289 1270 /* disable ECLK DPM 0. Otherwise VCE could hang if 1290 1271 * switching SCLK from DPM 0 to 6/7 */ 1291 1272 smum_send_msg_to_smc_with_parameter(hwmgr, 1292 - PPSMC_MSG_SetEclkSoftMin, 1); 1273 + PPSMC_MSG_SetEclkSoftMin, 1274 + 1, 1275 + NULL); 1293 1276 } 1294 1277 return 0; 1295 1278 } ··· 1303 1276 { 1304 1277 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1305 1278 return smum_send_msg_to_smc(hwmgr, 1306 - PPSMC_MSG_VCEPowerOFF); 1279 + PPSMC_MSG_VCEPowerOFF, 1280 + NULL); 1307 1281 return 0; 1308 1282 } 1309 1283 ··· 1312 1284 { 1313 1285 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating)) 1314 1286 return smum_send_msg_to_smc(hwmgr, 1315 - PPSMC_MSG_VCEPowerON); 1287 + PPSMC_MSG_VCEPowerON, 1288 + NULL); 1316 1289 return 0; 1317 1290 } 1318 1291 ··· 1464 1435 1465 1436 smum_send_msg_to_smc_with_parameter(hwmgr, 1466 1437 PPSMC_MSG_SetDisplaySizePowerParams, 1467 - data); 1438 + data, 1439 + NULL); 1468 1440 } 1469 1441 1470 1442 return 0; ··· 1527 1497 case PP_SCLK: 1528 1498 smum_send_msg_to_smc_with_parameter(hwmgr, 1529 1499 PPSMC_MSG_SetSclkSoftMin, 1530 - mask); 1500 + mask, 1501 + NULL); 1531 1502 smum_send_msg_to_smc_with_parameter(hwmgr, 1532 1503 PPSMC_MSG_SetSclkSoftMax, 1533 - mask); 1504 + mask, 1505 + NULL); 1534 1506 break; 1535 1507 default: 1536 1508 break; ··· 1785 1753 *((uint32_t *)value) = 0; 1786 1754 return 0; 1787 1755 case AMDGPU_PP_SENSOR_GPU_LOAD: 1788 - result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity); 1756 + result = smum_send_msg_to_smc(hwmgr, 1757 + 
PPSMC_MSG_GetAverageGraphicsActivity, 1758 + &activity_percent); 1789 1759 if (0 == result) { 1790 - activity_percent = smum_get_argument(hwmgr); 1791 1760 activity_percent = activity_percent > 100 ? 100 : activity_percent; 1792 1761 } else { 1793 1762 activity_percent = 50; ··· 1818 1785 { 1819 1786 smum_send_msg_to_smc_with_parameter(hwmgr, 1820 1787 PPSMC_MSG_DramAddrHiVirtual, 1821 - mc_addr_hi); 1788 + mc_addr_hi, 1789 + NULL); 1822 1790 smum_send_msg_to_smc_with_parameter(hwmgr, 1823 1791 PPSMC_MSG_DramAddrLoVirtual, 1824 - mc_addr_low); 1792 + mc_addr_low, 1793 + NULL); 1825 1794 smum_send_msg_to_smc_with_parameter(hwmgr, 1826 1795 PPSMC_MSG_DramAddrHiPhysical, 1827 - virtual_addr_hi); 1796 + virtual_addr_hi, 1797 + NULL); 1828 1798 smum_send_msg_to_smc_with_parameter(hwmgr, 1829 1799 PPSMC_MSG_DramAddrLoPhysical, 1830 - virtual_addr_low); 1800 + virtual_addr_low, 1801 + NULL); 1831 1802 1832 1803 smum_send_msg_to_smc_with_parameter(hwmgr, 1833 1804 PPSMC_MSG_DramBufferSize, 1834 - size); 1805 + size, 1806 + NULL); 1835 1807 return 0; 1836 1808 } 1837 1809 ··· 1865 1827 data->dpm_flags |= DPMFlags_UVD_Enabled; 1866 1828 dpm_features |= UVD_DPM_MASK; 1867 1829 smum_send_msg_to_smc_with_parameter(hwmgr, 1868 - PPSMC_MSG_EnableAllSmuFeatures, dpm_features); 1830 + PPSMC_MSG_EnableAllSmuFeatures, 1831 + dpm_features, 1832 + NULL); 1869 1833 } else { 1870 1834 dpm_features |= UVD_DPM_MASK; 1871 1835 data->dpm_flags &= ~DPMFlags_UVD_Enabled; 1872 1836 smum_send_msg_to_smc_with_parameter(hwmgr, 1873 - PPSMC_MSG_DisableAllSmuFeatures, dpm_features); 1837 + PPSMC_MSG_DisableAllSmuFeatures, 1838 + dpm_features, 1839 + NULL); 1874 1840 } 1875 1841 return 0; 1876 1842 } ··· 1896 1854 PPSMC_MSG_SetUvdHardMin, 1897 1855 smu8_get_uvd_level(hwmgr, 1898 1856 data->uvd_dpm.hard_min_clk, 1899 - PPSMC_MSG_SetUvdHardMin)); 1857 + PPSMC_MSG_SetUvdHardMin), 1858 + NULL); 1900 1859 1901 1860 smu8_enable_disable_uvd_dpm(hwmgr, true); 1902 1861 } else { ··· 1921 1878 data->dpm_flags 
|= DPMFlags_VCE_Enabled; 1922 1879 dpm_features |= VCE_DPM_MASK; 1923 1880 smum_send_msg_to_smc_with_parameter(hwmgr, 1924 - PPSMC_MSG_EnableAllSmuFeatures, dpm_features); 1881 + PPSMC_MSG_EnableAllSmuFeatures, 1882 + dpm_features, 1883 + NULL); 1925 1884 } else { 1926 1885 dpm_features |= VCE_DPM_MASK; 1927 1886 data->dpm_flags &= ~DPMFlags_VCE_Enabled; 1928 1887 smum_send_msg_to_smc_with_parameter(hwmgr, 1929 - PPSMC_MSG_DisableAllSmuFeatures, dpm_features); 1888 + PPSMC_MSG_DisableAllSmuFeatures, 1889 + dpm_features, 1890 + NULL); 1930 1891 } 1931 1892 1932 1893 return 0; ··· 1945 1898 return; 1946 1899 1947 1900 if (bgate) 1948 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF); 1901 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL); 1949 1902 else 1950 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON); 1903 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL); 1951 1904 } 1952 1905 1953 1906 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+3 -1
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
··· 557 557 if (req_vddc <= vddc_table->entries[i].vddc) { 558 558 req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); 559 559 smum_send_msg_to_smc_with_parameter(hwmgr, 560 - PPSMC_MSG_VddC_Request, req_volt); 560 + PPSMC_MSG_VddC_Request, 561 + req_volt, 562 + NULL); 561 563 return; 562 564 } 563 565 }
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
··· 98 98 if (state == BACO_STATE_IN) { 99 99 if (soc15_baco_program_registers(hwmgr, pre_baco_tbl, 100 100 ARRAY_SIZE(pre_baco_tbl))) { 101 - if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco)) 101 + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL)) 102 102 return -EINVAL; 103 103 104 104 if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
+78 -63
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 484 484 if (data->registry_data.vr0hot_enabled) 485 485 data->smu_features[GNLD_VR0HOT].supported = true; 486 486 487 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); 488 - hwmgr->smu_version = smum_get_argument(hwmgr); 487 + smum_send_msg_to_smc(hwmgr, 488 + PPSMC_MSG_GetSmuVersion, 489 + &hwmgr->smu_version); 489 490 /* ACG firmware has major version 5 */ 490 491 if ((hwmgr->smu_version & 0xff000000) == 0x5000000) 491 492 data->smu_features[GNLD_ACG].supported = true; ··· 504 503 data->smu_features[GNLD_PCC_LIMIT].supported = true; 505 504 506 505 /* Get the SN to turn into a Unique ID */ 507 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); 508 - top32 = smum_get_argument(hwmgr); 509 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); 510 - bottom32 = smum_get_argument(hwmgr); 506 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); 507 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); 511 508 512 509 adev->unique_id = ((uint64_t)bottom32 << 32) | top32; 513 510 } ··· 992 993 "Failed to set up led dpm config!", 993 994 return -EINVAL); 994 995 995 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0); 996 + smum_send_msg_to_smc_with_parameter(hwmgr, 997 + PPSMC_MSG_NumOfDisplays, 998 + 0, 999 + NULL); 996 1000 997 1001 return 0; 998 1002 } ··· 2305 2303 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) 2306 2304 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; 2307 2305 2308 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg); 2306 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL); 2309 2307 2310 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc); 2311 - agc_btc_response = smum_get_argument(hwmgr); 2308 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response); 2312 2309 2313 2310 if (1 == agc_btc_response) { 2314 2311 if (1 == data->acg_loop_state) 2315 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop); 2312 + 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL); 2316 2313 else if (2 == data->acg_loop_state) 2317 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop); 2314 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL); 2318 2315 if (0 == vega10_enable_smc_features(hwmgr, true, 2319 2316 data->smu_features[GNLD_ACG].smu_feature_bitmap)) 2320 2317 data->smu_features[GNLD_ACG].enabled = true; ··· 2430 2429 struct vega10_hwmgr *data = hwmgr->backend; 2431 2430 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table); 2432 2431 2433 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); 2434 - top32 = smum_get_argument(hwmgr); 2432 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); 2435 2433 2436 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); 2437 - bottom32 = smum_get_argument(hwmgr); 2434 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); 2438 2435 2439 2436 serial_number = ((uint64_t)bottom32 << 32) | top32; 2440 2437 ··· 2609 2610 if (0 != boot_up_values.usVddc) { 2610 2611 smum_send_msg_to_smc_with_parameter(hwmgr, 2611 2612 PPSMC_MSG_SetFloorSocVoltage, 2612 - (boot_up_values.usVddc * 4)); 2613 + (boot_up_values.usVddc * 4), 2614 + NULL); 2613 2615 data->vbios_boot_state.bsoc_vddc_lock = true; 2614 2616 } else { 2615 2617 data->vbios_boot_state.bsoc_vddc_lock = false; 2616 2618 } 2617 2619 smum_send_msg_to_smc_with_parameter(hwmgr, 2618 2620 PPSMC_MSG_SetMinDeepSleepDcefclk, 2619 - (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); 2621 + (uint32_t)(data->vbios_boot_state.dcef_clock / 100), 2622 + NULL); 2620 2623 } 2621 2624 2622 2625 result = vega10_populate_avfs_parameters(hwmgr); ··· 2905 2904 2906 2905 if (data->vbios_boot_state.bsoc_vddc_lock) { 2907 2906 smum_send_msg_to_smc_with_parameter(hwmgr, 2908 - PPSMC_MSG_SetFloorSocVoltage, 0); 2907 + PPSMC_MSG_SetFloorSocVoltage, 0, 2908 + NULL); 2909 2909 
data->vbios_boot_state.bsoc_vddc_lock = false; 2910 2910 } 2911 2911 ··· 2949 2947 vega10_enable_disable_PCC_limit_feature(hwmgr, true); 2950 2948 2951 2949 smum_send_msg_to_smc_with_parameter(hwmgr, 2952 - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); 2950 + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry, 2951 + NULL); 2953 2952 2954 2953 tmp_result = vega10_construct_voltage_tables(hwmgr); 2955 2954 PP_ASSERT_WITH_CODE(!tmp_result, ··· 3531 3528 data->dpm_table.gfx_table.dpm_state.soft_min_level) { 3532 3529 smum_send_msg_to_smc_with_parameter(hwmgr, 3533 3530 PPSMC_MSG_SetSoftMinGfxclkByIndex, 3534 - data->smc_state_table.gfx_boot_level); 3531 + data->smc_state_table.gfx_boot_level, 3532 + NULL); 3535 3533 3536 3534 data->dpm_table.gfx_table.dpm_state.soft_min_level = 3537 3535 data->smc_state_table.gfx_boot_level; ··· 3547 3543 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); 3548 3544 smum_send_msg_to_smc_with_parameter(hwmgr, 3549 3545 PPSMC_MSG_SetSoftMinSocclkByIndex, 3550 - socclk_idx); 3546 + socclk_idx, 3547 + NULL); 3551 3548 } else { 3552 3549 smum_send_msg_to_smc_with_parameter(hwmgr, 3553 3550 PPSMC_MSG_SetSoftMinUclkByIndex, 3554 - data->smc_state_table.mem_boot_level); 3551 + data->smc_state_table.mem_boot_level, 3552 + NULL); 3555 3553 } 3556 3554 data->dpm_table.mem_table.dpm_state.soft_min_level = 3557 3555 data->smc_state_table.mem_boot_level; ··· 3568 3562 data->dpm_table.soc_table.dpm_state.soft_min_level) { 3569 3563 smum_send_msg_to_smc_with_parameter(hwmgr, 3570 3564 PPSMC_MSG_SetSoftMinSocclkByIndex, 3571 - data->smc_state_table.soc_boot_level); 3565 + data->smc_state_table.soc_boot_level, 3566 + NULL); 3572 3567 data->dpm_table.soc_table.dpm_state.soft_min_level = 3573 3568 data->smc_state_table.soc_boot_level; 3574 3569 } ··· 3589 3582 data->dpm_table.gfx_table.dpm_state.soft_max_level) { 3590 3583 smum_send_msg_to_smc_with_parameter(hwmgr, 3591 3584 PPSMC_MSG_SetSoftMaxGfxclkByIndex, 3592 - 
data->smc_state_table.gfx_max_level); 3585 + data->smc_state_table.gfx_max_level, 3586 + NULL); 3593 3587 data->dpm_table.gfx_table.dpm_state.soft_max_level = 3594 3588 data->smc_state_table.gfx_max_level; 3595 3589 } ··· 3601 3593 data->dpm_table.mem_table.dpm_state.soft_max_level) { 3602 3594 smum_send_msg_to_smc_with_parameter(hwmgr, 3603 3595 PPSMC_MSG_SetSoftMaxUclkByIndex, 3604 - data->smc_state_table.mem_max_level); 3596 + data->smc_state_table.mem_max_level, 3597 + NULL); 3605 3598 data->dpm_table.mem_table.dpm_state.soft_max_level = 3606 3599 data->smc_state_table.mem_max_level; 3607 3600 } ··· 3616 3607 data->dpm_table.soc_table.dpm_state.soft_max_level) { 3617 3608 smum_send_msg_to_smc_with_parameter(hwmgr, 3618 3609 PPSMC_MSG_SetSoftMaxSocclkByIndex, 3619 - data->smc_state_table.soc_max_level); 3610 + data->smc_state_table.soc_max_level, 3611 + NULL); 3620 3612 data->dpm_table.soc_table.dpm_state.soft_max_level = 3621 3613 data->smc_state_table.soc_max_level; 3622 3614 } ··· 3704 3694 /* This message will also enable SmcToHost Interrupt */ 3705 3695 smum_send_msg_to_smc_with_parameter(hwmgr, 3706 3696 PPSMC_MSG_SetLowGfxclkInterruptThreshold, 3707 - (uint32_t)low_sclk_interrupt_threshold); 3697 + (uint32_t)low_sclk_interrupt_threshold, 3698 + NULL); 3708 3699 } 3709 3700 3710 3701 return 0; ··· 3812 3801 if (!query) 3813 3802 return -EINVAL; 3814 3803 3815 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr); 3816 - value = smum_get_argument(hwmgr); 3804 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value); 3817 3805 3818 3806 /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ 3819 3807 *query = value << 8; ··· 3832 3822 3833 3823 switch (idx) { 3834 3824 case AMDGPU_PP_SENSOR_GFX_SCLK: 3835 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency); 3836 - sclk_mhz = smum_get_argument(hwmgr); 3825 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, 
&sclk_mhz); 3837 3826 *((uint32_t *)value) = sclk_mhz * 100; 3838 3827 break; 3839 3828 case AMDGPU_PP_SENSOR_GFX_MCLK: 3840 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); 3841 - mclk_idx = smum_get_argument(hwmgr); 3829 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx); 3842 3830 if (mclk_idx < dpm_table->mem_table.count) { 3843 3831 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; 3844 3832 *size = 4; ··· 3845 3837 } 3846 3838 break; 3847 3839 case AMDGPU_PP_SENSOR_GPU_LOAD: 3848 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0); 3849 - activity_percent = smum_get_argument(hwmgr); 3840 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0, 3841 + &activity_percent); 3850 3842 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; 3851 3843 *size = 4; 3852 3844 break; ··· 3855 3847 *size = 4; 3856 3848 break; 3857 3849 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 3858 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot); 3859 - *((uint32_t *)value) = smum_get_argument(hwmgr) * 3850 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value); 3851 + *((uint32_t *)value) = *((uint32_t *)value) * 3860 3852 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 3861 3853 *size = 4; 3862 3854 break; 3863 3855 case AMDGPU_PP_SENSOR_MEM_TEMP: 3864 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM); 3865 - *((uint32_t *)value) = smum_get_argument(hwmgr) * 3856 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value); 3857 + *((uint32_t *)value) = *((uint32_t *)value) * 3866 3858 PP_TEMPERATURE_UNITS_PER_CENTIGRADES; 3867 3859 *size = 4; 3868 3860 break; ··· 3901 3893 { 3902 3894 smum_send_msg_to_smc_with_parameter(hwmgr, 3903 3895 PPSMC_MSG_SetUclkFastSwitch, 3904 - has_disp ? 1 : 0); 3896 + has_disp ? 
1 : 0, 3897 + NULL); 3905 3898 } 3906 3899 3907 3900 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, ··· 3937 3928 clk_request = (clk_freq << 16) | clk_select; 3938 3929 smum_send_msg_to_smc_with_parameter(hwmgr, 3939 3930 PPSMC_MSG_RequestDisplayClockByFreq, 3940 - clk_request); 3931 + clk_request, 3932 + NULL); 3941 3933 } 3942 3934 3943 3935 return result; ··· 4000 3990 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { 4001 3991 smum_send_msg_to_smc_with_parameter( 4002 3992 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 4003 - min_clocks.dcefClockInSR / 100); 3993 + min_clocks.dcefClockInSR / 100, 3994 + NULL); 4004 3995 } else { 4005 3996 pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); 4006 3997 } ··· 4011 4000 4012 4001 if (min_clocks.memoryClock != 0) { 4013 4002 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock); 4014 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx); 4003 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx, 4004 + NULL); 4015 4005 data->dpm_table.mem_table.dpm_state.soft_min_level= idx; 4016 4006 } 4017 4007 ··· 4553 4541 if (data->registry_data.sclk_dpm_key_disabled) 4554 4542 break; 4555 4543 4556 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); 4557 - now = smum_get_argument(hwmgr); 4544 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now); 4558 4545 4559 4546 if (hwmgr->pp_one_vf && 4560 4547 (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) ··· 4569 4558 if (data->registry_data.mclk_dpm_key_disabled) 4570 4559 break; 4571 4560 4572 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); 4573 - now = smum_get_argument(hwmgr); 4561 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); 4574 4562 4575 4563 for (i = 0; i < mclk_table->count; i++) 4576 4564 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 4580 4570 if (data->registry_data.socclk_dpm_key_disabled) 4581 
4571 break; 4582 4572 4583 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex); 4584 - now = smum_get_argument(hwmgr); 4573 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); 4585 4574 4586 4575 for (i = 0; i < soc_table->count; i++) 4587 4576 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 4592 4583 break; 4593 4584 4594 4585 smum_send_msg_to_smc_with_parameter(hwmgr, 4595 - PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK); 4596 - now = smum_get_argument(hwmgr); 4586 + PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); 4597 4587 4598 4588 for (i = 0; i < dcef_table->count; i++) 4599 4589 size += sprintf(buf + size, "%d: %uMhz %s\n", ··· 4601 4593 "*" : ""); 4602 4594 break; 4603 4595 case PP_PCIE: 4604 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex); 4605 - now = smum_get_argument(hwmgr); 4596 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now); 4606 4597 4607 4598 for (i = 0; i < pcie_table->count; i++) 4608 4599 size += sprintf(buf + size, "%d: %s %s\n", i, ··· 4665 4658 4666 4659 if (data->water_marks_bitmap & WaterMarksLoaded) { 4667 4660 smum_send_msg_to_smc_with_parameter(hwmgr, 4668 - PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); 4661 + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display, 4662 + NULL); 4669 4663 } 4670 4664 4671 4665 return result; ··· 4932 4924 { 4933 4925 smum_send_msg_to_smc_with_parameter(hwmgr, 4934 4926 PPSMC_MSG_SetSystemVirtualDramAddrHigh, 4935 - virtual_addr_hi); 4927 + virtual_addr_hi, 4928 + NULL); 4936 4929 smum_send_msg_to_smc_with_parameter(hwmgr, 4937 4930 PPSMC_MSG_SetSystemVirtualDramAddrLow, 4938 - virtual_addr_low); 4931 + virtual_addr_low, 4932 + NULL); 4939 4933 smum_send_msg_to_smc_with_parameter(hwmgr, 4940 4934 PPSMC_MSG_DramLogSetDramAddrHigh, 4941 - mc_addr_hi); 4935 + mc_addr_hi, 4936 + NULL); 4942 4937 4943 4938 smum_send_msg_to_smc_with_parameter(hwmgr, 4944 4939 PPSMC_MSG_DramLogSetDramAddrLow, 4945 - mc_addr_low); 4940 + mc_addr_low, 4941 
+ NULL); 4946 4942 4947 4943 smum_send_msg_to_smc_with_parameter(hwmgr, 4948 4944 PPSMC_MSG_DramLogSetDramSize, 4949 - size); 4945 + size, 4946 + NULL); 4950 4947 return 0; 4951 4948 } 4952 4949 ··· 5053 5040 smum_send_msg_to_smc_with_parameter(hwmgr, 5054 5041 PPSMC_MSG_SetCustomGfxDpmParameters, 5055 5042 busy_set_point | FPS<<8 | 5056 - use_rlc_busy << 16 | min_active_level<<24); 5043 + use_rlc_busy << 16 | min_active_level<<24, 5044 + NULL); 5057 5045 } 5058 5046 5059 5047 out: 5060 5048 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, 5061 - 1 << power_profile_mode); 5049 + 1 << power_profile_mode, 5050 + NULL); 5062 5051 hwmgr->power_profile_mode = power_profile_mode; 5063 5052 5064 5053 return 0; ··· 5317 5302 return 0; 5318 5303 } 5319 5304 5320 - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, 5305 + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 5321 5306 "[PrepareMp1] Failed!", 5322 5307 return ret); 5323 5308
+6 -3
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
··· 925 925 926 926 /* For Vega10, SMC does not support any mask yet. */ 927 927 if (enable) 928 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info); 928 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info, 929 + NULL); 929 930 930 931 } 931 932 ··· 1328 1327 1329 1328 if (data->registry_data.enable_pkg_pwr_tracking_feature) 1330 1329 smum_send_msg_to_smc_with_parameter(hwmgr, 1331 - PPSMC_MSG_SetPptLimit, n); 1330 + PPSMC_MSG_SetPptLimit, n, 1331 + NULL); 1332 1332 1333 1333 return 0; 1334 1334 } ··· 1395 1393 uint32_t adjust_percent) 1396 1394 { 1397 1395 smum_send_msg_to_smc_with_parameter(hwmgr, 1398 - PPSMC_MSG_OverDriveSetPercentage, adjust_percent); 1396 + PPSMC_MSG_OverDriveSetPercentage, adjust_percent, 1397 + NULL); 1399 1398 } 1400 1399 1401 1400 int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
+3 -3
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
··· 31 31 32 32 static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) 33 33 { 34 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm); 35 - *current_rpm = smum_get_argument(hwmgr); 34 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm); 36 35 return 0; 37 36 } 38 37 ··· 519 520 520 521 smum_send_msg_to_smc_with_parameter(hwmgr, 521 522 PPSMC_MSG_SetFanTemperatureTarget, 522 - (uint32_t)table->FanTargetTemperature); 523 + (uint32_t)table->FanTargetTemperature, 524 + NULL); 523 525 524 526 table->FanPwmMin = hwmgr->thermal_controller. 525 527 advanceFanControlParameters.usPWMMin * 255 / 100;
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
··· 96 96 if (state == BACO_STATE_IN) { 97 97 if (soc15_baco_program_registers(hwmgr, pre_baco_tbl, 98 98 ARRAY_SIZE(pre_baco_tbl))) { 99 - if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) 99 + if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL)) 100 100 return -EINVAL; 101 101 102 102 if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
+85 -64
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
··· 357 357 } 358 358 359 359 /* Get the SN to turn into a Unique ID */ 360 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); 361 - top32 = smum_get_argument(hwmgr); 362 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); 363 - bottom32 = smum_get_argument(hwmgr); 360 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); 361 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); 364 362 365 363 adev->unique_id = ((uint64_t)bottom32 << 32) | top32; 366 364 } ··· 481 483 482 484 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 483 485 PPSMC_MSG_GetDpmFreqByIndex, 484 - (clk_id << 16 | 0xFF)); 486 + (clk_id << 16 | 0xFF), 487 + num_of_levels); 485 488 PP_ASSERT_WITH_CODE(!ret, 486 489 "[GetNumOfDpmLevel] failed to get dpm levels!", 487 490 return ret); 488 - 489 - *num_of_levels = smum_get_argument(hwmgr); 490 - PP_ASSERT_WITH_CODE(*num_of_levels > 0, 491 - "[GetNumOfDpmLevel] number of clk levels is invalid!", 492 - return -EINVAL); 493 491 494 492 return ret; 495 493 } ··· 498 504 *Lower 16 bits specify the level 499 505 */ 500 506 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 501 - PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0, 507 + PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index), 508 + clock) == 0, 502 509 "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!", 503 510 return -EINVAL); 504 - 505 - *clock = smum_get_argument(hwmgr); 506 511 507 512 return 0; 508 513 } ··· 742 749 data->vbios_boot_state.vclock = boot_up_values.ulVClk; 743 750 smum_send_msg_to_smc_with_parameter(hwmgr, 744 751 PPSMC_MSG_SetMinDeepSleepDcefclk, 745 - (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); 752 + (uint32_t)(data->vbios_boot_state.dcef_clock / 100), 753 + NULL); 746 754 } 747 755 748 756 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); ··· 761 767 uint32_t result; 762 768 763 769 PP_ASSERT_WITH_CODE( 764 - smum_send_msg_to_smc(hwmgr, 
PPSMC_MSG_RunAcgBtc) == 0, 770 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0, 765 771 "[Run_ACG_BTC] Attempt to run ACG BTC failed!", 766 772 return -EINVAL); 767 773 768 - result = smum_get_argument(hwmgr); 769 774 PP_ASSERT_WITH_CODE(result == 1, 770 775 "Failed to run ACG BTC!", return -EINVAL); 771 776 ··· 785 792 (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF)); 786 793 787 794 PP_ASSERT_WITH_CODE( 788 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0, 795 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, 796 + NULL) == 0, 789 797 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!", 790 798 return -1); 791 799 792 800 PP_ASSERT_WITH_CODE( 793 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0, 801 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, 802 + NULL) == 0, 794 803 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!", 795 804 return -1); 796 805 ··· 823 828 bool enabled; 824 829 825 830 PP_ASSERT_WITH_CODE( 826 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0, 831 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0, 827 832 "[EnableAllSMUFeatures] Failed to enable all smu features!", 828 833 return -1); 829 834 ··· 849 854 bool enabled; 850 855 851 856 PP_ASSERT_WITH_CODE( 852 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0, 857 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0, 853 858 "[DisableAllSMUFeatures] Failed to disable all smu features!", 854 859 return -1); 855 860 ··· 874 879 uint32_t adjust_percent) 875 880 { 876 881 return smum_send_msg_to_smc_with_parameter(hwmgr, 877 - PPSMC_MSG_OverDriveSetPercentage, 
adjust_percent); 882 + PPSMC_MSG_OverDriveSetPercentage, adjust_percent, 883 + NULL); 878 884 } 879 885 880 886 static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr) ··· 898 902 { 899 903 /* AC Max */ 900 904 PP_ASSERT_WITH_CODE( 901 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0, 905 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16), 906 + &(clock->ACMax)) == 0, 902 907 "[GetClockRanges] Failed to get max ac clock from SMC!", 903 908 return -EINVAL); 904 - clock->ACMax = smum_get_argument(hwmgr); 905 909 906 910 /* AC Min */ 907 911 PP_ASSERT_WITH_CODE( 908 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0, 912 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16), 913 + &(clock->ACMin)) == 0, 909 914 "[GetClockRanges] Failed to get min ac clock from SMC!", 910 915 return -EINVAL); 911 - clock->ACMin = smum_get_argument(hwmgr); 912 916 913 917 /* DC Max */ 914 918 PP_ASSERT_WITH_CODE( 915 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0, 919 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16), 920 + &(clock->DCMax)) == 0, 916 921 "[GetClockRanges] Failed to get max dc clock from SMC!", 917 922 return -EINVAL); 918 - clock->DCMax = smum_get_argument(hwmgr); 919 923 920 924 return 0; 921 925 } ··· 940 944 int tmp_result, result = 0; 941 945 942 946 smum_send_msg_to_smc_with_parameter(hwmgr, 943 - PPSMC_MSG_NumOfDisplays, 0); 947 + PPSMC_MSG_NumOfDisplays, 0, NULL); 944 948 945 949 result = vega12_set_allowed_featuresmask(hwmgr); 946 950 PP_ASSERT_WITH_CODE(result == 0, ··· 1039 1043 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; 1040 1044 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1041 1045 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1042 - (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), 1046 + 
(PPCLK_GFXCLK << 16) | (min_freq & 0xffff), 1047 + NULL)), 1043 1048 "Failed to set soft min gfxclk !", 1044 1049 return ret); 1045 1050 } ··· 1049 1052 min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level; 1050 1053 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1051 1054 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1052 - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1055 + (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1056 + NULL)), 1053 1057 "Failed to set soft min memclk !", 1054 1058 return ret); 1055 1059 1056 1060 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level; 1057 1061 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1058 1062 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1059 - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1063 + (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1064 + NULL)), 1060 1065 "Failed to set hard min memclk !", 1061 1066 return ret); 1062 1067 } ··· 1068 1069 1069 1070 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1070 1071 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1071 - (PPCLK_VCLK << 16) | (min_freq & 0xffff))), 1072 + (PPCLK_VCLK << 16) | (min_freq & 0xffff), 1073 + NULL)), 1072 1074 "Failed to set soft min vclk!", 1073 1075 return ret); 1074 1076 ··· 1077 1077 1078 1078 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1079 1079 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1080 - (PPCLK_DCLK << 16) | (min_freq & 0xffff))), 1080 + (PPCLK_DCLK << 16) | (min_freq & 0xffff), 1081 + NULL)), 1081 1082 "Failed to set soft min dclk!", 1082 1083 return ret); 1083 1084 } ··· 1088 1087 1089 1088 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1090 1089 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1091 - (PPCLK_ECLK << 16) | (min_freq & 0xffff))), 1090 + (PPCLK_ECLK << 16) | (min_freq & 0xffff), 1091 + NULL)), 1092 1092 "Failed to set soft min eclk!", 1093 1093 return ret); 1094 1094 } ··· 1099 1097 1100 1098 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1101 1099 hwmgr, 
PPSMC_MSG_SetSoftMinByFreq, 1102 - (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), 1100 + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), 1101 + NULL)), 1103 1102 "Failed to set soft min socclk!", 1104 1103 return ret); 1105 1104 } ··· 1110 1107 1111 1108 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1112 1109 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1113 - (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))), 1110 + (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), 1111 + NULL)), 1114 1112 "Failed to set hard min dcefclk!", 1115 1113 return ret); 1116 1114 } ··· 1131 1127 1132 1128 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1133 1129 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1134 - (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), 1130 + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), 1131 + NULL)), 1135 1132 "Failed to set soft max gfxclk!", 1136 1133 return ret); 1137 1134 } ··· 1142 1137 1143 1138 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1144 1139 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1145 - (PPCLK_UCLK << 16) | (max_freq & 0xffff))), 1140 + (PPCLK_UCLK << 16) | (max_freq & 0xffff), 1141 + NULL)), 1146 1142 "Failed to set soft max memclk!", 1147 1143 return ret); 1148 1144 } ··· 1153 1147 1154 1148 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1155 1149 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1156 - (PPCLK_VCLK << 16) | (max_freq & 0xffff))), 1150 + (PPCLK_VCLK << 16) | (max_freq & 0xffff), 1151 + NULL)), 1157 1152 "Failed to set soft max vclk!", 1158 1153 return ret); 1159 1154 1160 1155 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; 1161 1156 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1162 1157 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1163 - (PPCLK_DCLK << 16) | (max_freq & 0xffff))), 1158 + (PPCLK_DCLK << 16) | (max_freq & 0xffff), 1159 + NULL)), 1164 1160 "Failed to set soft max dclk!", 1165 1161 return ret); 1166 1162 } ··· 1172 1164 1173 1165 PP_ASSERT_WITH_CODE(!(ret = 
smum_send_msg_to_smc_with_parameter( 1174 1166 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1175 - (PPCLK_ECLK << 16) | (max_freq & 0xffff))), 1167 + (PPCLK_ECLK << 16) | (max_freq & 0xffff), 1168 + NULL)), 1176 1169 "Failed to set soft max eclk!", 1177 1170 return ret); 1178 1171 } ··· 1183 1174 1184 1175 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1185 1176 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1186 - (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), 1177 + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), 1178 + NULL)), 1187 1179 "Failed to set soft max socclk!", 1188 1180 return ret); 1189 1181 } ··· 1297 1287 *gfx_freq = 0; 1298 1288 1299 1289 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 1300 - PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0, 1290 + PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16), 1291 + &gfx_clk) == 0, 1301 1292 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", 1302 1293 return -EINVAL); 1303 - gfx_clk = smum_get_argument(hwmgr); 1304 1294 1305 1295 *gfx_freq = gfx_clk * 100; 1306 1296 ··· 1314 1304 *mclk_freq = 0; 1315 1305 1316 1306 PP_ASSERT_WITH_CODE( 1317 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0, 1307 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16), 1308 + &mem_clk) == 0, 1318 1309 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", 1319 1310 return -EINVAL); 1320 - mem_clk = smum_get_argument(hwmgr); 1321 1311 1322 1312 *mclk_freq = mem_clk * 100; 1323 1313 ··· 1430 1420 if (data->smu_features[GNLD_DPM_UCLK].enabled) 1431 1421 return smum_send_msg_to_smc_with_parameter(hwmgr, 1432 1422 PPSMC_MSG_SetUclkFastSwitch, 1433 - has_disp ? 1 : 0); 1423 + has_disp ? 
1 : 0, 1424 + NULL); 1434 1425 1435 1426 return 0; 1436 1427 } ··· 1470 1459 clk_request = (clk_select << 16) | clk_freq; 1471 1460 result = smum_send_msg_to_smc_with_parameter(hwmgr, 1472 1461 PPSMC_MSG_SetHardMinByFreq, 1473 - clk_request); 1462 + clk_request, 1463 + NULL); 1474 1464 } 1475 1465 } 1476 1466 ··· 1505 1493 PP_ASSERT_WITH_CODE( 1506 1494 !smum_send_msg_to_smc_with_parameter( 1507 1495 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 1508 - min_clocks.dcefClockInSR /100), 1496 + min_clocks.dcefClockInSR /100, 1497 + NULL), 1509 1498 "Attempt to set divider for DCEFCLK Failed!", 1510 1499 return -1); 1511 1500 } else { ··· 2137 2124 case PP_SOCCLK: 2138 2125 PP_ASSERT_WITH_CODE( 2139 2126 smum_send_msg_to_smc_with_parameter(hwmgr, 2140 - PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0, 2127 + PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16), 2128 + &now) == 0, 2141 2129 "Attempt to get Current SOCCLK Frequency Failed!", 2142 2130 return -EINVAL); 2143 - now = smum_get_argument(hwmgr); 2144 2131 2145 2132 PP_ASSERT_WITH_CODE( 2146 2133 vega12_get_socclocks(hwmgr, &clocks) == 0, ··· 2155 2142 case PP_DCEFCLK: 2156 2143 PP_ASSERT_WITH_CODE( 2157 2144 smum_send_msg_to_smc_with_parameter(hwmgr, 2158 - PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0, 2145 + PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16), 2146 + &now) == 0, 2159 2147 "Attempt to get Current DCEFCLK Frequency Failed!", 2160 2148 return -EINVAL); 2161 - now = smum_get_argument(hwmgr); 2162 2149 2163 2150 PP_ASSERT_WITH_CODE( 2164 2151 vega12_get_dcefclocks(hwmgr, &clocks) == 0, ··· 2356 2343 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 2357 2344 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2358 2345 PPSMC_MSG_SetHardMinByFreq, 2359 - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), 2346 + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, 2347 + NULL)), 2360 2348 "[SetUclkToHightestDpmLevel] Set 
hard min uclk failed!", 2361 2349 return ret); 2362 2350 } ··· 2371 2357 int ret = 0; 2372 2358 2373 2359 smum_send_msg_to_smc_with_parameter(hwmgr, 2374 - PPSMC_MSG_NumOfDisplays, 0); 2360 + PPSMC_MSG_NumOfDisplays, 0, 2361 + NULL); 2375 2362 2376 2363 ret = vega12_set_uclk_to_highest_dpm_level(hwmgr, 2377 2364 &data->dpm_table.mem_table); ··· 2398 2383 data->smu_features[GNLD_DPM_DCEFCLK].supported && 2399 2384 data->smu_features[GNLD_DPM_SOCCLK].supported) 2400 2385 smum_send_msg_to_smc_with_parameter(hwmgr, 2401 - PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); 2386 + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display, 2387 + NULL); 2402 2388 2403 2389 return result; 2404 2390 } ··· 2571 2555 { 2572 2556 smum_send_msg_to_smc_with_parameter(hwmgr, 2573 2557 PPSMC_MSG_SetSystemVirtualDramAddrHigh, 2574 - virtual_addr_hi); 2558 + virtual_addr_hi, 2559 + NULL); 2575 2560 smum_send_msg_to_smc_with_parameter(hwmgr, 2576 2561 PPSMC_MSG_SetSystemVirtualDramAddrLow, 2577 - virtual_addr_low); 2562 + virtual_addr_low, 2563 + NULL); 2578 2564 smum_send_msg_to_smc_with_parameter(hwmgr, 2579 2565 PPSMC_MSG_DramLogSetDramAddrHigh, 2580 - mc_addr_hi); 2566 + mc_addr_hi, 2567 + NULL); 2581 2568 2582 2569 smum_send_msg_to_smc_with_parameter(hwmgr, 2583 2570 PPSMC_MSG_DramLogSetDramAddrLow, 2584 - mc_addr_low); 2571 + mc_addr_low, 2572 + NULL); 2585 2573 2586 2574 smum_send_msg_to_smc_with_parameter(hwmgr, 2587 2575 PPSMC_MSG_DramLogSetDramSize, 2588 - size); 2576 + size, 2577 + NULL); 2589 2578 return 0; 2590 2579 } 2591 2580 ··· 2626 2605 int ret = 0; 2627 2606 2628 2607 if (data->gfxoff_controlled_by_driver) 2629 - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff); 2608 + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL); 2630 2609 2631 2610 return ret; 2632 2611 } ··· 2638 2617 int ret = 0; 2639 2618 2640 2619 if (data->gfxoff_controlled_by_driver) 2641 - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff); 2620 + ret = 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL); 2642 2621 2643 2622 return ret; 2644 2623 } ··· 2675 2654 return 0; 2676 2655 } 2677 2656 2678 - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, 2657 + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 2679 2658 "[PrepareMp1] Failed!", 2680 2659 return ret); 2681 2660
+4 -3
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
··· 32 32 static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) 33 33 { 34 34 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 35 - PPSMC_MSG_GetCurrentRpm), 35 + PPSMC_MSG_GetCurrentRpm, 36 + current_rpm), 36 37 "Attempt to get current RPM from SMC Failed!", 37 38 return -EINVAL); 38 - *current_rpm = smum_get_argument(hwmgr); 39 39 40 40 return 0; 41 41 } ··· 259 259 260 260 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 261 261 PPSMC_MSG_SetFanTemperatureTarget, 262 - (uint32_t)table->FanTargetTemperature); 262 + (uint32_t)table->FanTargetTemperature, 263 + NULL); 263 264 264 265 return ret; 265 266 }
+4 -4
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
··· 91 91 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); 92 92 93 93 if(smum_send_msg_to_smc_with_parameter(hwmgr, 94 - PPSMC_MSG_EnterBaco, 0)) 94 + PPSMC_MSG_EnterBaco, 0, NULL)) 95 95 return -EINVAL; 96 96 } else { 97 97 if(smum_send_msg_to_smc_with_parameter(hwmgr, 98 - PPSMC_MSG_EnterBaco, 1)) 98 + PPSMC_MSG_EnterBaco, 1, NULL)) 99 99 return -EINVAL; 100 100 } 101 101 102 102 } else if (state == BACO_STATE_OUT) { 103 - if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco)) 103 + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL)) 104 104 return -EINVAL; 105 105 if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl, 106 106 ARRAY_SIZE(clean_baco_tbl))) ··· 118 118 if (ret) 119 119 return ret; 120 120 121 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI); 121 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL); 122 122 }
+104 -77
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
··· 92 92 */ 93 93 data->registry_data.disallowed_features = 0xE0041C00; 94 94 /* ECC feature should be disabled on old SMUs */ 95 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); 96 - hwmgr->smu_version = smum_get_argument(hwmgr); 95 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version); 97 96 if (hwmgr->smu_version < 0x282100) 98 97 data->registry_data.disallowed_features |= FEATURE_ECC_MASK; 99 98 ··· 399 400 } 400 401 401 402 /* Get the SN to turn into a Unique ID */ 402 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); 403 - top32 = smum_get_argument(hwmgr); 404 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); 405 - bottom32 = smum_get_argument(hwmgr); 403 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32); 404 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32); 406 405 407 406 adev->unique_id = ((uint64_t)bottom32 << 32) | top32; 408 407 } ··· 524 527 525 528 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 526 529 PPSMC_MSG_GetDpmFreqByIndex, 527 - (clk_id << 16 | 0xFF)); 530 + (clk_id << 16 | 0xFF), 531 + num_of_levels); 528 532 PP_ASSERT_WITH_CODE(!ret, 529 533 "[GetNumOfDpmLevel] failed to get dpm levels!", 530 534 return ret); 531 - 532 - *num_of_levels = smum_get_argument(hwmgr); 533 - PP_ASSERT_WITH_CODE(*num_of_levels > 0, 534 - "[GetNumOfDpmLevel] number of clk levels is invalid!", 535 - return -EINVAL); 536 535 537 536 return ret; 538 537 } ··· 540 547 541 548 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 542 549 PPSMC_MSG_GetDpmFreqByIndex, 543 - (clk_id << 16 | index)); 550 + (clk_id << 16 | index), 551 + clk); 544 552 PP_ASSERT_WITH_CODE(!ret, 545 553 "[GetDpmFreqByIndex] failed to get dpm freq by index!", 546 554 return ret); 547 - 548 - *clk = smum_get_argument(hwmgr); 549 - PP_ASSERT_WITH_CODE(*clk, 550 - "[GetDpmFreqByIndex] clk value is invalid!", 551 - return -EINVAL); 552 555 553 556 return ret; 554 557 } ··· 802 813 803 814 
smum_send_msg_to_smc_with_parameter(hwmgr, 804 815 PPSMC_MSG_SetMinDeepSleepDcefclk, 805 - (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); 816 + (uint32_t)(data->vbios_boot_state.dcef_clock / 100), 817 + NULL); 806 818 807 819 memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); 808 820 ··· 858 868 */ 859 869 smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; 860 870 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 861 - PPSMC_MSG_OverridePcieParameters, smu_pcie_arg); 871 + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg, 872 + NULL); 862 873 PP_ASSERT_WITH_CODE(!ret, 863 874 "[OverridePcieParameters] Attempt to override pcie params failed!", 864 875 return ret); ··· 890 899 & 0xFFFFFFFF)); 891 900 892 901 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 893 - PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high); 902 + PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL); 894 903 PP_ASSERT_WITH_CODE(!ret, 895 904 "[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!", 896 905 return ret); 897 906 898 907 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 899 - PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low); 908 + PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL); 900 909 PP_ASSERT_WITH_CODE(!ret, 901 910 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!", 902 911 return ret); ··· 906 915 907 916 static int vega20_run_btc(struct pp_hwmgr *hwmgr) 908 917 { 909 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc); 918 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL); 910 919 } 911 920 912 921 static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr) 913 922 { 914 - return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc); 923 + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL); 915 924 } 916 925 917 926 static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) ··· 924 933 int ret = 0; 925 934 926 935 
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 927 - PPSMC_MSG_EnableAllSmuFeatures)) == 0, 936 + PPSMC_MSG_EnableAllSmuFeatures, 937 + NULL)) == 0, 928 938 "[EnableAllSMUFeatures] Failed to enable all smu features!", 929 939 return ret); 930 940 ··· 958 966 if (data->smu_features[GNLD_DPM_UCLK].enabled) 959 967 return smum_send_msg_to_smc_with_parameter(hwmgr, 960 968 PPSMC_MSG_SetUclkFastSwitch, 961 - 1); 969 + 1, 970 + NULL); 962 971 963 972 return 0; 964 973 } ··· 971 978 972 979 return smum_send_msg_to_smc_with_parameter(hwmgr, 973 980 PPSMC_MSG_SetFclkGfxClkRatio, 974 - data->registry_data.fclk_gfxclk_ratio); 981 + data->registry_data.fclk_gfxclk_ratio, 982 + NULL); 975 983 } 976 984 977 985 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) ··· 985 991 int ret = 0; 986 992 987 993 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 988 - PPSMC_MSG_DisableAllSmuFeatures)) == 0, 994 + PPSMC_MSG_DisableAllSmuFeatures, 995 + NULL)) == 0, 989 996 "[DisableAllSMUFeatures] Failed to disable all smu features!", 990 997 return ret); 991 998 ··· 1194 1199 1195 1200 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1196 1201 PPSMC_MSG_GetAVFSVoltageByDpm, 1197 - ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq)); 1202 + ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq), 1203 + voltage); 1198 1204 PP_ASSERT_WITH_CODE(!ret, 1199 1205 "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!", 1200 1206 return ret); 1201 1207 1202 - *voltage = smum_get_argument(hwmgr); 1203 1208 *voltage = *voltage / VOLTAGE_SCALE; 1204 1209 1205 1210 return 0; ··· 1555 1560 1556 1561 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1557 1562 PPSMC_MSG_GetDcModeMaxDpmFreq, 1558 - (clock_select << 16))) == 0, 1563 + (clock_select << 16), 1564 + clock)) == 0, 1559 1565 "[GetMaxSustainableClock] Failed to get max DC clock from SMC!", 1560 1566 return ret); 1561 - *clock = smum_get_argument(hwmgr); 1562 1567 1563 
1568 /* if DC limit is zero, return AC limit */ 1564 1569 if (*clock == 0) { 1565 1570 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 1566 1571 PPSMC_MSG_GetMaxDpmFreq, 1567 - (clock_select << 16))) == 0, 1572 + (clock_select << 16), 1573 + clock)) == 0, 1568 1574 "[GetMaxSustainableClock] failed to get max AC clock from SMC!", 1569 1575 return ret); 1570 - *clock = smum_get_argument(hwmgr); 1571 1576 } 1572 1577 1573 1578 return 0; ··· 1636 1641 int result; 1637 1642 1638 1643 result = smum_send_msg_to_smc(hwmgr, 1639 - PPSMC_MSG_SetMGpuFanBoostLimitRpm); 1644 + PPSMC_MSG_SetMGpuFanBoostLimitRpm, 1645 + NULL); 1640 1646 PP_ASSERT_WITH_CODE(!result, 1641 1647 "[EnableMgpuFan] Failed to enable mgpu fan boost!", 1642 1648 return result); ··· 1665 1669 int result = 0; 1666 1670 1667 1671 smum_send_msg_to_smc_with_parameter(hwmgr, 1668 - PPSMC_MSG_NumOfDisplays, 0); 1672 + PPSMC_MSG_NumOfDisplays, 0, NULL); 1669 1673 1670 1674 result = vega20_set_allowed_featuresmask(hwmgr); 1671 1675 PP_ASSERT_WITH_CODE(!result, ··· 1736 1740 return result); 1737 1741 1738 1742 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, 1739 - POWER_SOURCE_AC << 16); 1743 + POWER_SOURCE_AC << 16, &hwmgr->default_power_limit); 1740 1744 PP_ASSERT_WITH_CODE(!result, 1741 1745 "[GetPptLimit] get default PPT limit failed!", 1742 1746 return result); 1743 1747 hwmgr->power_limit = 1744 - hwmgr->default_power_limit = smum_get_argument(hwmgr); 1748 + hwmgr->default_power_limit; 1745 1749 1746 1750 return 0; 1747 1751 } ··· 1802 1806 min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level; 1803 1807 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1804 1808 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1805 - (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))), 1809 + (PPCLK_GFXCLK << 16) | (min_freq & 0xffff), 1810 + NULL)), 1806 1811 "Failed to set soft min gfxclk !", 1807 1812 return ret); 1808 1813 } ··· 1813 1816 min_freq = 
data->dpm_table.mem_table.dpm_state.soft_min_level; 1814 1817 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1815 1818 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1816 - (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1819 + (PPCLK_UCLK << 16) | (min_freq & 0xffff), 1820 + NULL)), 1817 1821 "Failed to set soft min memclk !", 1818 1822 return ret); 1819 1823 } ··· 1825 1827 1826 1828 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1827 1829 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1828 - (PPCLK_VCLK << 16) | (min_freq & 0xffff))), 1830 + (PPCLK_VCLK << 16) | (min_freq & 0xffff), 1831 + NULL)), 1829 1832 "Failed to set soft min vclk!", 1830 1833 return ret); 1831 1834 ··· 1834 1835 1835 1836 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1836 1837 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1837 - (PPCLK_DCLK << 16) | (min_freq & 0xffff))), 1838 + (PPCLK_DCLK << 16) | (min_freq & 0xffff), 1839 + NULL)), 1838 1840 "Failed to set soft min dclk!", 1839 1841 return ret); 1840 1842 } ··· 1846 1846 1847 1847 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1848 1848 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1849 - (PPCLK_ECLK << 16) | (min_freq & 0xffff))), 1849 + (PPCLK_ECLK << 16) | (min_freq & 0xffff), 1850 + NULL)), 1850 1851 "Failed to set soft min eclk!", 1851 1852 return ret); 1852 1853 } ··· 1858 1857 1859 1858 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1860 1859 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1861 - (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))), 1860 + (PPCLK_SOCCLK << 16) | (min_freq & 0xffff), 1861 + NULL)), 1862 1862 "Failed to set soft min socclk!", 1863 1863 return ret); 1864 1864 } ··· 1870 1868 1871 1869 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1872 1870 hwmgr, PPSMC_MSG_SetSoftMinByFreq, 1873 - (PPCLK_FCLK << 16) | (min_freq & 0xffff))), 1871 + (PPCLK_FCLK << 16) | (min_freq & 0xffff), 1872 + NULL)), 1874 1873 "Failed to set soft min fclk!", 1875 1874 return ret); 1876 1875 
} ··· 1882 1879 1883 1880 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1884 1881 hwmgr, PPSMC_MSG_SetHardMinByFreq, 1885 - (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))), 1882 + (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff), 1883 + NULL)), 1886 1884 "Failed to set hard min dcefclk!", 1887 1885 return ret); 1888 1886 } ··· 1904 1900 1905 1901 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1906 1902 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1907 - (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))), 1903 + (PPCLK_GFXCLK << 16) | (max_freq & 0xffff), 1904 + NULL)), 1908 1905 "Failed to set soft max gfxclk!", 1909 1906 return ret); 1910 1907 } ··· 1916 1911 1917 1912 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1918 1913 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1919 - (PPCLK_UCLK << 16) | (max_freq & 0xffff))), 1914 + (PPCLK_UCLK << 16) | (max_freq & 0xffff), 1915 + NULL)), 1920 1916 "Failed to set soft max memclk!", 1921 1917 return ret); 1922 1918 } ··· 1928 1922 1929 1923 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1930 1924 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1931 - (PPCLK_VCLK << 16) | (max_freq & 0xffff))), 1925 + (PPCLK_VCLK << 16) | (max_freq & 0xffff), 1926 + NULL)), 1932 1927 "Failed to set soft max vclk!", 1933 1928 return ret); 1934 1929 1935 1930 max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level; 1936 1931 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1937 1932 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1938 - (PPCLK_DCLK << 16) | (max_freq & 0xffff))), 1933 + (PPCLK_DCLK << 16) | (max_freq & 0xffff), 1934 + NULL)), 1939 1935 "Failed to set soft max dclk!", 1940 1936 return ret); 1941 1937 } ··· 1948 1940 1949 1941 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1950 1942 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1951 - (PPCLK_ECLK << 16) | (max_freq & 0xffff))), 1943 + (PPCLK_ECLK << 16) | (max_freq & 0xffff), 1944 + NULL)), 1952 1945 "Failed to set soft max eclk!", 
1953 1946 return ret); 1954 1947 } ··· 1960 1951 1961 1952 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1962 1953 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1963 - (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))), 1954 + (PPCLK_SOCCLK << 16) | (max_freq & 0xffff), 1955 + NULL)), 1964 1956 "Failed to set soft max socclk!", 1965 1957 return ret); 1966 1958 } ··· 1972 1962 1973 1963 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( 1974 1964 hwmgr, PPSMC_MSG_SetSoftMaxByFreq, 1975 - (PPCLK_FCLK << 16) | (max_freq & 0xffff))), 1965 + (PPCLK_FCLK << 16) | (max_freq & 0xffff), 1966 + NULL)), 1976 1967 "Failed to set soft max fclk!", 1977 1968 return ret); 1978 1969 } ··· 2017 2006 2018 2007 if (max) { 2019 2008 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2020 - PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0, 2009 + PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16), 2010 + clock)) == 0, 2021 2011 "[GetClockRanges] Failed to get max clock from SMC!", 2022 2012 return ret); 2023 - *clock = smum_get_argument(hwmgr); 2024 2013 } else { 2025 2014 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2026 2015 PPSMC_MSG_GetMinDpmFreq, 2027 - (clock_select << 16))) == 0, 2016 + (clock_select << 16), 2017 + clock)) == 0, 2028 2018 "[GetClockRanges] Failed to get min clock from SMC!", 2029 2019 return ret); 2030 - *clock = smum_get_argument(hwmgr); 2031 2020 } 2032 2021 2033 2022 return 0; ··· 2133 2122 *clk_freq = 0; 2134 2123 2135 2124 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2136 - PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0, 2125 + PPSMC_MSG_GetDpmClockFreq, (clk_id << 16), 2126 + clk_freq)) == 0, 2137 2127 "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!", 2138 2128 return ret); 2139 - *clk_freq = smum_get_argument(hwmgr); 2140 2129 2141 2130 *clk_freq = *clk_freq * 100; 2142 2131 ··· 2287 2276 clk_request = (clk_select << 16) | clk_freq; 2288 2277 result = 
smum_send_msg_to_smc_with_parameter(hwmgr, 2289 2278 PPSMC_MSG_SetHardMinByFreq, 2290 - clk_request); 2279 + clk_request, 2280 + NULL); 2291 2281 } 2292 2282 } 2293 2283 ··· 2324 2312 if (data->smu_features[GNLD_DS_DCEFCLK].supported) 2325 2313 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter( 2326 2314 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, 2327 - min_clocks.dcefClockInSR / 100)) == 0, 2315 + min_clocks.dcefClockInSR / 100, 2316 + NULL)) == 0, 2328 2317 "Attempt to set divider for DCEFCLK Failed!", 2329 2318 return ret); 2330 2319 } else { ··· 2337 2324 dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100; 2338 2325 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2339 2326 PPSMC_MSG_SetHardMinByFreq, 2340 - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), 2327 + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, 2328 + NULL)), 2341 2329 "[SetHardMinFreq] Set hard min uclk failed!", 2342 2330 return ret); 2343 2331 } ··· 2670 2656 return -EINVAL; 2671 2657 2672 2658 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2673 - PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level); 2659 + PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level, 2660 + NULL); 2674 2661 PP_ASSERT_WITH_CODE(!ret, 2675 2662 "Failed to set min link dpm level!", 2676 2663 return ret); ··· 3155 3140 return 0; 3156 3141 } 3157 3142 3158 - PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, 3143 + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0, 3159 3144 "[PrepareMp1] Failed!", 3160 3145 return ret); 3161 3146 ··· 3510 3495 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3511 3496 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3512 3497 PPSMC_MSG_SetHardMinByFreq, 3513 - (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)), 3498 + (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level, 3499 + NULL)), 3514 3500 
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!", 3515 3501 return ret); 3516 3502 } ··· 3536 3520 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3537 3521 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, 3538 3522 PPSMC_MSG_SetSoftMinByFreq, 3539 - (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)), 3523 + (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level, 3524 + NULL)), 3540 3525 "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", 3541 3526 return ret); 3542 3527 } ··· 3551 3534 int ret = 0; 3552 3535 3553 3536 smum_send_msg_to_smc_with_parameter(hwmgr, 3554 - PPSMC_MSG_NumOfDisplays, 0); 3537 + PPSMC_MSG_NumOfDisplays, 0, NULL); 3555 3538 3556 3539 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, 3557 3540 &data->dpm_table.mem_table); ··· 3582 3565 data->smu_features[GNLD_DPM_SOCCLK].supported) { 3583 3566 result = smum_send_msg_to_smc_with_parameter(hwmgr, 3584 3567 PPSMC_MSG_NumOfDisplays, 3585 - hwmgr->display_config->num_display); 3568 + hwmgr->display_config->num_display, 3569 + NULL); 3586 3570 } 3587 3571 3588 3572 return result; ··· 4100 4082 workload_type = 4101 4083 conv_power_profile_to_pplib_workload(power_profile_mode); 4102 4084 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, 4103 - 1 << workload_type); 4085 + 1 << workload_type, 4086 + NULL); 4104 4087 4105 4088 hwmgr->power_profile_mode = power_profile_mode; 4106 4089 ··· 4117 4098 { 4118 4099 smum_send_msg_to_smc_with_parameter(hwmgr, 4119 4100 PPSMC_MSG_SetSystemVirtualDramAddrHigh, 4120 - virtual_addr_hi); 4101 + virtual_addr_hi, 4102 + NULL); 4121 4103 smum_send_msg_to_smc_with_parameter(hwmgr, 4122 4104 PPSMC_MSG_SetSystemVirtualDramAddrLow, 4123 - virtual_addr_low); 4105 + virtual_addr_low, 4106 + NULL); 4124 4107 smum_send_msg_to_smc_with_parameter(hwmgr, 4125 4108 PPSMC_MSG_DramLogSetDramAddrHigh, 4126 - mc_addr_hi); 4109 + mc_addr_hi, 4110 + NULL); 4127 4111 4128 4112 
smum_send_msg_to_smc_with_parameter(hwmgr, 4129 4113 PPSMC_MSG_DramLogSetDramAddrLow, 4130 - mc_addr_low); 4114 + mc_addr_low, 4115 + NULL); 4131 4116 4132 4117 smum_send_msg_to_smc_with_parameter(hwmgr, 4133 4118 PPSMC_MSG_DramLogSetDramSize, 4134 - size); 4119 + size, 4120 + NULL); 4135 4121 return 0; 4136 4122 } 4137 4123 ··· 4177 4153 (acquire ? 4178 4154 PPSMC_MSG_RequestI2CBus : 4179 4155 PPSMC_MSG_ReleaseI2CBus), 4180 - 0); 4156 + 0, 4157 + NULL); 4181 4158 4182 4159 PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res); 4183 4160 return res; ··· 4195 4170 return -EINVAL; 4196 4171 } 4197 4172 4198 - ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state); 4173 + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state, 4174 + NULL); 4199 4175 if (ret) 4200 4176 pr_err("SetDfCstate failed!\n"); 4201 4177 ··· 4210 4184 4211 4185 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 4212 4186 PPSMC_MSG_SetXgmiMode, 4213 - pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); 4187 + pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 4188 + NULL); 4214 4189 if (ret) 4215 4190 pr_err("SetXgmiPstate failed!\n"); 4216 4191
+4 -2
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
··· 36 36 37 37 if (data->smu_features[GNLD_PPT].enabled) 38 38 return smum_send_msg_to_smc_with_parameter(hwmgr, 39 - PPSMC_MSG_SetPptLimit, n); 39 + PPSMC_MSG_SetPptLimit, n, 40 + NULL); 40 41 41 42 return 0; 42 43 } ··· 52 51 uint32_t adjust_percent) 53 52 { 54 53 return smum_send_msg_to_smc_with_parameter(hwmgr, 55 - PPSMC_MSG_OverDriveSetPercentage, adjust_percent); 54 + PPSMC_MSG_OverDriveSetPercentage, adjust_percent, 55 + NULL); 56 56 } 57 57 58 58 int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
+4 -3
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
··· 106 106 int ret = 0; 107 107 108 108 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 109 - PPSMC_MSG_GetCurrentRpm)) == 0, 109 + PPSMC_MSG_GetCurrentRpm, 110 + current_rpm)) == 0, 110 111 "Attempt to get current RPM from SMC Failed!", 111 112 return ret); 112 - *current_rpm = smum_get_argument(hwmgr); 113 113 114 114 return 0; 115 115 } ··· 329 329 330 330 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 331 331 PPSMC_MSG_SetFanTemperatureTarget, 332 - (uint32_t)table->FanTargetTemperature); 332 + (uint32_t)table->FanTargetTemperature, 333 + NULL); 333 334 334 335 return ret; 335 336 }
+3 -4
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
··· 81 81 SMU10_CLOCKTABLE, 82 82 }; 83 83 84 - extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr); 85 - 86 84 extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); 87 85 88 86 extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr); 89 87 90 - extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg); 88 + extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp); 91 89 92 90 extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, 93 - uint16_t msg, uint32_t parameter); 91 + uint16_t msg, uint32_t parameter, 92 + uint32_t *resp); 94 93 95 94 extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); 96 95
+8 -6
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
··· 2780 2780 2781 2781 if (setting->bupdate_sclk) { 2782 2782 if (!data->sclk_dpm_key_disabled) 2783 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); 2783 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); 2784 2784 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 2785 2785 if (levels[i].ActivityLevel != 2786 2786 cpu_to_be16(setting->sclk_activity)) { ··· 2810 2810 } 2811 2811 } 2812 2812 if (!data->sclk_dpm_key_disabled) 2813 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 2813 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); 2814 2814 } 2815 2815 2816 2816 if (setting->bupdate_mclk) { 2817 2817 if (!data->mclk_dpm_key_disabled) 2818 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); 2818 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); 2819 2819 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { 2820 2820 if (mclk_levels[i].ActivityLevel != 2821 2821 cpu_to_be16(setting->mclk_activity)) { ··· 2845 2845 } 2846 2846 } 2847 2847 if (!data->mclk_dpm_key_disabled) 2848 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 2848 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); 2849 2849 } 2850 2850 return 0; 2851 2851 } ··· 2882 2882 break; 2883 2883 } 2884 2884 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask, 2885 - data->dpm_level_enable_mask.uvd_dpm_enable_mask); 2885 + data->dpm_level_enable_mask.uvd_dpm_enable_mask, 2886 + NULL); 2886 2887 2887 2888 return 0; 2888 2889 } ··· 2914 2913 break; 2915 2914 } 2916 2915 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask, 2917 - data->dpm_level_enable_mask.vce_dpm_enable_mask); 2916 + data->dpm_level_enable_mask.vce_dpm_enable_mask, 2917 + NULL); 2918 2918 2919 2919 return 0; 2920 2920 }
+18 -12
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
··· 137 137 PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, 138 138 INTERRUPTS_ENABLED, 1); 139 139 140 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000); 140 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL); 141 141 142 142 /* Wait for done bit to be set */ 143 143 PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, ··· 202 202 203 203 if (0 != smu_data->avfs_btc_param) { 204 204 if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, 205 - PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) { 205 + PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param, 206 + NULL)) { 206 207 pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed"); 207 208 result = -EINVAL; 208 209 } ··· 1912 1911 if (mask) 1913 1912 smum_send_msg_to_smc_with_parameter(hwmgr, 1914 1913 PPSMC_MSG_LedConfig, 1915 - mask); 1914 + mask, 1915 + NULL); 1916 1916 return 0; 1917 1917 } 1918 1918 ··· 2220 2218 res = smum_send_msg_to_smc_with_parameter(hwmgr, 2221 2219 PPSMC_MSG_SetFanMinPwm, 2222 2220 hwmgr->thermal_controller. 2223 - advanceFanControlParameters.ucMinimumPWMLimit); 2221 + advanceFanControlParameters.ucMinimumPWMLimit, 2222 + NULL); 2224 2223 2225 2224 if (!res && hwmgr->thermal_controller. 2226 2225 advanceFanControlParameters.ulMinFanSCLKAcousticLimit) 2227 2226 res = smum_send_msg_to_smc_with_parameter(hwmgr, 2228 2227 PPSMC_MSG_SetFanSclkTarget, 2229 2228 hwmgr->thermal_controller. 
2230 - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); 2229 + advanceFanControlParameters.ulMinFanSCLKAcousticLimit, 2230 + NULL); 2231 2231 2232 2232 if (res) 2233 2233 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, ··· 2244 2240 if (!hwmgr->avfs_supported) 2245 2241 return 0; 2246 2242 2247 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); 2243 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); 2248 2244 2249 2245 return 0; 2250 2246 } ··· 2392 2388 PHM_PlatformCaps_StablePState)) 2393 2389 smum_send_msg_to_smc_with_parameter(hwmgr, 2394 2390 PPSMC_MSG_UVDDPM_SetEnabledMask, 2395 - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); 2391 + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), 2392 + NULL); 2396 2393 return 0; 2397 2394 } 2398 2395 ··· 2425 2420 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 2426 2421 smum_send_msg_to_smc_with_parameter(hwmgr, 2427 2422 PPSMC_MSG_VCEDPM_SetEnabledMask, 2428 - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); 2423 + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, 2424 + NULL); 2429 2425 return 0; 2430 2426 } 2431 2427 ··· 2573 2567 2574 2568 if (setting->bupdate_sclk) { 2575 2569 if (!data->sclk_dpm_key_disabled) 2576 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); 2570 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); 2577 2571 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 2578 2572 if (levels[i].ActivityLevel != 2579 2573 cpu_to_be16(setting->sclk_activity)) { ··· 2603 2597 } 2604 2598 } 2605 2599 if (!data->sclk_dpm_key_disabled) 2606 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 2600 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); 2607 2601 } 2608 2602 2609 2603 if (setting->bupdate_mclk) { 2610 2604 if (!data->mclk_dpm_key_disabled) 2611 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); 2605 + 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); 2612 2606 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { 2613 2607 if (mclk_levels[i].ActivityLevel != 2614 2608 cpu_to_be16(setting->mclk_activity)) { ··· 2638 2632 } 2639 2633 } 2640 2634 if (!data->mclk_dpm_key_disabled) 2641 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 2635 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); 2642 2636 } 2643 2637 return 0; 2644 2638 }
+18 -12
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
··· 99 99 struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); 100 100 101 101 if (0 != smu_data->avfs_btc_param) { 102 - if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) { 102 + if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param, 103 + NULL)) { 103 104 pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed"); 104 105 result = -1; 105 106 } ··· 2050 2049 return 0; 2051 2050 2052 2051 smum_send_msg_to_smc_with_parameter(hwmgr, 2053 - PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); 2052 + PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting, 2053 + NULL); 2054 2054 2055 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); 2055 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); 2056 2056 2057 2057 /* Apply avfs cks-off voltages to avoid the overshoot 2058 2058 * when switching to the highest sclk frequency 2059 2059 */ 2060 2060 if (data->apply_avfs_cks_off_voltage) 2061 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); 2061 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL); 2062 2062 2063 2063 return 0; 2064 2064 } ··· 2160 2158 res = smum_send_msg_to_smc_with_parameter(hwmgr, 2161 2159 PPSMC_MSG_SetFanMinPwm, 2162 2160 hwmgr->thermal_controller. 2163 - advanceFanControlParameters.ucMinimumPWMLimit); 2161 + advanceFanControlParameters.ucMinimumPWMLimit, 2162 + NULL); 2164 2163 2165 2164 if (!res && hwmgr->thermal_controller. 2166 2165 advanceFanControlParameters.ulMinFanSCLKAcousticLimit) 2167 2166 res = smum_send_msg_to_smc_with_parameter(hwmgr, 2168 2167 PPSMC_MSG_SetFanSclkTarget, 2169 2168 hwmgr->thermal_controller. 
2170 - advanceFanControlParameters.ulMinFanSCLKAcousticLimit); 2169 + advanceFanControlParameters.ulMinFanSCLKAcousticLimit, 2170 + NULL); 2171 2171 2172 2172 if (res) 2173 2173 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, ··· 2206 2202 PHM_PlatformCaps_StablePState)) 2207 2203 smum_send_msg_to_smc_with_parameter(hwmgr, 2208 2204 PPSMC_MSG_UVDDPM_SetEnabledMask, 2209 - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); 2205 + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), 2206 + NULL); 2210 2207 return 0; 2211 2208 } 2212 2209 ··· 2239 2234 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 2240 2235 smum_send_msg_to_smc_with_parameter(hwmgr, 2241 2236 PPSMC_MSG_VCEDPM_SetEnabledMask, 2242 - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); 2237 + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, 2238 + NULL); 2243 2239 return 0; 2244 2240 } 2245 2241 ··· 2491 2485 2492 2486 if (setting->bupdate_sclk) { 2493 2487 if (!data->sclk_dpm_key_disabled) 2494 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); 2488 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); 2495 2489 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 2496 2490 if (levels[i].ActivityLevel != 2497 2491 cpu_to_be16(setting->sclk_activity)) { ··· 2521 2515 } 2522 2516 } 2523 2517 if (!data->sclk_dpm_key_disabled) 2524 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 2518 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); 2525 2519 } 2526 2520 2527 2521 if (setting->bupdate_mclk) { 2528 2522 if (!data->mclk_dpm_key_disabled) 2529 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); 2523 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); 2530 2524 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { 2531 2525 if (mclk_levels[i].ActivityLevel != 2532 2526 cpu_to_be16(setting->mclk_activity)) { ··· 2556 
2550 } 2557 2551 } 2558 2552 if (!data->mclk_dpm_key_disabled) 2559 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 2553 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); 2560 2554 } 2561 2555 return 0; 2562 2556 }
+15 -10
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
··· 128 128 "Invalid SMU Table Length!", return -EINVAL;); 129 129 smum_send_msg_to_smc_with_parameter(hwmgr, 130 130 PPSMC_MSG_SetDriverDramAddrHigh, 131 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 131 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 132 + NULL); 132 133 smum_send_msg_to_smc_with_parameter(hwmgr, 133 134 PPSMC_MSG_SetDriverDramAddrLow, 134 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 135 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 136 + NULL); 135 137 smum_send_msg_to_smc_with_parameter(hwmgr, 136 138 PPSMC_MSG_TransferTableSmu2Dram, 137 - priv->smu_tables.entry[table_id].table_id); 139 + priv->smu_tables.entry[table_id].table_id, 140 + NULL); 138 141 139 142 /* flush hdp cache */ 140 143 amdgpu_asic_flush_hdp(adev, NULL); ··· 169 166 170 167 smum_send_msg_to_smc_with_parameter(hwmgr, 171 168 PPSMC_MSG_SetDriverDramAddrHigh, 172 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 169 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 170 + NULL); 173 171 smum_send_msg_to_smc_with_parameter(hwmgr, 174 172 PPSMC_MSG_SetDriverDramAddrLow, 175 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 173 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 174 + NULL); 176 175 smum_send_msg_to_smc_with_parameter(hwmgr, 177 176 PPSMC_MSG_TransferTableDram2Smu, 178 - priv->smu_tables.entry[table_id].table_id); 177 + priv->smu_tables.entry[table_id].table_id, 178 + NULL); 179 179 180 180 return 0; 181 181 } ··· 188 182 uint32_t smc_driver_if_version; 189 183 190 184 smum_send_msg_to_smc(hwmgr, 191 - PPSMC_MSG_GetDriverIfVersion); 192 - smc_driver_if_version = smum_get_argument(hwmgr); 185 + PPSMC_MSG_GetDriverIfVersion, 186 + &smc_driver_if_version); 193 187 194 188 if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) && 195 189 (smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) { ··· 223 217 { 224 218 struct amdgpu_device *adev = hwmgr->adev; 225 219 226 - 
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); 227 - hwmgr->smu_version = smum_get_argument(hwmgr); 220 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version); 228 221 adev->pm.fw_version = hwmgr->smu_version >> 8; 229 222 230 223 if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+14 -6
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
··· 207 207 208 208 int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) 209 209 { 210 - return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000); 210 + return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL); 211 211 } 212 212 213 213 enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) ··· 337 337 if (hwmgr->not_vf) { 338 338 smum_send_msg_to_smc_with_parameter(hwmgr, 339 339 PPSMC_MSG_SMU_DRAM_ADDR_HI, 340 - upper_32_bits(smu_data->smu_buffer.mc_addr)); 340 + upper_32_bits(smu_data->smu_buffer.mc_addr), 341 + NULL); 341 342 smum_send_msg_to_smc_with_parameter(hwmgr, 342 343 PPSMC_MSG_SMU_DRAM_ADDR_LO, 343 - lower_32_bits(smu_data->smu_buffer.mc_addr)); 344 + lower_32_bits(smu_data->smu_buffer.mc_addr), 345 + NULL); 344 346 } 345 347 fw_to_load = UCODE_ID_RLC_G_MASK 346 348 + UCODE_ID_SDMA0_MASK ··· 407 405 } 408 406 memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc, 409 407 sizeof(struct SMU_DRAMData_TOC)); 410 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); 411 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); 408 + smum_send_msg_to_smc_with_parameter(hwmgr, 409 + PPSMC_MSG_DRV_DRAM_ADDR_HI, 410 + upper_32_bits(smu_data->header_buffer.mc_addr), 411 + NULL); 412 + smum_send_msg_to_smc_with_parameter(hwmgr, 413 + PPSMC_MSG_DRV_DRAM_ADDR_LO, 414 + lower_32_bits(smu_data->header_buffer.mc_addr), 415 + NULL); 412 416 413 - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load); 417 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL); 414 418 415 419 r = smu7_check_fw_load_finish(hwmgr, fw_to_load); 416 420 if (!r)
+30 -17
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
··· 612 612 613 613 smum_send_msg_to_smc_with_parameter(hwmgr, 614 614 PPSMC_MSG_SetClkTableAddrHi, 615 - upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); 615 + upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr), 616 + NULL); 616 617 617 618 smum_send_msg_to_smc_with_parameter(hwmgr, 618 619 PPSMC_MSG_SetClkTableAddrLo, 619 - lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); 620 + lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr), 621 + NULL); 620 622 621 623 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, 622 - smu8_smu->toc_entry_clock_table); 624 + smu8_smu->toc_entry_clock_table, 625 + NULL); 623 626 624 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram); 627 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL); 625 628 626 629 return 0; 627 630 } ··· 642 639 643 640 smum_send_msg_to_smc_with_parameter(hwmgr, 644 641 PPSMC_MSG_SetClkTableAddrHi, 645 - upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); 642 + upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr), 643 + NULL); 646 644 647 645 smum_send_msg_to_smc_with_parameter(hwmgr, 648 646 PPSMC_MSG_SetClkTableAddrLo, 649 - lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr)); 647 + lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr), 648 + NULL); 650 649 651 650 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, 652 - smu8_smu->toc_entry_clock_table); 651 + smu8_smu->toc_entry_clock_table, 652 + NULL); 653 653 654 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu); 654 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL); 655 655 656 656 return 0; 657 657 } ··· 679 673 680 674 smum_send_msg_to_smc_with_parameter(hwmgr, 681 675 PPSMC_MSG_DriverDramAddrHi, 682 - upper_32_bits(smu8_smu->toc_buffer.mc_addr)); 676 + upper_32_bits(smu8_smu->toc_buffer.mc_addr), 677 + NULL); 683 678 684 679 smum_send_msg_to_smc_with_parameter(hwmgr, 685 680 PPSMC_MSG_DriverDramAddrLo, 686 - lower_32_bits(smu8_smu->toc_buffer.mc_addr)); 681 + 
lower_32_bits(smu8_smu->toc_buffer.mc_addr), 682 + NULL); 687 683 688 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs); 684 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL); 689 685 690 686 smum_send_msg_to_smc_with_parameter(hwmgr, 691 687 PPSMC_MSG_ExecuteJob, 692 - smu8_smu->toc_entry_aram); 688 + smu8_smu->toc_entry_aram, 689 + NULL); 693 690 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, 694 - smu8_smu->toc_entry_power_profiling_index); 691 + smu8_smu->toc_entry_power_profiling_index, 692 + NULL); 695 693 696 694 smum_send_msg_to_smc_with_parameter(hwmgr, 697 695 PPSMC_MSG_ExecuteJob, 698 - smu8_smu->toc_entry_initialize_index); 696 + smu8_smu->toc_entry_initialize_index, 697 + NULL); 699 698 700 699 fw_to_check = UCODE_ID_RLC_G_MASK | 701 700 UCODE_ID_SDMA0_MASK | ··· 871 860 unsigned long check_feature) 872 861 { 873 862 int result; 874 - unsigned long features; 863 + uint32_t features; 875 864 876 - result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0); 865 + result = smum_send_msg_to_smc_with_parameter(hwmgr, 866 + PPSMC_MSG_GetFeatureStatus, 867 + 0, 868 + &features); 877 869 if (result == 0) { 878 - features = smum_get_argument(hwmgr); 879 870 if (features & check_feature) 880 871 return true; 881 872 }
+30 -14
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
··· 103 103 return 0; 104 104 } 105 105 106 - uint32_t smum_get_argument(struct pp_hwmgr *hwmgr) 107 - { 108 - if (NULL != hwmgr->smumgr_funcs->get_argument) 109 - return hwmgr->smumgr_funcs->get_argument(hwmgr); 110 - 111 - return 0; 112 - } 113 - 114 106 uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value) 115 107 { 116 108 if (NULL != hwmgr->smumgr_funcs->get_mac_definition) ··· 127 135 return 0; 128 136 } 129 137 130 - int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) 138 + int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp) 131 139 { 132 - if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL) 140 + int ret = 0; 141 + 142 + if (hwmgr == NULL || 143 + hwmgr->smumgr_funcs->send_msg_to_smc == NULL || 144 + (resp && !hwmgr->smumgr_funcs->get_argument)) 133 145 return -EINVAL; 134 146 135 - return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); 147 + ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg); 148 + if (ret) 149 + return ret; 150 + 151 + if (resp) 152 + *resp = hwmgr->smumgr_funcs->get_argument(hwmgr); 153 + 154 + return ret; 136 155 } 137 156 138 157 int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, 139 - uint16_t msg, uint32_t parameter) 158 + uint16_t msg, 159 + uint32_t parameter, 160 + uint32_t *resp) 140 161 { 162 + int ret = 0; 163 + 141 164 if (hwmgr == NULL || 142 - hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL) 165 + hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL || 166 + (resp && !hwmgr->smumgr_funcs->get_argument)) 143 167 return -EINVAL; 144 - return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( 168 + 169 + ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter( 145 170 hwmgr, msg, parameter); 171 + if (ret) 172 + return ret; 173 + 174 + if (resp) 175 + *resp = hwmgr->smumgr_funcs->get_argument(hwmgr); 176 + 177 + return ret; 146 178 } 147 179 148 180 int smum_init_smc_table(struct pp_hwmgr *hwmgr)
+8 -6
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
··· 2702 2702 PHM_PlatformCaps_StablePState)) 2703 2703 smum_send_msg_to_smc_with_parameter(hwmgr, 2704 2704 PPSMC_MSG_UVDDPM_SetEnabledMask, 2705 - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); 2705 + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), 2706 + NULL); 2706 2707 return 0; 2707 2708 } 2708 2709 ··· 2734 2733 PHM_PlatformCaps_StablePState)) 2735 2734 smum_send_msg_to_smc_with_parameter(hwmgr, 2736 2735 PPSMC_MSG_VCEDPM_SetEnabledMask, 2737 - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); 2736 + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, 2737 + NULL); 2738 2738 return 0; 2739 2739 } 2740 2740 ··· 3170 3168 3171 3169 if (setting->bupdate_sclk) { 3172 3170 if (!data->sclk_dpm_key_disabled) 3173 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel); 3171 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL); 3174 3172 for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) { 3175 3173 if (levels[i].ActivityLevel != 3176 3174 cpu_to_be16(setting->sclk_activity)) { ··· 3200 3198 } 3201 3199 } 3202 3200 if (!data->sclk_dpm_key_disabled) 3203 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel); 3201 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL); 3204 3202 } 3205 3203 3206 3204 if (setting->bupdate_mclk) { 3207 3205 if (!data->mclk_dpm_key_disabled) 3208 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel); 3206 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL); 3209 3207 for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) { 3210 3208 if (mclk_levels[i].ActivityLevel != 3211 3209 cpu_to_be16(setting->mclk_activity)) { ··· 3235 3233 } 3236 3234 } 3237 3235 if (!data->mclk_dpm_key_disabled) 3238 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel); 3236 + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL); 3239 3237 } 3240 3238 return 0; 3241 3239 }
+25 -13
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
··· 49 49 "Invalid SMU Table Length!", return -EINVAL); 50 50 smum_send_msg_to_smc_with_parameter(hwmgr, 51 51 PPSMC_MSG_SetDriverDramAddrHigh, 52 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 52 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 53 + NULL); 53 54 smum_send_msg_to_smc_with_parameter(hwmgr, 54 55 PPSMC_MSG_SetDriverDramAddrLow, 55 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 56 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 57 + NULL); 56 58 smum_send_msg_to_smc_with_parameter(hwmgr, 57 59 PPSMC_MSG_TransferTableSmu2Dram, 58 - priv->smu_tables.entry[table_id].table_id); 60 + priv->smu_tables.entry[table_id].table_id, 61 + NULL); 59 62 60 63 /* flush hdp cache */ 61 64 amdgpu_asic_flush_hdp(adev, NULL); ··· 95 92 96 93 smum_send_msg_to_smc_with_parameter(hwmgr, 97 94 PPSMC_MSG_SetDriverDramAddrHigh, 98 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 95 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 96 + NULL); 99 97 smum_send_msg_to_smc_with_parameter(hwmgr, 100 98 PPSMC_MSG_SetDriverDramAddrLow, 101 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)); 99 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 100 + NULL); 102 101 smum_send_msg_to_smc_with_parameter(hwmgr, 103 102 PPSMC_MSG_TransferTableDram2Smu, 104 - priv->smu_tables.entry[table_id].table_id); 103 + priv->smu_tables.entry[table_id].table_id, 104 + NULL); 105 105 106 106 return 0; 107 107 } ··· 124 118 return 0; 125 119 126 120 return smum_send_msg_to_smc_with_parameter(hwmgr, 127 - msg, feature_mask); 121 + msg, feature_mask, NULL); 128 122 } 129 123 130 124 int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr, 131 125 uint64_t *features_enabled) 132 126 { 127 + uint32_t enabled_features; 128 + 133 129 if (features_enabled == NULL) 134 130 return -EINVAL; 135 131 136 - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures); 137 - *features_enabled = smum_get_argument(hwmgr); 132 + 
smum_send_msg_to_smc(hwmgr, 133 + PPSMC_MSG_GetEnabledSmuFeatures, 134 + &enabled_features); 135 + *features_enabled = enabled_features; 138 136 139 137 return 0; 140 138 } ··· 162 152 if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) { 163 153 smum_send_msg_to_smc_with_parameter(hwmgr, 164 154 PPSMC_MSG_SetToolsDramAddrHigh, 165 - upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); 155 + upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr), 156 + NULL); 166 157 smum_send_msg_to_smc_with_parameter(hwmgr, 167 158 PPSMC_MSG_SetToolsDramAddrLow, 168 - lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr)); 159 + lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr), 160 + NULL); 169 161 } 170 162 return 0; 171 163 } ··· 180 168 uint32_t rev_id; 181 169 182 170 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, 183 - PPSMC_MSG_GetDriverIfVersion), 171 + PPSMC_MSG_GetDriverIfVersion, 172 + &smc_driver_if_version), 184 173 "Attempt to get SMC IF Version Number Failed!", 185 174 return -EINVAL); 186 - smc_driver_if_version = smum_get_argument(hwmgr); 187 175 188 176 dev_id = adev->pdev->device; 189 177 rev_id = adev->pdev->revision;
+24 -16
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
··· 52 52 "Invalid SMU Table Length!", return -EINVAL); 53 53 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 54 54 PPSMC_MSG_SetDriverDramAddrHigh, 55 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 55 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 56 + NULL) == 0, 56 57 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL); 57 58 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 58 59 PPSMC_MSG_SetDriverDramAddrLow, 59 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 60 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 61 + NULL) == 0, 60 62 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", 61 63 return -EINVAL); 62 64 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 63 65 PPSMC_MSG_TransferTableSmu2Dram, 64 - table_id) == 0, 66 + table_id, 67 + NULL) == 0, 65 68 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", 66 69 return -EINVAL); 67 70 ··· 103 100 104 101 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 105 102 PPSMC_MSG_SetDriverDramAddrHigh, 106 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 103 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 104 + NULL) == 0, 107 105 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", 108 106 return -EINVAL;); 109 107 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 110 108 PPSMC_MSG_SetDriverDramAddrLow, 111 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0, 109 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 110 + NULL) == 0, 112 111 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", 113 112 return -EINVAL); 114 113 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 115 114 PPSMC_MSG_TransferTableDram2Smu, 116 - table_id) == 0, 115 + table_id, 116 + NULL) == 0, 117 117 "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", 118 118 return -EINVAL); 119 119 ··· 133 127 134 
128 if (enable) { 135 129 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 136 - PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, 130 + PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0, 137 131 "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", 138 132 return -EINVAL); 139 133 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 140 - PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, 134 + PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0, 141 135 "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", 142 136 return -EINVAL); 143 137 } else { 144 138 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 145 - PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, 139 + PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0, 146 140 "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", 147 141 return -EINVAL); 148 142 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr, 149 - PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, 143 + PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0, 150 144 "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", 151 145 return -EINVAL); 152 146 } ··· 163 157 return -EINVAL; 164 158 165 159 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr, 166 - PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, 160 + PPSMC_MSG_GetEnabledSmuFeaturesLow, 161 + &smc_features_low) == 0, 167 162 "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", 168 163 return -EINVAL); 169 - smc_features_low = smum_get_argument(hwmgr); 170 164 171 165 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr, 172 - PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, 166 + PPSMC_MSG_GetEnabledSmuFeaturesHigh, 167 + &smc_features_high) == 0, 173 168 "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", 174 169 return -EINVAL); 175 - smc_features_high = smum_get_argument(hwmgr); 176 
170 177 171 *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | 178 172 (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); ··· 200 194 if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { 201 195 if (!smum_send_msg_to_smc_with_parameter(hwmgr, 202 196 PPSMC_MSG_SetToolsDramAddrHigh, 203 - upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr))) 197 + upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), 198 + NULL)) 204 199 smum_send_msg_to_smc_with_parameter(hwmgr, 205 200 PPSMC_MSG_SetToolsDramAddrLow, 206 - lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); 201 + lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), 202 + NULL); 207 203 } 208 204 return 0; 209 205 }
+38 -24
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
··· 177 177 178 178 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 179 179 PPSMC_MSG_SetDriverDramAddrHigh, 180 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, 180 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 181 + NULL)) == 0, 181 182 "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", 182 183 return ret); 183 184 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 184 185 PPSMC_MSG_SetDriverDramAddrLow, 185 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, 186 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 187 + NULL)) == 0, 186 188 "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", 187 189 return ret); 188 190 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 189 - PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0, 191 + PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0, 190 192 "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", 191 193 return ret); 192 194 ··· 228 226 229 227 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 230 228 PPSMC_MSG_SetDriverDramAddrHigh, 231 - upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, 229 + upper_32_bits(priv->smu_tables.entry[table_id].mc_addr), 230 + NULL)) == 0, 232 231 "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", 233 232 return ret); 234 233 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 235 234 PPSMC_MSG_SetDriverDramAddrLow, 236 - lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0, 235 + lower_32_bits(priv->smu_tables.entry[table_id].mc_addr), 236 + NULL)) == 0, 237 237 "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", 238 238 return ret); 239 239 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 240 - PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0, 240 + PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0, 241 241 "[CopyTableToSMC] Attempt to Transfer 
Table To SMU Failed!", 242 242 return ret); 243 243 ··· 261 257 262 258 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 263 259 PPSMC_MSG_SetDriverDramAddrHigh, 264 - upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, 260 + upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), 261 + NULL)) == 0, 265 262 "[SetActivityMonitor] Attempt to Set Dram Addr High Failed!", 266 263 return ret); 267 264 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 268 265 PPSMC_MSG_SetDriverDramAddrLow, 269 - lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, 266 + lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), 267 + NULL)) == 0, 270 268 "[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!", 271 269 return ret); 272 270 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 273 - PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0, 271 + PPSMC_MSG_TransferTableDram2Smu, 272 + TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), 273 + NULL)) == 0, 274 274 "[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!", 275 275 return ret); 276 276 ··· 291 283 292 284 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 293 285 PPSMC_MSG_SetDriverDramAddrHigh, 294 - upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, 286 + upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), 287 + NULL)) == 0, 295 288 "[GetActivityMonitor] Attempt to Set Dram Addr High Failed!", 296 289 return ret); 297 290 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 298 291 PPSMC_MSG_SetDriverDramAddrLow, 299 - lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0, 292 + lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr), 293 + NULL)) == 0, 300 294 
"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!", 301 295 return ret); 302 296 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 303 297 PPSMC_MSG_TransferTableSmu2Dram, 304 - TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0, 298 + TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0, 305 299 "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!", 306 300 return ret); 307 301 ··· 327 317 328 318 if (enable) { 329 319 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 330 - PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, 320 + PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0, 331 321 "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", 332 322 return ret); 333 323 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 334 - PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, 324 + PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0, 335 325 "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", 336 326 return ret); 337 327 } else { 338 328 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 339 - PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, 329 + PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0, 340 330 "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", 341 331 return ret); 342 332 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 343 - PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, 333 + PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0, 344 334 "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", 345 335 return ret); 346 336 } ··· 358 348 return -EINVAL; 359 349 360 350 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 361 - PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, 351 + PPSMC_MSG_GetEnabledSmuFeaturesLow, 352 + &smc_features_low)) == 0, 362 353 
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", 363 354 return ret); 364 - smc_features_low = smum_get_argument(hwmgr); 365 355 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, 366 - PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, 356 + PPSMC_MSG_GetEnabledSmuFeaturesHigh, 357 + &smc_features_high)) == 0, 367 358 "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", 368 359 return ret); 369 - smc_features_high = smum_get_argument(hwmgr); 370 360 371 361 *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | 372 362 (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); ··· 383 373 if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) { 384 374 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 385 375 PPSMC_MSG_SetToolsDramAddrHigh, 386 - upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); 376 + upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), 377 + NULL); 387 378 if (!ret) 388 379 ret = smum_send_msg_to_smc_with_parameter(hwmgr, 389 380 PPSMC_MSG_SetToolsDramAddrLow, 390 - lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)); 381 + lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr), 382 + NULL); 391 383 } 392 384 393 385 return ret; ··· 403 391 404 392 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 405 393 PPSMC_MSG_SetDriverDramAddrHigh, 406 - upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0, 394 + upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr), 395 + NULL)) == 0, 407 396 "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!", 408 397 return ret); 409 398 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, 410 399 PPSMC_MSG_SetDriverDramAddrLow, 411 - lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0, 400 + lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr), 401 + NULL)) == 0, 412 
402 "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!", 413 403 return ret); 414 404
+11 -6
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
··· 356 356 PHM_PlatformCaps_StablePState)) 357 357 smum_send_msg_to_smc_with_parameter(hwmgr, 358 358 PPSMC_MSG_UVDDPM_SetEnabledMask, 359 - (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel)); 359 + (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel), 360 + NULL); 360 361 return 0; 361 362 } 362 363 ··· 389 388 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) 390 389 smum_send_msg_to_smc_with_parameter(hwmgr, 391 390 PPSMC_MSG_VCEDPM_SetEnabledMask, 392 - (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); 391 + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel, 392 + NULL); 393 393 return 0; 394 394 } 395 395 ··· 1908 1906 1909 1907 smum_send_msg_to_smc_with_parameter(hwmgr, 1910 1908 PPSMC_MSG_EnableModeSwitchRLCNotification, 1911 - adev->gfx.cu_info.number); 1909 + adev->gfx.cu_info.number, 1910 + NULL); 1912 1911 1913 1912 return 0; 1914 1913 } ··· 2063 2060 table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; 2064 2061 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2065 2062 PHM_PlatformCaps_AutomaticDCTransition) && 2066 - !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme)) 2063 + !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL)) 2067 2064 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2068 2065 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); 2069 2066 } else { ··· 2253 2250 if (!hwmgr->avfs_supported) 2254 2251 return 0; 2255 2252 2256 - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); 2253 + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL); 2257 2254 if (!ret) { 2258 2255 if (data->apply_avfs_cks_off_voltage) 2259 - ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); 2256 + ret = smum_send_msg_to_smc(hwmgr, 2257 + PPSMC_MSG_ApplyAvfsCksOffVoltage, 2258 + NULL); 2260 2259 } 2261 2260 2262 2261 return ret;