Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/powerplay: enable pp one vf mode for vega10

Originally, due to the restriction from PSP and SMU, VF has
to send message to hypervisor driver to handle powerplay
change which is complicated and redundant. Currently, SMU
and PSP can support VF to directly handle powerplay
change by itself. Therefore, the old code about the handshake
between VF and PF to handle powerplay will be removed and VF
will use the new registers below to handshake with SMU.
mmMP1_SMN_C2PMSG_101: register to handle SMU message
mmMP1_SMN_C2PMSG_102: register to handle SMU parameter
mmMP1_SMN_C2PMSG_103: register to handle SMU response

v2: remove module parameter pp_one_vf
v3: fix the parens
v4: forbid vf to change smu feature
v5: use hwmon_attributes_visible to skip specified hwmon attribute
v6: change skip condition at vega10_copy_table_to_smc

Signed-off-by: Yintian Tao <yttao@amd.com>
Acked-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Kenneth Feng <kenneth.feng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Yintian Tao and committed by
Alex Deucher
c9ffa427 4cf781c2

+351 -299
+5 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1877 1877 } 1878 1878 } 1879 1879 1880 + if (amdgpu_sriov_vf(adev)) 1881 + amdgpu_virt_init_data_exchange(adev); 1882 + 1880 1883 r = amdgpu_ib_pool_init(adev); 1881 1884 if (r) { 1882 1885 dev_err(adev->dev, "IB initialization failed (%d).\n", r); ··· 1921 1918 amdgpu_amdkfd_device_init(adev); 1922 1919 1923 1920 init_failed: 1924 - if (amdgpu_sriov_vf(adev)) { 1925 - if (!r) 1926 - amdgpu_virt_init_data_exchange(adev); 1921 + if (amdgpu_sriov_vf(adev)) 1927 1922 amdgpu_virt_release_full_gpu(adev, true); 1928 - } 1929 1923 1930 1924 return r; 1931 1925 } ··· 2822 2822 mutex_init(&adev->virt.vf_errors.lock); 2823 2823 hash_init(adev->mn_hash); 2824 2824 mutex_init(&adev->lock_reset); 2825 - mutex_init(&adev->virt.dpm_mutex); 2826 2825 mutex_init(&adev->psp.mutex); 2827 2826 2828 2827 r = amdgpu_device_check_arguments(adev); ··· 3040 3041 3041 3042 amdgpu_fbdev_init(adev); 3042 3043 3043 - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) 3044 - amdgpu_pm_virt_sysfs_init(adev); 3045 - 3046 3044 r = amdgpu_pm_sysfs_init(adev); 3047 3045 if (r) { 3048 3046 adev->pm_sysfs_en = false; ··· 3184 3188 iounmap(adev->rmmio); 3185 3189 adev->rmmio = NULL; 3186 3190 amdgpu_device_doorbell_fini(adev); 3187 - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) 3188 - amdgpu_pm_virt_sysfs_fini(adev); 3189 3191 3190 3192 amdgpu_debugfs_regs_cleanup(adev); 3191 3193 device_remove_file(adev->dev, &dev_attr_pcie_replay_count); ··· 3664 3670 if (r) 3665 3671 goto error; 3666 3672 3673 + amdgpu_virt_init_data_exchange(adev); 3667 3674 /* we need recover gart prior to run SMC/CP/SDMA resume */ 3668 3675 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); 3669 3676 ··· 3682 3687 amdgpu_amdkfd_post_reset(adev); 3683 3688 3684 3689 error: 3685 - amdgpu_virt_init_data_exchange(adev); 3686 3690 amdgpu_virt_release_full_gpu(adev, true); 3687 3691 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3688 3692 amdgpu_inc_vram_lost(adev);
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 703 703 if (adev->pm.dpm_enabled) { 704 704 dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; 705 705 dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; 706 - } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 707 - adev->virt.ops->get_pp_clk) { 708 - dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; 709 - dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; 710 706 } else { 711 707 dev_info.max_engine_clock = adev->clock.default_sclk * 10; 712 708 dev_info.max_memory_clock = adev->clock.default_mclk * 10;
+120 -62
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 159 159 struct amdgpu_device *adev = ddev->dev_private; 160 160 enum amd_pm_state_type pm; 161 161 162 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 163 + return 0; 164 + 162 165 if (is_support_sw_smu(adev)) { 163 166 if (adev->smu.ppt_funcs->get_current_power_state) 164 167 pm = smu_get_current_power_state(&adev->smu); ··· 186 183 struct drm_device *ddev = dev_get_drvdata(dev); 187 184 struct amdgpu_device *adev = ddev->dev_private; 188 185 enum amd_pm_state_type state; 186 + 187 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 188 + return -EINVAL; 189 189 190 190 if (strncmp("battery", buf, strlen("battery")) == 0) 191 191 state = POWER_STATE_TYPE_BATTERY; ··· 289 283 struct amdgpu_device *adev = ddev->dev_private; 290 284 enum amd_dpm_forced_level level = 0xff; 291 285 292 - if (amdgpu_sriov_vf(adev)) 286 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 293 287 return 0; 294 288 295 289 if ((adev->flags & AMD_IS_PX) && ··· 326 320 enum amd_dpm_forced_level current_level = 0xff; 327 321 int ret = 0; 328 322 323 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 324 + return -EINVAL; 325 + 329 326 /* Can't force performance level when the card is off */ 330 327 if ((adev->flags & AMD_IS_PX) && 331 328 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) ··· 355 346 } else { 356 347 count = -EINVAL; 357 348 goto fail; 358 - } 359 - 360 - /* handle sriov case here */ 361 - if (amdgpu_sriov_vf(adev)) { 362 - if (amdgim_is_hwperf(adev) && 363 - adev->virt.ops->force_dpm_level) { 364 - mutex_lock(&adev->pm.mutex); 365 - adev->virt.ops->force_dpm_level(adev, level); 366 - mutex_unlock(&adev->pm.mutex); 367 - return count; 368 - } else { 369 - return -EINVAL; 370 - } 371 349 } 372 350 373 351 if (is_support_sw_smu(adev)) ··· 436 440 enum amd_pm_state_type pm = 0; 437 441 int i = 0, ret = 0; 438 442 443 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 444 + return 0; 445 + 439 446 if 
(is_support_sw_smu(adev)) { 440 447 pm = smu_get_current_power_state(smu); 441 448 ret = smu_get_power_num_states(smu, &data); ··· 468 469 struct drm_device *ddev = dev_get_drvdata(dev); 469 470 struct amdgpu_device *adev = ddev->dev_private; 470 471 472 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 473 + return 0; 474 + 471 475 if (adev->pp_force_state_enabled) 472 476 return amdgpu_get_pp_cur_state(dev, attr, buf); 473 477 else ··· 487 485 enum amd_pm_state_type state = 0; 488 486 unsigned long idx; 489 487 int ret; 488 + 489 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 490 + return -EINVAL; 490 491 491 492 if (strlen(buf) == 1) 492 493 adev->pp_force_state_enabled = false; ··· 540 535 char *table = NULL; 541 536 int size; 542 537 538 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 539 + return 0; 540 + 543 541 if (is_support_sw_smu(adev)) { 544 542 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 545 543 if (size < 0) ··· 569 561 struct drm_device *ddev = dev_get_drvdata(dev); 570 562 struct amdgpu_device *adev = ddev->dev_private; 571 563 int ret = 0; 564 + 565 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 566 + return -EINVAL; 572 567 573 568 if (is_support_sw_smu(adev)) { 574 569 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); ··· 665 654 const char delimiter[3] = {' ', '\n', '\0'}; 666 655 uint32_t type; 667 656 657 + if (amdgpu_sriov_vf(adev)) 658 + return -EINVAL; 659 + 668 660 if (count > 127) 669 661 return -EINVAL; 670 662 ··· 740 726 struct amdgpu_device *adev = ddev->dev_private; 741 727 uint32_t size = 0; 742 728 729 + if (amdgpu_sriov_vf(adev)) 730 + return 0; 731 + 743 732 if (is_support_sw_smu(adev)) { 744 733 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 745 734 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); ··· 787 770 uint64_t featuremask; 788 771 int ret; 789 772 773 + if (amdgpu_sriov_vf(adev)) 774 + return -EINVAL; 
775 + 790 776 ret = kstrtou64(buf, 0, &featuremask); 791 777 if (ret) 792 778 return -EINVAL; ··· 815 795 { 816 796 struct drm_device *ddev = dev_get_drvdata(dev); 817 797 struct amdgpu_device *adev = ddev->dev_private; 798 + 799 + if (amdgpu_sriov_vf(adev)) 800 + return 0; 818 801 819 802 if (is_support_sw_smu(adev)) { 820 803 return smu_sys_get_pp_feature_mask(&adev->smu, buf); ··· 864 841 struct drm_device *ddev = dev_get_drvdata(dev); 865 842 struct amdgpu_device *adev = ddev->dev_private; 866 843 867 - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 868 - adev->virt.ops->get_pp_clk) 869 - return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); 844 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 845 + return 0; 870 846 871 847 if (is_support_sw_smu(adev)) 872 848 return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); ··· 921 899 int ret; 922 900 uint32_t mask = 0; 923 901 924 - if (amdgpu_sriov_vf(adev)) 925 - return 0; 902 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 903 + return -EINVAL; 926 904 927 905 ret = amdgpu_read_mask(buf, count, &mask); 928 906 if (ret) ··· 946 924 struct drm_device *ddev = dev_get_drvdata(dev); 947 925 struct amdgpu_device *adev = ddev->dev_private; 948 926 949 - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 950 - adev->virt.ops->get_pp_clk) 951 - return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); 927 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 928 + return 0; 952 929 953 930 if (is_support_sw_smu(adev)) 954 931 return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); ··· 967 946 int ret; 968 947 uint32_t mask = 0; 969 948 970 - if (amdgpu_sriov_vf(adev)) 971 - return 0; 949 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 950 + return -EINVAL; 972 951 973 952 ret = amdgpu_read_mask(buf, count, &mask); 974 953 if (ret) ··· 992 971 struct drm_device *ddev = dev_get_drvdata(dev); 993 972 struct amdgpu_device *adev = ddev->dev_private; 994 973 
974 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 975 + return 0; 976 + 995 977 if (is_support_sw_smu(adev)) 996 978 return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 997 979 else if (adev->powerplay.pp_funcs->print_clock_levels) ··· 1012 988 struct amdgpu_device *adev = ddev->dev_private; 1013 989 int ret; 1014 990 uint32_t mask = 0; 991 + 992 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 993 + return -EINVAL; 1015 994 1016 995 ret = amdgpu_read_mask(buf, count, &mask); 1017 996 if (ret) ··· 1038 1011 struct drm_device *ddev = dev_get_drvdata(dev); 1039 1012 struct amdgpu_device *adev = ddev->dev_private; 1040 1013 1014 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1015 + return 0; 1016 + 1041 1017 if (is_support_sw_smu(adev)) 1042 1018 return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1043 1019 else if (adev->powerplay.pp_funcs->print_clock_levels) ··· 1058 1028 struct amdgpu_device *adev = ddev->dev_private; 1059 1029 int ret; 1060 1030 uint32_t mask = 0; 1031 + 1032 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1033 + return -EINVAL; 1061 1034 1062 1035 ret = amdgpu_read_mask(buf, count, &mask); 1063 1036 if (ret) ··· 1084 1051 struct drm_device *ddev = dev_get_drvdata(dev); 1085 1052 struct amdgpu_device *adev = ddev->dev_private; 1086 1053 1054 + if (amdgpu_sriov_vf(adev)) 1055 + return 0; 1056 + 1087 1057 if (is_support_sw_smu(adev)) 1088 1058 return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1089 1059 else if (adev->powerplay.pp_funcs->print_clock_levels) ··· 1104 1068 struct amdgpu_device *adev = ddev->dev_private; 1105 1069 int ret; 1106 1070 uint32_t mask = 0; 1071 + 1072 + if (amdgpu_sriov_vf(adev)) 1073 + return -EINVAL; 1107 1074 1108 1075 ret = amdgpu_read_mask(buf, count, &mask); 1109 1076 if (ret) ··· 1130 1091 struct drm_device *ddev = dev_get_drvdata(dev); 1131 1092 struct amdgpu_device *adev = ddev->dev_private; 1132 1093 1094 + if 
(amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1095 + return 0; 1096 + 1133 1097 if (is_support_sw_smu(adev)) 1134 1098 return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1135 1099 else if (adev->powerplay.pp_funcs->print_clock_levels) ··· 1150 1108 struct amdgpu_device *adev = ddev->dev_private; 1151 1109 int ret; 1152 1110 uint32_t mask = 0; 1111 + 1112 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1113 + return -EINVAL; 1153 1114 1154 1115 ret = amdgpu_read_mask(buf, count, &mask); 1155 1116 if (ret) ··· 1177 1132 struct amdgpu_device *adev = ddev->dev_private; 1178 1133 uint32_t value = 0; 1179 1134 1135 + if (amdgpu_sriov_vf(adev)) 1136 + return 0; 1137 + 1180 1138 if (is_support_sw_smu(adev)) 1181 1139 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); 1182 1140 else if (adev->powerplay.pp_funcs->get_sclk_od) ··· 1197 1149 struct amdgpu_device *adev = ddev->dev_private; 1198 1150 int ret; 1199 1151 long int value; 1152 + 1153 + if (amdgpu_sriov_vf(adev)) 1154 + return -EINVAL; 1200 1155 1201 1156 ret = kstrtol(buf, 0, &value); 1202 1157 ··· 1234 1183 struct amdgpu_device *adev = ddev->dev_private; 1235 1184 uint32_t value = 0; 1236 1185 1186 + if (amdgpu_sriov_vf(adev)) 1187 + return 0; 1188 + 1237 1189 if (is_support_sw_smu(adev)) 1238 1190 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); 1239 1191 else if (adev->powerplay.pp_funcs->get_mclk_od) ··· 1254 1200 struct amdgpu_device *adev = ddev->dev_private; 1255 1201 int ret; 1256 1202 long int value; 1203 + 1204 + if (amdgpu_sriov_vf(adev)) 1205 + return 0; 1257 1206 1258 1207 ret = kstrtol(buf, 0, &value); 1259 1208 ··· 1310 1253 struct drm_device *ddev = dev_get_drvdata(dev); 1311 1254 struct amdgpu_device *adev = ddev->dev_private; 1312 1255 1256 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1257 + return 0; 1258 + 1313 1259 if (is_support_sw_smu(adev)) 1314 1260 return smu_get_power_profile_mode(&adev->smu, buf); 1315 1261 else if 
(adev->powerplay.pp_funcs->get_power_profile_mode) ··· 1344 1284 ret = kstrtol(tmp, 0, &profile_mode); 1345 1285 if (ret) 1346 1286 goto fail; 1287 + 1288 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1289 + return -EINVAL; 1347 1290 1348 1291 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1349 1292 if (count < 2 || count > 127) ··· 1394 1331 struct amdgpu_device *adev = ddev->dev_private; 1395 1332 int r, value, size = sizeof(value); 1396 1333 1334 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1335 + return 0; 1336 + 1397 1337 /* read the IP busy sensor */ 1398 1338 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1399 1339 (void *)&value, &size); ··· 1422 1356 struct drm_device *ddev = dev_get_drvdata(dev); 1423 1357 struct amdgpu_device *adev = ddev->dev_private; 1424 1358 int r, value, size = sizeof(value); 1359 + 1360 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1361 + return 0; 1425 1362 1426 1363 /* read the IP busy sensor */ 1427 1364 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, ··· 1456 1387 struct amdgpu_device *adev = ddev->dev_private; 1457 1388 uint64_t count0, count1; 1458 1389 1390 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1391 + return 0; 1392 + 1459 1393 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1460 1394 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 1461 1395 count0, count1, pcie_get_mps(adev->pdev)); ··· 1480 1408 { 1481 1409 struct drm_device *ddev = dev_get_drvdata(dev); 1482 1410 struct amdgpu_device *adev = ddev->dev_private; 1411 + 1412 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1413 + return 0; 1483 1414 1484 1415 if (adev->unique_id) 1485 1416 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); ··· 1678 1603 { 1679 1604 struct amdgpu_device *adev = dev_get_drvdata(dev); 1680 1605 u32 pwm_mode = 0; 1606 + 1681 1607 if (is_support_sw_smu(adev)) { 1682 1608 pwm_mode = 
smu_get_fan_control_mode(&adev->smu); 1683 1609 } else { ··· 2129 2053 int err; 2130 2054 u32 value; 2131 2055 2056 + if (amdgpu_sriov_vf(adev)) 2057 + return -EINVAL; 2058 + 2132 2059 err = kstrtou32(buf, 10, &value); 2133 2060 if (err) 2134 2061 return err; ··· 2377 2298 struct device *dev = kobj_to_dev(kobj); 2378 2299 struct amdgpu_device *adev = dev_get_drvdata(dev); 2379 2300 umode_t effective_mode = attr->mode; 2301 + 2302 + /* under multi-vf mode, the hwmon attributes are all not supported */ 2303 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 2304 + return 0; 2305 + 2306 + /* there is no fan under pp one vf mode */ 2307 + if (amdgpu_sriov_is_pp_one_vf(adev) && 2308 + (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2309 + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2310 + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2311 + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2312 + attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2313 + attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2314 + attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2315 + attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2316 + attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2317 + return 0; 2380 2318 2381 2319 /* Skip fan attributes if fan is not present */ 2382 2320 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || ··· 2824 2728 DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n", 2825 2729 enable ? 
"true" : "false", ret); 2826 2730 } 2827 - } 2828 - 2829 - int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) 2830 - { 2831 - int ret = 0; 2832 - 2833 - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) 2834 - return ret; 2835 - 2836 - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 2837 - if (ret) { 2838 - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 2839 - return ret; 2840 - } 2841 - 2842 - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 2843 - if (ret) { 2844 - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 2845 - return ret; 2846 - } 2847 - 2848 - ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 2849 - if (ret) { 2850 - DRM_ERROR("failed to create device file for dpm state\n"); 2851 - return ret; 2852 - } 2853 - 2854 - return ret; 2855 - } 2856 - 2857 - void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev) 2858 - { 2859 - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) 2860 - return; 2861 - 2862 - device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 2863 - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 2864 - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 2865 2731 } 2866 2732 2867 2733 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
-51
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 379 379 } 380 380 } 381 381 } 382 - 383 - static uint32_t parse_clk(char *buf, bool min) 384 - { 385 - char *ptr = buf; 386 - uint32_t clk = 0; 387 - 388 - do { 389 - ptr = strchr(ptr, ':'); 390 - if (!ptr) 391 - break; 392 - ptr+=2; 393 - if (kstrtou32(ptr, 10, &clk)) 394 - return 0; 395 - } while (!min); 396 - 397 - return clk * 100; 398 - } 399 - 400 - uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) 401 - { 402 - char *buf = NULL; 403 - uint32_t clk = 0; 404 - 405 - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 406 - if (!buf) 407 - return -ENOMEM; 408 - 409 - adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); 410 - clk = parse_clk(buf, lowest); 411 - 412 - kfree(buf); 413 - 414 - return clk; 415 - } 416 - 417 - uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) 418 - { 419 - char *buf = NULL; 420 - uint32_t clk = 0; 421 - 422 - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 423 - if (!buf) 424 - return -ENOMEM; 425 - 426 - adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); 427 - clk = parse_clk(buf, lowest); 428 - 429 - kfree(buf); 430 - 431 - return clk; 432 - }
+4 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
··· 57 57 int (*reset_gpu)(struct amdgpu_device *adev); 58 58 int (*wait_reset)(struct amdgpu_device *adev); 59 59 void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); 60 - int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); 61 - int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); 62 60 }; 63 61 64 62 /* ··· 83 85 AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, 84 86 /* VRAM LOST by GIM */ 85 87 AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, 86 - /* HW PERF SIM in GIM */ 87 - AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), 88 + /* PP ONE VF MODE in GIM */ 89 + AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), 88 90 }; 89 91 90 92 struct amd_sriov_msg_pf2vf_info_header { ··· 255 257 struct amdgpu_vf_error_buffer vf_errors; 256 258 struct amdgpu_virt_fw_reserve fw_reserve; 257 259 uint32_t gim_feature; 258 - /* protect DPM events to GIM */ 259 - struct mutex dpm_mutex; 260 260 uint32_t reg_access_mode; 261 261 }; 262 262 ··· 282 286 #endif 283 287 } 284 288 285 - #define amdgim_is_hwperf(adev) \ 286 - ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) 289 + #define amdgpu_sriov_is_pp_one_vf(adev) \ 290 + ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) 287 291 288 292 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 289 293 void amdgpu_virt_init_setting(struct amdgpu_device *adev); ··· 302 306 unsigned int key, 303 307 unsigned int chksum); 304 308 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); 305 - uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); 306 - uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); 307 309 #endif
-78
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 158 158 xgpu_ai_mailbox_set_valid(adev, false); 159 159 } 160 160 161 - static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) 162 - { 163 - int r = 0; 164 - u32 req, val, size; 165 - 166 - if (!amdgim_is_hwperf(adev) || buf == NULL) 167 - return -EBADRQC; 168 - 169 - switch(type) { 170 - case PP_SCLK: 171 - req = IDH_IRQ_GET_PP_SCLK; 172 - break; 173 - case PP_MCLK: 174 - req = IDH_IRQ_GET_PP_MCLK; 175 - break; 176 - default: 177 - return -EBADRQC; 178 - } 179 - 180 - mutex_lock(&adev->virt.dpm_mutex); 181 - 182 - xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); 183 - 184 - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); 185 - if (!r && adev->fw_vram_usage.va != NULL) { 186 - val = RREG32_NO_KIQ( 187 - SOC15_REG_OFFSET(NBIO, 0, 188 - mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); 189 - size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + 190 - val), PAGE_SIZE); 191 - 192 - if (size < PAGE_SIZE) 193 - strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); 194 - else 195 - size = 0; 196 - 197 - r = size; 198 - goto out; 199 - } 200 - 201 - r = xgpu_ai_poll_msg(adev, IDH_FAIL); 202 - if(r) 203 - pr_info("%s DPM request failed", 204 - (type == PP_SCLK)? 
"SCLK" : "MCLK"); 205 - 206 - out: 207 - mutex_unlock(&adev->virt.dpm_mutex); 208 - return r; 209 - } 210 - 211 - static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) 212 - { 213 - int r = 0; 214 - u32 req = IDH_IRQ_FORCE_DPM_LEVEL; 215 - 216 - if (!amdgim_is_hwperf(adev)) 217 - return -EBADRQC; 218 - 219 - mutex_lock(&adev->virt.dpm_mutex); 220 - xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); 221 - 222 - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); 223 - if (!r) 224 - goto out; 225 - 226 - r = xgpu_ai_poll_msg(adev, IDH_FAIL); 227 - if (!r) 228 - pr_info("DPM request failed"); 229 - else 230 - pr_info("Mailbox is broken"); 231 - 232 - out: 233 - mutex_unlock(&adev->virt.dpm_mutex); 234 - return r; 235 - } 236 - 237 161 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, 238 162 enum idh_request req) 239 163 { ··· 379 455 .reset_gpu = xgpu_ai_request_reset, 380 456 .wait_reset = NULL, 381 457 .trans_msg = xgpu_ai_mailbox_trans_msg, 382 - .get_pp_clk = xgpu_ai_get_pp_clk, 383 - .force_dpm_level = xgpu_ai_force_dpm_level, 384 458 };
-4
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
··· 35 35 IDH_REL_GPU_FINI_ACCESS, 36 36 IDH_REQ_GPU_RESET_ACCESS, 37 37 38 - IDH_IRQ_FORCE_DPM_LEVEL = 10, 39 - IDH_IRQ_GET_PP_SCLK, 40 - IDH_IRQ_GET_PP_MCLK, 41 - 42 38 IDH_LOG_VF_ERROR = 200, 43 39 }; 44 40
+4 -4
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 775 775 } 776 776 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 777 777 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 778 - if (!amdgpu_sriov_vf(adev)) { 779 - if (is_support_sw_smu(adev)) 778 + if (is_support_sw_smu(adev)) { 779 + if (!amdgpu_sriov_vf(adev)) 780 780 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 781 - else 782 - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 781 + } else { 782 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 783 783 } 784 784 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 785 785 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+3 -1
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
··· 48 48 49 49 hwmgr->adev = adev; 50 50 hwmgr->not_vf = !amdgpu_sriov_vf(adev); 51 - hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false; 52 51 hwmgr->device = amdgpu_cgs_create_device(adev); 53 52 mutex_init(&hwmgr->smu_lock); 54 53 hwmgr->chip_family = adev->family; ··· 274 275 static int pp_dpm_load_fw(void *handle) 275 276 { 276 277 struct pp_hwmgr *hwmgr = handle; 278 + 279 + if (!hwmgr->not_vf) 280 + return 0; 277 281 278 282 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) 279 283 return -EINVAL;
+13 -2
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
··· 81 81 adev = hwmgr->adev; 82 82 83 83 /* Skip for suspend/resume case */ 84 - if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) 85 - && adev->in_suspend) { 84 + if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr) 85 + && !amdgpu_passthrough(adev) && adev->in_suspend) { 86 86 pr_info("dpm has been enabled\n"); 87 87 return 0; 88 88 } ··· 200 200 { 201 201 PHM_FUNC_CHECK(hwmgr); 202 202 203 + if (!hwmgr->not_vf) 204 + return 0; 205 + 203 206 if (hwmgr->hwmgr_func->stop_thermal_controller == NULL) 204 207 return -EINVAL; 205 208 ··· 240 237 TEMP_RANGE_MAX}; 241 238 struct amdgpu_device *adev = hwmgr->adev; 242 239 240 + if (!hwmgr->not_vf) 241 + return 0; 242 + 243 243 if (hwmgr->hwmgr_func->get_thermal_temperature_range) 244 244 hwmgr->hwmgr_func->get_thermal_temperature_range( 245 245 hwmgr, &range); ··· 269 263 bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) 270 264 { 271 265 PHM_FUNC_CHECK(hwmgr); 266 + if (hwmgr->pp_one_vf) 267 + return false; 272 268 273 269 if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) 274 270 return false; ··· 489 481 int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr) 490 482 { 491 483 PHM_FUNC_CHECK(hwmgr); 484 + 485 + if (!hwmgr->not_vf) 486 + return 0; 492 487 493 488 if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL) 494 489 return -EINVAL;
+16
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
··· 221 221 { 222 222 int ret = 0; 223 223 224 + hwmgr->pp_one_vf = amdgpu_sriov_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev); 225 + hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf)) 226 + ? true : false; 224 227 if (!hwmgr->pm_en) 225 228 return 0; 226 229 ··· 282 279 283 280 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) 284 281 { 282 + if (!hwmgr->not_vf) 283 + return 0; 284 + 285 285 if (!hwmgr || !hwmgr->pm_en) 286 286 return 0; 287 287 ··· 305 299 { 306 300 int ret = 0; 307 301 302 + if (!hwmgr->not_vf) 303 + return 0; 304 + 308 305 if (!hwmgr || !hwmgr->pm_en) 309 306 return 0; 310 307 ··· 326 317 int hwmgr_resume(struct pp_hwmgr *hwmgr) 327 318 { 328 319 int ret = 0; 320 + 321 + if (!hwmgr->not_vf) 322 + return 0; 329 323 330 324 if (!hwmgr) 331 325 return -EINVAL; ··· 377 365 378 366 switch (task_id) { 379 367 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 368 + if (!hwmgr->not_vf) 369 + return ret; 380 370 ret = phm_pre_display_configuration_changed(hwmgr); 381 371 if (ret) 382 372 return ret; ··· 395 381 enum PP_StateUILabel requested_ui_label; 396 382 struct pp_power_state *requested_ps = NULL; 397 383 384 + if (!hwmgr->not_vf) 385 + return ret; 398 386 if (user_state == NULL) { 399 387 ret = -EINVAL; 400 388 break;
+14 -12
drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
··· 262 262 uint32_t index; 263 263 long workload; 264 264 265 - if (!skip_display_settings) 266 - phm_display_configuration_changed(hwmgr); 265 + if (hwmgr->not_vf) { 266 + if (!skip_display_settings) 267 + phm_display_configuration_changed(hwmgr); 267 268 268 - if (hwmgr->ps) 269 - power_state_management(hwmgr, new_ps); 270 - else 271 - /* 272 - * for vega12/vega20 which does not support power state manager 273 - * DAL clock limits should also be honoured 274 - */ 275 - phm_apply_clock_adjust_rules(hwmgr); 269 + if (hwmgr->ps) 270 + power_state_management(hwmgr, new_ps); 271 + else 272 + /* 273 + * for vega12/vega20 which does not support power state manager 274 + * DAL clock limits should also be honoured 275 + */ 276 + phm_apply_clock_adjust_rules(hwmgr); 276 277 277 - if (!skip_display_settings) 278 - phm_notify_smc_display_config_after_ps_adjustment(hwmgr); 278 + if (!skip_display_settings) 279 + phm_notify_smc_display_config_after_ps_adjustment(hwmgr); 280 + } 279 281 280 282 if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) 281 283 hwmgr->dpm_level = hwmgr->request_dpm_level;
+110 -48
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 912 912 hwmgr->platform_descriptor.clockStep.memoryClock = 500; 913 913 914 914 data->total_active_cus = adev->gfx.cu_info.number; 915 + if (!hwmgr->not_vf) 916 + return result; 917 + 915 918 /* Setup default Overdrive Fan control settings */ 916 919 data->odn_fan_table.target_fan_speed = 917 920 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; ··· 982 979 983 980 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) 984 981 { 982 + if (!hwmgr->not_vf) 983 + return 0; 984 + 985 985 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr), 986 986 "Failed to init sclk threshold!", 987 987 return -EINVAL); ··· 2509 2503 "Failed to setup default DPM tables!", 2510 2504 return result); 2511 2505 2506 + if (!hwmgr->not_vf) 2507 + return 0; 2508 + 2512 2509 /* initialize ODN table */ 2513 2510 if (hwmgr->od_enabled) { 2514 2511 if (odn_table->max_vddc) { ··· 2835 2826 struct vega10_hwmgr *data = hwmgr->backend; 2836 2827 uint32_t i, feature_mask = 0; 2837 2828 2829 + if (!hwmgr->not_vf) 2830 + return 0; 2838 2831 2839 2832 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){ 2840 2833 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, ··· 2943 2932 struct vega10_hwmgr *data = hwmgr->backend; 2944 2933 int tmp_result, result = 0; 2945 2934 2946 - vega10_enable_disable_PCC_limit_feature(hwmgr, true); 2935 + if (hwmgr->not_vf) { 2936 + vega10_enable_disable_PCC_limit_feature(hwmgr, true); 2947 2937 2948 - smum_send_msg_to_smc_with_parameter(hwmgr, 2949 - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); 2938 + smum_send_msg_to_smc_with_parameter(hwmgr, 2939 + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); 2950 2940 2951 - tmp_result = vega10_construct_voltage_tables(hwmgr); 2952 - PP_ASSERT_WITH_CODE(!tmp_result, 2953 - "Failed to construct voltage tables!", 2954 - result = tmp_result); 2955 - 2956 - tmp_result = vega10_init_smc_table(hwmgr); 2957 - PP_ASSERT_WITH_CODE(!tmp_result, 2958 - "Failed to initialize SMC table!", 
2959 - result = tmp_result); 2960 - 2961 - if (PP_CAP(PHM_PlatformCaps_ThermalController)) { 2962 - tmp_result = vega10_enable_thermal_protection(hwmgr); 2941 + tmp_result = vega10_construct_voltage_tables(hwmgr); 2963 2942 PP_ASSERT_WITH_CODE(!tmp_result, 2964 - "Failed to enable thermal protection!", 2965 - result = tmp_result); 2943 + "Failed to construct voltage tables!", 2944 + result = tmp_result); 2966 2945 } 2967 2946 2968 - tmp_result = vega10_enable_vrhot_feature(hwmgr); 2969 - PP_ASSERT_WITH_CODE(!tmp_result, 2970 - "Failed to enable VR hot feature!", 2971 - result = tmp_result); 2947 + if (hwmgr->not_vf || hwmgr->pp_one_vf) { 2948 + tmp_result = vega10_init_smc_table(hwmgr); 2949 + PP_ASSERT_WITH_CODE(!tmp_result, 2950 + "Failed to initialize SMC table!", 2951 + result = tmp_result); 2952 + } 2972 2953 2973 - tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); 2974 - PP_ASSERT_WITH_CODE(!tmp_result, 2975 - "Failed to enable deep sleep master switch!", 2976 - result = tmp_result); 2954 + if (hwmgr->not_vf) { 2955 + if (PP_CAP(PHM_PlatformCaps_ThermalController)) { 2956 + tmp_result = vega10_enable_thermal_protection(hwmgr); 2957 + PP_ASSERT_WITH_CODE(!tmp_result, 2958 + "Failed to enable thermal protection!", 2959 + result = tmp_result); 2960 + } 2977 2961 2978 - tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); 2979 - PP_ASSERT_WITH_CODE(!tmp_result, 2980 - "Failed to start DPM!", result = tmp_result); 2962 + tmp_result = vega10_enable_vrhot_feature(hwmgr); 2963 + PP_ASSERT_WITH_CODE(!tmp_result, 2964 + "Failed to enable VR hot feature!", 2965 + result = tmp_result); 2981 2966 2982 - /* enable didt, do not abort if failed didt */ 2983 - tmp_result = vega10_enable_didt_config(hwmgr); 2984 - PP_ASSERT(!tmp_result, 2985 - "Failed to enable didt config!"); 2967 + tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); 2968 + PP_ASSERT_WITH_CODE(!tmp_result, 2969 + "Failed to enable deep sleep master switch!", 2970 + result = tmp_result); 
2971 + } 2972 + 2973 + if (hwmgr->not_vf) { 2974 + tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); 2975 + PP_ASSERT_WITH_CODE(!tmp_result, 2976 + "Failed to start DPM!", result = tmp_result); 2977 + } 2978 + 2979 + if (hwmgr->not_vf) { 2980 + /* enable didt, do not abort if failed didt */ 2981 + tmp_result = vega10_enable_didt_config(hwmgr); 2982 + PP_ASSERT(!tmp_result, 2983 + "Failed to enable didt config!"); 2984 + } 2986 2985 2987 2986 tmp_result = vega10_enable_power_containment(hwmgr); 2988 2987 PP_ASSERT_WITH_CODE(!tmp_result, 2989 - "Failed to enable power containment!", 2990 - result = tmp_result); 2988 + "Failed to enable power containment!", 2989 + result = tmp_result); 2991 2990 2992 - tmp_result = vega10_power_control_set_level(hwmgr); 2993 - PP_ASSERT_WITH_CODE(!tmp_result, 2994 - "Failed to power control set level!", 2995 - result = tmp_result); 2991 + if (hwmgr->not_vf) { 2992 + tmp_result = vega10_power_control_set_level(hwmgr); 2993 + PP_ASSERT_WITH_CODE(!tmp_result, 2994 + "Failed to power control set level!", 2995 + result = tmp_result); 2996 2996 2997 - tmp_result = vega10_enable_ulv(hwmgr); 2998 - PP_ASSERT_WITH_CODE(!tmp_result, 2999 - "Failed to enable ULV!", 3000 - result = tmp_result); 2997 + tmp_result = vega10_enable_ulv(hwmgr); 2998 + PP_ASSERT_WITH_CODE(!tmp_result, 2999 + "Failed to enable ULV!", 3000 + result = tmp_result); 3001 + } 3001 3002 3002 3003 return result; 3003 3004 } ··· 3103 3080 performance_level->soc_clock = socclk_dep_table->entries 3104 3081 [state_entry->ucSocClockIndexHigh].ulClk; 3105 3082 if (gfxclk_dep_table->ucRevId == 0) { 3106 - performance_level->gfx_clock = gfxclk_dep_table->entries 3107 - [state_entry->ucGfxClockIndexHigh].ulClk; 3083 + /* under vega10 pp one vf mode, the gfx clk dpm need be lower 3084 + * to level-4 due to the limited 110w-power 3085 + */ 3086 + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) 3087 + performance_level->gfx_clock = 3088 + 
gfxclk_dep_table->entries[4].ulClk; 3089 + else 3090 + performance_level->gfx_clock = gfxclk_dep_table->entries 3091 + [state_entry->ucGfxClockIndexHigh].ulClk; 3108 3092 } else if (gfxclk_dep_table->ucRevId == 1) { 3109 3093 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; 3110 - performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; 3094 + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) 3095 + performance_level->gfx_clock = patom_record_V2[4].ulClk; 3096 + else 3097 + performance_level->gfx_clock = 3098 + patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; 3111 3099 } 3112 3100 3113 3101 performance_level->mem_clock = mclk_dep_table->entries ··· 3529 3495 smum_send_msg_to_smc_with_parameter(hwmgr, 3530 3496 PPSMC_MSG_SetSoftMinGfxclkByIndex, 3531 3497 data->smc_state_table.gfx_boot_level); 3498 + 3532 3499 data->dpm_table.gfx_table.dpm_state.soft_min_level = 3533 3500 data->smc_state_table.gfx_boot_level; 3534 3501 } ··· 3552 3517 data->smc_state_table.mem_boot_level; 3553 3518 } 3554 3519 } 3520 + 3521 + if (!hwmgr->not_vf) 3522 + return 0; 3555 3523 3556 3524 if (!data->registry_data.socclk_dpm_key_disabled) { 3557 3525 if (data->smc_state_table.soc_boot_level != ··· 3597 3559 data->smc_state_table.mem_max_level; 3598 3560 } 3599 3561 } 3562 + 3563 + if (!hwmgr->not_vf) 3564 + return 0; 3600 3565 3601 3566 if (!data->registry_data.socclk_dpm_key_disabled) { 3602 3567 if (data->smc_state_table.soc_max_level != ··· 4095 4054 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { 4096 4055 *mclk_mask = 0; 4097 4056 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { 4098 - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; 4057 + /* under vega10 pp one vf mode, the gfx clk dpm need be lower 4058 + * to level-4 due to the limited power 4059 + */ 4060 + if (hwmgr->pp_one_vf) 4061 + *sclk_mask = 4; 4062 + else 4063 + *sclk_mask = 
table_info->vdd_dep_on_sclk->count - 1; 4099 4064 *soc_mask = table_info->vdd_dep_on_socclk->count - 1; 4100 4065 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; 4101 4066 } 4067 + 4102 4068 return 0; 4103 4069 } 4104 4070 4105 4071 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) 4106 4072 { 4073 + if (!hwmgr->not_vf) 4074 + return; 4075 + 4107 4076 switch (mode) { 4108 4077 case AMD_FAN_CTRL_NONE: 4109 4078 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); ··· 4226 4175 default: 4227 4176 break; 4228 4177 } 4178 + 4179 + if (!hwmgr->not_vf) 4180 + return ret; 4229 4181 4230 4182 if (!ret) { 4231 4183 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ··· 4533 4479 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); 4534 4480 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; 4535 4481 4536 - int i, now, size = 0; 4482 + int i, now, size = 0, count = 0; 4537 4483 4538 4484 switch (type) { 4539 4485 case PP_SCLK: ··· 4543 4489 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); 4544 4490 now = smum_get_argument(hwmgr); 4545 4491 4546 - for (i = 0; i < sclk_table->count; i++) 4492 + if (hwmgr->pp_one_vf && 4493 + (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) 4494 + count = 5; 4495 + else 4496 + count = sclk_table->count; 4497 + for (i = 0; i < count; i++) 4547 4498 size += sprintf(buf + size, "%d: %uMhz %s\n", 4548 4499 i, sclk_table->dpm_levels[i].value / 100, 4549 4500 (i == now) ? "*" : ""); ··· 4758 4699 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 4759 4700 { 4760 4701 int tmp_result, result = 0; 4702 + 4703 + if (!hwmgr->not_vf) 4704 + return 0; 4761 4705 4762 4706 if (PP_CAP(PHM_PlatformCaps_ThermalController)) 4763 4707 vega10_disable_thermal_protection(hwmgr);
+3
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
··· 1343 1343 hwmgr->default_power_limit = hwmgr->power_limit = 1344 1344 (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); 1345 1345 1346 + if (!hwmgr->not_vf) 1347 + return 0; 1348 + 1346 1349 if (PP_CAP(PHM_PlatformCaps_PowerContainment)) { 1347 1350 if (data->smu_features[GNLD_PPT].supported) 1348 1351 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+1
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
··· 741 741 uint32_t smu_version; 742 742 bool not_vf; 743 743 bool pm_en; 744 + bool pp_one_vf; 744 745 struct mutex smu_lock; 745 746 746 747 uint32_t pp_table_version;
+44 -12
drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
··· 61 61 uint32_t reg; 62 62 uint32_t ret; 63 63 64 - reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 64 + /* Due to the L1 policy problem under SRIOV, we have to use 65 + * mmMP1_SMN_C2PMSG_103 as the driver response register 66 + */ 67 + if (hwmgr->pp_one_vf) { 68 + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103); 65 69 66 - ret = phm_wait_for_register_unequal(hwmgr, reg, 67 - 0, MP1_C2PMSG_90__CONTENT_MASK); 70 + ret = phm_wait_for_register_unequal(hwmgr, reg, 71 + 0, MP1_C2PMSG_103__CONTENT_MASK); 68 72 69 - if (ret) 70 - pr_err("No response from smu\n"); 73 + if (ret) 74 + pr_err("No response from smu\n"); 71 75 72 - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); 76 + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103); 77 + } else { 78 + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 79 + 80 + ret = phm_wait_for_register_unequal(hwmgr, reg, 81 + 0, MP1_C2PMSG_90__CONTENT_MASK); 82 + 83 + if (ret) 84 + pr_err("No response from smu\n"); 85 + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); 86 + } 73 87 } 74 88 75 89 /* ··· 97 83 { 98 84 struct amdgpu_device *adev = hwmgr->adev; 99 85 100 - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); 86 + if (hwmgr->pp_one_vf) { 87 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg); 88 + } else { 89 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); 90 + } 101 91 102 92 return 0; 103 93 } ··· 119 101 120 102 smu9_wait_for_response(hwmgr); 121 103 122 - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); 104 + if (hwmgr->pp_one_vf) 105 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); 106 + else 107 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); 123 108 124 109 smu9_send_msg_to_smc_without_waiting(hwmgr, msg); 125 110 ··· 148 127 149 128 smu9_wait_for_response(hwmgr); 150 129 151 - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); 152 - 153 - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); 130 + /* Due to the L1 policy problem under SRIOV, we have to use 131 + * mmMP1_SMN_C2PMSG_101 as the driver message register 
and 132 + * mmMP1_SMN_C2PMSG_102 as the driver parameter register. 133 + */ 134 + if (hwmgr->pp_one_vf) { 135 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); 136 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter); 137 + } else { 138 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); 139 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); 140 + } 154 141 155 142 smu9_send_msg_to_smc_without_waiting(hwmgr, msg); 156 143 ··· 173 144 { 174 145 struct amdgpu_device *adev = hwmgr->adev; 175 146 176 - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); 147 + if (hwmgr->pp_one_vf) 148 + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102); 149 + else 150 + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); 177 151 }
+14
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
··· 71 71 { 72 72 struct vega10_smumgr *priv = hwmgr->smu_backend; 73 73 74 + /* under sriov, vbios or hypervisor driver 75 + * has already copy table to smc so here only skip it 76 + */ 77 + if (!hwmgr->not_vf) 78 + return 0; 79 + 74 80 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, 75 81 "Invalid SMU Table ID!", return -EINVAL); 76 82 PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, ··· 105 99 { 106 100 int msg = enable ? PPSMC_MSG_EnableSmuFeatures : 107 101 PPSMC_MSG_DisableSmuFeatures; 102 + 103 + /* VF has no permission to change smu feature due 104 + * to security concern even under pp one vf mode 105 + * it still can't do it. For vega10, the smu in 106 + * vbios will enable the appropriate features. 107 + * */ 108 + if (!hwmgr->not_vf) 109 + return 0; 108 110 109 111 return smum_send_msg_to_smc_with_parameter(hwmgr, 110 112 msg, feature_mask);