Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu/pm: properly handle runtime pm

If power management sysfs or debugfs files are accessed,
power up the GPU when necessary.

Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

+618 -212
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 37 37 #include <linux/hwmon.h> 38 38 #include <linux/hwmon-sysfs.h> 39 39 #include <linux/nospec.h> 40 + #include <linux/pm_runtime.h> 40 41 #include "hwmgr.h" 41 42 #define WIDTH_4K 3840 42 43 ··· 159 158 struct drm_device *ddev = dev_get_drvdata(dev); 160 159 struct amdgpu_device *adev = ddev->dev_private; 161 160 enum amd_pm_state_type pm; 161 + int ret; 162 162 163 163 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 164 164 return 0; 165 + 166 + ret = pm_runtime_get_sync(ddev->dev); 167 + if (ret < 0) 168 + return ret; 165 169 166 170 if (is_support_sw_smu(adev)) { 167 171 if (adev->smu.ppt_funcs->get_current_power_state) ··· 178 172 } else { 179 173 pm = adev->pm.dpm.user_state; 180 174 } 175 + 176 + pm_runtime_mark_last_busy(ddev->dev); 177 + pm_runtime_put_autosuspend(ddev->dev); 181 178 182 179 return snprintf(buf, PAGE_SIZE, "%s\n", 183 180 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : ··· 195 186 struct drm_device *ddev = dev_get_drvdata(dev); 196 187 struct amdgpu_device *adev = ddev->dev_private; 197 188 enum amd_pm_state_type state; 189 + int ret; 198 190 199 191 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 200 192 return -EINVAL; ··· 211 201 goto fail; 212 202 } 213 203 204 + ret = pm_runtime_get_sync(ddev->dev); 205 + if (ret < 0) 206 + return ret; 207 + 214 208 if (is_support_sw_smu(adev)) { 215 209 mutex_lock(&adev->pm.mutex); 216 210 adev->pm.dpm.user_state = state; ··· 226 212 adev->pm.dpm.user_state = state; 227 213 mutex_unlock(&adev->pm.mutex); 228 214 229 - /* Can't set dpm state when the card is off */ 230 - if (!(adev->flags & AMD_IS_PX) || 231 - (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 232 - amdgpu_pm_compute_clocks(adev); 215 + amdgpu_pm_compute_clocks(adev); 233 216 } 217 + pm_runtime_mark_last_busy(ddev->dev); 218 + pm_runtime_put_autosuspend(ddev->dev); 219 + 220 + 234 221 fail: 235 222 return count; 236 223 } ··· 303 288 struct drm_device *ddev = dev_get_drvdata(dev); 304 289 struct 
amdgpu_device *adev = ddev->dev_private; 305 290 enum amd_dpm_forced_level level = 0xff; 291 + int ret; 306 292 307 293 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 308 294 return 0; 309 295 310 - if ((adev->flags & AMD_IS_PX) && 311 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 312 - return snprintf(buf, PAGE_SIZE, "off\n"); 296 + ret = pm_runtime_get_sync(ddev->dev); 297 + if (ret < 0) 298 + return ret; 313 299 314 300 if (is_support_sw_smu(adev)) 315 301 level = smu_get_performance_level(&adev->smu); ··· 318 302 level = amdgpu_dpm_get_performance_level(adev); 319 303 else 320 304 level = adev->pm.dpm.forced_level; 305 + 306 + pm_runtime_mark_last_busy(ddev->dev); 307 + pm_runtime_put_autosuspend(ddev->dev); 321 308 322 309 return snprintf(buf, PAGE_SIZE, "%s\n", 323 310 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : ··· 348 329 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 349 330 return -EINVAL; 350 331 351 - /* Can't force performance level when the card is off */ 352 - if ((adev->flags & AMD_IS_PX) && 353 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 354 - return -EINVAL; 355 - 356 332 if (strncmp("low", buf, strlen("low")) == 0) { 357 333 level = AMD_DPM_FORCED_LEVEL_LOW; 358 334 } else if (strncmp("high", buf, strlen("high")) == 0) { ··· 367 353 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 368 354 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 369 355 } else { 370 - count = -EINVAL; 371 - goto fail; 356 + return -EINVAL; 372 357 } 358 + 359 + ret = pm_runtime_get_sync(ddev->dev); 360 + if (ret < 0) 361 + return ret; 373 362 374 363 if (is_support_sw_smu(adev)) 375 364 current_level = smu_get_performance_level(&adev->smu); 376 365 else if (adev->powerplay.pp_funcs->get_performance_level) 377 366 current_level = amdgpu_dpm_get_performance_level(adev); 378 367 379 - if (current_level == level) 368 + if (current_level == level) { 369 + pm_runtime_mark_last_busy(ddev->dev); 370 + 
pm_runtime_put_autosuspend(ddev->dev); 380 371 return count; 372 + } 381 373 382 374 /* profile_exit setting is valid only when current mode is in profile mode */ 383 375 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | ··· 392 372 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && 393 373 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { 394 374 pr_err("Currently not in any profile mode!\n"); 375 + pm_runtime_mark_last_busy(ddev->dev); 376 + pm_runtime_put_autosuspend(ddev->dev); 395 377 return -EINVAL; 396 378 } 397 379 ··· 404 382 } else if (adev->powerplay.pp_funcs->force_performance_level) { 405 383 mutex_lock(&adev->pm.mutex); 406 384 if (adev->pm.dpm.thermal_active) { 407 - count = -EINVAL; 408 385 mutex_unlock(&adev->pm.mutex); 409 - goto fail; 386 + pm_runtime_mark_last_busy(ddev->dev); 387 + pm_runtime_put_autosuspend(ddev->dev); 388 + return -EINVAL; 410 389 } 411 390 ret = amdgpu_dpm_force_performance_level(adev, level); 412 391 if (ret) ··· 416 393 adev->pm.dpm.forced_level = level; 417 394 mutex_unlock(&adev->pm.mutex); 418 395 } 396 + pm_runtime_mark_last_busy(ddev->dev); 397 + pm_runtime_put_autosuspend(ddev->dev); 419 398 420 - fail: 421 399 return count; 422 400 } 423 401 ··· 431 407 struct pp_states_info data; 432 408 int i, buf_len, ret; 433 409 410 + ret = pm_runtime_get_sync(ddev->dev); 411 + if (ret < 0) 412 + return ret; 413 + 434 414 if (is_support_sw_smu(adev)) { 435 415 ret = smu_get_power_num_states(&adev->smu, &data); 436 416 if (ret) 437 417 return ret; 438 418 } else if (adev->powerplay.pp_funcs->get_pp_num_states) 439 419 amdgpu_dpm_get_pp_num_states(adev, &data); 420 + 421 + pm_runtime_mark_last_busy(ddev->dev); 422 + pm_runtime_put_autosuspend(ddev->dev); 440 423 441 424 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 442 425 for (i = 0; i < data.nums; i++) ··· 470 439 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 471 440 return 0; 472 441 442 + ret = pm_runtime_get_sync(ddev->dev); 443 + if (ret < 
0) 444 + return ret; 445 + 473 446 if (is_support_sw_smu(adev)) { 474 447 pm = smu_get_current_power_state(smu); 475 448 ret = smu_get_power_num_states(smu, &data); ··· 484 449 pm = amdgpu_dpm_get_current_power_state(adev); 485 450 amdgpu_dpm_get_pp_num_states(adev, &data); 486 451 } 452 + 453 + pm_runtime_mark_last_busy(ddev->dev); 454 + pm_runtime_put_autosuspend(ddev->dev); 487 455 488 456 for (i = 0; i < data.nums; i++) { 489 457 if (pm == data.states[i]) ··· 538 500 struct pp_states_info data; 539 501 540 502 ret = kstrtoul(buf, 0, &idx); 541 - if (ret || idx >= ARRAY_SIZE(data.states)) { 542 - count = -EINVAL; 543 - goto fail; 544 - } 503 + if (ret || idx >= ARRAY_SIZE(data.states)) 504 + return -EINVAL; 505 + 545 506 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 546 507 547 508 amdgpu_dpm_get_pp_num_states(adev, &data); 548 509 state = data.states[idx]; 510 + 511 + ret = pm_runtime_get_sync(ddev->dev); 512 + if (ret < 0) 513 + return ret; 514 + 549 515 /* only set user selected power states */ 550 516 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 551 517 state != POWER_STATE_TYPE_DEFAULT) { ··· 557 515 AMD_PP_TASK_ENABLE_USER_STATE, &state); 558 516 adev->pp_force_state_enabled = true; 559 517 } 518 + pm_runtime_mark_last_busy(ddev->dev); 519 + pm_runtime_put_autosuspend(ddev->dev); 560 520 } 561 - fail: 521 + 562 522 return count; 563 523 } 564 524 ··· 582 538 struct drm_device *ddev = dev_get_drvdata(dev); 583 539 struct amdgpu_device *adev = ddev->dev_private; 584 540 char *table = NULL; 585 - int size; 541 + int size, ret; 586 542 587 543 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 588 544 return 0; 589 545 546 + ret = pm_runtime_get_sync(ddev->dev); 547 + if (ret < 0) 548 + return ret; 549 + 590 550 if (is_support_sw_smu(adev)) { 591 551 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 552 + pm_runtime_mark_last_busy(ddev->dev); 553 + pm_runtime_put_autosuspend(ddev->dev); 592 554 if (size < 0) 593 555 return size; 
594 - } 595 - else if (adev->powerplay.pp_funcs->get_pp_table) 556 + } else if (adev->powerplay.pp_funcs->get_pp_table) { 596 557 size = amdgpu_dpm_get_pp_table(adev, &table); 597 - else 558 + pm_runtime_mark_last_busy(ddev->dev); 559 + pm_runtime_put_autosuspend(ddev->dev); 560 + if (size < 0) 561 + return size; 562 + } else { 563 + pm_runtime_mark_last_busy(ddev->dev); 564 + pm_runtime_put_autosuspend(ddev->dev); 598 565 return 0; 566 + } 599 567 600 568 if (size >= PAGE_SIZE) 601 569 size = PAGE_SIZE - 1; ··· 629 573 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 630 574 return -EINVAL; 631 575 576 + ret = pm_runtime_get_sync(ddev->dev); 577 + if (ret < 0) 578 + return ret; 579 + 632 580 if (is_support_sw_smu(adev)) { 633 581 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); 634 - if (ret) 582 + if (ret) { 583 + pm_runtime_mark_last_busy(ddev->dev); 584 + pm_runtime_put_autosuspend(ddev->dev); 635 585 return ret; 586 + } 636 587 } else if (adev->powerplay.pp_funcs->set_pp_table) 637 588 amdgpu_dpm_set_pp_table(adev, buf, count); 589 + 590 + pm_runtime_mark_last_busy(ddev->dev); 591 + pm_runtime_put_autosuspend(ddev->dev); 638 592 639 593 return count; 640 594 } ··· 769 703 tmp_str++; 770 704 } 771 705 706 + ret = pm_runtime_get_sync(ddev->dev); 707 + if (ret < 0) 708 + return ret; 709 + 772 710 if (is_support_sw_smu(adev)) { 773 711 ret = smu_od_edit_dpm_table(&adev->smu, type, 774 712 parameter, parameter_size); 775 713 776 - if (ret) 714 + if (ret) { 715 + pm_runtime_mark_last_busy(ddev->dev); 716 + pm_runtime_put_autosuspend(ddev->dev); 777 717 return -EINVAL; 718 + } 778 719 } else { 779 720 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { 780 721 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 781 722 parameter, parameter_size); 782 - if (ret) 723 + if (ret) { 724 + pm_runtime_mark_last_busy(ddev->dev); 725 + pm_runtime_put_autosuspend(ddev->dev); 783 726 return -EINVAL; 727 + } 784 728 } 785 729 786 730 if (type == 
PP_OD_COMMIT_DPM_TABLE) { ··· 798 722 amdgpu_dpm_dispatch_task(adev, 799 723 AMD_PP_TASK_READJUST_POWER_STATE, 800 724 NULL); 725 + pm_runtime_mark_last_busy(ddev->dev); 726 + pm_runtime_put_autosuspend(ddev->dev); 801 727 return count; 802 728 } else { 729 + pm_runtime_mark_last_busy(ddev->dev); 730 + pm_runtime_put_autosuspend(ddev->dev); 803 731 return -EINVAL; 804 732 } 805 733 } 806 734 } 735 + pm_runtime_mark_last_busy(ddev->dev); 736 + pm_runtime_put_autosuspend(ddev->dev); 807 737 808 738 return count; 809 739 } ··· 820 738 { 821 739 struct drm_device *ddev = dev_get_drvdata(dev); 822 740 struct amdgpu_device *adev = ddev->dev_private; 823 - uint32_t size = 0; 741 + ssize_t size; 742 + int ret; 824 743 825 744 if (amdgpu_sriov_vf(adev)) 826 745 return 0; 746 + 747 + ret = pm_runtime_get_sync(ddev->dev); 748 + if (ret < 0) 749 + return ret; 827 750 828 751 if (is_support_sw_smu(adev)) { 829 752 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 830 753 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); 831 754 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); 832 755 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); 833 - return size; 834 756 } else if (adev->powerplay.pp_funcs->print_clock_levels) { 835 757 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 836 758 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 837 759 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); 838 760 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 839 - return size; 840 761 } else { 841 - return snprintf(buf, PAGE_SIZE, "\n"); 762 + size = snprintf(buf, PAGE_SIZE, "\n"); 842 763 } 764 + pm_runtime_mark_last_busy(ddev->dev); 765 + pm_runtime_put_autosuspend(ddev->dev); 843 766 767 + return size; 844 768 } 845 769 846 770 /** ··· 884 796 885 797 pr_debug("featuremask = 0x%llx\n", featuremask); 886 798 799 + ret = pm_runtime_get_sync(ddev->dev); 800 + if (ret < 0) 
801 + return ret; 802 + 887 803 if (is_support_sw_smu(adev)) { 888 804 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); 889 805 if (ret) 890 - return -EINVAL; 806 + count = -EINVAL; 891 807 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { 892 808 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 893 809 if (ret) 894 - return -EINVAL; 810 + count = -EINVAL; 895 811 } 812 + pm_runtime_mark_last_busy(ddev->dev); 813 + pm_runtime_put_autosuspend(ddev->dev); 896 814 897 815 return count; 898 816 } ··· 909 815 { 910 816 struct drm_device *ddev = dev_get_drvdata(dev); 911 817 struct amdgpu_device *adev = ddev->dev_private; 818 + ssize_t size; 819 + int ret; 912 820 913 821 if (amdgpu_sriov_vf(adev)) 914 822 return 0; 915 823 916 - if (is_support_sw_smu(adev)) { 917 - return smu_sys_get_pp_feature_mask(&adev->smu, buf); 918 - } else if (adev->powerplay.pp_funcs->get_ppfeature_status) 919 - return amdgpu_dpm_get_ppfeature_status(adev, buf); 824 + ret = pm_runtime_get_sync(ddev->dev); 825 + if (ret < 0) 826 + return ret; 920 827 921 - return snprintf(buf, PAGE_SIZE, "\n"); 828 + if (is_support_sw_smu(adev)) 829 + size = smu_sys_get_pp_feature_mask(&adev->smu, buf); 830 + else if (adev->powerplay.pp_funcs->get_ppfeature_status) 831 + size = amdgpu_dpm_get_ppfeature_status(adev, buf); 832 + else 833 + size = snprintf(buf, PAGE_SIZE, "\n"); 834 + 835 + pm_runtime_mark_last_busy(ddev->dev); 836 + pm_runtime_put_autosuspend(ddev->dev); 837 + 838 + return size; 922 839 } 923 840 924 841 /** ··· 968 863 { 969 864 struct drm_device *ddev = dev_get_drvdata(dev); 970 865 struct amdgpu_device *adev = ddev->dev_private; 866 + ssize_t size; 867 + int ret; 971 868 972 869 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 973 870 return 0; 974 871 872 + ret = pm_runtime_get_sync(ddev->dev); 873 + if (ret < 0) 874 + return ret; 875 + 975 876 if (is_support_sw_smu(adev)) 976 - return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 877 + size = 
smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 977 878 else if (adev->powerplay.pp_funcs->print_clock_levels) 978 - return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 879 + size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 979 880 else 980 - return snprintf(buf, PAGE_SIZE, "\n"); 881 + size = snprintf(buf, PAGE_SIZE, "\n"); 882 + 883 + pm_runtime_mark_last_busy(ddev->dev); 884 + pm_runtime_put_autosuspend(ddev->dev); 885 + 886 + return size; 981 887 } 982 888 983 889 /* ··· 1044 928 if (ret) 1045 929 return ret; 1046 930 931 + ret = pm_runtime_get_sync(ddev->dev); 932 + if (ret < 0) 933 + return ret; 934 + 1047 935 if (is_support_sw_smu(adev)) 1048 936 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); 1049 937 else if (adev->powerplay.pp_funcs->force_clock_level) 1050 938 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 939 + 940 + pm_runtime_mark_last_busy(ddev->dev); 941 + pm_runtime_put_autosuspend(ddev->dev); 1051 942 1052 943 if (ret) 1053 944 return -EINVAL; ··· 1068 945 { 1069 946 struct drm_device *ddev = dev_get_drvdata(dev); 1070 947 struct amdgpu_device *adev = ddev->dev_private; 948 + ssize_t size; 949 + int ret; 1071 950 1072 951 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1073 952 return 0; 1074 953 954 + ret = pm_runtime_get_sync(ddev->dev); 955 + if (ret < 0) 956 + return ret; 957 + 1075 958 if (is_support_sw_smu(adev)) 1076 - return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 959 + size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 1077 960 else if (adev->powerplay.pp_funcs->print_clock_levels) 1078 - return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 961 + size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 1079 962 else 1080 - return snprintf(buf, PAGE_SIZE, "\n"); 963 + size = snprintf(buf, PAGE_SIZE, "\n"); 964 + 965 + pm_runtime_mark_last_busy(ddev->dev); 966 + pm_runtime_put_autosuspend(ddev->dev); 967 + 968 + return size; 1081 969 } 1082 970 1083 971 static 
ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, ··· 1098 964 { 1099 965 struct drm_device *ddev = dev_get_drvdata(dev); 1100 966 struct amdgpu_device *adev = ddev->dev_private; 1101 - int ret; 1102 967 uint32_t mask = 0; 968 + int ret; 1103 969 1104 970 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1105 971 return -EINVAL; ··· 1108 974 if (ret) 1109 975 return ret; 1110 976 977 + ret = pm_runtime_get_sync(ddev->dev); 978 + if (ret < 0) 979 + return ret; 980 + 1111 981 if (is_support_sw_smu(adev)) 1112 982 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); 1113 983 else if (adev->powerplay.pp_funcs->force_clock_level) 1114 984 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 985 + 986 + pm_runtime_mark_last_busy(ddev->dev); 987 + pm_runtime_put_autosuspend(ddev->dev); 1115 988 1116 989 if (ret) 1117 990 return -EINVAL; ··· 1132 991 { 1133 992 struct drm_device *ddev = dev_get_drvdata(dev); 1134 993 struct amdgpu_device *adev = ddev->dev_private; 994 + ssize_t size; 995 + int ret; 1135 996 1136 997 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1137 998 return 0; 1138 999 1000 + ret = pm_runtime_get_sync(ddev->dev); 1001 + if (ret < 0) 1002 + return ret; 1003 + 1139 1004 if (is_support_sw_smu(adev)) 1140 - return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 1005 + size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 1141 1006 else if (adev->powerplay.pp_funcs->print_clock_levels) 1142 - return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 1007 + size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 1143 1008 else 1144 - return snprintf(buf, PAGE_SIZE, "\n"); 1009 + size = snprintf(buf, PAGE_SIZE, "\n"); 1010 + 1011 + pm_runtime_mark_last_busy(ddev->dev); 1012 + pm_runtime_put_autosuspend(ddev->dev); 1013 + 1014 + return size; 1145 1015 } 1146 1016 1147 1017 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, ··· 1172 1020 if (ret) 1173 1021 return ret; 1174 1022 1023 + ret = 
pm_runtime_get_sync(ddev->dev); 1024 + if (ret < 0) 1025 + return ret; 1026 + 1175 1027 if (is_support_sw_smu(adev)) 1176 1028 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); 1177 1029 else if (adev->powerplay.pp_funcs->force_clock_level) 1178 1030 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); 1031 + else 1032 + ret = 0; 1033 + 1034 + pm_runtime_mark_last_busy(ddev->dev); 1035 + pm_runtime_put_autosuspend(ddev->dev); 1179 1036 1180 1037 if (ret) 1181 1038 return -EINVAL; ··· 1198 1037 { 1199 1038 struct drm_device *ddev = dev_get_drvdata(dev); 1200 1039 struct amdgpu_device *adev = ddev->dev_private; 1040 + ssize_t size; 1041 + int ret; 1201 1042 1202 1043 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1203 1044 return 0; 1204 1045 1046 + ret = pm_runtime_get_sync(ddev->dev); 1047 + if (ret < 0) 1048 + return ret; 1049 + 1205 1050 if (is_support_sw_smu(adev)) 1206 - return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1051 + size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1207 1052 else if (adev->powerplay.pp_funcs->print_clock_levels) 1208 - return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1053 + size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1209 1054 else 1210 - return snprintf(buf, PAGE_SIZE, "\n"); 1055 + size = snprintf(buf, PAGE_SIZE, "\n"); 1056 + 1057 + pm_runtime_mark_last_busy(ddev->dev); 1058 + pm_runtime_put_autosuspend(ddev->dev); 1059 + 1060 + return size; 1211 1061 } 1212 1062 1213 1063 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, ··· 1238 1066 if (ret) 1239 1067 return ret; 1240 1068 1069 + ret = pm_runtime_get_sync(ddev->dev); 1070 + if (ret < 0) 1071 + return ret; 1072 + 1241 1073 if (is_support_sw_smu(adev)) 1242 1074 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); 1243 1075 else if (adev->powerplay.pp_funcs->force_clock_level) 1244 1076 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); 1077 + else 1078 + ret = 0; 1079 + 1080 + 
pm_runtime_mark_last_busy(ddev->dev); 1081 + pm_runtime_put_autosuspend(ddev->dev); 1245 1082 1246 1083 if (ret) 1247 1084 return -EINVAL; ··· 1264 1083 { 1265 1084 struct drm_device *ddev = dev_get_drvdata(dev); 1266 1085 struct amdgpu_device *adev = ddev->dev_private; 1086 + ssize_t size; 1087 + int ret; 1267 1088 1268 1089 if (amdgpu_sriov_vf(adev)) 1269 1090 return 0; 1270 1091 1092 + ret = pm_runtime_get_sync(ddev->dev); 1093 + if (ret < 0) 1094 + return ret; 1095 + 1271 1096 if (is_support_sw_smu(adev)) 1272 - return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1097 + size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1273 1098 else if (adev->powerplay.pp_funcs->print_clock_levels) 1274 - return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1099 + size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1275 1100 else 1276 - return snprintf(buf, PAGE_SIZE, "\n"); 1101 + size = snprintf(buf, PAGE_SIZE, "\n"); 1102 + 1103 + pm_runtime_mark_last_busy(ddev->dev); 1104 + pm_runtime_put_autosuspend(ddev->dev); 1105 + 1106 + return size; 1277 1107 } 1278 1108 1279 1109 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, ··· 1304 1112 if (ret) 1305 1113 return ret; 1306 1114 1115 + ret = pm_runtime_get_sync(ddev->dev); 1116 + if (ret < 0) 1117 + return ret; 1118 + 1307 1119 if (is_support_sw_smu(adev)) 1308 1120 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); 1309 1121 else if (adev->powerplay.pp_funcs->force_clock_level) 1310 1122 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); 1123 + else 1124 + ret = 0; 1125 + 1126 + pm_runtime_mark_last_busy(ddev->dev); 1127 + pm_runtime_put_autosuspend(ddev->dev); 1311 1128 1312 1129 if (ret) 1313 1130 return -EINVAL; ··· 1330 1129 { 1331 1130 struct drm_device *ddev = dev_get_drvdata(dev); 1332 1131 struct amdgpu_device *adev = ddev->dev_private; 1132 + ssize_t size; 1133 + int ret; 1333 1134 1334 1135 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 
1335 1136 return 0; 1336 1137 1138 + ret = pm_runtime_get_sync(ddev->dev); 1139 + if (ret < 0) 1140 + return ret; 1141 + 1337 1142 if (is_support_sw_smu(adev)) 1338 - return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1143 + size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1339 1144 else if (adev->powerplay.pp_funcs->print_clock_levels) 1340 - return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1145 + size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1341 1146 else 1342 - return snprintf(buf, PAGE_SIZE, "\n"); 1147 + size = snprintf(buf, PAGE_SIZE, "\n"); 1148 + 1149 + pm_runtime_mark_last_busy(ddev->dev); 1150 + pm_runtime_put_autosuspend(ddev->dev); 1151 + 1152 + return size; 1343 1153 } 1344 1154 1345 1155 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, ··· 1370 1158 if (ret) 1371 1159 return ret; 1372 1160 1161 + ret = pm_runtime_get_sync(ddev->dev); 1162 + if (ret < 0) 1163 + return ret; 1164 + 1373 1165 if (is_support_sw_smu(adev)) 1374 1166 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); 1375 1167 else if (adev->powerplay.pp_funcs->force_clock_level) 1376 1168 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 1169 + else 1170 + ret = 0; 1171 + 1172 + pm_runtime_mark_last_busy(ddev->dev); 1173 + pm_runtime_put_autosuspend(ddev->dev); 1377 1174 1378 1175 if (ret) 1379 1176 return -EINVAL; ··· 1397 1176 struct drm_device *ddev = dev_get_drvdata(dev); 1398 1177 struct amdgpu_device *adev = ddev->dev_private; 1399 1178 uint32_t value = 0; 1179 + int ret; 1400 1180 1401 1181 if (amdgpu_sriov_vf(adev)) 1402 1182 return 0; 1183 + 1184 + ret = pm_runtime_get_sync(ddev->dev); 1185 + if (ret < 0) 1186 + return ret; 1403 1187 1404 1188 if (is_support_sw_smu(adev)) 1405 1189 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); 1406 1190 else if (adev->powerplay.pp_funcs->get_sclk_od) 1407 1191 value = amdgpu_dpm_get_sclk_od(adev); 1192 + 1193 + pm_runtime_mark_last_busy(ddev->dev); 1194 + 
pm_runtime_put_autosuspend(ddev->dev); 1408 1195 1409 1196 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1410 1197 } ··· 1432 1203 1433 1204 ret = kstrtol(buf, 0, &value); 1434 1205 1435 - if (ret) { 1436 - count = -EINVAL; 1437 - goto fail; 1438 - } 1206 + if (ret) 1207 + return -EINVAL; 1208 + 1209 + ret = pm_runtime_get_sync(ddev->dev); 1210 + if (ret < 0) 1211 + return ret; 1439 1212 1440 1213 if (is_support_sw_smu(adev)) { 1441 1214 value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); ··· 1453 1222 } 1454 1223 } 1455 1224 1456 - fail: 1225 + pm_runtime_mark_last_busy(ddev->dev); 1226 + pm_runtime_put_autosuspend(ddev->dev); 1227 + 1457 1228 return count; 1458 1229 } 1459 1230 ··· 1466 1233 struct drm_device *ddev = dev_get_drvdata(dev); 1467 1234 struct amdgpu_device *adev = ddev->dev_private; 1468 1235 uint32_t value = 0; 1236 + int ret; 1469 1237 1470 1238 if (amdgpu_sriov_vf(adev)) 1471 1239 return 0; 1240 + 1241 + ret = pm_runtime_get_sync(ddev->dev); 1242 + if (ret < 0) 1243 + return ret; 1472 1244 1473 1245 if (is_support_sw_smu(adev)) 1474 1246 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); 1475 1247 else if (adev->powerplay.pp_funcs->get_mclk_od) 1476 1248 value = amdgpu_dpm_get_mclk_od(adev); 1249 + 1250 + pm_runtime_mark_last_busy(ddev->dev); 1251 + pm_runtime_put_autosuspend(ddev->dev); 1477 1252 1478 1253 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1479 1254 } ··· 1501 1260 1502 1261 ret = kstrtol(buf, 0, &value); 1503 1262 1504 - if (ret) { 1505 - count = -EINVAL; 1506 - goto fail; 1507 - } 1263 + if (ret) 1264 + return -EINVAL; 1265 + 1266 + ret = pm_runtime_get_sync(ddev->dev); 1267 + if (ret < 0) 1268 + return ret; 1508 1269 1509 1270 if (is_support_sw_smu(adev)) { 1510 1271 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value); ··· 1522 1279 } 1523 1280 } 1524 1281 1525 - fail: 1282 + pm_runtime_mark_last_busy(ddev->dev); 1283 + pm_runtime_put_autosuspend(ddev->dev); 1284 + 1526 1285 
return count; 1527 1286 } 1528 1287 ··· 1554 1309 { 1555 1310 struct drm_device *ddev = dev_get_drvdata(dev); 1556 1311 struct amdgpu_device *adev = ddev->dev_private; 1312 + ssize_t size; 1313 + int ret; 1557 1314 1558 1315 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1559 1316 return 0; 1560 1317 1561 - if (is_support_sw_smu(adev)) 1562 - return smu_get_power_profile_mode(&adev->smu, buf); 1563 - else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1564 - return amdgpu_dpm_get_power_profile_mode(adev, buf); 1318 + ret = pm_runtime_get_sync(ddev->dev); 1319 + if (ret < 0) 1320 + return ret; 1565 1321 1566 - return snprintf(buf, PAGE_SIZE, "\n"); 1322 + if (is_support_sw_smu(adev)) 1323 + size = smu_get_power_profile_mode(&adev->smu, buf); 1324 + else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1325 + size = amdgpu_dpm_get_power_profile_mode(adev, buf); 1326 + else 1327 + size = snprintf(buf, PAGE_SIZE, "\n"); 1328 + 1329 + pm_runtime_mark_last_busy(ddev->dev); 1330 + pm_runtime_put_autosuspend(ddev->dev); 1331 + 1332 + return size; 1567 1333 } 1568 1334 1569 1335 ··· 1599 1343 tmp[1] = '\0'; 1600 1344 ret = kstrtol(tmp, 0, &profile_mode); 1601 1345 if (ret) 1602 - goto fail; 1346 + return -EINVAL; 1603 1347 1604 1348 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1605 1349 return -EINVAL; ··· 1614 1358 while (tmp_str[0]) { 1615 1359 sub_str = strsep(&tmp_str, delimiter); 1616 1360 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 1617 - if (ret) { 1618 - count = -EINVAL; 1619 - goto fail; 1620 - } 1361 + if (ret) 1362 + return -EINVAL; 1621 1363 parameter_size++; 1622 1364 while (isspace(*tmp_str)) 1623 1365 tmp_str++; 1624 1366 } 1625 1367 } 1626 1368 parameter[parameter_size] = profile_mode; 1369 + 1370 + ret = pm_runtime_get_sync(ddev->dev); 1371 + if (ret < 0) 1372 + return ret; 1373 + 1627 1374 if (is_support_sw_smu(adev)) 1628 1375 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, 
true); 1629 1376 else if (adev->powerplay.pp_funcs->set_power_profile_mode) 1630 1377 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1378 + 1379 + pm_runtime_mark_last_busy(ddev->dev); 1380 + pm_runtime_put_autosuspend(ddev->dev); 1381 + 1631 1382 if (!ret) 1632 1383 return count; 1633 - fail: 1384 + 1634 1385 return -EINVAL; 1635 1386 } 1636 1387 ··· 1660 1397 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1661 1398 return 0; 1662 1399 1400 + r = pm_runtime_get_sync(ddev->dev); 1401 + if (r < 0) 1402 + return r; 1403 + 1663 1404 /* read the IP busy sensor */ 1664 1405 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1665 1406 (void *)&value, &size); 1407 + 1408 + pm_runtime_mark_last_busy(ddev->dev); 1409 + pm_runtime_put_autosuspend(ddev->dev); 1666 1410 1667 1411 if (r) 1668 1412 return r; ··· 1696 1426 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1697 1427 return 0; 1698 1428 1429 + r = pm_runtime_get_sync(ddev->dev); 1430 + if (r < 0) 1431 + return r; 1432 + 1699 1433 /* read the IP busy sensor */ 1700 1434 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, 1701 1435 (void *)&value, &size); 1436 + 1437 + pm_runtime_mark_last_busy(ddev->dev); 1438 + pm_runtime_put_autosuspend(ddev->dev); 1702 1439 1703 1440 if (r) 1704 1441 return r; ··· 1732 1455 struct drm_device *ddev = dev_get_drvdata(dev); 1733 1456 struct amdgpu_device *adev = ddev->dev_private; 1734 1457 uint64_t count0, count1; 1458 + int ret; 1735 1459 1736 1460 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1737 1461 return 0; 1738 1462 1463 + ret = pm_runtime_get_sync(ddev->dev); 1464 + if (ret < 0) 1465 + return ret; 1466 + 1739 1467 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1468 + 1469 + pm_runtime_mark_last_busy(ddev->dev); 1470 + pm_runtime_put_autosuspend(ddev->dev); 1471 + 1740 1472 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 1741 1473 count0, count1, pcie_get_mps(adev->pdev)); 1742 
1474 } ··· 1833 1547 char *buf) 1834 1548 { 1835 1549 struct amdgpu_device *adev = dev_get_drvdata(dev); 1836 - struct drm_device *ddev = adev->ddev; 1837 1550 int channel = to_sensor_dev_attr(attr)->index; 1838 1551 int r, temp = 0, size = sizeof(temp); 1839 1552 1840 - /* Can't get temperature when the card is off */ 1841 - if ((adev->flags & AMD_IS_PX) && 1842 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1843 - return -EINVAL; 1844 - 1845 1553 if (channel >= PP_TEMP_MAX) 1846 1554 return -EINVAL; 1555 + 1556 + r = pm_runtime_get_sync(adev->ddev->dev); 1557 + if (r < 0) 1558 + return r; 1847 1559 1848 1560 switch (channel) { 1849 1561 case PP_TEMP_JUNCTION: 1850 1562 /* get current junction temperature */ 1851 1563 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 1852 1564 (void *)&temp, &size); 1853 - if (r) 1854 - return r; 1855 1565 break; 1856 1566 case PP_TEMP_EDGE: 1857 1567 /* get current edge temperature */ 1858 1568 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 1859 1569 (void *)&temp, &size); 1860 - if (r) 1861 - return r; 1862 1570 break; 1863 1571 case PP_TEMP_MEM: 1864 1572 /* get current memory temperature */ 1865 1573 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 1866 1574 (void *)&temp, &size); 1867 - if (r) 1868 - return r; 1575 + break; 1576 + default: 1577 + r = -EINVAL; 1869 1578 break; 1870 1579 } 1580 + 1581 + pm_runtime_mark_last_busy(adev->ddev->dev); 1582 + pm_runtime_put_autosuspend(adev->ddev->dev); 1583 + 1584 + if (r) 1585 + return r; 1871 1586 1872 1587 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1873 1588 } ··· 1965 1678 { 1966 1679 struct amdgpu_device *adev = dev_get_drvdata(dev); 1967 1680 u32 pwm_mode = 0; 1681 + int ret; 1682 + 1683 + ret = pm_runtime_get_sync(adev->ddev->dev); 1684 + if (ret < 0) 1685 + return ret; 1968 1686 1969 1687 if (is_support_sw_smu(adev)) { 1970 1688 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1971 1689 } else { 1972 - if 
(!adev->powerplay.pp_funcs->get_fan_control_mode) 1690 + if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 1691 + pm_runtime_mark_last_busy(adev->ddev->dev); 1692 + pm_runtime_put_autosuspend(adev->ddev->dev); 1973 1693 return -EINVAL; 1694 + } 1974 1695 1975 1696 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1976 1697 } 1698 + 1699 + pm_runtime_mark_last_busy(adev->ddev->dev); 1700 + pm_runtime_put_autosuspend(adev->ddev->dev); 1977 1701 1978 1702 return sprintf(buf, "%i\n", pwm_mode); 1979 1703 } ··· 1995 1697 size_t count) 1996 1698 { 1997 1699 struct amdgpu_device *adev = dev_get_drvdata(dev); 1998 - int err; 1700 + int err, ret; 1999 1701 int value; 2000 - 2001 - /* Can't adjust fan when the card is off */ 2002 - if ((adev->flags & AMD_IS_PX) && 2003 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2004 - return -EINVAL; 2005 1702 2006 1703 err = kstrtoint(buf, 10, &value); 2007 1704 if (err) 2008 1705 return err; 2009 1706 1707 + ret = pm_runtime_get_sync(adev->ddev->dev); 1708 + if (ret < 0) 1709 + return ret; 1710 + 2010 1711 if (is_support_sw_smu(adev)) { 2011 1712 smu_set_fan_control_mode(&adev->smu, value); 2012 1713 } else { 2013 - if (!adev->powerplay.pp_funcs->set_fan_control_mode) 1714 + if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 1715 + pm_runtime_mark_last_busy(adev->ddev->dev); 1716 + pm_runtime_put_autosuspend(adev->ddev->dev); 2014 1717 return -EINVAL; 1718 + } 2015 1719 2016 1720 amdgpu_dpm_set_fan_control_mode(adev, value); 2017 1721 } 1722 + 1723 + pm_runtime_mark_last_busy(adev->ddev->dev); 1724 + pm_runtime_put_autosuspend(adev->ddev->dev); 2018 1725 2019 1726 return count; 2020 1727 } ··· 2047 1744 u32 value; 2048 1745 u32 pwm_mode; 2049 1746 2050 - /* Can't adjust fan when the card is off */ 2051 - if ((adev->flags & AMD_IS_PX) && 2052 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2053 - return -EINVAL; 1747 + err = pm_runtime_get_sync(adev->ddev->dev); 1748 + if (err < 0) 1749 + return err; 1750 
+ 2054 1751 if (is_support_sw_smu(adev)) 2055 1752 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2056 1753 else 2057 1754 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1755 + 2058 1756 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2059 1757 pr_info("manual fan speed control should be enabled first\n"); 1758 + pm_runtime_mark_last_busy(adev->ddev->dev); 1759 + pm_runtime_put_autosuspend(adev->ddev->dev); 2060 1760 return -EINVAL; 2061 1761 } 2062 1762 2063 1763 err = kstrtou32(buf, 10, &value); 2064 - if (err) 1764 + if (err) { 1765 + pm_runtime_mark_last_busy(adev->ddev->dev); 1766 + pm_runtime_put_autosuspend(adev->ddev->dev); 2065 1767 return err; 1768 + } 2066 1769 2067 1770 value = (value * 100) / 255; 2068 1771 2069 - if (is_support_sw_smu(adev)) { 1772 + if (is_support_sw_smu(adev)) 2070 1773 err = smu_set_fan_speed_percent(&adev->smu, value); 2071 - if (err) 2072 - return err; 2073 - } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) { 1774 + else if (adev->powerplay.pp_funcs->set_fan_speed_percent) 2074 1775 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 2075 - if (err) 2076 - return err; 2077 - } 1776 + else 1777 + err = -EINVAL; 1778 + 1779 + pm_runtime_mark_last_busy(adev->ddev->dev); 1780 + pm_runtime_put_autosuspend(adev->ddev->dev); 1781 + 1782 + if (err) 1783 + return err; 2078 1784 2079 1785 return count; 2080 1786 } ··· 2096 1784 int err; 2097 1785 u32 speed = 0; 2098 1786 2099 - /* Can't adjust fan when the card is off */ 2100 - if ((adev->flags & AMD_IS_PX) && 2101 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2102 - return -EINVAL; 1787 + err = pm_runtime_get_sync(adev->ddev->dev); 1788 + if (err < 0) 1789 + return err; 2103 1790 2104 - if (is_support_sw_smu(adev)) { 1791 + if (is_support_sw_smu(adev)) 2105 1792 err = smu_get_fan_speed_percent(&adev->smu, &speed); 2106 - if (err) 2107 - return err; 2108 - } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) { 1793 + else if 
(adev->powerplay.pp_funcs->get_fan_speed_percent) 2109 1794 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 2110 - if (err) 2111 - return err; 2112 - } 1795 + else 1796 + err = -EINVAL; 1797 + 1798 + pm_runtime_mark_last_busy(adev->ddev->dev); 1799 + pm_runtime_put_autosuspend(adev->ddev->dev); 1800 + 1801 + if (err) 1802 + return err; 2113 1803 2114 1804 speed = (speed * 255) / 100; 2115 1805 ··· 2126 1812 int err; 2127 1813 u32 speed = 0; 2128 1814 2129 - /* Can't adjust fan when the card is off */ 2130 - if ((adev->flags & AMD_IS_PX) && 2131 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2132 - return -EINVAL; 1815 + err = pm_runtime_get_sync(adev->ddev->dev); 1816 + if (err < 0) 1817 + return err; 2133 1818 2134 - if (is_support_sw_smu(adev)) { 1819 + if (is_support_sw_smu(adev)) 2135 1820 err = smu_get_fan_speed_rpm(&adev->smu, &speed); 2136 - if (err) 2137 - return err; 2138 - } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 1821 + else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2139 1822 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 2140 - if (err) 2141 - return err; 2142 - } 1823 + else 1824 + err = -EINVAL; 1825 + 1826 + pm_runtime_mark_last_busy(adev->ddev->dev); 1827 + pm_runtime_put_autosuspend(adev->ddev->dev); 1828 + 1829 + if (err) 1830 + return err; 2143 1831 2144 1832 return sprintf(buf, "%i\n", speed); 2145 1833 } ··· 2155 1839 u32 size = sizeof(min_rpm); 2156 1840 int r; 2157 1841 1842 + r = pm_runtime_get_sync(adev->ddev->dev); 1843 + if (r < 0) 1844 + return r; 1845 + 2158 1846 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2159 1847 (void *)&min_rpm, &size); 1848 + 1849 + pm_runtime_mark_last_busy(adev->ddev->dev); 1850 + pm_runtime_put_autosuspend(adev->ddev->dev); 1851 + 2160 1852 if (r) 2161 1853 return r; 2162 1854 ··· 2180 1856 u32 size = sizeof(max_rpm); 2181 1857 int r; 2182 1858 1859 + r = pm_runtime_get_sync(adev->ddev->dev); 1860 + if (r < 0) 1861 + return r; 1862 + 2183 1863 r = 
amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2184 1864 (void *)&max_rpm, &size); 1865 + 1866 + pm_runtime_mark_last_busy(adev->ddev->dev); 1867 + pm_runtime_put_autosuspend(adev->ddev->dev); 1868 + 2185 1869 if (r) 2186 1870 return r; 2187 1871 ··· 2204 1872 int err; 2205 1873 u32 rpm = 0; 2206 1874 2207 - /* Can't adjust fan when the card is off */ 2208 - if ((adev->flags & AMD_IS_PX) && 2209 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2210 - return -EINVAL; 1875 + err = pm_runtime_get_sync(adev->ddev->dev); 1876 + if (err < 0) 1877 + return err; 2211 1878 2212 - if (is_support_sw_smu(adev)) { 1879 + if (is_support_sw_smu(adev)) 2213 1880 err = smu_get_fan_speed_rpm(&adev->smu, &rpm); 2214 - if (err) 2215 - return err; 2216 - } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 1881 + else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2217 1882 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 2218 - if (err) 2219 - return err; 2220 - } 1883 + else 1884 + err = -EINVAL; 1885 + 1886 + pm_runtime_mark_last_busy(adev->ddev->dev); 1887 + pm_runtime_put_autosuspend(adev->ddev->dev); 1888 + 1889 + if (err) 1890 + return err; 2221 1891 2222 1892 return sprintf(buf, "%i\n", rpm); 2223 1893 } ··· 2233 1899 u32 value; 2234 1900 u32 pwm_mode; 2235 1901 1902 + err = pm_runtime_get_sync(adev->ddev->dev); 1903 + if (err < 0) 1904 + return err; 1905 + 2236 1906 if (is_support_sw_smu(adev)) 2237 1907 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2238 1908 else 2239 1909 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2240 1910 2241 - if (pwm_mode != AMD_FAN_CTRL_MANUAL) 1911 + if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 1912 + pm_runtime_mark_last_busy(adev->ddev->dev); 1913 + pm_runtime_put_autosuspend(adev->ddev->dev); 2242 1914 return -ENODATA; 2243 - 2244 - /* Can't adjust fan when the card is off */ 2245 - if ((adev->flags & AMD_IS_PX) && 2246 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2247 - return -EINVAL; 1915 + } 2248 
1916 2249 1917 err = kstrtou32(buf, 10, &value); 1918 + if (err) { 1919 + pm_runtime_mark_last_busy(adev->ddev->dev); 1920 + pm_runtime_put_autosuspend(adev->ddev->dev); 1921 + return err; 1922 + } 1923 + 1924 + if (is_support_sw_smu(adev)) 1925 + err = smu_set_fan_speed_rpm(&adev->smu, value); 1926 + else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) 1927 + err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 1928 + else 1929 + err = -EINVAL; 1930 + 1931 + pm_runtime_mark_last_busy(adev->ddev->dev); 1932 + pm_runtime_put_autosuspend(adev->ddev->dev); 1933 + 2250 1934 if (err) 2251 1935 return err; 2252 - 2253 - if (is_support_sw_smu(adev)) { 2254 - err = smu_set_fan_speed_rpm(&adev->smu, value); 2255 - if (err) 2256 - return err; 2257 - } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) { 2258 - err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 2259 - if (err) 2260 - return err; 2261 - } 2262 1936 2263 1937 return count; 2264 1938 } ··· 2277 1935 { 2278 1936 struct amdgpu_device *adev = dev_get_drvdata(dev); 2279 1937 u32 pwm_mode = 0; 1938 + int ret; 1939 + 1940 + ret = pm_runtime_get_sync(adev->ddev->dev); 1941 + if (ret < 0) 1942 + return ret; 2280 1943 2281 1944 if (is_support_sw_smu(adev)) { 2282 1945 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2283 1946 } else { 2284 - if (!adev->powerplay.pp_funcs->get_fan_control_mode) 1947 + if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 1948 + pm_runtime_mark_last_busy(adev->ddev->dev); 1949 + pm_runtime_put_autosuspend(adev->ddev->dev); 2285 1950 return -EINVAL; 1951 + } 2286 1952 2287 1953 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2288 1954 } 1955 + 1956 + pm_runtime_mark_last_busy(adev->ddev->dev); 1957 + pm_runtime_put_autosuspend(adev->ddev->dev); 1958 + 2289 1959 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 
0 : 1); 2290 1960 } 2291 1961 ··· 2311 1957 int value; 2312 1958 u32 pwm_mode; 2313 1959 2314 - /* Can't adjust fan when the card is off */ 2315 - if ((adev->flags & AMD_IS_PX) && 2316 - (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2317 - return -EINVAL; 2318 - 2319 - 2320 1960 err = kstrtoint(buf, 10, &value); 2321 1961 if (err) 2322 1962 return err; ··· 2322 1974 else 2323 1975 return -EINVAL; 2324 1976 1977 + err = pm_runtime_get_sync(adev->ddev->dev); 1978 + if (err < 0) 1979 + return err; 1980 + 2325 1981 if (is_support_sw_smu(adev)) { 2326 1982 smu_set_fan_control_mode(&adev->smu, pwm_mode); 2327 1983 } else { 2328 - if (!adev->powerplay.pp_funcs->set_fan_control_mode) 1984 + if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 1985 + pm_runtime_mark_last_busy(adev->ddev->dev); 1986 + pm_runtime_put_autosuspend(adev->ddev->dev); 2329 1987 return -EINVAL; 1988 + } 2330 1989 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2331 1990 } 1991 + 1992 + pm_runtime_mark_last_busy(adev->ddev->dev); 1993 + pm_runtime_put_autosuspend(adev->ddev->dev); 2332 1994 2333 1995 return count; 2334 1996 } ··· 2348 1990 char *buf) 2349 1991 { 2350 1992 struct amdgpu_device *adev = dev_get_drvdata(dev); 2351 - struct drm_device *ddev = adev->ddev; 2352 1993 u32 vddgfx; 2353 1994 int r, size = sizeof(vddgfx); 2354 1995 2355 - /* Can't get voltage when the card is off */ 2356 - if ((adev->flags & AMD_IS_PX) && 2357 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2358 - return -EINVAL; 1996 + r = pm_runtime_get_sync(adev->ddev->dev); 1997 + if (r < 0) 1998 + return r; 2359 1999 2360 2000 /* get the voltage */ 2361 2001 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, 2362 2002 (void *)&vddgfx, &size); 2003 + 2004 + pm_runtime_mark_last_busy(adev->ddev->dev); 2005 + pm_runtime_put_autosuspend(adev->ddev->dev); 2006 + 2363 2007 if (r) 2364 2008 return r; 2365 2009 ··· 2380 2020 char *buf) 2381 2021 { 2382 2022 struct amdgpu_device *adev = 
dev_get_drvdata(dev); 2383 - struct drm_device *ddev = adev->ddev; 2384 2023 u32 vddnb; 2385 2024 int r, size = sizeof(vddnb); 2386 2025 ··· 2387 2028 if (!(adev->flags & AMD_IS_APU)) 2388 2029 return -EINVAL; 2389 2030 2390 - /* Can't get voltage when the card is off */ 2391 - if ((adev->flags & AMD_IS_PX) && 2392 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2393 - return -EINVAL; 2031 + r = pm_runtime_get_sync(adev->ddev->dev); 2032 + if (r < 0) 2033 + return r; 2394 2034 2395 2035 /* get the voltage */ 2396 2036 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, 2397 2037 (void *)&vddnb, &size); 2038 + 2039 + pm_runtime_mark_last_busy(adev->ddev->dev); 2040 + pm_runtime_put_autosuspend(adev->ddev->dev); 2041 + 2398 2042 if (r) 2399 2043 return r; 2400 2044 ··· 2416 2054 char *buf) 2417 2055 { 2418 2056 struct amdgpu_device *adev = dev_get_drvdata(dev); 2419 - struct drm_device *ddev = adev->ddev; 2420 2057 u32 query = 0; 2421 2058 int r, size = sizeof(u32); 2422 2059 unsigned uw; 2423 2060 2424 - /* Can't get power when the card is off */ 2425 - if ((adev->flags & AMD_IS_PX) && 2426 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2427 - return -EINVAL; 2061 + r = pm_runtime_get_sync(adev->ddev->dev); 2062 + if (r < 0) 2063 + return r; 2428 2064 2429 2065 /* get the voltage */ 2430 2066 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, 2431 2067 (void *)&query, &size); 2068 + 2069 + pm_runtime_mark_last_busy(adev->ddev->dev); 2070 + pm_runtime_put_autosuspend(adev->ddev->dev); 2071 + 2432 2072 if (r) 2433 2073 return r; 2434 2074 ··· 2453 2089 { 2454 2090 struct amdgpu_device *adev = dev_get_drvdata(dev); 2455 2091 uint32_t limit = 0; 2092 + ssize_t size; 2093 + int r; 2094 + 2095 + r = pm_runtime_get_sync(adev->ddev->dev); 2096 + if (r < 0) 2097 + return r; 2456 2098 2457 2099 if (is_support_sw_smu(adev)) { 2458 2100 smu_get_power_limit(&adev->smu, &limit, true, true); 2459 - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 
2101 + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2460 2102 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2461 2103 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); 2462 - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2104 + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2463 2105 } else { 2464 - return snprintf(buf, PAGE_SIZE, "\n"); 2106 + size = snprintf(buf, PAGE_SIZE, "\n"); 2465 2107 } 2108 + 2109 + pm_runtime_mark_last_busy(adev->ddev->dev); 2110 + pm_runtime_put_autosuspend(adev->ddev->dev); 2111 + 2112 + return size; 2466 2113 } 2467 2114 2468 2115 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, ··· 2482 2107 { 2483 2108 struct amdgpu_device *adev = dev_get_drvdata(dev); 2484 2109 uint32_t limit = 0; 2110 + ssize_t size; 2111 + int r; 2112 + 2113 + r = pm_runtime_get_sync(adev->ddev->dev); 2114 + if (r < 0) 2115 + return r; 2485 2116 2486 2117 if (is_support_sw_smu(adev)) { 2487 2118 smu_get_power_limit(&adev->smu, &limit, false, true); 2488 - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2119 + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2489 2120 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2490 2121 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); 2491 - return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2122 + size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2492 2123 } else { 2493 - return snprintf(buf, PAGE_SIZE, "\n"); 2124 + size = snprintf(buf, PAGE_SIZE, "\n"); 2494 2125 } 2126 + 2127 + pm_runtime_mark_last_busy(adev->ddev->dev); 2128 + pm_runtime_put_autosuspend(adev->ddev->dev); 2129 + 2130 + return size; 2495 2131 } 2496 2132 2497 2133 ··· 2524 2138 2525 2139 value = value / 1000000; /* convert to Watt */ 2526 2140 2527 - if (is_support_sw_smu(adev)) { 2141 + 2142 + err = 
pm_runtime_get_sync(adev->ddev->dev); 2143 + if (err < 0) 2144 + return err; 2145 + 2146 + if (is_support_sw_smu(adev)) 2528 2147 err = smu_set_power_limit(&adev->smu, value); 2529 - } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { 2148 + else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) 2530 2149 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); 2531 - } else { 2150 + else 2532 2151 err = -EINVAL; 2533 - } 2152 + 2153 + pm_runtime_mark_last_busy(adev->ddev->dev); 2154 + pm_runtime_put_autosuspend(adev->ddev->dev); 2534 2155 2535 2156 if (err) 2536 2157 return err; ··· 2550 2157 char *buf) 2551 2158 { 2552 2159 struct amdgpu_device *adev = dev_get_drvdata(dev); 2553 - struct drm_device *ddev = adev->ddev; 2554 2160 uint32_t sclk; 2555 2161 int r, size = sizeof(sclk); 2556 2162 2557 - /* Can't get voltage when the card is off */ 2558 - if ((adev->flags & AMD_IS_PX) && 2559 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2560 - return -EINVAL; 2163 + r = pm_runtime_get_sync(adev->ddev->dev); 2164 + if (r < 0) 2165 + return r; 2561 2166 2562 2167 /* get the sclk */ 2563 2168 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 2564 2169 (void *)&sclk, &size); 2170 + 2171 + pm_runtime_mark_last_busy(adev->ddev->dev); 2172 + pm_runtime_put_autosuspend(adev->ddev->dev); 2173 + 2565 2174 if (r) 2566 2175 return r; 2567 2176 ··· 2582 2187 char *buf) 2583 2188 { 2584 2189 struct amdgpu_device *adev = dev_get_drvdata(dev); 2585 - struct drm_device *ddev = adev->ddev; 2586 2190 uint32_t mclk; 2587 2191 int r, size = sizeof(mclk); 2588 2192 2589 - /* Can't get voltage when the card is off */ 2590 - if ((adev->flags & AMD_IS_PX) && 2591 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2592 - return -EINVAL; 2193 + r = pm_runtime_get_sync(adev->ddev->dev); 2194 + if (r < 0) 2195 + return r; 2593 2196 2594 2197 /* get the sclk */ 2595 2198 r = 
amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 2596 2199 (void *)&mclk, &size); 2200 + 2201 + pm_runtime_mark_last_busy(adev->ddev->dev); 2202 + pm_runtime_put_autosuspend(adev->ddev->dev); 2203 + 2597 2204 if (r) 2598 2205 return r; 2599 2206 ··· 3617 3220 struct drm_info_node *node = (struct drm_info_node *) m->private; 3618 3221 struct drm_device *dev = node->minor->dev; 3619 3222 struct amdgpu_device *adev = dev->dev_private; 3620 - struct drm_device *ddev = adev->ddev; 3621 3223 u32 flags = 0; 3224 + int r; 3225 + 3226 + r = pm_runtime_get_sync(dev->dev); 3227 + if (r < 0) 3228 + return r; 3622 3229 3623 3230 amdgpu_device_ip_get_clockgating_state(adev, &flags); 3624 3231 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); ··· 3631 3230 3632 3231 if (!adev->pm.dpm_enabled) { 3633 3232 seq_printf(m, "dpm not enabled\n"); 3233 + pm_runtime_mark_last_busy(dev->dev); 3234 + pm_runtime_put_autosuspend(dev->dev); 3634 3235 return 0; 3635 3236 } 3636 - if ((adev->flags & AMD_IS_PX) && 3637 - (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 3638 - seq_printf(m, "PX asic powered off\n"); 3639 - } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3237 + 3238 + if (!is_support_sw_smu(adev) && 3239 + adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3640 3240 mutex_lock(&adev->pm.mutex); 3641 3241 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 3642 3242 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 3643 3243 else 3644 3244 seq_printf(m, "Debugfs support not implemented for this asic\n"); 3645 3245 mutex_unlock(&adev->pm.mutex); 3246 + r = 0; 3646 3247 } else { 3647 - return amdgpu_debugfs_pm_info_pp(m, adev); 3248 + r = amdgpu_debugfs_pm_info_pp(m, adev); 3648 3249 } 3649 3250 3650 - return 0; 3251 + pm_runtime_mark_last_busy(dev->dev); 3252 + pm_runtime_put_autosuspend(dev->dev); 3253 + 3254 + return r; 3651 3255 } 3652 3256 
3653 3257 static const struct drm_info_list amdgpu_pm_info_list[] = {