Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/pm: optimize the amdgpu_pm_compute_clocks() implementations

Drop cross-calls and multi-function APIs. Also avoid exposing
internal implementation details.

Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Evan Quan and committed by
Alex Deucher
6ddbd37f d698a2c4

+288 -193
+1 -1
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 404 404 int (*get_dpm_clock_table)(void *handle, 405 405 struct dpm_clocks *clock_table); 406 406 int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size); 407 - int (*change_power_state)(void *handle); 407 + void (*pm_compute_clocks)(void *handle); 408 408 }; 409 409 410 410 struct metrics_table_header {
+1 -1
drivers/gpu/drm/amd/pm/Makefile
··· 40 40 41 41 include $(AMD_PM) 42 42 43 - PM_MGR = amdgpu_dpm.o amdgpu_pm.o 43 + PM_MGR = amdgpu_dpm.o amdgpu_pm.o amdgpu_dpm_internal.o 44 44 45 45 AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR)) 46 46
+21 -179
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
··· 37 37 #define amdgpu_dpm_enable_bapm(adev, e) \ 38 38 ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) 39 39 40 - static void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 41 - { 42 - struct drm_device *ddev = adev_to_drm(adev); 43 - struct drm_crtc *crtc; 44 - struct amdgpu_crtc *amdgpu_crtc; 45 - 46 - adev->pm.dpm.new_active_crtcs = 0; 47 - adev->pm.dpm.new_active_crtc_count = 0; 48 - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 49 - list_for_each_entry(crtc, 50 - &ddev->mode_config.crtc_list, head) { 51 - amdgpu_crtc = to_amdgpu_crtc(crtc); 52 - if (amdgpu_crtc->enabled) { 53 - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 54 - adev->pm.dpm.new_active_crtc_count++; 55 - } 56 - } 57 - } 58 - } 59 - 60 - u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 61 - { 62 - struct drm_device *dev = adev_to_drm(adev); 63 - struct drm_crtc *crtc; 64 - struct amdgpu_crtc *amdgpu_crtc; 65 - u32 vblank_in_pixels; 66 - u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 67 - 68 - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 69 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 70 - amdgpu_crtc = to_amdgpu_crtc(crtc); 71 - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 72 - vblank_in_pixels = 73 - amdgpu_crtc->hw_mode.crtc_htotal * 74 - (amdgpu_crtc->hw_mode.crtc_vblank_end - 75 - amdgpu_crtc->hw_mode.crtc_vdisplay + 76 - (amdgpu_crtc->v_border * 2)); 77 - 78 - vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 79 - break; 80 - } 81 - } 82 - } 83 - 84 - return vblank_time_us; 85 - } 86 - 87 - static u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 88 - { 89 - struct drm_device *dev = adev_to_drm(adev); 90 - struct drm_crtc *crtc; 91 - struct amdgpu_crtc *amdgpu_crtc; 92 - u32 vrefresh = 0; 93 - 94 - if (adev->mode_info.num_crtc && 
adev->mode_info.mode_config_initialized) { 95 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 96 - amdgpu_crtc = to_amdgpu_crtc(crtc); 97 - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 98 - vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 99 - break; 100 - } 101 - } 102 - } 103 - 104 - return vrefresh; 105 - } 106 - 107 40 int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) 108 41 { 109 42 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; ··· 365 432 return ret; 366 433 } 367 434 368 - void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 369 - { 370 - struct amdgpu_device *adev = 371 - container_of(work, struct amdgpu_device, 372 - pm.dpm.thermal.work); 373 - /* switch to the thermal state */ 374 - enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 375 - int temp, size = sizeof(temp); 376 - 377 - if (!adev->pm.dpm_enabled) 378 - return; 379 - 380 - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 381 - (void *)&temp, &size)) { 382 - if (temp < adev->pm.dpm.thermal.min_temp) 383 - /* switch back the user state */ 384 - dpm_state = adev->pm.dpm.user_state; 385 - } else { 386 - if (adev->pm.dpm.thermal.high_to_low) 387 - /* switch back the user state */ 388 - dpm_state = adev->pm.dpm.user_state; 389 - } 390 - mutex_lock(&adev->pm.mutex); 391 - if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 392 - adev->pm.dpm.thermal_active = true; 393 - else 394 - adev->pm.dpm.thermal_active = false; 395 - adev->pm.dpm.state = dpm_state; 396 - mutex_unlock(&adev->pm.mutex); 397 - 398 - amdgpu_dpm_compute_clocks(adev); 399 - } 400 - 401 435 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 402 436 { 403 - int i = 0; 437 + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 404 438 405 439 if (!adev->pm.dpm_enabled) 406 440 return; 407 441 408 - if (adev->mode_info.num_crtc) 409 - amdgpu_display_bandwidth_update(adev); 442 + if 
(!pp_funcs->pm_compute_clocks) 443 + return; 410 444 411 - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 412 - struct amdgpu_ring *ring = adev->rings[i]; 413 - if (ring && ring->sched.ready) 414 - amdgpu_fence_wait_empty(ring); 415 - } 416 - 417 - if ((adev->family == AMDGPU_FAMILY_SI) || 418 - (adev->family == AMDGPU_FAMILY_KV)) { 419 - mutex_lock(&adev->pm.mutex); 420 - amdgpu_dpm_get_active_displays(adev); 421 - adev->powerplay.pp_funcs->change_power_state(adev->powerplay.pp_handle); 422 - mutex_unlock(&adev->pm.mutex); 423 - } else { 424 - if (!amdgpu_device_has_dc_support(adev)) { 425 - mutex_lock(&adev->pm.mutex); 426 - amdgpu_dpm_get_active_displays(adev); 427 - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 428 - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 429 - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 430 - /* we have issues with mclk switching with 431 - * refresh rates over 120 hz on the non-DC code. 
432 - */ 433 - if (adev->pm.pm_display_cfg.vrefresh > 120) 434 - adev->pm.pm_display_cfg.min_vblank_time = 0; 435 - if (adev->powerplay.pp_funcs->display_configuration_change) 436 - adev->powerplay.pp_funcs->display_configuration_change( 437 - adev->powerplay.pp_handle, 438 - &adev->pm.pm_display_cfg); 439 - mutex_unlock(&adev->pm.mutex); 440 - } 441 - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 442 - } 445 + pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); 443 446 } 444 447 445 448 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 446 449 { 447 450 int ret = 0; 448 451 449 - if (adev->family == AMDGPU_FAMILY_SI) { 450 - mutex_lock(&adev->pm.mutex); 451 - if (enable) { 452 - adev->pm.dpm.uvd_active = true; 453 - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 454 - } else { 455 - adev->pm.dpm.uvd_active = false; 456 - } 457 - mutex_unlock(&adev->pm.mutex); 452 + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 453 + if (ret) 454 + DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 455 + enable ? "enable" : "disable", ret); 458 456 459 - amdgpu_dpm_compute_clocks(adev); 460 - } else { 461 - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 462 - if (ret) 463 - DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 464 - enable ? 
"enable" : "disable", ret); 457 + /* enable/disable Low Memory PState for UVD (4k videos) */ 458 + if (adev->asic_type == CHIP_STONEY && 459 + adev->uvd.decode_image_width >= WIDTH_4K) { 460 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 465 461 466 - /* enable/disable Low Memory PState for UVD (4k videos) */ 467 - if (adev->asic_type == CHIP_STONEY && 468 - adev->uvd.decode_image_width >= WIDTH_4K) { 469 - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 470 - 471 - if (hwmgr && hwmgr->hwmgr_func && 472 - hwmgr->hwmgr_func->update_nbdpm_pstate) 473 - hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 474 - !enable, 475 - true); 476 - } 462 + if (hwmgr && hwmgr->hwmgr_func && 463 + hwmgr->hwmgr_func->update_nbdpm_pstate) 464 + hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 465 + !enable, 466 + true); 477 467 } 478 468 } 479 469 ··· 404 548 { 405 549 int ret = 0; 406 550 407 - if (adev->family == AMDGPU_FAMILY_SI) { 408 - mutex_lock(&adev->pm.mutex); 409 - if (enable) { 410 - adev->pm.dpm.vce_active = true; 411 - /* XXX select vce level based on ring/task */ 412 - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 413 - } else { 414 - adev->pm.dpm.vce_active = false; 415 - } 416 - mutex_unlock(&adev->pm.mutex); 417 - 418 - amdgpu_dpm_compute_clocks(adev); 419 - } else { 420 - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 421 - if (ret) 422 - DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 423 - enable ? "enable" : "disable", ret); 424 - } 551 + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 552 + if (ret) 553 + DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 554 + enable ? "enable" : "disable", ret); 425 555 } 426 556 427 557 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+94
drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
··· 1 + /* 2 + * Copyright 2021 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #include "amdgpu.h" 25 + #include "amdgpu_display.h" 26 + #include "hwmgr.h" 27 + #include "amdgpu_smu.h" 28 + 29 + void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 30 + { 31 + struct drm_device *ddev = adev_to_drm(adev); 32 + struct drm_crtc *crtc; 33 + struct amdgpu_crtc *amdgpu_crtc; 34 + 35 + adev->pm.dpm.new_active_crtcs = 0; 36 + adev->pm.dpm.new_active_crtc_count = 0; 37 + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 38 + list_for_each_entry(crtc, 39 + &ddev->mode_config.crtc_list, head) { 40 + amdgpu_crtc = to_amdgpu_crtc(crtc); 41 + if (amdgpu_crtc->enabled) { 42 + adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 43 + adev->pm.dpm.new_active_crtc_count++; 44 + } 45 + } 46 + } 47 + } 48 + 49 + u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 50 + { 51 + struct drm_device *dev = adev_to_drm(adev); 52 + struct drm_crtc *crtc; 53 + struct amdgpu_crtc *amdgpu_crtc; 54 + u32 vblank_in_pixels; 55 + u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 56 + 57 + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 58 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 59 + amdgpu_crtc = to_amdgpu_crtc(crtc); 60 + if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 61 + vblank_in_pixels = 62 + amdgpu_crtc->hw_mode.crtc_htotal * 63 + (amdgpu_crtc->hw_mode.crtc_vblank_end - 64 + amdgpu_crtc->hw_mode.crtc_vdisplay + 65 + (amdgpu_crtc->v_border * 2)); 66 + 67 + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 68 + break; 69 + } 70 + } 71 + } 72 + 73 + return vblank_time_us; 74 + } 75 + 76 + u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 77 + { 78 + struct drm_device *dev = adev_to_drm(adev); 79 + struct drm_crtc *crtc; 80 + struct amdgpu_crtc *amdgpu_crtc; 81 + u32 vrefresh = 0; 82 + 83 + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 
84 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 85 + amdgpu_crtc = to_amdgpu_crtc(crtc); 86 + if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 87 + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 88 + break; 89 + } 90 + } 91 + } 92 + 93 + return vrefresh; 94 + }
-2
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
··· 428 428 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 429 429 void *data, uint32_t *size); 430 430 431 - void amdgpu_dpm_thermal_work_handler(struct work_struct *work); 432 - 433 431 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); 434 432 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); 435 433 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+32
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
··· 1 + /* 2 + * Copyright 2021 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + #ifndef __AMDGPU_DPM_INTERNAL_H__ 24 + #define __AMDGPU_DPM_INTERNAL_H__ 25 + 26 + void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); 27 + 28 + u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); 29 + 30 + u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); 31 + 32 + #endif
+38 -1
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 31 31 #include "power_state.h" 32 32 #include "amdgpu.h" 33 33 #include "hwmgr.h" 34 - 34 + #include "amdgpu_dpm_internal.h" 35 + #include "amdgpu_display.h" 35 36 36 37 static const struct amd_pm_funcs pp_dpm_funcs; 37 38 ··· 1684 1683 return 0; 1685 1684 } 1686 1685 1686 + static void pp_pm_compute_clocks(void *handle) 1687 + { 1688 + struct pp_hwmgr *hwmgr = handle; 1689 + struct amdgpu_device *adev = hwmgr->adev; 1690 + int i = 0; 1691 + 1692 + if (adev->mode_info.num_crtc) 1693 + amdgpu_display_bandwidth_update(adev); 1694 + 1695 + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1696 + struct amdgpu_ring *ring = adev->rings[i]; 1697 + if (ring && ring->sched.ready) 1698 + amdgpu_fence_wait_empty(ring); 1699 + } 1700 + 1701 + if (!amdgpu_device_has_dc_support(adev)) { 1702 + amdgpu_dpm_get_active_displays(adev); 1703 + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 1704 + adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 1705 + adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 1706 + /* we have issues with mclk switching with 1707 + * refresh rates over 120 hz on the non-DC code. 1708 + */ 1709 + if (adev->pm.pm_display_cfg.vrefresh > 120) 1710 + adev->pm.pm_display_cfg.min_vblank_time = 0; 1711 + 1712 + pp_display_configuration_change(handle, 1713 + &adev->pm.pm_display_cfg); 1714 + } 1715 + 1716 + pp_dpm_dispatch_tasks(handle, 1717 + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, 1718 + NULL); 1719 + } 1720 + 1687 1721 static const struct amd_pm_funcs pp_dpm_funcs = { 1688 1722 .load_firmware = pp_dpm_load_fw, 1689 1723 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, ··· 1783 1747 .get_gpu_metrics = pp_get_gpu_metrics, 1784 1748 .gfx_state_change_set = pp_gfx_state_change_set, 1785 1749 .get_smu_prv_buf_details = pp_get_prv_buffer_details, 1750 + .pm_compute_clocks = pp_pm_compute_clocks, 1786 1751 };
+3 -3
drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
··· 3088 3088 else 3089 3089 adev->pm.dpm_enabled = true; 3090 3090 mutex_unlock(&adev->pm.mutex); 3091 - amdgpu_dpm_compute_clocks(adev); 3091 + amdgpu_legacy_dpm_compute_clocks(adev); 3092 3092 return ret; 3093 3093 } 3094 3094 ··· 3136 3136 adev->pm.dpm_enabled = true; 3137 3137 mutex_unlock(&adev->pm.mutex); 3138 3138 if (adev->pm.dpm_enabled) 3139 - amdgpu_dpm_compute_clocks(adev); 3139 + amdgpu_legacy_dpm_compute_clocks(adev); 3140 3140 } 3141 3141 return 0; 3142 3142 } ··· 3390 3390 .get_vce_clock_state = amdgpu_get_vce_clock_state, 3391 3391 .check_state_equal = kv_check_state_equal, 3392 3392 .read_sensor = &kv_dpm_read_sensor, 3393 - .change_power_state = amdgpu_dpm_change_power_state_locked, 3393 + .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, 3394 3394 }; 3395 3395 3396 3396 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
+58 -2
drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c
··· 26 26 #include "atom.h" 27 27 #include "amd_pcie.h" 28 28 #include "legacy_dpm.h" 29 + #include "amdgpu_dpm_internal.h" 30 + #include "amdgpu_display.h" 29 31 30 32 #define amdgpu_dpm_pre_set_power_state(adev) \ 31 33 ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) ··· 951 949 return NULL; 952 950 } 953 951 954 - int amdgpu_dpm_change_power_state_locked(void *handle) 952 + static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 955 953 { 956 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 957 954 struct amdgpu_ps *ps; 958 955 enum amd_pm_state_type dpm_state; 959 956 int ret; ··· 1022 1021 } 1023 1022 1024 1023 return 0; 1024 + } 1025 + 1026 + void amdgpu_legacy_dpm_compute_clocks(void *handle) 1027 + { 1028 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1029 + int i = 0; 1030 + 1031 + if (adev->mode_info.num_crtc) 1032 + amdgpu_display_bandwidth_update(adev); 1033 + 1034 + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1035 + struct amdgpu_ring *ring = adev->rings[i]; 1036 + if (ring && ring->sched.ready) 1037 + amdgpu_fence_wait_empty(ring); 1038 + } 1039 + 1040 + amdgpu_dpm_get_active_displays(adev); 1041 + 1042 + amdgpu_dpm_change_power_state_locked(adev); 1043 + } 1044 + 1045 + void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 1046 + { 1047 + struct amdgpu_device *adev = 1048 + container_of(work, struct amdgpu_device, 1049 + pm.dpm.thermal.work); 1050 + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1051 + /* switch to the thermal state */ 1052 + enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1053 + int temp, size = sizeof(temp); 1054 + 1055 + if (!adev->pm.dpm_enabled) 1056 + return; 1057 + 1058 + if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, 1059 + AMDGPU_PP_SENSOR_GPU_TEMP, 1060 + (void *)&temp, 1061 + &size)) { 1062 + if (temp < adev->pm.dpm.thermal.min_temp) 1063 + /* switch back the user state */ 1064 + dpm_state = 
adev->pm.dpm.user_state; 1065 + } else { 1066 + if (adev->pm.dpm.thermal.high_to_low) 1067 + /* switch back the user state */ 1068 + dpm_state = adev->pm.dpm.user_state; 1069 + } 1070 + 1071 + if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 1072 + adev->pm.dpm.thermal_active = true; 1073 + else 1074 + adev->pm.dpm.thermal_active = false; 1075 + 1076 + adev->pm.dpm.state = dpm_state; 1077 + 1078 + amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); 1025 1079 }
+2 -1
drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h
··· 32 32 void amdgpu_free_extended_power_table(struct amdgpu_device *adev); 33 33 void amdgpu_add_thermal_controller(struct amdgpu_device *adev); 34 34 struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); 35 - int amdgpu_dpm_change_power_state_locked(void *handle); 36 35 void amdgpu_pm_print_power_states(struct amdgpu_device *adev); 36 + void amdgpu_legacy_dpm_compute_clocks(void *handle); 37 + void amdgpu_dpm_thermal_work_handler(struct work_struct *work); 37 38 #endif
+38 -3
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
··· 3891 3891 } 3892 3892 #endif 3893 3893 3894 + static int si_set_powergating_by_smu(void *handle, 3895 + uint32_t block_type, 3896 + bool gate) 3897 + { 3898 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3899 + 3900 + switch (block_type) { 3901 + case AMD_IP_BLOCK_TYPE_UVD: 3902 + if (!gate) { 3903 + adev->pm.dpm.uvd_active = true; 3904 + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 3905 + } else { 3906 + adev->pm.dpm.uvd_active = false; 3907 + } 3908 + 3909 + amdgpu_legacy_dpm_compute_clocks(handle); 3910 + break; 3911 + case AMD_IP_BLOCK_TYPE_VCE: 3912 + if (!gate) { 3913 + adev->pm.dpm.vce_active = true; 3914 + /* XXX select vce level based on ring/task */ 3915 + adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 3916 + } else { 3917 + adev->pm.dpm.vce_active = false; 3918 + } 3919 + 3920 + amdgpu_legacy_dpm_compute_clocks(handle); 3921 + break; 3922 + default: 3923 + break; 3924 + } 3925 + return 0; 3926 + } 3927 + 3894 3928 static int si_set_sw_state(struct amdgpu_device *adev) 3895 3929 { 3896 3930 return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? 
··· 7835 7801 else 7836 7802 adev->pm.dpm_enabled = true; 7837 7803 mutex_unlock(&adev->pm.mutex); 7838 - amdgpu_dpm_compute_clocks(adev); 7804 + amdgpu_legacy_dpm_compute_clocks(adev); 7839 7805 return ret; 7840 7806 } 7841 7807 ··· 7883 7849 adev->pm.dpm_enabled = true; 7884 7850 mutex_unlock(&adev->pm.mutex); 7885 7851 if (adev->pm.dpm_enabled) 7886 - amdgpu_dpm_compute_clocks(adev); 7852 + amdgpu_legacy_dpm_compute_clocks(adev); 7887 7853 } 7888 7854 return 0; 7889 7855 } ··· 8128 8094 .print_power_state = &si_dpm_print_power_state, 8129 8095 .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, 8130 8096 .force_performance_level = &si_dpm_force_performance_level, 8097 + .set_powergating_by_smu = &si_set_powergating_by_smu, 8131 8098 .vblank_too_short = &si_dpm_vblank_too_short, 8132 8099 .set_fan_control_mode = &si_dpm_set_fan_control_mode, 8133 8100 .get_fan_control_mode = &si_dpm_get_fan_control_mode, ··· 8137 8102 .check_state_equal = &si_check_state_equal, 8138 8103 .get_vce_clock_state = amdgpu_get_vce_clock_state, 8139 8104 .read_sensor = &si_dpm_read_sensor, 8140 - .change_power_state = amdgpu_dpm_change_power_state_locked, 8105 + .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, 8141 8106 }; 8142 8107 8143 8108 static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {