Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Remove wrapper layer of smu ip functions

1. delete amdgpu_powerplay.c, which was used for wrapping smu ip functions
2. delete struct pp_instance
3. make struct hwmgr the smu hw handle

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Rex Zhu and committed by
Alex Deucher
b905090d 5b2a3d2c

+336 -804
+1 -2
drivers/gpu/drm/amd/amdgpu/Makefile
··· 87 87 88 88 # add SMC block 89 89 amdgpu-y += \ 90 - amdgpu_dpm.o \ 91 - amdgpu_powerplay.o 90 + amdgpu_dpm.o 92 91 93 92 # add DCE block 94 93 amdgpu-y += \
-2
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1393 1393 #define HWIP_MAX_INSTANCE 6 1394 1394 1395 1395 struct amd_powerplay { 1396 - struct cgs_device *cgs_device; 1397 1396 void *pp_handle; 1398 - const struct amd_ip_funcs *ip_funcs; 1399 1397 const struct amd_pm_funcs *pp_funcs; 1400 1398 }; 1401 1399
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 1154 1154 umode_t effective_mode = attr->mode; 1155 1155 1156 1156 /* handle non-powerplay limitations */ 1157 - if (!adev->powerplay.cgs_device) { 1157 + if (!adev->powerplay.pp_handle) { 1158 1158 /* Skip fan attributes if fan is not present */ 1159 1159 if (adev->pm.no_fan && 1160 1160 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
-285
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
··· 1 - /* 2 - * Copyright 2015 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - * 22 - * Authors: AMD 23 - * 24 - */ 25 - #include "atom.h" 26 - #include "amdgpu.h" 27 - #include "amd_shared.h" 28 - #include <linux/module.h> 29 - #include <linux/moduleparam.h> 30 - #include "amdgpu_pm.h" 31 - #include <drm/amdgpu_drm.h> 32 - #include "amdgpu_powerplay.h" 33 - #include "si_dpm.h" 34 - #include "cik_dpm.h" 35 - #include "vi_dpm.h" 36 - 37 - static int amdgpu_pp_early_init(void *handle) 38 - { 39 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 40 - struct amd_powerplay *amd_pp; 41 - int ret = 0; 42 - 43 - amd_pp = &(adev->powerplay); 44 - amd_pp->pp_handle = (void *)adev; 45 - 46 - switch (adev->asic_type) { 47 - case CHIP_POLARIS11: 48 - case CHIP_POLARIS10: 49 - case CHIP_POLARIS12: 50 - case CHIP_TONGA: 51 - case CHIP_FIJI: 52 - case CHIP_TOPAZ: 53 - case CHIP_CARRIZO: 54 - case CHIP_STONEY: 55 - case CHIP_VEGA10: 56 - case CHIP_RAVEN: 57 - amd_pp->cgs_device = amdgpu_cgs_create_device(adev); 58 - amd_pp->ip_funcs = &pp_ip_funcs; 59 - amd_pp->pp_funcs = &pp_dpm_funcs; 60 - break; 61 - /* These chips don't have powerplay implemenations */ 62 - #ifdef CONFIG_DRM_AMDGPU_SI 63 - case CHIP_TAHITI: 64 - case CHIP_PITCAIRN: 65 - case CHIP_VERDE: 66 - case CHIP_OLAND: 67 - case CHIP_HAINAN: 68 - amd_pp->ip_funcs = &si_dpm_ip_funcs; 69 - amd_pp->pp_funcs = &si_dpm_funcs; 70 - break; 71 - #endif 72 - #ifdef CONFIG_DRM_AMDGPU_CIK 73 - case CHIP_BONAIRE: 74 - case CHIP_HAWAII: 75 - if (amdgpu_dpm == -1) { 76 - amd_pp->ip_funcs = &ci_dpm_ip_funcs; 77 - amd_pp->pp_funcs = &ci_dpm_funcs; 78 - } else { 79 - amd_pp->cgs_device = amdgpu_cgs_create_device(adev); 80 - amd_pp->ip_funcs = &pp_ip_funcs; 81 - amd_pp->pp_funcs = &pp_dpm_funcs; 82 - } 83 - break; 84 - case CHIP_KABINI: 85 - case CHIP_MULLINS: 86 - case CHIP_KAVERI: 87 - amd_pp->ip_funcs = &kv_dpm_ip_funcs; 88 - amd_pp->pp_funcs = &kv_dpm_funcs; 89 - break; 90 - #endif 91 - default: 92 - ret = -EINVAL; 93 - break; 94 - } 95 - 96 - if (adev->powerplay.ip_funcs->early_init) 97 - ret = 
adev->powerplay.ip_funcs->early_init(adev); 98 - 99 - return ret; 100 - } 101 - 102 - 103 - static int amdgpu_pp_late_init(void *handle) 104 - { 105 - int ret = 0; 106 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 107 - 108 - if (adev->powerplay.ip_funcs->late_init) 109 - ret = adev->powerplay.ip_funcs->late_init( 110 - adev->powerplay.pp_handle); 111 - 112 - return ret; 113 - } 114 - 115 - static int amdgpu_pp_sw_init(void *handle) 116 - { 117 - int ret = 0; 118 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 119 - 120 - if (adev->powerplay.ip_funcs->sw_init) 121 - ret = adev->powerplay.ip_funcs->sw_init( 122 - adev->powerplay.pp_handle); 123 - 124 - return ret; 125 - } 126 - 127 - static int amdgpu_pp_sw_fini(void *handle) 128 - { 129 - int ret = 0; 130 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 131 - 132 - if (adev->powerplay.ip_funcs->sw_fini) 133 - ret = adev->powerplay.ip_funcs->sw_fini( 134 - adev->powerplay.pp_handle); 135 - if (ret) 136 - return ret; 137 - 138 - return ret; 139 - } 140 - 141 - static int amdgpu_pp_hw_init(void *handle) 142 - { 143 - int ret = 0; 144 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 145 - 146 - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) 147 - amdgpu_ucode_init_bo(adev); 148 - 149 - if (adev->powerplay.ip_funcs->hw_init) 150 - ret = adev->powerplay.ip_funcs->hw_init( 151 - adev->powerplay.pp_handle); 152 - 153 - return ret; 154 - } 155 - 156 - static int amdgpu_pp_hw_fini(void *handle) 157 - { 158 - int ret = 0; 159 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 160 - 161 - if (adev->powerplay.ip_funcs->hw_fini) 162 - ret = adev->powerplay.ip_funcs->hw_fini( 163 - adev->powerplay.pp_handle); 164 - 165 - return ret; 166 - } 167 - 168 - static void amdgpu_pp_late_fini(void *handle) 169 - { 170 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 171 - 172 - if (adev->powerplay.ip_funcs->late_fini) 173 - 
adev->powerplay.ip_funcs->late_fini( 174 - adev->powerplay.pp_handle); 175 - 176 - if (adev->powerplay.cgs_device) 177 - amdgpu_cgs_destroy_device(adev->powerplay.cgs_device); 178 - } 179 - 180 - static int amdgpu_pp_suspend(void *handle) 181 - { 182 - int ret = 0; 183 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 184 - 185 - if (adev->powerplay.ip_funcs->suspend) 186 - ret = adev->powerplay.ip_funcs->suspend( 187 - adev->powerplay.pp_handle); 188 - return ret; 189 - } 190 - 191 - static int amdgpu_pp_resume(void *handle) 192 - { 193 - int ret = 0; 194 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 195 - 196 - if (adev->powerplay.ip_funcs->resume) 197 - ret = adev->powerplay.ip_funcs->resume( 198 - adev->powerplay.pp_handle); 199 - return ret; 200 - } 201 - 202 - static int amdgpu_pp_set_clockgating_state(void *handle, 203 - enum amd_clockgating_state state) 204 - { 205 - int ret = 0; 206 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 207 - 208 - if (adev->powerplay.ip_funcs->set_clockgating_state) 209 - ret = adev->powerplay.ip_funcs->set_clockgating_state( 210 - adev->powerplay.pp_handle, state); 211 - return ret; 212 - } 213 - 214 - static int amdgpu_pp_set_powergating_state(void *handle, 215 - enum amd_powergating_state state) 216 - { 217 - int ret = 0; 218 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 219 - 220 - if (adev->powerplay.ip_funcs->set_powergating_state) 221 - ret = adev->powerplay.ip_funcs->set_powergating_state( 222 - adev->powerplay.pp_handle, state); 223 - return ret; 224 - } 225 - 226 - 227 - static bool amdgpu_pp_is_idle(void *handle) 228 - { 229 - bool ret = true; 230 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 231 - 232 - if (adev->powerplay.ip_funcs->is_idle) 233 - ret = adev->powerplay.ip_funcs->is_idle( 234 - adev->powerplay.pp_handle); 235 - return ret; 236 - } 237 - 238 - static int amdgpu_pp_wait_for_idle(void *handle) 239 - { 240 - int ret = 0; 241 - 
struct amdgpu_device *adev = (struct amdgpu_device *)handle; 242 - 243 - if (adev->powerplay.ip_funcs->wait_for_idle) 244 - ret = adev->powerplay.ip_funcs->wait_for_idle( 245 - adev->powerplay.pp_handle); 246 - return ret; 247 - } 248 - 249 - static int amdgpu_pp_soft_reset(void *handle) 250 - { 251 - int ret = 0; 252 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 253 - 254 - if (adev->powerplay.ip_funcs->soft_reset) 255 - ret = adev->powerplay.ip_funcs->soft_reset( 256 - adev->powerplay.pp_handle); 257 - return ret; 258 - } 259 - 260 - static const struct amd_ip_funcs amdgpu_pp_ip_funcs = { 261 - .name = "amdgpu_powerplay", 262 - .early_init = amdgpu_pp_early_init, 263 - .late_init = amdgpu_pp_late_init, 264 - .sw_init = amdgpu_pp_sw_init, 265 - .sw_fini = amdgpu_pp_sw_fini, 266 - .hw_init = amdgpu_pp_hw_init, 267 - .hw_fini = amdgpu_pp_hw_fini, 268 - .late_fini = amdgpu_pp_late_fini, 269 - .suspend = amdgpu_pp_suspend, 270 - .resume = amdgpu_pp_resume, 271 - .is_idle = amdgpu_pp_is_idle, 272 - .wait_for_idle = amdgpu_pp_wait_for_idle, 273 - .soft_reset = amdgpu_pp_soft_reset, 274 - .set_clockgating_state = amdgpu_pp_set_clockgating_state, 275 - .set_powergating_state = amdgpu_pp_set_powergating_state, 276 - }; 277 - 278 - const struct amdgpu_ip_block_version amdgpu_pp_ip_block = 279 - { 280 - .type = AMD_IP_BLOCK_TYPE_SMC, 281 - .major = 1, 282 - .minor = 0, 283 - .rev = 0, 284 - .funcs = &amdgpu_pp_ip_funcs, 285 - };
-33
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h
··· 1 - /* 2 - * Copyright 2015 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - * Authors: AMD 23 - * 24 - */ 25 - 26 - #ifndef __AMDGPU_POWERPLAY_H__ 27 - #define __AMDGPU_POWERPLAY_H__ 28 - 29 - #include "amd_shared.h" 30 - 31 - extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block; 32 - 33 - #endif /* __AMDGPU_POWERPLAY_H__ */
+14 -2
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
··· 65 65 #define VOLTAGE_VID_OFFSET_SCALE1 625 66 66 #define VOLTAGE_VID_OFFSET_SCALE2 100 67 67 68 + static const struct amd_pm_funcs ci_dpm_funcs; 69 + 68 70 static const struct ci_pt_defaults defaults_hawaii_xt = 69 71 { 70 72 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, ··· 6243 6241 { 6244 6242 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6245 6243 6244 + adev->powerplay.pp_funcs = &ci_dpm_funcs; 6246 6245 ci_dpm_set_irq_funcs(adev); 6247 6246 6248 6247 return 0; ··· 6763 6760 } 6764 6761 } 6765 6762 6766 - const struct amd_ip_funcs ci_dpm_ip_funcs = { 6763 + static const struct amd_ip_funcs ci_dpm_ip_funcs = { 6767 6764 .name = "ci_dpm", 6768 6765 .early_init = ci_dpm_early_init, 6769 6766 .late_init = ci_dpm_late_init, ··· 6780 6777 .set_powergating_state = ci_dpm_set_powergating_state, 6781 6778 }; 6782 6779 6783 - const struct amd_pm_funcs ci_dpm_funcs = { 6780 + const struct amdgpu_ip_block_version ci_smu_ip_block = 6781 + { 6782 + .type = AMD_IP_BLOCK_TYPE_SMC, 6783 + .major = 7, 6784 + .minor = 0, 6785 + .rev = 0, 6786 + .funcs = &ci_dpm_ip_funcs, 6787 + }; 6788 + 6789 + static const struct amd_pm_funcs ci_dpm_funcs = { 6784 6790 .pre_set_power_state = &ci_dpm_pre_set_power_state, 6785 6791 .set_power_state = &ci_dpm_set_power_state, 6786 6792 .post_set_power_state = &ci_dpm_post_set_power_state,
+10 -5
drivers/gpu/drm/amd/amdgpu/cik.c
··· 67 67 68 68 #include "amdgpu_dm.h" 69 69 #include "amdgpu_amdkfd.h" 70 - #include "amdgpu_powerplay.h" 71 70 #include "dce_virtual.h" 72 71 73 72 /* ··· 1995 1996 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 1996 1997 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 1997 1998 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 1998 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1999 + if (amdgpu_dpm == -1) 2000 + amdgpu_device_ip_block_add(adev, &ci_smu_ip_block); 2001 + else 2002 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1999 2003 if (adev->enable_virtual_display) 2000 2004 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2001 2005 #if defined(CONFIG_DRM_AMD_DC) ··· 2016 2014 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2017 2015 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2018 2016 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2019 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2017 + if (amdgpu_dpm == -1) 2018 + amdgpu_device_ip_block_add(adev, &ci_smu_ip_block); 2019 + else 2020 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 2020 2021 if (adev->enable_virtual_display) 2021 2022 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2022 2023 #if defined(CONFIG_DRM_AMD_DC) ··· 2037 2032 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2038 2033 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2039 2034 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2040 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2035 + amdgpu_device_ip_block_add(adev, &kv_smu_ip_block); 2041 2036 if (adev->enable_virtual_display) 2042 2037 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2043 2038 #if defined(CONFIG_DRM_AMD_DC) ··· 2056 2051 amdgpu_device_ip_block_add(adev, &cik_common_ip_block); 2057 2052 amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); 2058 2053 amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); 2059 - amdgpu_device_ip_block_add(adev, 
&amdgpu_pp_ip_block); 2054 + amdgpu_device_ip_block_add(adev, &kv_smu_ip_block); 2060 2055 if (adev->enable_virtual_display) 2061 2056 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2062 2057 #if defined(CONFIG_DRM_AMD_DC)
+3 -4
drivers/gpu/drm/amd/amdgpu/cik_dpm.h
··· 24 24 #ifndef __CIK_DPM_H__ 25 25 #define __CIK_DPM_H__ 26 26 27 - extern const struct amd_ip_funcs ci_dpm_ip_funcs; 28 - extern const struct amd_ip_funcs kv_dpm_ip_funcs; 29 - extern const struct amd_pm_funcs ci_dpm_funcs; 30 - extern const struct amd_pm_funcs kv_dpm_funcs; 27 + extern const struct amdgpu_ip_block_version ci_smu_ip_block; 28 + extern const struct amdgpu_ip_block_version kv_smu_ip_block; 29 + 31 30 #endif
+14 -2
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
··· 42 42 #define KV_MINIMUM_ENGINE_CLOCK 800 43 43 #define SMC_RAM_END 0x40000 44 44 45 + static const struct amd_pm_funcs kv_dpm_funcs; 46 + 45 47 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); 46 48 static int kv_enable_nb_dpm(struct amdgpu_device *adev, 47 49 bool enable); ··· 2962 2960 { 2963 2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2964 2962 2963 + adev->powerplay.pp_funcs = &kv_dpm_funcs; 2965 2964 kv_dpm_set_irq_funcs(adev); 2966 2965 2967 2966 return 0; ··· 3304 3301 } 3305 3302 } 3306 3303 3307 - const struct amd_ip_funcs kv_dpm_ip_funcs = { 3304 + static const struct amd_ip_funcs kv_dpm_ip_funcs = { 3308 3305 .name = "kv_dpm", 3309 3306 .early_init = kv_dpm_early_init, 3310 3307 .late_init = kv_dpm_late_init, ··· 3321 3318 .set_powergating_state = kv_dpm_set_powergating_state, 3322 3319 }; 3323 3320 3324 - const struct amd_pm_funcs kv_dpm_funcs = { 3321 + const struct amdgpu_ip_block_version kv_smu_ip_block = 3322 + { 3323 + .type = AMD_IP_BLOCK_TYPE_SMC, 3324 + .major = 1, 3325 + .minor = 0, 3326 + .rev = 0, 3327 + .funcs = &kv_dpm_ip_funcs, 3328 + }; 3329 + 3330 + static const struct amd_pm_funcs kv_dpm_funcs = { 3325 3331 .pre_set_power_state = &kv_dpm_pre_set_power_state, 3326 3332 .set_power_state = &kv_dpm_set_power_state, 3327 3333 .post_set_power_state = &kv_dpm_post_set_power_state,
+4 -4
drivers/gpu/drm/amd/amdgpu/si.c
··· 32 32 #include "amdgpu_vce.h" 33 33 #include "atom.h" 34 34 #include "amd_pcie.h" 35 - #include "amdgpu_powerplay.h" 35 + #include "si_dpm.h" 36 36 #include "sid.h" 37 37 #include "si_ih.h" 38 38 #include "gfx_v6_0.h" ··· 1983 1983 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1984 1984 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1985 1985 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 1986 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1986 + amdgpu_device_ip_block_add(adev, &si_smu_ip_block); 1987 1987 if (adev->enable_virtual_display) 1988 1988 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1989 1989 else ··· 1997 1997 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 1998 1998 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 1999 1999 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 2000 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2000 + amdgpu_device_ip_block_add(adev, &si_smu_ip_block); 2001 2001 if (adev->enable_virtual_display) 2002 2002 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2003 2003 else ··· 2011 2011 amdgpu_device_ip_block_add(adev, &si_common_ip_block); 2012 2012 amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); 2013 2013 amdgpu_device_ip_block_add(adev, &si_ih_ip_block); 2014 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 2014 + amdgpu_device_ip_block_add(adev, &si_smu_ip_block); 2015 2015 if (adev->enable_virtual_display) 2016 2016 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 2017 2017 amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+14 -2
drivers/gpu/drm/amd/amdgpu/si_dpm.c
··· 67 67 MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 68 68 MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); 69 69 70 + static const struct amd_pm_funcs si_dpm_funcs; 71 + 70 72 union power_info { 71 73 struct _ATOM_POWERPLAY_INFO info; 72 74 struct _ATOM_POWERPLAY_INFO_V2 info_2; ··· 7916 7914 7917 7915 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 7918 7916 7917 + adev->powerplay.pp_funcs = &si_dpm_funcs; 7919 7918 si_dpm_set_irq_funcs(adev); 7920 7919 return 0; 7921 7920 } ··· 8017 8014 } 8018 8015 } 8019 8016 8020 - const struct amd_ip_funcs si_dpm_ip_funcs = { 8017 + static const struct amd_ip_funcs si_dpm_ip_funcs = { 8021 8018 .name = "si_dpm", 8022 8019 .early_init = si_dpm_early_init, 8023 8020 .late_init = si_dpm_late_init, ··· 8034 8031 .set_powergating_state = si_dpm_set_powergating_state, 8035 8032 }; 8036 8033 8037 - const struct amd_pm_funcs si_dpm_funcs = { 8034 + const struct amdgpu_ip_block_version si_smu_ip_block = 8035 + { 8036 + .type = AMD_IP_BLOCK_TYPE_SMC, 8037 + .major = 6, 8038 + .minor = 0, 8039 + .rev = 0, 8040 + .funcs = &si_dpm_ip_funcs, 8041 + }; 8042 + 8043 + static const struct amd_pm_funcs si_dpm_funcs = { 8038 8044 .pre_set_power_state = &si_dpm_pre_set_power_state, 8039 8045 .set_power_state = &si_dpm_set_power_state, 8040 8046 .post_set_power_state = &si_dpm_post_set_power_state,
+1 -2
drivers/gpu/drm/amd/amdgpu/si_dpm.h
··· 245 245 SI_PM_DISPLAY_GAP_IGNORE = 3, 246 246 }; 247 247 248 - extern const struct amd_ip_funcs si_dpm_ip_funcs; 249 - extern const struct amd_pm_funcs si_dpm_funcs; 248 + extern const struct amdgpu_ip_block_version si_smu_ip_block; 250 249 251 250 struct ni_leakage_coeffients 252 251 {
+2 -3
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 57 57 #include "uvd_v7_0.h" 58 58 #include "vce_v4_0.h" 59 59 #include "vcn_v1_0.h" 60 - #include "amdgpu_powerplay.h" 61 60 #include "dce_virtual.h" 62 61 #include "mxgpu_ai.h" 63 62 ··· 532 533 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 533 534 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); 534 535 if (!amdgpu_sriov_vf(adev)) 535 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 536 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 536 537 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 537 538 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 538 539 #if defined(CONFIG_DRM_AMD_DC) ··· 551 552 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); 552 553 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); 553 554 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); 554 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 555 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 555 556 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 556 557 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 557 558 #if defined(CONFIG_DRM_AMD_DC)
+6 -7
drivers/gpu/drm/amd/amdgpu/vi.c
··· 71 71 #include "uvd_v5_0.h" 72 72 #include "uvd_v6_0.h" 73 73 #include "vce_v3_0.h" 74 - #include "amdgpu_powerplay.h" 75 74 #if defined(CONFIG_DRM_AMD_ACP) 76 75 #include "amdgpu_acp.h" 77 76 #endif ··· 1510 1511 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1511 1512 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block); 1512 1513 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block); 1513 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1514 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1514 1515 if (adev->enable_virtual_display) 1515 1516 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1516 1517 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); ··· 1520 1521 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1521 1522 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block); 1522 1523 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1523 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1524 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1524 1525 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1525 1526 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1526 1527 #if defined(CONFIG_DRM_AMD_DC) ··· 1540 1541 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1541 1542 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1542 1543 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1543 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1544 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1544 1545 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) 1545 1546 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1546 1547 #if defined(CONFIG_DRM_AMD_DC) ··· 1562 1563 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1563 1564 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); 1564 1565 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); 1565 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1566 + amdgpu_device_ip_block_add(adev, 
&pp_smu_ip_block); 1566 1567 if (adev->enable_virtual_display) 1567 1568 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1568 1569 #if defined(CONFIG_DRM_AMD_DC) ··· 1580 1581 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1581 1582 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1582 1583 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1583 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1584 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1584 1585 if (adev->enable_virtual_display) 1585 1586 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1586 1587 #if defined(CONFIG_DRM_AMD_DC) ··· 1601 1602 amdgpu_device_ip_block_add(adev, &vi_common_ip_block); 1602 1603 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); 1603 1604 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); 1604 - amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); 1605 + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); 1605 1606 if (adev->enable_virtual_display) 1606 1607 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 1607 1608 #if defined(CONFIG_DRM_AMD_DC)
+1 -2
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 24 24 #ifndef __KGD_PP_INTERFACE_H__ 25 25 #define __KGD_PP_INTERFACE_H__ 26 26 27 - extern const struct amd_ip_funcs pp_ip_funcs; 28 - extern const struct amd_pm_funcs pp_dpm_funcs; 27 + extern const struct amdgpu_ip_block_version pp_smu_ip_block; 29 28 30 29 struct amd_vce_state { 31 30 /* vce clocks */
+245 -366
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
··· 27 27 #include <linux/slab.h> 28 28 #include "amd_shared.h" 29 29 #include "amd_powerplay.h" 30 - #include "pp_instance.h" 31 30 #include "power_state.h" 32 31 #include "amdgpu.h" 33 32 #include "hwmgr.h" ··· 36 37 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 37 38 enum amd_pm_state_type *user_state); 38 39 39 - static inline int pp_check(struct pp_instance *handle) 40 + static const struct amd_pm_funcs pp_dpm_funcs; 41 + 42 + static inline int pp_check(struct pp_hwmgr *hwmgr) 40 43 { 41 - if (handle == NULL) 44 + if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL) 42 45 return -EINVAL; 43 46 44 - if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL) 45 - return -EINVAL; 46 - 47 - if (handle->pm_en == 0) 48 - return PP_DPM_DISABLED; 49 - 50 - if (handle->hwmgr->hwmgr_func == NULL) 47 + if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL) 51 48 return PP_DPM_DISABLED; 52 49 53 50 return 0; ··· 51 56 52 57 static int amd_powerplay_create(struct amdgpu_device *adev) 53 58 { 54 - struct pp_instance *instance; 59 + struct pp_hwmgr *hwmgr; 55 60 56 61 if (adev == NULL) 57 62 return -EINVAL; 58 63 59 - instance = kzalloc(sizeof(struct pp_instance), GFP_KERNEL); 60 - if (instance == NULL) 64 + hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL); 65 + if (hwmgr == NULL) 61 66 return -ENOMEM; 62 67 63 - instance->parent = adev; 64 - instance->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; 65 - instance->device = adev->powerplay.cgs_device; 66 - mutex_init(&instance->pp_lock); 67 - adev->powerplay.pp_handle = instance; 68 - 68 + hwmgr->adev = adev; 69 + hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? 
true : false; 70 + hwmgr->device = amdgpu_cgs_create_device(adev); 71 + mutex_init(&hwmgr->smu_lock); 72 + hwmgr->chip_family = adev->family; 73 + hwmgr->chip_id = adev->asic_type; 74 + hwmgr->feature_mask = amdgpu_pp_feature_mask; 75 + adev->powerplay.pp_handle = hwmgr; 76 + adev->powerplay.pp_funcs = &pp_dpm_funcs; 69 77 return 0; 70 78 } 71 79 72 80 73 - static int amd_powerplay_destroy(void *handle) 81 + static int amd_powerplay_destroy(struct amdgpu_device *adev) 74 82 { 75 - struct pp_instance *instance = (struct pp_instance *)handle; 83 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 76 84 77 - kfree(instance->hwmgr->hardcode_pp_table); 78 - instance->hwmgr->hardcode_pp_table = NULL; 85 + kfree(hwmgr->hardcode_pp_table); 86 + hwmgr->hardcode_pp_table = NULL; 79 87 80 - kfree(instance->hwmgr); 81 - instance->hwmgr = NULL; 88 + kfree(hwmgr); 89 + hwmgr = NULL; 82 90 83 - kfree(instance); 84 - instance = NULL; 85 91 return 0; 86 92 } 87 93 88 94 static int pp_early_init(void *handle) 89 95 { 90 96 int ret; 91 - struct pp_instance *pp_handle = NULL; 92 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 97 + struct amdgpu_device *adev = handle; 93 98 94 99 ret = amd_powerplay_create(adev); 95 100 96 101 if (ret != 0) 97 102 return ret; 98 103 99 - pp_handle = adev->powerplay.pp_handle; 100 - 101 - ret = hwmgr_early_init(pp_handle); 104 + ret = hwmgr_early_init(adev->powerplay.pp_handle); 102 105 if (ret) 103 106 return -EINVAL; 104 107 ··· 105 112 106 113 static int pp_sw_init(void *handle) 107 114 { 108 - struct pp_hwmgr *hwmgr; 115 + struct amdgpu_device *adev = handle; 116 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 109 117 int ret = 0; 110 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 111 118 112 - ret = pp_check(pp_handle); 119 + ret = pp_check(hwmgr); 113 120 114 121 if (ret >= 0) { 115 - hwmgr = pp_handle->hwmgr; 116 - 117 122 if (hwmgr->smumgr_funcs->smu_init == NULL) 118 123 return -EINVAL; 119 124 ··· 119 
128 120 129 pr_debug("amdgpu: powerplay sw initialized\n"); 121 130 } 131 + 122 132 return ret; 123 133 } 124 134 125 135 static int pp_sw_fini(void *handle) 126 136 { 127 - struct pp_hwmgr *hwmgr; 137 + struct amdgpu_device *adev = handle; 138 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 128 139 int ret = 0; 129 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 130 140 131 - ret = pp_check(pp_handle); 141 + ret = pp_check(hwmgr); 132 142 if (ret >= 0) { 133 - hwmgr = pp_handle->hwmgr; 134 - 135 - if (hwmgr->smumgr_funcs->smu_fini == NULL) 136 - return -EINVAL; 137 - 138 - ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 143 + if (hwmgr->smumgr_funcs->smu_fini != NULL) 144 + hwmgr->smumgr_funcs->smu_fini(hwmgr); 139 145 } 140 - return ret; 146 + return 0; 141 147 } 142 148 143 149 static int pp_hw_init(void *handle) 144 150 { 145 151 int ret = 0; 146 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 147 - struct pp_hwmgr *hwmgr; 152 + struct amdgpu_device *adev = handle; 153 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 148 154 149 - ret = pp_check(pp_handle); 155 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) 156 + amdgpu_ucode_init_bo(adev); 157 + 158 + ret = pp_check(hwmgr); 150 159 151 160 if (ret >= 0) { 152 - hwmgr = pp_handle->hwmgr; 153 - 154 161 if (hwmgr->smumgr_funcs->start_smu == NULL) 155 162 return -EINVAL; 156 163 157 - if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 164 + if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { 158 165 pr_err("smc start failed\n"); 159 - hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 166 + hwmgr->smumgr_funcs->smu_fini(hwmgr); 160 167 return -EINVAL; 161 168 } 162 169 if (ret == PP_DPM_DISABLED) 163 170 goto exit; 164 - ret = hwmgr_hw_init(pp_handle); 171 + ret = hwmgr_hw_init(hwmgr); 165 172 if (ret) 166 173 goto exit; 167 174 } 168 175 return ret; 169 176 exit: 170 - pp_handle->pm_en = 0; 177 + hwmgr->pm_en = 0; 171 178 cgs_notify_dpm_enabled(hwmgr->device, 
false); 172 179 return 0; 173 180 ··· 173 184 174 185 static int pp_hw_fini(void *handle) 175 186 { 176 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 187 + struct amdgpu_device *adev = handle; 188 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 177 189 int ret = 0; 178 190 179 - ret = pp_check(pp_handle); 191 + ret = pp_check(hwmgr); 180 192 if (ret == 0) 181 - hwmgr_hw_fini(pp_handle); 193 + hwmgr_hw_fini(hwmgr); 182 194 183 195 return 0; 184 196 } 185 197 186 198 static int pp_late_init(void *handle) 187 199 { 188 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 200 + struct amdgpu_device *adev = handle; 201 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 189 202 int ret = 0; 190 203 191 - ret = pp_check(pp_handle); 204 + ret = pp_check(hwmgr); 205 + 192 206 if (ret == 0) 193 - pp_dpm_dispatch_tasks(pp_handle, 207 + pp_dpm_dispatch_tasks(hwmgr, 194 208 AMD_PP_TASK_COMPLETE_INIT, NULL); 195 209 196 210 return 0; ··· 223 231 static int pp_set_powergating_state(void *handle, 224 232 enum amd_powergating_state state) 225 233 { 226 - struct pp_hwmgr *hwmgr; 227 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 234 + struct amdgpu_device *adev = handle; 235 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 228 236 int ret = 0; 229 237 230 - ret = pp_check(pp_handle); 238 + ret = pp_check(hwmgr); 231 239 232 240 if (ret) 233 241 return ret; 234 - 235 - hwmgr = pp_handle->hwmgr; 236 242 237 243 if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { 238 244 pr_info("%s was not implemented.\n", __func__); ··· 244 254 245 255 static int pp_suspend(void *handle) 246 256 { 247 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 257 + struct amdgpu_device *adev = handle; 258 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 248 259 int ret = 0; 249 260 250 - ret = pp_check(pp_handle); 261 + ret = pp_check(hwmgr); 251 262 if (ret == 0) 252 - hwmgr_hw_suspend(pp_handle); 263 + 
hwmgr_hw_suspend(hwmgr); 253 264 return 0; 254 265 } 255 266 256 267 static int pp_resume(void *handle) 257 268 { 258 - struct pp_hwmgr *hwmgr; 269 + struct amdgpu_device *adev = handle; 270 + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 259 271 int ret; 260 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 261 272 262 - ret = pp_check(pp_handle); 273 + ret = pp_check(hwmgr); 263 274 264 275 if (ret < 0) 265 276 return ret; 266 277 267 - hwmgr = pp_handle->hwmgr; 268 - 269 278 if (hwmgr->smumgr_funcs->start_smu == NULL) 270 279 return -EINVAL; 271 280 272 - if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) { 281 + if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { 273 282 pr_err("smc start failed\n"); 274 - hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr); 283 + hwmgr->smumgr_funcs->smu_fini(hwmgr); 275 284 return -EINVAL; 276 285 } 277 286 278 287 if (ret == PP_DPM_DISABLED) 279 288 return 0; 280 289 281 - return hwmgr_hw_resume(pp_handle); 290 + return hwmgr_hw_resume(hwmgr); 282 291 } 283 292 284 - const struct amd_ip_funcs pp_ip_funcs = { 293 + static const struct amd_ip_funcs pp_ip_funcs = { 285 294 .name = "powerplay", 286 295 .early_init = pp_early_init, 287 296 .late_init = pp_late_init, ··· 298 309 .set_powergating_state = pp_set_powergating_state, 299 310 }; 300 311 312 + const struct amdgpu_ip_block_version pp_smu_ip_block = 313 + { 314 + .type = AMD_IP_BLOCK_TYPE_SMC, 315 + .major = 1, 316 + .minor = 0, 317 + .rev = 0, 318 + .funcs = &pp_ip_funcs, 319 + }; 320 + 301 321 static int pp_dpm_load_fw(void *handle) 302 322 { 303 323 return 0; ··· 319 321 320 322 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) 321 323 { 322 - struct pp_hwmgr *hwmgr; 323 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 324 + struct pp_hwmgr *hwmgr = handle; 324 325 int ret = 0; 325 326 326 - ret = pp_check(pp_handle); 327 + ret = pp_check(hwmgr); 327 328 328 329 if (ret) 329 330 return ret; 330 - 331 - hwmgr = 
pp_handle->hwmgr; 332 331 333 332 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { 334 333 pr_info("%s was not implemented.\n", __func__); ··· 374 379 static int pp_dpm_force_performance_level(void *handle, 375 380 enum amd_dpm_forced_level level) 376 381 { 377 - struct pp_hwmgr *hwmgr; 378 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 382 + struct pp_hwmgr *hwmgr = handle; 379 383 int ret = 0; 380 384 381 - ret = pp_check(pp_handle); 385 + ret = pp_check(hwmgr); 382 386 383 387 if (ret) 384 388 return ret; 385 389 386 - hwmgr = pp_handle->hwmgr; 387 - 388 390 if (level == hwmgr->dpm_level) 389 391 return 0; 390 392 391 - mutex_lock(&pp_handle->pp_lock); 393 + mutex_lock(&hwmgr->smu_lock); 392 394 pp_dpm_en_umd_pstate(hwmgr, &level); 393 395 hwmgr->request_dpm_level = level; 394 - hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 395 - mutex_unlock(&pp_handle->pp_lock); 396 + hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 397 + mutex_unlock(&hwmgr->smu_lock); 396 398 397 399 return 0; 398 400 } ··· 397 405 static enum amd_dpm_forced_level pp_dpm_get_performance_level( 398 406 void *handle) 399 407 { 400 - struct pp_hwmgr *hwmgr; 401 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 408 + struct pp_hwmgr *hwmgr = handle; 402 409 int ret = 0; 403 410 enum amd_dpm_forced_level level; 404 411 405 - ret = pp_check(pp_handle); 412 + ret = pp_check(hwmgr); 406 413 407 414 if (ret) 408 415 return ret; 409 416 410 - hwmgr = pp_handle->hwmgr; 411 - mutex_lock(&pp_handle->pp_lock); 417 + mutex_lock(&hwmgr->smu_lock); 412 418 level = hwmgr->dpm_level; 413 - mutex_unlock(&pp_handle->pp_lock); 419 + mutex_unlock(&hwmgr->smu_lock); 414 420 return level; 415 421 } 416 422 417 423 static uint32_t pp_dpm_get_sclk(void *handle, bool low) 418 424 { 419 - struct pp_hwmgr *hwmgr; 420 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 425 + struct pp_hwmgr *hwmgr = handle; 421 426 int ret = 0; 422 
427 uint32_t clk = 0; 423 428 424 - ret = pp_check(pp_handle); 429 + ret = pp_check(hwmgr); 425 430 426 431 if (ret) 427 432 return ret; 428 - 429 - hwmgr = pp_handle->hwmgr; 430 433 431 434 if (hwmgr->hwmgr_func->get_sclk == NULL) { 432 435 pr_info("%s was not implemented.\n", __func__); 433 436 return 0; 434 437 } 435 - mutex_lock(&pp_handle->pp_lock); 438 + mutex_lock(&hwmgr->smu_lock); 436 439 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low); 437 - mutex_unlock(&pp_handle->pp_lock); 440 + mutex_unlock(&hwmgr->smu_lock); 438 441 return clk; 439 442 } 440 443 441 444 static uint32_t pp_dpm_get_mclk(void *handle, bool low) 442 445 { 443 - struct pp_hwmgr *hwmgr; 444 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 446 + struct pp_hwmgr *hwmgr = handle; 445 447 int ret = 0; 446 448 uint32_t clk = 0; 447 449 448 - ret = pp_check(pp_handle); 450 + ret = pp_check(hwmgr); 449 451 450 452 if (ret) 451 453 return ret; 452 - 453 - hwmgr = pp_handle->hwmgr; 454 454 455 455 if (hwmgr->hwmgr_func->get_mclk == NULL) { 456 456 pr_info("%s was not implemented.\n", __func__); 457 457 return 0; 458 458 } 459 - mutex_lock(&pp_handle->pp_lock); 459 + mutex_lock(&hwmgr->smu_lock); 460 460 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low); 461 - mutex_unlock(&pp_handle->pp_lock); 461 + mutex_unlock(&hwmgr->smu_lock); 462 462 return clk; 463 463 } 464 464 465 465 static void pp_dpm_powergate_vce(void *handle, bool gate) 466 466 { 467 - struct pp_hwmgr *hwmgr; 468 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 467 + struct pp_hwmgr *hwmgr = handle; 469 468 int ret = 0; 470 469 471 - ret = pp_check(pp_handle); 470 + ret = pp_check(hwmgr); 472 471 473 472 if (ret) 474 473 return; 475 - 476 - hwmgr = pp_handle->hwmgr; 477 474 478 475 if (hwmgr->hwmgr_func->powergate_vce == NULL) { 479 476 pr_info("%s was not implemented.\n", __func__); 480 477 return; 481 478 } 482 - mutex_lock(&pp_handle->pp_lock); 479 + mutex_lock(&hwmgr->smu_lock); 483 480 
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); 484 - mutex_unlock(&pp_handle->pp_lock); 481 + mutex_unlock(&hwmgr->smu_lock); 485 482 } 486 483 487 484 static void pp_dpm_powergate_uvd(void *handle, bool gate) 488 485 { 489 - struct pp_hwmgr *hwmgr; 490 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 486 + struct pp_hwmgr *hwmgr = handle; 491 487 int ret = 0; 492 488 493 - ret = pp_check(pp_handle); 489 + ret = pp_check(hwmgr); 494 490 495 491 if (ret) 496 492 return; 497 - 498 - hwmgr = pp_handle->hwmgr; 499 493 500 494 if (hwmgr->hwmgr_func->powergate_uvd == NULL) { 501 495 pr_info("%s was not implemented.\n", __func__); 502 496 return; 503 497 } 504 - mutex_lock(&pp_handle->pp_lock); 498 + mutex_lock(&hwmgr->smu_lock); 505 499 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); 506 - mutex_unlock(&pp_handle->pp_lock); 500 + mutex_unlock(&hwmgr->smu_lock); 507 501 } 508 502 509 503 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, 510 504 enum amd_pm_state_type *user_state) 511 505 { 512 506 int ret = 0; 513 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 507 + struct pp_hwmgr *hwmgr = handle; 514 508 515 - ret = pp_check(pp_handle); 509 + ret = pp_check(hwmgr); 516 510 517 511 if (ret) 518 512 return ret; 519 513 520 - mutex_lock(&pp_handle->pp_lock); 521 - ret = hwmgr_handle_task(pp_handle, task_id, user_state); 522 - mutex_unlock(&pp_handle->pp_lock); 514 + mutex_lock(&hwmgr->smu_lock); 515 + ret = hwmgr_handle_task(hwmgr, task_id, user_state); 516 + mutex_unlock(&hwmgr->smu_lock); 523 517 524 518 return ret; 525 519 } 526 520 527 521 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) 528 522 { 529 - struct pp_hwmgr *hwmgr; 523 + struct pp_hwmgr *hwmgr = handle; 530 524 struct pp_power_state *state; 531 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 532 525 int ret = 0; 533 526 enum amd_pm_state_type pm_type; 534 527 535 - ret = pp_check(pp_handle); 528 + ret = 
pp_check(hwmgr); 536 529 537 530 if (ret) 538 531 return ret; 539 532 540 - hwmgr = pp_handle->hwmgr; 541 - 542 533 if (hwmgr->current_ps == NULL) 543 534 return -EINVAL; 544 535 545 - mutex_lock(&pp_handle->pp_lock); 536 + mutex_lock(&hwmgr->smu_lock); 546 537 547 538 state = hwmgr->current_ps; 548 539 ··· 546 571 pm_type = POWER_STATE_TYPE_DEFAULT; 547 572 break; 548 573 } 549 - mutex_unlock(&pp_handle->pp_lock); 574 + mutex_unlock(&hwmgr->smu_lock); 550 575 551 576 return pm_type; 552 577 } 553 578 554 579 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) 555 580 { 556 - struct pp_hwmgr *hwmgr; 557 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 581 + struct pp_hwmgr *hwmgr = handle; 558 582 int ret = 0; 559 583 560 - ret = pp_check(pp_handle); 584 + ret = pp_check(hwmgr); 561 585 562 586 if (ret) 563 587 return; 564 - 565 - hwmgr = pp_handle->hwmgr; 566 588 567 589 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { 568 590 pr_info("%s was not implemented.\n", __func__); 569 591 return; 570 592 } 571 - mutex_lock(&pp_handle->pp_lock); 593 + mutex_lock(&hwmgr->smu_lock); 572 594 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); 573 - mutex_unlock(&pp_handle->pp_lock); 595 + mutex_unlock(&hwmgr->smu_lock); 574 596 } 575 597 576 598 static uint32_t pp_dpm_get_fan_control_mode(void *handle) 577 599 { 578 - struct pp_hwmgr *hwmgr; 579 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 600 + struct pp_hwmgr *hwmgr = handle; 580 601 int ret = 0; 581 602 uint32_t mode = 0; 582 603 583 - ret = pp_check(pp_handle); 604 + ret = pp_check(hwmgr); 584 605 585 606 if (ret) 586 607 return ret; 587 - 588 - hwmgr = pp_handle->hwmgr; 589 608 590 609 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { 591 610 pr_info("%s was not implemented.\n", __func__); 592 611 return 0; 593 612 } 594 - mutex_lock(&pp_handle->pp_lock); 613 + mutex_lock(&hwmgr->smu_lock); 595 614 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); 596 
- mutex_unlock(&pp_handle->pp_lock); 615 + mutex_unlock(&hwmgr->smu_lock); 597 616 return mode; 598 617 } 599 618 600 619 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) 601 620 { 602 - struct pp_hwmgr *hwmgr; 603 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 621 + struct pp_hwmgr *hwmgr = handle; 604 622 int ret = 0; 605 623 606 - ret = pp_check(pp_handle); 624 + ret = pp_check(hwmgr); 607 625 608 626 if (ret) 609 627 return ret; 610 - 611 - hwmgr = pp_handle->hwmgr; 612 628 613 629 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { 614 630 pr_info("%s was not implemented.\n", __func__); 615 631 return 0; 616 632 } 617 - mutex_lock(&pp_handle->pp_lock); 633 + mutex_lock(&hwmgr->smu_lock); 618 634 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent); 619 - mutex_unlock(&pp_handle->pp_lock); 635 + mutex_unlock(&hwmgr->smu_lock); 620 636 return ret; 621 637 } 622 638 623 639 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) 624 640 { 625 - struct pp_hwmgr *hwmgr; 626 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 641 + struct pp_hwmgr *hwmgr = handle; 627 642 int ret = 0; 628 643 629 - ret = pp_check(pp_handle); 644 + ret = pp_check(hwmgr); 630 645 631 646 if (ret) 632 647 return ret; 633 - 634 - hwmgr = pp_handle->hwmgr; 635 648 636 649 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { 637 650 pr_info("%s was not implemented.\n", __func__); 638 651 return 0; 639 652 } 640 653 641 - mutex_lock(&pp_handle->pp_lock); 654 + mutex_lock(&hwmgr->smu_lock); 642 655 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed); 643 - mutex_unlock(&pp_handle->pp_lock); 656 + mutex_unlock(&hwmgr->smu_lock); 644 657 return ret; 645 658 } 646 659 647 660 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) 648 661 { 649 - struct pp_hwmgr *hwmgr; 650 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 662 + struct pp_hwmgr *hwmgr = handle; 651 663 int ret = 
0; 652 664 653 - ret = pp_check(pp_handle); 665 + ret = pp_check(hwmgr); 654 666 655 667 if (ret) 656 668 return ret; 657 669 658 - hwmgr = pp_handle->hwmgr; 659 - 660 670 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) 661 671 return -EINVAL; 662 672 663 - mutex_lock(&pp_handle->pp_lock); 673 + mutex_lock(&hwmgr->smu_lock); 664 674 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); 665 - mutex_unlock(&pp_handle->pp_lock); 675 + mutex_unlock(&hwmgr->smu_lock); 666 676 return ret; 667 677 } 668 678 669 679 static int pp_dpm_get_pp_num_states(void *handle, 670 680 struct pp_states_info *data) 671 681 { 672 - struct pp_hwmgr *hwmgr; 682 + struct pp_hwmgr *hwmgr = handle; 673 683 int i; 674 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 675 684 int ret = 0; 676 685 677 686 memset(data, 0, sizeof(*data)); 678 687 679 - ret = pp_check(pp_handle); 688 + ret = pp_check(hwmgr); 680 689 681 690 if (ret) 682 691 return ret; 683 692 684 - hwmgr = pp_handle->hwmgr; 685 - 686 693 if (hwmgr->ps == NULL) 687 694 return -EINVAL; 688 695 689 - mutex_lock(&pp_handle->pp_lock); 696 + mutex_lock(&hwmgr->smu_lock); 690 697 691 698 data->nums = hwmgr->num_ps; 692 699 ··· 692 735 data->states[i] = POWER_STATE_TYPE_DEFAULT; 693 736 } 694 737 } 695 - mutex_unlock(&pp_handle->pp_lock); 738 + mutex_unlock(&hwmgr->smu_lock); 696 739 return 0; 697 740 } 698 741 699 742 static int pp_dpm_get_pp_table(void *handle, char **table) 700 743 { 701 - struct pp_hwmgr *hwmgr; 702 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 744 + struct pp_hwmgr *hwmgr = handle; 703 745 int ret = 0; 704 746 int size = 0; 705 747 706 - ret = pp_check(pp_handle); 748 + ret = pp_check(hwmgr); 707 749 708 750 if (ret) 709 751 return ret; 710 752 711 - hwmgr = pp_handle->hwmgr; 712 - 713 753 if (!hwmgr->soft_pp_table) 714 754 return -EINVAL; 715 755 716 - mutex_lock(&pp_handle->pp_lock); 756 + mutex_lock(&hwmgr->smu_lock); 717 757 *table = (char *)hwmgr->soft_pp_table; 718 758 size = 
hwmgr->soft_pp_table_size; 719 - mutex_unlock(&pp_handle->pp_lock); 759 + mutex_unlock(&hwmgr->smu_lock); 720 760 return size; 721 761 } 722 762 723 763 static int amd_powerplay_reset(void *handle) 724 764 { 725 - struct pp_instance *instance = (struct pp_instance *)handle; 765 + struct pp_hwmgr *hwmgr = handle; 726 766 int ret; 727 767 728 - ret = pp_check(instance); 768 + ret = pp_check(hwmgr); 729 769 if (ret) 730 770 return ret; 731 771 732 - ret = pp_hw_fini(instance); 772 + ret = pp_hw_fini(hwmgr); 733 773 if (ret) 734 774 return ret; 735 775 736 - ret = hwmgr_hw_init(instance); 776 + ret = hwmgr_hw_init(hwmgr); 737 777 if (ret) 738 778 return ret; 739 779 740 - return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL); 780 + return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL); 741 781 } 742 782 743 783 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) 744 784 { 745 - struct pp_hwmgr *hwmgr; 746 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 785 + struct pp_hwmgr *hwmgr = handle; 747 786 int ret = 0; 748 787 749 - ret = pp_check(pp_handle); 788 + ret = pp_check(hwmgr); 750 789 751 790 if (ret) 752 791 return ret; 753 792 754 - hwmgr = pp_handle->hwmgr; 755 - mutex_lock(&pp_handle->pp_lock); 793 + mutex_lock(&hwmgr->smu_lock); 756 794 if (!hwmgr->hardcode_pp_table) { 757 795 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, 758 796 hwmgr->soft_pp_table_size, 759 797 GFP_KERNEL); 760 798 if (!hwmgr->hardcode_pp_table) { 761 - mutex_unlock(&pp_handle->pp_lock); 799 + mutex_unlock(&hwmgr->smu_lock); 762 800 return -ENOMEM; 763 801 } 764 802 } ··· 761 809 memcpy(hwmgr->hardcode_pp_table, buf, size); 762 810 763 811 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; 764 - mutex_unlock(&pp_handle->pp_lock); 812 + mutex_unlock(&hwmgr->smu_lock); 765 813 766 814 ret = amd_powerplay_reset(handle); 767 815 if (ret) ··· 779 827 static int pp_dpm_force_clock_level(void *handle, 780 828 enum 
pp_clock_type type, uint32_t mask) 781 829 { 782 - struct pp_hwmgr *hwmgr; 783 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 830 + struct pp_hwmgr *hwmgr = handle; 784 831 int ret = 0; 785 832 786 - ret = pp_check(pp_handle); 833 + ret = pp_check(hwmgr); 787 834 788 835 if (ret) 789 836 return ret; 790 - 791 - hwmgr = pp_handle->hwmgr; 792 837 793 838 if (hwmgr->hwmgr_func->force_clock_level == NULL) { 794 839 pr_info("%s was not implemented.\n", __func__); 795 840 return 0; 796 841 } 797 - mutex_lock(&pp_handle->pp_lock); 842 + mutex_lock(&hwmgr->smu_lock); 798 843 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) 799 844 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); 800 845 else 801 846 ret = -EINVAL; 802 - mutex_unlock(&pp_handle->pp_lock); 847 + mutex_unlock(&hwmgr->smu_lock); 803 848 return ret; 804 849 } 805 850 806 851 static int pp_dpm_print_clock_levels(void *handle, 807 852 enum pp_clock_type type, char *buf) 808 853 { 809 - struct pp_hwmgr *hwmgr; 810 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 854 + struct pp_hwmgr *hwmgr = handle; 811 855 int ret = 0; 812 856 813 - ret = pp_check(pp_handle); 857 + ret = pp_check(hwmgr); 814 858 815 859 if (ret) 816 860 return ret; 817 - 818 - hwmgr = pp_handle->hwmgr; 819 861 820 862 if (hwmgr->hwmgr_func->print_clock_levels == NULL) { 821 863 pr_info("%s was not implemented.\n", __func__); 822 864 return 0; 823 865 } 824 - mutex_lock(&pp_handle->pp_lock); 866 + mutex_lock(&hwmgr->smu_lock); 825 867 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); 826 - mutex_unlock(&pp_handle->pp_lock); 868 + mutex_unlock(&hwmgr->smu_lock); 827 869 return ret; 828 870 } 829 871 830 872 static int pp_dpm_get_sclk_od(void *handle) 831 873 { 832 - struct pp_hwmgr *hwmgr; 833 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 874 + struct pp_hwmgr *hwmgr = handle; 834 875 int ret = 0; 835 876 836 - ret = pp_check(pp_handle); 877 + ret = pp_check(hwmgr); 
837 878 838 879 if (ret) 839 880 return ret; 840 - 841 - hwmgr = pp_handle->hwmgr; 842 881 843 882 if (hwmgr->hwmgr_func->get_sclk_od == NULL) { 844 883 pr_info("%s was not implemented.\n", __func__); 845 884 return 0; 846 885 } 847 - mutex_lock(&pp_handle->pp_lock); 886 + mutex_lock(&hwmgr->smu_lock); 848 887 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr); 849 - mutex_unlock(&pp_handle->pp_lock); 888 + mutex_unlock(&hwmgr->smu_lock); 850 889 return ret; 851 890 } 852 891 853 892 static int pp_dpm_set_sclk_od(void *handle, uint32_t value) 854 893 { 855 - struct pp_hwmgr *hwmgr; 856 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 894 + struct pp_hwmgr *hwmgr = handle; 857 895 int ret = 0; 858 896 859 - ret = pp_check(pp_handle); 897 + ret = pp_check(hwmgr); 860 898 861 899 if (ret) 862 900 return ret; 863 - 864 - hwmgr = pp_handle->hwmgr; 865 901 866 902 if (hwmgr->hwmgr_func->set_sclk_od == NULL) { 867 903 pr_info("%s was not implemented.\n", __func__); 868 904 return 0; 869 905 } 870 906 871 - mutex_lock(&pp_handle->pp_lock); 907 + mutex_lock(&hwmgr->smu_lock); 872 908 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); 873 - mutex_unlock(&pp_handle->pp_lock); 909 + mutex_unlock(&hwmgr->smu_lock); 874 910 return ret; 875 911 } 876 912 877 913 static int pp_dpm_get_mclk_od(void *handle) 878 914 { 879 - struct pp_hwmgr *hwmgr; 880 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 915 + struct pp_hwmgr *hwmgr = handle; 881 916 int ret = 0; 882 917 883 - ret = pp_check(pp_handle); 918 + ret = pp_check(hwmgr); 884 919 885 920 if (ret) 886 921 return ret; 887 - 888 - hwmgr = pp_handle->hwmgr; 889 922 890 923 if (hwmgr->hwmgr_func->get_mclk_od == NULL) { 891 924 pr_info("%s was not implemented.\n", __func__); 892 925 return 0; 893 926 } 894 - mutex_lock(&pp_handle->pp_lock); 927 + mutex_lock(&hwmgr->smu_lock); 895 928 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr); 896 - mutex_unlock(&pp_handle->pp_lock); 929 + mutex_unlock(&hwmgr->smu_lock); 897 
930 return ret; 898 931 } 899 932 900 933 static int pp_dpm_set_mclk_od(void *handle, uint32_t value) 901 934 { 902 - struct pp_hwmgr *hwmgr; 903 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 935 + struct pp_hwmgr *hwmgr = handle; 904 936 int ret = 0; 905 937 906 - ret = pp_check(pp_handle); 938 + ret = pp_check(hwmgr); 907 939 908 940 if (ret) 909 941 return ret; 910 - 911 - hwmgr = pp_handle->hwmgr; 912 942 913 943 if (hwmgr->hwmgr_func->set_mclk_od == NULL) { 914 944 pr_info("%s was not implemented.\n", __func__); 915 945 return 0; 916 946 } 917 - mutex_lock(&pp_handle->pp_lock); 947 + mutex_lock(&hwmgr->smu_lock); 918 948 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); 919 - mutex_unlock(&pp_handle->pp_lock); 949 + mutex_unlock(&hwmgr->smu_lock); 920 950 return ret; 921 951 } 922 952 923 953 static int pp_dpm_read_sensor(void *handle, int idx, 924 954 void *value, int *size) 925 955 { 926 - struct pp_hwmgr *hwmgr; 927 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 956 + struct pp_hwmgr *hwmgr = handle; 928 957 int ret = 0; 929 958 930 - ret = pp_check(pp_handle); 959 + ret = pp_check(hwmgr); 931 960 if (ret) 932 961 return ret; 933 962 934 963 if (value == NULL) 935 964 return -EINVAL; 936 - 937 - hwmgr = pp_handle->hwmgr; 938 965 939 966 switch (idx) { 940 967 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: ··· 923 992 *((uint32_t *)value) = hwmgr->pstate_mclk; 924 993 return 0; 925 994 default: 926 - mutex_lock(&pp_handle->pp_lock); 995 + mutex_lock(&hwmgr->smu_lock); 927 996 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size); 928 - mutex_unlock(&pp_handle->pp_lock); 997 + mutex_unlock(&hwmgr->smu_lock); 929 998 return ret; 930 999 } 931 1000 } ··· 933 1002 static struct amd_vce_state* 934 1003 pp_dpm_get_vce_clock_state(void *handle, unsigned idx) 935 1004 { 936 - struct pp_hwmgr *hwmgr; 937 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1005 + struct pp_hwmgr *hwmgr = handle; 938 1006 int ret = 0; 
939 1007 940 - ret = pp_check(pp_handle); 1008 + ret = pp_check(hwmgr); 941 1009 942 1010 if (ret) 943 1011 return NULL; 944 - 945 - hwmgr = pp_handle->hwmgr; 946 1012 947 1013 if (hwmgr && idx < hwmgr->num_vce_state_tables) 948 1014 return &hwmgr->vce_states[idx]; ··· 948 1020 949 1021 static int pp_get_power_profile_mode(void *handle, char *buf) 950 1022 { 951 - struct pp_hwmgr *hwmgr; 952 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1023 + struct pp_hwmgr *hwmgr = handle; 953 1024 954 - if (!buf || pp_check(pp_handle)) 1025 + if (!buf || pp_check(hwmgr)) 955 1026 return -EINVAL; 956 - 957 - hwmgr = pp_handle->hwmgr; 958 1027 959 1028 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { 960 1029 pr_info("%s was not implemented.\n", __func__); ··· 963 1038 964 1039 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) 965 1040 { 966 - struct pp_hwmgr *hwmgr; 967 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1041 + struct pp_hwmgr *hwmgr = handle; 968 1042 int ret = -EINVAL; 969 1043 970 - if (pp_check(pp_handle)) 1044 + if (pp_check(hwmgr)) 971 1045 return -EINVAL; 972 - 973 - hwmgr = pp_handle->hwmgr; 974 1046 975 1047 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { 976 1048 pr_info("%s was not implemented.\n", __func__); 977 1049 return -EINVAL; 978 1050 } 979 - mutex_lock(&pp_handle->pp_lock); 1051 + mutex_lock(&hwmgr->smu_lock); 980 1052 if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) 981 1053 ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size); 982 - mutex_unlock(&pp_handle->pp_lock); 1054 + mutex_unlock(&hwmgr->smu_lock); 983 1055 return ret; 984 1056 } 985 1057 986 1058 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size) 987 1059 { 988 - struct pp_hwmgr *hwmgr; 989 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1060 + struct pp_hwmgr *hwmgr = handle; 990 1061 991 - if (pp_check(pp_handle)) 1062 + if 
(pp_check(hwmgr)) 992 1063 return -EINVAL; 993 - 994 - hwmgr = pp_handle->hwmgr; 995 1064 996 1065 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { 997 1066 pr_info("%s was not implemented.\n", __func__); ··· 998 1079 static int pp_dpm_switch_power_profile(void *handle, 999 1080 enum PP_SMC_POWER_PROFILE type, bool en) 1000 1081 { 1001 - struct pp_hwmgr *hwmgr; 1002 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1082 + struct pp_hwmgr *hwmgr = handle; 1003 1083 long workload; 1004 1084 uint32_t index; 1005 1085 1006 - if (pp_check(pp_handle)) 1086 + if (pp_check(hwmgr)) 1007 1087 return -EINVAL; 1008 - 1009 - hwmgr = pp_handle->hwmgr; 1010 1088 1011 1089 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { 1012 1090 pr_info("%s was not implemented.\n", __func__); ··· 1013 1097 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1014 1098 return -EINVAL; 1015 1099 1016 - mutex_lock(&pp_handle->pp_lock); 1100 + mutex_lock(&hwmgr->smu_lock); 1017 1101 1018 1102 if (!en) { 1019 1103 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); ··· 1029 1113 1030 1114 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1031 1115 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); 1032 - mutex_unlock(&pp_handle->pp_lock); 1116 + mutex_unlock(&hwmgr->smu_lock); 1033 1117 1034 1118 return 0; 1035 1119 } ··· 1041 1125 uint32_t mc_addr_hi, 1042 1126 uint32_t size) 1043 1127 { 1044 - struct pp_hwmgr *hwmgr; 1045 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1128 + struct pp_hwmgr *hwmgr = handle; 1046 1129 int ret = 0; 1047 1130 1048 - ret = pp_check(pp_handle); 1131 + ret = pp_check(hwmgr); 1049 1132 1050 1133 if (ret) 1051 1134 return ret; 1052 - 1053 - hwmgr = pp_handle->hwmgr; 1054 1135 1055 1136 if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) { 1056 1137 pr_info("%s was not implemented.\n", __func__); 1057 1138 return -EINVAL; 1058 1139 } 1059 1140 1060 - mutex_lock(&pp_handle->pp_lock); 1141 + 
mutex_lock(&hwmgr->smu_lock); 1061 1142 1062 1143 ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low, 1063 1144 virtual_addr_hi, mc_addr_low, mc_addr_hi, 1064 1145 size); 1065 1146 1066 - mutex_unlock(&pp_handle->pp_lock); 1147 + mutex_unlock(&hwmgr->smu_lock); 1067 1148 1068 1149 return ret; 1069 1150 } 1070 1151 1071 1152 static int pp_set_power_limit(void *handle, uint32_t limit) 1072 1153 { 1073 - struct pp_hwmgr *hwmgr; 1074 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1154 + struct pp_hwmgr *hwmgr = handle; 1075 1155 int ret = 0; 1076 1156 1077 - ret = pp_check(pp_handle); 1157 + ret = pp_check(hwmgr); 1078 1158 1079 1159 if (ret) 1080 1160 return ret; 1081 - 1082 - hwmgr = pp_handle->hwmgr; 1083 1161 1084 1162 if (hwmgr->hwmgr_func->set_power_limit == NULL) { 1085 1163 pr_info("%s was not implemented.\n", __func__); ··· 1086 1176 if (limit > hwmgr->default_power_limit) 1087 1177 return -EINVAL; 1088 1178 1089 - mutex_lock(&pp_handle->pp_lock); 1179 + mutex_lock(&hwmgr->smu_lock); 1090 1180 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit); 1091 1181 hwmgr->power_limit = limit; 1092 - mutex_unlock(&pp_handle->pp_lock); 1182 + mutex_unlock(&hwmgr->smu_lock); 1093 1183 return ret; 1094 1184 } 1095 1185 1096 1186 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) 1097 1187 { 1098 - struct pp_hwmgr *hwmgr; 1099 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1188 + struct pp_hwmgr *hwmgr = handle; 1100 1189 int ret = 0; 1101 1190 1102 - ret = pp_check(pp_handle); 1191 + ret = pp_check(hwmgr); 1103 1192 1104 1193 if (ret) 1105 1194 return ret; ··· 1106 1197 if (limit == NULL) 1107 1198 return -EINVAL; 1108 1199 1109 - hwmgr = pp_handle->hwmgr; 1110 - 1111 - mutex_lock(&pp_handle->pp_lock); 1200 + mutex_lock(&hwmgr->smu_lock); 1112 1201 1113 1202 if (default_limit) 1114 1203 *limit = hwmgr->default_power_limit; 1115 1204 else 1116 1205 *limit = hwmgr->power_limit; 1117 1206 1118 - 
mutex_unlock(&pp_handle->pp_lock); 1207 + mutex_unlock(&hwmgr->smu_lock); 1119 1208 1120 1209 return ret; 1121 1210 } ··· 1121 1214 static int pp_display_configuration_change(void *handle, 1122 1215 const struct amd_pp_display_configuration *display_config) 1123 1216 { 1124 - struct pp_hwmgr *hwmgr; 1125 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1217 + struct pp_hwmgr *hwmgr = handle; 1126 1218 int ret = 0; 1127 1219 1128 - ret = pp_check(pp_handle); 1220 + ret = pp_check(hwmgr); 1129 1221 1130 1222 if (ret) 1131 1223 return ret; 1132 1224 1133 - hwmgr = pp_handle->hwmgr; 1134 - mutex_lock(&pp_handle->pp_lock); 1225 + mutex_lock(&hwmgr->smu_lock); 1135 1226 phm_store_dal_configuration_data(hwmgr, display_config); 1136 - mutex_unlock(&pp_handle->pp_lock); 1227 + mutex_unlock(&hwmgr->smu_lock); 1137 1228 return 0; 1138 1229 } 1139 1230 1140 1231 static int pp_get_display_power_level(void *handle, 1141 1232 struct amd_pp_simple_clock_info *output) 1142 1233 { 1143 - struct pp_hwmgr *hwmgr; 1144 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1234 + struct pp_hwmgr *hwmgr = handle; 1145 1235 int ret = 0; 1146 1236 1147 - ret = pp_check(pp_handle); 1237 + ret = pp_check(hwmgr); 1148 1238 1149 1239 if (ret) 1150 1240 return ret; 1151 1241 1152 - hwmgr = pp_handle->hwmgr; 1153 - 1154 1242 if (output == NULL) 1155 1243 return -EINVAL; 1156 1244 1157 - mutex_lock(&pp_handle->pp_lock); 1245 + mutex_lock(&hwmgr->smu_lock); 1158 1246 ret = phm_get_dal_power_level(hwmgr, output); 1159 - mutex_unlock(&pp_handle->pp_lock); 1247 + mutex_unlock(&hwmgr->smu_lock); 1160 1248 return ret; 1161 1249 } 1162 1250 ··· 1160 1258 { 1161 1259 struct amd_pp_simple_clock_info simple_clocks; 1162 1260 struct pp_clock_info hw_clocks; 1163 - struct pp_hwmgr *hwmgr; 1164 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1261 + struct pp_hwmgr *hwmgr = handle; 1165 1262 int ret = 0; 1166 1263 1167 - ret = pp_check(pp_handle); 1264 + ret = 
pp_check(hwmgr); 1168 1265 1169 1266 if (ret) 1170 1267 return ret; 1171 1268 1172 - hwmgr = pp_handle->hwmgr; 1173 - 1174 - mutex_lock(&pp_handle->pp_lock); 1269 + mutex_lock(&hwmgr->smu_lock); 1175 1270 1176 1271 phm_get_dal_power_level(hwmgr, &simple_clocks); 1177 1272 ··· 1182 1283 1183 1284 if (ret) { 1184 1285 pr_info("Error in phm_get_clock_info \n"); 1185 - mutex_unlock(&pp_handle->pp_lock); 1286 + mutex_unlock(&hwmgr->smu_lock); 1186 1287 return -EINVAL; 1187 1288 } 1188 1289 ··· 1202 1303 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1203 1304 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1204 1305 } 1205 - mutex_unlock(&pp_handle->pp_lock); 1306 + mutex_unlock(&hwmgr->smu_lock); 1206 1307 return 0; 1207 1308 } 1208 1309 1209 1310 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) 1210 1311 { 1211 - struct pp_hwmgr *hwmgr; 1212 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1312 + struct pp_hwmgr *hwmgr = handle; 1213 1313 int ret = 0; 1214 1314 1215 - ret = pp_check(pp_handle); 1315 + ret = pp_check(hwmgr); 1216 1316 1217 1317 if (ret) 1218 1318 return ret; 1219 1319 1220 - hwmgr = pp_handle->hwmgr; 1221 - 1222 1320 if (clocks == NULL) 1223 1321 return -EINVAL; 1224 1322 1225 - mutex_lock(&pp_handle->pp_lock); 1323 + mutex_lock(&hwmgr->smu_lock); 1226 1324 ret = phm_get_clock_by_type(hwmgr, type, clocks); 1227 - mutex_unlock(&pp_handle->pp_lock); 1325 + mutex_unlock(&hwmgr->smu_lock); 1228 1326 return ret; 1229 1327 } 1230 1328 ··· 1229 1333 enum amd_pp_clock_type type, 1230 1334 struct pp_clock_levels_with_latency *clocks) 1231 1335 { 1232 - struct pp_hwmgr *hwmgr; 1233 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1336 + struct pp_hwmgr *hwmgr = handle; 1234 1337 int ret = 0; 1235 1338 1236 - ret = pp_check(pp_handle); 1339 + ret = pp_check(hwmgr); 1237 1340 if (ret) 1238 1341 return ret; 1239 1342 1240 1343 if (!clocks) 1241 1344 return 
-EINVAL; 1242 1345 1243 - mutex_lock(&pp_handle->pp_lock); 1244 - hwmgr = ((struct pp_instance *)handle)->hwmgr; 1346 + mutex_lock(&hwmgr->smu_lock); 1245 1347 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks); 1246 - mutex_unlock(&pp_handle->pp_lock); 1348 + mutex_unlock(&hwmgr->smu_lock); 1247 1349 return ret; 1248 1350 } 1249 1351 ··· 1249 1355 enum amd_pp_clock_type type, 1250 1356 struct pp_clock_levels_with_voltage *clocks) 1251 1357 { 1252 - struct pp_hwmgr *hwmgr; 1253 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1358 + struct pp_hwmgr *hwmgr = handle; 1254 1359 int ret = 0; 1255 1360 1256 - ret = pp_check(pp_handle); 1361 + ret = pp_check(hwmgr); 1257 1362 if (ret) 1258 1363 return ret; 1259 1364 1260 1365 if (!clocks) 1261 1366 return -EINVAL; 1262 1367 1263 - hwmgr = ((struct pp_instance *)handle)->hwmgr; 1264 - 1265 - mutex_lock(&pp_handle->pp_lock); 1368 + mutex_lock(&hwmgr->smu_lock); 1266 1369 1267 1370 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); 1268 1371 1269 - mutex_unlock(&pp_handle->pp_lock); 1372 + mutex_unlock(&hwmgr->smu_lock); 1270 1373 return ret; 1271 1374 } 1272 1375 1273 1376 static int pp_set_watermarks_for_clocks_ranges(void *handle, 1274 1377 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) 1275 1378 { 1276 - struct pp_hwmgr *hwmgr; 1277 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1379 + struct pp_hwmgr *hwmgr = handle; 1278 1380 int ret = 0; 1279 1381 1280 - ret = pp_check(pp_handle); 1382 + ret = pp_check(hwmgr); 1281 1383 if (ret) 1282 1384 return ret; 1283 1385 1284 1386 if (!wm_with_clock_ranges) 1285 1387 return -EINVAL; 1286 1388 1287 - hwmgr = ((struct pp_instance *)handle)->hwmgr; 1288 - 1289 - mutex_lock(&pp_handle->pp_lock); 1389 + mutex_lock(&hwmgr->smu_lock); 1290 1390 ret = phm_set_watermarks_for_clocks_ranges(hwmgr, 1291 1391 wm_with_clock_ranges); 1292 - mutex_unlock(&pp_handle->pp_lock); 1392 + mutex_unlock(&hwmgr->smu_lock); 1293 
1393 1294 1394 return ret; 1295 1395 } ··· 1291 1403 static int pp_display_clock_voltage_request(void *handle, 1292 1404 struct pp_display_clock_request *clock) 1293 1405 { 1294 - struct pp_hwmgr *hwmgr; 1295 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1406 + struct pp_hwmgr *hwmgr = handle; 1296 1407 int ret = 0; 1297 1408 1298 - ret = pp_check(pp_handle); 1409 + ret = pp_check(hwmgr); 1299 1410 if (ret) 1300 1411 return ret; 1301 1412 1302 1413 if (!clock) 1303 1414 return -EINVAL; 1304 1415 1305 - hwmgr = ((struct pp_instance *)handle)->hwmgr; 1306 - 1307 - mutex_lock(&pp_handle->pp_lock); 1416 + mutex_lock(&hwmgr->smu_lock); 1308 1417 ret = phm_display_clock_voltage_request(hwmgr, clock); 1309 - mutex_unlock(&pp_handle->pp_lock); 1418 + mutex_unlock(&hwmgr->smu_lock); 1310 1419 1311 1420 return ret; 1312 1421 } ··· 1311 1426 static int pp_get_display_mode_validation_clocks(void *handle, 1312 1427 struct amd_pp_simple_clock_info *clocks) 1313 1428 { 1314 - struct pp_hwmgr *hwmgr; 1315 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1429 + struct pp_hwmgr *hwmgr = handle; 1316 1430 int ret = 0; 1317 1431 1318 - ret = pp_check(pp_handle); 1432 + ret = pp_check(hwmgr); 1319 1433 1320 1434 if (ret) 1321 1435 return ret; 1322 1436 1323 - hwmgr = pp_handle->hwmgr; 1324 - 1325 1437 if (clocks == NULL) 1326 1438 return -EINVAL; 1327 1439 1328 - mutex_lock(&pp_handle->pp_lock); 1440 + mutex_lock(&hwmgr->smu_lock); 1329 1441 1330 1442 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) 1331 1443 ret = phm_get_max_high_clocks(hwmgr, clocks); 1332 1444 1333 - mutex_unlock(&pp_handle->pp_lock); 1445 + mutex_unlock(&hwmgr->smu_lock); 1334 1446 return ret; 1335 1447 } 1336 1448 1337 1449 static int pp_set_mmhub_powergating_by_smu(void *handle) 1338 1450 { 1339 - struct pp_hwmgr *hwmgr; 1340 - struct pp_instance *pp_handle = (struct pp_instance *)handle; 1451 + struct pp_hwmgr *hwmgr = 
handle; 1341 1452 int ret = 0; 1342 1453 1343 - ret = pp_check(pp_handle); 1454 + ret = pp_check(hwmgr); 1344 1455 1345 1456 if (ret) 1346 1457 return ret; 1347 - 1348 - hwmgr = pp_handle->hwmgr; 1349 1458 1350 1459 if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) { 1351 1460 pr_info("%s was not implemented.\n", __func__); ··· 1349 1470 return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr); 1350 1471 } 1351 1472 1352 - const struct amd_pm_funcs pp_dpm_funcs = { 1473 + static const struct amd_pm_funcs pp_dpm_funcs = { 1353 1474 .load_firmware = pp_dpm_load_fw, 1354 1475 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, 1355 1476 .force_performance_level = pp_dpm_force_performance_level,
+12 -38
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
··· 116 116 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 117 117 } 118 118 119 - int hwmgr_early_init(struct pp_instance *handle) 119 + int hwmgr_early_init(struct pp_hwmgr *hwmgr) 120 120 { 121 - struct pp_hwmgr *hwmgr; 122 - 123 - if (handle == NULL) 121 + if (hwmgr == NULL) 124 122 return -EINVAL; 125 123 126 - hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL); 127 - if (hwmgr == NULL) 128 - return -ENOMEM; 129 - 130 - handle->hwmgr = hwmgr; 131 - hwmgr->adev = handle->parent; 132 - hwmgr->device = handle->device; 133 - hwmgr->chip_family = ((struct amdgpu_device *)handle->parent)->family; 134 - hwmgr->chip_id = ((struct amdgpu_device *)handle->parent)->asic_type; 135 - hwmgr->feature_mask = amdgpu_pp_feature_mask; 136 124 hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; 137 125 hwmgr->power_source = PP_PowerSource_AC; 138 126 hwmgr->pp_table_version = PP_TABLE_V1; ··· 208 220 return 0; 209 221 } 210 222 211 - int hwmgr_hw_init(struct pp_instance *handle) 223 + int hwmgr_hw_init(struct pp_hwmgr *hwmgr) 212 224 { 213 - struct pp_hwmgr *hwmgr; 214 225 int ret = 0; 215 226 216 - if (handle == NULL) 227 + if (hwmgr == NULL) 217 228 return -EINVAL; 218 - 219 - hwmgr = handle->hwmgr; 220 229 221 230 if (hwmgr->pptable_func == NULL || 222 231 hwmgr->pptable_func->pptable_init == NULL || ··· 260 275 return ret; 261 276 } 262 277 263 - int hwmgr_hw_fini(struct pp_instance *handle) 278 + int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) 264 279 { 265 - struct pp_hwmgr *hwmgr; 266 - 267 - if (handle == NULL || handle->hwmgr == NULL) 280 + if (hwmgr == NULL) 268 281 return -EINVAL; 269 - 270 - hwmgr = handle->hwmgr; 271 282 272 283 phm_stop_thermal_controller(hwmgr); 273 284 psm_set_boot_states(hwmgr); ··· 278 297 return psm_fini_power_state_table(hwmgr); 279 298 } 280 299 281 - int hwmgr_hw_suspend(struct pp_instance *handle) 300 + int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr) 282 301 { 283 - struct pp_hwmgr *hwmgr; 284 302 int ret = 0; 285 303 286 - if (handle == NULL 
|| handle->hwmgr == NULL) 304 + if (hwmgr == NULL) 287 305 return -EINVAL; 288 306 289 - hwmgr = handle->hwmgr; 290 307 phm_disable_smc_firmware_ctf(hwmgr); 291 308 ret = psm_set_boot_states(hwmgr); 292 309 if (ret) ··· 297 318 return ret; 298 319 } 299 320 300 - int hwmgr_hw_resume(struct pp_instance *handle) 321 + int hwmgr_hw_resume(struct pp_hwmgr *hwmgr) 301 322 { 302 - struct pp_hwmgr *hwmgr; 303 323 int ret = 0; 304 324 305 - if (handle == NULL || handle->hwmgr == NULL) 325 + if (hwmgr == NULL) 306 326 return -EINVAL; 307 327 308 - hwmgr = handle->hwmgr; 309 328 ret = phm_setup_asic(hwmgr); 310 329 if (ret) 311 330 return ret; ··· 338 361 } 339 362 } 340 363 341 - int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id, 364 + int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, 342 365 enum amd_pm_state_type *user_state) 343 366 { 344 367 int ret = 0; 345 - struct pp_hwmgr *hwmgr; 346 368 347 - if (handle == NULL || handle->hwmgr == NULL) 369 + if (hwmgr == NULL) 348 370 return -EINVAL; 349 - 350 - hwmgr = handle->hwmgr; 351 371 352 372 switch (task_id) { 353 373 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+8 -8
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
··· 25 25 26 26 #include <linux/seq_file.h> 27 27 #include "amd_powerplay.h" 28 - #include "pp_instance.h" 29 28 #include "hardwaremanager.h" 30 29 #include "pp_power_source.h" 31 30 #include "hwmgr_ppt.h" ··· 33 34 #include "power_state.h" 34 35 #include "smu_helper.h" 35 36 36 - struct pp_instance; 37 37 struct pp_hwmgr; 38 38 struct phm_fan_speed_info; 39 39 struct pp_atomctrl_voltage_table; ··· 701 703 uint32_t chip_family; 702 704 uint32_t chip_id; 703 705 uint32_t smu_version; 706 + bool pm_en; 707 + struct mutex smu_lock; 704 708 705 709 uint32_t pp_table_version; 706 710 void *device; ··· 769 769 cgs_irq_handler_func_t handler; 770 770 }; 771 771 772 - extern int hwmgr_early_init(struct pp_instance *handle); 773 - extern int hwmgr_hw_init(struct pp_instance *handle); 774 - extern int hwmgr_hw_fini(struct pp_instance *handle); 775 - extern int hwmgr_hw_suspend(struct pp_instance *handle); 776 - extern int hwmgr_hw_resume(struct pp_instance *handle); 777 - extern int hwmgr_handle_task(struct pp_instance *handle, 772 + extern int hwmgr_early_init(struct pp_hwmgr *hwmgr); 773 + extern int hwmgr_hw_init(struct pp_hwmgr *hwmgr); 774 + extern int hwmgr_hw_fini(struct pp_hwmgr *hwmgr); 775 + extern int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr); 776 + extern int hwmgr_hw_resume(struct pp_hwmgr *hwmgr); 777 + extern int hwmgr_handle_task(struct pp_hwmgr *hwmgr, 778 778 enum amd_pp_task task_id, 779 779 enum amd_pm_state_type *user_state); 780 780
-36
drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
··· 1 - /* 2 - * Copyright 2015 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - */ 23 - #ifndef _PP_INSTANCE_H_ 24 - #define _PP_INSTANCE_H_ 25 - 26 - struct pp_hwmgr; 27 - 28 - struct pp_instance { 29 - void *parent; /* e.g. amdgpu_device */ 30 - void *device; /* e.g. cgs_device */ 31 - bool pm_en; 32 - struct pp_hwmgr *hwmgr; 33 - struct mutex pp_lock; 34 - }; 35 - 36 - #endif