Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/swsmu: add smu14 ip support

Add initial swSMU support for smu 14 series ASIC.

v2: squash in build fixes and updates (Li Ma)
fix warnings (Alex)
v3: squash in updates (Alex)
v4: squash in updates (Alex)
v5: squash in avg/current power updates (Alex)

Signed-off-by: Li Ma <li.ma@amd.com>
Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Kenneth Feng and committed by
Alex Deucher
fe6cd915 cd6d69dd

+3110 -3
+1
drivers/gpu/drm/amd/pm/Makefile
··· 30 30 -I$(FULL_AMD_PATH)/pm/swsmu/smu11 \ 31 31 -I$(FULL_AMD_PATH)/pm/swsmu/smu12 \ 32 32 -I$(FULL_AMD_PATH)/pm/swsmu/smu13 \ 33 + -I$(FULL_AMD_PATH)/pm/swsmu/smu14 \ 33 34 -I$(FULL_AMD_PATH)/pm/powerplay/inc \ 34 35 -I$(FULL_AMD_PATH)/pm/powerplay/smumgr\ 35 36 -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr \
+1 -1
drivers/gpu/drm/amd/pm/swsmu/Makefile
··· 22 22 23 23 AMD_SWSMU_PATH = ../pm/swsmu 24 24 25 - SWSMU_LIBS = smu11 smu12 smu13 25 + SWSMU_LIBS = smu11 smu12 smu13 smu14 26 26 27 27 AMD_SWSMU = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/swsmu/,$(SWSMU_LIBS))) 28 28
+4
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 43 43 #include "smu_v13_0_5_ppt.h" 44 44 #include "smu_v13_0_6_ppt.h" 45 45 #include "smu_v13_0_7_ppt.h" 46 + #include "smu_v14_0_0_ppt.h" 46 47 #include "amd_pcie.h" 47 48 48 49 /* ··· 660 659 break; 661 660 case IP_VERSION(13, 0, 7): 662 661 smu_v13_0_7_set_ppt_funcs(smu); 662 + break; 663 + case IP_VERSION(14, 0, 0): 664 + smu_v14_0_0_set_ppt_funcs(smu); 663 665 break; 664 666 default: 665 667 return -EINVAL;
+6
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 1343 1343 * @init_pptable_microcode: Prepare the pptable microcode to upload via PSP 1344 1344 */ 1345 1345 int (*init_pptable_microcode)(struct smu_context *smu); 1346 + 1347 + /** 1348 + * @dpm_set_vpe_enable: Enable/disable VPE engine dynamic power 1349 + * management. 1350 + */ 1351 + int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); 1346 1352 }; 1347 1353 1348 1354 typedef enum {
+5 -2
drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
··· 253 253 __SMU_DUMMY_MAP(QueryValidMcaCeCount), \ 254 254 __SMU_DUMMY_MAP(McaBankDumpDW), \ 255 255 __SMU_DUMMY_MAP(McaBankCeDumpDW), \ 256 - __SMU_DUMMY_MAP(SelectPLPDMode), 256 + __SMU_DUMMY_MAP(SelectPLPDMode), \ 257 + __SMU_DUMMY_MAP(PowerUpVpe), \ 258 + __SMU_DUMMY_MAP(PowerDownVpe), 257 259 258 260 #undef __SMU_DUMMY_MAP 259 261 #define __SMU_DUMMY_MAP(type) SMU_MSG_##type ··· 417 415 __SMU_DUMMY_MAP(MEM_TEMP_READ), \ 418 416 __SMU_DUMMY_MAP(ATHUB_MMHUB_PG), \ 419 417 __SMU_DUMMY_MAP(BACO_CG), \ 420 - __SMU_DUMMY_MAP(SOC_CG), 418 + __SMU_DUMMY_MAP(SOC_CG), \ 419 + __SMU_DUMMY_MAP(LOW_POWER_DCNCLKS), 421 420 422 421 #undef __SMU_DUMMY_MAP 423 422 #define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
+230
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + #ifndef __SMU_V14_0_H__ 24 + #define __SMU_V14_0_H__ 25 + 26 + #include "amdgpu_smu.h" 27 + 28 + #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 + #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 30 + #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x6 31 + 32 + #define FEATURE_MASK(feature) (1ULL << feature) 33 + 34 + /* MP Apertures */ 35 + #define MP0_Public 0x03800000 36 + #define MP0_SRAM 0x03900000 37 + #define MP1_Public 0x03b00000 38 + #define MP1_SRAM 0x03c00004 39 + 40 + /* address block */ 41 + #define smnMP1_FIRMWARE_FLAGS 0x3010028 42 + #define smnMP1_PUB_CTRL 0x3010d10 43 + 44 + #define MAX_DPM_LEVELS 16 45 + #define MAX_PCIE_CONF 3 46 + 47 + struct smu_14_0_max_sustainable_clocks { 48 + uint32_t display_clock; 49 + uint32_t phy_clock; 50 + uint32_t pixel_clock; 51 + uint32_t uclock; 52 + uint32_t dcef_clock; 53 + uint32_t soc_clock; 54 + }; 55 + 56 + struct smu_14_0_dpm_clk_level { 57 + bool enabled; 58 + uint32_t value; 59 + }; 60 + 61 + struct smu_14_0_dpm_table { 62 + uint32_t min; /* MHz */ 63 + uint32_t max; /* MHz */ 64 + uint32_t count; 65 + bool is_fine_grained; 66 + struct smu_14_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS]; 67 + }; 68 + 69 + struct smu_14_0_pcie_table { 70 + uint8_t pcie_gen[MAX_PCIE_CONF]; 71 + uint8_t pcie_lane[MAX_PCIE_CONF]; 72 + uint16_t clk_freq[MAX_PCIE_CONF]; 73 + uint32_t num_of_link_levels; 74 + }; 75 + 76 + struct smu_14_0_dpm_tables { 77 + struct smu_14_0_dpm_table soc_table; 78 + struct smu_14_0_dpm_table gfx_table; 79 + struct smu_14_0_dpm_table uclk_table; 80 + struct smu_14_0_dpm_table eclk_table; 81 + struct smu_14_0_dpm_table vclk_table; 82 + struct smu_14_0_dpm_table dclk_table; 83 + struct smu_14_0_dpm_table dcef_table; 84 + struct smu_14_0_dpm_table pixel_table; 85 + struct smu_14_0_dpm_table display_table; 86 + struct smu_14_0_dpm_table phy_table; 87 + struct smu_14_0_dpm_table fclk_table; 88 + struct smu_14_0_pcie_table pcie_table; 89 + }; 90 + 91 + struct smu_14_0_dpm_context { 92 + 
struct smu_14_0_dpm_tables dpm_tables; 93 + uint32_t workload_policy_mask; 94 + uint32_t dcef_min_ds_clk; 95 + }; 96 + 97 + enum smu_14_0_power_state { 98 + SMU_14_0_POWER_STATE__D0 = 0, 99 + SMU_14_0_POWER_STATE__D1, 100 + SMU_14_0_POWER_STATE__D3, /* Sleep*/ 101 + SMU_14_0_POWER_STATE__D4, /* Hibernate*/ 102 + SMU_14_0_POWER_STATE__D5, /* Power off*/ 103 + }; 104 + 105 + struct smu_14_0_power_context { 106 + uint32_t power_source; 107 + uint8_t in_power_limit_boost_mode; 108 + enum smu_14_0_power_state power_state; 109 + }; 110 + 111 + #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) 112 + 113 + int smu_v14_0_init_microcode(struct smu_context *smu); 114 + 115 + void smu_v14_0_fini_microcode(struct smu_context *smu); 116 + 117 + int smu_v14_0_load_microcode(struct smu_context *smu); 118 + 119 + int smu_v14_0_init_smc_tables(struct smu_context *smu); 120 + 121 + int smu_v14_0_fini_smc_tables(struct smu_context *smu); 122 + 123 + int smu_v14_0_init_power(struct smu_context *smu); 124 + 125 + int smu_v14_0_fini_power(struct smu_context *smu); 126 + 127 + int smu_v14_0_check_fw_status(struct smu_context *smu); 128 + 129 + int smu_v14_0_setup_pptable(struct smu_context *smu); 130 + 131 + int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu); 132 + 133 + int smu_v14_0_check_fw_version(struct smu_context *smu); 134 + 135 + int smu_v14_0_set_driver_table_location(struct smu_context *smu); 136 + 137 + int smu_v14_0_set_tool_table_location(struct smu_context *smu); 138 + 139 + int smu_v14_0_notify_memory_pool_location(struct smu_context *smu); 140 + 141 + int smu_v14_0_system_features_control(struct smu_context *smu, 142 + bool en); 143 + 144 + int smu_v14_0_set_allowed_mask(struct smu_context *smu); 145 + 146 + int smu_v14_0_notify_display_change(struct smu_context *smu); 147 + 148 + int smu_v14_0_get_current_power_limit(struct smu_context *smu, 149 + uint32_t *power_limit); 150 + 151 + int smu_v14_0_set_power_limit(struct smu_context *smu, 152 + 
enum smu_ppt_limit_type limit_type, 153 + uint32_t limit); 154 + 155 + int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable); 156 + 157 + int smu_v14_0_register_irq_handler(struct smu_context *smu); 158 + 159 + int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, 160 + enum smu_baco_seq baco_seq); 161 + 162 + bool smu_v14_0_baco_is_support(struct smu_context *smu); 163 + 164 + enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu); 165 + 166 + int smu_v14_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); 167 + 168 + int smu_v14_0_baco_enter(struct smu_context *smu); 169 + int smu_v14_0_baco_exit(struct smu_context *smu); 170 + 171 + int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, 172 + uint32_t *min, uint32_t *max); 173 + 174 + int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, 175 + uint32_t min, uint32_t max); 176 + 177 + int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu, 178 + enum smu_clk_type clk_type, 179 + uint32_t min, 180 + uint32_t max); 181 + 182 + int smu_v14_0_set_performance_level(struct smu_context *smu, 183 + enum amd_dpm_forced_level level); 184 + 185 + int smu_v14_0_set_power_source(struct smu_context *smu, 186 + enum smu_power_src_type power_src); 187 + 188 + int smu_v14_0_set_single_dpm_table(struct smu_context *smu, 189 + enum smu_clk_type clk_type, 190 + struct smu_14_0_dpm_table *single_dpm_table); 191 + 192 + int smu_v14_0_gfx_ulv_control(struct smu_context *smu, 193 + bool enablement); 194 + 195 + int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, 196 + uint64_t event_arg); 197 + 198 + int smu_v14_0_set_vcn_enable(struct smu_context *smu, 199 + bool enable); 200 + 201 + int smu_v14_0_set_jpeg_enable(struct smu_context *smu, 202 + bool enable); 203 + 204 + int smu_v14_0_init_pptable_microcode(struct smu_context *smu); 205 + 206 + int 
smu_v14_0_run_btc(struct smu_context *smu); 207 + 208 + int smu_v14_0_gpo_control(struct smu_context *smu, 209 + bool enablement); 210 + 211 + int smu_v14_0_deep_sleep_control(struct smu_context *smu, 212 + bool enablement); 213 + 214 + int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu); 215 + 216 + int smu_v14_0_set_default_dpm_tables(struct smu_context *smu); 217 + 218 + int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu, 219 + void **table, 220 + uint32_t *size, 221 + uint32_t pptable_id); 222 + 223 + int smu_v14_0_od_edit_dpm_table(struct smu_context *smu, 224 + enum PP_OD_DPM_TABLE_COMMAND type, 225 + long input[], uint32_t size); 226 + 227 + void smu_v14_0_set_smu_mailbox_registers(struct smu_context *smu); 228 + 229 + #endif 230 + #endif
+30
drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile
··· 1 + # 2 + # Copyright 2023 Advanced Micro Devices, Inc. 3 + # 4 + # Permission is hereby granted, free of charge, to any person obtaining a 5 + # copy of this software and associated documentation files (the "Software"), 6 + # to deal in the Software without restriction, including without limitation 7 + # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + # and/or sell copies of the Software, and to permit persons to whom the 9 + # Software is furnished to do so, subject to the following conditions: 10 + # 11 + # The above copyright notice and this permission notice shall be included in 12 + # all copies or substantial portions of the Software. 13 + # 14 + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + # THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + # OTHER DEALINGS IN THE SOFTWARE. 21 + # 22 + # 23 + # Makefile for the 'smu manager' sub-component of powerplay. 24 + # It provides the smu management services for the driver. 25 + 26 + SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o 27 + 28 + AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR)) 29 + 30 + AMD_POWERPLAY_FILES += $(AMD_SWSMU_SMU14MGR)
+1727
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + */ 22 + 23 + #include <linux/firmware.h> 24 + #include <linux/module.h> 25 + #include <linux/pci.h> 26 + #include <linux/reboot.h> 27 + 28 + #define SWSMU_CODE_LAYER_L3 29 + 30 + #include "amdgpu.h" 31 + #include "amdgpu_smu.h" 32 + #include "atomfirmware.h" 33 + #include "amdgpu_atomfirmware.h" 34 + #include "amdgpu_atombios.h" 35 + #include "smu_v14_0.h" 36 + #include "soc15_common.h" 37 + #include "atom.h" 38 + #include "amdgpu_ras.h" 39 + #include "smu_cmn.h" 40 + 41 + #include "asic_reg/mp/mp_14_0_0_offset.h" 42 + #include "asic_reg/mp/mp_14_0_0_sh_mask.h" 43 + 44 + /* 45 + * DO NOT use these for err/warn/info/debug messages. 46 + * Use dev_err, dev_warn, dev_info and dev_dbg instead. 47 + * They are more MGPU friendly. 
48 + */ 49 + #undef pr_err 50 + #undef pr_warn 51 + #undef pr_info 52 + #undef pr_debug 53 + 54 + MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); 55 + 56 + int smu_v14_0_init_microcode(struct smu_context *smu) 57 + { 58 + struct amdgpu_device *adev = smu->adev; 59 + char fw_name[30]; 60 + char ucode_prefix[30]; 61 + int err = 0; 62 + const struct smc_firmware_header_v1_0 *hdr; 63 + const struct common_firmware_header *header; 64 + struct amdgpu_firmware_info *ucode = NULL; 65 + 66 + /* doesn't need to load smu firmware in IOV mode */ 67 + if (amdgpu_sriov_vf(adev)) 68 + return 0; 69 + 70 + amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); 71 + 72 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); 73 + 74 + err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name); 75 + if (err) 76 + goto out; 77 + 78 + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 79 + amdgpu_ucode_print_smc_hdr(&hdr->header); 80 + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 81 + 82 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 83 + ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 84 + ucode->ucode_id = AMDGPU_UCODE_ID_SMC; 85 + ucode->fw = adev->pm.fw; 86 + header = (const struct common_firmware_header *)ucode->fw->data; 87 + adev->firmware.fw_size += 88 + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 89 + } 90 + 91 + out: 92 + if (err) 93 + amdgpu_ucode_release(&adev->pm.fw); 94 + return err; 95 + } 96 + 97 + void smu_v14_0_fini_microcode(struct smu_context *smu) 98 + { 99 + struct amdgpu_device *adev = smu->adev; 100 + 101 + amdgpu_ucode_release(&adev->pm.fw); 102 + adev->pm.fw_version = 0; 103 + } 104 + 105 + int smu_v14_0_load_microcode(struct smu_context *smu) 106 + { 107 + #if 0 108 + struct amdgpu_device *adev = smu->adev; 109 + const uint32_t *src; 110 + const struct smc_firmware_header_v1_0 *hdr; 111 + uint32_t addr_start = MP1_SRAM; 112 + uint32_t i; 113 + uint32_t smc_fw_size; 114 
+ uint32_t mp1_fw_flags; 115 + 116 + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 117 + src = (const uint32_t *)(adev->pm.fw->data + 118 + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 119 + smc_fw_size = hdr->header.ucode_size_bytes; 120 + 121 + for (i = 1; i < smc_fw_size/4 - 1; i++) { 122 + WREG32_PCIE(addr_start, src[i]); 123 + addr_start += 4; 124 + } 125 + 126 + WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 127 + 1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK); 128 + WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 129 + 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); 130 + 131 + for (i = 0; i < adev->usec_timeout; i++) { 132 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 133 + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 134 + if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 135 + MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 136 + break; 137 + udelay(1); 138 + } 139 + 140 + if (i == adev->usec_timeout) 141 + return -ETIME; 142 + 143 + #endif 144 + return 0; 145 + 146 + } 147 + 148 + int smu_v14_0_init_pptable_microcode(struct smu_context *smu) 149 + { 150 + struct amdgpu_device *adev = smu->adev; 151 + struct amdgpu_firmware_info *ucode = NULL; 152 + uint32_t size = 0, pptable_id = 0; 153 + int ret = 0; 154 + void *table; 155 + 156 + /* doesn't need to load smu firmware in IOV mode */ 157 + if (amdgpu_sriov_vf(adev)) 158 + return 0; 159 + 160 + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 161 + return 0; 162 + 163 + if (!adev->scpm_enabled) 164 + return 0; 165 + 166 + /* override pptable_id from driver parameter */ 167 + if (amdgpu_smu_pptable_id >= 0) { 168 + pptable_id = amdgpu_smu_pptable_id; 169 + dev_info(adev->dev, "override pptable id %d\n", pptable_id); 170 + } else { 171 + pptable_id = smu->smu_table.boot_values.pp_table_id; 172 + } 173 + 174 + /* "pptable_id == 0" means vbios carries the pptable. 
*/ 175 + if (!pptable_id) 176 + return 0; 177 + 178 + ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id); 179 + if (ret) 180 + return ret; 181 + 182 + smu->pptable_firmware.data = table; 183 + smu->pptable_firmware.size = size; 184 + 185 + ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE]; 186 + ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE; 187 + ucode->fw = &smu->pptable_firmware; 188 + adev->firmware.fw_size += 189 + ALIGN(smu->pptable_firmware.size, PAGE_SIZE); 190 + 191 + return 0; 192 + } 193 + 194 + int smu_v14_0_check_fw_status(struct smu_context *smu) 195 + { 196 + struct amdgpu_device *adev = smu->adev; 197 + uint32_t mp1_fw_flags; 198 + 199 + mp1_fw_flags = RREG32_PCIE(MP1_Public | 200 + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 201 + 202 + if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 203 + MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 204 + return 0; 205 + 206 + return -EIO; 207 + } 208 + 209 + int smu_v14_0_check_fw_version(struct smu_context *smu) 210 + { 211 + struct amdgpu_device *adev = smu->adev; 212 + uint32_t if_version = 0xff, smu_version = 0xff; 213 + uint8_t smu_program, smu_major, smu_minor, smu_debug; 214 + int ret = 0; 215 + 216 + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 217 + if (ret) 218 + return ret; 219 + 220 + smu_program = (smu_version >> 24) & 0xff; 221 + smu_major = (smu_version >> 16) & 0xff; 222 + smu_minor = (smu_version >> 8) & 0xff; 223 + smu_debug = (smu_version >> 0) & 0xff; 224 + if (smu->is_apu) 225 + adev->pm.fw_version = smu_version; 226 + 227 + switch (adev->ip_versions[MP1_HWIP][0]) { 228 + case IP_VERSION(14, 0, 2): 229 + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 230 + break; 231 + case IP_VERSION(14, 0, 0): 232 + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; 233 + break; 234 + default: 235 + dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", 236 + adev->ip_versions[MP1_HWIP][0]); 237 
+ smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV; 238 + break; 239 + } 240 + 241 + if (adev->pm.fw) 242 + dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n", 243 + smu_program, smu_version, smu_major, smu_minor, smu_debug); 244 + 245 + /* 246 + * 1. if_version mismatch is not critical as our fw is designed 247 + * to be backward compatible. 248 + * 2. New fw usually brings some optimizations. But that's visible 249 + * only on the paired driver. 250 + * Considering above, we just leave user a verbal message instead 251 + * of halt driver loading. 252 + */ 253 + if (if_version != smu->smc_driver_if_version) { 254 + dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 255 + "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 256 + smu->smc_driver_if_version, if_version, 257 + smu_program, smu_version, smu_major, smu_minor, smu_debug); 258 + dev_info(adev->dev, "SMU driver if version not matched\n"); 259 + } 260 + 261 + return ret; 262 + } 263 + 264 + static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 265 + { 266 + struct amdgpu_device *adev = smu->adev; 267 + uint32_t ppt_offset_bytes; 268 + const struct smc_firmware_header_v2_0 *v2; 269 + 270 + v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; 271 + 272 + ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); 273 + *size = le32_to_cpu(v2->ppt_size_bytes); 274 + *table = (uint8_t *)v2 + ppt_offset_bytes; 275 + 276 + return 0; 277 + } 278 + 279 + static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table, 280 + uint32_t *size, uint32_t pptable_id) 281 + { 282 + struct amdgpu_device *adev = smu->adev; 283 + const struct smc_firmware_header_v2_1 *v2_1; 284 + struct smc_soft_pptable_entry *entries; 285 + uint32_t pptable_count = 0; 286 + int i = 0; 287 + 288 + v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; 289 + entries = (struct 
smc_soft_pptable_entry *) 290 + ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); 291 + pptable_count = le32_to_cpu(v2_1->pptable_count); 292 + for (i = 0; i < pptable_count; i++) { 293 + if (le32_to_cpu(entries[i].id) == pptable_id) { 294 + *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); 295 + *size = le32_to_cpu(entries[i].ppt_size_bytes); 296 + break; 297 + } 298 + } 299 + 300 + if (i == pptable_count) 301 + return -EINVAL; 302 + 303 + return 0; 304 + } 305 + 306 + static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size) 307 + { 308 + struct amdgpu_device *adev = smu->adev; 309 + uint16_t atom_table_size; 310 + uint8_t frev, crev; 311 + int ret, index; 312 + 313 + dev_info(adev->dev, "use vbios provided pptable\n"); 314 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 315 + powerplayinfo); 316 + 317 + ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev, 318 + (uint8_t **)table); 319 + if (ret) 320 + return ret; 321 + 322 + if (size) 323 + *size = atom_table_size; 324 + 325 + return 0; 326 + } 327 + 328 + int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu, 329 + void **table, 330 + uint32_t *size, 331 + uint32_t pptable_id) 332 + { 333 + const struct smc_firmware_header_v1_0 *hdr; 334 + struct amdgpu_device *adev = smu->adev; 335 + uint16_t version_major, version_minor; 336 + int ret; 337 + 338 + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 339 + if (!hdr) 340 + return -EINVAL; 341 + 342 + dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id); 343 + 344 + version_major = le16_to_cpu(hdr->header.header_version_major); 345 + version_minor = le16_to_cpu(hdr->header.header_version_minor); 346 + if (version_major != 2) { 347 + dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n", 348 + version_major, version_minor); 349 + return -EINVAL; 350 + } 351 + 352 + switch (version_minor) { 
353 + case 0: 354 + ret = smu_v14_0_set_pptable_v2_0(smu, table, size); 355 + break; 356 + case 1: 357 + ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id); 358 + break; 359 + default: 360 + ret = -EINVAL; 361 + break; 362 + } 363 + 364 + return ret; 365 + } 366 + 367 + int smu_v14_0_setup_pptable(struct smu_context *smu) 368 + { 369 + struct amdgpu_device *adev = smu->adev; 370 + uint32_t size = 0, pptable_id = 0; 371 + void *table; 372 + int ret = 0; 373 + 374 + /* override pptable_id from driver parameter */ 375 + if (amdgpu_smu_pptable_id >= 0) { 376 + pptable_id = amdgpu_smu_pptable_id; 377 + dev_info(adev->dev, "override pptable id %d\n", pptable_id); 378 + } else { 379 + pptable_id = smu->smu_table.boot_values.pp_table_id; 380 + } 381 + 382 + /* force using vbios pptable in sriov mode */ 383 + if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1)) 384 + ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size); 385 + else 386 + ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id); 387 + 388 + if (ret) 389 + return ret; 390 + 391 + if (!smu->smu_table.power_play_table) 392 + smu->smu_table.power_play_table = table; 393 + if (!smu->smu_table.power_play_table_size) 394 + smu->smu_table.power_play_table_size = size; 395 + 396 + return 0; 397 + } 398 + 399 + int smu_v14_0_init_smc_tables(struct smu_context *smu) 400 + { 401 + struct smu_table_context *smu_table = &smu->smu_table; 402 + struct smu_table *tables = smu_table->tables; 403 + int ret = 0; 404 + 405 + smu_table->driver_pptable = 406 + kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); 407 + if (!smu_table->driver_pptable) { 408 + ret = -ENOMEM; 409 + goto err0_out; 410 + } 411 + 412 + smu_table->max_sustainable_clocks = 413 + kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL); 414 + if (!smu_table->max_sustainable_clocks) { 415 + ret = -ENOMEM; 416 + goto err1_out; 417 + } 418 + 419 + if (tables[SMU_TABLE_OVERDRIVE].size) { 420 + 
smu_table->overdrive_table = 421 + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 422 + if (!smu_table->overdrive_table) { 423 + ret = -ENOMEM; 424 + goto err2_out; 425 + } 426 + 427 + smu_table->boot_overdrive_table = 428 + kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 429 + if (!smu_table->boot_overdrive_table) { 430 + ret = -ENOMEM; 431 + goto err3_out; 432 + } 433 + } 434 + 435 + smu_table->combo_pptable = 436 + kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); 437 + if (!smu_table->combo_pptable) { 438 + ret = -ENOMEM; 439 + goto err4_out; 440 + } 441 + 442 + return 0; 443 + 444 + err4_out: 445 + kfree(smu_table->boot_overdrive_table); 446 + err3_out: 447 + kfree(smu_table->overdrive_table); 448 + err2_out: 449 + kfree(smu_table->max_sustainable_clocks); 450 + err1_out: 451 + kfree(smu_table->driver_pptable); 452 + err0_out: 453 + return ret; 454 + } 455 + 456 + int smu_v14_0_fini_smc_tables(struct smu_context *smu) 457 + { 458 + struct smu_table_context *smu_table = &smu->smu_table; 459 + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 460 + 461 + kfree(smu_table->gpu_metrics_table); 462 + kfree(smu_table->combo_pptable); 463 + kfree(smu_table->boot_overdrive_table); 464 + kfree(smu_table->overdrive_table); 465 + kfree(smu_table->max_sustainable_clocks); 466 + kfree(smu_table->driver_pptable); 467 + smu_table->gpu_metrics_table = NULL; 468 + smu_table->combo_pptable = NULL; 469 + smu_table->boot_overdrive_table = NULL; 470 + smu_table->overdrive_table = NULL; 471 + smu_table->max_sustainable_clocks = NULL; 472 + smu_table->driver_pptable = NULL; 473 + kfree(smu_table->hardcode_pptable); 474 + smu_table->hardcode_pptable = NULL; 475 + 476 + kfree(smu_table->ecc_table); 477 + kfree(smu_table->metrics_table); 478 + kfree(smu_table->watermarks_table); 479 + smu_table->ecc_table = NULL; 480 + smu_table->metrics_table = NULL; 481 + smu_table->watermarks_table = NULL; 482 + smu_table->metrics_time = 0; 483 + 484 + 
kfree(smu_dpm->dpm_context); 485 + kfree(smu_dpm->golden_dpm_context); 486 + kfree(smu_dpm->dpm_current_power_state); 487 + kfree(smu_dpm->dpm_request_power_state); 488 + smu_dpm->dpm_context = NULL; 489 + smu_dpm->golden_dpm_context = NULL; 490 + smu_dpm->dpm_context_size = 0; 491 + smu_dpm->dpm_current_power_state = NULL; 492 + smu_dpm->dpm_request_power_state = NULL; 493 + 494 + return 0; 495 + } 496 + 497 + int smu_v14_0_init_power(struct smu_context *smu) 498 + { 499 + struct smu_power_context *smu_power = &smu->smu_power; 500 + 501 + if (smu_power->power_context || smu_power->power_context_size != 0) 502 + return -EINVAL; 503 + 504 + smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context), 505 + GFP_KERNEL); 506 + if (!smu_power->power_context) 507 + return -ENOMEM; 508 + smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context); 509 + 510 + return 0; 511 + } 512 + 513 + int smu_v14_0_fini_power(struct smu_context *smu) 514 + { 515 + struct smu_power_context *smu_power = &smu->smu_power; 516 + 517 + if (!smu_power->power_context || smu_power->power_context_size == 0) 518 + return -EINVAL; 519 + 520 + kfree(smu_power->power_context); 521 + smu_power->power_context = NULL; 522 + smu_power->power_context_size = 0; 523 + 524 + return 0; 525 + } 526 + 527 + int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu) 528 + { 529 + int ret, index; 530 + uint16_t size; 531 + uint8_t frev, crev; 532 + struct atom_common_table_header *header; 533 + struct atom_firmware_info_v3_4 *v_3_4; 534 + struct atom_firmware_info_v3_3 *v_3_3; 535 + struct atom_firmware_info_v3_1 *v_3_1; 536 + struct atom_smu_info_v3_6 *smu_info_v3_6; 537 + struct atom_smu_info_v4_0 *smu_info_v4_0; 538 + 539 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 540 + firmwareinfo); 541 + 542 + ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 543 + (uint8_t **)&header); 544 + if (ret) 545 + return ret; 546 + 547 + if 
(header->format_revision != 3) { 548 + dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu14\n"); 549 + return -EINVAL; 550 + } 551 + 552 + switch (header->content_revision) { 553 + case 0: 554 + case 1: 555 + case 2: 556 + v_3_1 = (struct atom_firmware_info_v3_1 *)header; 557 + smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 558 + smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 559 + smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 560 + smu->smu_table.boot_values.socclk = 0; 561 + smu->smu_table.boot_values.dcefclk = 0; 562 + smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 563 + smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 564 + smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 565 + smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 566 + smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 567 + smu->smu_table.boot_values.pp_table_id = 0; 568 + break; 569 + case 3: 570 + v_3_3 = (struct atom_firmware_info_v3_3 *)header; 571 + smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 572 + smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 573 + smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 574 + smu->smu_table.boot_values.socclk = 0; 575 + smu->smu_table.boot_values.dcefclk = 0; 576 + smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 577 + smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 578 + smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 579 + smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 580 + smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 581 + smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 582 + break; 583 + case 4: 584 + default: 585 + v_3_4 = (struct atom_firmware_info_v3_4 *)header; 586 + smu->smu_table.boot_values.revision = v_3_4->firmware_revision; 587 + smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; 
588 + smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; 589 + smu->smu_table.boot_values.socclk = 0; 590 + smu->smu_table.boot_values.dcefclk = 0; 591 + smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; 592 + smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; 593 + smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; 594 + smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; 595 + smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; 596 + smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id; 597 + break; 598 + } 599 + 600 + smu->smu_table.boot_values.format_revision = header->format_revision; 601 + smu->smu_table.boot_values.content_revision = header->content_revision; 602 + 603 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 604 + smu_info); 605 + if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 606 + (uint8_t **)&header)) { 607 + 608 + if ((frev == 3) && (crev == 6)) { 609 + smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header; 610 + 611 + smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz; 612 + smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz; 613 + smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz; 614 + smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz; 615 + } else if ((frev == 3) && (crev == 1)) { 616 + return 0; 617 + } else if ((frev == 4) && (crev == 0)) { 618 + smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header; 619 + 620 + smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz; 621 + smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz; 622 + smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz; 623 + smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz; 624 + smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz; 625 + } else { 626 + dev_warn(smu->adev->dev, "Unexpected and unhandled 
version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}


/*
 * Report the driver-allocated DRAM log pool (MC address + size) to the
 * PMFW so it can stream SMU logs into system memory.  Silently succeeds
 * when no pool was allocated.
 */
int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	/* nothing to report when the pool was never allocated */
	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	/* the 64-bit MC address is handed over as two 32-bit parameters */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

/*
 * Tell the PMFW where the driver table (used for table transfers between
 * driver and firmware) lives in VRAM/GTT.
 */
int smu_v14_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

/*
 * Tell the PMFW where the tools/PM-status-log buffer lives so external
 * debug tooling can retrieve PM status dumps.
 */
int smu_v14_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

/*
 * Push the driver's 64-bit allowed-feature bitmap down to the PMFW as
 * two 32-bit message parameters.  Returns -EINVAL when the allowed mask
 * is empty or the feature count cannot fill 64 bits.
 */
int smu_v14_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
	    feature->feature_num < 64)
		return -EINVAL;

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	/* high word first, then low word */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					      feature_mask[1], NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetAllowedFeaturesMaskLow,
					       feature_mask[0],
					       NULL);
}

/*
 * Allow or disallow GFXOFF on SMU14 parts.  Only acts when the GFXOFF
 * feature is enabled in the pp_feature mask; other IP versions are a
 * no-op returning success.
 */
int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 0):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

/* Globally enable or disable all SMU-managed features in one message. */
int smu_v14_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}
SMU_MSG_EnableAllSmuFeatures : 755 + SMU_MSG_DisableAllSmuFeatures), NULL); 756 + } 757 + 758 + int smu_v14_0_notify_display_change(struct smu_context *smu) 759 + { 760 + int ret = 0; 761 + 762 + if (!smu->pm_enabled) 763 + return ret; 764 + 765 + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 766 + smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) 767 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); 768 + 769 + return ret; 770 + } 771 + 772 + int smu_v14_0_get_current_power_limit(struct smu_context *smu, 773 + uint32_t *power_limit) 774 + { 775 + int power_src; 776 + int ret = 0; 777 + 778 + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 779 + return -EINVAL; 780 + 781 + power_src = smu_cmn_to_asic_specific_index(smu, 782 + CMN2ASIC_MAPPING_PWR, 783 + smu->adev->pm.ac_power ? 784 + SMU_POWER_SOURCE_AC : 785 + SMU_POWER_SOURCE_DC); 786 + if (power_src < 0) 787 + return -EINVAL; 788 + 789 + ret = smu_cmn_send_smc_msg_with_param(smu, 790 + SMU_MSG_GetPptLimit, 791 + power_src << 16, 792 + power_limit); 793 + if (ret) 794 + dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); 795 + 796 + return ret; 797 + } 798 + 799 + int smu_v14_0_set_power_limit(struct smu_context *smu, 800 + enum smu_ppt_limit_type limit_type, 801 + uint32_t limit) 802 + { 803 + int ret = 0; 804 + 805 + if (limit_type != SMU_DEFAULT_PPT_LIMIT) 806 + return -EINVAL; 807 + 808 + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 809 + dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 810 + return -EOPNOTSUPP; 811 + } 812 + 813 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); 814 + if (ret) { 815 + dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); 816 + return ret; 817 + } 818 + 819 + smu->current_power_limit = limit; 820 + 821 + return 0; 822 + } 823 + 824 + static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, 825 + struct 
amdgpu_irq_src *source, 826 + unsigned tyep, 827 + enum amdgpu_interrupt_state state) 828 + { 829 + uint32_t val = 0; 830 + 831 + switch (state) { 832 + case AMDGPU_IRQ_STATE_DISABLE: 833 + /* For THM irqs */ 834 + // TODO 835 + 836 + /* For MP1 SW irqs */ 837 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 838 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 839 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 840 + 841 + break; 842 + case AMDGPU_IRQ_STATE_ENABLE: 843 + /* For THM irqs */ 844 + // TODO 845 + 846 + /* For MP1 SW irqs */ 847 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 848 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 849 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 850 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 851 + 852 + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 853 + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 854 + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 855 + 856 + break; 857 + default: 858 + break; 859 + } 860 + 861 + return 0; 862 + } 863 + 864 + static int smu_v14_0_irq_process(struct amdgpu_device *adev, 865 + struct amdgpu_irq_src *source, 866 + struct amdgpu_iv_entry *entry) 867 + { 868 + // TODO 869 + 870 + return 0; 871 + } 872 + 873 + static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = { 874 + .set = smu_v14_0_set_irq_state, 875 + .process = smu_v14_0_irq_process, 876 + }; 877 + 878 + int smu_v14_0_register_irq_handler(struct smu_context *smu) 879 + { 880 + struct amdgpu_device *adev = smu->adev; 881 + struct amdgpu_irq_src *irq_src = &smu->irq_source; 882 + int ret = 0; 883 + 884 + if (amdgpu_sriov_vf(adev)) 885 + return 0; 886 + 887 + irq_src->num_types = 1; 888 + irq_src->funcs = &smu_v14_0_irq_funcs; 889 + 890 + // TODO: THM related 891 + 892 + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, 893 + 0xfe, 894 + irq_src); 895 + if (ret) 896 + return ret; 897 + 898 + return ret; 899 + } 900 + 901 + static 
/*
 * Dispatch a wait for an asynchronous SMU event.  Only
 * SMU_EVENT_RESET_COMPLETE is currently handled; anything else returns
 * -EINVAL.
 */
int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
			     uint64_t event_arg)
{
	int ret = -EINVAL;

	switch (event) {
	case SMU_EVENT_RESET_COMPLETE:
		ret = smu_v14_0_wait_for_reset_complete(smu, event_arg);
		break;
	default:
		break;
	}

	return ret;
}

/*
 * Query the absolute min/max frequency of a clock domain from the PMFW.
 * When DPM is disabled for the domain, both bounds fall back to the
 * vbios bootup clock.  Either @min or @max may be NULL.
 */
int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit (boot values are stored in 10 kHz units) */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	/* clock id is carried in the upper 16 bits of the parameter */
	param = (clk_id & 0xffff) << 16;

	if (max) {
		/* DC mode caps the max below the AC-mode ceiling */
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

/*
 * Clamp a clock domain into [min, max] via the soft min/max messages.
 * A bound of 0 leaves that side untouched.  No-op when DPM is disabled
 * for the domain.
 */
int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		/* parameter packs clock id (high 16) with frequency (low 16) */
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

/*
 * Like the soft-limit variant above, but programs hard min/max bounds.
 * Rejects the call when neither bound is provided.
 */
int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						      param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Apply a dpm forced-level policy by translating it into per-domain
 * soft frequency ranges (gfx/uclk/socclk/vclk/dclk/fclk) and recording
 * the resulting current range in the pstate table.  MANUAL and
 * PROFILE_EXIT intentionally change nothing.
 */
int smu_v14_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_14_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_14_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_14_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_14_0_dpm_table *vclk_table =
		&dpm_context->dpm_tables.vclk_table;
	struct smu_14_0_dpm_table *dclk_table =
		&dpm_context->dpm_tables.dclk_table;
	struct smu_14_0_dpm_table *fclk_table =
		&dpm_context->dpm_tables.fclk_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	int ret = 0, i;

	/* pick the target range per domain; min == max pins the clock */
	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		/* nothing to program for these levels */
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	/* vclk/dclk are per VCN instance; skip harvested instances */
	if (vclk_min && vclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v14_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v14_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

/* Notify the PMFW that the power source (AC/DC) changed. */
int smu_v14_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

/*
 * Read the frequency of DPM level @level for @clk_type.  The returned
 * value's bit 31 (fine-grained flag) is masked off so only the
 * frequency remains.
 */
static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint16_t level,
					   uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	/* strip the fine-grained flag in bit 31 */
	*value = *value & 0x7fffffff;

	return ret;
}

/* Level index 0xff is the PMFW convention for "return the level count". */
static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);

	return ret;
}
/*
 * Query whether @clk_type uses fine-grained DPM.  The PMFW reports this
 * in bit 31 of the GetDpmFreqByIndex(0xff) response.
 */
static int smu_v14_0_get_fine_grained_status(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      &value);
	if (ret)
		return ret;

	/*
	 * BIT31: 1 - fine-grained DPM, 0 - discrete DPM
	 * (fine-grained DPM is currently unsupported by the driver)
	 */
	*is_fine_grained_dpm = value & 0x80000000;

	return 0;
}

/*
 * Populate one smu_14_0_dpm_table from the PMFW: level count,
 * fine-grained flag, and each level's frequency; min/max are taken from
 * the first and last levels.
 */
int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_14_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v14_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	ret = smu_v14_0_get_fine_grained_status(smu,
						clk_type,
						&single_dpm_table->is_fine_grained);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
		return ret;
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v14_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		/* levels are ordered ascending: first is min, last is max */
		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}

/*
 * Power up/down every non-harvested VCN instance.  The instance index
 * is passed in the upper 16 bits of the message parameter.
 */
int smu_v14_0_set_vcn_enable(struct smu_context *smu,
			     bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
						      SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
						      i << 16U, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

/* Power up/down the JPEG engine. */
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
			      bool enable)
{
	return smu_cmn_send_smc_msg_with_param(smu, enable ?
					       SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
					       0, NULL);
}

/* Trigger the PMFW DC board-timing calibration (BTC). */
int smu_v14_0_run_btc(struct smu_context *smu)
{
	int res;

	res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
	if (res)
		dev_err(smu->adev->dev, "RunDcBtc failed!\n");

	return res;
}

/* Allow or disallow GFX power optimization (GPO) in the PMFW. */
int smu_v14_0_gpo_control(struct smu_context *smu,
			  bool enablement)
{
	int res;

	res = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_AllowGpo,
					      enablement ? 1 : 0,
					      NULL);
	if (res)
		dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);

	return res;
}
/*
 * Enable/disable the deep-sleep feature for every clock domain that the
 * PMFW reports as supported (GFXCLK, UCLK, FCLK, SOCCLK, LCLK, VCN,
 * MP0CLK, MP1CLK).  Stops and reports on the first failure.
 */
int smu_v14_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	return ret;
}

/* Toggle GFX ultra-low-voltage (ULV) when the feature is supported. */
int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}

/*
 * Arm the D3 entry sequence (BACO/BAMACO/ULPS) in the PMFW and mirror
 * the resulting BACO state in the driver's baco context.
 */
int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_baco_seq baco_seq)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_ArmD3,
					      baco_seq,
					      NULL);
	if (ret)
		return ret;

	if (baco_seq == BACO_SEQ_BAMACO ||
	    baco_seq == BACO_SEQ_BACO)
		smu_baco->state = SMU_BACO_STATE_ENTER;
	else
		smu_baco->state = SMU_BACO_STATE_EXIT;

	return 0;
}

/*
 * Whether BACO can be used: requires platform support, no SR-IOV, and
 * either the ASIC already sits in BACO or the BACO feature is both
 * supported and enabled.
 */
bool smu_v14_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) ||
	    !smu_baco->platform_support)
		return false;

	/* return true if ASIC is in BACO state already */
	if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return true;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}
struct smu_baco_context *smu_baco = &smu->smu_baco; 1551 + 1552 + if (amdgpu_sriov_vf(smu->adev) || 1553 + !smu_baco->platform_support) 1554 + return false; 1555 + 1556 + /* return true if ASIC is in BACO state already */ 1557 + if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) 1558 + return true; 1559 + 1560 + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && 1561 + !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) 1562 + return false; 1563 + 1564 + return true; 1565 + } 1566 + 1567 + enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu) 1568 + { 1569 + struct smu_baco_context *smu_baco = &smu->smu_baco; 1570 + 1571 + return smu_baco->state; 1572 + } 1573 + 1574 + int smu_v14_0_baco_set_state(struct smu_context *smu, 1575 + enum smu_baco_state state) 1576 + { 1577 + struct smu_baco_context *smu_baco = &smu->smu_baco; 1578 + struct amdgpu_device *adev = smu->adev; 1579 + int ret = 0; 1580 + 1581 + if (smu_v14_0_baco_get_state(smu) == state) 1582 + return 0; 1583 + 1584 + if (state == SMU_BACO_STATE_ENTER) { 1585 + ret = smu_cmn_send_smc_msg_with_param(smu, 1586 + SMU_MSG_EnterBaco, 1587 + smu_baco->maco_support ? 
1588 + BACO_SEQ_BAMACO : BACO_SEQ_BACO, 1589 + NULL); 1590 + } else { 1591 + ret = smu_cmn_send_smc_msg(smu, 1592 + SMU_MSG_ExitBaco, 1593 + NULL); 1594 + if (ret) 1595 + return ret; 1596 + 1597 + /* clear vbios scratch 6 and 7 for coming asic reinit */ 1598 + WREG32(adev->bios_scratch_reg_offset + 6, 0); 1599 + WREG32(adev->bios_scratch_reg_offset + 7, 0); 1600 + } 1601 + 1602 + if (!ret) 1603 + smu_baco->state = state; 1604 + 1605 + return ret; 1606 + } 1607 + 1608 + int smu_v14_0_baco_enter(struct smu_context *smu) 1609 + { 1610 + int ret = 0; 1611 + 1612 + ret = smu_v14_0_baco_set_state(smu, 1613 + SMU_BACO_STATE_ENTER); 1614 + if (ret) 1615 + return ret; 1616 + 1617 + msleep(10); 1618 + 1619 + return ret; 1620 + } 1621 + 1622 + int smu_v14_0_baco_exit(struct smu_context *smu) 1623 + { 1624 + return smu_v14_0_baco_set_state(smu, 1625 + SMU_BACO_STATE_EXIT); 1626 + } 1627 + 1628 + int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu) 1629 + { 1630 + uint16_t index; 1631 + 1632 + index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, 1633 + SMU_MSG_EnableGfxImu); 1634 + /* Param 1 to tell PMFW to enable GFXOFF feature */ 1635 + return smu_cmn_send_msg_without_waiting(smu, index, 1); 1636 + } 1637 + 1638 + int smu_v14_0_set_default_dpm_tables(struct smu_context *smu) 1639 + { 1640 + struct smu_table_context *smu_table = &smu->smu_table; 1641 + 1642 + return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 1643 + smu_table->clocks_table, false); 1644 + } 1645 + 1646 + int smu_v14_0_od_edit_dpm_table(struct smu_context *smu, 1647 + enum PP_OD_DPM_TABLE_COMMAND type, 1648 + long input[], uint32_t size) 1649 + { 1650 + struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); 1651 + int ret = 0; 1652 + 1653 + /* Only allowed in manual mode */ 1654 + if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1655 + return -EINVAL; 1656 + 1657 + switch (type) { 1658 + case PP_OD_EDIT_SCLK_VDDC_TABLE: 1659 + if (size != 2) { 1660 + dev_err(smu->adev->dev, 
"Input parameter number not correct\n"); 1661 + return -EINVAL; 1662 + } 1663 + 1664 + if (input[0] == 0) { 1665 + if (input[1] < smu->gfx_default_hard_min_freq) { 1666 + dev_warn(smu->adev->dev, 1667 + "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 1668 + input[1], smu->gfx_default_hard_min_freq); 1669 + return -EINVAL; 1670 + } 1671 + smu->gfx_actual_hard_min_freq = input[1]; 1672 + } else if (input[0] == 1) { 1673 + if (input[1] > smu->gfx_default_soft_max_freq) { 1674 + dev_warn(smu->adev->dev, 1675 + "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 1676 + input[1], smu->gfx_default_soft_max_freq); 1677 + return -EINVAL; 1678 + } 1679 + smu->gfx_actual_soft_max_freq = input[1]; 1680 + } else { 1681 + return -EINVAL; 1682 + } 1683 + break; 1684 + case PP_OD_RESTORE_DEFAULT_TABLE: 1685 + if (size != 0) { 1686 + dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1687 + return -EINVAL; 1688 + } 1689 + smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 1690 + smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 1691 + break; 1692 + case PP_OD_COMMIT_DPM_TABLE: 1693 + if (size != 0) { 1694 + dev_err(smu->adev->dev, "Input parameter number not correct\n"); 1695 + return -EINVAL; 1696 + } 1697 + if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 1698 + dev_err(smu->adev->dev, 1699 + "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n", 1700 + smu->gfx_actual_hard_min_freq, 1701 + smu->gfx_actual_soft_max_freq); 1702 + return -EINVAL; 1703 + } 1704 + 1705 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 1706 + smu->gfx_actual_hard_min_freq, 1707 + NULL); 1708 + if (ret) { 1709 + dev_err(smu->adev->dev, "Set hard min sclk failed!"); 1710 + return ret; 1711 + } 1712 + 1713 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 1714 + 
smu->gfx_actual_soft_max_freq, 1715 + NULL); 1716 + if (ret) { 1717 + dev_err(smu->adev->dev, "Set soft max sclk failed!"); 1718 + return ret; 1719 + } 1720 + break; 1721 + default: 1722 + return -ENOSYS; 1723 + } 1724 + 1725 + return ret; 1726 + } 1727 +
+1078
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #include "smu_types.h" 25 + #define SWSMU_CODE_LAYER_L2 26 + 27 + #include "amdgpu.h" 28 + #include "amdgpu_smu.h" 29 + #include "smu_v14_0.h" 30 + #include "smu14_driver_if_v14_0_0.h" 31 + #include "smu_v14_0_0_ppt.h" 32 + #include "smu_v14_0_0_ppsmc.h" 33 + #include "smu_v14_0_0_pmfw.h" 34 + #include "smu_cmn.h" 35 + 36 + /* 37 + * DO NOT use these for err/warn/info/debug messages. 38 + * Use dev_err, dev_warn, dev_info and dev_dbg instead. 39 + * They are more MGPU friendly. 
40 + */ 41 + #undef pr_err 42 + #undef pr_warn 43 + #undef pr_info 44 + #undef pr_debug 45 + 46 + #define mmMP1_SMN_C2PMSG_66 0x0282 47 + #define mmMP1_SMN_C2PMSG_66_BASE_IDX 0 48 + 49 + #define mmMP1_SMN_C2PMSG_82 0x0292 50 + #define mmMP1_SMN_C2PMSG_82_BASE_IDX 0 51 + 52 + #define mmMP1_SMN_C2PMSG_90 0x029a 53 + #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 54 + 55 + #define FEATURE_MASK(feature) (1ULL << feature) 56 + #define SMC_DPM_FEATURE ( \ 57 + FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ 58 + FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \ 59 + FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ 60 + FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \ 61 + FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \ 62 + FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \ 63 + FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \ 64 + FEATURE_MASK(FEATURE_ISP_DPM_BIT)| \ 65 + FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ 66 + FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ 67 + FEATURE_MASK(FEATURE_VPE_DPM_BIT)) 68 + 69 + static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = { 70 + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 71 + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1), 72 + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), 73 + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), 74 + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), 75 + MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), 76 + MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1), 77 + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), 78 + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), 79 + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), 80 + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), 81 + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), 82 + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1), 83 + MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1), 84 + MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 
1), 85 + MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1), 86 + MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1), 87 + MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1), 88 + MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1), 89 + MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1), 90 + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1), 91 + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1), 92 + MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1), 93 + MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1), 94 + MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1), 95 + MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1), 96 + MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1), 97 + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), 98 + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), 99 + MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1), 100 + MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1), 101 + MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1), 102 + MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 1), 103 + MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 1), 104 + MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 1), 105 + MSG_MAP(PowerUpVpe, PPSMC_MSG_PowerUpVpe, 1), 106 + MSG_MAP(PowerDownVpe, PPSMC_MSG_PowerDownVpe, 1), 107 + }; 108 + 109 + static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { 110 + FEA_MAP(CCLK_DPM), 111 + FEA_MAP(FAN_CONTROLLER), 112 + FEA_MAP(PPT), 113 + FEA_MAP(TDC), 114 + FEA_MAP(THERMAL), 115 + FEA_MAP(VCN_DPM), 116 + FEA_MAP_REVERSE(FCLK), 117 + FEA_MAP_REVERSE(SOCCLK), 118 + FEA_MAP(LCLK_DPM), 119 + FEA_MAP(SHUBCLK_DPM), 120 + FEA_MAP(DCFCLK_DPM), 121 + FEA_MAP_HALF_REVERSE(GFX), 122 + FEA_MAP(DS_GFXCLK), 123 + FEA_MAP(DS_SOCCLK), 124 + FEA_MAP(DS_LCLK), 125 + FEA_MAP(LOW_POWER_DCNCLKS), 126 + FEA_MAP(DS_FCLK), 127 + FEA_MAP(DS_MP1CLK), 128 + 
FEA_MAP(GFX_DEM), 129 + FEA_MAP(PSI), 130 + FEA_MAP(PROCHOT), 131 + FEA_MAP(CPUOFF), 132 + FEA_MAP(STAPM), 133 + FEA_MAP(S0I3), 134 + FEA_MAP(PERF_LIMIT), 135 + FEA_MAP(CORE_DLDO), 136 + FEA_MAP(DS_VCN), 137 + FEA_MAP(CPPC), 138 + FEA_MAP(DF_CSTATES), 139 + FEA_MAP(ATHUB_PG), 140 + }; 141 + 142 + static struct cmn2asic_mapping smu_v14_0_0_table_map[SMU_TABLE_COUNT] = { 143 + TAB_MAP_VALID(WATERMARKS), 144 + TAB_MAP_VALID(SMU_METRICS), 145 + TAB_MAP_VALID(CUSTOM_DPM), 146 + TAB_MAP_VALID(DPMCLOCKS), 147 + }; 148 + 149 + static int smu_v14_0_0_init_smc_tables(struct smu_context *smu) 150 + { 151 + struct smu_table_context *smu_table = &smu->smu_table; 152 + struct smu_table *tables = smu_table->tables; 153 + 154 + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 155 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 156 + SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 157 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 158 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 159 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 160 + 161 + smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); 162 + if (!smu_table->clocks_table) 163 + goto err0_out; 164 + 165 + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 166 + if (!smu_table->metrics_table) 167 + goto err1_out; 168 + smu_table->metrics_time = 0; 169 + 170 + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 171 + if (!smu_table->watermarks_table) 172 + goto err2_out; 173 + 174 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1); 175 + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 176 + if (!smu_table->gpu_metrics_table) 177 + goto err3_out; 178 + 179 + return 0; 180 + 181 + err3_out: 182 + kfree(smu_table->watermarks_table); 183 + err2_out: 184 + kfree(smu_table->metrics_table); 185 + err1_out: 186 + kfree(smu_table->clocks_table); 187 + err0_out: 188 + return -ENOMEM; 189 + } 190 
+ 191 + static int smu_v14_0_0_fini_smc_tables(struct smu_context *smu) 192 + { 193 + struct smu_table_context *smu_table = &smu->smu_table; 194 + 195 + kfree(smu_table->clocks_table); 196 + smu_table->clocks_table = NULL; 197 + 198 + kfree(smu_table->metrics_table); 199 + smu_table->metrics_table = NULL; 200 + 201 + kfree(smu_table->watermarks_table); 202 + smu_table->watermarks_table = NULL; 203 + 204 + kfree(smu_table->gpu_metrics_table); 205 + smu_table->gpu_metrics_table = NULL; 206 + 207 + return 0; 208 + } 209 + 210 + static int smu_v14_0_0_system_features_control(struct smu_context *smu, bool en) 211 + { 212 + struct amdgpu_device *adev = smu->adev; 213 + int ret = 0; 214 + 215 + if (!en && !adev->in_s0ix) 216 + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); 217 + 218 + return ret; 219 + } 220 + 221 + static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu, 222 + MetricsMember_t member, 223 + uint32_t *value) 224 + { 225 + struct smu_table_context *smu_table = &smu->smu_table; 226 + 227 + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 228 + int ret = 0; 229 + 230 + ret = smu_cmn_get_metrics_table(smu, NULL, false); 231 + if (ret) 232 + return ret; 233 + 234 + switch (member) { 235 + case METRICS_AVERAGE_GFXCLK: 236 + *value = metrics->GfxclkFrequency; 237 + break; 238 + case METRICS_AVERAGE_SOCCLK: 239 + *value = metrics->SocclkFrequency; 240 + break; 241 + case METRICS_AVERAGE_VCLK: 242 + *value = metrics->VclkFrequency; 243 + break; 244 + case METRICS_AVERAGE_DCLK: 245 + *value = metrics->DclkFrequency; 246 + break; 247 + case METRICS_AVERAGE_UCLK: 248 + *value = metrics->MemclkFrequency; 249 + break; 250 + case METRICS_AVERAGE_GFXACTIVITY: 251 + *value = metrics->GfxActivity / 100; 252 + break; 253 + case METRICS_AVERAGE_VCNACTIVITY: 254 + *value = metrics->UvdActivity; 255 + break; 256 + case METRICS_AVERAGE_SOCKETPOWER: 257 + *value = (metrics->AverageSocketPower << 8) / 1000; 258 + break; 259 + case 
METRICS_CURR_SOCKETPOWER: 260 + *value = (metrics->CurrentSocketPower << 8) / 1000; 261 + break; 262 + case METRICS_TEMPERATURE_EDGE: 263 + *value = metrics->GfxTemperature / 100 * 264 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 265 + break; 266 + case METRICS_TEMPERATURE_HOTSPOT: 267 + *value = metrics->SocTemperature / 100 * 268 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 269 + break; 270 + case METRICS_THROTTLER_STATUS: 271 + *value = metrics->ThrottlerStatus; 272 + break; 273 + case METRICS_VOLTAGE_VDDGFX: 274 + *value = metrics->Voltage[0]; 275 + break; 276 + case METRICS_VOLTAGE_VDDSOC: 277 + *value = metrics->Voltage[1]; 278 + break; 279 + case METRICS_SS_APU_SHARE: 280 + /* return the percentage of APU power with respect to APU's power limit. 281 + * percentage is reported, this isn't boost value. Smartshift power 282 + * boost/shift is only when the percentage is more than 100. 283 + */ 284 + if (metrics->StapmOpnLimit > 0) 285 + *value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit; 286 + else 287 + *value = 0; 288 + break; 289 + case METRICS_SS_DGPU_SHARE: 290 + /* return the percentage of dGPU power with respect to dGPU's power limit. 291 + * percentage is reported, this isn't boost value. Smartshift power 292 + * boost/shift is only when the percentage is more than 100. 
293 + */ 294 + if ((metrics->dGpuPower > 0) && 295 + (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)) 296 + *value = (metrics->dGpuPower * 100) / 297 + (metrics->StapmCurrentLimit - metrics->StapmOpnLimit); 298 + else 299 + *value = 0; 300 + break; 301 + default: 302 + *value = UINT_MAX; 303 + break; 304 + } 305 + 306 + return ret; 307 + } 308 + 309 + static int smu_v14_0_0_read_sensor(struct smu_context *smu, 310 + enum amd_pp_sensors sensor, 311 + void *data, uint32_t *size) 312 + { 313 + int ret = 0; 314 + 315 + if (!data || !size) 316 + return -EINVAL; 317 + 318 + switch (sensor) { 319 + case AMDGPU_PP_SENSOR_GPU_LOAD: 320 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 321 + METRICS_AVERAGE_GFXACTIVITY, 322 + (uint32_t *)data); 323 + *size = 4; 324 + break; 325 + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 326 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 327 + METRICS_AVERAGE_SOCKETPOWER, 328 + (uint32_t *)data); 329 + *size = 4; 330 + break; 331 + case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: 332 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 333 + METRICS_CURR_SOCKETPOWER, 334 + (uint32_t *)data); 335 + *size = 4; 336 + break; 337 + case AMDGPU_PP_SENSOR_EDGE_TEMP: 338 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 339 + METRICS_TEMPERATURE_EDGE, 340 + (uint32_t *)data); 341 + *size = 4; 342 + break; 343 + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 344 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 345 + METRICS_TEMPERATURE_HOTSPOT, 346 + (uint32_t *)data); 347 + *size = 4; 348 + break; 349 + case AMDGPU_PP_SENSOR_GFX_MCLK: 350 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 351 + METRICS_AVERAGE_UCLK, 352 + (uint32_t *)data); 353 + *(uint32_t *)data *= 100; 354 + *size = 4; 355 + break; 356 + case AMDGPU_PP_SENSOR_GFX_SCLK: 357 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 358 + METRICS_AVERAGE_GFXCLK, 359 + (uint32_t *)data); 360 + *(uint32_t *)data *= 100; 361 + *size = 4; 362 + break; 363 + case AMDGPU_PP_SENSOR_VDDGFX: 364 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 365 + 
METRICS_VOLTAGE_VDDGFX, 366 + (uint32_t *)data); 367 + *size = 4; 368 + break; 369 + case AMDGPU_PP_SENSOR_VDDNB: 370 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 371 + METRICS_VOLTAGE_VDDSOC, 372 + (uint32_t *)data); 373 + *size = 4; 374 + break; 375 + case AMDGPU_PP_SENSOR_SS_APU_SHARE: 376 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 377 + METRICS_SS_APU_SHARE, 378 + (uint32_t *)data); 379 + *size = 4; 380 + break; 381 + case AMDGPU_PP_SENSOR_SS_DGPU_SHARE: 382 + ret = smu_v14_0_0_get_smu_metrics_data(smu, 383 + METRICS_SS_DGPU_SHARE, 384 + (uint32_t *)data); 385 + *size = 4; 386 + break; 387 + default: 388 + ret = -EOPNOTSUPP; 389 + break; 390 + } 391 + 392 + return ret; 393 + } 394 + 395 + static bool smu_v14_0_0_is_dpm_running(struct smu_context *smu) 396 + { 397 + int ret = 0; 398 + uint64_t feature_enabled; 399 + 400 + ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 401 + 402 + if (ret) 403 + return false; 404 + 405 + return !!(feature_enabled & SMC_DPM_FEATURE); 406 + } 407 + 408 + static int smu_v14_0_0_set_watermarks_table(struct smu_context *smu, 409 + struct pp_smu_wm_range_sets *clock_ranges) 410 + { 411 + int i; 412 + int ret = 0; 413 + Watermarks_t *table = smu->smu_table.watermarks_table; 414 + 415 + if (!table || !clock_ranges) 416 + return -EINVAL; 417 + 418 + if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES || 419 + clock_ranges->num_writer_wm_sets > NUM_WM_RANGES) 420 + return -EINVAL; 421 + 422 + for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) { 423 + table->WatermarkRow[WM_DCFCLK][i].MinClock = 424 + clock_ranges->reader_wm_sets[i].min_drain_clk_mhz; 425 + table->WatermarkRow[WM_DCFCLK][i].MaxClock = 426 + clock_ranges->reader_wm_sets[i].max_drain_clk_mhz; 427 + table->WatermarkRow[WM_DCFCLK][i].MinMclk = 428 + clock_ranges->reader_wm_sets[i].min_fill_clk_mhz; 429 + table->WatermarkRow[WM_DCFCLK][i].MaxMclk = 430 + clock_ranges->reader_wm_sets[i].max_fill_clk_mhz; 431 + 432 + table->WatermarkRow[WM_DCFCLK][i].WmSetting = 
433 + clock_ranges->reader_wm_sets[i].wm_inst; 434 + } 435 + 436 + for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) { 437 + table->WatermarkRow[WM_SOCCLK][i].MinClock = 438 + clock_ranges->writer_wm_sets[i].min_fill_clk_mhz; 439 + table->WatermarkRow[WM_SOCCLK][i].MaxClock = 440 + clock_ranges->writer_wm_sets[i].max_fill_clk_mhz; 441 + table->WatermarkRow[WM_SOCCLK][i].MinMclk = 442 + clock_ranges->writer_wm_sets[i].min_drain_clk_mhz; 443 + table->WatermarkRow[WM_SOCCLK][i].MaxMclk = 444 + clock_ranges->writer_wm_sets[i].max_drain_clk_mhz; 445 + 446 + table->WatermarkRow[WM_SOCCLK][i].WmSetting = 447 + clock_ranges->writer_wm_sets[i].wm_inst; 448 + } 449 + 450 + smu->watermarks_bitmap |= WATERMARKS_EXIST; 451 + 452 + /* pass data to smu controller */ 453 + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 454 + !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 455 + ret = smu_cmn_write_watermarks_table(smu); 456 + if (ret) { 457 + dev_err(smu->adev->dev, "Failed to update WMTABLE!"); 458 + return ret; 459 + } 460 + smu->watermarks_bitmap |= WATERMARKS_LOADED; 461 + } 462 + 463 + return 0; 464 + } 465 + 466 + static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu, 467 + void **table) 468 + { 469 + struct smu_table_context *smu_table = &smu->smu_table; 470 + struct gpu_metrics_v2_1 *gpu_metrics = 471 + (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 472 + SmuMetrics_t metrics; 473 + int ret = 0; 474 + 475 + ret = smu_cmn_get_metrics_table(smu, &metrics, true); 476 + if (ret) 477 + return ret; 478 + 479 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 480 + 481 + gpu_metrics->temperature_gfx = metrics.GfxTemperature; 482 + gpu_metrics->temperature_soc = metrics.SocTemperature; 483 + memcpy(&gpu_metrics->temperature_core[0], 484 + &metrics.CoreTemperature[0], 485 + sizeof(uint16_t) * 8); 486 + gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 487 + gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; 488 + 489 + 
gpu_metrics->average_gfx_activity = metrics.GfxActivity; 490 + gpu_metrics->average_mm_activity = metrics.UvdActivity; 491 + 492 + gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 493 + gpu_metrics->average_gfx_power = metrics.Power[0]; 494 + gpu_metrics->average_soc_power = metrics.Power[1]; 495 + memcpy(&gpu_metrics->average_core_power[0], 496 + &metrics.CorePower[0], 497 + sizeof(uint16_t) * 8); 498 + 499 + gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 500 + gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 501 + gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 502 + gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 503 + gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 504 + gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 505 + 506 + memcpy(&gpu_metrics->current_coreclk[0], 507 + &metrics.CoreFrequency[0], 508 + sizeof(uint16_t) * 8); 509 + 510 + gpu_metrics->throttle_status = metrics.ThrottlerStatus; 511 + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 512 + 513 + *table = (void *)gpu_metrics; 514 + 515 + return sizeof(struct gpu_metrics_v2_1); 516 + } 517 + 518 + static int smu_v14_0_0_mode2_reset(struct smu_context *smu) 519 + { 520 + int ret; 521 + 522 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, 523 + SMU_RESET_MODE_2, NULL); 524 + 525 + if (ret) 526 + dev_err(smu->adev->dev, "Failed to mode2 reset!\n"); 527 + 528 + return ret; 529 + } 530 + 531 + static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu, 532 + enum smu_clk_type clk_type, 533 + uint32_t dpm_level, 534 + uint32_t *freq) 535 + { 536 + DpmClocks_t *clk_table = smu->smu_table.clocks_table; 537 + 538 + if (!clk_table || clk_type >= SMU_CLK_COUNT) 539 + return -EINVAL; 540 + 541 + switch (clk_type) { 542 + case SMU_SOCCLK: 543 + if (dpm_level >= clk_table->NumSocClkLevelsEnabled) 544 + return -EINVAL; 545 + *freq = 
clk_table->SocClocks[dpm_level]; 546 + break; 547 + case SMU_VCLK: 548 + if (dpm_level >= clk_table->VcnClkLevelsEnabled) 549 + return -EINVAL; 550 + *freq = clk_table->VClocks[dpm_level]; 551 + break; 552 + case SMU_DCLK: 553 + if (dpm_level >= clk_table->VcnClkLevelsEnabled) 554 + return -EINVAL; 555 + *freq = clk_table->DClocks[dpm_level]; 556 + break; 557 + case SMU_UCLK: 558 + case SMU_MCLK: 559 + if (dpm_level >= clk_table->NumMemPstatesEnabled) 560 + return -EINVAL; 561 + *freq = clk_table->MemPstateTable[dpm_level].MemClk; 562 + break; 563 + case SMU_FCLK: 564 + if (dpm_level >= clk_table->NumFclkLevelsEnabled) 565 + return -EINVAL; 566 + *freq = clk_table->FclkClocks_Freq[dpm_level]; 567 + break; 568 + default: 569 + return -EINVAL; 570 + } 571 + 572 + return 0; 573 + } 574 + 575 + static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, 576 + enum smu_clk_type clk_type) 577 + { 578 + enum smu_feature_mask feature_id = 0; 579 + 580 + switch (clk_type) { 581 + case SMU_MCLK: 582 + case SMU_UCLK: 583 + case SMU_FCLK: 584 + feature_id = SMU_FEATURE_DPM_FCLK_BIT; 585 + break; 586 + case SMU_GFXCLK: 587 + case SMU_SCLK: 588 + feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 589 + break; 590 + case SMU_SOCCLK: 591 + feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 592 + break; 593 + case SMU_VCLK: 594 + case SMU_DCLK: 595 + feature_id = SMU_FEATURE_VCN_DPM_BIT; 596 + break; 597 + default: 598 + return true; 599 + } 600 + 601 + return smu_cmn_feature_is_enabled(smu, feature_id); 602 + } 603 + 604 + static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, 605 + enum smu_clk_type clk_type, 606 + uint32_t *min, 607 + uint32_t *max) 608 + { 609 + DpmClocks_t *clk_table = smu->smu_table.clocks_table; 610 + uint32_t clock_limit; 611 + uint32_t max_dpm_level, min_dpm_level; 612 + int ret = 0; 613 + 614 + if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) { 615 + switch (clk_type) { 616 + case SMU_MCLK: 617 + case SMU_UCLK: 618 + clock_limit = 
smu->smu_table.boot_values.uclk; 619 + break; 620 + case SMU_FCLK: 621 + clock_limit = smu->smu_table.boot_values.fclk; 622 + break; 623 + case SMU_GFXCLK: 624 + case SMU_SCLK: 625 + clock_limit = smu->smu_table.boot_values.gfxclk; 626 + break; 627 + case SMU_SOCCLK: 628 + clock_limit = smu->smu_table.boot_values.socclk; 629 + break; 630 + case SMU_VCLK: 631 + clock_limit = smu->smu_table.boot_values.vclk; 632 + break; 633 + case SMU_DCLK: 634 + clock_limit = smu->smu_table.boot_values.dclk; 635 + break; 636 + default: 637 + clock_limit = 0; 638 + break; 639 + } 640 + 641 + /* clock in Mhz unit */ 642 + if (min) 643 + *min = clock_limit / 100; 644 + if (max) 645 + *max = clock_limit / 100; 646 + 647 + return 0; 648 + } 649 + 650 + if (max) { 651 + switch (clk_type) { 652 + case SMU_GFXCLK: 653 + case SMU_SCLK: 654 + *max = clk_table->MaxGfxClk; 655 + break; 656 + case SMU_MCLK: 657 + case SMU_UCLK: 658 + case SMU_FCLK: 659 + max_dpm_level = 0; 660 + break; 661 + case SMU_SOCCLK: 662 + max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; 663 + break; 664 + case SMU_VCLK: 665 + case SMU_DCLK: 666 + max_dpm_level = clk_table->VcnClkLevelsEnabled - 1; 667 + break; 668 + default: 669 + ret = -EINVAL; 670 + goto failed; 671 + } 672 + 673 + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 674 + ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); 675 + if (ret) 676 + goto failed; 677 + } 678 + } 679 + 680 + if (min) { 681 + switch (clk_type) { 682 + case SMU_GFXCLK: 683 + case SMU_SCLK: 684 + *min = clk_table->MinGfxClk; 685 + break; 686 + case SMU_MCLK: 687 + case SMU_UCLK: 688 + min_dpm_level = clk_table->NumMemPstatesEnabled - 1; 689 + break; 690 + case SMU_FCLK: 691 + min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; 692 + break; 693 + case SMU_SOCCLK: 694 + min_dpm_level = 0; 695 + break; 696 + case SMU_VCLK: 697 + case SMU_DCLK: 698 + min_dpm_level = 0; 699 + break; 700 + default: 701 + ret = -EINVAL; 702 + goto failed; 703 + } 704 + 
705 + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { 706 + ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); 707 + if (ret) 708 + goto failed; 709 + } 710 + } 711 + 712 + failed: 713 + return ret; 714 + } 715 + 716 + static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, 717 + enum smu_clk_type clk_type, 718 + uint32_t *value) 719 + { 720 + MetricsMember_t member_type; 721 + 722 + switch (clk_type) { 723 + case SMU_SOCCLK: 724 + member_type = METRICS_AVERAGE_SOCCLK; 725 + break; 726 + case SMU_VCLK: 727 + member_type = METRICS_AVERAGE_VCLK; 728 + break; 729 + case SMU_DCLK: 730 + member_type = METRICS_AVERAGE_DCLK; 731 + break; 732 + case SMU_MCLK: 733 + member_type = METRICS_AVERAGE_UCLK; 734 + break; 735 + case SMU_FCLK: 736 + return smu_cmn_send_smc_msg_with_param(smu, 737 + SMU_MSG_GetFclkFrequency, 738 + 0, value); 739 + case SMU_GFXCLK: 740 + case SMU_SCLK: 741 + return smu_cmn_send_smc_msg_with_param(smu, 742 + SMU_MSG_GetGfxclkFrequency, 743 + 0, value); 744 + break; 745 + default: 746 + return -EINVAL; 747 + } 748 + 749 + return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value); 750 + } 751 + 752 + static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, 753 + enum smu_clk_type clk_type, 754 + uint32_t *count) 755 + { 756 + DpmClocks_t *clk_table = smu->smu_table.clocks_table; 757 + 758 + switch (clk_type) { 759 + case SMU_SOCCLK: 760 + *count = clk_table->NumSocClkLevelsEnabled; 761 + break; 762 + case SMU_VCLK: 763 + *count = clk_table->VcnClkLevelsEnabled; 764 + break; 765 + case SMU_DCLK: 766 + *count = clk_table->VcnClkLevelsEnabled; 767 + break; 768 + case SMU_MCLK: 769 + *count = clk_table->NumMemPstatesEnabled; 770 + break; 771 + case SMU_FCLK: 772 + *count = clk_table->NumFclkLevelsEnabled; 773 + break; 774 + default: 775 + break; 776 + } 777 + 778 + return 0; 779 + } 780 + 781 + static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, 782 + enum smu_clk_type clk_type, char 
*buf) 783 + { 784 + int i, size = 0, ret = 0; 785 + uint32_t cur_value = 0, value = 0, count = 0; 786 + uint32_t min, max; 787 + 788 + smu_cmn_get_sysfs_buf(&buf, &size); 789 + 790 + switch (clk_type) { 791 + case SMU_OD_SCLK: 792 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); 793 + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", 794 + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 795 + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", 796 + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 797 + break; 798 + case SMU_OD_RANGE: 799 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 800 + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 801 + smu->gfx_default_hard_min_freq, 802 + smu->gfx_default_soft_max_freq); 803 + break; 804 + case SMU_SOCCLK: 805 + case SMU_VCLK: 806 + case SMU_DCLK: 807 + case SMU_MCLK: 808 + case SMU_FCLK: 809 + ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value); 810 + if (ret) 811 + break; 812 + 813 + ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count); 814 + if (ret) 815 + break; 816 + 817 + for (i = 0; i < count; i++) { 818 + ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value); 819 + if (ret) 820 + break; 821 + 822 + size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, 823 + cur_value == value ? "*" : ""); 824 + } 825 + break; 826 + case SMU_GFXCLK: 827 + case SMU_SCLK: 828 + ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value); 829 + if (ret) 830 + break; 831 + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; 832 + max = (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; 833 + if (cur_value == max) 834 + i = 2; 835 + else if (cur_value == min) 836 + i = 0; 837 + else 838 + i = 1; 839 + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, 840 + i == 0 ? "*" : ""); 841 + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 842 + i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */ 843 + i == 1 ? "*" : ""); 844 + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, 845 + i == 2 ? "*" : ""); 846 + break; 847 + default: 848 + break; 849 + } 850 + 851 + return size; 852 + } 853 + 854 + static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, 855 + enum smu_clk_type clk_type, 856 + uint32_t min, 857 + uint32_t max) 858 + { 859 + enum smu_message_type msg_set_min, msg_set_max; 860 + int ret = 0; 861 + 862 + if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) 863 + return -EINVAL; 864 + 865 + switch (clk_type) { 866 + case SMU_GFXCLK: 867 + case SMU_SCLK: 868 + msg_set_min = SMU_MSG_SetHardMinGfxClk; 869 + msg_set_max = SMU_MSG_SetSoftMaxGfxClk; 870 + break; 871 + case SMU_FCLK: 872 + msg_set_min = SMU_MSG_SetHardMinFclkByFreq; 873 + msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq; 874 + break; 875 + case SMU_SOCCLK: 876 + msg_set_min = SMU_MSG_SetHardMinSocclkByFreq; 877 + msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq; 878 + break; 879 + case SMU_VCLK: 880 + case SMU_DCLK: 881 + msg_set_min = SMU_MSG_SetHardMinVcn; 882 + msg_set_max = SMU_MSG_SetSoftMaxVcn; 883 + break; 884 + default: 885 + return -EINVAL; 886 + } 887 + 888 + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); 889 + if (ret) 890 + return ret; 891 + 892 + return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, 893 + max, NULL); 894 + } 895 + 896 + static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, 897 + enum smu_clk_type clk_type, 898 + uint32_t mask) 899 + { 900 + uint32_t soft_min_level = 0, soft_max_level = 0; 901 + uint32_t min_freq = 0, max_freq = 0; 902 + 
int ret = 0; 903 + 904 + soft_min_level = mask ? (ffs(mask) - 1) : 0; 905 + soft_max_level = mask ? (fls(mask) - 1) : 0; 906 + 907 + switch (clk_type) { 908 + case SMU_SOCCLK: 909 + case SMU_FCLK: 910 + case SMU_VCLK: 911 + case SMU_DCLK: 912 + ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); 913 + if (ret) 914 + break; 915 + 916 + ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); 917 + if (ret) 918 + break; 919 + 920 + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); 921 + break; 922 + default: 923 + ret = -EINVAL; 924 + break; 925 + } 926 + 927 + return ret; 928 + } 929 + 930 + static int smu_v14_0_0_set_performance_level(struct smu_context *smu, 931 + enum amd_dpm_forced_level level) 932 + { 933 + struct amdgpu_device *adev = smu->adev; 934 + uint32_t sclk_min = 0, sclk_max = 0; 935 + uint32_t fclk_min = 0, fclk_max = 0; 936 + uint32_t socclk_min = 0, socclk_max = 0; 937 + int ret = 0; 938 + 939 + switch (level) { 940 + case AMD_DPM_FORCED_LEVEL_HIGH: 941 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); 942 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); 943 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); 944 + sclk_min = sclk_max; 945 + fclk_min = fclk_max; 946 + socclk_min = socclk_max; 947 + break; 948 + case AMD_DPM_FORCED_LEVEL_LOW: 949 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); 950 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); 951 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); 952 + sclk_max = sclk_min; 953 + fclk_max = fclk_min; 954 + socclk_max = socclk_min; 955 + break; 956 + case AMD_DPM_FORCED_LEVEL_AUTO: 957 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); 958 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); 959 + smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, 
&socclk_min, &socclk_max); 960 + break; 961 + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 962 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 963 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 964 + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 965 + /* Temporarily do nothing since the optimal clocks haven't been provided yet */ 966 + break; 967 + case AMD_DPM_FORCED_LEVEL_MANUAL: 968 + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 969 + return 0; 970 + default: 971 + dev_err(adev->dev, "Invalid performance level %d\n", level); 972 + return -EINVAL; 973 + } 974 + 975 + if (sclk_min && sclk_max) { 976 + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, 977 + SMU_SCLK, 978 + sclk_min, 979 + sclk_max); 980 + if (ret) 981 + return ret; 982 + 983 + smu->gfx_actual_hard_min_freq = sclk_min; 984 + smu->gfx_actual_soft_max_freq = sclk_max; 985 + } 986 + 987 + if (fclk_min && fclk_max) { 988 + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, 989 + SMU_FCLK, 990 + fclk_min, 991 + fclk_max); 992 + if (ret) 993 + return ret; 994 + } 995 + 996 + if (socclk_min && socclk_max) { 997 + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, 998 + SMU_SOCCLK, 999 + socclk_min, 1000 + socclk_max); 1001 + if (ret) 1002 + return ret; 1003 + } 1004 + 1005 + return ret; 1006 + } 1007 + 1008 + static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) 1009 + { 1010 + DpmClocks_t *clk_table = smu->smu_table.clocks_table; 1011 + 1012 + smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; 1013 + smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; 1014 + smu->gfx_actual_hard_min_freq = 0; 1015 + smu->gfx_actual_soft_max_freq = 0; 1016 + 1017 + return 0; 1018 + } 1019 + 1020 + static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, 1021 + bool enable) 1022 + { 1023 + return smu_cmn_send_smc_msg_with_param(smu, enable ? 
1024 + SMU_MSG_PowerUpVpe : SMU_MSG_PowerDownVpe, 1025 + 0, NULL); 1026 + } 1027 + 1028 + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { 1029 + .check_fw_status = smu_v14_0_check_fw_status, 1030 + .check_fw_version = smu_v14_0_check_fw_version, 1031 + .init_smc_tables = smu_v14_0_0_init_smc_tables, 1032 + .fini_smc_tables = smu_v14_0_0_fini_smc_tables, 1033 + .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, 1034 + .system_features_control = smu_v14_0_0_system_features_control, 1035 + .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 1036 + .send_smc_msg = smu_cmn_send_smc_msg, 1037 + .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, 1038 + .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, 1039 + .set_default_dpm_table = smu_v14_0_set_default_dpm_tables, 1040 + .read_sensor = smu_v14_0_0_read_sensor, 1041 + .is_dpm_running = smu_v14_0_0_is_dpm_running, 1042 + .set_watermarks_table = smu_v14_0_0_set_watermarks_table, 1043 + .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics, 1044 + .get_enabled_mask = smu_cmn_get_enabled_mask, 1045 + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1046 + .set_driver_table_location = smu_v14_0_set_driver_table_location, 1047 + .gfx_off_control = smu_v14_0_gfx_off_control, 1048 + .mode2_reset = smu_v14_0_0_mode2_reset, 1049 + .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq, 1050 + .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, 1051 + .print_clk_levels = smu_v14_0_0_print_clk_levels, 1052 + .force_clk_levels = smu_v14_0_0_force_clk_levels, 1053 + .set_performance_level = smu_v14_0_0_set_performance_level, 1054 + .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters, 1055 + .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, 1056 + .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, 1057 + }; 1058 + 1059 + static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) 1060 + { 1061 + struct amdgpu_device *adev = smu->adev; 1062 + 1063 + 
smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82); 1064 + smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66); 1065 + smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); 1066 + } 1067 + 1068 + void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu) 1069 + { 1070 + 1071 + smu->ppt_funcs = &smu_v14_0_0_ppt_funcs; 1072 + smu->message_map = smu_v14_0_0_message_map; 1073 + smu->feature_map = smu_v14_0_0_feature_mask_map; 1074 + smu->table_map = smu_v14_0_0_table_map; 1075 + smu->is_apu = true; 1076 + 1077 + smu_v14_0_0_set_smu_mailbox_registers(smu); 1078 + }
+28
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.h
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __SMU_V14_0_0_PPT_H__
#define __SMU_V14_0_0_PPT_H__

/* Install the SMU 14.0.0 pptable_funcs and mailbox registers on @smu;
 * called from the swSMU core during early init for IP_VERSION(14, 0, 0).
 */
extern void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu);

#endif