Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.6-rc3 (6667 lines, 203 kB)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END	0x40000

#define VOLTAGE_SCALE	4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

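/*
 * Per-ASIC powertune defaults (struct ci_pt_defaults, see ci_dpm.h). The
 * leading scalars feed the SVI load line, TDC limit and DTE setup helpers
 * below; the two arrays hold the BAPMTI_R / BAPMTI_RC coefficients that
 * ci_populate_bapm_parameters_in_dpm_table() copies into the SMC DPM table.
 */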
static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

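/*
 * DIDT (di/dt) configuration, applied through ci_program_pt_config_registers().
 * Each entry is { offset, mask, shift, value, type }; CISLANDS_CONFIGREG_DIDT_IND
 * marks an indirect DIDT register access. The four groups appear to program the
 * SQ, DB, TD and TCP blocks in turn: the 0x0 / 0x20 / 0x40 / 0x60 entries match
 * the DIDT_*_CTRL0 enable registers toggled in ci_do_enable_didt() below.
 * { 0xFFFFFFFF } terminates the table.
 */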
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

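/*
 * Copy the MC arbitration (DRAM timing and burst time) registers from one
 * register set to another, then point the memory controller at the
 * destination set via MC_ARB_CG, so a memory clock switch picks up the
 * timings that belong to the new frequency.
 */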
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		 ~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}

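/*
 * Map a memory clock to an MC parameter table index. Clocks in this driver
 * are in 10 kHz units, so e.g. a DDR3 clock of 40000 (400 MHz) yields
 * (40000 - 10000) / 5000 + 1 = index 7, and anything at or above 80000
 * (800 MHz) clamps to index 0x0f.
 */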
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}

static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

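/*
 * Convert a voltage in mV to a VID code, working in 0.25 mV steps
 * (VOLTAGE_SCALE = 4): VID = (6200 - 4 * mV) / 25, i.e. the SVI2-style
 * encoding voltage = 1.55 V - VID * 6.25 mV. Example: 1000 mV gives
 * (6200 - 4000) / 25 = 88.
 */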
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

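/*
 * TDC/TDP limits are handed to the SMC in 1/256 units (8.8 fixed point),
 * hence the "* 256" here and for DefaultTdp/TargetTdp below: a 100 A TDC
 * or 100 W TDP limit is encoded as 25600.
 */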
static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, PmFuseTable) +
					    offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
					    (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
					    pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

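/*
 * Locate the PmFuses table in SMC SRAM via the firmware header, fill in
 * each sub-table built above (VID tables, SVI load line, TDC limit, fuzzy
 * fan, GNB LPML VIDs, base leakage), then copy the complete
 * SMU7_Discrete_PmFuses image into SMC RAM in one transfer.
 */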
static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
						    SMU7_FIRMWARE_HEADER_LOCATION +
						    offsetof(SMU7_Firmware_Header, PmFuseTable),
						    &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
						  (u8 *)&pi->smc_powertune_table,
						  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

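/*
 * Walk a { offset, mask, shift, value, type } table until the 0xFFFFFFFF
 * sentinel. CISLANDS_CONFIGREG_CACHE entries only accumulate bits into
 * 'cache'; the next non-cache entry performs the read-modify-write and
 * ORs the cached bits in, so several fields can land in one register write.
 */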
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		gfx_v7_0_enter_rlc_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				gfx_v7_0_exit_rlc_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		gfx_v7_0_exit_rlc_safe_mode(adev);
	}

	return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}

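/*
 * Scale the configurable TDP by adev->pm.dpm.tdp_adjustment (a percentage)
 * and send the result as the overdrive target. With adjust_polarity false,
 * the adjustment is applied as a reduction: configurable_tdp = 150 and
 * tdp_adjustment = 10 gives (100 - 10) * 150 / 100 = 135.
 */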
static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(adev, gate);
}

static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;

}

static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

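/*
 * min_temp/max_temp are in millidegrees C. The DIG_THERM interrupt
 * thresholds are programmed in whole degrees (hence the / 1000), with the
 * requested range clamped to the 0..255 C span the register fields can hold.
 */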
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;
	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

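/*
 * Build the SMC fan table. duty100 is the PWM value for 100% duty; the fan
 * pwm_* fields are in 0.01% units, so fdo_min = pwm_min * duty100 / 10000,
 * and the t_* trip points are in 0.01 C units (the "+ 50" before the / 100
 * rounds to whole degrees). Slope1/Slope2 are the PWM-vs-temperature ramps
 * between the min/med and med/high trip points.
 */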
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}


static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

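/*
 * Fan speed as a percentage of the 100%-duty PWM value:
 * speed = duty * 100 / duty100, so duty = 128 with duty100 = 255 reads
 * back as 50%.
 */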
static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, mode);
	} else {
		/* restart auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		else
			ci_fan_ctrl_set_default_mode(adev);
	}
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(CG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
#endif

static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(adev);
}

static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
					     pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
					      pi->soft_regs_start + reg_offset,
					      value, pi->sram_end);
}

static void ci_init_fps_limits(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);

	}

	return ret;
}

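/*
 * Resolve "virtual" leakage voltage IDs (ATOM_VIRTUAL_VOLTAGE_ID0 + i) to
 * real voltages, either through EVV lookups or through the vbios leakage
 * table, and cache the (id, voltage) pairs in pi->vddc_leakage and
 * pi->vddci_leakage.
 */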
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		else
			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

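/*
 * Freeze/unfreeze bracket sclk/mclk DPM while the corresponding SMC tables
 * are rewritten: ci_freeze_sclk_mclk_dpm() further down is the matching
 * half, and need_update_smu7_dpm_table tracks which levels were touched.
 */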
static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
				 ~MC_SEQ_CNTL_3__CAC_EN_MASK);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

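/*
 * Bring DPM up: enable global power management and dynamic sclk PM, start
 * the SMC voltage controller, then sclk/mclk DPM and, unless disabled by
 * pcie_dpm_key_disabled, PCIe DPM.
 */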
static int ci_start_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(adev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(adev, false);
	if (ret)
		return ret;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
	else
		tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(adev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
		else
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

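/*
 * SMC mailbox convention: a 32-bit argument goes through SMC_MSG_ARG_0,
 * followed by the message itself; replies come back in the same register.
 */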
else 1768 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp); 1769 } 1770 1771 return 0; 1772} 1773#endif 1774 1775static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, 1776 PPSMC_Msg msg, u32 parameter) 1777{ 1778 WREG32(mmSMC_MSG_ARG_0, parameter); 1779 return amdgpu_ci_send_msg_to_smc(adev, msg); 1780} 1781 1782static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev, 1783 PPSMC_Msg msg, u32 *parameter) 1784{ 1785 PPSMC_Result smc_result; 1786 1787 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg); 1788 1789 if ((smc_result == PPSMC_Result_OK) && parameter) 1790 *parameter = RREG32(mmSMC_MSG_ARG_0); 1791 1792 return smc_result; 1793} 1794 1795static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n) 1796{ 1797 struct ci_power_info *pi = ci_get_pi(adev); 1798 1799 if (!pi->sclk_dpm_key_disabled) { 1800 PPSMC_Result smc_result = 1801 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); 1802 if (smc_result != PPSMC_Result_OK) 1803 return -EINVAL; 1804 } 1805 1806 return 0; 1807} 1808 1809static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n) 1810{ 1811 struct ci_power_info *pi = ci_get_pi(adev); 1812 1813 if (!pi->mclk_dpm_key_disabled) { 1814 PPSMC_Result smc_result = 1815 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); 1816 if (smc_result != PPSMC_Result_OK) 1817 return -EINVAL; 1818 } 1819 1820 return 0; 1821} 1822 1823static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n) 1824{ 1825 struct ci_power_info *pi = ci_get_pi(adev); 1826 1827 if (!pi->pcie_dpm_key_disabled) { 1828 PPSMC_Result smc_result = 1829 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n); 1830 if (smc_result != PPSMC_Result_OK) 1831 return -EINVAL; 1832 } 1833 1834 return 0; 1835} 1836 1837static int ci_set_power_limit(struct amdgpu_device *adev, u32 n) 1838{ 1839 struct ci_power_info *pi = ci_get_pi(adev); 1840 1841 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { 1842 PPSMC_Result smc_result = 1843 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n); 1844 if (smc_result != PPSMC_Result_OK) 1845 return -EINVAL; 1846 } 1847 1848 return 0; 1849} 1850 1851static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev, 1852 u32 target_tdp) 1853{ 1854 PPSMC_Result smc_result = 1855 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); 1856 if (smc_result != PPSMC_Result_OK) 1857 return -EINVAL; 1858 return 0; 1859} 1860 1861#if 0 1862static int ci_set_boot_state(struct amdgpu_device *adev) 1863{ 1864 return ci_enable_sclk_mclk_dpm(adev, false); 1865} 1866#endif 1867 1868static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev) 1869{ 1870 u32 sclk_freq; 1871 PPSMC_Result smc_result = 1872 amdgpu_ci_send_msg_to_smc_return_parameter(adev, 1873 PPSMC_MSG_API_GetSclkFrequency, 1874 &sclk_freq); 1875 if (smc_result != PPSMC_Result_OK) 1876 sclk_freq = 0; 1877 1878 return sclk_freq; 1879} 1880 1881static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev) 1882{ 1883 u32 mclk_freq; 1884 PPSMC_Result smc_result = 1885 amdgpu_ci_send_msg_to_smc_return_parameter(adev, 1886 PPSMC_MSG_API_GetMclkFrequency, 1887 &mclk_freq); 1888 if (smc_result != PPSMC_Result_OK) 1889 mclk_freq = 0; 1890 1891 return mclk_freq; 1892} 1893 1894static void ci_dpm_start_smc(struct amdgpu_device *adev) 1895{ 1896 int i; 1897 1898 
amdgpu_ci_program_jump_on_start(adev); 1899 amdgpu_ci_start_smc_clock(adev); 1900 amdgpu_ci_start_smc(adev); 1901 for (i = 0; i < adev->usec_timeout; i++) { 1902 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) 1903 break; 1904 } 1905} 1906 1907static void ci_dpm_stop_smc(struct amdgpu_device *adev) 1908{ 1909 amdgpu_ci_reset_smc(adev); 1910 amdgpu_ci_stop_smc_clock(adev); 1911} 1912 1913static int ci_process_firmware_header(struct amdgpu_device *adev) 1914{ 1915 struct ci_power_info *pi = ci_get_pi(adev); 1916 u32 tmp; 1917 int ret; 1918 1919 ret = amdgpu_ci_read_smc_sram_dword(adev, 1920 SMU7_FIRMWARE_HEADER_LOCATION + 1921 offsetof(SMU7_Firmware_Header, DpmTable), 1922 &tmp, pi->sram_end); 1923 if (ret) 1924 return ret; 1925 1926 pi->dpm_table_start = tmp; 1927 1928 ret = amdgpu_ci_read_smc_sram_dword(adev, 1929 SMU7_FIRMWARE_HEADER_LOCATION + 1930 offsetof(SMU7_Firmware_Header, SoftRegisters), 1931 &tmp, pi->sram_end); 1932 if (ret) 1933 return ret; 1934 1935 pi->soft_regs_start = tmp; 1936 1937 ret = amdgpu_ci_read_smc_sram_dword(adev, 1938 SMU7_FIRMWARE_HEADER_LOCATION + 1939 offsetof(SMU7_Firmware_Header, mcRegisterTable), 1940 &tmp, pi->sram_end); 1941 if (ret) 1942 return ret; 1943 1944 pi->mc_reg_table_start = tmp; 1945 1946 ret = amdgpu_ci_read_smc_sram_dword(adev, 1947 SMU7_FIRMWARE_HEADER_LOCATION + 1948 offsetof(SMU7_Firmware_Header, FanTable), 1949 &tmp, pi->sram_end); 1950 if (ret) 1951 return ret; 1952 1953 pi->fan_table_start = tmp; 1954 1955 ret = amdgpu_ci_read_smc_sram_dword(adev, 1956 SMU7_FIRMWARE_HEADER_LOCATION + 1957 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), 1958 &tmp, pi->sram_end); 1959 if (ret) 1960 return ret; 1961 1962 pi->arb_table_start = tmp; 1963 1964 return 0; 1965} 1966 1967static void ci_read_clock_registers(struct amdgpu_device *adev) 1968{ 1969 struct ci_power_info *pi = ci_get_pi(adev); 1970 1971 pi->clock_registers.cg_spll_func_cntl = 1972 RREG32_SMC(ixCG_SPLL_FUNC_CNTL); 1973 pi->clock_registers.cg_spll_func_cntl_2 = 1974 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2); 1975 pi->clock_registers.cg_spll_func_cntl_3 = 1976 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3); 1977 pi->clock_registers.cg_spll_func_cntl_4 = 1978 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4); 1979 pi->clock_registers.cg_spll_spread_spectrum = 1980 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM); 1981 pi->clock_registers.cg_spll_spread_spectrum_2 = 1982 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2); 1983 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL); 1984 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL); 1985 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL); 1986 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL); 1987 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL); 1988 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1); 1989 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2); 1990 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1); 1991 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2); 1992} 1993 1994static void ci_init_sclk_t(struct amdgpu_device *adev) 1995{ 1996 struct ci_power_info *pi = ci_get_pi(adev); 1997 1998 pi->low_sclk_interrupt_t = 0; 1999} 2000 2001static void ci_enable_thermal_protection(struct amdgpu_device *adev, 2002 bool enable) 2003{ 2004 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 2005 2006 if (enable) 2007 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; 2008 else 2009 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; 2010 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 2011} 
2012 2013static void ci_enable_acpi_power_management(struct amdgpu_device *adev) 2014{ 2015 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 2016 2017 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK; 2018 2019 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 2020} 2021 2022#if 0 2023static int ci_enter_ulp_state(struct amdgpu_device *adev) 2024{ 2025 2026 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); 2027 2028 udelay(25000); 2029 2030 return 0; 2031} 2032 2033static int ci_exit_ulp_state(struct amdgpu_device *adev) 2034{ 2035 int i; 2036 2037 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); 2038 2039 udelay(7000); 2040 2041 for (i = 0; i < adev->usec_timeout; i++) { 2042 if (RREG32(mmSMC_RESP_0) == 1) 2043 break; 2044 udelay(1000); 2045 } 2046 2047 return 0; 2048} 2049#endif 2050 2051static int ci_notify_smc_display_change(struct amdgpu_device *adev, 2052 bool has_display) 2053{ 2054 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; 2055 2056 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL; 2057} 2058 2059static int ci_enable_ds_master_switch(struct amdgpu_device *adev, 2060 bool enable) 2061{ 2062 struct ci_power_info *pi = ci_get_pi(adev); 2063 2064 if (enable) { 2065 if (pi->caps_sclk_ds) { 2066 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) 2067 return -EINVAL; 2068 } else { 2069 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 2070 return -EINVAL; 2071 } 2072 } else { 2073 if (pi->caps_sclk_ds) { 2074 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) 2075 return -EINVAL; 2076 } 2077 } 2078 2079 return 0; 2080} 2081 2082static void ci_program_display_gap(struct amdgpu_device *adev) 2083{ 2084 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL); 2085 u32 pre_vbi_time_in_us; 2086 u32 frame_time_in_us; 2087 u32 ref_clock = adev->clock.spll.reference_freq; 2088 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev); 2089 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 2090 2091 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK; 2092 if (adev->pm.dpm.new_active_crtc_count > 0) 2093 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT); 2094 else 2095 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT); 2096 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp); 2097 2098 if (refresh_rate == 0) 2099 refresh_rate = 60; 2100 if (vblank_time == 0xffffffff) 2101 vblank_time = 500; 2102 frame_time_in_us = 1000000 / refresh_rate; 2103 pre_vbi_time_in_us = 2104 frame_time_in_us - 200 - vblank_time; 2105 tmp = pre_vbi_time_in_us * (ref_clock / 100); 2106 2107 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp); 2108 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); 2109 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 2110 2111 2112 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1)); 2113 2114} 2115 2116static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) 2117{ 2118 struct ci_power_info *pi = ci_get_pi(adev); 2119 u32 tmp; 2120 2121 if (enable) { 2122 if (pi->caps_sclk_ss_support) { 2123 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 2124 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK; 2125 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 2126 } 2127 } else { 2128 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM); 2129 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK; 2130 
WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp); 2131 2132 tmp = RREG32_SMC(ixGENERAL_PWRMGT); 2133 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK; 2134 WREG32_SMC(ixGENERAL_PWRMGT, tmp); 2135 } 2136} 2137 2138static void ci_program_sstp(struct amdgpu_device *adev) 2139{ 2140 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER, 2141 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) | 2142 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT))); 2143} 2144 2145static void ci_enable_display_gap(struct amdgpu_device *adev) 2146{ 2147 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL); 2148 2149 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK | 2150 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK); 2151 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) | 2152 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT)); 2153 2154 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp); 2155} 2156 2157static void ci_program_vc(struct amdgpu_device *adev) 2158{ 2159 u32 tmp; 2160 2161 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 2162 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 2163 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); 2164 2165 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0); 2166 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1); 2167 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2); 2168 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3); 2169 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4); 2170 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5); 2171 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6); 2172 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7); 2173} 2174 2175static void ci_clear_vc(struct amdgpu_device *adev) 2176{ 2177 u32 tmp; 2178 2179 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); 2180 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); 2181 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); 2182 2183 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); 2184 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0); 2185 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0); 2186 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0); 2187 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0); 2188 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0); 2189 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0); 2190 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0); 2191} 2192 2193static int ci_upload_firmware(struct amdgpu_device *adev) 2194{ 2195 struct ci_power_info *pi = ci_get_pi(adev); 2196 int i, ret; 2197 2198 for (i = 0; i < adev->usec_timeout; i++) { 2199 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) 2200 break; 2201 } 2202 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1); 2203 2204 amdgpu_ci_stop_smc_clock(adev); 2205 amdgpu_ci_reset_smc(adev); 2206 2207 ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end); 2208 2209 return ret; 2210 2211} 2212 2213static int ci_get_svi2_voltage_table(struct amdgpu_device *adev, 2214 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, 2215 struct atom_voltage_table *voltage_table) 2216{ 2217 u32 i; 2218 2219 if (voltage_dependency_table == NULL) 2220 return -EINVAL; 2221 2222 voltage_table->mask_low = 0; 2223 voltage_table->phase_delay = 0; 2224 2225 voltage_table->count = voltage_dependency_table->count; 2226 for (i = 0; i < voltage_table->count; i++) { 2227 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; 2228 voltage_table->entries[i].smio_low = 0; 2229 } 2230 2231 return 0; 2232} 2233 2234static int 
ci_construct_voltage_tables(struct amdgpu_device *adev) 2235{ 2236 struct ci_power_info *pi = ci_get_pi(adev); 2237 int ret; 2238 2239 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2240 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, 2241 VOLTAGE_OBJ_GPIO_LUT, 2242 &pi->vddc_voltage_table); 2243 if (ret) 2244 return ret; 2245 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2246 ret = ci_get_svi2_voltage_table(adev, 2247 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2248 &pi->vddc_voltage_table); 2249 if (ret) 2250 return ret; 2251 } 2252 2253 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) 2254 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC, 2255 &pi->vddc_voltage_table); 2256 2257 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2258 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, 2259 VOLTAGE_OBJ_GPIO_LUT, 2260 &pi->vddci_voltage_table); 2261 if (ret) 2262 return ret; 2263 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2264 ret = ci_get_svi2_voltage_table(adev, 2265 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2266 &pi->vddci_voltage_table); 2267 if (ret) 2268 return ret; 2269 } 2270 2271 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) 2272 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI, 2273 &pi->vddci_voltage_table); 2274 2275 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { 2276 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, 2277 VOLTAGE_OBJ_GPIO_LUT, 2278 &pi->mvdd_voltage_table); 2279 if (ret) 2280 return ret; 2281 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 2282 ret = ci_get_svi2_voltage_table(adev, 2283 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 2284 &pi->mvdd_voltage_table); 2285 if (ret) 2286 return ret; 2287 } 2288 2289 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD) 2290 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD, 2291 &pi->mvdd_voltage_table); 2292 2293 return 0; 2294} 2295 2296static void ci_populate_smc_voltage_table(struct amdgpu_device *adev, 2297 struct atom_voltage_table_entry *voltage_table, 2298 SMU7_Discrete_VoltageLevel *smc_voltage_table) 2299{ 2300 int ret; 2301 2302 ret = ci_get_std_voltage_value_sidd(adev, voltage_table, 2303 &smc_voltage_table->StdVoltageHiSidd, 2304 &smc_voltage_table->StdVoltageLoSidd); 2305 2306 if (ret) { 2307 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; 2308 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; 2309 } 2310 2311 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); 2312 smc_voltage_table->StdVoltageHiSidd = 2313 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); 2314 smc_voltage_table->StdVoltageLoSidd = 2315 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); 2316} 2317 2318static int ci_populate_smc_vddc_table(struct amdgpu_device *adev, 2319 SMU7_Discrete_DpmTable *table) 2320{ 2321 struct ci_power_info *pi = ci_get_pi(adev); 2322 unsigned int count; 2323 2324 table->VddcLevelCount = pi->vddc_voltage_table.count; 2325 for (count = 0; count < table->VddcLevelCount; count++) { 2326 ci_populate_smc_voltage_table(adev, 2327 &pi->vddc_voltage_table.entries[count], 2328 &table->VddcLevel[count]); 2329 2330 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2331 table->VddcLevel[count].Smio |= 2332 pi->vddc_voltage_table.entries[count].smio_low; 2333 else 2334 
table->VddcLevel[count].Smio = 0; 2335 } 2336 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); 2337 2338 return 0; 2339} 2340 2341static int ci_populate_smc_vddci_table(struct amdgpu_device *adev, 2342 SMU7_Discrete_DpmTable *table) 2343{ 2344 unsigned int count; 2345 struct ci_power_info *pi = ci_get_pi(adev); 2346 2347 table->VddciLevelCount = pi->vddci_voltage_table.count; 2348 for (count = 0; count < table->VddciLevelCount; count++) { 2349 ci_populate_smc_voltage_table(adev, 2350 &pi->vddci_voltage_table.entries[count], 2351 &table->VddciLevel[count]); 2352 2353 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2354 table->VddciLevel[count].Smio |= 2355 pi->vddci_voltage_table.entries[count].smio_low; 2356 else 2357 table->VddciLevel[count].Smio = 0; 2358 } 2359 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); 2360 2361 return 0; 2362} 2363 2364static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev, 2365 SMU7_Discrete_DpmTable *table) 2366{ 2367 struct ci_power_info *pi = ci_get_pi(adev); 2368 unsigned int count; 2369 2370 table->MvddLevelCount = pi->mvdd_voltage_table.count; 2371 for (count = 0; count < table->MvddLevelCount; count++) { 2372 ci_populate_smc_voltage_table(adev, 2373 &pi->mvdd_voltage_table.entries[count], 2374 &table->MvddLevel[count]); 2375 2376 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) 2377 table->MvddLevel[count].Smio |= 2378 pi->mvdd_voltage_table.entries[count].smio_low; 2379 else 2380 table->MvddLevel[count].Smio = 0; 2381 } 2382 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); 2383 2384 return 0; 2385} 2386 2387static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev, 2388 SMU7_Discrete_DpmTable *table) 2389{ 2390 int ret; 2391 2392 ret = ci_populate_smc_vddc_table(adev, table); 2393 if (ret) 2394 return ret; 2395 2396 ret = ci_populate_smc_vddci_table(adev, table); 2397 if (ret) 2398 return ret; 2399 2400 ret = ci_populate_smc_mvdd_table(adev, table); 2401 if (ret) 2402 return ret; 2403 2404 return 0; 2405} 2406 2407static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, 2408 SMU7_Discrete_VoltageLevel *voltage) 2409{ 2410 struct ci_power_info *pi = ci_get_pi(adev); 2411 u32 i = 0; 2412 2413 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 2414 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { 2415 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { 2416 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; 2417 break; 2418 } 2419 } 2420 2421 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) 2422 return -EINVAL; 2423 } 2424 2425 return -EINVAL; 2426} 2427 2428static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev, 2429 struct atom_voltage_table_entry *voltage_table, 2430 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) 2431{ 2432 u16 v_index, idx; 2433 bool voltage_found = false; 2434 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; 2435 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; 2436 2437 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) 2438 return -EINVAL; 2439 2440 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { 2441 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 2442 if (voltage_table->value == 2443 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 2444 voltage_found = true; 2445 if ((u32)v_index < 
adev->pm.dpm.dyn_state.cac_leakage_table.count) 2446 idx = v_index; 2447 else 2448 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 2449 *std_voltage_lo_sidd = 2450 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 2451 *std_voltage_hi_sidd = 2452 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 2453 break; 2454 } 2455 } 2456 2457 if (!voltage_found) { 2458 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { 2459 if (voltage_table->value <= 2460 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { 2461 voltage_found = true; 2462 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) 2463 idx = v_index; 2464 else 2465 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1; 2466 *std_voltage_lo_sidd = 2467 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; 2468 *std_voltage_hi_sidd = 2469 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; 2470 break; 2471 } 2472 } 2473 } 2474 } 2475 2476 return 0; 2477} 2478 2479static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev, 2480 const struct amdgpu_phase_shedding_limits_table *limits, 2481 u32 sclk, 2482 u32 *phase_shedding) 2483{ 2484 unsigned int i; 2485 2486 *phase_shedding = 1; 2487 2488 for (i = 0; i < limits->count; i++) { 2489 if (sclk < limits->entries[i].sclk) { 2490 *phase_shedding = i; 2491 break; 2492 } 2493 } 2494} 2495 2496static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev, 2497 const struct amdgpu_phase_shedding_limits_table *limits, 2498 u32 mclk, 2499 u32 *phase_shedding) 2500{ 2501 unsigned int i; 2502 2503 *phase_shedding = 1; 2504 2505 for (i = 0; i < limits->count; i++) { 2506 if (mclk < limits->entries[i].mclk) { 2507 *phase_shedding = i; 2508 break; 2509 } 2510 } 2511} 2512 2513static int ci_init_arb_table_index(struct amdgpu_device *adev) 2514{ 2515 struct ci_power_info *pi = ci_get_pi(adev); 2516 u32 tmp; 2517 int ret; 2518 2519 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start, 2520 &tmp, pi->sram_end); 2521 if (ret) 2522 return ret; 2523 2524 tmp &= 0x00FFFFFF; 2525 tmp |= MC_CG_ARB_FREQ_F1 << 24; 2526 2527 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start, 2528 tmp, pi->sram_end); 2529} 2530 2531static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev, 2532 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table, 2533 u32 clock, u32 *voltage) 2534{ 2535 u32 i = 0; 2536 2537 if (allowed_clock_voltage_table->count == 0) 2538 return -EINVAL; 2539 2540 for (i = 0; i < allowed_clock_voltage_table->count; i++) { 2541 if (allowed_clock_voltage_table->entries[i].clk >= clock) { 2542 *voltage = allowed_clock_voltage_table->entries[i].v; 2543 return 0; 2544 } 2545 } 2546 2547 *voltage = allowed_clock_voltage_table->entries[i-1].v; 2548 2549 return 0; 2550} 2551 2552static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, 2553 u32 sclk, u32 min_sclk_in_sr) 2554{ 2555 u32 i; 2556 u32 tmp; 2557 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? 
2558 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK; 2559 2560 if (sclk < min) 2561 return 0; 2562 2563 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { 2564 tmp = sclk / (1 << i); 2565 if (tmp >= min || i == 0) 2566 break; 2567 } 2568 2569 return (u8)i; 2570} 2571 2572static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) 2573{ 2574 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 2575} 2576 2577static int ci_reset_to_default(struct amdgpu_device *adev) 2578{ 2579 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? 2580 0 : -EINVAL; 2581} 2582 2583static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev) 2584{ 2585 u32 tmp; 2586 2587 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8; 2588 2589 if (tmp == MC_CG_ARB_FREQ_F0) 2590 return 0; 2591 2592 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); 2593} 2594 2595static void ci_register_patching_mc_arb(struct amdgpu_device *adev, 2596 const u32 engine_clock, 2597 const u32 memory_clock, 2598 u32 *dram_timing2) 2599{ 2600 bool patch; 2601 u32 tmp, tmp2; 2602 2603 tmp = RREG32(mmMC_SEQ_MISC0); 2604 patch = ((tmp & 0x0000f00) == 0x300) ? true : false; 2605 2606 if (patch && 2607 ((adev->pdev->device == 0x67B0) || 2608 (adev->pdev->device == 0x67B1))) { 2609 if ((memory_clock > 100000) && (memory_clock <= 125000)) { 2610 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; 2611 *dram_timing2 &= ~0x00ff0000; 2612 *dram_timing2 |= tmp2 << 16; 2613 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { 2614 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; 2615 *dram_timing2 &= ~0x00ff0000; 2616 *dram_timing2 |= tmp2 << 16; 2617 } 2618 } 2619} 2620 2621static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev, 2622 u32 sclk, 2623 u32 mclk, 2624 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) 2625{ 2626 u32 dram_timing; 2627 u32 dram_timing2; 2628 u32 burst_time; 2629 2630 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk); 2631 2632 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING); 2633 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2); 2634 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK; 2635 2636 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2); 2637 2638 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); 2639 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); 2640 arb_regs->McArbBurstTime = (u8)burst_time; 2641 2642 return 0; 2643} 2644 2645static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev) 2646{ 2647 struct ci_power_info *pi = ci_get_pi(adev); 2648 SMU7_Discrete_MCArbDramTimingTable arb_regs; 2649 u32 i, j; 2650 int ret = 0; 2651 2652 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); 2653 2654 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { 2655 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { 2656 ret = ci_populate_memory_timing_parameters(adev, 2657 pi->dpm_table.sclk_table.dpm_levels[i].value, 2658 pi->dpm_table.mclk_table.dpm_levels[j].value, 2659 &arb_regs.entries[i][j]); 2660 if (ret) 2661 break; 2662 } 2663 } 2664 2665 if (ret == 0) 2666 ret = amdgpu_ci_copy_bytes_to_smc(adev, 2667 pi->arb_table_start, 2668 (u8 *)&arb_regs, 2669 sizeof(SMU7_Discrete_MCArbDramTimingTable), 2670 pi->sram_end); 2671 2672 return ret; 2673} 2674 2675static int ci_program_memory_timing_parameters(struct amdgpu_device *adev) 2676{ 2677 struct ci_power_info *pi = ci_get_pi(adev); 2678 2679 if
(pi->need_update_smu7_dpm_table == 0) 2680 return 0; 2681 2682 return ci_do_program_memory_timing_parameters(adev); 2683} 2684 2685static void ci_populate_smc_initial_state(struct amdgpu_device *adev, 2686 struct amdgpu_ps *amdgpu_boot_state) 2687{ 2688 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state); 2689 struct ci_power_info *pi = ci_get_pi(adev); 2690 u32 level = 0; 2691 2692 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { 2693 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= 2694 boot_state->performance_levels[0].sclk) { 2695 pi->smc_state_table.GraphicsBootLevel = level; 2696 break; 2697 } 2698 } 2699 2700 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { 2701 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= 2702 boot_state->performance_levels[0].mclk) { 2703 pi->smc_state_table.MemoryBootLevel = level; 2704 break; 2705 } 2706 } 2707} 2708 2709static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) 2710{ 2711 u32 i; 2712 u32 mask_value = 0; 2713 2714 for (i = dpm_table->count; i > 0; i--) { 2715 mask_value = mask_value << 1; 2716 if (dpm_table->dpm_levels[i-1].enabled) 2717 mask_value |= 0x1; 2718 else 2719 mask_value &= 0xFFFFFFFE; 2720 } 2721 2722 return mask_value; 2723} 2724 2725static void ci_populate_smc_link_level(struct amdgpu_device *adev, 2726 SMU7_Discrete_DpmTable *table) 2727{ 2728 struct ci_power_info *pi = ci_get_pi(adev); 2729 struct ci_dpm_table *dpm_table = &pi->dpm_table; 2730 u32 i; 2731 2732 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) { 2733 table->LinkLevel[i].PcieGenSpeed = 2734 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; 2735 table->LinkLevel[i].PcieLaneCount = 2736 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); 2737 table->LinkLevel[i].EnabledForActivity = 1; 2738 table->LinkLevel[i].DownT = cpu_to_be32(5); 2739 table->LinkLevel[i].UpT = cpu_to_be32(30); 2740 } 2741 2742 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; 2743 pi->dpm_level_enable_mask.pcie_dpm_enable_mask = 2744 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); 2745} 2746 2747static int ci_populate_smc_uvd_level(struct amdgpu_device *adev, 2748 SMU7_Discrete_DpmTable *table) 2749{ 2750 u32 count; 2751 struct atom_clock_dividers dividers; 2752 int ret = -EINVAL; 2753 2754 table->UvdLevelCount = 2755 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; 2756 2757 for (count = 0; count < table->UvdLevelCount; count++) { 2758 table->UvdLevel[count].VclkFrequency = 2759 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; 2760 table->UvdLevel[count].DclkFrequency = 2761 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; 2762 table->UvdLevel[count].MinVddc = 2763 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2764 table->UvdLevel[count].MinVddcPhases = 1; 2765 2766 ret = amdgpu_atombios_get_clock_dividers(adev, 2767 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2768 table->UvdLevel[count].VclkFrequency, false, &dividers); 2769 if (ret) 2770 return ret; 2771 2772 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; 2773 2774 ret = amdgpu_atombios_get_clock_dividers(adev, 2775 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2776 table->UvdLevel[count].DclkFrequency, false, &dividers); 2777 if (ret) 2778 return ret; 2779 2780 
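/*
 * Editor's note: the SMC consumes these SMU7_Discrete_* tables in
 * big-endian layout, which is why each level's fields are converted
 * with cpu_to_be32()/cpu_to_be16() immediately below, once all of the
 * values have been computed.  The same populate-then-byteswap pattern
 * repeats for the VCE, ACP and SAMU levels that follow.
 */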
table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; 2781 2782 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); 2783 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); 2784 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); 2785 } 2786 2787 return ret; 2788} 2789 2790static int ci_populate_smc_vce_level(struct amdgpu_device *adev, 2791 SMU7_Discrete_DpmTable *table) 2792{ 2793 u32 count; 2794 struct atom_clock_dividers dividers; 2795 int ret = -EINVAL; 2796 2797 table->VceLevelCount = 2798 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; 2799 2800 for (count = 0; count < table->VceLevelCount; count++) { 2801 table->VceLevel[count].Frequency = 2802 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; 2803 table->VceLevel[count].MinVoltage = 2804 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2805 table->VceLevel[count].MinPhases = 1; 2806 2807 ret = amdgpu_atombios_get_clock_dividers(adev, 2808 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2809 table->VceLevel[count].Frequency, false, &dividers); 2810 if (ret) 2811 return ret; 2812 2813 table->VceLevel[count].Divider = (u8)dividers.post_divider; 2814 2815 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); 2816 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); 2817 } 2818 2819 return ret; 2820 2821} 2822 2823static int ci_populate_smc_acp_level(struct amdgpu_device *adev, 2824 SMU7_Discrete_DpmTable *table) 2825{ 2826 u32 count; 2827 struct atom_clock_dividers dividers; 2828 int ret = -EINVAL; 2829 2830 table->AcpLevelCount = (u8) 2831 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); 2832 2833 for (count = 0; count < table->AcpLevelCount; count++) { 2834 table->AcpLevel[count].Frequency = 2835 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; 2836 table->AcpLevel[count].MinVoltage = 2837 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; 2838 table->AcpLevel[count].MinPhases = 1; 2839 2840 ret = amdgpu_atombios_get_clock_dividers(adev, 2841 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2842 table->AcpLevel[count].Frequency, false, &dividers); 2843 if (ret) 2844 return ret; 2845 2846 table->AcpLevel[count].Divider = (u8)dividers.post_divider; 2847 2848 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); 2849 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); 2850 } 2851 2852 return ret; 2853} 2854 2855static int ci_populate_smc_samu_level(struct amdgpu_device *adev, 2856 SMU7_Discrete_DpmTable *table) 2857{ 2858 u32 count; 2859 struct atom_clock_dividers dividers; 2860 int ret = -EINVAL; 2861 2862 table->SamuLevelCount = 2863 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; 2864 2865 for (count = 0; count < table->SamuLevelCount; count++) { 2866 table->SamuLevel[count].Frequency = 2867 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; 2868 table->SamuLevel[count].MinVoltage = 2869 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; 2870 table->SamuLevel[count].MinPhases = 1; 2871 2872 ret = amdgpu_atombios_get_clock_dividers(adev, 2873 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, 2874 table->SamuLevel[count].Frequency, false, &dividers); 2875 if (ret) 2876 return 
ret; 2877 2878 table->SamuLevel[count].Divider = (u8)dividers.post_divider; 2879 2880 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); 2881 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); 2882 } 2883 2884 return ret; 2885} 2886 2887static int ci_calculate_mclk_params(struct amdgpu_device *adev, 2888 u32 memory_clock, 2889 SMU7_Discrete_MemoryLevel *mclk, 2890 bool strobe_mode, 2891 bool dll_state_on) 2892{ 2893 struct ci_power_info *pi = ci_get_pi(adev); 2894 u32 dll_cntl = pi->clock_registers.dll_cntl; 2895 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 2896 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; 2897 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; 2898 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; 2899 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; 2900 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; 2901 u32 mpll_ss1 = pi->clock_registers.mpll_ss1; 2902 u32 mpll_ss2 = pi->clock_registers.mpll_ss2; 2903 struct atom_mpll_param mpll_param; 2904 int ret; 2905 2906 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); 2907 if (ret) 2908 return ret; 2909 2910 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK; 2911 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT); 2912 2913 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK | 2914 MPLL_FUNC_CNTL_1__VCO_MODE_MASK); 2915 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT | 2916 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) | 2917 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT); 2918 2919 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK; 2920 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); 2921 2922 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 2923 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK | 2924 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK); 2925 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) | 2926 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); 2927 } 2928 2929 if (pi->caps_mclk_ss_support) { 2930 struct amdgpu_atom_ss ss; 2931 u32 freq_nom; 2932 u32 tmp; 2933 u32 reference_clock = adev->clock.mpll.reference_freq; 2934 2935 if (mpll_param.qdr == 1) 2936 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); 2937 else 2938 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); 2939 2940 tmp = (freq_nom / reference_clock); 2941 tmp = tmp * tmp; 2942 if (amdgpu_atombios_get_asic_ss_info(adev, &ss, 2943 ASIC_INTERNAL_MEMORY_SS, freq_nom)) { 2944 u32 clks = reference_clock * 5 / ss.rate; 2945 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); 2946 2947 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK; 2948 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT); 2949 2950 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK; 2951 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT); 2952 } 2953 } 2954 2955 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK; 2956 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT); 2957 2958 if (dll_state_on) 2959 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | 2960 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK; 2961 else 2962 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | 2963 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK); 2964 2965 mclk->MclkFrequency = memory_clock; 2966 mclk->MpllFuncCntl = 
mpll_func_cntl; 2967 mclk->MpllFuncCntl_1 = mpll_func_cntl_1; 2968 mclk->MpllFuncCntl_2 = mpll_func_cntl_2; 2969 mclk->MpllAdFuncCntl = mpll_ad_func_cntl; 2970 mclk->MpllDqFuncCntl = mpll_dq_func_cntl; 2971 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; 2972 mclk->DllCntl = dll_cntl; 2973 mclk->MpllSs1 = mpll_ss1; 2974 mclk->MpllSs2 = mpll_ss2; 2975 2976 return 0; 2977} 2978 2979static int ci_populate_single_memory_level(struct amdgpu_device *adev, 2980 u32 memory_clock, 2981 SMU7_Discrete_MemoryLevel *memory_level) 2982{ 2983 struct ci_power_info *pi = ci_get_pi(adev); 2984 int ret; 2985 bool dll_state_on; 2986 2987 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { 2988 ret = ci_get_dependency_volt_by_clk(adev, 2989 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, 2990 memory_clock, &memory_level->MinVddc); 2991 if (ret) 2992 return ret; 2993 } 2994 2995 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { 2996 ret = ci_get_dependency_volt_by_clk(adev, 2997 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, 2998 memory_clock, &memory_level->MinVddci); 2999 if (ret) 3000 return ret; 3001 } 3002 3003 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { 3004 ret = ci_get_dependency_volt_by_clk(adev, 3005 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, 3006 memory_clock, &memory_level->MinMvdd); 3007 if (ret) 3008 return ret; 3009 } 3010 3011 memory_level->MinVddcPhases = 1; 3012 3013 if (pi->vddc_phase_shed_control) 3014 ci_populate_phase_value_based_on_mclk(adev, 3015 &adev->pm.dpm.dyn_state.phase_shedding_limits_table, 3016 memory_clock, 3017 &memory_level->MinVddcPhases); 3018 3019 memory_level->EnabledForThrottle = 1; 3020 memory_level->UpH = 0; 3021 memory_level->DownH = 100; 3022 memory_level->VoltageDownH = 0; 3023 memory_level->ActivityLevel = (u16)pi->mclk_activity_target; 3024 3025 memory_level->StutterEnable = false; 3026 memory_level->StrobeEnable = false; 3027 memory_level->EdcReadEnable = false; 3028 memory_level->EdcWriteEnable = false; 3029 memory_level->RttEnable = false; 3030 3031 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3032 3033 if (pi->mclk_stutter_mode_threshold && 3034 (memory_clock <= pi->mclk_stutter_mode_threshold) && 3035 (pi->uvd_enabled == false) && 3036 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && 3037 (adev->pm.dpm.new_active_crtc_count <= 2)) 3038 memory_level->StutterEnable = true; 3039 3040 if (pi->mclk_strobe_mode_threshold && 3041 (memory_clock <= pi->mclk_strobe_mode_threshold)) 3042 memory_level->StrobeEnable = 1; 3043 3044 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { 3045 memory_level->StrobeRatio = 3046 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); 3047 if (pi->mclk_edc_enable_threshold && 3048 (memory_clock > pi->mclk_edc_enable_threshold)) 3049 memory_level->EdcReadEnable = true; 3050 3051 if (pi->mclk_edc_wr_enable_threshold && 3052 (memory_clock > pi->mclk_edc_wr_enable_threshold)) 3053 memory_level->EdcWriteEnable = true; 3054 3055 if (memory_level->StrobeEnable) { 3056 if (ci_get_mclk_frequency_ratio(memory_clock, true) >= 3057 ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf)) 3058 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 3059 else 3060 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? 
true : false; 3061 } else { 3062 dll_state_on = pi->dll_default_on; 3063 } 3064 } else { 3065 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock); 3066 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false; 3067 } 3068 3069 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); 3070 if (ret) 3071 return ret; 3072 3073 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); 3074 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); 3075 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); 3076 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); 3077 3078 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); 3079 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); 3080 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); 3081 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); 3082 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); 3083 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); 3084 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); 3085 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); 3086 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); 3087 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); 3088 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); 3089 3090 return 0; 3091} 3092 3093static int ci_populate_smc_acpi_level(struct amdgpu_device *adev, 3094 SMU7_Discrete_DpmTable *table) 3095{ 3096 struct ci_power_info *pi = ci_get_pi(adev); 3097 struct atom_clock_dividers dividers; 3098 SMU7_Discrete_VoltageLevel voltage_level; 3099 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; 3100 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; 3101 u32 dll_cntl = pi->clock_registers.dll_cntl; 3102 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; 3103 int ret; 3104 3105 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; 3106 3107 if (pi->acpi_vddc) 3108 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); 3109 else 3110 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); 3111 3112 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 
0 : 1; 3113 3114 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq; 3115 3116 ret = amdgpu_atombios_get_clock_dividers(adev, 3117 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 3118 table->ACPILevel.SclkFrequency, false, &dividers); 3119 if (ret) 3120 return ret; 3121 3122 table->ACPILevel.SclkDid = (u8)dividers.post_divider; 3123 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3124 table->ACPILevel.DeepSleepDivId = 0; 3125 3126 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK; 3127 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK; 3128 3129 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; 3130 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT); 3131 3132 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; 3133 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; 3134 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; 3135 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; 3136 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; 3137 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; 3138 table->ACPILevel.CcPwrDynRm = 0; 3139 table->ACPILevel.CcPwrDynRm1 = 0; 3140 3141 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); 3142 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); 3143 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); 3144 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); 3145 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); 3146 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); 3147 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); 3148 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); 3149 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); 3150 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); 3151 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); 3152 3153 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; 3154 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; 3155 3156 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 3157 if (pi->acpi_vddci) 3158 table->MemoryACPILevel.MinVddci = 3159 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); 3160 else 3161 table->MemoryACPILevel.MinVddci = 3162 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); 3163 } 3164 3165 if (ci_populate_mvdd_value(adev, 0, &voltage_level)) 3166 table->MemoryACPILevel.MinMvdd = 0; 3167 else 3168 table->MemoryACPILevel.MinMvdd = 3169 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); 3170 3171 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK | 3172 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK; 3173 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | 3174 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK); 3175 3176 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK); 3177 3178 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); 3179 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); 3180 table->MemoryACPILevel.MpllAdFuncCntl = 3181 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); 3182 table->MemoryACPILevel.MpllDqFuncCntl = 3183 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); 3184 table->MemoryACPILevel.MpllFuncCntl = 3185 
cpu_to_be32(pi->clock_registers.mpll_func_cntl); 3186 table->MemoryACPILevel.MpllFuncCntl_1 = 3187 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); 3188 table->MemoryACPILevel.MpllFuncCntl_2 = 3189 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); 3190 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); 3191 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); 3192 3193 table->MemoryACPILevel.EnabledForThrottle = 0; 3194 table->MemoryACPILevel.EnabledForActivity = 0; 3195 table->MemoryACPILevel.UpH = 0; 3196 table->MemoryACPILevel.DownH = 100; 3197 table->MemoryACPILevel.VoltageDownH = 0; 3198 table->MemoryACPILevel.ActivityLevel = 3199 cpu_to_be16((u16)pi->mclk_activity_target); 3200 3201 table->MemoryACPILevel.StutterEnable = false; 3202 table->MemoryACPILevel.StrobeEnable = false; 3203 table->MemoryACPILevel.EdcReadEnable = false; 3204 table->MemoryACPILevel.EdcWriteEnable = false; 3205 table->MemoryACPILevel.RttEnable = false; 3206 3207 return 0; 3208} 3209 3210 3211static int ci_enable_ulv(struct amdgpu_device *adev, bool enable) 3212{ 3213 struct ci_power_info *pi = ci_get_pi(adev); 3214 struct ci_ulv_parm *ulv = &pi->ulv; 3215 3216 if (ulv->supported) { 3217 if (enable) 3218 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? 3219 0 : -EINVAL; 3220 else 3221 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? 3222 0 : -EINVAL; 3223 } 3224 3225 return 0; 3226} 3227 3228static int ci_populate_ulv_level(struct amdgpu_device *adev, 3229 SMU7_Discrete_Ulv *state) 3230{ 3231 struct ci_power_info *pi = ci_get_pi(adev); 3232 u16 ulv_voltage = adev->pm.dpm.backbias_response_time; 3233 3234 state->CcPwrDynRm = 0; 3235 state->CcPwrDynRm1 = 0; 3236 3237 if (ulv_voltage == 0) { 3238 pi->ulv.supported = false; 3239 return 0; 3240 } 3241 3242 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { 3243 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3244 state->VddcOffset = 0; 3245 else 3246 state->VddcOffset = 3247 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; 3248 } else { 3249 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) 3250 state->VddcOffsetVid = 0; 3251 else 3252 state->VddcOffsetVid = (u8) 3253 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * 3254 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); 3255 } 3256 state->VddcPhase = pi->vddc_phase_shed_control ? 
0 : 1; 3257 3258 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); 3259 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); 3260 state->VddcOffset = cpu_to_be16(state->VddcOffset); 3261 3262 return 0; 3263} 3264 3265static int ci_calculate_sclk_params(struct amdgpu_device *adev, 3266 u32 engine_clock, 3267 SMU7_Discrete_GraphicsLevel *sclk) 3268{ 3269 struct ci_power_info *pi = ci_get_pi(adev); 3270 struct atom_clock_dividers dividers; 3271 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; 3272 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; 3273 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; 3274 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; 3275 u32 reference_clock = adev->clock.spll.reference_freq; 3276 u32 reference_divider; 3277 u32 fbdiv; 3278 int ret; 3279 3280 ret = amdgpu_atombios_get_clock_dividers(adev, 3281 COMPUTE_GPUCLK_INPUT_FLAG_SCLK, 3282 engine_clock, false, &dividers); 3283 if (ret) 3284 return ret; 3285 3286 reference_divider = 1 + dividers.ref_div; 3287 fbdiv = dividers.fb_div & 0x3FFFFFF; 3288 3289 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK; 3290 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT); 3291 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK; 3292 3293 if (pi->caps_sclk_ss_support) { 3294 struct amdgpu_atom_ss ss; 3295 u32 vco_freq = engine_clock * dividers.post_div; 3296 3297 if (amdgpu_atombios_get_asic_ss_info(adev, &ss, 3298 ASIC_INTERNAL_ENGINE_SS, vco_freq)) { 3299 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); 3300 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); 3301 3302 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK); 3303 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT); 3304 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT); 3305 3306 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK; 3307 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT); 3308 } 3309 } 3310 3311 sclk->SclkFrequency = engine_clock; 3312 sclk->CgSpllFuncCntl3 = spll_func_cntl_3; 3313 sclk->CgSpllFuncCntl4 = spll_func_cntl_4; 3314 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; 3315 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; 3316 sclk->SclkDid = (u8)dividers.post_divider; 3317 3318 return 0; 3319} 3320 3321static int ci_populate_single_graphic_level(struct amdgpu_device *adev, 3322 u32 engine_clock, 3323 u16 sclk_activity_level_t, 3324 SMU7_Discrete_GraphicsLevel *graphic_level) 3325{ 3326 struct ci_power_info *pi = ci_get_pi(adev); 3327 int ret; 3328 3329 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level); 3330 if (ret) 3331 return ret; 3332 3333 ret = ci_get_dependency_volt_by_clk(adev, 3334 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, 3335 engine_clock, &graphic_level->MinVddc); 3336 if (ret) 3337 return ret; 3338 3339 graphic_level->SclkFrequency = engine_clock; 3340 3341 graphic_level->Flags = 0; 3342 graphic_level->MinVddcPhases = 1; 3343 3344 if (pi->vddc_phase_shed_control) 3345 ci_populate_phase_value_based_on_sclk(adev, 3346 &adev->pm.dpm.dyn_state.phase_shedding_limits_table, 3347 engine_clock, 3348 &graphic_level->MinVddcPhases); 3349 3350 graphic_level->ActivityLevel = sclk_activity_level_t; 3351 3352 graphic_level->CcPwrDynRm = 0; 3353 graphic_level->CcPwrDynRm1 = 0; 3354 graphic_level->EnabledForThrottle = 1; 3355 
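/*
 * Editor's note: UpH/DownH below appear to be the SMC's up/down
 * switching hysteresis for this level (graphics levels use 0/0, the
 * memory levels above use DownH = 100).  DeepSleepDivId is only filled
 * in when caps_sclk_ds is set, via ci_get_sleep_divider_id_from_clock()
 * defined earlier, and is cleared again for levels above index 1 in
 * ci_populate_all_graphic_levels().
 */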
graphic_level->UpH = 0; 3356 graphic_level->DownH = 0; 3357 graphic_level->VoltageDownH = 0; 3358 graphic_level->PowerThrottle = 0; 3359 3360 if (pi->caps_sclk_ds) 3361 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev, 3362 engine_clock, 3363 CISLAND_MINIMUM_ENGINE_CLOCK); 3364 3365 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; 3366 3367 graphic_level->Flags = cpu_to_be32(graphic_level->Flags); 3368 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); 3369 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); 3370 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); 3371 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); 3372 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); 3373 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); 3374 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); 3375 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); 3376 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); 3377 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); 3378 3379 return 0; 3380} 3381 3382static int ci_populate_all_graphic_levels(struct amdgpu_device *adev) 3383{ 3384 struct ci_power_info *pi = ci_get_pi(adev); 3385 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3386 u32 level_array_address = pi->dpm_table_start + 3387 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); 3388 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * 3389 SMU7_MAX_LEVELS_GRAPHICS; 3390 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; 3391 u32 i, ret; 3392 3393 memset(levels, 0, level_array_size); 3394 3395 for (i = 0; i < dpm_table->sclk_table.count; i++) { 3396 ret = ci_populate_single_graphic_level(adev, 3397 dpm_table->sclk_table.dpm_levels[i].value, 3398 (u16)pi->activity_target[i], 3399 &pi->smc_state_table.GraphicsLevel[i]); 3400 if (ret) 3401 return ret; 3402 if (i > 1) 3403 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; 3404 if (i == (dpm_table->sclk_table.count - 1)) 3405 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = 3406 PPSMC_DISPLAY_WATERMARK_HIGH; 3407 } 3408 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; 3409 3410 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; 3411 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 3412 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); 3413 3414 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address, 3415 (u8 *)levels, level_array_size, 3416 pi->sram_end); 3417 if (ret) 3418 return ret; 3419 3420 return 0; 3421} 3422 3423static int ci_populate_ulv_state(struct amdgpu_device *adev, 3424 SMU7_Discrete_Ulv *ulv_level) 3425{ 3426 return ci_populate_ulv_level(adev, ulv_level); 3427} 3428 3429static int ci_populate_all_memory_levels(struct amdgpu_device *adev) 3430{ 3431 struct ci_power_info *pi = ci_get_pi(adev); 3432 struct ci_dpm_table *dpm_table = &pi->dpm_table; 3433 u32 level_array_address = pi->dpm_table_start + 3434 offsetof(SMU7_Discrete_DpmTable, MemoryLevel); 3435 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * 3436 SMU7_MAX_LEVELS_MEMORY; 3437 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; 3438 u32 i, ret; 3439 3440 memset(levels, 0, level_array_size); 3441 3442 for (i = 0; i < dpm_table->mclk_table.count; i++) { 3443 if 
static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 count)
{
	u32 i;

	dpm_table->count = count;
	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
		dpm_table->dpm_levels[i].enabled = false;
}

static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
	dpm_table->dpm_levels[index].enabled = true;
}

static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	if (adev->asic_type == CHIP_BONAIRE)
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.max);
	else
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}

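/*
 * Build the default sclk/mclk/vddc/vddci/mvdd DPM tables from the power-play
 * dependency tables, collapsing consecutive duplicate clocks into a single
 * level.
 */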
static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_cac_leakage_table *std_voltage_table =
		&adev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_table == NULL)
		return -EINVAL;
	if (allowed_mclk_table->count < 1)
		return -EINVAL;

	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.sclk_table,
				  SMU7_MAX_LEVELS_GRAPHICS);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mclk_table,
				  SMU7_MAX_LEVELS_MEMORY);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddc_table,
				  SMU7_MAX_LEVELS_VDDC);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddci_table,
				  SMU7_MAX_LEVELS_VDDCI);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mvdd_table,
				  SMU7_MAX_LEVELS_MVDD);

	pi->dpm_table.sclk_table.count = 0;
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
		     allowed_sclk_vddc_table->entries[i].clk)) {
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
				allowed_sclk_vddc_table->entries[i].clk;
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
				(i == 0) ? true : false;
			pi->dpm_table.sclk_table.count++;
		}
	}

	pi->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
		     allowed_mclk_table->entries[i].clk)) {
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
				allowed_mclk_table->entries[i].clk;
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
				(i == 0) ? true : false;
			pi->dpm_table.mclk_table.count++;
		}
	}

	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		pi->dpm_table.vddc_table.dpm_levels[i].value =
			allowed_sclk_vddc_table->entries[i].v;
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
			std_voltage_table->entries[i].leakage;
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

	allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.vddci_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
	}

	allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
	}

	ci_setup_default_pcie_tables(adev);

	return 0;
}

static int ci_find_boot_level(struct ci_single_dpm_table *table,
			      u32 value, u32 *boot_level)
{
	u32 i;
	int ret = -EINVAL;

	for(i = 0; i < table->count; i++) {
		if (value == table->dpm_levels[i].value) {
			*boot_level = i;
			ret = 0;
		}
	}

	return ret;
}

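/*
 * Build the complete SMU7 discrete DPM table: system flags, per-level
 * tables, boot levels and thermal limits. Scalar fields are byte-swapped
 * and everything up to (but not including) the three PID controllers is
 * copied into SMC SRAM.
 */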
static int ci_init_smc_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(adev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(adev, table);

	ci_init_fps_limits(adev);

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(adev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(adev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(adev, table);

	ret = ci_populate_smc_acpi_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(adev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(adev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(adev, table);
	if (ret)
		return ret;

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(adev, amdgpu_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					  (u8 *)&table->SystemFlags,
					  sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

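/*
 * State trimming: disable every DPM level that falls outside the limits of
 * the requested power state, and de-duplicate the remaining PCIe entries.
 */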
static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}

static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}

static int ci_trim_dpm_states(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(adev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}

static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
{
	struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct amdgpu_clock_voltage_dependency_table *vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									 PPSMC_MSG_VddC_Request,
									 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}

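/*
 * Push the current sclk/mclk level enable masks to the SMC; the equivalent
 * PCIe mask message is compiled out below.
 */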
static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(adev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_SCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_MCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

#if 0
	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_PCIeDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}
#endif

	return 0;
}

static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
						   struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (adev->pm.dpm.current_active_crtc_count !=
	    adev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}

static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
						       struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	int ret;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		ret = ci_populate_all_graphic_levels(adev);
		if (ret)
			return ret;
	}

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		ret = ci_populate_all_memory_levels(adev);
		if (ret)
			return ret;
	}

	return 0;
}

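/*
 * The UVD/VCE DPM enable masks are built from the clock-voltage dependency
 * tables, capped at the current vddc limit. While UVD is active, mclk
 * level 0 is additionally kept out of the mclk mask.
 */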
static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_VCEDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

#if 0
static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_SAMUDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}
	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_ACPDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif

static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		if (pi->caps_uvd_dpm ||
		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_uvd_dpm(adev, !gate);
}

static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

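/* Switch VCE DPM on/off when the required encode clock (evclk) changes. */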
static int ci_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 tmp;

	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
		if (amdgpu_new_state->evclk) {
			/* turn the clocks on when encoding */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							   AMD_CG_STATE_UNGATE);
			if (ret)
				return ret;

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
			tmp = RREG32_SMC(ixDPM_TABLE_475);
			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
			tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
			WREG32_SMC(ixDPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(adev, true);
		} else {
			/* turn the clocks off when not encoding */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							   AMD_CG_STATE_GATE);
			if (ret)
				return ret;

			ret = ci_enable_vce_dpm(adev, false);
		}
	}
	return ret;
}

#if 0
static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	return ci_enable_samu_dpm(adev, gate);
}

static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(adev, !gate);
}
#endif

static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
					     struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_trim_dpm_states(adev, amdgpu_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}

static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}

static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amdgpu_dpm_forced_level level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp, levels, i;
	int ret;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(adev, level);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = amdgpu_ci_send_msg_to_smc(adev,
							       PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

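/*
 * Derive the EMRS/MRS(/auto-command) shadow registers that the SMC needs
 * from the VBIOS-supplied MC registers and append them to the table.
 */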
static int ci_set_mc_special_registers(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch(table->mc_reg_address[i].s1) {
		case mmMC_SEQ_MISC1:
			temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case mmMC_SEQ_RESERVE_M:
			temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}

static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch(in_reg) {
	case mmMC_SEQ_RAS_TIMING:
		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
		break;
	case mmMC_SEQ_DLL_STBY:
		*out_reg = mmMC_SEQ_DLL_STBY_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD0:
		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD1:
		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
		break;
	case mmMC_SEQ_G5PDX_CTRL:
		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
		break;
	case mmMC_SEQ_CAS_TIMING:
		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING:
		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING2:
		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CMD:
		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CTL:
		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
		break;
	case mmMC_SEQ_RD_CTL_D0:
		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
		break;
	case mmMC_SEQ_RD_CTL_D1:
		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
		break;
	case mmMC_SEQ_WR_CTL_D0:
		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
		break;
	case mmMC_SEQ_WR_CTL_D1:
		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
		break;
	case mmMC_PMG_CMD_EMRS:
		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case mmMC_PMG_CMD_MRS:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
		break;
	case mmMC_PMG_CMD_MRS1:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case mmMC_SEQ_PMG_TIMING:
		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
		break;
	case mmMC_PMG_CMD_MRS2:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case mmMC_SEQ_WR_CTL_2:
		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}

static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}

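/*
 * Hawaii (0x67B0/0x67B1) MC sequence fixups: patch selected register values
 * for the table entries whose mclk_max strap is 125000 or 137500.
 */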
static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1) {
			case mmMC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case mmMC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_2:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case mmMC_SEQ_CAS_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case mmMC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000035;
				}
				break;
			default:
				break;
			}
		}

		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}

static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = ci_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

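	/*
	 * Mirror the live MC sequence registers into their _LP shadow copies
	 * before the VBIOS MC register table is parsed below.
	 */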
	WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
	WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
	WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
	WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
	WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
	WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
	WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
	WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
	WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
	WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
	WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
	WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
	WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
	WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
	WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
	WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
	WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
	WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));

	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}

static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}

static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}

static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(adev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}

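/*
 * Two upload paths for the MC register table: the full table (addresses and
 * data) at DPM enable time, and a data-only refresh when the mclk levels
 * change.
 */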
static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;
	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start,
					   (u8 *)&pi->smc_mc_reg_table,
					   sizeof(SMU7_Discrete_MCRegisters),
					   pi->sram_end);
}

static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start +
					   offsetof(SMU7_Discrete_MCRegisters, data[0]),
					   (u8 *)&pi->smc_mc_reg_table.data[0],
					   sizeof(SMU7_Discrete_MCRegisterSet) *
					   pi->dpm_table.mclk_table.count,
					   pi->sram_end);
}

static void ci_enable_voltage_control(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
						      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}

static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
{
	u32 speed_cntl = 0;

	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;

	return (u16)speed_cntl;
}

static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
{
	u32 link_width = 0;

	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;

	switch (link_width) {
	case 1:
		return 1;
	case 2:
		return 2;
	case 3:
		return 4;
	case 4:
		return 8;
	case 0:
	case 6:
	default:
		return 16;
	}
}

static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
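	/*
	 * When moving to a faster link, ask the platform (via ACPI) up
	 * front; a downgrade is only flagged here and notified after the
	 * state change.
	 */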
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
							   struct amdgpu_ps *amdgpu_new_state,
							   struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == AMDGPU_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == AMDGPU_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(adev) > 0))
			return;

#ifdef CONFIG_ACPI
		amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
	}
}

static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}

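/*
 * Leakage-ID patching: the VBIOS tables may carry virtual leakage voltage
 * IDs instead of real voltages; substitute the actual measured voltages
 * wherever such an ID can appear.
 */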
static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
								      struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
								       struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
								   struct amdgpu_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
							    struct amdgpu_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
							 struct amdgpu_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
	}
}

&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); 5187 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev, 5188 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); 5189 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev, 5190 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); 5191 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, 5192 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); 5193 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, 5194 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); 5195 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev, 5196 &adev->pm.dpm.dyn_state.phase_shedding_limits_table); 5197 ci_patch_clock_voltage_limits_with_vddc_leakage(adev, 5198 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); 5199 ci_patch_clock_voltage_limits_with_vddc_leakage(adev, 5200 &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc); 5201 ci_patch_cac_leakage_table_with_vddc_leakage(adev, 5202 &adev->pm.dpm.dyn_state.cac_leakage_table); 5203 5204} 5205 5206static void ci_update_current_ps(struct amdgpu_device *adev, 5207 struct amdgpu_ps *rps) 5208{ 5209 struct ci_ps *new_ps = ci_get_ps(rps); 5210 struct ci_power_info *pi = ci_get_pi(adev); 5211 5212 pi->current_rps = *rps; 5213 pi->current_ps = *new_ps; 5214 pi->current_rps.ps_priv = &pi->current_ps; 5215} 5216 5217static void ci_update_requested_ps(struct amdgpu_device *adev, 5218 struct amdgpu_ps *rps) 5219{ 5220 struct ci_ps *new_ps = ci_get_ps(rps); 5221 struct ci_power_info *pi = ci_get_pi(adev); 5222 5223 pi->requested_rps = *rps; 5224 pi->requested_ps = *new_ps; 5225 pi->requested_rps.ps_priv = &pi->requested_ps; 5226} 5227 5228static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) 5229{ 5230 struct ci_power_info *pi = ci_get_pi(adev); 5231 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 5232 struct amdgpu_ps *new_ps = &requested_ps; 5233 5234 ci_update_requested_ps(adev, new_ps); 5235 5236 ci_apply_state_adjust_rules(adev, &pi->requested_rps); 5237 5238 return 0; 5239} 5240 5241static void ci_dpm_post_set_power_state(struct amdgpu_device *adev) 5242{ 5243 struct ci_power_info *pi = ci_get_pi(adev); 5244 struct amdgpu_ps *new_ps = &pi->requested_rps; 5245 5246 ci_update_current_ps(adev, new_ps); 5247} 5248 5249 5250static void ci_dpm_setup_asic(struct amdgpu_device *adev) 5251{ 5252 ci_read_clock_registers(adev); 5253 ci_enable_acpi_power_management(adev); 5254 ci_init_sclk_t(adev); 5255} 5256 5257static int ci_dpm_enable(struct amdgpu_device *adev) 5258{ 5259 struct ci_power_info *pi = ci_get_pi(adev); 5260 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; 5261 int ret; 5262 5263 if (amdgpu_ci_is_smc_running(adev)) 5264 return -EINVAL; 5265 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { 5266 ci_enable_voltage_control(adev); 5267 ret = ci_construct_voltage_tables(adev); 5268 if (ret) { 5269 DRM_ERROR("ci_construct_voltage_tables failed\n"); 5270 return ret; 5271 } 5272 } 5273 if (pi->caps_dynamic_ac_timing) { 5274 ret = ci_initialize_mc_reg_table(adev); 5275 if (ret) 5276 pi->caps_dynamic_ac_timing = false; 5277 } 5278 if (pi->dynamic_ss) 5279 ci_enable_spread_spectrum(adev, true); 5280 if (pi->thermal_protection) 5281 ci_enable_thermal_protection(adev, true); 5282 ci_program_sstp(adev); 5283 ci_enable_display_gap(adev); 5284 ci_program_vc(adev); 5285 ret = ci_upload_firmware(adev); 5286 if (ret) { 5287 DRM_ERROR("ci_upload_firmware failed\n"); 5288 return ret; 5289 } 5290 ret = 
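/*
 * ci_dpm_enable() repeats the same call / check / DRM_ERROR / return
 * pattern for each bring-up step. Purely as an illustrative alternative
 * (this macro is not defined anywhere in the driver), the pattern could
 * be compressed as:
 */
#if 0
#define CI_DPM_STEP(expr)					\
	do {							\
		int __step_ret = (expr);			\
		if (__step_ret) {				\
			DRM_ERROR(#expr " failed\n");		\
			return __step_ret;			\
		}						\
	} while (0)

/* usage: CI_DPM_STEP(ci_upload_firmware(adev)); */
#endif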
ci_process_firmware_header(adev); 5291 if (ret) { 5292 DRM_ERROR("ci_process_firmware_header failed\n"); 5293 return ret; 5294 } 5295 ret = ci_initial_switch_from_arb_f0_to_f1(adev); 5296 if (ret) { 5297 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n"); 5298 return ret; 5299 } 5300 ret = ci_init_smc_table(adev); 5301 if (ret) { 5302 DRM_ERROR("ci_init_smc_table failed\n"); 5303 return ret; 5304 } 5305 ret = ci_init_arb_table_index(adev); 5306 if (ret) { 5307 DRM_ERROR("ci_init_arb_table_index failed\n"); 5308 return ret; 5309 } 5310 if (pi->caps_dynamic_ac_timing) { 5311 ret = ci_populate_initial_mc_reg_table(adev); 5312 if (ret) { 5313 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n"); 5314 return ret; 5315 } 5316 } 5317 ret = ci_populate_pm_base(adev); 5318 if (ret) { 5319 DRM_ERROR("ci_populate_pm_base failed\n"); 5320 return ret; 5321 } 5322 ci_dpm_start_smc(adev); 5323 ci_enable_vr_hot_gpio_interrupt(adev); 5324 ret = ci_notify_smc_display_change(adev, false); 5325 if (ret) { 5326 DRM_ERROR("ci_notify_smc_display_change failed\n"); 5327 return ret; 5328 } 5329 ci_enable_sclk_control(adev, true); 5330 ret = ci_enable_ulv(adev, true); 5331 if (ret) { 5332 DRM_ERROR("ci_enable_ulv failed\n"); 5333 return ret; 5334 } 5335 ret = ci_enable_ds_master_switch(adev, true); 5336 if (ret) { 5337 DRM_ERROR("ci_enable_ds_master_switch failed\n"); 5338 return ret; 5339 } 5340 ret = ci_start_dpm(adev); 5341 if (ret) { 5342 DRM_ERROR("ci_start_dpm failed\n"); 5343 return ret; 5344 } 5345 ret = ci_enable_didt(adev, true); 5346 if (ret) { 5347 DRM_ERROR("ci_enable_didt failed\n"); 5348 return ret; 5349 } 5350 ret = ci_enable_smc_cac(adev, true); 5351 if (ret) { 5352 DRM_ERROR("ci_enable_smc_cac failed\n"); 5353 return ret; 5354 } 5355 ret = ci_enable_power_containment(adev, true); 5356 if (ret) { 5357 DRM_ERROR("ci_enable_power_containment failed\n"); 5358 return ret; 5359 } 5360 5361 ret = ci_power_control_set_level(adev); 5362 if (ret) { 5363 DRM_ERROR("ci_power_control_set_level failed\n"); 5364 return ret; 5365 } 5366 5367 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 5368 5369 ret = ci_enable_thermal_based_sclk_dpm(adev, true); 5370 if (ret) { 5371 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); 5372 return ret; 5373 } 5374 5375 ci_thermal_start_thermal_controller(adev); 5376 5377 ci_update_current_ps(adev, boot_ps); 5378 5379 return 0; 5380} 5381 5382static void ci_dpm_disable(struct amdgpu_device *adev) 5383{ 5384 struct ci_power_info *pi = ci_get_pi(adev); 5385 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; 5386 5387 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 5388 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 5389 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 5390 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); 5391 5392 ci_dpm_powergate_uvd(adev, false); 5393 5394 if (!amdgpu_ci_is_smc_running(adev)) 5395 return; 5396 5397 ci_thermal_stop_thermal_controller(adev); 5398 5399 if (pi->thermal_protection) 5400 ci_enable_thermal_protection(adev, false); 5401 ci_enable_power_containment(adev, false); 5402 ci_enable_smc_cac(adev, false); 5403 ci_enable_didt(adev, false); 5404 ci_enable_spread_spectrum(adev, false); 5405 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false); 5406 ci_stop_dpm(adev); 5407 ci_enable_ds_master_switch(adev, false); 5408 ci_enable_ulv(adev, false); 5409 ci_clear_vc(adev); 5410 ci_reset_to_default(adev); 5411 ci_dpm_stop_smc(adev); 5412 ci_force_switch_to_arb_f0(adev); 5413 
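/*
 * Note: the teardown in ci_dpm_disable() broadly mirrors ci_dpm_enable()
 * in reverse (power containment, CAC and DIDT first, then DPM itself,
 * then the SMC), before the boot power state is restored below.
 */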
ci_enable_thermal_based_sclk_dpm(adev, false); 5414 5415 ci_update_current_ps(adev, boot_ps); 5416} 5417 5418static int ci_dpm_set_power_state(struct amdgpu_device *adev) 5419{ 5420 struct ci_power_info *pi = ci_get_pi(adev); 5421 struct amdgpu_ps *new_ps = &pi->requested_rps; 5422 struct amdgpu_ps *old_ps = &pi->current_rps; 5423 int ret; 5424 5425 ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps); 5426 if (pi->pcie_performance_request) 5427 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps); 5428 ret = ci_freeze_sclk_mclk_dpm(adev); 5429 if (ret) { 5430 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); 5431 return ret; 5432 } 5433 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps); 5434 if (ret) { 5435 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); 5436 return ret; 5437 } 5438 ret = ci_generate_dpm_level_enable_mask(adev, new_ps); 5439 if (ret) { 5440 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); 5441 return ret; 5442 } 5443 5444 ret = ci_update_vce_dpm(adev, new_ps, old_ps); 5445 if (ret) { 5446 DRM_ERROR("ci_update_vce_dpm failed\n"); 5447 return ret; 5448 } 5449 5450 ret = ci_update_sclk_t(adev); 5451 if (ret) { 5452 DRM_ERROR("ci_update_sclk_t failed\n"); 5453 return ret; 5454 } 5455 if (pi->caps_dynamic_ac_timing) { 5456 ret = ci_update_and_upload_mc_reg_table(adev); 5457 if (ret) { 5458 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); 5459 return ret; 5460 } 5461 } 5462 ret = ci_program_memory_timing_parameters(adev); 5463 if (ret) { 5464 DRM_ERROR("ci_program_memory_timing_parameters failed\n"); 5465 return ret; 5466 } 5467 ret = ci_unfreeze_sclk_mclk_dpm(adev); 5468 if (ret) { 5469 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); 5470 return ret; 5471 } 5472 ret = ci_upload_dpm_level_enable_mask(adev); 5473 if (ret) { 5474 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); 5475 return ret; 5476 } 5477 if (pi->pcie_performance_request) 5478 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); 5479 5480 return 0; 5481} 5482 5483#if 0 5484static void ci_dpm_reset_asic(struct amdgpu_device *adev) 5485{ 5486 ci_set_boot_state(adev); 5487} 5488#endif 5489 5490static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev) 5491{ 5492 ci_program_display_gap(adev); 5493} 5494 5495union power_info { 5496 struct _ATOM_POWERPLAY_INFO info; 5497 struct _ATOM_POWERPLAY_INFO_V2 info_2; 5498 struct _ATOM_POWERPLAY_INFO_V3 info_3; 5499 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 5500 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 5501 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 5502}; 5503 5504union pplib_clock_info { 5505 struct _ATOM_PPLIB_R600_CLOCK_INFO r600; 5506 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; 5507 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; 5508 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; 5509 struct _ATOM_PPLIB_SI_CLOCK_INFO si; 5510 struct _ATOM_PPLIB_CI_CLOCK_INFO ci; 5511}; 5512 5513union pplib_power_state { 5514 struct _ATOM_PPLIB_STATE v1; 5515 struct _ATOM_PPLIB_STATE_V2 v2; 5516}; 5517 5518static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev, 5519 struct amdgpu_ps *rps, 5520 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, 5521 u8 table_rev) 5522{ 5523 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); 5524 rps->class = le16_to_cpu(non_clock_info->usClassification); 5525 rps->class2 = le16_to_cpu(non_clock_info->usClassification2); 5526 5527 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { 5528 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); 
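/* UVD clocks (VCLK/DCLK) only exist in non-clock-info tables newer than
 * ATOM_PPLIB_NONCLOCKINFO_VER1; older tables fall through to zero below. */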
5529 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); 5530 } else { 5531 rps->vclk = 0; 5532 rps->dclk = 0; 5533 } 5534 5535 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) 5536 adev->pm.dpm.boot_ps = rps; 5537 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 5538 adev->pm.dpm.uvd_ps = rps; 5539} 5540 5541static void ci_parse_pplib_clock_info(struct amdgpu_device *adev, 5542 struct amdgpu_ps *rps, int index, 5543 union pplib_clock_info *clock_info) 5544{ 5545 struct ci_power_info *pi = ci_get_pi(adev); 5546 struct ci_ps *ps = ci_get_ps(rps); 5547 struct ci_pl *pl = &ps->performance_levels[index]; 5548 5549 ps->performance_level_count = index + 1; 5550 5551 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 5552 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; 5553 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 5554 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; 5555 5556 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, 5557 pi->sys_pcie_mask, 5558 pi->vbios_boot_state.pcie_gen_bootup_value, 5559 clock_info->ci.ucPCIEGen); 5560 pl->pcie_lane = amdgpu_get_pcie_lane_support(adev, 5561 pi->vbios_boot_state.pcie_lane_bootup_value, 5562 le16_to_cpu(clock_info->ci.usPCIELane)); 5563 5564 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { 5565 pi->acpi_pcie_gen = pl->pcie_gen; 5566 } 5567 5568 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { 5569 pi->ulv.supported = true; 5570 pi->ulv.pl = *pl; 5571 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; 5572 } 5573 5574 /* patch up boot state */ 5575 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { 5576 pl->mclk = pi->vbios_boot_state.mclk_bootup_value; 5577 pl->sclk = pi->vbios_boot_state.sclk_bootup_value; 5578 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; 5579 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; 5580 } 5581 5582 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 5583 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 5584 pi->use_pcie_powersaving_levels = true; 5585 if (pi->pcie_gen_powersaving.max < pl->pcie_gen) 5586 pi->pcie_gen_powersaving.max = pl->pcie_gen; 5587 if (pi->pcie_gen_powersaving.min > pl->pcie_gen) 5588 pi->pcie_gen_powersaving.min = pl->pcie_gen; 5589 if (pi->pcie_lane_powersaving.max < pl->pcie_lane) 5590 pi->pcie_lane_powersaving.max = pl->pcie_lane; 5591 if (pi->pcie_lane_powersaving.min > pl->pcie_lane) 5592 pi->pcie_lane_powersaving.min = pl->pcie_lane; 5593 break; 5594 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 5595 pi->use_pcie_performance_levels = true; 5596 if (pi->pcie_gen_performance.max < pl->pcie_gen) 5597 pi->pcie_gen_performance.max = pl->pcie_gen; 5598 if (pi->pcie_gen_performance.min > pl->pcie_gen) 5599 pi->pcie_gen_performance.min = pl->pcie_gen; 5600 if (pi->pcie_lane_performance.max < pl->pcie_lane) 5601 pi->pcie_lane_performance.max = pl->pcie_lane; 5602 if (pi->pcie_lane_performance.min > pl->pcie_lane) 5603 pi->pcie_lane_performance.min = pl->pcie_lane; 5604 break; 5605 default: 5606 break; 5607 } 5608} 5609 5610static int ci_parse_power_table(struct amdgpu_device *adev) 5611{ 5612 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5613 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; 5614 union pplib_power_state *power_state; 5615 int i, j, k, non_clock_array_index, clock_array_index; 5616 union pplib_clock_info *clock_info; 5617 struct _StateArray *state_array; 5618 struct _ClockInfoArray *clock_info_array; 5619 struct _NonClockInfoArray *non_clock_info_array; 5620 union power_info *power_info; 5621 int index = 
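/*
 * PPLib stores each clock split across a 16-bit little-endian low word
 * and an 8-bit high byte; ci_parse_pplib_clock_info() above (and the VCE
 * state fill-in further down) reassembles them as low | (high << 16).
 * A minimal stand-alone sketch of that step, with a hypothetical helper
 * name:
 */
#if 0
static inline u32 pplib_assemble_clock(u16 low, u8 high)
{
	return (u32)le16_to_cpu(low) | ((u32)high << 16);
}
#endif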
GetIndexIntoMasterTable(DATA, PowerPlayInfo); 5622 u16 data_offset; 5623 u8 frev, crev; 5624 u8 *power_state_offset; 5625 struct ci_ps *ps; 5626 5627 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 5628 &frev, &crev, &data_offset)) 5629 return -EINVAL; 5630 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 5631 5632 amdgpu_add_thermal_controller(adev); 5633 5634 state_array = (struct _StateArray *) 5635 (mode_info->atom_context->bios + data_offset + 5636 le16_to_cpu(power_info->pplib.usStateArrayOffset)); 5637 clock_info_array = (struct _ClockInfoArray *) 5638 (mode_info->atom_context->bios + data_offset + 5639 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); 5640 non_clock_info_array = (struct _NonClockInfoArray *) 5641 (mode_info->atom_context->bios + data_offset + 5642 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); 5643 5644 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * 5645 state_array->ucNumEntries, GFP_KERNEL); 5646 if (!adev->pm.dpm.ps) 5647 return -ENOMEM; 5648 power_state_offset = (u8 *)state_array->states; 5649 for (i = 0; i < state_array->ucNumEntries; i++) { 5650 u8 *idx; 5651 power_state = (union pplib_power_state *)power_state_offset; 5652 non_clock_array_index = power_state->v2.nonClockInfoIndex; 5653 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 5654 &non_clock_info_array->nonClockInfo[non_clock_array_index]; 5655 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); 5656 if (ps == NULL) { 5657 kfree(adev->pm.dpm.ps); 5658 return -ENOMEM; 5659 } 5660 adev->pm.dpm.ps[i].ps_priv = ps; 5661 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], 5662 non_clock_info, 5663 non_clock_info_array->ucEntrySize); 5664 k = 0; 5665 idx = (u8 *)&power_state->v2.clockInfoIndex[0]; 5666 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { 5667 clock_array_index = idx[j]; 5668 if (clock_array_index >= clock_info_array->ucNumEntries) 5669 continue; 5670 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) 5671 break; 5672 clock_info = (union pplib_clock_info *) 5673 ((u8 *)&clock_info_array->clockInfo[0] + 5674 (clock_array_index * clock_info_array->ucEntrySize)); 5675 ci_parse_pplib_clock_info(adev, 5676 &adev->pm.dpm.ps[i], k, 5677 clock_info); 5678 k++; 5679 } 5680 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; 5681 } 5682 adev->pm.dpm.num_ps = state_array->ucNumEntries; 5683 5684 /* fill in the vce power states */ 5685 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { 5686 u32 sclk, mclk; 5687 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; 5688 clock_info = (union pplib_clock_info *) 5689 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; 5690 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); 5691 sclk |= clock_info->ci.ucEngineClockHigh << 16; 5692 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); 5693 mclk |= clock_info->ci.ucMemoryClockHigh << 16; 5694 adev->pm.dpm.vce_states[i].sclk = sclk; 5695 adev->pm.dpm.vce_states[i].mclk = mclk; 5696 } 5697 5698 return 0; 5699} 5700 5701static int ci_get_vbios_boot_values(struct amdgpu_device *adev, 5702 struct ci_vbios_boot_state *boot_state) 5703{ 5704 struct amdgpu_mode_info *mode_info = &adev->mode_info; 5705 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 5706 ATOM_FIRMWARE_INFO_V2_2 *firmware_info; 5707 u8 frev, crev; 5708 u16 data_offset; 5709 5710 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 5711 &frev, &crev, &data_offset)) { 5712 firmware_info = 5713 
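/*
 * Note on the state-array walk in ci_parse_power_table() above:
 * _ATOM_PPLIB_STATE_V2 records are variable-length (roughly one byte of
 * DPM-level count, one byte of non-clock-info index, then ucNumDPMLevels
 * one-byte indices into the clock-info array), which is why the loop
 * advances by power_state_offset += 2 + ucNumDPMLevels instead of a
 * fixed sizeof(). Sketched layout, for illustration only:
 */
#if 0
struct pplib_state_v2_layout {		/* mirrors _ATOM_PPLIB_STATE_V2 */
	u8 num_dpm_levels;		/* ucNumDPMLevels */
	u8 non_clock_info_index;	/* nonClockInfoIndex */
	u8 clock_info_index[];		/* num_dpm_levels entries follow */
};
#endif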
(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios + 5714 data_offset); 5715 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage); 5716 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage); 5717 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage); 5718 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev); 5719 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev); 5720 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock); 5721 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock); 5722 5723 return 0; 5724 } 5725 return -EINVAL; 5726} 5727 5728static void ci_dpm_fini(struct amdgpu_device *adev) 5729{ 5730 int i; 5731 5732 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 5733 kfree(adev->pm.dpm.ps[i].ps_priv); 5734 } 5735 kfree(adev->pm.dpm.ps); 5736 kfree(adev->pm.dpm.priv); 5737 kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); 5738 amdgpu_free_extended_power_table(adev); 5739} 5740 5741/** 5742 * ci_dpm_init_microcode - load ucode images from disk 5743 * 5744 * @adev: amdgpu_device pointer 5745 * 5746 * Use the firmware interface to load the ucode images into 5747 * the driver (not loaded into hw). 5748 * Returns 0 on success, error on failure. 5749 */ 5750static int ci_dpm_init_microcode(struct amdgpu_device *adev) 5751{ 5752 const char *chip_name; 5753 char fw_name[30]; 5754 int err; 5755 5756 DRM_DEBUG("\n"); 5757 5758 switch (adev->asic_type) { 5759 case CHIP_BONAIRE: 5760 chip_name = "bonaire"; 5761 break; 5762 case CHIP_HAWAII: 5763 chip_name = "hawaii"; 5764 break; 5765 case CHIP_KAVERI: 5766 case CHIP_KABINI: 5767 default: BUG(); 5768 } 5769 5770 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 5771 err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 5772 if (err) 5773 goto out; 5774 err = amdgpu_ucode_validate(adev->pm.fw); 5775 5776out: 5777 if (err) { 5778 printk(KERN_ERR 5779 "cik_smc: Failed to load firmware \"%s\"\n", 5780 fw_name); 5781 release_firmware(adev->pm.fw); 5782 adev->pm.fw = NULL; 5783 } 5784 return err; 5785} 5786 5787static int ci_dpm_init(struct amdgpu_device *adev) 5788{ 5789 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 5790 SMU7_Discrete_DpmTable *dpm_table; 5791 struct amdgpu_gpio_rec gpio; 5792 u16 data_offset, size; 5793 u8 frev, crev; 5794 struct ci_power_info *pi; 5795 int ret; 5796 5797 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); 5798 if (pi == NULL) 5799 return -ENOMEM; 5800 adev->pm.dpm.priv = pi; 5801 5802 pi->sys_pcie_mask = 5803 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> 5804 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; 5805 5806 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 5807 5808 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; 5809 pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3; 5810 pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1; 5811 pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3; 5812 5813 pi->pcie_lane_performance.max = 0; 5814 pi->pcie_lane_performance.min = 16; 5815 pi->pcie_lane_powersaving.max = 0; 5816 pi->pcie_lane_powersaving.min = 16; 5817 5818 ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state); 5819 if (ret) { 5820 ci_dpm_fini(adev); 5821 return ret; 5822 } 5823 5824 ret = amdgpu_get_platform_caps(adev); 5825 if (ret) { 5826 ci_dpm_fini(adev); 5827 return ret; 5828 } 5829 5830 ret = amdgpu_parse_extended_power_table(adev); 5831 if (ret) { 5832 
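/* Every early-failure path in ci_dpm_init() funnels through
 * ci_dpm_fini(), which copes with a partially constructed state
 * (kfree(NULL) is a no-op and num_ps is still zero this early). */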
ci_dpm_fini(adev); 5833 return ret; 5834 } 5835 5836 ret = ci_parse_power_table(adev); 5837 if (ret) { 5838 ci_dpm_fini(adev); 5839 return ret; 5840 } 5841 5842 pi->dll_default_on = false; 5843 pi->sram_end = SMC_RAM_END; 5844 5845 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; 5846 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; 5847 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT; 5848 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT; 5849 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT; 5850 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT; 5851 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT; 5852 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT; 5853 5854 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT; 5855 5856 pi->sclk_dpm_key_disabled = 0; 5857 pi->mclk_dpm_key_disabled = 0; 5858 pi->pcie_dpm_key_disabled = 0; 5859 pi->thermal_sclk_dpm_enabled = 0; 5860 5861 pi->caps_sclk_ds = true; 5862 5863 pi->mclk_strobe_mode_threshold = 40000; 5864 pi->mclk_stutter_mode_threshold = 40000; 5865 pi->mclk_edc_enable_threshold = 40000; 5866 pi->mclk_edc_wr_enable_threshold = 40000; 5867 5868 ci_initialize_powertune_defaults(adev); 5869 5870 pi->caps_fps = false; 5871 5872 pi->caps_sclk_throttle_low_notification = false; 5873 5874 pi->caps_uvd_dpm = true; 5875 pi->caps_vce_dpm = true; 5876 5877 ci_get_leakage_voltages(adev); 5878 ci_patch_dependency_tables_with_leakage(adev); 5879 ci_set_private_data_variables_based_on_pptable(adev); 5880 5881 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = 5882 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); 5883 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { 5884 ci_dpm_fini(adev); 5885 return -ENOMEM; 5886 } 5887 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; 5888 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; 5889 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; 5890 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; 5891 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; 5892 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; 5893 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; 5894 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; 5895 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; 5896 5897 adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; 5898 adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; 5899 adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; 5900 5901 adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; 5902 adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; 5903 adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; 5904 adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; 5905 5906 if (adev->asic_type == CHIP_HAWAII) { 5907 pi->thermal_temp_setting.temperature_low = 94500; 5908 pi->thermal_temp_setting.temperature_high = 95000; 5909 pi->thermal_temp_setting.temperature_shutdown = 104000; 5910 } else { 5911 pi->thermal_temp_setting.temperature_low = 99500; 5912 pi->thermal_temp_setting.temperature_high = 100000; 5913 pi->thermal_temp_setting.temperature_shutdown = 104000; 5914 } 5915 5916 pi->uvd_enabled = false; 5917 5918 dpm_table = &pi->smc_state_table; 5919 5920 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID); 5921 if (gpio.valid) { 5922 dpm_table->VRHotGpio = gpio.shift; 5923 adev->pm.dpm.platform_caps |= 
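/*
 * Units in the fallback dispclk/VDDC table built above follow the usual
 * DPM conventions: clocks in 10 kHz units and voltages in mV, so the
 * 36000/720 entry reads as "360 MHz needs 0.72 V". (Interpretation of
 * the common table format; the file itself does not spell this out.)
 */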
ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; 5924 } else { 5925 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN; 5926 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; 5927 } 5928 5929 gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID); 5930 if (gpio.valid) { 5931 dpm_table->AcDcGpio = gpio.shift; 5932 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC; 5933 } else { 5934 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN; 5935 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC; 5936 } 5937 5938 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID); 5939 if (gpio.valid) { 5940 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL); 5941 5942 switch (gpio.shift) { 5943 case 0: 5944 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK; 5945 tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT; 5946 break; 5947 case 1: 5948 tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK; 5949 tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT; 5950 break; 5951 case 2: 5952 tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK; 5953 break; 5954 case 3: 5955 tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK; 5956 break; 5957 case 4: 5958 tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK; 5959 break; 5960 default: 5961 DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); 5962 break; 5963 } 5964 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp); 5965 } 5966 5967 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5968 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5969 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5970 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) 5971 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5972 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) 5973 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5974 5975 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) { 5976 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) 5977 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5978 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) 5979 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5980 else 5981 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; 5982 } 5983 5984 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { 5985 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) 5986 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; 5987 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) 5988 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; 5989 else 5990 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL; 5991 } 5992 5993 pi->vddc_phase_shed_control = true; 5994 5995#if defined(CONFIG_ACPI) 5996 pi->pcie_performance_request = 5997 amdgpu_acpi_is_pcie_performance_request_supported(adev); 5998#else 5999 pi->pcie_performance_request = false; 6000#endif 6001 6002 if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, 6003 &frev, &crev, &data_offset)) { 6004 pi->caps_sclk_ss_support = true; 6005 pi->caps_mclk_ss_support = true; 6006 pi->dynamic_ss = true; 6007 } else { 6008 pi->caps_sclk_ss_support = false; 6009 pi->caps_mclk_ss_support = false; 6010 pi->dynamic_ss = true; 6011 } 6012 6013 if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) 6014 pi->thermal_protection = true; 6015 else 6016 pi->thermal_protection = false; 6017 6018 pi->caps_dynamic_ac_timing = true; 6019 6020 pi->uvd_power_gated = 
false; 6021 6022 /* make sure dc limits are valid */ 6023 if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || 6024 (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) 6025 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = 6026 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 6027 6028 pi->fan_ctrl_is_in_default_mode = true; 6029 6030 return 0; 6031} 6032 6033static void 6034ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 6035 struct seq_file *m) 6036{ 6037 struct ci_power_info *pi = ci_get_pi(adev); 6038 struct amdgpu_ps *rps = &pi->current_rps; 6039 u32 sclk = ci_get_average_sclk_freq(adev); 6040 u32 mclk = ci_get_average_mclk_freq(adev); 6041 u32 activity_percent = 50; 6042 int ret; 6043 6044 ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA), 6045 &activity_percent); 6046 6047 if (ret == 0) { 6048 activity_percent += 0x80; 6049 activity_percent >>= 8; 6050 activity_percent = activity_percent > 100 ? 100 : activity_percent; 6051 } 6052 6053 seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); 6054 seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis"); 6055 seq_printf(m, "power level avg sclk: %u mclk: %u\n", 6056 sclk, mclk); 6057 seq_printf(m, "GPU load: %u %%\n", activity_percent); 6058} 6059 6060static void ci_dpm_print_power_state(struct amdgpu_device *adev, 6061 struct amdgpu_ps *rps) 6062{ 6063 struct ci_ps *ps = ci_get_ps(rps); 6064 struct ci_pl *pl; 6065 int i; 6066 6067 amdgpu_dpm_print_class_info(rps->class, rps->class2); 6068 amdgpu_dpm_print_cap_info(rps->caps); 6069 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); 6070 for (i = 0; i < ps->performance_level_count; i++) { 6071 pl = &ps->performance_levels[i]; 6072 printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n", 6073 i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane); 6074 } 6075 amdgpu_dpm_print_ps_status(adev, rps); 6076} 6077 6078static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) 6079{ 6080 struct ci_power_info *pi = ci_get_pi(adev); 6081 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); 6082 6083 if (low) 6084 return requested_state->performance_levels[0].sclk; 6085 else 6086 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; 6087} 6088 6089static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low) 6090{ 6091 struct ci_power_info *pi = ci_get_pi(adev); 6092 struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); 6093 6094 if (low) 6095 return requested_state->performance_levels[0].mclk; 6096 else 6097 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; 6098} 6099 6100/* get temperature in millidegrees */ 6101static int ci_dpm_get_temp(struct amdgpu_device *adev) 6102{ 6103 u32 temp; 6104 int actual_temp = 0; 6105 6106 temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> 6107 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; 6108 6109 if (temp & 0x200) 6110 actual_temp = 255; 6111 else 6112 actual_temp = temp & 0x1ff; 6113 6114 actual_temp = actual_temp * 1000; 6115 6116 return actual_temp; 6117} 6118 6119static int ci_set_temperature_range(struct amdgpu_device *adev) 6120{ 6121 int ret; 6122 6123 ret = ci_thermal_enable_alert(adev, false); 6124 if (ret) 6125 return ret; 6126 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, 6127 CISLANDS_TEMP_RANGE_MAX); 6128 if (ret) 6129 return ret; 6130 ret = 
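/*
 * Two conversions just above are easy to misread:
 * - AverageGraphicsA comes back from the SMC as 8.8 fixed point, so
 *   (raw + 0x80) >> 8 rounds the percentage to the nearest integer;
 * - CTF_TEMP is a 9-bit value, and the 0x200 bit makes the driver clamp
 *   the reading to 255 before scaling to millidegrees.
 * Also, the trailing "return ret;" below duplicates the check right
 * before it; harmless, but one of the two is redundant.
 * Stand-alone sketch of the rounding step (hypothetical helper name):
 */
#if 0
static inline u32 smc_8_8_to_percent(u32 raw)
{
	u32 pct = (raw + 0x80) >> 8;	/* round 8.8 fixed point */

	return pct > 100 ? 100 : pct;	/* clamp as the driver does */
}
#endif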
ci_thermal_enable_alert(adev, true); 6131 if (ret) 6132 return ret; 6133 return ret; 6134} 6135 6136static int ci_dpm_early_init(void *handle) 6137{ 6138 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6139 6140 ci_dpm_set_dpm_funcs(adev); 6141 ci_dpm_set_irq_funcs(adev); 6142 6143 return 0; 6144} 6145 6146static int ci_dpm_late_init(void *handle) 6147{ 6148 int ret; 6149 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6150 6151 if (!amdgpu_dpm) 6152 return 0; 6153 6154 /* init the sysfs and debugfs files late */ 6155 ret = amdgpu_pm_sysfs_init(adev); 6156 if (ret) 6157 return ret; 6158 6159 ret = ci_set_temperature_range(adev); 6160 if (ret) 6161 return ret; 6162 6163 ci_dpm_powergate_uvd(adev, true); 6164 6165 return 0; 6166} 6167 6168static int ci_dpm_sw_init(void *handle) 6169{ 6170 int ret; 6171 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6172 6173 ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq); 6174 if (ret) 6175 return ret; 6176 6177 ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq); 6178 if (ret) 6179 return ret; 6180 6181 /* default to balanced state */ 6182 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 6183 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 6184 adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; 6185 adev->pm.default_sclk = adev->clock.default_sclk; 6186 adev->pm.default_mclk = adev->clock.default_mclk; 6187 adev->pm.current_sclk = adev->clock.default_sclk; 6188 adev->pm.current_mclk = adev->clock.default_mclk; 6189 adev->pm.int_thermal_type = THERMAL_TYPE_NONE; 6190 6191 if (amdgpu_dpm == 0) 6192 return 0; 6193 6194 ret = ci_dpm_init_microcode(adev); 6195 if (ret) 6196 return ret; 6197 6198 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 6199 mutex_lock(&adev->pm.mutex); 6200 ret = ci_dpm_init(adev); 6201 if (ret) 6202 goto dpm_failed; 6203 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 6204 if (amdgpu_dpm == 1) 6205 amdgpu_pm_print_power_states(adev); 6206 mutex_unlock(&adev->pm.mutex); 6207 DRM_INFO("amdgpu: dpm initialized\n"); 6208 6209 return 0; 6210 6211dpm_failed: 6212 ci_dpm_fini(adev); 6213 mutex_unlock(&adev->pm.mutex); 6214 DRM_ERROR("amdgpu: dpm initialization failed\n"); 6215 return ret; 6216} 6217 6218static int ci_dpm_sw_fini(void *handle) 6219{ 6220 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6221 6222 mutex_lock(&adev->pm.mutex); 6223 amdgpu_pm_sysfs_fini(adev); 6224 ci_dpm_fini(adev); 6225 mutex_unlock(&adev->pm.mutex); 6226 6227 return 0; 6228} 6229 6230static int ci_dpm_hw_init(void *handle) 6231{ 6232 int ret; 6233 6234 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6235 6236 if (!amdgpu_dpm) 6237 return 0; 6238 6239 mutex_lock(&adev->pm.mutex); 6240 ci_dpm_setup_asic(adev); 6241 ret = ci_dpm_enable(adev); 6242 if (ret) 6243 adev->pm.dpm_enabled = false; 6244 else 6245 adev->pm.dpm_enabled = true; 6246 mutex_unlock(&adev->pm.mutex); 6247 6248 return ret; 6249} 6250 6251static int ci_dpm_hw_fini(void *handle) 6252{ 6253 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6254 6255 if (adev->pm.dpm_enabled) { 6256 mutex_lock(&adev->pm.mutex); 6257 ci_dpm_disable(adev); 6258 mutex_unlock(&adev->pm.mutex); 6259 } 6260 6261 return 0; 6262} 6263 6264static int ci_dpm_suspend(void *handle) 6265{ 6266 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6267 6268 if (adev->pm.dpm_enabled) { 6269 mutex_lock(&adev->pm.mutex); 6270 /* disable dpm */ 6271 
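/*
 * Suspend tears DPM down and rewinds the software state to the boot
 * power state; resume below then re-runs the full setup + enable
 * sequence, because ASIC init has put the hardware back in its boot
 * state.
 */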
ci_dpm_disable(adev); 6272 /* reset the power state */ 6273 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 6274 mutex_unlock(&adev->pm.mutex); 6275 } 6276 return 0; 6277} 6278 6279static int ci_dpm_resume(void *handle) 6280{ 6281 int ret; 6282 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6283 6284 if (adev->pm.dpm_enabled) { 6285 /* asic init will reset to the boot state */ 6286 mutex_lock(&adev->pm.mutex); 6287 ci_dpm_setup_asic(adev); 6288 ret = ci_dpm_enable(adev); 6289 if (ret) 6290 adev->pm.dpm_enabled = false; 6291 else 6292 adev->pm.dpm_enabled = true; 6293 mutex_unlock(&adev->pm.mutex); 6294 if (adev->pm.dpm_enabled) 6295 amdgpu_pm_compute_clocks(adev); 6296 } 6297 return 0; 6298} 6299 6300static bool ci_dpm_is_idle(void *handle) 6301{ 6302 /* XXX */ 6303 return true; 6304} 6305 6306static int ci_dpm_wait_for_idle(void *handle) 6307{ 6308 /* XXX */ 6309 return 0; 6310} 6311 6312static void ci_dpm_print_status(void *handle) 6313{ 6314 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 6315 6316 dev_info(adev->dev, "CIK DPM registers\n"); 6317 dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n", 6318 RREG32(mmBIOS_SCRATCH_4)); 6319 dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n", 6320 RREG32(mmMC_ARB_DRAM_TIMING)); 6321 dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n", 6322 RREG32(mmMC_ARB_DRAM_TIMING2)); 6323 dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n", 6324 RREG32(mmMC_ARB_BURST_TIME)); 6325 dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n", 6326 RREG32(mmMC_ARB_DRAM_TIMING_1)); 6327 dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n", 6328 RREG32(mmMC_ARB_DRAM_TIMING2_1)); 6329 dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n", 6330 RREG32(mmMC_CG_CONFIG)); 6331 dev_info(adev->dev, " MC_ARB_CG=0x%08X\n", 6332 RREG32(mmMC_ARB_CG)); 6333 dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n", 6334 RREG32_DIDT(ixDIDT_SQ_CTRL0)); 6335 dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n", 6336 RREG32_DIDT(ixDIDT_DB_CTRL0)); 6337 dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n", 6338 RREG32_DIDT(ixDIDT_TD_CTRL0)); 6339 dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n", 6340 RREG32_DIDT(ixDIDT_TCP_CTRL0)); 6341 dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n", 6342 RREG32_SMC(ixCG_THERMAL_INT)); 6343 dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n", 6344 RREG32_SMC(ixCG_THERMAL_CTRL)); 6345 dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n", 6346 RREG32_SMC(ixGENERAL_PWRMGT)); 6347 dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n", 6348 RREG32(mmMC_SEQ_CNTL_3)); 6349 dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n", 6350 RREG32_SMC(ixLCAC_MC0_CNTL)); 6351 dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n", 6352 RREG32_SMC(ixLCAC_MC1_CNTL)); 6353 dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n", 6354 RREG32_SMC(ixLCAC_CPL_CNTL)); 6355 dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n", 6356 RREG32_SMC(ixSCLK_PWRMGT_CNTL)); 6357 dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n", 6358 RREG32(mmBIF_LNCNT_RESET)); 6359 dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n", 6360 RREG32_SMC(ixFIRMWARE_FLAGS)); 6361 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n", 6362 RREG32_SMC(ixCG_SPLL_FUNC_CNTL)); 6363 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n", 6364 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2)); 6365 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n", 6366 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3)); 6367 dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n", 6368 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4)); 6369 dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n", 6370 
RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM)); 6371 dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n", 6372 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2)); 6373 dev_info(adev->dev, " DLL_CNTL=0x%08X\n", 6374 RREG32(mmDLL_CNTL)); 6375 dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n", 6376 RREG32(mmMCLK_PWRMGT_CNTL)); 6377 dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n", 6378 RREG32(mmMPLL_AD_FUNC_CNTL)); 6379 dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n", 6380 RREG32(mmMPLL_DQ_FUNC_CNTL)); 6381 dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n", 6382 RREG32(mmMPLL_FUNC_CNTL)); 6383 dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n", 6384 RREG32(mmMPLL_FUNC_CNTL_1)); 6385 dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n", 6386 RREG32(mmMPLL_FUNC_CNTL_2)); 6387 dev_info(adev->dev, " MPLL_SS1=0x%08X\n", 6388 RREG32(mmMPLL_SS1)); 6389 dev_info(adev->dev, " MPLL_SS2=0x%08X\n", 6390 RREG32(mmMPLL_SS2)); 6391 dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n", 6392 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL)); 6393 dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n", 6394 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2)); 6395 dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n", 6396 RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER)); 6397 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n", 6398 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0)); 6399 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n", 6400 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1)); 6401 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n", 6402 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2)); 6403 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n", 6404 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3)); 6405 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n", 6406 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4)); 6407 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n", 6408 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5)); 6409 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n", 6410 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6)); 6411 dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n", 6412 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7)); 6413 dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n", 6414 RREG32_SMC(ixRCU_UC_EVENTS)); 6415 dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n", 6416 RREG32_SMC(ixDPM_TABLE_475)); 6417 dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n", 6418 RREG32(mmMC_SEQ_RAS_TIMING_LP)); 6419 dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n", 6420 RREG32(mmMC_SEQ_RAS_TIMING)); 6421 dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n", 6422 RREG32(mmMC_SEQ_CAS_TIMING_LP)); 6423 dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n", 6424 RREG32(mmMC_SEQ_CAS_TIMING)); 6425 dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n", 6426 RREG32(mmMC_SEQ_DLL_STBY_LP)); 6427 dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n", 6428 RREG32(mmMC_SEQ_DLL_STBY)); 6429 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n", 6430 RREG32(mmMC_SEQ_G5PDX_CMD0_LP)); 6431 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n", 6432 RREG32(mmMC_SEQ_G5PDX_CMD0)); 6433 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n", 6434 RREG32(mmMC_SEQ_G5PDX_CMD1_LP)); 6435 dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n", 6436 RREG32(mmMC_SEQ_G5PDX_CMD1)); 6437 dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n", 6438 RREG32(mmMC_SEQ_G5PDX_CTRL_LP)); 6439 dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n", 6440 RREG32(mmMC_SEQ_G5PDX_CTRL)); 6441 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n", 6442 RREG32(mmMC_SEQ_PMG_DVS_CMD_LP)); 6443 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n", 6444 RREG32(mmMC_SEQ_PMG_DVS_CMD)); 6445 dev_info(adev->dev, " 
MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n", 6446 RREG32(mmMC_SEQ_PMG_DVS_CTL_LP)); 6447 dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n", 6448 RREG32(mmMC_SEQ_PMG_DVS_CTL)); 6449 dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n", 6450 RREG32(mmMC_SEQ_MISC_TIMING_LP)); 6451 dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n", 6452 RREG32(mmMC_SEQ_MISC_TIMING)); 6453 dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n", 6454 RREG32(mmMC_SEQ_MISC_TIMING2_LP)); 6455 dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n", 6456 RREG32(mmMC_SEQ_MISC_TIMING2)); 6457 dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n", 6458 RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP)); 6459 dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n", 6460 RREG32(mmMC_PMG_CMD_EMRS)); 6461 dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n", 6462 RREG32(mmMC_SEQ_PMG_CMD_MRS_LP)); 6463 dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n", 6464 RREG32(mmMC_PMG_CMD_MRS)); 6465 dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n", 6466 RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP)); 6467 dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n", 6468 RREG32(mmMC_PMG_CMD_MRS1)); 6469 dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n", 6470 RREG32(mmMC_SEQ_WR_CTL_D0_LP)); 6471 dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n", 6472 RREG32(mmMC_SEQ_WR_CTL_D0)); 6473 dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n", 6474 RREG32(mmMC_SEQ_WR_CTL_D1_LP)); 6475 dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n", 6476 RREG32(mmMC_SEQ_WR_CTL_D1)); 6477 dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n", 6478 RREG32(mmMC_SEQ_RD_CTL_D0_LP)); 6479 dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n", 6480 RREG32(mmMC_SEQ_RD_CTL_D0)); 6481 dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n", 6482 RREG32(mmMC_SEQ_RD_CTL_D1_LP)); 6483 dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n", 6484 RREG32(mmMC_SEQ_RD_CTL_D1)); 6485 dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n", 6486 RREG32(mmMC_SEQ_PMG_TIMING_LP)); 6487 dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n", 6488 RREG32(mmMC_SEQ_PMG_TIMING)); 6489 dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n", 6490 RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP)); 6491 dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n", 6492 RREG32(mmMC_PMG_CMD_MRS2)); 6493 dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n", 6494 RREG32(mmMC_SEQ_WR_CTL_2_LP)); 6495 dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n", 6496 RREG32(mmMC_SEQ_WR_CTL_2)); 6497 dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n", 6498 RREG32_PCIE(ixPCIE_LC_SPEED_CNTL)); 6499 dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n", 6500 RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL)); 6501 dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n", 6502 RREG32(mmSMC_IND_INDEX_0)); 6503 dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n", 6504 RREG32(mmSMC_IND_DATA_0)); 6505 dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n", 6506 RREG32(mmSMC_IND_ACCESS_CNTL)); 6507 dev_info(adev->dev, " SMC_RESP_0=0x%08X\n", 6508 RREG32(mmSMC_RESP_0)); 6509 dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n", 6510 RREG32(mmSMC_MESSAGE_0)); 6511 dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n", 6512 RREG32_SMC(ixSMC_SYSCON_RESET_CNTL)); 6513 dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n", 6514 RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0)); 6515 dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n", 6516 RREG32_SMC(ixSMC_SYSCON_MISC_CNTL)); 6517 dev_info(adev->dev, " SMC_PC_C=0x%08X\n", 6518 RREG32_SMC(ixSMC_PC_C)); 6519} 6520 6521static int ci_dpm_soft_reset(void *handle) 6522{ 6523 return 0; 6524} 6525 6526static int 
ci_dpm_set_interrupt_state(struct amdgpu_device *adev, 6527 struct amdgpu_irq_src *source, 6528 unsigned type, 6529 enum amdgpu_interrupt_state state) 6530{ 6531 u32 cg_thermal_int; 6532 6533 switch (type) { 6534 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: 6535 switch (state) { 6536 case AMDGPU_IRQ_STATE_DISABLE: 6537 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6538 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6539 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6540 break; 6541 case AMDGPU_IRQ_STATE_ENABLE: 6542 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6543 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; 6544 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6545 break; 6546 default: 6547 break; 6548 } 6549 break; 6550 6551 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: 6552 switch (state) { 6553 case AMDGPU_IRQ_STATE_DISABLE: 6554 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6555 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6556 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6557 break; 6558 case AMDGPU_IRQ_STATE_ENABLE: 6559 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); 6560 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; 6561 WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); 6562 break; 6563 default: 6564 break; 6565 } 6566 break; 6567 6568 default: 6569 break; 6570 } 6571 return 0; 6572} 6573 6574static int ci_dpm_process_interrupt(struct amdgpu_device *adev, 6575 struct amdgpu_irq_src *source, 6576 struct amdgpu_iv_entry *entry) 6577{ 6578 bool queue_thermal = false; 6579 6580 if (entry == NULL) 6581 return -EINVAL; 6582 6583 switch (entry->src_id) { 6584 case 230: /* thermal low to high */ 6585 DRM_DEBUG("IH: thermal low to high\n"); 6586 adev->pm.dpm.thermal.high_to_low = false; 6587 queue_thermal = true; 6588 break; 6589 case 231: /* thermal high to low */ 6590 DRM_DEBUG("IH: thermal high to low\n"); 6591 adev->pm.dpm.thermal.high_to_low = true; 6592 queue_thermal = true; 6593 break; 6594 default: 6595 break; 6596 } 6597 6598 if (queue_thermal) 6599 schedule_work(&adev->pm.dpm.thermal.work); 6600 6601 return 0; 6602} 6603 6604static int ci_dpm_set_clockgating_state(void *handle, 6605 enum amd_clockgating_state state) 6606{ 6607 return 0; 6608} 6609 6610static int ci_dpm_set_powergating_state(void *handle, 6611 enum amd_powergating_state state) 6612{ 6613 return 0; 6614} 6615 6616const struct amd_ip_funcs ci_dpm_ip_funcs = { 6617 .early_init = ci_dpm_early_init, 6618 .late_init = ci_dpm_late_init, 6619 .sw_init = ci_dpm_sw_init, 6620 .sw_fini = ci_dpm_sw_fini, 6621 .hw_init = ci_dpm_hw_init, 6622 .hw_fini = ci_dpm_hw_fini, 6623 .suspend = ci_dpm_suspend, 6624 .resume = ci_dpm_resume, 6625 .is_idle = ci_dpm_is_idle, 6626 .wait_for_idle = ci_dpm_wait_for_idle, 6627 .soft_reset = ci_dpm_soft_reset, 6628 .print_status = ci_dpm_print_status, 6629 .set_clockgating_state = ci_dpm_set_clockgating_state, 6630 .set_powergating_state = ci_dpm_set_powergating_state, 6631}; 6632 6633static const struct amdgpu_dpm_funcs ci_dpm_funcs = { 6634 .get_temperature = &ci_dpm_get_temp, 6635 .pre_set_power_state = &ci_dpm_pre_set_power_state, 6636 .set_power_state = &ci_dpm_set_power_state, 6637 .post_set_power_state = &ci_dpm_post_set_power_state, 6638 .display_configuration_changed = &ci_dpm_display_configuration_changed, 6639 .get_sclk = &ci_dpm_get_sclk, 6640 .get_mclk = &ci_dpm_get_mclk, 6641 .print_power_state = &ci_dpm_print_power_state, 6642 .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, 6643 
.force_performance_level = &ci_dpm_force_performance_level, 6644 .vblank_too_short = &ci_dpm_vblank_too_short, 6645 .powergate_uvd = &ci_dpm_powergate_uvd, 6646 .set_fan_control_mode = &ci_dpm_set_fan_control_mode, 6647 .get_fan_control_mode = &ci_dpm_get_fan_control_mode, 6648 .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, 6649 .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, 6650}; 6651 6652static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) 6653{ 6654 if (adev->pm.funcs == NULL) 6655 adev->pm.funcs = &ci_dpm_funcs; 6656} 6657 6658static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = { 6659 .set = ci_dpm_set_interrupt_state, 6660 .process = ci_dpm_process_interrupt, 6661}; 6662 6663static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev) 6664{ 6665 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; 6666 adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; 6667}
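/*
 * Orientation note on the two tables above, with a hedged usage sketch
 * (the caller shown is hypothetical; only the function-pointer shapes
 * are taken from this file). The IP framework drives ci_dpm_ip_funcs
 * through the amd_ip_funcs lifecycle (early_init -> sw_init -> hw_init,
 * and hw_fini -> sw_fini on teardown), while the power-management core
 * reaches this driver through adev->pm.funcs, e.g.:
 */
#if 0
	/* hypothetical caller */
	int temp = adev->pm.funcs->get_temperature(adev);	/* millidegrees C */
	u32 sclk = adev->pm.funcs->get_sclk(adev, true);	/* lowest level */
#endif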