Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.8-rc7 2266 lines 59 kB view raw
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/seq_file.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "vid.h"
#include "vi_dpm.h"
#include "amdgpu_dpm.h"
#include "cz_dpm.h"
#include "cz_ppsmc.h"
#include "atom.h"

#include "smu/smu_8_0_d.h"
#include "smu/smu_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "bif/bif_5_1_d.h"
#include "gfx_v8_0.h"

static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);

/* Return the Carrizo-specific power state hanging off a generic amdgpu_ps. */
static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps)
{
	struct cz_ps *ps = rps->ps_priv;

	return ps;
}

/* Return the Carrizo power-management private data for this device. */
static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

/*
 * Convert an 8-bit voltage index into a voltage value:
 * voltage = 6200 - index * 25 (units as used by the rest of the driver;
 * presumably mV * 4 / VID encoding — TODO confirm against SMU docs).
 */
static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
						 uint16_t voltage)
{
	uint16_t tmp = 6200 - voltage * 25;

	return tmp;
}

/*
 * Fill @table with the maximum sclk/vddc from the vddc-on-sclk dependency
 * table (its last entry) and the first NB power-state memory clock.
 */
static void cz_construct_max_power_limits_table(struct amdgpu_device *adev,
				struct amdgpu_clock_and_voltage_limits *table)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *dep_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (dep_table->count > 0) {
		/* entries are ordered; the last one is the maximum */
		table->sclk = dep_table->entries[dep_table->count - 1].clk;
		table->vddc = cz_convert_8bit_index_to_voltage(adev,
				dep_table->entries[dep_table->count - 1].v);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];

}

/* Overlay of the AtomBIOS integrated-system-info table revisions. */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};

/*
 * Parse the AtomBIOS IntegratedSystemInfo table (only content revision 9
 * is supported) into pi->sys_info: boot clocks, thermal limits, NB power
 * states, display clocks and UMA configuration.  Returns 0 on success or
 * -EINVAL on an unsupported/inconsistent table.
 */
static int cz_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i = 0;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 9) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk =
			le32_to_cpu(igp_info->info_9.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk =
			le32_to_cpu(igp_info->info_9.ulBootUpUMAClock);
		pi->sys_info.dentist_vco_freq =
			le32_to_cpu(igp_info->info_9.ulDentistVCOFreq);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_9.usBootUpNBVoltage);

		/* 0 in the BIOS table means "use the driver default" */
		if (igp_info->info_9.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt;

		if (igp_info->info_9.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt;

		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
			return -EINVAL;
		}

		/* NB DPM needs both the BIOS capability bit and driver policy */
		if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) &&
				pi->enable_nb_ps_policy)
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
			if (i < CZ_NUM_NBPMEMORY_CLOCK)
				pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
			le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]);
		}

		for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++)
			pi->sys_info.display_clock[i] =
			le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK);

		for (i = 0; i < CZ_NUM_NBPSTATES; i++)
			pi->sys_info.nbp_voltage_index[i] =
				le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]);

		if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) &
				SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		pi->sys_info.uma_channel_number =
			igp_info->info_9.ucUMAChannelNumber;

		cz_construct_max_power_limits_table(adev,
			&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}

	return 0;
}

/*
 * Convert the raw 8-bit voltage indices stored in the UVD/VCE/ACP
 * clock-voltage dependency tables into actual voltage values, in place.
 */
static void cz_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				cz_convert_8bit_index_to_voltage(adev,
						uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				cz_convert_8bit_index_to_voltage(adev,
						vce_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				cz_convert_8bit_index_to_voltage(adev,
						acp_table->entries[i].v);
	}

}

/* Build the boot power level from the bootup values parsed out of the BIOS. */
static void cz_construct_boot_state(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;

}

/* Replace a parsed state's levels with the single boot level. */
static void cz_patch_boot_state(struct amdgpu_device *adev,
				struct cz_ps *ps)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

/* Overlay of the PPLib per-level clock info variants; CZ uses .carrizo. */
union pplib_clock_info {
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo;
};

/*
 * Fill power level @index of @rps from a PPLib clock-info entry, looking up
 * sclk/vddc via the vddc-on-sclk dependency table, and bump num_levels.
 */
static void cz_parse_pplib_clock_info(struct amdgpu_device *adev,
					struct amdgpu_ps *rps, int index,
					union pplib_clock_info *clock_info)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct cz_ps *ps = cz_get_ps(rps);
	struct cz_pl *pl = &ps->levels[index];
	struct amdgpu_clock_voltage_dependency_table *table =
			&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	pl->sclk = table->entries[clock_info->carrizo.index].clk;
	pl->vddc_index = table->entries[clock_info->carrizo.index].v;

	ps->num_levels = index + 1;

	/* deep-sleep divider; 5 appears to be the driver's fixed choice */
	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}

}

/*
 * Fill the generic parts of @rps (caps, classification, UVD clocks) from a
 * PPLib non-clock-info entry, and remember the boot and UVD states.
 */
static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev,
			struct amdgpu_ps *rps,
			struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
			u8 table_rev)
{
	struct cz_ps *ps = cz_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* VCLK/DCLK fields only exist from table revision 2 on */
	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		cz_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;

}

/* Overlay of the AtomBIOS PowerPlay table revisions. */
union power_info {
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

/* Overlay of the PPLib power-state layouts; CZ walks the v2 layout. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

/*
 * Parse the AtomBIOS PowerPlay table into adev->pm.dpm.ps[]: allocates the
 * state array plus one cz_ps per state and fills each state's clock levels
 * and non-clock info.  Returns 0 on success, -EINVAL if the table header
 * cannot be parsed, or -ENOMEM on allocation failure.
 */
static int cz_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct cz_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* the three sub-arrays are located by offsets in the table header */
	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
					state_array->ucNumEntries, GFP_KERNEL);

	if (!adev->pm.dpm.ps)
		return -ENOMEM;

	power_state_offset = (u8 *)state_array->states;
	adev->pm.dpm.platform_caps =
le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time =
		le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time =
		le16_to_cpu(power_info->pplib.usVoltageTime);

	for (i = 0; i < state_array->ucNumEntries; i++) {
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];

		ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL);
		if (ps == NULL) {
			/*
			 * NOTE(review): only the state array is freed here;
			 * cz_ps allocations from earlier iterations are left
			 * to cz_dpm_fini() via num_ps — verify callers always
			 * run the fini path on failure.
			 */
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}

		adev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = power_state->v2.clockInfoIndex[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CZ_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				&clock_info_array->clockInfo[clock_array_index *
				clock_info_array->ucEntrySize];
			cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i],
						  k, clock_info);
			k++;
		}
		cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		/* v2 state entries: 2 header bytes + one index per DPM level */
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	return 0;
}

/*
 * Read the DPM table location out of the SMU firmware header and cache it
 * in pi->dpm_table_start.  Returns the SMC read status (0 on success).
 */
static int cz_process_firmware_header(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	u32 tmp;
	int ret;

	ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION +
				     offsetof(struct SMU8_Firmware_Header,
				     DpmTable),
				     &tmp, pi->sram_end);

	if (ret == 0)
		pi->dpm_table_start = tmp;

	return ret;
}

/*
 * Allocate and initialize the Carrizo DPM private data: driver defaults,
 * capability flags, and the BIOS-derived tables (system info, power table,
 * firmware header).  On success pi->dpm_enabled is set.
 */
static int cz_dpm_init(struct amdgpu_device *adev)
{
	struct cz_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL);
	if (NULL == pi)
		return -ENOMEM;

	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	pi->sram_end = SMC_RAM_END;

	/* set up DPM defaults */
	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
		pi->active_target[i] = CZ_AT_DFLT;

	pi->mgcg_cgtt_local0 = 0x0;
	pi->mgcg_cgtt_local1 = 0x0;
	pi->clock_slow_down_step = 25000;
	pi->skip_clock_slow_down = 1;
	pi->enable_nb_ps_policy = false;
	pi->caps_power_containment = true;
	pi->caps_cac = true;
	/* DIDT is off by default; the ramping caps below only apply if on */
	pi->didt_enabled = false;
	if (pi->didt_enabled) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}
	pi->caps_sclk_ds = true;
	pi->voting_clients = 0x00c00033;
	pi->auto_thermal_throttling_enabled = true;
	pi->bapm_enabled = false;
	pi->disable_nb_ps3_in_battery = false;
	pi->voltage_drop_threshold = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->gfx_pg_threshold = 500;
	pi->caps_fps = true;
	/* uvd */
	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	/* vce */
	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_vce_dpm = true;
	/* acp */
	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_acp_dpm = true;

	pi->caps_stable_power_state = false;
	pi->nb_dpm_enabled_by_driver = true;
	pi->nb_dpm_enabled = false;
	pi->caps_voltage_island = false;
	/* flags which indicate need to upload pptable */
	pi->need_pptable_upload = true;

	ret = cz_parse_sys_info_table(adev);
	if (ret)
		return ret;

	cz_patch_voltage_values(adev);
	cz_construct_boot_state(adev);

	ret = cz_parse_power_table(adev);
	if (ret)
		return ret;

	ret = cz_process_firmware_header(adev);
	if (ret)
		return ret;

	pi->dpm_enabled = true;
	pi->uvd_dynamic_pg = false;

	return 0;
}

/* Free everything cz_dpm_init() allocated (per-state privs, state array,
 * private data) and the extended power table. */
static void cz_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		kfree(adev->pm.dpm.ps[i].ps_priv);

	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}

/* SVI current-VID registers (SMC address space) and field layout. */
#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24

/*
 * debugfs dump of the current performance level: active sclk DPM level,
 * NB/GFX voltages read back from the SVI registers, and the current
 * UVD/VCE levels when those blocks are not power gated.
 */
static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
				       TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
	u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
				      TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
	u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
				      TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
	u32 sclk, vclk, dclk, ecclk, tmp;
	u16 vddnb, vddgfx;

	if (sclk_index >= NUM_SCLK_LEVELS) {
		seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
	} else {
		sclk = table->entries[sclk_index].clk;
		seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
	}

	tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
	       CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
	vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
	tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
	       CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
	vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
	seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);

	seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
	if (!pi->uvd_power_gated) {
		if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
			seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
		} else {
			vclk = uvd_table->entries[uvd_index].vclk;
			dclk = uvd_table->entries[uvd_index].dclk;
			seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
		}
	}

	seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
	if (!pi->vce_power_gated) {
		if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
			seq_printf(m, "invalid vce dpm level %d\n", vce_index);
		} else {
			ecclk = vce_table->entries[vce_index].ecclk;
			seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
		}
	}
}

/* Log a power state's classification, caps, UVD clocks and all levels. */
static void cz_dpm_print_power_state(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	int i;
	struct cz_ps *ps = cz_get_ps(rps);

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);

	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct cz_pl *pl = &ps->levels[i];

		DRM_INFO("\t\tpower level %d    sclk: %u vddc: %u\n",
			 i, pl->sclk,
			 cz_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}

	amdgpu_dpm_print_ps_status(adev, rps);
}

static void cz_dpm_set_funcs(struct amdgpu_device *adev);

/* IP-block early init: just install the DPM function table. */
static int cz_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cz_dpm_set_funcs(adev);

	return 0;
}


/* IP-block late init: create sysfs entries and gate the unused blocks. */
static int cz_dpm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_dpm) {
		int ret;
		/* init the sysfs and debugfs files late */
		ret = amdgpu_pm_sysfs_init(adev);
		if (ret)
			return ret;

		/* powerdown unused blocks for now */
		cz_dpm_powergate_uvd(adev, true);
		cz_dpm_powergate_vce(adev, true);
	}

	return 0;
}

/*
 * IP-block sw init: set default power-management state, then run the full
 * DPM initialization under the pm mutex (skipped when the amdgpu_dpm module
 * parameter is 0).
 */
static int cz_dpm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;
	/* fix me to add thermal support TODO */

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = cz_dpm_init(adev);
	if (ret)
		goto dpm_init_failed;

	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);

	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_init_failed:
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");

	return ret;
}

/* IP-block sw fini: tear down sysfs and the DPM state under the pm mutex. */
static int cz_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	cz_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

/* Clear the recorded active-process mask. */
static void cz_reset_ap_mask(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->active_process_mask = 0;

}

/* Thin wrapper: fetch the SMU's copy of the power play table into *table. */
static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev,
							void **table)
{
	int ret = 0;

	ret = cz_smu_download_pptable(adev, table);

	return ret;
}

/*
 * Download the SMU clock table, patch each hardware power level's
 * sclk/acp/uvd/vce breakdown entries (voltage id, frequency and DFS
 * divider) from the driver's dependency tables, then upload it back to
 * the SMU.  No-op unless pi->need_pptable_upload is set.
 */
static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct SMU8_Fusion_ClkTable *clock_table;
	struct atom_clock_dividers dividers;
	void *table = NULL;
	uint8_t i = 0;
	int ret = 0;

	struct amdgpu_clock_voltage_dependency_table *vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *vddgfx_table =
		&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (!pi->need_pptable_upload)
		return 0;

	ret = cz_dpm_download_pptable_from_smu(adev, &table);
	if (ret) {
		DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n");
		return -EINVAL;
	}

	clock_table = (struct SMU8_Fusion_ClkTable *)table;
	/* patch clock table */
	if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
	    vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
	    uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
	    vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS ||
	    acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) {
		DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n");
		return -EINVAL;
	}

	for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {

		/* vddc sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
							 false, &dividers);
		if (ret)
			return ret;
		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.post_divider;

		/* vddgfx sclk */
		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0;

		/* acp breakdown */
		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
			(i < acp_table->count) ? acp_table->entries[i].clk : 0;
		/*
		 * NOTE(review): the divider here is computed from the *Sclk*
		 * breakdown frequency, not the Aclk frequency set just above —
		 * verify against the SMU8 spec / upstream history before
		 * changing.
		 */
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
							 false, &dividers);
		if (ret)
			return ret;
		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.post_divider;

		/* uvd breakdown */
		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
							 false, &dividers);
		if (ret)
			return ret;
		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.post_divider;

		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
							 false, &dividers);
		if (ret)
			return ret;
		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.post_divider;

		/* vce breakdown */
		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
							 false, &dividers);
		if (ret)
			return ret;
		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.post_divider;
	}

	/* its time to upload to SMU */
	ret = cz_smu_upload_pptable(adev);
	if (ret) {
		DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n");
		return ret;
	}

	return 0;
}

/*
 * Initialize the sclk DPM min/max limits: min is 0, max is the clock of
 * the SMU-reported max sclk level (clamped to the dependency table).
 */
static void cz_init_sclk_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	uint32_t clock = 0, level;

	if (!table || !table->count) {
		DRM_ERROR("Invalid Voltage Dependency table.\n");
		return;
	}

	pi->sclk_dpm.soft_min_clk = 0;
	pi->sclk_dpm.hard_min_clk = 0;
	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel);
	level = cz_get_argument(adev);
	if (level < table->count)
		clock = table->entries[level].clk;
	else {
		DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n");
		clock = table->entries[table->count - 1].clk;
	}

	pi->sclk_dpm.soft_max_clk = clock;
	pi->sclk_dpm.hard_max_clk = clock;

}

/* Same as cz_init_sclk_limit() but for the UVD vclk limits. */
static void cz_init_uvd_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	uint32_t clock = 0, level;

	if (!table || !table->count) {
		DRM_ERROR("Invalid Voltage Dependency table.\n");
		return;
	}

	pi->uvd_dpm.soft_min_clk = 0;
	pi->uvd_dpm.hard_min_clk = 0;
	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel);
	level = cz_get_argument(adev);
	if (level < table->count)
		clock = table->entries[level].vclk;
	else {
		DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n");
		clock = table->entries[table->count - 1].vclk;
	}

	pi->uvd_dpm.soft_max_clk = clock;
	pi->uvd_dpm.hard_max_clk = clock;

}

/*
 * Initialize the VCE eclk limits; unlike sclk/uvd, the minimum is the
 * first table entry rather than 0.
 */
static void cz_init_vce_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	uint32_t clock = 0, level;

	if (!table || !table->count) {
		DRM_ERROR("Invalid Voltage Dependency table.\n");
		return;
	}

	pi->vce_dpm.soft_min_clk = table->entries[0].ecclk;
	pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;
	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel);
	level = cz_get_argument(adev);
	if (level < table->count)
		clock = table->entries[level].ecclk;
	else {
		/* future BIOS would fix this error */
		DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n");
		clock = table->entries[table->count - 1].ecclk;
	}

	pi->vce_dpm.soft_max_clk = clock;
	pi->vce_dpm.hard_max_clk = clock;

}

/* Same as cz_init_sclk_limit() but for the ACP clock limits. */
static void cz_init_acp_limit(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	uint32_t clock = 0, level;

	if (!table || !table->count) {
		DRM_ERROR("Invalid Voltage Dependency table.\n");
		return;
	}

	pi->acp_dpm.soft_min_clk = 0;
	pi->acp_dpm.hard_min_clk = 0;
	cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel);
	level = cz_get_argument(adev);
	if (level < table->count)
		clock = table->entries[level].clk;
	else {
		DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n");
		clock = table->entries[table->count - 1].clk;
	}

	pi->acp_dpm.soft_max_clk = clock;
	pi->acp_dpm.hard_max_clk = clock;

}

/* Mark UVD/VCE/ACP as not power gated (initial tracking state). */
static void cz_init_pg_state(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->acp_power_gated = false;

}

/* Reset the low-sclk interrupt threshold. */
static void cz_init_sclk_threshold(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);

	pi->low_sclk_interrupt_threshold = 0;

}

/* One-stop ASIC setup: upload the pptable and initialize all DPM limits. */
static void cz_dpm_setup_asic(struct amdgpu_device *adev)
{
	cz_reset_ap_mask(adev);
	cz_dpm_upload_pptable_to_smu(adev);
	cz_init_sclk_limit(adev);
	cz_init_uvd_limit(adev);
	cz_init_vce_limit(adev);
	cz_init_acp_limit(adev);
	cz_init_pg_state(adev);
	cz_init_sclk_threshold(adev);

}

/*
 * Query the SMU feature-status word and test it against @feature.
 * Returns true if any requested feature bit is set.
 */
static bool cz_check_smu_feature(struct amdgpu_device *adev,
				uint32_t feature)
{
	uint32_t smu_feature = 0;
	int ret;

	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_GetFeatureStatus, 0);
	if (ret) {
		DRM_ERROR("Failed to get SMU features from SMC.\n");
		return false;
	} else {
		smu_feature = cz_get_argument(adev);
		if (feature & smu_feature)
			return true;
	}

	return false;
}

/* True when the SMU reports sclk DPM as enabled. */
static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev)
{
	if (cz_check_smu_feature(adev,
				SMU_EnabledFeatureScoreboard_SclkDpmOn))
		return true;

	return false;
}

/* Grant the default set of frequency-transition voting clients. */
static void cz_program_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
}

/* Revoke all frequency-transition voting clients. */
static void cz_clear_voting_clients(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

/* Ask the SMU to enable sclk DPM (when the module parameter allows DPM). */
static int cz_start_dpm(struct amdgpu_device *adev)
{
	int ret = 0;

	if (amdgpu_dpm) {
		ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK);
		if (ret) {
			DRM_ERROR("SMU feature: SCLK_DPM enable failed\n");
			return -EINVAL;
		}
	}

	return 0;
}

/* Ask the SMU to disable sclk DPM (only if DPM is currently enabled). */
static int cz_stop_dpm(struct amdgpu_device *adev)
{
	int ret = 0;

	if (amdgpu_dpm && adev->pm.dpm_enabled) {
		ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK);
		if (ret) {
			DRM_ERROR("SMU feature: SCLK_DPM disable failed\n");
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Map an sclk value to a DPM level index for the given soft/hard
 * min/max message: min messages pick the lowest level whose clock is
 * >= @clock, max messages pick the highest level whose clock is
 * <= @clock, both clamped to the table bounds.
 */
static uint32_t cz_get_sclk_level(struct amdgpu_device *adev,
				uint32_t clock, uint16_t msg)
{
	int i = 0;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	switch (msg) {
	case PPSMC_MSG_SetSclkSoftMin:
	case PPSMC_MSG_SetSclkHardMin:
		for (i = 0; i < table->count; i++)
			if (clock <= table->entries[i].clk)
				break;
		if (i == table->count)
			i = table->count - 1;
		break;
	case PPSMC_MSG_SetSclkSoftMax:
	case PPSMC_MSG_SetSclkHardMax:
		for (i = table->count - 1; i >= 0; i--)
			if (clock >= table->entries[i].clk)
				break;
		if (i < 0)
			i = 0;
		break;
	default:
		break;
	}

	return i;
}

/*
 * Same mapping as cz_get_sclk_level() but for VCE eclk; the loop bounds
 * keep the index in [0, count-1] without an explicit clamp.
 */
static uint32_t cz_get_eclk_level(struct amdgpu_device *adev,
				uint32_t clock, uint16_t msg)
{
	int i = 0;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	if (table->count == 0)
		return 0;

	switch (msg) {
	case PPSMC_MSG_SetEclkSoftMin:
	case PPSMC_MSG_SetEclkHardMin:
		for (i = 0; i < table->count-1; i++)
			if (clock <= table->entries[i].ecclk)
				break;
		break;
	case PPSMC_MSG_SetEclkSoftMax:
	case PPSMC_MSG_SetEclkHardMax:
		for (i = table->count - 1; i > 0; i--)
			if (clock >= table->entries[i].ecclk)
				break;
		break;
	default:
		break;
	}

	return i;
}

/* Same mapping as cz_get_sclk_level() but for UVD vclk. */
static uint32_t cz_get_uvd_level(struct amdgpu_device *adev,
				 uint32_t clock, uint16_t msg)
{
	int i = 0;
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;

	switch (msg) {
	case PPSMC_MSG_SetUvdSoftMin:
	case PPSMC_MSG_SetUvdHardMin:
		for (i = 0; i < table->count; i++)
			if (clock <= table->entries[i].vclk)
				break;
		if (i == table->count)
			i = table->count - 1;
		break;
	case PPSMC_MSG_SetUvdSoftMax:
	case PPSMC_MSG_SetUvdHardMax:
		for (i = table->count - 1; i >= 0; i--)
			if (clock >= table->entries[i].vclk)
				break;
		if (i < 0)
			i = 0;
		break;
	default:
		break;
	}

	return i;
}

/*
 * Pin the sclk soft min and max to the BIOS bootup engine clock by
 * sending the corresponding level indices to the SMU.
 */
static int cz_program_bootup_state(struct amdgpu_device *adev)
{
	struct cz_power_info *pi = cz_get_pi(adev);
	uint32_t soft_min_clk = 0;
	uint32_t soft_max_clk = 0;
	int ret = 0;

	pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk;
	pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk;

	soft_min_clk = cz_get_sclk_level(adev,
				pi->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMin);
	soft_max_clk = cz_get_sclk_level(adev,
				pi->sclk_dpm.soft_max_clk,
				PPSMC_MSG_SetSclkSoftMax);

	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMin, soft_min_clk);
	if (ret)
		return -EINVAL;

	ret = cz_send_msg_to_smc_with_parameter(adev,
				PPSMC_MSG_SetSclkSoftMax, soft_max_clk);
	if (ret)
		return -EINVAL;

	return 0;
}

/* TODO */
static int cz_disable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}

/* TODO */
static int cz_enable_cgpg(struct amdgpu_device *adev)
{
	return 0;
}

/* TODO */
static int cz_program_pt_config_registers(struct amdgpu_device *adev)
{
	return 0;
}

static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct cz_power_info *pi = cz_get_pi(adev);
uint32_t reg = 0; 1164 1165 if (pi->caps_sq_ramping) { 1166 reg = RREG32_DIDT(ixDIDT_SQ_CTRL0); 1167 if (enable) 1168 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); 1169 else 1170 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); 1171 WREG32_DIDT(ixDIDT_SQ_CTRL0, reg); 1172 } 1173 if (pi->caps_db_ramping) { 1174 reg = RREG32_DIDT(ixDIDT_DB_CTRL0); 1175 if (enable) 1176 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1); 1177 else 1178 reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0); 1179 WREG32_DIDT(ixDIDT_DB_CTRL0, reg); 1180 } 1181 if (pi->caps_td_ramping) { 1182 reg = RREG32_DIDT(ixDIDT_TD_CTRL0); 1183 if (enable) 1184 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1); 1185 else 1186 reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0); 1187 WREG32_DIDT(ixDIDT_TD_CTRL0, reg); 1188 } 1189 if (pi->caps_tcp_ramping) { 1190 reg = RREG32_DIDT(ixDIDT_TCP_CTRL0); 1191 if (enable) 1192 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); 1193 else 1194 reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); 1195 WREG32_DIDT(ixDIDT_TCP_CTRL0, reg); 1196 } 1197 1198} 1199 1200static int cz_enable_didt(struct amdgpu_device *adev, bool enable) 1201{ 1202 struct cz_power_info *pi = cz_get_pi(adev); 1203 int ret; 1204 1205 if (pi->caps_sq_ramping || pi->caps_db_ramping || 1206 pi->caps_td_ramping || pi->caps_tcp_ramping) { 1207 if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) { 1208 ret = cz_disable_cgpg(adev); 1209 if (ret) { 1210 DRM_ERROR("Pre Di/Dt disable cg/pg failed\n"); 1211 return -EINVAL; 1212 } 1213 adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE; 1214 } 1215 1216 ret = cz_program_pt_config_registers(adev); 1217 if (ret) { 1218 DRM_ERROR("Di/Dt config failed\n"); 1219 return -EINVAL; 1220 } 1221 cz_do_enable_didt(adev, enable); 1222 1223 if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) { 1224 ret = cz_enable_cgpg(adev); 1225 if (ret) { 1226 DRM_ERROR("Post Di/Dt enable cg/pg failed\n"); 1227 
return -EINVAL; 1228 } 1229 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; 1230 } 1231 } 1232 1233 return 0; 1234} 1235 1236/* TODO */ 1237static void cz_reset_acp_boot_level(struct amdgpu_device *adev) 1238{ 1239} 1240 1241static void cz_update_current_ps(struct amdgpu_device *adev, 1242 struct amdgpu_ps *rps) 1243{ 1244 struct cz_power_info *pi = cz_get_pi(adev); 1245 struct cz_ps *ps = cz_get_ps(rps); 1246 1247 pi->current_ps = *ps; 1248 pi->current_rps = *rps; 1249 pi->current_rps.ps_priv = ps; 1250 1251} 1252 1253static void cz_update_requested_ps(struct amdgpu_device *adev, 1254 struct amdgpu_ps *rps) 1255{ 1256 struct cz_power_info *pi = cz_get_pi(adev); 1257 struct cz_ps *ps = cz_get_ps(rps); 1258 1259 pi->requested_ps = *ps; 1260 pi->requested_rps = *rps; 1261 pi->requested_rps.ps_priv = ps; 1262 1263} 1264 1265/* PP arbiter support needed TODO */ 1266static void cz_apply_state_adjust_rules(struct amdgpu_device *adev, 1267 struct amdgpu_ps *new_rps, 1268 struct amdgpu_ps *old_rps) 1269{ 1270 struct cz_ps *ps = cz_get_ps(new_rps); 1271 struct cz_power_info *pi = cz_get_pi(adev); 1272 struct amdgpu_clock_and_voltage_limits *limits = 1273 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 1274 /* 10kHz memory clock */ 1275 uint32_t mclk = 0; 1276 1277 ps->force_high = false; 1278 ps->need_dfs_bypass = true; 1279 pi->video_start = new_rps->dclk || new_rps->vclk || 1280 new_rps->evclk || new_rps->ecclk; 1281 1282 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 1283 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) 1284 pi->battery_state = true; 1285 else 1286 pi->battery_state = false; 1287 1288 if (pi->caps_stable_power_state) 1289 mclk = limits->mclk; 1290 1291 if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1]) 1292 ps->force_high = true; 1293 1294} 1295 1296static int cz_dpm_enable(struct amdgpu_device *adev) 1297{ 1298 const char *chip_name; 1299 int ret = 0; 1300 1301 /* renable will hang up SMU, so check first */ 1302 if 
(cz_check_for_dpm_enabled(adev)) 1303 return -EINVAL; 1304 1305 cz_program_voting_clients(adev); 1306 1307 switch (adev->asic_type) { 1308 case CHIP_CARRIZO: 1309 chip_name = "carrizo"; 1310 break; 1311 case CHIP_STONEY: 1312 chip_name = "stoney"; 1313 break; 1314 default: 1315 BUG(); 1316 } 1317 1318 1319 ret = cz_start_dpm(adev); 1320 if (ret) { 1321 DRM_ERROR("%s DPM enable failed\n", chip_name); 1322 return -EINVAL; 1323 } 1324 1325 ret = cz_program_bootup_state(adev); 1326 if (ret) { 1327 DRM_ERROR("%s bootup state program failed\n", chip_name); 1328 return -EINVAL; 1329 } 1330 1331 ret = cz_enable_didt(adev, true); 1332 if (ret) { 1333 DRM_ERROR("%s enable di/dt failed\n", chip_name); 1334 return -EINVAL; 1335 } 1336 1337 cz_reset_acp_boot_level(adev); 1338 1339 cz_update_current_ps(adev, adev->pm.dpm.boot_ps); 1340 1341 return 0; 1342} 1343 1344static int cz_dpm_hw_init(void *handle) 1345{ 1346 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1347 int ret = 0; 1348 1349 mutex_lock(&adev->pm.mutex); 1350 1351 /* smu init only needs to be called at startup, not resume. 1352 * It should be in sw_init, but requires the fw info gathered 1353 * in sw_init from other IP modules. 
1354 */ 1355 ret = cz_smu_init(adev); 1356 if (ret) { 1357 DRM_ERROR("amdgpu: smc initialization failed\n"); 1358 mutex_unlock(&adev->pm.mutex); 1359 return ret; 1360 } 1361 1362 /* do the actual fw loading */ 1363 ret = cz_smu_start(adev); 1364 if (ret) { 1365 DRM_ERROR("amdgpu: smc start failed\n"); 1366 mutex_unlock(&adev->pm.mutex); 1367 return ret; 1368 } 1369 1370 if (!amdgpu_dpm) { 1371 adev->pm.dpm_enabled = false; 1372 mutex_unlock(&adev->pm.mutex); 1373 return ret; 1374 } 1375 1376 /* cz dpm setup asic */ 1377 cz_dpm_setup_asic(adev); 1378 1379 /* cz dpm enable */ 1380 ret = cz_dpm_enable(adev); 1381 if (ret) 1382 adev->pm.dpm_enabled = false; 1383 else 1384 adev->pm.dpm_enabled = true; 1385 1386 mutex_unlock(&adev->pm.mutex); 1387 1388 return 0; 1389} 1390 1391static int cz_dpm_disable(struct amdgpu_device *adev) 1392{ 1393 int ret = 0; 1394 1395 if (!cz_check_for_dpm_enabled(adev)) 1396 return -EINVAL; 1397 1398 ret = cz_enable_didt(adev, false); 1399 if (ret) { 1400 DRM_ERROR("disable di/dt failed\n"); 1401 return -EINVAL; 1402 } 1403 1404 /* powerup blocks */ 1405 cz_dpm_powergate_uvd(adev, false); 1406 cz_dpm_powergate_vce(adev, false); 1407 1408 cz_clear_voting_clients(adev); 1409 cz_stop_dpm(adev); 1410 cz_update_current_ps(adev, adev->pm.dpm.boot_ps); 1411 1412 return 0; 1413} 1414 1415static int cz_dpm_hw_fini(void *handle) 1416{ 1417 int ret = 0; 1418 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1419 1420 mutex_lock(&adev->pm.mutex); 1421 1422 /* smu fini only needs to be called at teardown, not suspend. 1423 * It should be in sw_fini, but we put it here for symmetry 1424 * with smu init. 
1425 */ 1426 cz_smu_fini(adev); 1427 1428 if (adev->pm.dpm_enabled) { 1429 ret = cz_dpm_disable(adev); 1430 1431 adev->pm.dpm.current_ps = 1432 adev->pm.dpm.requested_ps = 1433 adev->pm.dpm.boot_ps; 1434 } 1435 1436 adev->pm.dpm_enabled = false; 1437 1438 mutex_unlock(&adev->pm.mutex); 1439 1440 return ret; 1441} 1442 1443static int cz_dpm_suspend(void *handle) 1444{ 1445 int ret = 0; 1446 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1447 1448 if (adev->pm.dpm_enabled) { 1449 mutex_lock(&adev->pm.mutex); 1450 1451 ret = cz_dpm_disable(adev); 1452 1453 adev->pm.dpm.current_ps = 1454 adev->pm.dpm.requested_ps = 1455 adev->pm.dpm.boot_ps; 1456 1457 mutex_unlock(&adev->pm.mutex); 1458 } 1459 1460 return ret; 1461} 1462 1463static int cz_dpm_resume(void *handle) 1464{ 1465 int ret = 0; 1466 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1467 1468 mutex_lock(&adev->pm.mutex); 1469 1470 /* do the actual fw loading */ 1471 ret = cz_smu_start(adev); 1472 if (ret) { 1473 DRM_ERROR("amdgpu: smc start failed\n"); 1474 mutex_unlock(&adev->pm.mutex); 1475 return ret; 1476 } 1477 1478 if (!amdgpu_dpm) { 1479 adev->pm.dpm_enabled = false; 1480 mutex_unlock(&adev->pm.mutex); 1481 return ret; 1482 } 1483 1484 /* cz dpm setup asic */ 1485 cz_dpm_setup_asic(adev); 1486 1487 /* cz dpm enable */ 1488 ret = cz_dpm_enable(adev); 1489 if (ret) 1490 adev->pm.dpm_enabled = false; 1491 else 1492 adev->pm.dpm_enabled = true; 1493 1494 mutex_unlock(&adev->pm.mutex); 1495 /* upon resume, re-compute the clocks */ 1496 if (adev->pm.dpm_enabled) 1497 amdgpu_pm_compute_clocks(adev); 1498 1499 return 0; 1500} 1501 1502static int cz_dpm_set_clockgating_state(void *handle, 1503 enum amd_clockgating_state state) 1504{ 1505 return 0; 1506} 1507 1508static int cz_dpm_set_powergating_state(void *handle, 1509 enum amd_powergating_state state) 1510{ 1511 return 0; 1512} 1513 1514/* borrowed from KV, need future unify */ 1515static int cz_dpm_get_temperature(struct amdgpu_device 
*adev) 1516{ 1517 int actual_temp = 0; 1518 uint32_t temp = RREG32_SMC(0xC0300E0C); 1519 1520 if (temp) 1521 actual_temp = 1000 * ((temp / 8) - 49); 1522 1523 return actual_temp; 1524} 1525 1526static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev) 1527{ 1528 struct cz_power_info *pi = cz_get_pi(adev); 1529 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; 1530 struct amdgpu_ps *new_ps = &requested_ps; 1531 1532 cz_update_requested_ps(adev, new_ps); 1533 cz_apply_state_adjust_rules(adev, &pi->requested_rps, 1534 &pi->current_rps); 1535 1536 return 0; 1537} 1538 1539static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) 1540{ 1541 struct cz_power_info *pi = cz_get_pi(adev); 1542 struct amdgpu_clock_and_voltage_limits *limits = 1543 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 1544 uint32_t clock, stable_ps_clock = 0; 1545 1546 clock = pi->sclk_dpm.soft_min_clk; 1547 1548 if (pi->caps_stable_power_state) { 1549 stable_ps_clock = limits->sclk * 75 / 100; 1550 if (clock < stable_ps_clock) 1551 clock = stable_ps_clock; 1552 } 1553 1554 if (clock != pi->sclk_dpm.soft_min_clk) { 1555 pi->sclk_dpm.soft_min_clk = clock; 1556 cz_send_msg_to_smc_with_parameter(adev, 1557 PPSMC_MSG_SetSclkSoftMin, 1558 cz_get_sclk_level(adev, clock, 1559 PPSMC_MSG_SetSclkSoftMin)); 1560 } 1561 1562 if (pi->caps_stable_power_state && 1563 pi->sclk_dpm.soft_max_clk != clock) { 1564 pi->sclk_dpm.soft_max_clk = clock; 1565 cz_send_msg_to_smc_with_parameter(adev, 1566 PPSMC_MSG_SetSclkSoftMax, 1567 cz_get_sclk_level(adev, clock, 1568 PPSMC_MSG_SetSclkSoftMax)); 1569 } else { 1570 cz_send_msg_to_smc_with_parameter(adev, 1571 PPSMC_MSG_SetSclkSoftMax, 1572 cz_get_sclk_level(adev, 1573 pi->sclk_dpm.soft_max_clk, 1574 PPSMC_MSG_SetSclkSoftMax)); 1575 } 1576 1577 return 0; 1578} 1579 1580static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) 1581{ 1582 struct cz_power_info *pi = cz_get_pi(adev); 1583 1584 if (pi->caps_sclk_ds) { 1585 
cz_send_msg_to_smc_with_parameter(adev, 1586 PPSMC_MSG_SetMinDeepSleepSclk, 1587 CZ_MIN_DEEP_SLEEP_SCLK); 1588 } 1589 1590 return 0; 1591} 1592 1593/* ?? without dal support, is this still needed in setpowerstate list*/ 1594static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) 1595{ 1596 struct cz_power_info *pi = cz_get_pi(adev); 1597 1598 cz_send_msg_to_smc_with_parameter(adev, 1599 PPSMC_MSG_SetWatermarkFrequency, 1600 pi->sclk_dpm.soft_max_clk); 1601 1602 return 0; 1603} 1604 1605static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) 1606{ 1607 int ret = 0; 1608 struct cz_power_info *pi = cz_get_pi(adev); 1609 1610 /* also depend on dal NBPStateDisableRequired */ 1611 if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) { 1612 ret = cz_send_msg_to_smc_with_parameter(adev, 1613 PPSMC_MSG_EnableAllSmuFeatures, 1614 NB_DPM_MASK); 1615 if (ret) { 1616 DRM_ERROR("amdgpu: nb dpm enable failed\n"); 1617 return ret; 1618 } 1619 pi->nb_dpm_enabled = true; 1620 } 1621 1622 return ret; 1623} 1624 1625static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, 1626 bool enable) 1627{ 1628 if (enable) 1629 cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate); 1630 else 1631 cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate); 1632 1633} 1634 1635static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) 1636{ 1637 struct cz_power_info *pi = cz_get_pi(adev); 1638 struct cz_ps *ps = &pi->requested_ps; 1639 1640 if (pi->sys_info.nb_dpm_enable) { 1641 if (ps->force_high) 1642 cz_dpm_nbdpm_lm_pstate_enable(adev, false); 1643 else 1644 cz_dpm_nbdpm_lm_pstate_enable(adev, true); 1645 } 1646 1647 return 0; 1648} 1649 1650/* with dpm enabled */ 1651static int cz_dpm_set_power_state(struct amdgpu_device *adev) 1652{ 1653 cz_dpm_update_sclk_limit(adev); 1654 cz_dpm_set_deep_sleep_sclk_threshold(adev); 1655 cz_dpm_set_watermark_threshold(adev); 1656 cz_dpm_enable_nbdpm(adev); 1657 cz_dpm_update_low_memory_pstate(adev); 1658 1659 
return 0; 1660} 1661 1662static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) 1663{ 1664 struct cz_power_info *pi = cz_get_pi(adev); 1665 struct amdgpu_ps *ps = &pi->requested_rps; 1666 1667 cz_update_current_ps(adev, ps); 1668 1669} 1670 1671static int cz_dpm_force_highest(struct amdgpu_device *adev) 1672{ 1673 struct cz_power_info *pi = cz_get_pi(adev); 1674 int ret = 0; 1675 1676 if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) { 1677 pi->sclk_dpm.soft_min_clk = 1678 pi->sclk_dpm.soft_max_clk; 1679 ret = cz_send_msg_to_smc_with_parameter(adev, 1680 PPSMC_MSG_SetSclkSoftMin, 1681 cz_get_sclk_level(adev, 1682 pi->sclk_dpm.soft_min_clk, 1683 PPSMC_MSG_SetSclkSoftMin)); 1684 if (ret) 1685 return ret; 1686 } 1687 1688 return ret; 1689} 1690 1691static int cz_dpm_force_lowest(struct amdgpu_device *adev) 1692{ 1693 struct cz_power_info *pi = cz_get_pi(adev); 1694 int ret = 0; 1695 1696 if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) { 1697 pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk; 1698 ret = cz_send_msg_to_smc_with_parameter(adev, 1699 PPSMC_MSG_SetSclkSoftMax, 1700 cz_get_sclk_level(adev, 1701 pi->sclk_dpm.soft_max_clk, 1702 PPSMC_MSG_SetSclkSoftMax)); 1703 if (ret) 1704 return ret; 1705 } 1706 1707 return ret; 1708} 1709 1710static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev) 1711{ 1712 struct cz_power_info *pi = cz_get_pi(adev); 1713 1714 if (!pi->max_sclk_level) { 1715 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); 1716 pi->max_sclk_level = cz_get_argument(adev) + 1; 1717 } 1718 1719 if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) { 1720 DRM_ERROR("Invalid max sclk level!\n"); 1721 return -EINVAL; 1722 } 1723 1724 return pi->max_sclk_level; 1725} 1726 1727static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) 1728{ 1729 struct cz_power_info *pi = cz_get_pi(adev); 1730 struct amdgpu_clock_voltage_dependency_table *dep_table = 1731 
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 1732 uint32_t level = 0; 1733 int ret = 0; 1734 1735 pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk; 1736 level = cz_dpm_get_max_sclk_level(adev) - 1; 1737 if (level < dep_table->count) 1738 pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk; 1739 else 1740 pi->sclk_dpm.soft_max_clk = 1741 dep_table->entries[dep_table->count - 1].clk; 1742 1743 /* get min/max sclk soft value 1744 * notify SMU to execute */ 1745 ret = cz_send_msg_to_smc_with_parameter(adev, 1746 PPSMC_MSG_SetSclkSoftMin, 1747 cz_get_sclk_level(adev, 1748 pi->sclk_dpm.soft_min_clk, 1749 PPSMC_MSG_SetSclkSoftMin)); 1750 if (ret) 1751 return ret; 1752 1753 ret = cz_send_msg_to_smc_with_parameter(adev, 1754 PPSMC_MSG_SetSclkSoftMax, 1755 cz_get_sclk_level(adev, 1756 pi->sclk_dpm.soft_max_clk, 1757 PPSMC_MSG_SetSclkSoftMax)); 1758 if (ret) 1759 return ret; 1760 1761 DRM_DEBUG("DPM unforce state min=%d, max=%d.\n", 1762 pi->sclk_dpm.soft_min_clk, 1763 pi->sclk_dpm.soft_max_clk); 1764 1765 return 0; 1766} 1767 1768static int cz_dpm_uvd_force_highest(struct amdgpu_device *adev) 1769{ 1770 struct cz_power_info *pi = cz_get_pi(adev); 1771 int ret = 0; 1772 1773 if (pi->uvd_dpm.soft_min_clk != pi->uvd_dpm.soft_max_clk) { 1774 pi->uvd_dpm.soft_min_clk = 1775 pi->uvd_dpm.soft_max_clk; 1776 ret = cz_send_msg_to_smc_with_parameter(adev, 1777 PPSMC_MSG_SetUvdSoftMin, 1778 cz_get_uvd_level(adev, 1779 pi->uvd_dpm.soft_min_clk, 1780 PPSMC_MSG_SetUvdSoftMin)); 1781 if (ret) 1782 return ret; 1783 } 1784 1785 return ret; 1786} 1787 1788static int cz_dpm_uvd_force_lowest(struct amdgpu_device *adev) 1789{ 1790 struct cz_power_info *pi = cz_get_pi(adev); 1791 int ret = 0; 1792 1793 if (pi->uvd_dpm.soft_max_clk != pi->uvd_dpm.soft_min_clk) { 1794 pi->uvd_dpm.soft_max_clk = pi->uvd_dpm.soft_min_clk; 1795 ret = cz_send_msg_to_smc_with_parameter(adev, 1796 PPSMC_MSG_SetUvdSoftMax, 1797 cz_get_uvd_level(adev, 1798 pi->uvd_dpm.soft_max_clk, 1799 
PPSMC_MSG_SetUvdSoftMax)); 1800 if (ret) 1801 return ret; 1802 } 1803 1804 return ret; 1805} 1806 1807static uint32_t cz_dpm_get_max_uvd_level(struct amdgpu_device *adev) 1808{ 1809 struct cz_power_info *pi = cz_get_pi(adev); 1810 1811 if (!pi->max_uvd_level) { 1812 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); 1813 pi->max_uvd_level = cz_get_argument(adev) + 1; 1814 } 1815 1816 if (pi->max_uvd_level > CZ_MAX_HARDWARE_POWERLEVELS) { 1817 DRM_ERROR("Invalid max uvd level!\n"); 1818 return -EINVAL; 1819 } 1820 1821 return pi->max_uvd_level; 1822} 1823 1824static int cz_dpm_unforce_uvd_dpm_levels(struct amdgpu_device *adev) 1825{ 1826 struct cz_power_info *pi = cz_get_pi(adev); 1827 struct amdgpu_uvd_clock_voltage_dependency_table *dep_table = 1828 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1829 uint32_t level = 0; 1830 int ret = 0; 1831 1832 pi->uvd_dpm.soft_min_clk = dep_table->entries[0].vclk; 1833 level = cz_dpm_get_max_uvd_level(adev) - 1; 1834 if (level < dep_table->count) 1835 pi->uvd_dpm.soft_max_clk = dep_table->entries[level].vclk; 1836 else 1837 pi->uvd_dpm.soft_max_clk = 1838 dep_table->entries[dep_table->count - 1].vclk; 1839 1840 /* get min/max sclk soft value 1841 * notify SMU to execute */ 1842 ret = cz_send_msg_to_smc_with_parameter(adev, 1843 PPSMC_MSG_SetUvdSoftMin, 1844 cz_get_uvd_level(adev, 1845 pi->uvd_dpm.soft_min_clk, 1846 PPSMC_MSG_SetUvdSoftMin)); 1847 if (ret) 1848 return ret; 1849 1850 ret = cz_send_msg_to_smc_with_parameter(adev, 1851 PPSMC_MSG_SetUvdSoftMax, 1852 cz_get_uvd_level(adev, 1853 pi->uvd_dpm.soft_max_clk, 1854 PPSMC_MSG_SetUvdSoftMax)); 1855 if (ret) 1856 return ret; 1857 1858 DRM_DEBUG("DPM uvd unforce state min=%d, max=%d.\n", 1859 pi->uvd_dpm.soft_min_clk, 1860 pi->uvd_dpm.soft_max_clk); 1861 1862 return 0; 1863} 1864 1865static int cz_dpm_vce_force_highest(struct amdgpu_device *adev) 1866{ 1867 struct cz_power_info *pi = cz_get_pi(adev); 1868 int ret = 0; 1869 1870 if (pi->vce_dpm.soft_min_clk != 
pi->vce_dpm.soft_max_clk) { 1871 pi->vce_dpm.soft_min_clk = 1872 pi->vce_dpm.soft_max_clk; 1873 ret = cz_send_msg_to_smc_with_parameter(adev, 1874 PPSMC_MSG_SetEclkSoftMin, 1875 cz_get_eclk_level(adev, 1876 pi->vce_dpm.soft_min_clk, 1877 PPSMC_MSG_SetEclkSoftMin)); 1878 if (ret) 1879 return ret; 1880 } 1881 1882 return ret; 1883} 1884 1885static int cz_dpm_vce_force_lowest(struct amdgpu_device *adev) 1886{ 1887 struct cz_power_info *pi = cz_get_pi(adev); 1888 int ret = 0; 1889 1890 if (pi->vce_dpm.soft_max_clk != pi->vce_dpm.soft_min_clk) { 1891 pi->vce_dpm.soft_max_clk = pi->vce_dpm.soft_min_clk; 1892 ret = cz_send_msg_to_smc_with_parameter(adev, 1893 PPSMC_MSG_SetEclkSoftMax, 1894 cz_get_uvd_level(adev, 1895 pi->vce_dpm.soft_max_clk, 1896 PPSMC_MSG_SetEclkSoftMax)); 1897 if (ret) 1898 return ret; 1899 } 1900 1901 return ret; 1902} 1903 1904static uint32_t cz_dpm_get_max_vce_level(struct amdgpu_device *adev) 1905{ 1906 struct cz_power_info *pi = cz_get_pi(adev); 1907 1908 if (!pi->max_vce_level) { 1909 cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); 1910 pi->max_vce_level = cz_get_argument(adev) + 1; 1911 } 1912 1913 if (pi->max_vce_level > CZ_MAX_HARDWARE_POWERLEVELS) { 1914 DRM_ERROR("Invalid max vce level!\n"); 1915 return -EINVAL; 1916 } 1917 1918 return pi->max_vce_level; 1919} 1920 1921static int cz_dpm_unforce_vce_dpm_levels(struct amdgpu_device *adev) 1922{ 1923 struct cz_power_info *pi = cz_get_pi(adev); 1924 struct amdgpu_vce_clock_voltage_dependency_table *dep_table = 1925 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 1926 uint32_t level = 0; 1927 int ret = 0; 1928 1929 pi->vce_dpm.soft_min_clk = dep_table->entries[0].ecclk; 1930 level = cz_dpm_get_max_vce_level(adev) - 1; 1931 if (level < dep_table->count) 1932 pi->vce_dpm.soft_max_clk = dep_table->entries[level].ecclk; 1933 else 1934 pi->vce_dpm.soft_max_clk = 1935 dep_table->entries[dep_table->count - 1].ecclk; 1936 1937 /* get min/max sclk soft value 1938 * notify SMU to execute 
*/ 1939 ret = cz_send_msg_to_smc_with_parameter(adev, 1940 PPSMC_MSG_SetEclkSoftMin, 1941 cz_get_eclk_level(adev, 1942 pi->vce_dpm.soft_min_clk, 1943 PPSMC_MSG_SetEclkSoftMin)); 1944 if (ret) 1945 return ret; 1946 1947 ret = cz_send_msg_to_smc_with_parameter(adev, 1948 PPSMC_MSG_SetEclkSoftMax, 1949 cz_get_eclk_level(adev, 1950 pi->vce_dpm.soft_max_clk, 1951 PPSMC_MSG_SetEclkSoftMax)); 1952 if (ret) 1953 return ret; 1954 1955 DRM_DEBUG("DPM vce unforce state min=%d, max=%d.\n", 1956 pi->vce_dpm.soft_min_clk, 1957 pi->vce_dpm.soft_max_clk); 1958 1959 return 0; 1960} 1961 1962static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, 1963 enum amdgpu_dpm_forced_level level) 1964{ 1965 int ret = 0; 1966 1967 switch (level) { 1968 case AMDGPU_DPM_FORCED_LEVEL_HIGH: 1969 /* sclk */ 1970 ret = cz_dpm_unforce_dpm_levels(adev); 1971 if (ret) 1972 return ret; 1973 ret = cz_dpm_force_highest(adev); 1974 if (ret) 1975 return ret; 1976 1977 /* uvd */ 1978 ret = cz_dpm_unforce_uvd_dpm_levels(adev); 1979 if (ret) 1980 return ret; 1981 ret = cz_dpm_uvd_force_highest(adev); 1982 if (ret) 1983 return ret; 1984 1985 /* vce */ 1986 ret = cz_dpm_unforce_vce_dpm_levels(adev); 1987 if (ret) 1988 return ret; 1989 ret = cz_dpm_vce_force_highest(adev); 1990 if (ret) 1991 return ret; 1992 break; 1993 case AMDGPU_DPM_FORCED_LEVEL_LOW: 1994 /* sclk */ 1995 ret = cz_dpm_unforce_dpm_levels(adev); 1996 if (ret) 1997 return ret; 1998 ret = cz_dpm_force_lowest(adev); 1999 if (ret) 2000 return ret; 2001 2002 /* uvd */ 2003 ret = cz_dpm_unforce_uvd_dpm_levels(adev); 2004 if (ret) 2005 return ret; 2006 ret = cz_dpm_uvd_force_lowest(adev); 2007 if (ret) 2008 return ret; 2009 2010 /* vce */ 2011 ret = cz_dpm_unforce_vce_dpm_levels(adev); 2012 if (ret) 2013 return ret; 2014 ret = cz_dpm_vce_force_lowest(adev); 2015 if (ret) 2016 return ret; 2017 break; 2018 case AMDGPU_DPM_FORCED_LEVEL_AUTO: 2019 /* sclk */ 2020 ret = cz_dpm_unforce_dpm_levels(adev); 2021 if (ret) 2022 return ret; 2023 2024 /* uvd */ 
2025 ret = cz_dpm_unforce_uvd_dpm_levels(adev); 2026 if (ret) 2027 return ret; 2028 2029 /* vce */ 2030 ret = cz_dpm_unforce_vce_dpm_levels(adev); 2031 if (ret) 2032 return ret; 2033 break; 2034 default: 2035 break; 2036 } 2037 2038 adev->pm.dpm.forced_level = level; 2039 2040 return ret; 2041} 2042 2043/* fix me, display configuration change lists here 2044 * mostly dal related*/ 2045static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev) 2046{ 2047} 2048 2049static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low) 2050{ 2051 struct cz_power_info *pi = cz_get_pi(adev); 2052 struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps); 2053 2054 if (low) 2055 return requested_state->levels[0].sclk; 2056 else 2057 return requested_state->levels[requested_state->num_levels - 1].sclk; 2058 2059} 2060 2061static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low) 2062{ 2063 struct cz_power_info *pi = cz_get_pi(adev); 2064 2065 return pi->sys_info.bootup_uma_clk; 2066} 2067 2068static int cz_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) 2069{ 2070 struct cz_power_info *pi = cz_get_pi(adev); 2071 int ret = 0; 2072 2073 if (enable && pi->caps_uvd_dpm ) { 2074 pi->dpm_flags |= DPMFlags_UVD_Enabled; 2075 DRM_DEBUG("UVD DPM Enabled.\n"); 2076 2077 ret = cz_send_msg_to_smc_with_parameter(adev, 2078 PPSMC_MSG_EnableAllSmuFeatures, UVD_DPM_MASK); 2079 } else { 2080 pi->dpm_flags &= ~DPMFlags_UVD_Enabled; 2081 DRM_DEBUG("UVD DPM Stopped\n"); 2082 2083 ret = cz_send_msg_to_smc_with_parameter(adev, 2084 PPSMC_MSG_DisableAllSmuFeatures, UVD_DPM_MASK); 2085 } 2086 2087 return ret; 2088} 2089 2090static int cz_update_uvd_dpm(struct amdgpu_device *adev, bool gate) 2091{ 2092 return cz_enable_uvd_dpm(adev, !gate); 2093} 2094 2095 2096static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) 2097{ 2098 struct cz_power_info *pi = cz_get_pi(adev); 2099 int ret; 2100 2101 if (pi->uvd_power_gated == gate) 2102 return; 
2103 2104 pi->uvd_power_gated = gate; 2105 2106 if (gate) { 2107 if (pi->caps_uvd_pg) { 2108 /* disable clockgating so we can properly shut down the block */ 2109 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 2110 AMD_CG_STATE_UNGATE); 2111 /* shutdown the UVD block */ 2112 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 2113 AMD_PG_STATE_GATE); 2114 /* XXX: check for errors */ 2115 } 2116 cz_update_uvd_dpm(adev, gate); 2117 if (pi->caps_uvd_pg) 2118 /* power off the UVD block */ 2119 cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); 2120 } else { 2121 if (pi->caps_uvd_pg) { 2122 /* power on the UVD block */ 2123 if (pi->uvd_dynamic_pg) 2124 cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); 2125 else 2126 cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); 2127 /* re-init the UVD block */ 2128 ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 2129 AMD_PG_STATE_UNGATE); 2130 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ 2131 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, 2132 AMD_CG_STATE_GATE); 2133 /* XXX: check for errors */ 2134 } 2135 cz_update_uvd_dpm(adev, gate); 2136 } 2137} 2138 2139static int cz_enable_vce_dpm(struct amdgpu_device *adev, bool enable) 2140{ 2141 struct cz_power_info *pi = cz_get_pi(adev); 2142 int ret = 0; 2143 2144 if (enable && pi->caps_vce_dpm) { 2145 pi->dpm_flags |= DPMFlags_VCE_Enabled; 2146 DRM_DEBUG("VCE DPM Enabled.\n"); 2147 2148 ret = cz_send_msg_to_smc_with_parameter(adev, 2149 PPSMC_MSG_EnableAllSmuFeatures, VCE_DPM_MASK); 2150 2151 } else { 2152 pi->dpm_flags &= ~DPMFlags_VCE_Enabled; 2153 DRM_DEBUG("VCE DPM Stopped\n"); 2154 2155 ret = cz_send_msg_to_smc_with_parameter(adev, 2156 PPSMC_MSG_DisableAllSmuFeatures, VCE_DPM_MASK); 2157 } 2158 2159 return ret; 2160} 2161 2162static int cz_update_vce_dpm(struct amdgpu_device *adev) 2163{ 2164 struct cz_power_info *pi = cz_get_pi(adev); 2165 struct 
amdgpu_vce_clock_voltage_dependency_table *table = 2166 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; 2167 2168 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */ 2169 if (pi->caps_stable_power_state) { 2170 pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; 2171 2172 } else { /* non-stable p-state cases. without vce.Arbiter.EcclkHardMin */ 2173 /* leave it as set by user */ 2174 /*pi->vce_dpm.hard_min_clk = table->entries[0].ecclk;*/ 2175 } 2176 2177 cz_send_msg_to_smc_with_parameter(adev, 2178 PPSMC_MSG_SetEclkHardMin, 2179 cz_get_eclk_level(adev, 2180 pi->vce_dpm.hard_min_clk, 2181 PPSMC_MSG_SetEclkHardMin)); 2182 return 0; 2183} 2184 2185static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 2186{ 2187 struct cz_power_info *pi = cz_get_pi(adev); 2188 2189 if (pi->caps_vce_pg) { 2190 if (pi->vce_power_gated != gate) { 2191 if (gate) { 2192 /* disable clockgating so we can properly shut down the block */ 2193 amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 2194 AMD_CG_STATE_UNGATE); 2195 /* shutdown the VCE block */ 2196 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 2197 AMD_PG_STATE_GATE); 2198 2199 cz_enable_vce_dpm(adev, false); 2200 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); 2201 pi->vce_power_gated = true; 2202 } else { 2203 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); 2204 pi->vce_power_gated = false; 2205 2206 /* re-init the VCE block */ 2207 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 2208 AMD_PG_STATE_UNGATE); 2209 /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ 2210 amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 2211 AMD_CG_STATE_GATE); 2212 2213 cz_update_vce_dpm(adev); 2214 cz_enable_vce_dpm(adev, true); 2215 } 2216 } else { 2217 if (! 
pi->vce_power_gated) { 2218 cz_update_vce_dpm(adev); 2219 } 2220 } 2221 } else { /*pi->caps_vce_pg*/ 2222 pi->vce_power_gated = gate; 2223 cz_update_vce_dpm(adev); 2224 cz_enable_vce_dpm(adev, !gate); 2225 } 2226} 2227 2228const struct amd_ip_funcs cz_dpm_ip_funcs = { 2229 .name = "cz_dpm", 2230 .early_init = cz_dpm_early_init, 2231 .late_init = cz_dpm_late_init, 2232 .sw_init = cz_dpm_sw_init, 2233 .sw_fini = cz_dpm_sw_fini, 2234 .hw_init = cz_dpm_hw_init, 2235 .hw_fini = cz_dpm_hw_fini, 2236 .suspend = cz_dpm_suspend, 2237 .resume = cz_dpm_resume, 2238 .is_idle = NULL, 2239 .wait_for_idle = NULL, 2240 .soft_reset = NULL, 2241 .set_clockgating_state = cz_dpm_set_clockgating_state, 2242 .set_powergating_state = cz_dpm_set_powergating_state, 2243}; 2244 2245static const struct amdgpu_dpm_funcs cz_dpm_funcs = { 2246 .get_temperature = cz_dpm_get_temperature, 2247 .pre_set_power_state = cz_dpm_pre_set_power_state, 2248 .set_power_state = cz_dpm_set_power_state, 2249 .post_set_power_state = cz_dpm_post_set_power_state, 2250 .display_configuration_changed = cz_dpm_display_configuration_changed, 2251 .get_sclk = cz_dpm_get_sclk, 2252 .get_mclk = cz_dpm_get_mclk, 2253 .print_power_state = cz_dpm_print_power_state, 2254 .debugfs_print_current_performance_level = 2255 cz_dpm_debugfs_print_current_performance_level, 2256 .force_performance_level = cz_dpm_force_dpm_level, 2257 .vblank_too_short = NULL, 2258 .powergate_uvd = cz_dpm_powergate_uvd, 2259 .powergate_vce = cz_dpm_powergate_vce, 2260}; 2261 2262static void cz_dpm_set_funcs(struct amdgpu_device *adev) 2263{ 2264 if (NULL == adev->pm.funcs) 2265 adev->pm.funcs = &cz_dpm_funcs; 2266}