Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v5.4 1804 lines 43 kB view raw
1/* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <linux/firmware.h> 24 25#include "pp_debug.h" 26#include "amdgpu.h" 27#include "amdgpu_smu.h" 28#include "soc15_common.h" 29#include "smu_v11_0.h" 30#include "smu_v12_0.h" 31#include "atom.h" 32#include "amd_pcie.h" 33 34#undef __SMU_DUMMY_MAP 35#define __SMU_DUMMY_MAP(type) #type 36static const char* __smu_message_names[] = { 37 SMU_MESSAGE_TYPES 38}; 39 40const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) 41{ 42 if (type < 0 || type >= SMU_MSG_MAX_COUNT) 43 return "unknown smu message"; 44 return __smu_message_names[type]; 45} 46 47#undef __SMU_DUMMY_MAP 48#define __SMU_DUMMY_MAP(fea) #fea 49static const char* __smu_feature_names[] = { 50 SMU_FEATURE_MASKS 51}; 52 53const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature) 54{ 55 if (feature < 0 || feature >= SMU_FEATURE_COUNT) 56 return "unknown smu feature"; 57 return __smu_feature_names[feature]; 58} 59 60size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) 61{ 62 size_t size = 0; 63 int ret = 0, i = 0; 64 uint32_t feature_mask[2] = { 0 }; 65 int32_t feature_index = 0; 66 uint32_t count = 0; 67 uint32_t sort_feature[SMU_FEATURE_COUNT]; 68 uint64_t hw_feature_count = 0; 69 70 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); 71 if (ret) 72 goto failed; 73 74 size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n", 75 feature_mask[1], feature_mask[0]); 76 77 for (i = 0; i < SMU_FEATURE_COUNT; i++) { 78 feature_index = smu_feature_get_index(smu, i); 79 if (feature_index < 0) 80 continue; 81 sort_feature[feature_index] = i; 82 hw_feature_count++; 83 } 84 85 for (i = 0; i < hw_feature_count; i++) { 86 size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n", 87 count++, 88 smu_get_feature_name(smu, sort_feature[i]), 89 i, 90 !!smu_feature_is_enabled(smu, sort_feature[i]) ? 91 "enabled" : "disabled"); 92 } 93 94failed: 95 return size; 96} 97 98int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) 99{ 100 int ret = 0; 101 uint32_t feature_mask[2] = { 0 }; 102 uint64_t feature_2_enabled = 0; 103 uint64_t feature_2_disabled = 0; 104 uint64_t feature_enables = 0; 105 106 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); 107 if (ret) 108 return ret; 109 110 feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); 111 112 feature_2_enabled = ~feature_enables & new_mask; 113 feature_2_disabled = feature_enables & ~new_mask; 114 115 if (feature_2_enabled) { 116 ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); 117 if (ret) 118 return ret; 119 } 120 if (feature_2_disabled) { 121 ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); 122 if (ret) 123 return ret; 124 } 125 126 return ret; 127} 128 129int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version) 130{ 131 int ret = 0; 132 133 if (!if_version && !smu_version) 134 return -EINVAL; 135 136 if (if_version) { 137 ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion); 138 if (ret) 139 return ret; 140 141 ret = smu_read_smc_arg(smu, if_version); 142 if (ret) 143 return ret; 144 } 145 146 if (smu_version) { 147 ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion); 148 if (ret) 149 return ret; 150 151 ret = smu_read_smc_arg(smu, smu_version); 152 if (ret) 153 return ret; 154 } 155 156 return ret; 157} 158 159int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 160 uint32_t min, uint32_t max) 161{ 162 int ret = 0, clk_id = 0; 163 uint32_t param; 164 165 if (min <= 0 && max <= 0) 166 return -EINVAL; 167 168 if (!smu_clk_dpm_is_enabled(smu, clk_type)) 169 return 0; 170 171 clk_id = smu_clk_get_index(smu, clk_type); 172 if (clk_id < 0) 173 return clk_id; 174 175 if (max > 0) { 176 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 177 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, 178 param); 179 if (ret) 180 return ret; 181 } 182 183 if (min > 0) { 184 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 185 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, 186 param); 187 if (ret) 188 return ret; 189 } 190 191 192 return ret; 193} 194 195int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 196 uint32_t min, uint32_t max) 197{ 198 int ret = 0, clk_id = 0; 199 uint32_t param; 200 201 if (min <= 0 && max <= 0) 202 return -EINVAL; 203 204 if (!smu_clk_dpm_is_enabled(smu, clk_type)) 205 return 0; 206 207 clk_id = smu_clk_get_index(smu, clk_type); 208 if (clk_id < 0) 209 return clk_id; 210 211 if (max > 0) { 212 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 213 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 214 param); 215 if (ret) 216 return ret; 217 } 218 219 if (min > 0) { 220 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 221 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 222 param); 223 if (ret) 224 return ret; 225 } 226 227 228 return ret; 229} 230 231int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 232 uint32_t *min, uint32_t *max) 233{ 234 uint32_t clock_limit; 235 int ret = 0; 236 237 if (!min && !max) 238 return -EINVAL; 239 240 if (!smu_clk_dpm_is_enabled(smu, clk_type)) { 241 switch (clk_type) { 242 case SMU_MCLK: 243 case SMU_UCLK: 244 clock_limit = smu->smu_table.boot_values.uclk; 245 break; 246 case SMU_GFXCLK: 247 case SMU_SCLK: 248 clock_limit = smu->smu_table.boot_values.gfxclk; 249 break; 250 case SMU_SOCCLK: 251 clock_limit = smu->smu_table.boot_values.socclk; 252 break; 253 default: 254 clock_limit = 0; 255 break; 256 } 257 258 /* clock in Mhz unit */ 259 if (min) 260 *min = clock_limit / 100; 261 if (max) 262 *max = clock_limit / 100; 263 264 return 0; 265 } 266 /* 267 * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the 268 * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). 269 */ 270 ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); 271 return ret; 272} 273 274int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, 275 uint16_t level, uint32_t *value) 276{ 277 int ret = 0, clk_id = 0; 278 uint32_t param; 279 280 if (!value) 281 return -EINVAL; 282 283 if (!smu_clk_dpm_is_enabled(smu, clk_type)) 284 return 0; 285 286 clk_id = smu_clk_get_index(smu, clk_type); 287 if (clk_id < 0) 288 return clk_id; 289 290 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); 291 292 ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex, 293 param); 294 if (ret) 295 return ret; 296 297 ret = smu_read_smc_arg(smu, &param); 298 if (ret) 299 return ret; 300 301 /* BIT31: 0 - Fine grained DPM, 1 - Dicrete DPM 302 * now, we un-support it */ 303 *value = param & 0x7fffffff; 304 305 return ret; 306} 307 308int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, 309 uint32_t *value) 310{ 311 return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value); 312} 313 314bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) 315{ 316 enum smu_feature_mask feature_id = 0; 317 318 switch (clk_type) { 319 case SMU_MCLK: 320 case SMU_UCLK: 321 feature_id = SMU_FEATURE_DPM_UCLK_BIT; 322 break; 323 case SMU_GFXCLK: 324 case SMU_SCLK: 325 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT; 326 break; 327 case SMU_SOCCLK: 328 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT; 329 break; 330 default: 331 return true; 332 } 333 334 if(!smu_feature_is_enabled(smu, feature_id)) { 335 return false; 336 } 337 338 return true; 339} 340 341 342int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, 343 bool gate) 344{ 345 int ret = 0; 346 347 switch (block_type) { 348 case AMD_IP_BLOCK_TYPE_UVD: 349 ret = smu_dpm_set_uvd_enable(smu, gate); 350 break; 351 case AMD_IP_BLOCK_TYPE_VCE: 352 ret = smu_dpm_set_vce_enable(smu, gate); 353 break; 354 case AMD_IP_BLOCK_TYPE_GFX: 355 ret = smu_gfx_off_control(smu, gate); 356 break; 357 case AMD_IP_BLOCK_TYPE_SDMA: 358 ret = smu_powergate_sdma(smu, gate); 359 break; 360 default: 361 break; 362 } 363 364 return ret; 365} 366 367enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) 368{ 369 /* not support power state */ 370 return POWER_STATE_TYPE_DEFAULT; 371} 372 373int smu_get_power_num_states(struct smu_context *smu, 374 struct pp_states_info *state_info) 375{ 376 if (!state_info) 377 return -EINVAL; 378 379 /* not support power state */ 380 memset(state_info, 0, sizeof(struct pp_states_info)); 381 state_info->nums = 1; 382 state_info->states[0] = POWER_STATE_TYPE_DEFAULT; 383 384 return 0; 385} 386 387int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, 388 void *data, uint32_t *size) 389{ 390 struct smu_power_context *smu_power = &smu->smu_power; 391 struct smu_power_gate *power_gate = &smu_power->power_gate; 392 int ret = 0; 393 394 if(!data || !size) 395 return -EINVAL; 396 397 switch (sensor) { 398 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 399 *((uint32_t *)data) = smu->pstate_sclk; 400 *size = 4; 401 break; 402 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 403 *((uint32_t *)data) = smu->pstate_mclk; 404 *size = 4; 405 break; 406 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 407 ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2); 408 *size = 8; 409 break; 410 case AMDGPU_PP_SENSOR_UVD_POWER: 411 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 412 *size = 4; 413 break; 414 case AMDGPU_PP_SENSOR_VCE_POWER: 415 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 416 *size = 4; 417 break; 418 case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 419 *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1; 420 *size = 4; 421 break; 422 default: 423 ret = -EINVAL; 424 break; 425 } 426 427 if (ret) 428 *size = 0; 429 430 return ret; 431} 432 433int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument, 434 void *table_data, bool drv2smu) 435{ 436 struct smu_table_context *smu_table = &smu->smu_table; 437 struct amdgpu_device *adev = smu->adev; 438 struct smu_table *table = NULL; 439 int ret = 0; 440 int table_id = smu_table_get_index(smu, table_index); 441 442 if (!table_data || table_id >= smu_table->table_count || table_id < 0) 443 return -EINVAL; 444 445 table = &smu_table->tables[table_index]; 446 447 if (drv2smu) 448 memcpy(table->cpu_addr, table_data, table->size); 449 450 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, 451 upper_32_bits(table->mc_address)); 452 if (ret) 453 return ret; 454 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, 455 lower_32_bits(table->mc_address)); 456 if (ret) 457 return ret; 458 ret = smu_send_smc_msg_with_param(smu, drv2smu ? 459 SMU_MSG_TransferTableDram2Smu : 460 SMU_MSG_TransferTableSmu2Dram, 461 table_id | ((argument & 0xFFFF) << 16)); 462 if (ret) 463 return ret; 464 465 /* flush hdp cache */ 466 adev->nbio_funcs->hdp_flush(adev, NULL); 467 468 if (!drv2smu) 469 memcpy(table_data, table->cpu_addr, table->size); 470 471 return ret; 472} 473 474bool is_support_sw_smu(struct amdgpu_device *adev) 475{ 476 if (adev->asic_type == CHIP_VEGA20) 477 return (amdgpu_dpm == 2) ? true : false; 478 else if (adev->asic_type >= CHIP_ARCTURUS) 479 return true; 480 else 481 return false; 482} 483 484bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) 485{ 486 if (amdgpu_dpm != 1) 487 return false; 488 489 if (adev->asic_type == CHIP_VEGA20) 490 return true; 491 492 return false; 493} 494 495int smu_sys_get_pp_table(struct smu_context *smu, void **table) 496{ 497 struct smu_table_context *smu_table = &smu->smu_table; 498 499 if (!smu_table->power_play_table && !smu_table->hardcode_pptable) 500 return -EINVAL; 501 502 if (smu_table->hardcode_pptable) 503 *table = smu_table->hardcode_pptable; 504 else 505 *table = smu_table->power_play_table; 506 507 return smu_table->power_play_table_size; 508} 509 510int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) 511{ 512 struct smu_table_context *smu_table = &smu->smu_table; 513 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf; 514 int ret = 0; 515 516 if (!smu->pm_enabled) 517 return -EINVAL; 518 if (header->usStructureSize != size) { 519 pr_err("pp table size not matched !\n"); 520 return -EIO; 521 } 522 523 mutex_lock(&smu->mutex); 524 if (!smu_table->hardcode_pptable) 525 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); 526 if (!smu_table->hardcode_pptable) { 527 ret = -ENOMEM; 528 goto failed; 529 } 530 531 memcpy(smu_table->hardcode_pptable, buf, size); 532 smu_table->power_play_table = smu_table->hardcode_pptable; 533 smu_table->power_play_table_size = size; 534 mutex_unlock(&smu->mutex); 535 536 ret = smu_reset(smu); 537 if (ret) 538 pr_info("smu reset failed, ret = %d\n", ret); 539 540 return ret; 541 542failed: 543 mutex_unlock(&smu->mutex); 544 return ret; 545} 546 547int smu_feature_init_dpm(struct smu_context *smu) 548{ 549 struct smu_feature *feature = &smu->smu_feature; 550 int ret = 0; 551 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32]; 552 553 if (!smu->pm_enabled) 554 return ret; 555 mutex_lock(&feature->mutex); 556 bitmap_zero(feature->allowed, SMU_FEATURE_MAX); 557 mutex_unlock(&feature->mutex); 558 559 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, 560 SMU_FEATURE_MAX/32); 561 if (ret) 562 return ret; 563 564 mutex_lock(&feature->mutex); 565 bitmap_or(feature->allowed, feature->allowed, 566 (unsigned long *)allowed_feature_mask, 567 feature->feature_num); 568 mutex_unlock(&feature->mutex); 569 570 return ret; 571} 572int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled) 573{ 574 uint32_t feature_low = 0, feature_high = 0; 575 int ret = 0; 576 577 if (!smu->pm_enabled) 578 return ret; 579 580 feature_low = (feature_mask >> 0 ) & 0xffffffff; 581 feature_high = (feature_mask >> 32) & 0xffffffff; 582 583 if (enabled) { 584 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, 585 feature_low); 586 if (ret) 587 return ret; 588 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, 589 feature_high); 590 if (ret) 591 return ret; 592 593 } else { 594 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, 595 feature_low); 596 if (ret) 597 return ret; 598 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, 599 feature_high); 600 if (ret) 601 return ret; 602 603 } 604 605 return ret; 606} 607 608int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) 609{ 610 struct amdgpu_device *adev = smu->adev; 611 struct smu_feature *feature = &smu->smu_feature; 612 int feature_id; 613 int ret = 0; 614 615 if (adev->flags & AMD_IS_APU) 616 return 1; 617 618 feature_id = smu_feature_get_index(smu, mask); 619 if (feature_id < 0) 620 return 0; 621 622 WARN_ON(feature_id > feature->feature_num); 623 624 mutex_lock(&feature->mutex); 625 ret = test_bit(feature_id, feature->enabled); 626 mutex_unlock(&feature->mutex); 627 628 return ret; 629} 630 631int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, 632 bool enable) 633{ 634 struct smu_feature *feature = &smu->smu_feature; 635 int feature_id; 636 uint64_t feature_mask = 0; 637 int ret = 0; 638 639 feature_id = smu_feature_get_index(smu, mask); 640 if (feature_id < 0) 641 return -EINVAL; 642 643 WARN_ON(feature_id > feature->feature_num); 644 645 feature_mask = 1ULL << feature_id; 646 647 mutex_lock(&feature->mutex); 648 ret = smu_feature_update_enable_state(smu, feature_mask, enable); 649 if (ret) 650 goto failed; 651 652 if (enable) 653 test_and_set_bit(feature_id, feature->enabled); 654 else 655 test_and_clear_bit(feature_id, feature->enabled); 656 657failed: 658 mutex_unlock(&feature->mutex); 659 660 return ret; 661} 662 663int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) 664{ 665 struct smu_feature *feature = &smu->smu_feature; 666 int feature_id; 667 int ret = 0; 668 669 feature_id = smu_feature_get_index(smu, mask); 670 if (feature_id < 0) 671 return 0; 672 673 WARN_ON(feature_id > feature->feature_num); 674 675 mutex_lock(&feature->mutex); 676 ret = test_bit(feature_id, feature->supported); 677 mutex_unlock(&feature->mutex); 678 679 return ret; 680} 681 682int smu_feature_set_supported(struct smu_context *smu, 683 enum smu_feature_mask mask, 684 bool enable) 685{ 686 struct smu_feature *feature = &smu->smu_feature; 687 int feature_id; 688 int ret = 0; 689 690 feature_id = smu_feature_get_index(smu, mask); 691 if (feature_id < 0) 692 return -EINVAL; 693 694 WARN_ON(feature_id > feature->feature_num); 695 696 mutex_lock(&feature->mutex); 697 if (enable) 698 test_and_set_bit(feature_id, feature->supported); 699 else 700 test_and_clear_bit(feature_id, feature->supported); 701 mutex_unlock(&feature->mutex); 702 703 return ret; 704} 705 706static int smu_set_funcs(struct amdgpu_device *adev) 707{ 708 struct smu_context *smu = &adev->smu; 709 710 switch (adev->asic_type) { 711 case CHIP_VEGA20: 712 case CHIP_NAVI10: 713 case CHIP_NAVI14: 714 case CHIP_NAVI12: 715 case CHIP_ARCTURUS: 716 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 717 smu->od_enabled = true; 718 smu_v11_0_set_smu_funcs(smu); 719 break; 720 case CHIP_RENOIR: 721 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) 722 smu->od_enabled = true; 723 smu_v12_0_set_smu_funcs(smu); 724 break; 725 default: 726 return -EINVAL; 727 } 728 729 return 0; 730} 731 732static int smu_early_init(void *handle) 733{ 734 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 735 struct smu_context *smu = &adev->smu; 736 737 smu->adev = adev; 738 smu->pm_enabled = !!amdgpu_dpm; 739 mutex_init(&smu->mutex); 740 741 return smu_set_funcs(adev); 742} 743 744static int smu_late_init(void *handle) 745{ 746 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 747 struct smu_context *smu = &adev->smu; 748 749 if (!smu->pm_enabled) 750 return 0; 751 752 mutex_lock(&smu->mutex); 753 smu_handle_task(&adev->smu, 754 smu->smu_dpm.dpm_level, 755 AMD_PP_TASK_COMPLETE_INIT); 756 mutex_unlock(&smu->mutex); 757 758 return 0; 759} 760 761int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, 762 uint16_t *size, uint8_t *frev, uint8_t *crev, 763 uint8_t **addr) 764{ 765 struct amdgpu_device *adev = smu->adev; 766 uint16_t data_start; 767 768 if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table, 769 size, frev, crev, &data_start)) 770 return -EINVAL; 771 772 *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start; 773 774 return 0; 775} 776 777static int smu_initialize_pptable(struct smu_context *smu) 778{ 779 /* TODO */ 780 return 0; 781} 782 783static int smu_smc_table_sw_init(struct smu_context *smu) 784{ 785 int ret; 786 787 ret = smu_initialize_pptable(smu); 788 if (ret) { 789 pr_err("Failed to init smu_initialize_pptable!\n"); 790 return ret; 791 } 792 793 /** 794 * Create smu_table structure, and init smc tables such as 795 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc. 796 */ 797 ret = smu_init_smc_tables(smu); 798 if (ret) { 799 pr_err("Failed to init smc tables!\n"); 800 return ret; 801 } 802 803 /** 804 * Create smu_power_context structure, and allocate smu_dpm_context and 805 * context size to fill the smu_power_context data. 806 */ 807 ret = smu_init_power(smu); 808 if (ret) { 809 pr_err("Failed to init smu_init_power!\n"); 810 return ret; 811 } 812 813 return 0; 814} 815 816static int smu_smc_table_sw_fini(struct smu_context *smu) 817{ 818 int ret; 819 820 ret = smu_fini_smc_tables(smu); 821 if (ret) { 822 pr_err("Failed to smu_fini_smc_tables!\n"); 823 return ret; 824 } 825 826 return 0; 827} 828 829static int smu_sw_init(void *handle) 830{ 831 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 832 struct smu_context *smu = &adev->smu; 833 int ret; 834 835 smu->pool_size = adev->pm.smu_prv_buffer_size; 836 smu->smu_feature.feature_num = SMU_FEATURE_MAX; 837 mutex_init(&smu->smu_feature.mutex); 838 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); 839 bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); 840 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); 841 842 mutex_init(&smu->smu_baco.mutex); 843 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 844 smu->smu_baco.platform_support = false; 845 846 mutex_init(&smu->sensor_lock); 847 848 smu->watermarks_bitmap = 0; 849 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 850 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 851 852 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; 853 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; 854 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; 855 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; 856 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; 857 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; 858 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; 859 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; 860 861 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 862 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 863 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; 864 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO; 865 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; 866 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; 867 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; 868 smu->display_config = &adev->pm.pm_display_cfg; 869 870 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 871 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 872 ret = smu_init_microcode(smu); 873 if (ret) { 874 pr_err("Failed to load smu firmware!\n"); 875 return ret; 876 } 877 878 ret = smu_smc_table_sw_init(smu); 879 if (ret) { 880 pr_err("Failed to sw init smc table!\n"); 881 return ret; 882 } 883 884 ret = smu_register_irq_handler(smu); 885 if (ret) { 886 pr_err("Failed to register smc irq handler!\n"); 887 return ret; 888 } 889 890 return 0; 891} 892 893static int smu_sw_fini(void *handle) 894{ 895 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 896 struct smu_context *smu = &adev->smu; 897 int ret; 898 899 kfree(smu->irq_source); 900 smu->irq_source = NULL; 901 902 ret = smu_smc_table_sw_fini(smu); 903 if (ret) { 904 pr_err("Failed to sw fini smc table!\n"); 905 return ret; 906 } 907 908 ret = smu_fini_power(smu); 909 if (ret) { 910 pr_err("Failed to init smu_fini_power!\n"); 911 return ret; 912 } 913 914 return 0; 915} 916 917static int smu_init_fb_allocations(struct smu_context *smu) 918{ 919 struct amdgpu_device *adev = smu->adev; 920 struct smu_table_context *smu_table = &smu->smu_table; 921 struct smu_table *tables = smu_table->tables; 922 uint32_t table_count = smu_table->table_count; 923 uint32_t i = 0; 924 int32_t ret = 0; 925 926 if (table_count <= 0) 927 return -EINVAL; 928 929 for (i = 0 ; i < table_count; i++) { 930 if (tables[i].size == 0) 931 continue; 932 ret = amdgpu_bo_create_kernel(adev, 933 tables[i].size, 934 tables[i].align, 935 tables[i].domain, 936 &tables[i].bo, 937 &tables[i].mc_address, 938 &tables[i].cpu_addr); 939 if (ret) 940 goto failed; 941 } 942 943 return 0; 944failed: 945 for (; i > 0; i--) { 946 if (tables[i].size == 0) 947 continue; 948 amdgpu_bo_free_kernel(&tables[i].bo, 949 &tables[i].mc_address, 950 &tables[i].cpu_addr); 951 952 } 953 return ret; 954} 955 956static int smu_fini_fb_allocations(struct smu_context *smu) 957{ 958 struct smu_table_context *smu_table = &smu->smu_table; 959 struct smu_table *tables = smu_table->tables; 960 uint32_t table_count = smu_table->table_count; 961 uint32_t i = 0; 962 963 if (table_count == 0 || tables == NULL) 964 return 0; 965 966 for (i = 0 ; i < table_count; i++) { 967 if (tables[i].size == 0) 968 continue; 969 amdgpu_bo_free_kernel(&tables[i].bo, 970 &tables[i].mc_address, 971 &tables[i].cpu_addr); 972 } 973 974 return 0; 975} 976 977static int smu_override_pcie_parameters(struct smu_context *smu) 978{ 979 struct amdgpu_device *adev = smu->adev; 980 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; 981 int ret; 982 983 if (adev->flags & AMD_IS_APU) 984 return 0; 985 986 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) 987 pcie_gen = 3; 988 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 989 pcie_gen = 2; 990 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) 991 pcie_gen = 1; 992 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) 993 pcie_gen = 0; 994 995 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 996 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 997 * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 998 */ 999 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 1000 pcie_width = 6; 1001 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 1002 pcie_width = 5; 1003 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 1004 pcie_width = 4; 1005 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 1006 pcie_width = 3; 1007 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 1008 pcie_width = 2; 1009 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 1010 pcie_width = 1; 1011 1012 smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; 1013 ret = smu_send_smc_msg_with_param(smu, 1014 SMU_MSG_OverridePcieParameters, 1015 smu_pcie_arg); 1016 if (ret) 1017 pr_err("[%s] Attempt to override pcie params failed!\n", __func__); 1018 return ret; 1019} 1020 1021static int smu_smc_table_hw_init(struct smu_context *smu, 1022 bool initialize) 1023{ 1024 struct amdgpu_device *adev = smu->adev; 1025 int ret; 1026 1027 if (smu_is_dpm_running(smu) && adev->in_suspend) { 1028 pr_info("dpm has been enabled\n"); 1029 return 0; 1030 } 1031 1032 if (adev->asic_type != CHIP_ARCTURUS) { 1033 ret = smu_init_display_count(smu, 0); 1034 if (ret) 1035 return ret; 1036 } 1037 1038 if (initialize) { 1039 /* get boot_values from vbios to set revision, gfxclk, and etc. */ 1040 ret = smu_get_vbios_bootup_values(smu); 1041 if (ret) 1042 return ret; 1043 1044 ret = smu_setup_pptable(smu); 1045 if (ret) 1046 return ret; 1047 1048 ret = smu_get_clk_info_from_vbios(smu); 1049 if (ret) 1050 return ret; 1051 1052 /* 1053 * check if the format_revision in vbios is up to pptable header 1054 * version, and the structure size is not 0. 1055 */ 1056 ret = smu_check_pptable(smu); 1057 if (ret) 1058 return ret; 1059 1060 /* 1061 * allocate vram bos to store smc table contents. 1062 */ 1063 ret = smu_init_fb_allocations(smu); 1064 if (ret) 1065 return ret; 1066 1067 /* 1068 * Parse pptable format and fill PPTable_t smc_pptable to 1069 * smu_table_context structure. And read the smc_dpm_table from vbios, 1070 * then fill it into smc_pptable. 1071 */ 1072 ret = smu_parse_pptable(smu); 1073 if (ret) 1074 return ret; 1075 1076 /* 1077 * Send msg GetDriverIfVersion to check if the return value is equal 1078 * with DRIVER_IF_VERSION of smc header. 1079 */ 1080 ret = smu_check_fw_version(smu); 1081 if (ret) 1082 return ret; 1083 } 1084 1085 /* smu_dump_pptable(smu); */ 1086 1087 /* 1088 * Copy pptable bo in the vram to smc with SMU MSGs such as 1089 * SetDriverDramAddr and TransferTableDram2Smu. 1090 */ 1091 ret = smu_write_pptable(smu); 1092 if (ret) 1093 return ret; 1094 1095 /* issue RunAfllBtc msg */ 1096 ret = smu_run_afll_btc(smu); 1097 if (ret) 1098 return ret; 1099 1100 ret = smu_feature_set_allowed_mask(smu); 1101 if (ret) 1102 return ret; 1103 1104 ret = smu_system_features_control(smu, true); 1105 if (ret) 1106 return ret; 1107 1108 if (adev->asic_type != CHIP_ARCTURUS) { 1109 ret = smu_override_pcie_parameters(smu); 1110 if (ret) 1111 return ret; 1112 1113 ret = smu_notify_display_change(smu); 1114 if (ret) 1115 return ret; 1116 1117 /* 1118 * Set min deep sleep dce fclk with bootup value from vbios via 1119 * SetMinDeepSleepDcefclk MSG. 1120 */ 1121 ret = smu_set_min_dcef_deep_sleep(smu); 1122 if (ret) 1123 return ret; 1124 } 1125 1126 /* 1127 * Set initialized values (get from vbios) to dpm tables context such as 1128 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each 1129 * type of clks. 1130 */ 1131 if (initialize) { 1132 ret = smu_populate_smc_tables(smu); 1133 if (ret) 1134 return ret; 1135 1136 ret = smu_init_max_sustainable_clocks(smu); 1137 if (ret) 1138 return ret; 1139 } 1140 1141 ret = smu_set_default_od_settings(smu, initialize); 1142 if (ret) 1143 return ret; 1144 1145 if (initialize) { 1146 ret = smu_populate_umd_state_clk(smu); 1147 if (ret) 1148 return ret; 1149 1150 ret = smu_get_power_limit(smu, &smu->default_power_limit, true); 1151 if (ret) 1152 return ret; 1153 } 1154 1155 /* 1156 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 1157 */ 1158 ret = smu_set_tool_table_location(smu); 1159 1160 if (!smu_is_dpm_running(smu)) 1161 pr_info("dpm has been disabled\n"); 1162 1163 return ret; 1164} 1165 1166/** 1167 * smu_alloc_memory_pool - allocate memory pool in the system memory 1168 * 1169 * @smu: amdgpu_device pointer 1170 * 1171 * This memory pool will be used for SMC use and msg SetSystemVirtualDramAddr 1172 * and DramLogSetDramAddr can notify it changed. 1173 * 1174 * Returns 0 on success, error on failure. 1175 */ 1176static int smu_alloc_memory_pool(struct smu_context *smu) 1177{ 1178 struct amdgpu_device *adev = smu->adev; 1179 struct smu_table_context *smu_table = &smu->smu_table; 1180 struct smu_table *memory_pool = &smu_table->memory_pool; 1181 uint64_t pool_size = smu->pool_size; 1182 int ret = 0; 1183 1184 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO) 1185 return ret; 1186 1187 memory_pool->size = pool_size; 1188 memory_pool->align = PAGE_SIZE; 1189 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT; 1190 1191 switch (pool_size) { 1192 case SMU_MEMORY_POOL_SIZE_256_MB: 1193 case SMU_MEMORY_POOL_SIZE_512_MB: 1194 case SMU_MEMORY_POOL_SIZE_1_GB: 1195 case SMU_MEMORY_POOL_SIZE_2_GB: 1196 ret = amdgpu_bo_create_kernel(adev, 1197 memory_pool->size, 1198 memory_pool->align, 1199 memory_pool->domain, 1200 &memory_pool->bo, 1201 &memory_pool->mc_address, 1202 &memory_pool->cpu_addr); 1203 break; 1204 default: 1205 break; 1206 } 1207 1208 return ret; 1209} 1210 1211static int smu_free_memory_pool(struct smu_context *smu) 1212{ 1213 struct smu_table_context *smu_table = &smu->smu_table; 1214 struct smu_table *memory_pool = &smu_table->memory_pool; 1215 int ret = 0; 1216 1217 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO) 1218 return ret; 1219 1220 amdgpu_bo_free_kernel(&memory_pool->bo, 1221 &memory_pool->mc_address, 1222 &memory_pool->cpu_addr); 1223 1224 memset(memory_pool, 0, sizeof(struct smu_table)); 1225 1226 return ret; 1227} 1228 1229static int smu_hw_init(void *handle) 1230{ 1231 int ret; 1232 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1233 struct smu_context *smu = &adev->smu; 1234 1235 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1236 if (adev->asic_type < CHIP_NAVI10) { 1237 ret = smu_load_microcode(smu); 1238 if (ret) 1239 return ret; 1240 } 1241 } 1242 1243 ret = smu_check_fw_status(smu); 1244 if (ret) { 1245 pr_err("SMC firmware status is not correct\n"); 1246 return ret; 1247 } 1248 1249 if (adev->flags & AMD_IS_APU) { 1250 smu_powergate_sdma(&adev->smu, false); 1251 smu_powergate_vcn(&adev->smu, false); 1252 } 1253 1254 if (!smu->pm_enabled) 1255 return 0; 1256 1257 ret = smu_feature_init_dpm(smu); 1258 if (ret) 1259 goto failed; 1260 1261 ret = smu_smc_table_hw_init(smu, true); 1262 if (ret) 1263 goto failed; 1264 1265 ret = smu_alloc_memory_pool(smu); 1266 if (ret) 1267 goto failed; 1268 1269 /* 1270 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify 1271 * pool location. 1272 */ 1273 ret = smu_notify_memory_pool_location(smu); 1274 if (ret) 1275 goto failed; 1276 1277 ret = smu_start_thermal_control(smu); 1278 if (ret) 1279 goto failed; 1280 1281 if (!smu->pm_enabled) 1282 adev->pm.dpm_enabled = false; 1283 else 1284 adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */ 1285 1286 pr_info("SMU is initialized successfully!\n"); 1287 1288 return 0; 1289 1290failed: 1291 return ret; 1292} 1293 1294static int smu_hw_fini(void *handle) 1295{ 1296 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1297 struct smu_context *smu = &adev->smu; 1298 struct smu_table_context *table_context = &smu->smu_table; 1299 int ret = 0; 1300 1301 if (adev->flags & AMD_IS_APU) { 1302 smu_powergate_sdma(&adev->smu, true); 1303 smu_powergate_vcn(&adev->smu, true); 1304 } 1305 1306 kfree(table_context->driver_pptable); 1307 table_context->driver_pptable = NULL; 1308 1309 kfree(table_context->max_sustainable_clocks); 1310 table_context->max_sustainable_clocks = NULL; 1311 1312 kfree(table_context->overdrive_table); 1313 table_context->overdrive_table = NULL; 1314 1315 ret = smu_fini_fb_allocations(smu); 1316 if (ret) 1317 return ret; 1318 1319 ret = smu_free_memory_pool(smu); 1320 if (ret) 1321 return ret; 1322 1323 return 0; 1324} 1325 1326int smu_reset(struct smu_context *smu) 1327{ 1328 struct amdgpu_device *adev = smu->adev; 1329 int ret = 0; 1330 1331 ret = smu_hw_fini(adev); 1332 if (ret) 1333 return ret; 1334 1335 ret = smu_hw_init(adev); 1336 if (ret) 1337 return ret; 1338 1339 return ret; 1340} 1341 1342static int smu_suspend(void *handle) 1343{ 1344 int ret; 1345 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1346 struct smu_context *smu = &adev->smu; 1347 bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); 1348 1349 ret = smu_system_features_control(smu, false); 1350 if (ret) 1351 return ret; 1352 1353 if (adev->in_gpu_reset && baco_feature_is_enabled) { 1354 ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true); 1355 if (ret) { 1356 pr_warn("set BACO feature enabled failed, return %d\n", ret); 1357 return ret; 1358 } 1359 } 1360 1361 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); 1362 1363 if (adev->asic_type >= CHIP_NAVI10 && 1364 adev->gfx.rlc.funcs->stop) 1365 adev->gfx.rlc.funcs->stop(adev); 1366 1367 return 0; 1368} 1369 1370static int smu_resume(void *handle) 1371{ 1372 int ret; 1373 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1374 struct smu_context *smu = &adev->smu; 1375 1376 pr_info("SMU is resuming...\n"); 1377 1378 mutex_lock(&smu->mutex); 1379 1380 ret = smu_smc_table_hw_init(smu, false); 1381 if (ret) 1382 goto failed; 1383 1384 ret = smu_start_thermal_control(smu); 1385 if (ret) 1386 goto failed; 1387 1388 mutex_unlock(&smu->mutex); 1389 1390 pr_info("SMU is resumed successfully!\n"); 1391 1392 return 0; 1393failed: 1394 mutex_unlock(&smu->mutex); 1395 return ret; 1396} 1397 1398int smu_display_configuration_change(struct smu_context *smu, 1399 const struct amd_pp_display_configuration *display_config) 1400{ 1401 int index = 0; 1402 int num_of_active_display = 0; 1403 1404 if (!smu->pm_enabled || !is_support_sw_smu(smu->adev)) 1405 return -EINVAL; 1406 1407 if (!display_config) 1408 return -EINVAL; 1409 1410 mutex_lock(&smu->mutex); 1411 1412 smu_set_deep_sleep_dcefclk(smu, 1413 display_config->min_dcef_deep_sleep_set_clk / 100); 1414 1415 for (index = 0; index < display_config->num_path_including_non_display; index++) { 1416 if (display_config->displays[index].controller_id != 0) 1417 num_of_active_display++; 1418 } 1419 1420 smu_set_active_display_count(smu, num_of_active_display); 1421 1422 smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time, 1423 display_config->cpu_cc6_disable, 1424 display_config->cpu_pstate_disable, 1425 display_config->nb_pstate_switch_disable); 1426 1427 mutex_unlock(&smu->mutex); 1428 1429 return 0; 1430} 1431 1432static int smu_get_clock_info(struct smu_context *smu, 1433 struct smu_clock_info *clk_info, 1434 enum smu_perf_level_designation designation) 1435{ 1436 int ret; 1437 struct smu_performance_level level = {0}; 1438 1439 if (!clk_info) 1440 return -EINVAL; 1441 1442 ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level); 1443 if (ret) 1444 return -EINVAL; 1445 1446 clk_info->min_mem_clk = level.memory_clock; 1447 clk_info->min_eng_clk = level.core_clock; 1448 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width; 1449 1450 ret = smu_get_perf_level(smu, designation, &level); 1451 if (ret) 1452 return -EINVAL; 1453 1454 clk_info->min_mem_clk = level.memory_clock; 1455 clk_info->min_eng_clk = level.core_clock; 1456 clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width; 1457 1458 return 0; 1459} 1460 1461int smu_get_current_clocks(struct smu_context *smu, 1462 struct amd_pp_clock_info *clocks) 1463{ 1464 struct amd_pp_simple_clock_info simple_clocks = {0}; 1465 struct smu_clock_info hw_clocks; 1466 int ret = 0; 1467 1468 if (!is_support_sw_smu(smu->adev)) 1469 return -EINVAL; 1470 1471 mutex_lock(&smu->mutex); 1472 1473 smu_get_dal_power_level(smu, &simple_clocks); 1474 1475 if (smu->support_power_containment) 1476 ret = smu_get_clock_info(smu, &hw_clocks, 1477 PERF_LEVEL_POWER_CONTAINMENT); 1478 else 1479 ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY); 1480 1481 if (ret) { 1482 pr_err("Error in smu_get_clock_info\n"); 1483 goto failed; 1484 } 1485 1486 clocks->min_engine_clock = hw_clocks.min_eng_clk; 1487 clocks->max_engine_clock = hw_clocks.max_eng_clk; 1488 clocks->min_memory_clock = hw_clocks.min_mem_clk; 1489 clocks->max_memory_clock = hw_clocks.max_mem_clk; 1490 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth; 1491 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth; 1492 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1493 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1494 1495 if (simple_clocks.level == 0) 1496 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7; 1497 else 1498 clocks->max_clocks_state = simple_clocks.level; 1499 1500 if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) { 1501 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; 1502 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; 1503 } 1504 1505failed: 1506 mutex_unlock(&smu->mutex); 1507 return ret; 1508} 1509 1510static int smu_set_clockgating_state(void *handle, 1511 enum amd_clockgating_state state) 1512{ 1513 return 0; 1514} 1515 1516static int smu_set_powergating_state(void *handle, 1517 enum amd_powergating_state state) 1518{ 1519 return 0; 1520} 1521 1522static int smu_enable_umd_pstate(void *handle, 1523 enum amd_dpm_forced_level *level) 1524{ 1525 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1526 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1527 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1528 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1529 1530 struct smu_context *smu = (struct smu_context*)(handle); 1531 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1532 if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context) 1533 return -EINVAL; 1534 1535 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1536 /* enter umd pstate, save current level, disable gfx cg*/ 1537 if (*level & profile_mode_mask) { 1538 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 1539 smu_dpm_ctx->enable_umd_pstate = true; 1540 amdgpu_device_ip_set_clockgating_state(smu->adev, 1541 AMD_IP_BLOCK_TYPE_GFX, 1542 AMD_CG_STATE_UNGATE); 1543 amdgpu_device_ip_set_powergating_state(smu->adev, 1544 AMD_IP_BLOCK_TYPE_GFX, 1545 AMD_PG_STATE_UNGATE); 1546 } 1547 } else { 1548 /* exit umd pstate, restore level, enable gfx cg*/ 1549 if (!(*level & profile_mode_mask)) { 1550 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1551 *level = smu_dpm_ctx->saved_dpm_level; 1552 smu_dpm_ctx->enable_umd_pstate = false; 1553 amdgpu_device_ip_set_clockgating_state(smu->adev, 1554 AMD_IP_BLOCK_TYPE_GFX, 1555 AMD_CG_STATE_GATE); 1556 amdgpu_device_ip_set_powergating_state(smu->adev, 1557 AMD_IP_BLOCK_TYPE_GFX, 1558 AMD_PG_STATE_GATE); 1559 } 1560 } 1561 1562 return 0; 1563} 1564 1565static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1566{ 1567 int ret = 0; 1568 uint32_t sclk_mask, mclk_mask, soc_mask; 1569 1570 switch (level) { 1571 case AMD_DPM_FORCED_LEVEL_HIGH: 1572 ret = smu_force_dpm_limit_value(smu, true); 1573 break; 1574 case AMD_DPM_FORCED_LEVEL_LOW: 1575 ret = smu_force_dpm_limit_value(smu, false); 1576 break; 1577 case AMD_DPM_FORCED_LEVEL_AUTO: 1578 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1579 ret = smu_unforce_dpm_levels(smu); 1580 break; 1581 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1582 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1583 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1584 ret = smu_get_profiling_clk_mask(smu, level, 1585 &sclk_mask, 1586 &mclk_mask, 1587 &soc_mask); 1588 if (ret) 1589 return ret; 1590 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); 1591 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); 1592 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1593 break; 1594 case AMD_DPM_FORCED_LEVEL_MANUAL: 1595 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1596 default: 1597 break; 1598 } 1599 return ret; 1600} 1601 1602int smu_adjust_power_state_dynamic(struct smu_context *smu, 1603 enum amd_dpm_forced_level level, 1604 bool skip_display_settings) 1605{ 1606 int ret = 0; 1607 int index = 0; 1608 long workload; 1609 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1610 1611 if (!smu->pm_enabled) 1612 return -EINVAL; 1613 1614 if (!skip_display_settings) { 1615 ret = smu_display_config_changed(smu); 1616 if (ret) { 1617 pr_err("Failed to change display config!"); 1618 return ret; 1619 } 1620 } 1621 1622 ret = smu_apply_clocks_adjust_rules(smu); 1623 if (ret) { 1624 pr_err("Failed to apply clocks adjust rules!"); 1625 return ret; 1626 } 1627 1628 if (!skip_display_settings) { 1629 ret = smu_notify_smc_dispaly_config(smu); 1630 if (ret) { 1631 pr_err("Failed to notify smc display config!"); 1632 return ret; 1633 } 1634 } 1635 1636 if (smu_dpm_ctx->dpm_level != level) { 1637 ret = smu_asic_set_performance_level(smu, level); 1638 if (ret) { 1639 ret = smu_default_set_performance_level(smu, level); 1640 if (ret) { 1641 pr_err("Failed to set performance level!"); 1642 return ret; 1643 } 1644 } 1645 1646 /* update the saved copy */ 1647 smu_dpm_ctx->dpm_level = level; 1648 } 1649 1650 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 1651 index = fls(smu->workload_mask); 1652 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1653 workload = smu->workload_setting[index]; 1654 1655 if (smu->power_profile_mode != workload) 1656 smu_set_power_profile_mode(smu, &workload, 0); 1657 } 1658 1659 return ret; 1660} 1661 1662int smu_handle_task(struct smu_context *smu, 1663 enum amd_dpm_forced_level level, 1664 enum amd_pp_task task_id) 1665{ 1666 int ret = 0; 1667 1668 switch (task_id) { 1669 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1670 ret = smu_pre_display_config_changed(smu); 1671 if (ret) 1672 return ret; 1673 ret = smu_set_cpu_power_state(smu); 1674 if (ret) 1675 return ret; 1676 ret = smu_adjust_power_state_dynamic(smu, level, false); 1677 break; 1678 case AMD_PP_TASK_COMPLETE_INIT: 1679 case AMD_PP_TASK_READJUST_POWER_STATE: 1680 ret = smu_adjust_power_state_dynamic(smu, level, true); 1681 break; 1682 default: 1683 break; 1684 } 1685 1686 return ret; 1687} 1688 1689int smu_switch_power_profile(struct smu_context *smu, 1690 enum PP_SMC_POWER_PROFILE type, 1691 bool en) 1692{ 1693 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1694 long workload; 1695 uint32_t index; 1696 1697 if (!smu->pm_enabled) 1698 return -EINVAL; 1699 1700 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1701 return -EINVAL; 1702 1703 mutex_lock(&smu->mutex); 1704 1705 if (!en) { 1706 smu->workload_mask &= ~(1 << smu->workload_prority[type]); 1707 index = fls(smu->workload_mask); 1708 index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1709 workload = smu->workload_setting[index]; 1710 } else { 1711 smu->workload_mask |= (1 << smu->workload_prority[type]); 1712 index = fls(smu->workload_mask); 1713 index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1714 workload = smu->workload_setting[index]; 1715 } 1716 1717 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 1718 smu_set_power_profile_mode(smu, &workload, 0); 1719 1720 mutex_unlock(&smu->mutex); 1721 1722 return 0; 1723} 1724 1725enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) 1726{ 1727 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1728 enum amd_dpm_forced_level level; 1729 1730 if (!smu_dpm_ctx->dpm_context) 1731 return -EINVAL; 1732 1733 mutex_lock(&(smu->mutex)); 1734 level = smu_dpm_ctx->dpm_level; 1735 mutex_unlock(&(smu->mutex)); 1736 1737 return level; 1738} 1739 1740int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1741{ 1742 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1743 int ret = 0; 1744 1745 if (!smu_dpm_ctx->dpm_context) 1746 return -EINVAL; 1747 1748 ret = smu_enable_umd_pstate(smu, &level); 1749 if (ret) 1750 return ret; 1751 1752 ret = smu_handle_task(smu, level, 1753 AMD_PP_TASK_READJUST_POWER_STATE); 1754 1755 return ret; 1756} 1757 1758int smu_set_display_count(struct smu_context *smu, uint32_t count) 1759{ 1760 int ret = 0; 1761 1762 mutex_lock(&smu->mutex); 1763 ret = smu_init_display_count(smu, count); 1764 mutex_unlock(&smu->mutex); 1765 1766 return ret; 1767} 1768 1769const struct amd_ip_funcs smu_ip_funcs = { 1770 .name = "smu", 1771 .early_init = smu_early_init, 1772 .late_init = smu_late_init, 1773 .sw_init = smu_sw_init, 1774 .sw_fini = smu_sw_fini, 1775 .hw_init = smu_hw_init, 1776 .hw_fini = smu_hw_fini, 1777 .suspend = smu_suspend, 1778 .resume = smu_resume, 1779 .is_idle = NULL, 1780 .check_soft_reset = NULL, 1781 .wait_for_idle = NULL, 1782 .soft_reset = NULL, 1783 .set_clockgating_state = smu_set_clockgating_state, 1784 .set_powergating_state = smu_set_powergating_state, 1785 .enable_umd_pstate = smu_enable_umd_pstate, 1786}; 1787 1788const struct amdgpu_ip_block_version smu_v11_0_ip_block = 1789{ 1790 .type = AMD_IP_BLOCK_TYPE_SMC, 1791 .major = 11, 1792 .minor = 0, 1793 .rev = 0, 1794 .funcs = &smu_ip_funcs, 1795}; 1796 1797const struct amdgpu_ip_block_version smu_v12_0_ip_block = 1798{ 1799 .type = AMD_IP_BLOCK_TYPE_SMC, 1800 .major = 12, 1801 .minor = 0, 1802 .rev = 0, 1803 .funcs = &smu_ip_funcs, 1804};