at v5.4-rc2 3188 lines 97 kB view raw
1/* 2 * Copyright 2017 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Rafał Miłecki <zajec5@gmail.com> 23 * Alex Deucher <alexdeucher@gmail.com> 24 */ 25 26#include <drm/drm_debugfs.h> 27 28#include "amdgpu.h" 29#include "amdgpu_drv.h" 30#include "amdgpu_pm.h" 31#include "amdgpu_dpm.h" 32#include "amdgpu_display.h" 33#include "amdgpu_smu.h" 34#include "atom.h" 35#include <linux/power_supply.h> 36#include <linux/pci.h> 37#include <linux/hwmon.h> 38#include <linux/hwmon-sysfs.h> 39#include <linux/nospec.h> 40#include "hwmgr.h" 41#define WIDTH_4K 3840 42 43static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); 44 45static const struct cg_flag_name clocks[] = { 46 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 47 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 48 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 49 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 50 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 51 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 52 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 53 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 54 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 55 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 56 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 57 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 58 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 59 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 60 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 61 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 62 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 63 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 64 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 65 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 66 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 67 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 68 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 69 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 70 71 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, 72 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, 73 {0, NULL}, 74}; 75 76static const struct hwmon_temp_label { 77 enum PP_HWMON_TEMP channel; 78 const char *label; 79} temp_label[] = { 80 {PP_TEMP_EDGE, "edge"}, 81 {PP_TEMP_JUNCTION, "junction"}, 82 {PP_TEMP_MEM, "mem"}, 83}; 84 85void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 86{ 87 if (adev->pm.dpm_enabled) { 88 mutex_lock(&adev->pm.mutex); 89 if (power_supply_is_system_supplied() > 0) 90 adev->pm.ac_power = true; 91 else 92 adev->pm.ac_power = false; 93 if (adev->powerplay.pp_funcs->enable_bapm) 94 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 95 mutex_unlock(&adev->pm.mutex); 96 } 97} 98 99int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 100 void *data, uint32_t *size) 101{ 102 int ret = 0; 103 104 if (!data || !size) 105 return -EINVAL; 106 107 if (is_support_sw_smu(adev)) 108 ret = smu_read_sensor(&adev->smu, sensor, data, size); 109 else { 110 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) 111 ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, 112 sensor, data, size); 113 else 114 ret = -EINVAL; 115 } 116 117 return ret; 118} 119 120/** 121 * DOC: power_dpm_state 122 * 123 * The power_dpm_state file is a legacy interface and is only provided for 124 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting 125 * certain power related parameters. The file power_dpm_state is used for this. 126 * It accepts the following arguments: 127 * 128 * - battery 129 * 130 * - balanced 131 * 132 * - performance 133 * 134 * battery 135 * 136 * On older GPUs, the vbios provided a special power state for battery 137 * operation. Selecting battery switched to this state. This is no 138 * longer provided on newer GPUs so the option does nothing in that case. 139 * 140 * balanced 141 * 142 * On older GPUs, the vbios provided a special power state for balanced 143 * operation. Selecting balanced switched to this state. This is no 144 * longer provided on newer GPUs so the option does nothing in that case. 145 * 146 * performance 147 * 148 * On older GPUs, the vbios provided a special power state for performance 149 * operation. Selecting performance switched to this state. This is no 150 * longer provided on newer GPUs so the option does nothing in that case. 151 * 152 */ 153 154static ssize_t amdgpu_get_dpm_state(struct device *dev, 155 struct device_attribute *attr, 156 char *buf) 157{ 158 struct drm_device *ddev = dev_get_drvdata(dev); 159 struct amdgpu_device *adev = ddev->dev_private; 160 enum amd_pm_state_type pm; 161 162 if (is_support_sw_smu(adev)) { 163 if (adev->smu.ppt_funcs->get_current_power_state) 164 pm = amdgpu_smu_get_current_power_state(adev); 165 else 166 pm = adev->pm.dpm.user_state; 167 } else if (adev->powerplay.pp_funcs->get_current_power_state) { 168 pm = amdgpu_dpm_get_current_power_state(adev); 169 } else { 170 pm = adev->pm.dpm.user_state; 171 } 172 173 return snprintf(buf, PAGE_SIZE, "%s\n", 174 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 175 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 176} 177 178static ssize_t amdgpu_set_dpm_state(struct device *dev, 179 struct device_attribute *attr, 180 const char *buf, 181 size_t count) 182{ 183 struct drm_device *ddev = dev_get_drvdata(dev); 184 struct amdgpu_device *adev = ddev->dev_private; 185 enum amd_pm_state_type state; 186 187 if (strncmp("battery", buf, strlen("battery")) == 0) 188 state = POWER_STATE_TYPE_BATTERY; 189 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 190 state = POWER_STATE_TYPE_BALANCED; 191 else if (strncmp("performance", buf, strlen("performance")) == 0) 192 state = POWER_STATE_TYPE_PERFORMANCE; 193 else { 194 count = -EINVAL; 195 goto fail; 196 } 197 198 if (is_support_sw_smu(adev)) { 199 mutex_lock(&adev->pm.mutex); 200 adev->pm.dpm.user_state = state; 201 mutex_unlock(&adev->pm.mutex); 202 } else if (adev->powerplay.pp_funcs->dispatch_tasks) { 203 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 204 } else { 205 mutex_lock(&adev->pm.mutex); 206 adev->pm.dpm.user_state = state; 207 mutex_unlock(&adev->pm.mutex); 208 209 /* Can't set dpm state when the card is off */ 210 if (!(adev->flags & AMD_IS_PX) || 211 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 212 amdgpu_pm_compute_clocks(adev); 213 } 214fail: 215 return count; 216} 217 218 219/** 220 * DOC: power_dpm_force_performance_level 221 * 222 * The amdgpu driver provides a sysfs API for adjusting certain power 223 * related parameters. The file power_dpm_force_performance_level is 224 * used for this. It accepts the following arguments: 225 * 226 * - auto 227 * 228 * - low 229 * 230 * - high 231 * 232 * - manual 233 * 234 * - profile_standard 235 * 236 * - profile_min_sclk 237 * 238 * - profile_min_mclk 239 * 240 * - profile_peak 241 * 242 * auto 243 * 244 * When auto is selected, the driver will attempt to dynamically select 245 * the optimal power profile for current conditions in the driver. 246 * 247 * low 248 * 249 * When low is selected, the clocks are forced to the lowest power state. 250 * 251 * high 252 * 253 * When high is selected, the clocks are forced to the highest power state. 254 * 255 * manual 256 * 257 * When manual is selected, the user can manually adjust which power states 258 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 259 * and pp_dpm_pcie files and adjust the power state transition heuristics 260 * via the pp_power_profile_mode sysfs file. 261 * 262 * profile_standard 263 * profile_min_sclk 264 * profile_min_mclk 265 * profile_peak 266 * 267 * When the profiling modes are selected, clock and power gating are 268 * disabled and the clocks are set for different profiling cases. This 269 * mode is recommended for profiling specific work loads where you do 270 * not want clock or power gating for clock fluctuation to interfere 271 * with your results. profile_standard sets the clocks to a fixed clock 272 * level which varies from asic to asic. profile_min_sclk forces the sclk 273 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 274 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 275 * 276 */ 277 278static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 279 struct device_attribute *attr, 280 char *buf) 281{ 282 struct drm_device *ddev = dev_get_drvdata(dev); 283 struct amdgpu_device *adev = ddev->dev_private; 284 enum amd_dpm_forced_level level = 0xff; 285 286 if (amdgpu_sriov_vf(adev)) 287 return 0; 288 289 if ((adev->flags & AMD_IS_PX) && 290 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 291 return snprintf(buf, PAGE_SIZE, "off\n"); 292 293 if (is_support_sw_smu(adev)) 294 level = smu_get_performance_level(&adev->smu); 295 else if (adev->powerplay.pp_funcs->get_performance_level) 296 level = amdgpu_dpm_get_performance_level(adev); 297 else 298 level = adev->pm.dpm.forced_level; 299 300 return snprintf(buf, PAGE_SIZE, "%s\n", 301 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 302 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 303 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 304 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 305 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 306 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 307 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 308 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 309 "unknown"); 310} 311 312static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 313 struct device_attribute *attr, 314 const char *buf, 315 size_t count) 316{ 317 struct drm_device *ddev = dev_get_drvdata(dev); 318 struct amdgpu_device *adev = ddev->dev_private; 319 enum amd_dpm_forced_level level; 320 enum amd_dpm_forced_level current_level = 0xff; 321 int ret = 0; 322 323 /* Can't force performance level when the card is off */ 324 if ((adev->flags & AMD_IS_PX) && 325 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 326 return -EINVAL; 327 328 if (strncmp("low", buf, strlen("low")) == 0) { 329 level = AMD_DPM_FORCED_LEVEL_LOW; 330 } else if (strncmp("high", buf, strlen("high")) == 0) { 331 level = AMD_DPM_FORCED_LEVEL_HIGH; 332 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 333 level = AMD_DPM_FORCED_LEVEL_AUTO; 334 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 335 level = AMD_DPM_FORCED_LEVEL_MANUAL; 336 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 337 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 338 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 339 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 340 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 341 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 342 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 343 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 344 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 345 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 346 } else { 347 count = -EINVAL; 348 goto fail; 349 } 350 351 /* handle sriov case here */ 352 if (amdgpu_sriov_vf(adev)) { 353 if (amdgim_is_hwperf(adev) && 354 adev->virt.ops->force_dpm_level) { 355 mutex_lock(&adev->pm.mutex); 356 adev->virt.ops->force_dpm_level(adev, level); 357 mutex_unlock(&adev->pm.mutex); 358 return count; 359 } else { 360 return -EINVAL; 361 } 362 } 363 364 if (is_support_sw_smu(adev)) 365 current_level = smu_get_performance_level(&adev->smu); 366 else if (adev->powerplay.pp_funcs->get_performance_level) 367 current_level = amdgpu_dpm_get_performance_level(adev); 368 369 if (current_level == level) 370 return count; 371 372 /* profile_exit setting is valid only when current mode is in profile mode */ 373 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 374 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 375 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 376 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && 377 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { 378 pr_err("Currently not in any profile mode!\n"); 379 return -EINVAL; 380 } 381 382 if (is_support_sw_smu(adev)) { 383 ret = smu_force_performance_level(&adev->smu, level); 384 if (ret) 385 count = -EINVAL; 386 } else if (adev->powerplay.pp_funcs->force_performance_level) { 387 mutex_lock(&adev->pm.mutex); 388 if (adev->pm.dpm.thermal_active) { 389 count = -EINVAL; 390 mutex_unlock(&adev->pm.mutex); 391 goto fail; 392 } 393 ret = amdgpu_dpm_force_performance_level(adev, level); 394 if (ret) 395 count = -EINVAL; 396 else 397 adev->pm.dpm.forced_level = level; 398 mutex_unlock(&adev->pm.mutex); 399 } 400 401fail: 402 return count; 403} 404 405static ssize_t amdgpu_get_pp_num_states(struct device *dev, 406 struct device_attribute *attr, 407 char *buf) 408{ 409 struct drm_device *ddev = dev_get_drvdata(dev); 410 struct amdgpu_device *adev = ddev->dev_private; 411 struct pp_states_info data; 412 int i, buf_len, ret; 413 414 if (is_support_sw_smu(adev)) { 415 ret = smu_get_power_num_states(&adev->smu, &data); 416 if (ret) 417 return ret; 418 } else if (adev->powerplay.pp_funcs->get_pp_num_states) 419 amdgpu_dpm_get_pp_num_states(adev, &data); 420 421 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 422 for (i = 0; i < data.nums; i++) 423 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 424 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 425 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 426 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 427 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 428 429 return buf_len; 430} 431 432static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 433 struct device_attribute *attr, 434 char *buf) 435{ 436 struct drm_device *ddev = dev_get_drvdata(dev); 437 struct amdgpu_device *adev = ddev->dev_private; 438 struct pp_states_info data; 439 struct smu_context *smu = &adev->smu; 440 enum amd_pm_state_type pm = 0; 441 int i = 0, ret = 0; 442 443 if (is_support_sw_smu(adev)) { 444 pm = smu_get_current_power_state(smu); 445 ret = smu_get_power_num_states(smu, &data); 446 if (ret) 447 return ret; 448 } else if (adev->powerplay.pp_funcs->get_current_power_state 449 && adev->powerplay.pp_funcs->get_pp_num_states) { 450 pm = amdgpu_dpm_get_current_power_state(adev); 451 amdgpu_dpm_get_pp_num_states(adev, &data); 452 } 453 454 for (i = 0; i < data.nums; i++) { 455 if (pm == data.states[i]) 456 break; 457 } 458 459 if (i == data.nums) 460 i = -EINVAL; 461 462 return snprintf(buf, PAGE_SIZE, "%d\n", i); 463} 464 465static ssize_t amdgpu_get_pp_force_state(struct device *dev, 466 struct device_attribute *attr, 467 char *buf) 468{ 469 struct drm_device *ddev = dev_get_drvdata(dev); 470 struct amdgpu_device *adev = ddev->dev_private; 471 472 if (adev->pp_force_state_enabled) 473 return amdgpu_get_pp_cur_state(dev, attr, buf); 474 else 475 return snprintf(buf, PAGE_SIZE, "\n"); 476} 477 478static ssize_t amdgpu_set_pp_force_state(struct device *dev, 479 struct device_attribute *attr, 480 const char *buf, 481 size_t count) 482{ 483 struct drm_device *ddev = dev_get_drvdata(dev); 484 struct amdgpu_device *adev = ddev->dev_private; 485 enum amd_pm_state_type state = 0; 486 unsigned long idx; 487 int ret; 488 489 if (strlen(buf) == 1) 490 adev->pp_force_state_enabled = false; 491 else if (is_support_sw_smu(adev)) 492 adev->pp_force_state_enabled = false; 493 else if (adev->powerplay.pp_funcs->dispatch_tasks && 494 adev->powerplay.pp_funcs->get_pp_num_states) { 495 struct pp_states_info data; 496 497 ret = kstrtoul(buf, 0, &idx); 498 if (ret || idx >= ARRAY_SIZE(data.states)) { 499 count = -EINVAL; 500 goto fail; 501 } 502 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 503 504 amdgpu_dpm_get_pp_num_states(adev, &data); 505 state = data.states[idx]; 506 /* only set user selected power states */ 507 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 508 state != POWER_STATE_TYPE_DEFAULT) { 509 amdgpu_dpm_dispatch_task(adev, 510 AMD_PP_TASK_ENABLE_USER_STATE, &state); 511 adev->pp_force_state_enabled = true; 512 } 513 } 514fail: 515 return count; 516} 517 518/** 519 * DOC: pp_table 520 * 521 * The amdgpu driver provides a sysfs API for uploading new powerplay 522 * tables. The file pp_table is used for this. Reading the file 523 * will dump the current power play table. Writing to the file 524 * will attempt to upload a new powerplay table and re-initialize 525 * powerplay using that new table. 526 * 527 */ 528 529static ssize_t amdgpu_get_pp_table(struct device *dev, 530 struct device_attribute *attr, 531 char *buf) 532{ 533 struct drm_device *ddev = dev_get_drvdata(dev); 534 struct amdgpu_device *adev = ddev->dev_private; 535 char *table = NULL; 536 int size; 537 538 if (is_support_sw_smu(adev)) { 539 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 540 if (size < 0) 541 return size; 542 } 543 else if (adev->powerplay.pp_funcs->get_pp_table) 544 size = amdgpu_dpm_get_pp_table(adev, &table); 545 else 546 return 0; 547 548 if (size >= PAGE_SIZE) 549 size = PAGE_SIZE - 1; 550 551 memcpy(buf, table, size); 552 553 return size; 554} 555 556static ssize_t amdgpu_set_pp_table(struct device *dev, 557 struct device_attribute *attr, 558 const char *buf, 559 size_t count) 560{ 561 struct drm_device *ddev = dev_get_drvdata(dev); 562 struct amdgpu_device *adev = ddev->dev_private; 563 int ret = 0; 564 565 if (is_support_sw_smu(adev)) { 566 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); 567 if (ret) 568 return ret; 569 } else if (adev->powerplay.pp_funcs->set_pp_table) 570 amdgpu_dpm_set_pp_table(adev, buf, count); 571 572 return count; 573} 574 575/** 576 * DOC: pp_od_clk_voltage 577 * 578 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 579 * in each power level within a power state. The pp_od_clk_voltage is used for 580 * this. 581 * 582 * < For Vega10 and previous ASICs > 583 * 584 * Reading the file will display: 585 * 586 * - a list of engine clock levels and voltages labeled OD_SCLK 587 * 588 * - a list of memory clock levels and voltages labeled OD_MCLK 589 * 590 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 591 * 592 * To manually adjust these settings, first select manual using 593 * power_dpm_force_performance_level. Enter a new value for each 594 * level by writing a string that contains "s/m level clock voltage" to 595 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 596 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 597 * 810 mV. When you have edited all of the states as needed, write 598 * "c" (commit) to the file to commit your changes. If you want to reset to the 599 * default power levels, write "r" (reset) to the file to reset them. 600 * 601 * 602 * < For Vega20 > 603 * 604 * Reading the file will display: 605 * 606 * - minimum and maximum engine clock labeled OD_SCLK 607 * 608 * - maximum memory clock labeled OD_MCLK 609 * 610 * - three <frequency, voltage> points labeled OD_VDDC_CURVE. 611 * They can be used to calibrate the sclk voltage curve. 612 * 613 * - a list of valid ranges for sclk, mclk, and voltage curve points 614 * labeled OD_RANGE 615 * 616 * To manually adjust these settings: 617 * 618 * - First select manual using power_dpm_force_performance_level 619 * 620 * - For clock frequency setting, enter a new value by writing a 621 * string that contains "s/m index clock" to the file. The index 622 * should be 0 if to set minimum clock. And 1 if to set maximum 623 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. 624 * "m 1 800" will update maximum mclk to be 800Mhz. 625 * 626 * For sclk voltage curve, enter the new values by writing a 627 * string that contains "vc point clock voltage" to the file. The 628 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will 629 * update point1 with clock set as 300Mhz and voltage as 630 * 600mV. "vc 2 1000 1000" will update point3 with clock set 631 * as 1000Mhz and voltage 1000mV. 632 * 633 * - When you have edited all of the states as needed, write "c" (commit) 634 * to the file to commit your changes 635 * 636 * - If you want to reset to the default power levels, write "r" (reset) 637 * to the file to reset them 638 * 639 */ 640 641static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 642 struct device_attribute *attr, 643 const char *buf, 644 size_t count) 645{ 646 struct drm_device *ddev = dev_get_drvdata(dev); 647 struct amdgpu_device *adev = ddev->dev_private; 648 int ret; 649 uint32_t parameter_size = 0; 650 long parameter[64]; 651 char buf_cpy[128]; 652 char *tmp_str; 653 char *sub_str; 654 const char delimiter[3] = {' ', '\n', '\0'}; 655 uint32_t type; 656 657 if (count > 127) 658 return -EINVAL; 659 660 if (*buf == 's') 661 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 662 else if (*buf == 'm') 663 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 664 else if(*buf == 'r') 665 type = PP_OD_RESTORE_DEFAULT_TABLE; 666 else if (*buf == 'c') 667 type = PP_OD_COMMIT_DPM_TABLE; 668 else if (!strncmp(buf, "vc", 2)) 669 type = PP_OD_EDIT_VDDC_CURVE; 670 else 671 return -EINVAL; 672 673 memcpy(buf_cpy, buf, count+1); 674 675 tmp_str = buf_cpy; 676 677 if (type == PP_OD_EDIT_VDDC_CURVE) 678 tmp_str++; 679 while (isspace(*++tmp_str)); 680 681 while (tmp_str[0]) { 682 sub_str = strsep(&tmp_str, delimiter); 683 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 684 if (ret) 685 return -EINVAL; 686 parameter_size++; 687 688 while (isspace(*tmp_str)) 689 tmp_str++; 690 } 691 692 if (is_support_sw_smu(adev)) { 693 ret = smu_od_edit_dpm_table(&adev->smu, type, 694 parameter, parameter_size); 695 696 if (ret) 697 return -EINVAL; 698 } else { 699 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { 700 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 701 parameter, parameter_size); 702 if (ret) 703 return -EINVAL; 704 } 705 706 if (type == PP_OD_COMMIT_DPM_TABLE) { 707 if (adev->powerplay.pp_funcs->dispatch_tasks) { 708 amdgpu_dpm_dispatch_task(adev, 709 AMD_PP_TASK_READJUST_POWER_STATE, 710 NULL); 711 return count; 712 } else { 713 return -EINVAL; 714 } 715 } 716 } 717 718 return count; 719} 720 721static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 722 struct device_attribute *attr, 723 char *buf) 724{ 725 struct drm_device *ddev = dev_get_drvdata(dev); 726 struct amdgpu_device *adev = ddev->dev_private; 727 uint32_t size = 0; 728 729 if (is_support_sw_smu(adev)) { 730 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 731 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); 732 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); 733 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); 734 return size; 735 } else if (adev->powerplay.pp_funcs->print_clock_levels) { 736 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 737 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 738 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); 739 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 740 return size; 741 } else { 742 return snprintf(buf, PAGE_SIZE, "\n"); 743 } 744 745} 746 747/** 748 * DOC: pp_features 749 * 750 * The amdgpu driver provides a sysfs API for adjusting what powerplay 751 * features to be enabled. The file pp_features is used for this. And 752 * this is only available for Vega10 and later dGPUs. 753 * 754 * Reading back the file will show you the followings: 755 * - Current ppfeature masks 756 * - List of the all supported powerplay features with their naming, 757 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). 758 * 759 * To manually enable or disable a specific feature, just set or clear 760 * the corresponding bit from original ppfeature masks and input the 761 * new ppfeature masks. 762 */ 763static ssize_t amdgpu_set_pp_feature_status(struct device *dev, 764 struct device_attribute *attr, 765 const char *buf, 766 size_t count) 767{ 768 struct drm_device *ddev = dev_get_drvdata(dev); 769 struct amdgpu_device *adev = ddev->dev_private; 770 uint64_t featuremask; 771 int ret; 772 773 ret = kstrtou64(buf, 0, &featuremask); 774 if (ret) 775 return -EINVAL; 776 777 pr_debug("featuremask = 0x%llx\n", featuremask); 778 779 if (is_support_sw_smu(adev)) { 780 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); 781 if (ret) 782 return -EINVAL; 783 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { 784 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 785 if (ret) 786 return -EINVAL; 787 } 788 789 return count; 790} 791 792static ssize_t amdgpu_get_pp_feature_status(struct device *dev, 793 struct device_attribute *attr, 794 char *buf) 795{ 796 struct drm_device *ddev = dev_get_drvdata(dev); 797 struct amdgpu_device *adev = ddev->dev_private; 798 799 if (is_support_sw_smu(adev)) { 800 return smu_sys_get_pp_feature_mask(&adev->smu, buf); 801 } else if (adev->powerplay.pp_funcs->get_ppfeature_status) 802 return amdgpu_dpm_get_ppfeature_status(adev, buf); 803 804 return snprintf(buf, PAGE_SIZE, "\n"); 805} 806 807/** 808 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk 809 * pp_dpm_pcie 810 * 811 * The amdgpu driver provides a sysfs API for adjusting what power levels 812 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 813 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for 814 * this. 815 * 816 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for 817 * Vega10 and later ASICs. 818 * pp_dpm_fclk interface is only available for Vega20 and later ASICs. 819 * 820 * Reading back the files will show you the available power levels within 821 * the power state and the clock information for those levels. 822 * 823 * To manually adjust these states, first select manual using 824 * power_dpm_force_performance_level. 825 * Secondly,Enter a new value for each level by inputing a string that 826 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 827 * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. 828 * 829 * NOTE: change to the dcefclk max dpm level is not supported now 830 */ 831 832static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 833 struct device_attribute *attr, 834 char *buf) 835{ 836 struct drm_device *ddev = dev_get_drvdata(dev); 837 struct amdgpu_device *adev = ddev->dev_private; 838 839 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 840 adev->virt.ops->get_pp_clk) 841 return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); 842 843 if (is_support_sw_smu(adev)) 844 return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 845 else if (adev->powerplay.pp_funcs->print_clock_levels) 846 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 847 else 848 return snprintf(buf, PAGE_SIZE, "\n"); 849} 850 851/* 852 * Worst case: 32 bits individually specified, in octal at 12 characters 853 * per line (+1 for \n). 854 */ 855#define AMDGPU_MASK_BUF_MAX (32 * 13) 856 857static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 858{ 859 int ret; 860 long level; 861 char *sub_str = NULL; 862 char *tmp; 863 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 864 const char delimiter[3] = {' ', '\n', '\0'}; 865 size_t bytes; 866 867 *mask = 0; 868 869 bytes = min(count, sizeof(buf_cpy) - 1); 870 memcpy(buf_cpy, buf, bytes); 871 buf_cpy[bytes] = '\0'; 872 tmp = buf_cpy; 873 while (tmp[0]) { 874 sub_str = strsep(&tmp, delimiter); 875 if (strlen(sub_str)) { 876 ret = kstrtol(sub_str, 0, &level); 877 if (ret) 878 return -EINVAL; 879 *mask |= 1 << level; 880 } else 881 break; 882 } 883 884 return 0; 885} 886 887static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 888 struct device_attribute *attr, 889 const char *buf, 890 size_t count) 891{ 892 struct drm_device *ddev = dev_get_drvdata(dev); 893 struct amdgpu_device *adev = ddev->dev_private; 894 int ret; 895 uint32_t mask = 0; 896 897 if (amdgpu_sriov_vf(adev)) 898 return 0; 899 900 ret = amdgpu_read_mask(buf, count, &mask); 901 if (ret) 902 return ret; 903 904 if (is_support_sw_smu(adev)) 905 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); 906 else if (adev->powerplay.pp_funcs->force_clock_level) 907 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 908 909 if (ret) 910 return -EINVAL; 911 912 return count; 913} 914 915static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 916 struct device_attribute *attr, 917 char *buf) 918{ 919 struct drm_device *ddev = dev_get_drvdata(dev); 920 struct amdgpu_device *adev = ddev->dev_private; 921 922 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && 923 adev->virt.ops->get_pp_clk) 924 return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); 925 926 if (is_support_sw_smu(adev)) 927 return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 928 else if (adev->powerplay.pp_funcs->print_clock_levels) 929 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 930 else 931 return snprintf(buf, PAGE_SIZE, "\n"); 932} 933 934static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 935 struct device_attribute *attr, 936 const char *buf, 937 size_t count) 938{ 939 struct drm_device *ddev = dev_get_drvdata(dev); 940 struct amdgpu_device *adev = ddev->dev_private; 941 int ret; 942 uint32_t mask = 0; 943 944 if (amdgpu_sriov_vf(adev)) 945 return 0; 946 947 ret = amdgpu_read_mask(buf, count, &mask); 948 if (ret) 949 return ret; 950 951 if (is_support_sw_smu(adev)) 952 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); 953 else if (adev->powerplay.pp_funcs->force_clock_level) 954 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 955 956 if (ret) 957 return -EINVAL; 958 959 return count; 960} 961 962static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 963 struct device_attribute *attr, 964 char *buf) 965{ 966 struct drm_device *ddev = dev_get_drvdata(dev); 967 struct amdgpu_device *adev = ddev->dev_private; 968 969 if (is_support_sw_smu(adev)) 970 return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 971 else if (adev->powerplay.pp_funcs->print_clock_levels) 972 return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 973 else 974 return snprintf(buf, PAGE_SIZE, "\n"); 975} 976 977static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 978 struct device_attribute *attr, 979 const char *buf, 980 size_t count) 981{ 982 struct drm_device *ddev = dev_get_drvdata(dev); 983 struct amdgpu_device *adev = ddev->dev_private; 984 int ret; 985 uint32_t mask = 0; 986 987 ret = amdgpu_read_mask(buf, count, &mask); 988 if (ret) 989 return ret; 990 991 if (is_support_sw_smu(adev)) 992 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); 993 else if (adev->powerplay.pp_funcs->force_clock_level) 994 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); 995 996 if (ret) 997 return -EINVAL; 998 999 return count; 1000} 1001 1002static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1003 struct device_attribute *attr, 1004 char *buf) 1005{ 1006 struct drm_device *ddev = dev_get_drvdata(dev); 1007 struct amdgpu_device *adev = ddev->dev_private; 1008 1009 if (is_support_sw_smu(adev)) 1010 return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1011 else if (adev->powerplay.pp_funcs->print_clock_levels) 1012 return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1013 else 1014 return snprintf(buf, PAGE_SIZE, "\n"); 1015} 1016 1017static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1018 struct device_attribute *attr, 1019 const char *buf, 1020 size_t count) 1021{ 1022 struct drm_device *ddev = dev_get_drvdata(dev); 1023 struct amdgpu_device *adev = ddev->dev_private; 1024 int ret; 1025 uint32_t mask = 0; 1026 1027 ret = amdgpu_read_mask(buf, count, &mask); 1028 if (ret) 1029 return ret; 1030 1031 if (is_support_sw_smu(adev)) 1032 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); 1033 else if (adev->powerplay.pp_funcs->force_clock_level) 1034 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); 1035 1036 if (ret) 1037 return -EINVAL; 1038 1039 return count; 1040} 1041 1042static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1043 struct device_attribute *attr, 1044 char *buf) 1045{ 1046 struct drm_device *ddev = dev_get_drvdata(dev); 1047 struct amdgpu_device *adev = ddev->dev_private; 1048 1049 if (is_support_sw_smu(adev)) 1050 return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1051 else if (adev->powerplay.pp_funcs->print_clock_levels) 1052 return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1053 else 1054 return snprintf(buf, PAGE_SIZE, "\n"); 1055} 1056 1057static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1058 struct device_attribute *attr, 1059 const char *buf, 1060 size_t count) 1061{ 1062 struct drm_device *ddev = dev_get_drvdata(dev); 1063 struct amdgpu_device *adev = ddev->dev_private; 1064 int ret; 1065 uint32_t mask = 0; 1066 1067 ret = amdgpu_read_mask(buf, count, &mask); 1068 if (ret) 1069 return ret; 1070 1071 if (is_support_sw_smu(adev)) 1072 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); 1073 else if (adev->powerplay.pp_funcs->force_clock_level) 1074 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); 1075 1076 if (ret) 1077 return -EINVAL; 1078 1079 return count; 1080} 1081 1082static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1083 struct device_attribute *attr, 1084 char *buf) 1085{ 1086 struct drm_device *ddev = dev_get_drvdata(dev); 1087 struct amdgpu_device *adev = ddev->dev_private; 1088 1089 if (is_support_sw_smu(adev)) 1090 return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1091 else if (adev->powerplay.pp_funcs->print_clock_levels) 1092 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1093 else 1094 return snprintf(buf, PAGE_SIZE, "\n"); 1095} 1096 1097static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1098 struct device_attribute *attr, 1099 const char *buf, 1100 size_t count) 1101{ 1102 struct drm_device *ddev = dev_get_drvdata(dev); 1103 struct amdgpu_device *adev = ddev->dev_private; 1104 int ret; 1105 uint32_t mask = 0; 1106 1107 ret = amdgpu_read_mask(buf, count, &mask); 1108 if (ret) 1109 return ret; 1110 1111 if (is_support_sw_smu(adev)) 1112 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); 1113 else if (adev->powerplay.pp_funcs->force_clock_level) 1114 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 1115 1116 if (ret) 1117 return -EINVAL; 1118 1119 return count; 1120} 1121 1122static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1123 struct device_attribute *attr, 1124 char *buf) 1125{ 1126 struct drm_device *ddev = dev_get_drvdata(dev); 1127 struct amdgpu_device *adev = ddev->dev_private; 1128 uint32_t value = 0; 1129 1130 if (is_support_sw_smu(adev)) 1131 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); 1132 else if (adev->powerplay.pp_funcs->get_sclk_od) 1133 value = amdgpu_dpm_get_sclk_od(adev); 1134 1135 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1136} 1137 1138static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1139 struct device_attribute *attr, 1140 const char *buf, 1141 size_t count) 1142{ 1143 struct drm_device *ddev = dev_get_drvdata(dev); 1144 struct amdgpu_device *adev = ddev->dev_private; 1145 int ret; 1146 long int value; 1147 1148 ret = kstrtol(buf, 0, &value); 1149 1150 if (ret) { 1151 count = -EINVAL; 1152 goto fail; 1153 } 1154 1155 if (is_support_sw_smu(adev)) { 1156 value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); 1157 } else { 1158 if (adev->powerplay.pp_funcs->set_sclk_od) 1159 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1160 1161 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1162 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1163 } else { 1164 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1165 amdgpu_pm_compute_clocks(adev); 1166 } 1167 } 1168 1169fail: 1170 return count; 1171} 1172 1173static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 1174 struct device_attribute *attr, 1175 char *buf) 1176{ 1177 struct drm_device *ddev = dev_get_drvdata(dev); 1178 struct amdgpu_device *adev = ddev->dev_private; 1179 uint32_t value = 0; 1180 1181 if (is_support_sw_smu(adev)) 1182 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); 1183 else if (adev->powerplay.pp_funcs->get_mclk_od) 1184 value = amdgpu_dpm_get_mclk_od(adev); 1185 1186 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1187} 1188 1189static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 1190 struct device_attribute *attr, 1191 const char *buf, 1192 size_t count) 1193{ 1194 struct drm_device *ddev = dev_get_drvdata(dev); 1195 struct amdgpu_device *adev = ddev->dev_private; 1196 int ret; 1197 long int value; 1198 1199 ret = kstrtol(buf, 0, &value); 1200 1201 if (ret) { 1202 count = -EINVAL; 1203 goto fail; 1204 } 1205 1206 if (is_support_sw_smu(adev)) { 1207 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value); 1208 } else { 1209 if (adev->powerplay.pp_funcs->set_mclk_od) 1210 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 1211 1212 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1213 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1214 } else { 1215 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1216 amdgpu_pm_compute_clocks(adev); 1217 } 1218 } 1219 1220fail: 1221 return count; 1222} 1223 1224/** 1225 * DOC: pp_power_profile_mode 1226 * 1227 * The amdgpu driver provides a sysfs API for adjusting the heuristics 1228 * related to switching between power levels in a power state. The file 1229 * pp_power_profile_mode is used for this. 1230 * 1231 * Reading this file outputs a list of all of the predefined power profiles 1232 * and the relevant heuristics settings for that profile. 1233 * 1234 * To select a profile or create a custom profile, first select manual using 1235 * power_dpm_force_performance_level. Writing the number of a predefined 1236 * profile to pp_power_profile_mode will enable those heuristics. To 1237 * create a custom set of heuristics, write a string of numbers to the file 1238 * starting with the number of the custom profile along with a setting 1239 * for each heuristic parameter. Due to differences across asic families 1240 * the heuristic parameters vary from family to family. 1241 * 1242 */ 1243 1244static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 1245 struct device_attribute *attr, 1246 char *buf) 1247{ 1248 struct drm_device *ddev = dev_get_drvdata(dev); 1249 struct amdgpu_device *adev = ddev->dev_private; 1250 1251 if (is_support_sw_smu(adev)) 1252 return smu_get_power_profile_mode(&adev->smu, buf); 1253 else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1254 return amdgpu_dpm_get_power_profile_mode(adev, buf); 1255 1256 return snprintf(buf, PAGE_SIZE, "\n"); 1257} 1258 1259 1260static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 1261 struct device_attribute *attr, 1262 const char *buf, 1263 size_t count) 1264{ 1265 int ret = 0xff; 1266 struct drm_device *ddev = dev_get_drvdata(dev); 1267 struct amdgpu_device *adev = ddev->dev_private; 1268 uint32_t parameter_size = 0; 1269 long parameter[64]; 1270 char *sub_str, buf_cpy[128]; 1271 char *tmp_str; 1272 uint32_t i = 0; 1273 char tmp[2]; 1274 long int profile_mode = 0; 1275 const char delimiter[3] = {' ', '\n', '\0'}; 1276 1277 tmp[0] = *(buf); 1278 tmp[1] = '\0'; 1279 ret = kstrtol(tmp, 0, &profile_mode); 1280 if (ret) 1281 goto fail; 1282 1283 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1284 if (count < 2 || count > 127) 1285 return -EINVAL; 1286 while (isspace(*++buf)) 1287 i++; 1288 memcpy(buf_cpy, buf, count-i); 1289 tmp_str = buf_cpy; 1290 while (tmp_str[0]) { 1291 sub_str = strsep(&tmp_str, delimiter); 1292 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 1293 if (ret) { 1294 count = -EINVAL; 1295 goto fail; 1296 } 1297 parameter_size++; 1298 while (isspace(*tmp_str)) 1299 tmp_str++; 1300 } 1301 } 1302 parameter[parameter_size] = profile_mode; 1303 if (is_support_sw_smu(adev)) 1304 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size); 1305 else if (adev->powerplay.pp_funcs->set_power_profile_mode) 1306 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1307 if (!ret) 1308 return count; 1309fail: 1310 return -EINVAL; 1311} 1312 1313/** 1314 * DOC: busy_percent 1315 * 1316 * The amdgpu driver provides a sysfs API for reading how busy the GPU 1317 * is as a percentage. The file gpu_busy_percent is used for this. 1318 * The SMU firmware computes a percentage of load based on the 1319 * aggregate activity level in the IP cores. 1320 */ 1321static ssize_t amdgpu_get_busy_percent(struct device *dev, 1322 struct device_attribute *attr, 1323 char *buf) 1324{ 1325 struct drm_device *ddev = dev_get_drvdata(dev); 1326 struct amdgpu_device *adev = ddev->dev_private; 1327 int r, value, size = sizeof(value); 1328 1329 /* read the IP busy sensor */ 1330 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1331 (void *)&value, &size); 1332 1333 if (r) 1334 return r; 1335 1336 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1337} 1338 1339/** 1340 * DOC: mem_busy_percent 1341 * 1342 * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1343 * is as a percentage. The file mem_busy_percent is used for this. 1344 * The SMU firmware computes a percentage of load based on the 1345 * aggregate activity level in the IP cores. 1346 */ 1347static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, 1348 struct device_attribute *attr, 1349 char *buf) 1350{ 1351 struct drm_device *ddev = dev_get_drvdata(dev); 1352 struct amdgpu_device *adev = ddev->dev_private; 1353 int r, value, size = sizeof(value); 1354 1355 /* read the IP busy sensor */ 1356 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, 1357 (void *)&value, &size); 1358 1359 if (r) 1360 return r; 1361 1362 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1363} 1364 1365/** 1366 * DOC: pcie_bw 1367 * 1368 * The amdgpu driver provides a sysfs API for estimating how much data 1369 * has been received and sent by the GPU in the last second through PCIe. 1370 * The file pcie_bw is used for this. 1371 * The Perf counters count the number of received and sent messages and return 1372 * those values, as well as the maximum payload size of a PCIe packet (mps). 1373 * Note that it is not possible to easily and quickly obtain the size of each 1374 * packet transmitted, so we output the max payload size (mps) to allow for 1375 * quick estimation of the PCIe bandwidth usage 1376 */ 1377static ssize_t amdgpu_get_pcie_bw(struct device *dev, 1378 struct device_attribute *attr, 1379 char *buf) 1380{ 1381 struct drm_device *ddev = dev_get_drvdata(dev); 1382 struct amdgpu_device *adev = ddev->dev_private; 1383 uint64_t count0, count1; 1384 1385 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1386 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 1387 count0, count1, pcie_get_mps(adev->pdev)); 1388} 1389 1390/** 1391 * DOC: unique_id 1392 * 1393 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU 1394 * The file unique_id is used for this. 1395 * This will provide a Unique ID that will persist from machine to machine 1396 * 1397 * NOTE: This will only work for GFX9 and newer. This file will be absent 1398 * on unsupported ASICs (GFX8 and older) 1399 */ 1400static ssize_t amdgpu_get_unique_id(struct device *dev, 1401 struct device_attribute *attr, 1402 char *buf) 1403{ 1404 struct drm_device *ddev = dev_get_drvdata(dev); 1405 struct amdgpu_device *adev = ddev->dev_private; 1406 1407 if (adev->unique_id) 1408 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); 1409 1410 return 0; 1411} 1412 1413static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 1414static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 1415 amdgpu_get_dpm_forced_performance_level, 1416 amdgpu_set_dpm_forced_performance_level); 1417static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 1418static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 1419static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 1420 amdgpu_get_pp_force_state, 1421 amdgpu_set_pp_force_state); 1422static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 1423 amdgpu_get_pp_table, 1424 amdgpu_set_pp_table); 1425static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 1426 amdgpu_get_pp_dpm_sclk, 1427 amdgpu_set_pp_dpm_sclk); 1428static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 1429 amdgpu_get_pp_dpm_mclk, 1430 amdgpu_set_pp_dpm_mclk); 1431static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR, 1432 amdgpu_get_pp_dpm_socclk, 1433 amdgpu_set_pp_dpm_socclk); 1434static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR, 1435 amdgpu_get_pp_dpm_fclk, 1436 amdgpu_set_pp_dpm_fclk); 1437static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR, 1438 amdgpu_get_pp_dpm_dcefclk, 1439 amdgpu_set_pp_dpm_dcefclk); 1440static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 1441 amdgpu_get_pp_dpm_pcie, 1442 amdgpu_set_pp_dpm_pcie); 1443static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, 1444 amdgpu_get_pp_sclk_od, 1445 amdgpu_set_pp_sclk_od); 1446static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 1447 amdgpu_get_pp_mclk_od, 1448 amdgpu_set_pp_mclk_od); 1449static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR, 1450 amdgpu_get_pp_power_profile_mode, 1451 amdgpu_set_pp_power_profile_mode); 1452static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR, 1453 amdgpu_get_pp_od_clk_voltage, 1454 amdgpu_set_pp_od_clk_voltage); 1455static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, 1456 amdgpu_get_busy_percent, NULL); 1457static DEVICE_ATTR(mem_busy_percent, S_IRUGO, 1458 amdgpu_get_memory_busy_percent, NULL); 1459static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL); 1460static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR, 1461 amdgpu_get_pp_feature_status, 1462 amdgpu_set_pp_feature_status); 1463static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL); 1464 1465static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 1466 struct device_attribute *attr, 1467 char *buf) 1468{ 1469 struct amdgpu_device *adev = dev_get_drvdata(dev); 1470 struct drm_device *ddev = adev->ddev; 1471 int channel = to_sensor_dev_attr(attr)->index; 1472 int r, temp = 0, size = sizeof(temp); 1473 1474 /* Can't get temperature when the card is off */ 1475 if ((adev->flags & AMD_IS_PX) && 1476 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1477 return -EINVAL; 1478 1479 if (channel >= PP_TEMP_MAX) 1480 return -EINVAL; 1481 1482 switch (channel) { 1483 case PP_TEMP_JUNCTION: 1484 /* get current junction temperature */ 1485 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 1486 (void *)&temp, &size); 1487 if (r) 1488 return r; 1489 break; 1490 case PP_TEMP_EDGE: 1491 /* get current edge temperature */ 1492 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 1493 (void *)&temp, &size); 1494 if (r) 1495 return r; 1496 break; 1497 case PP_TEMP_MEM: 1498 /* get current memory temperature */ 1499 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 1500 (void *)&temp, &size); 1501 if (r) 1502 return r; 1503 break; 1504 } 1505 1506 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1507} 1508 1509static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 1510 struct device_attribute *attr, 1511 char *buf) 1512{ 1513 struct amdgpu_device *adev = dev_get_drvdata(dev); 1514 int hyst = to_sensor_dev_attr(attr)->index; 1515 int temp; 1516 1517 if (hyst) 1518 temp = adev->pm.dpm.thermal.min_temp; 1519 else 1520 temp = adev->pm.dpm.thermal.max_temp; 1521 1522 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1523} 1524 1525static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 1526 struct device_attribute *attr, 1527 char *buf) 1528{ 1529 struct amdgpu_device *adev = dev_get_drvdata(dev); 1530 int hyst = to_sensor_dev_attr(attr)->index; 1531 int temp; 1532 1533 if (hyst) 1534 temp = adev->pm.dpm.thermal.min_hotspot_temp; 1535 else 1536 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 1537 1538 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1539} 1540 1541static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 1542 struct device_attribute *attr, 1543 char *buf) 1544{ 1545 struct amdgpu_device *adev = dev_get_drvdata(dev); 1546 int hyst = to_sensor_dev_attr(attr)->index; 1547 int temp; 1548 1549 if (hyst) 1550 temp = adev->pm.dpm.thermal.min_mem_temp; 1551 else 1552 temp = adev->pm.dpm.thermal.max_mem_crit_temp; 1553 1554 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1555} 1556 1557static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 1558 struct device_attribute *attr, 1559 char *buf) 1560{ 1561 int channel = to_sensor_dev_attr(attr)->index; 1562 1563 if (channel >= PP_TEMP_MAX) 1564 return -EINVAL; 1565 1566 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); 1567} 1568 1569static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 1570 struct device_attribute *attr, 1571 char *buf) 1572{ 1573 struct amdgpu_device *adev = dev_get_drvdata(dev); 1574 int channel = to_sensor_dev_attr(attr)->index; 1575 int temp = 0; 1576 1577 if (channel >= PP_TEMP_MAX) 1578 return -EINVAL; 1579 1580 switch (channel) { 1581 case PP_TEMP_JUNCTION: 1582 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 1583 break; 1584 case PP_TEMP_EDGE: 1585 temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 1586 break; 1587 case PP_TEMP_MEM: 1588 temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 1589 break; 1590 } 1591 1592 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1593} 1594 1595static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 1596 struct device_attribute *attr, 1597 char *buf) 1598{ 1599 struct amdgpu_device *adev = dev_get_drvdata(dev); 1600 u32 pwm_mode = 0; 1601 if (is_support_sw_smu(adev)) { 1602 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1603 } else { 1604 if (!adev->powerplay.pp_funcs->get_fan_control_mode) 1605 return -EINVAL; 1606 1607 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1608 } 1609 1610 return sprintf(buf, "%i\n", pwm_mode); 1611} 1612 1613static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 1614 struct device_attribute *attr, 1615 const char *buf, 1616 size_t count) 1617{ 1618 struct amdgpu_device *adev = dev_get_drvdata(dev); 1619 int err; 1620 int value; 1621 1622 /* Can't adjust fan when the card is off */ 1623 if ((adev->flags & AMD_IS_PX) && 1624 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1625 return -EINVAL; 1626 1627 err = kstrtoint(buf, 10, &value); 1628 if (err) 1629 return err; 1630 1631 if (is_support_sw_smu(adev)) { 1632 smu_set_fan_control_mode(&adev->smu, value); 1633 } else { 1634 if (!adev->powerplay.pp_funcs->set_fan_control_mode) 1635 return -EINVAL; 1636 1637 amdgpu_dpm_set_fan_control_mode(adev, value); 1638 } 1639 1640 return count; 1641} 1642 1643static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 1644 struct device_attribute *attr, 1645 char *buf) 1646{ 1647 return sprintf(buf, "%i\n", 0); 1648} 1649 1650static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 1651 struct device_attribute *attr, 1652 char *buf) 1653{ 1654 return sprintf(buf, "%i\n", 255); 1655} 1656 1657static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 1658 struct device_attribute *attr, 1659 const char *buf, size_t count) 1660{ 1661 struct amdgpu_device *adev = dev_get_drvdata(dev); 1662 int err; 1663 u32 value; 1664 u32 pwm_mode; 1665 1666 /* Can't adjust fan when the card is off */ 1667 if ((adev->flags & AMD_IS_PX) && 1668 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1669 return -EINVAL; 1670 if (is_support_sw_smu(adev)) 1671 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1672 else 1673 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1674 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 1675 pr_info("manual fan speed control should be enabled first\n"); 1676 return -EINVAL; 1677 } 1678 1679 err = kstrtou32(buf, 10, &value); 1680 if (err) 1681 return err; 1682 1683 value = (value * 100) / 255; 1684 1685 if (is_support_sw_smu(adev)) { 1686 err = smu_set_fan_speed_percent(&adev->smu, value); 1687 if (err) 1688 return err; 1689 } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) { 1690 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 1691 if (err) 1692 return err; 1693 } 1694 1695 return count; 1696} 1697 1698static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 1699 struct device_attribute *attr, 1700 char *buf) 1701{ 1702 struct amdgpu_device *adev = dev_get_drvdata(dev); 1703 int err; 1704 u32 speed = 0; 1705 1706 /* Can't adjust fan when the card is off */ 1707 if ((adev->flags & AMD_IS_PX) && 1708 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1709 return -EINVAL; 1710 1711 if (is_support_sw_smu(adev)) { 1712 err = smu_get_fan_speed_percent(&adev->smu, &speed); 1713 if (err) 1714 return err; 1715 } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) { 1716 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 1717 if (err) 1718 return err; 1719 } 1720 1721 speed = (speed * 255) / 100; 1722 1723 return sprintf(buf, "%i\n", speed); 1724} 1725 1726static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 1727 struct device_attribute *attr, 1728 char *buf) 1729{ 1730 struct amdgpu_device *adev = dev_get_drvdata(dev); 1731 int err; 1732 u32 speed = 0; 1733 1734 /* Can't adjust fan when the card is off */ 1735 if ((adev->flags & AMD_IS_PX) && 1736 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1737 return -EINVAL; 1738 1739 if (is_support_sw_smu(adev)) { 1740 err = smu_get_fan_speed_rpm(&adev->smu, &speed); 1741 if (err) 1742 return err; 1743 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 1744 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 1745 if (err) 1746 return err; 1747 } 1748 1749 return sprintf(buf, "%i\n", speed); 1750} 1751 1752static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 1753 struct device_attribute *attr, 1754 char *buf) 1755{ 1756 struct amdgpu_device *adev = dev_get_drvdata(dev); 1757 u32 min_rpm = 0; 1758 u32 size = sizeof(min_rpm); 1759 int r; 1760 1761 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 1762 (void *)&min_rpm, &size); 1763 if (r) 1764 return r; 1765 1766 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); 1767} 1768 1769static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 1770 struct device_attribute *attr, 1771 char *buf) 1772{ 1773 struct amdgpu_device *adev = dev_get_drvdata(dev); 1774 u32 max_rpm = 0; 1775 u32 size = sizeof(max_rpm); 1776 int r; 1777 1778 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 1779 (void *)&max_rpm, &size); 1780 if (r) 1781 return r; 1782 1783 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); 1784} 1785 1786static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 1787 struct device_attribute *attr, 1788 char *buf) 1789{ 1790 struct amdgpu_device *adev = dev_get_drvdata(dev); 1791 int err; 1792 u32 rpm = 0; 1793 1794 /* Can't adjust fan when the card is off */ 1795 if ((adev->flags & AMD_IS_PX) && 1796 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1797 return -EINVAL; 1798 1799 if (is_support_sw_smu(adev)) { 1800 err = smu_get_fan_speed_rpm(&adev->smu, &rpm); 1801 if (err) 1802 return err; 1803 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 1804 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 1805 if (err) 1806 return err; 1807 } 1808 1809 return sprintf(buf, "%i\n", rpm); 1810} 1811 1812static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, 1813 struct device_attribute *attr, 1814 const char *buf, size_t count) 1815{ 1816 struct amdgpu_device *adev = dev_get_drvdata(dev); 1817 int err; 1818 u32 value; 1819 u32 pwm_mode; 1820 1821 if (is_support_sw_smu(adev)) 1822 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1823 else 1824 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1825 1826 if (pwm_mode != AMD_FAN_CTRL_MANUAL) 1827 return -ENODATA; 1828 1829 /* Can't adjust fan when the card is off */ 1830 if ((adev->flags & AMD_IS_PX) && 1831 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1832 return -EINVAL; 1833 1834 err = kstrtou32(buf, 10, &value); 1835 if (err) 1836 return err; 1837 1838 if (is_support_sw_smu(adev)) { 1839 err = smu_set_fan_speed_rpm(&adev->smu, value); 1840 if (err) 1841 return err; 1842 } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) { 1843 err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 1844 if (err) 1845 return err; 1846 } 1847 1848 return count; 1849} 1850 1851static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 1852 struct device_attribute *attr, 1853 char *buf) 1854{ 1855 struct amdgpu_device *adev = dev_get_drvdata(dev); 1856 u32 pwm_mode = 0; 1857 1858 if (is_support_sw_smu(adev)) { 1859 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1860 } else { 1861 if (!adev->powerplay.pp_funcs->get_fan_control_mode) 1862 return -EINVAL; 1863 1864 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1865 } 1866 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); 1867} 1868 1869static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 1870 struct device_attribute *attr, 1871 const char *buf, 1872 size_t count) 1873{ 1874 struct amdgpu_device *adev = dev_get_drvdata(dev); 1875 int err; 1876 int value; 1877 u32 pwm_mode; 1878 1879 /* Can't adjust fan when the card is off */ 1880 if ((adev->flags & AMD_IS_PX) && 1881 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1882 return -EINVAL; 1883 1884 1885 err = kstrtoint(buf, 10, &value); 1886 if (err) 1887 return err; 1888 1889 if (value == 0) 1890 pwm_mode = AMD_FAN_CTRL_AUTO; 1891 else if (value == 1) 1892 pwm_mode = AMD_FAN_CTRL_MANUAL; 1893 else 1894 return -EINVAL; 1895 1896 if (is_support_sw_smu(adev)) { 1897 smu_set_fan_control_mode(&adev->smu, pwm_mode); 1898 } else { 1899 if (!adev->powerplay.pp_funcs->set_fan_control_mode) 1900 return -EINVAL; 1901 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 1902 } 1903 1904 return count; 1905} 1906 1907static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 1908 struct device_attribute *attr, 1909 char *buf) 1910{ 1911 struct amdgpu_device *adev = dev_get_drvdata(dev); 1912 struct drm_device *ddev = adev->ddev; 1913 u32 vddgfx; 1914 int r, size = sizeof(vddgfx); 1915 1916 /* Can't get voltage when the card is off */ 1917 if ((adev->flags & AMD_IS_PX) && 1918 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1919 return -EINVAL; 1920 1921 /* get the voltage */ 1922 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, 1923 (void *)&vddgfx, &size); 1924 if (r) 1925 return r; 1926 1927 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); 1928} 1929 1930static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 1931 struct device_attribute *attr, 1932 char *buf) 1933{ 1934 return snprintf(buf, PAGE_SIZE, "vddgfx\n"); 1935} 1936 1937static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 1938 struct device_attribute *attr, 1939 char *buf) 1940{ 1941 struct amdgpu_device *adev = dev_get_drvdata(dev); 1942 struct drm_device *ddev = adev->ddev; 1943 u32 vddnb; 1944 int r, size = sizeof(vddnb); 1945 1946 /* only APUs have vddnb */ 1947 if (!(adev->flags & AMD_IS_APU)) 1948 return -EINVAL; 1949 1950 /* Can't get voltage when the card is off */ 1951 if ((adev->flags & AMD_IS_PX) && 1952 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1953 return -EINVAL; 1954 1955 /* get the voltage */ 1956 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, 1957 (void *)&vddnb, &size); 1958 if (r) 1959 return r; 1960 1961 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); 1962} 1963 1964static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 1965 struct device_attribute *attr, 1966 char *buf) 1967{ 1968 return snprintf(buf, PAGE_SIZE, "vddnb\n"); 1969} 1970 1971static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 1972 struct device_attribute *attr, 1973 char *buf) 1974{ 1975 struct amdgpu_device *adev = dev_get_drvdata(dev); 1976 struct drm_device *ddev = adev->ddev; 1977 u32 query = 0; 1978 int r, size = sizeof(u32); 1979 unsigned uw; 1980 1981 /* Can't get power when the card is off */ 1982 if ((adev->flags & AMD_IS_PX) && 1983 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 1984 return -EINVAL; 1985 1986 /* get the voltage */ 1987 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, 1988 (void *)&query, &size); 1989 if (r) 1990 return r; 1991 1992 /* convert to microwatts */ 1993 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 1994 1995 return snprintf(buf, PAGE_SIZE, "%u\n", uw); 1996} 1997 1998static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 1999 struct device_attribute *attr, 2000 char *buf) 2001{ 2002 return sprintf(buf, "%i\n", 0); 2003} 2004 2005static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 2006 struct device_attribute *attr, 2007 char *buf) 2008{ 2009 struct amdgpu_device *adev = dev_get_drvdata(dev); 2010 uint32_t limit = 0; 2011 2012 if (is_support_sw_smu(adev)) { 2013 smu_get_power_limit(&adev->smu, &limit, true); 2014 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2015 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2016 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); 2017 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2018 } else { 2019 return snprintf(buf, PAGE_SIZE, "\n"); 2020 } 2021} 2022 2023static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 2024 struct device_attribute *attr, 2025 char *buf) 2026{ 2027 struct amdgpu_device *adev = dev_get_drvdata(dev); 2028 uint32_t limit = 0; 2029 2030 if (is_support_sw_smu(adev)) { 2031 smu_get_power_limit(&adev->smu, &limit, false); 2032 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2033 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2034 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); 2035 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2036 } else { 2037 return snprintf(buf, PAGE_SIZE, "\n"); 2038 } 2039} 2040 2041 2042static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 2043 struct device_attribute *attr, 2044 const char *buf, 2045 size_t count) 2046{ 2047 struct amdgpu_device *adev = dev_get_drvdata(dev); 2048 int err; 2049 u32 value; 2050 2051 err = kstrtou32(buf, 10, &value); 2052 if (err) 2053 return err; 2054 2055 value = value / 1000000; /* convert to Watt */ 2056 2057 if (is_support_sw_smu(adev)) { 2058 err = smu_set_power_limit(&adev->smu, value); 2059 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { 2060 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); 2061 } else { 2062 err = -EINVAL; 2063 } 2064 2065 if (err) 2066 return err; 2067 2068 return count; 2069} 2070 2071static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 2072 struct device_attribute *attr, 2073 char *buf) 2074{ 2075 struct amdgpu_device *adev = dev_get_drvdata(dev); 2076 struct drm_device *ddev = adev->ddev; 2077 uint32_t sclk; 2078 int r, size = sizeof(sclk); 2079 2080 /* Can't get voltage when the card is off */ 2081 if ((adev->flags & AMD_IS_PX) && 2082 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2083 return -EINVAL; 2084 2085 /* get the sclk */ 2086 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 2087 (void *)&sclk, &size); 2088 if (r) 2089 return r; 2090 2091 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); 2092} 2093 2094static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, 2095 struct device_attribute *attr, 2096 char *buf) 2097{ 2098 return snprintf(buf, PAGE_SIZE, "sclk\n"); 2099} 2100 2101static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 2102 struct device_attribute *attr, 2103 char *buf) 2104{ 2105 struct amdgpu_device *adev = dev_get_drvdata(dev); 2106 struct drm_device *ddev = adev->ddev; 2107 uint32_t mclk; 2108 int r, size = sizeof(mclk); 2109 2110 /* Can't get voltage when the card is off */ 2111 if ((adev->flags & AMD_IS_PX) && 2112 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 2113 return -EINVAL; 2114 2115 /* get the sclk */ 2116 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 2117 (void *)&mclk, &size); 2118 if (r) 2119 return r; 2120 2121 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); 2122} 2123 2124static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 2125 struct device_attribute *attr, 2126 char *buf) 2127{ 2128 return snprintf(buf, PAGE_SIZE, "mclk\n"); 2129} 2130 2131/** 2132 * DOC: hwmon 2133 * 2134 * The amdgpu driver exposes the following sensor interfaces: 2135 * 2136 * - GPU temperature (via the on-die sensor) 2137 * 2138 * - GPU voltage 2139 * 2140 * - Northbridge voltage (APUs only) 2141 * 2142 * - GPU power 2143 * 2144 * - GPU fan 2145 * 2146 * - GPU gfx/compute engine clock 2147 * 2148 * - GPU memory clock (dGPU only) 2149 * 2150 * hwmon interfaces for GPU temperature: 2151 * 2152 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius 2153 * - temp2_input and temp3_input are supported on SOC15 dGPUs only 2154 * 2155 * - temp[1-3]_label: temperature channel label 2156 * - temp2_label and temp3_label are supported on SOC15 dGPUs only 2157 * 2158 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 2159 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 2160 * 2161 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 2162 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only 2163 * 2164 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius 2165 * - these are supported on SOC15 dGPUs only 2166 * 2167 * hwmon interfaces for GPU voltage: 2168 * 2169 * - in0_input: the voltage on the GPU in millivolts 2170 * 2171 * - in1_input: the voltage on the Northbridge in millivolts 2172 * 2173 * hwmon interfaces for GPU power: 2174 * 2175 * - power1_average: average power used by the GPU in microWatts 2176 * 2177 * - power1_cap_min: minimum cap supported in microWatts 2178 * 2179 * - power1_cap_max: maximum cap supported in microWatts 2180 * 2181 * - power1_cap: selected power cap in microWatts 2182 * 2183 * hwmon interfaces for GPU fan: 2184 * 2185 * - pwm1: pulse width modulation fan level (0-255) 2186 * 2187 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 2188 * 2189 * - pwm1_min: pulse width modulation fan control minimum level (0) 2190 * 2191 * - pwm1_max: pulse width modulation fan control maximum level (255) 2192 * 2193 * - fan1_min: an minimum value Unit: revolution/min (RPM) 2194 * 2195 * - fan1_max: an maxmum value Unit: revolution/max (RPM) 2196 * 2197 * - fan1_input: fan speed in RPM 2198 * 2199 * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM) 2200 * 2201 * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable 2202 * 2203 * hwmon interfaces for GPU clocks: 2204 * 2205 * - freq1_input: the gfx/compute clock in hertz 2206 * 2207 * - freq2_input: the memory clock in hertz 2208 * 2209 * You can use hwmon tools like sensors to view this information on your system. 2210 * 2211 */ 2212 2213static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 2214static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 2215static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 2216static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 2217static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 2218static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 2219static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 2220static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 2221static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 2222static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 2223static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 2224static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 2225static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 2226static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 2227static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 2228static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 2229static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 2230static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 2231static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 2232static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 2233static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 2234static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 2235static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 2236static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 2237static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 2238static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 2239static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 2240static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 2241static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 2242static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 2243static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 2244static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 2245static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 2246static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 2247static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 2248static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 2249 2250static struct attribute *hwmon_attributes[] = { 2251 &sensor_dev_attr_temp1_input.dev_attr.attr, 2252 &sensor_dev_attr_temp1_crit.dev_attr.attr, 2253 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 2254 &sensor_dev_attr_temp2_input.dev_attr.attr, 2255 &sensor_dev_attr_temp2_crit.dev_attr.attr, 2256 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 2257 &sensor_dev_attr_temp3_input.dev_attr.attr, 2258 &sensor_dev_attr_temp3_crit.dev_attr.attr, 2259 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 2260 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 2261 &sensor_dev_attr_temp2_emergency.dev_attr.attr, 2262 &sensor_dev_attr_temp3_emergency.dev_attr.attr, 2263 &sensor_dev_attr_temp1_label.dev_attr.attr, 2264 &sensor_dev_attr_temp2_label.dev_attr.attr, 2265 &sensor_dev_attr_temp3_label.dev_attr.attr, 2266 &sensor_dev_attr_pwm1.dev_attr.attr, 2267 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 2268 &sensor_dev_attr_pwm1_min.dev_attr.attr, 2269 &sensor_dev_attr_pwm1_max.dev_attr.attr, 2270 &sensor_dev_attr_fan1_input.dev_attr.attr, 2271 &sensor_dev_attr_fan1_min.dev_attr.attr, 2272 &sensor_dev_attr_fan1_max.dev_attr.attr, 2273 &sensor_dev_attr_fan1_target.dev_attr.attr, 2274 &sensor_dev_attr_fan1_enable.dev_attr.attr, 2275 &sensor_dev_attr_in0_input.dev_attr.attr, 2276 &sensor_dev_attr_in0_label.dev_attr.attr, 2277 &sensor_dev_attr_in1_input.dev_attr.attr, 2278 &sensor_dev_attr_in1_label.dev_attr.attr, 2279 &sensor_dev_attr_power1_average.dev_attr.attr, 2280 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 2281 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 2282 &sensor_dev_attr_power1_cap.dev_attr.attr, 2283 &sensor_dev_attr_freq1_input.dev_attr.attr, 2284 &sensor_dev_attr_freq1_label.dev_attr.attr, 2285 &sensor_dev_attr_freq2_input.dev_attr.attr, 2286 &sensor_dev_attr_freq2_label.dev_attr.attr, 2287 NULL 2288}; 2289 2290static umode_t hwmon_attributes_visible(struct kobject *kobj, 2291 struct attribute *attr, int index) 2292{ 2293 struct device *dev = kobj_to_dev(kobj); 2294 struct amdgpu_device *adev = dev_get_drvdata(dev); 2295 umode_t effective_mode = attr->mode; 2296 2297 /* Skip fan attributes if fan is not present */ 2298 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2299 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2300 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2301 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2302 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2303 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2304 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2305 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2306 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2307 return 0; 2308 2309 /* Skip fan attributes on APU */ 2310 if ((adev->flags & AMD_IS_APU) && 2311 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2312 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2313 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2314 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2315 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2316 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2317 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2318 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2319 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2320 return 0; 2321 2322 /* Skip limit attributes if DPM is not enabled */ 2323 if (!adev->pm.dpm_enabled && 2324 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 2325 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 2326 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2327 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2328 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2329 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2330 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2331 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2332 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2333 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2334 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2335 return 0; 2336 2337 if (!is_support_sw_smu(adev)) { 2338 /* mask fan attributes if we have no bindings for this asic to expose */ 2339 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 2340 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 2341 (!adev->powerplay.pp_funcs->get_fan_control_mode && 2342 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 2343 effective_mode &= ~S_IRUGO; 2344 2345 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 2346 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 2347 (!adev->powerplay.pp_funcs->set_fan_control_mode && 2348 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 2349 effective_mode &= ~S_IWUSR; 2350 } 2351 2352 if (((adev->flags & AMD_IS_APU) || 2353 adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 2354 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 2355 (attr == &sensor_dev_attr_power1_average.dev_attr.attr || 2356 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 2357 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 2358 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 2359 return 0; 2360 2361 if (!is_support_sw_smu(adev)) { 2362 /* hide max/min values if we can't both query and manage the fan */ 2363 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 2364 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 2365 (!adev->powerplay.pp_funcs->set_fan_speed_rpm && 2366 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 2367 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2368 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 2369 return 0; 2370 2371 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && 2372 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 2373 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2374 attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 2375 return 0; 2376 } 2377 2378 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 2379 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 2380 (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 2381 attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 2382 return 0; 2383 2384 /* only APUs have vddnb */ 2385 if (!(adev->flags & AMD_IS_APU) && 2386 (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 2387 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 2388 return 0; 2389 2390 /* no mclk on APUs */ 2391 if ((adev->flags & AMD_IS_APU) && 2392 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 2393 attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 2394 return 0; 2395 2396 /* only SOC15 dGPUs support hotspot and mem temperatures */ 2397 if (((adev->flags & AMD_IS_APU) || 2398 adev->asic_type < CHIP_VEGA10) && 2399 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 2400 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 2401 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || 2402 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 2403 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 2404 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 2405 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || 2406 attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 2407 attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 2408 attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 2409 attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) 2410 return 0; 2411 2412 return effective_mode; 2413} 2414 2415static const struct attribute_group hwmon_attrgroup = { 2416 .attrs = hwmon_attributes, 2417 .is_visible = hwmon_attributes_visible, 2418}; 2419 2420static const struct attribute_group *hwmon_groups[] = { 2421 &hwmon_attrgroup, 2422 NULL 2423}; 2424 2425void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 2426{ 2427 struct amdgpu_device *adev = 2428 container_of(work, struct amdgpu_device, 2429 pm.dpm.thermal.work); 2430 /* switch to the thermal state */ 2431 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 2432 int temp, size = sizeof(temp); 2433 2434 if (!adev->pm.dpm_enabled) 2435 return; 2436 2437 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 2438 (void *)&temp, &size)) { 2439 if (temp < adev->pm.dpm.thermal.min_temp) 2440 /* switch back the user state */ 2441 dpm_state = adev->pm.dpm.user_state; 2442 } else { 2443 if (adev->pm.dpm.thermal.high_to_low) 2444 /* switch back the user state */ 2445 dpm_state = adev->pm.dpm.user_state; 2446 } 2447 mutex_lock(&adev->pm.mutex); 2448 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 2449 adev->pm.dpm.thermal_active = true; 2450 else 2451 adev->pm.dpm.thermal_active = false; 2452 adev->pm.dpm.state = dpm_state; 2453 mutex_unlock(&adev->pm.mutex); 2454 2455 amdgpu_pm_compute_clocks(adev); 2456} 2457 2458static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 2459 enum amd_pm_state_type dpm_state) 2460{ 2461 int i; 2462 struct amdgpu_ps *ps; 2463 u32 ui_class; 2464 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 2465 true : false; 2466 2467 /* check if the vblank period is too short to adjust the mclk */ 2468 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { 2469 if (amdgpu_dpm_vblank_too_short(adev)) 2470 single_display = false; 2471 } 2472 2473 /* certain older asics have a separare 3D performance state, 2474 * so try that first if the user selected performance 2475 */ 2476 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 2477 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 2478 /* balanced states don't exist at the moment */ 2479 if (dpm_state == POWER_STATE_TYPE_BALANCED) 2480 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 2481 2482restart_search: 2483 /* Pick the best power state based on current conditions */ 2484 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2485 ps = &adev->pm.dpm.ps[i]; 2486 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 2487 switch (dpm_state) { 2488 /* user states */ 2489 case POWER_STATE_TYPE_BATTERY: 2490 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 2491 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 2492 if (single_display) 2493 return ps; 2494 } else 2495 return ps; 2496 } 2497 break; 2498 case POWER_STATE_TYPE_BALANCED: 2499 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 2500 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 2501 if (single_display) 2502 return ps; 2503 } else 2504 return ps; 2505 } 2506 break; 2507 case POWER_STATE_TYPE_PERFORMANCE: 2508 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 2509 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 2510 if (single_display) 2511 return ps; 2512 } else 2513 return ps; 2514 } 2515 break; 2516 /* internal states */ 2517 case POWER_STATE_TYPE_INTERNAL_UVD: 2518 if (adev->pm.dpm.uvd_ps) 2519 return adev->pm.dpm.uvd_ps; 2520 else 2521 break; 2522 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 2523 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 2524 return ps; 2525 break; 2526 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 2527 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 2528 return ps; 2529 break; 2530 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 2531 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 2532 return ps; 2533 break; 2534 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 2535 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 2536 return ps; 2537 break; 2538 case POWER_STATE_TYPE_INTERNAL_BOOT: 2539 return adev->pm.dpm.boot_ps; 2540 case POWER_STATE_TYPE_INTERNAL_THERMAL: 2541 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 2542 return ps; 2543 break; 2544 case POWER_STATE_TYPE_INTERNAL_ACPI: 2545 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 2546 return ps; 2547 break; 2548 case POWER_STATE_TYPE_INTERNAL_ULV: 2549 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 2550 return ps; 2551 break; 2552 case POWER_STATE_TYPE_INTERNAL_3DPERF: 2553 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 2554 return ps; 2555 break; 2556 default: 2557 break; 2558 } 2559 } 2560 /* use a fallback state if we didn't match */ 2561 switch (dpm_state) { 2562 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 2563 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 2564 goto restart_search; 2565 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 2566 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 2567 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 2568 if (adev->pm.dpm.uvd_ps) { 2569 return adev->pm.dpm.uvd_ps; 2570 } else { 2571 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 2572 goto restart_search; 2573 } 2574 case POWER_STATE_TYPE_INTERNAL_THERMAL: 2575 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 2576 goto restart_search; 2577 case POWER_STATE_TYPE_INTERNAL_ACPI: 2578 dpm_state = POWER_STATE_TYPE_BATTERY; 2579 goto restart_search; 2580 case POWER_STATE_TYPE_BATTERY: 2581 case POWER_STATE_TYPE_BALANCED: 2582 case POWER_STATE_TYPE_INTERNAL_3DPERF: 2583 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 2584 goto restart_search; 2585 default: 2586 break; 2587 } 2588 2589 return NULL; 2590} 2591 2592static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 2593{ 2594 struct amdgpu_ps *ps; 2595 enum amd_pm_state_type dpm_state; 2596 int ret; 2597 bool equal = false; 2598 2599 /* if dpm init failed */ 2600 if (!adev->pm.dpm_enabled) 2601 return; 2602 2603 if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 2604 /* add other state override checks here */ 2605 if ((!adev->pm.dpm.thermal_active) && 2606 (!adev->pm.dpm.uvd_active)) 2607 adev->pm.dpm.state = adev->pm.dpm.user_state; 2608 } 2609 dpm_state = adev->pm.dpm.state; 2610 2611 ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 2612 if (ps) 2613 adev->pm.dpm.requested_ps = ps; 2614 else 2615 return; 2616 2617 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { 2618 printk("switching from power state:\n"); 2619 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 2620 printk("switching to power state:\n"); 2621 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 2622 } 2623 2624 /* update whether vce is active */ 2625 ps->vce_active = adev->pm.dpm.vce_active; 2626 if (adev->powerplay.pp_funcs->display_configuration_changed) 2627 amdgpu_dpm_display_configuration_changed(adev); 2628 2629 ret = amdgpu_dpm_pre_set_power_state(adev); 2630 if (ret) 2631 return; 2632 2633 if (adev->powerplay.pp_funcs->check_state_equal) { 2634 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) 2635 equal = false; 2636 } 2637 2638 if (equal) 2639 return; 2640 2641 amdgpu_dpm_set_power_state(adev); 2642 amdgpu_dpm_post_set_power_state(adev); 2643 2644 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 2645 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 2646 2647 if (adev->powerplay.pp_funcs->force_performance_level) { 2648 if (adev->pm.dpm.thermal_active) { 2649 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 2650 /* force low perf level for thermal */ 2651 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 2652 /* save the user's level */ 2653 adev->pm.dpm.forced_level = level; 2654 } else { 2655 /* otherwise, user selected level */ 2656 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 2657 } 2658 } 2659} 2660 2661void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 2662{ 2663 int ret = 0; 2664 if (is_support_sw_smu(adev)) { 2665 ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable); 2666 if (ret) 2667 DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n", 2668 enable ? "true" : "false", ret); 2669 } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { 2670 /* enable/disable UVD */ 2671 mutex_lock(&adev->pm.mutex); 2672 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 2673 mutex_unlock(&adev->pm.mutex); 2674 } 2675 /* enable/disable Low Memory PState for UVD (4k videos) */ 2676 if (adev->asic_type == CHIP_STONEY && 2677 adev->uvd.decode_image_width >= WIDTH_4K) { 2678 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2679 2680 if (hwmgr && hwmgr->hwmgr_func && 2681 hwmgr->hwmgr_func->update_nbdpm_pstate) 2682 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 2683 !enable, 2684 true); 2685 } 2686} 2687 2688void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 2689{ 2690 int ret = 0; 2691 if (is_support_sw_smu(adev)) { 2692 ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable); 2693 if (ret) 2694 DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n", 2695 enable ? "true" : "false", ret); 2696 } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { 2697 /* enable/disable VCE */ 2698 mutex_lock(&adev->pm.mutex); 2699 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 2700 mutex_unlock(&adev->pm.mutex); 2701 } 2702} 2703 2704void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 2705{ 2706 int i; 2707 2708 if (adev->powerplay.pp_funcs->print_power_state == NULL) 2709 return; 2710 2711 for (i = 0; i < adev->pm.dpm.num_ps; i++) 2712 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 2713 2714} 2715 2716int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) 2717{ 2718 int ret = 0; 2719 2720 if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) 2721 return ret; 2722 2723 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 2724 if (ret) { 2725 DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 2726 return ret; 2727 } 2728 2729 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 2730 if (ret) { 2731 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 2732 return ret; 2733 } 2734 2735 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 2736 if (ret) { 2737 DRM_ERROR("failed to create device file for dpm state\n"); 2738 return ret; 2739 } 2740 2741 return ret; 2742} 2743 2744void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev) 2745{ 2746 if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) 2747 return; 2748 2749 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 2750 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 2751 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 2752} 2753 2754int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 2755{ 2756 int r; 2757 2758 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { 2759 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); 2760 if (r) { 2761 pr_err("smu firmware loading failed\n"); 2762 return r; 2763 } 2764 *smu_version = adev->pm.fw_version; 2765 } 2766 return 0; 2767} 2768 2769int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2770{ 2771 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2772 int ret; 2773 2774 if (adev->pm.sysfs_initialized) 2775 return 0; 2776 2777 if (adev->pm.dpm_enabled == 0) 2778 return 0; 2779 2780 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 2781 DRIVER_NAME, adev, 2782 hwmon_groups); 2783 if (IS_ERR(adev->pm.int_hwmon_dev)) { 2784 ret = PTR_ERR(adev->pm.int_hwmon_dev); 2785 dev_err(adev->dev, 2786 "Unable to register hwmon device: %d\n", ret); 2787 return ret; 2788 } 2789 2790 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); 2791 if (ret) { 2792 DRM_ERROR("failed to create device file for dpm state\n"); 2793 return ret; 2794 } 2795 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 2796 if (ret) { 2797 DRM_ERROR("failed to create device file for dpm state\n"); 2798 return ret; 2799 } 2800 2801 2802 ret = device_create_file(adev->dev, &dev_attr_pp_num_states); 2803 if (ret) { 2804 DRM_ERROR("failed to create device file pp_num_states\n"); 2805 return ret; 2806 } 2807 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); 2808 if (ret) { 2809 DRM_ERROR("failed to create device file pp_cur_state\n"); 2810 return ret; 2811 } 2812 ret = device_create_file(adev->dev, &dev_attr_pp_force_state); 2813 if (ret) { 2814 DRM_ERROR("failed to create device file pp_force_state\n"); 2815 return ret; 2816 } 2817 ret = device_create_file(adev->dev, &dev_attr_pp_table); 2818 if (ret) { 2819 DRM_ERROR("failed to create device file pp_table\n"); 2820 return ret; 2821 } 2822 2823 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 2824 if (ret) { 2825 DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 2826 return ret; 2827 } 2828 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 2829 if (ret) { 2830 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 2831 return ret; 2832 } 2833 if (adev->asic_type >= CHIP_VEGA10) { 2834 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); 2835 if (ret) { 2836 DRM_ERROR("failed to create device file pp_dpm_socclk\n"); 2837 return ret; 2838 } 2839 if (adev->asic_type != CHIP_ARCTURUS) { 2840 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); 2841 if (ret) { 2842 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n"); 2843 return ret; 2844 } 2845 } 2846 } 2847 if (adev->asic_type >= CHIP_VEGA20) { 2848 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk); 2849 if (ret) { 2850 DRM_ERROR("failed to create device file pp_dpm_fclk\n"); 2851 return ret; 2852 } 2853 } 2854 if (adev->asic_type != CHIP_ARCTURUS) { 2855 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 2856 if (ret) { 2857 DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 2858 return ret; 2859 } 2860 } 2861 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); 2862 if (ret) { 2863 DRM_ERROR("failed to create device file pp_sclk_od\n"); 2864 return ret; 2865 } 2866 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); 2867 if (ret) { 2868 DRM_ERROR("failed to create device file pp_mclk_od\n"); 2869 return ret; 2870 } 2871 ret = device_create_file(adev->dev, 2872 &dev_attr_pp_power_profile_mode); 2873 if (ret) { 2874 DRM_ERROR("failed to create device file " 2875 "pp_power_profile_mode\n"); 2876 return ret; 2877 } 2878 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 2879 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) { 2880 ret = device_create_file(adev->dev, 2881 &dev_attr_pp_od_clk_voltage); 2882 if (ret) { 2883 DRM_ERROR("failed to create device file " 2884 "pp_od_clk_voltage\n"); 2885 return ret; 2886 } 2887 } 2888 ret = device_create_file(adev->dev, 2889 &dev_attr_gpu_busy_percent); 2890 if (ret) { 2891 DRM_ERROR("failed to create device file " 2892 "gpu_busy_level\n"); 2893 return ret; 2894 } 2895 /* APU does not have its own dedicated memory */ 2896 if (!(adev->flags & AMD_IS_APU) && 2897 (adev->asic_type != CHIP_VEGA10)) { 2898 ret = device_create_file(adev->dev, 2899 &dev_attr_mem_busy_percent); 2900 if (ret) { 2901 DRM_ERROR("failed to create device file " 2902 "mem_busy_percent\n"); 2903 return ret; 2904 } 2905 } 2906 /* PCIe Perf counters won't work on APU nodes */ 2907 if (!(adev->flags & AMD_IS_APU)) { 2908 ret = device_create_file(adev->dev, &dev_attr_pcie_bw); 2909 if (ret) { 2910 DRM_ERROR("failed to create device file pcie_bw\n"); 2911 return ret; 2912 } 2913 } 2914 if (adev->unique_id) 2915 ret = device_create_file(adev->dev, &dev_attr_unique_id); 2916 if (ret) { 2917 DRM_ERROR("failed to create device file unique_id\n"); 2918 return ret; 2919 } 2920 ret = amdgpu_debugfs_pm_init(adev); 2921 if (ret) { 2922 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 2923 return ret; 2924 } 2925 2926 if ((adev->asic_type >= CHIP_VEGA10) && 2927 !(adev->flags & AMD_IS_APU)) { 2928 ret = device_create_file(adev->dev, 2929 &dev_attr_pp_features); 2930 if (ret) { 2931 DRM_ERROR("failed to create device file " 2932 "pp_features\n"); 2933 return ret; 2934 } 2935 } 2936 2937 adev->pm.sysfs_initialized = true; 2938 2939 return 0; 2940} 2941 2942void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 2943{ 2944 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 2945 2946 if (adev->pm.dpm_enabled == 0) 2947 return; 2948 2949 if (adev->pm.int_hwmon_dev) 2950 hwmon_device_unregister(adev->pm.int_hwmon_dev); 2951 device_remove_file(adev->dev, &dev_attr_power_dpm_state); 2952 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 2953 2954 device_remove_file(adev->dev, &dev_attr_pp_num_states); 2955 device_remove_file(adev->dev, &dev_attr_pp_cur_state); 2956 device_remove_file(adev->dev, &dev_attr_pp_force_state); 2957 device_remove_file(adev->dev, &dev_attr_pp_table); 2958 2959 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 2960 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 2961 if (adev->asic_type >= CHIP_VEGA10) { 2962 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk); 2963 if (adev->asic_type != CHIP_ARCTURUS) 2964 device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk); 2965 } 2966 if (adev->asic_type != CHIP_ARCTURUS) 2967 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 2968 if (adev->asic_type >= CHIP_VEGA20) 2969 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk); 2970 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 2971 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 2972 device_remove_file(adev->dev, 2973 &dev_attr_pp_power_profile_mode); 2974 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 2975 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 2976 device_remove_file(adev->dev, 2977 &dev_attr_pp_od_clk_voltage); 2978 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 2979 if (!(adev->flags & AMD_IS_APU) && 2980 (adev->asic_type != CHIP_VEGA10)) 2981 device_remove_file(adev->dev, &dev_attr_mem_busy_percent); 2982 if (!(adev->flags & AMD_IS_APU)) 2983 device_remove_file(adev->dev, &dev_attr_pcie_bw); 2984 if (adev->unique_id) 2985 device_remove_file(adev->dev, &dev_attr_unique_id); 2986 if ((adev->asic_type >= CHIP_VEGA10) && 2987 !(adev->flags & AMD_IS_APU)) 2988 device_remove_file(adev->dev, &dev_attr_pp_features); 2989} 2990 2991void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 2992{ 2993 int i = 0; 2994 2995 if (!adev->pm.dpm_enabled) 2996 return; 2997 2998 if (adev->mode_info.num_crtc) 2999 amdgpu_display_bandwidth_update(adev); 3000 3001 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3002 struct amdgpu_ring *ring = adev->rings[i]; 3003 if (ring && ring->sched.ready) 3004 amdgpu_fence_wait_empty(ring); 3005 } 3006 3007 if (is_support_sw_smu(adev)) { 3008 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; 3009 smu_handle_task(&adev->smu, 3010 smu_dpm->dpm_level, 3011 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE); 3012 } else { 3013 if (adev->powerplay.pp_funcs->dispatch_tasks) { 3014 if (!amdgpu_device_has_dc_support(adev)) { 3015 mutex_lock(&adev->pm.mutex); 3016 amdgpu_dpm_get_active_displays(adev); 3017 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 3018 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 3019 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 3020 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ 3021 if (adev->pm.pm_display_cfg.vrefresh > 120) 3022 adev->pm.pm_display_cfg.min_vblank_time = 0; 3023 if (adev->powerplay.pp_funcs->display_configuration_change) 3024 adev->powerplay.pp_funcs->display_configuration_change( 3025 adev->powerplay.pp_handle, 3026 &adev->pm.pm_display_cfg); 3027 mutex_unlock(&adev->pm.mutex); 3028 } 3029 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 3030 } else { 3031 mutex_lock(&adev->pm.mutex); 3032 amdgpu_dpm_get_active_displays(adev); 3033 amdgpu_dpm_change_power_state_locked(adev); 3034 mutex_unlock(&adev->pm.mutex); 3035 } 3036 } 3037} 3038 3039/* 3040 * Debugfs info 3041 */ 3042#if defined(CONFIG_DEBUG_FS) 3043 3044static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 3045{ 3046 uint32_t value; 3047 uint64_t value64; 3048 uint32_t query = 0; 3049 int size; 3050 3051 /* GPU Clocks */ 3052 size = sizeof(value); 3053 seq_printf(m, "GFX Clocks and Power:\n"); 3054 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 3055 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 3056 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 3057 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 3058 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 3059 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 3060 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 3061 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 3062 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 3063 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 3064 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 3065 seq_printf(m, "\t%u mV (VDDNB)\n", value); 3066 size = sizeof(uint32_t); 3067 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) 3068 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); 3069 size = sizeof(value); 3070 seq_printf(m, "\n"); 3071 3072 /* GPU Temp */ 3073 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 3074 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 3075 3076 /* GPU Load */ 3077 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 3078 seq_printf(m, "GPU Load: %u %%\n", value); 3079 /* MEM Load */ 3080 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) 3081 seq_printf(m, "MEM Load: %u %%\n", value); 3082 3083 seq_printf(m, "\n"); 3084 3085 /* SMC feature mask */ 3086 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 3087 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 3088 3089 if (adev->asic_type > CHIP_VEGA20) { 3090 /* VCN clocks */ 3091 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { 3092 if (!value) { 3093 seq_printf(m, "VCN: Disabled\n"); 3094 } else { 3095 seq_printf(m, "VCN: Enabled\n"); 3096 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3097 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3098 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3099 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3100 } 3101 } 3102 seq_printf(m, "\n"); 3103 } else { 3104 /* UVD clocks */ 3105 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 3106 if (!value) { 3107 seq_printf(m, "UVD: Disabled\n"); 3108 } else { 3109 seq_printf(m, "UVD: Enabled\n"); 3110 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3111 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3112 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3113 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3114 } 3115 } 3116 seq_printf(m, "\n"); 3117 3118 /* VCE clocks */ 3119 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 3120 if (!value) { 3121 seq_printf(m, "VCE: Disabled\n"); 3122 } else { 3123 seq_printf(m, "VCE: Enabled\n"); 3124 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 3125 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 3126 } 3127 } 3128 } 3129 3130 return 0; 3131} 3132 3133static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 3134{ 3135 int i; 3136 3137 for (i = 0; clocks[i].flag; i++) 3138 seq_printf(m, "\t%s: %s\n", clocks[i].name, 3139 (flags & clocks[i].flag) ? "On" : "Off"); 3140} 3141 3142static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 3143{ 3144 struct drm_info_node *node = (struct drm_info_node *) m->private; 3145 struct drm_device *dev = node->minor->dev; 3146 struct amdgpu_device *adev = dev->dev_private; 3147 struct drm_device *ddev = adev->ddev; 3148 u32 flags = 0; 3149 3150 amdgpu_device_ip_get_clockgating_state(adev, &flags); 3151 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 3152 amdgpu_parse_cg_state(m, flags); 3153 seq_printf(m, "\n"); 3154 3155 if (!adev->pm.dpm_enabled) { 3156 seq_printf(m, "dpm not enabled\n"); 3157 return 0; 3158 } 3159 if ((adev->flags & AMD_IS_PX) && 3160 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 3161 seq_printf(m, "PX asic powered off\n"); 3162 } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3163 mutex_lock(&adev->pm.mutex); 3164 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 3165 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 3166 else 3167 seq_printf(m, "Debugfs support not implemented for this asic\n"); 3168 mutex_unlock(&adev->pm.mutex); 3169 } else { 3170 return amdgpu_debugfs_pm_info_pp(m, adev); 3171 } 3172 3173 return 0; 3174} 3175 3176static const struct drm_info_list amdgpu_pm_info_list[] = { 3177 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 3178}; 3179#endif 3180 3181static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 3182{ 3183#if defined(CONFIG_DEBUG_FS) 3184 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 3185#else 3186 return 0; 3187#endif 3188}