Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.8-rc6 3718 lines 106 kB view raw
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"

/* Horizontal resolution of a 4K (UHD) mode, in pixels. */
#define WIDTH_4K 3840

/*
 * Human-readable names for the AMD_CG_SUPPORT_* clockgating feature flags.
 * The {0, NULL} entry terminates the table.
 */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

/* Maps hwmon temperature channels to their sysfs label strings. */
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

/*
 * amdgpu_pm_acpi_event_handler - react to an ACPI AC/DC power-source event.
 *
 * Re-samples whether the system runs on AC power, propagates the result to
 * the BAPM hook (if the powerplay backend provides one) under pm.mutex, and
 * notifies the SW SMU (which takes its own locking, hence called outside
 * the mutex).
 */
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
	}
}

/*
 * amdgpu_dpm_read_sensor - read one power/thermal sensor.
 *
 * Dispatches to the SW SMU when present, otherwise to the powerplay
 * backend's read_sensor hook.  @size is both the capacity of @data on
 * entry and the number of bytes written on success (in/out, per the
 * backend contract).  Returns 0 on success or a negative errno;
 * -EINVAL when no backend implements the query.
 */
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility.
The amdgpu driver provides a sysfs API for adjusting 128 * certain power related parameters. The file power_dpm_state is used for this. 129 * It accepts the following arguments: 130 * 131 * - battery 132 * 133 * - balanced 134 * 135 * - performance 136 * 137 * battery 138 * 139 * On older GPUs, the vbios provided a special power state for battery 140 * operation. Selecting battery switched to this state. This is no 141 * longer provided on newer GPUs so the option does nothing in that case. 142 * 143 * balanced 144 * 145 * On older GPUs, the vbios provided a special power state for balanced 146 * operation. Selecting balanced switched to this state. This is no 147 * longer provided on newer GPUs so the option does nothing in that case. 148 * 149 * performance 150 * 151 * On older GPUs, the vbios provided a special power state for performance 152 * operation. Selecting performance switched to this state. This is no 153 * longer provided on newer GPUs so the option does nothing in that case. 154 * 155 */ 156 157static ssize_t amdgpu_get_power_dpm_state(struct device *dev, 158 struct device_attribute *attr, 159 char *buf) 160{ 161 struct drm_device *ddev = dev_get_drvdata(dev); 162 struct amdgpu_device *adev = ddev->dev_private; 163 enum amd_pm_state_type pm; 164 int ret; 165 166 if (adev->in_gpu_reset) 167 return -EPERM; 168 169 ret = pm_runtime_get_sync(ddev->dev); 170 if (ret < 0) 171 return ret; 172 173 if (is_support_sw_smu(adev)) { 174 if (adev->smu.ppt_funcs->get_current_power_state) 175 pm = smu_get_current_power_state(&adev->smu); 176 else 177 pm = adev->pm.dpm.user_state; 178 } else if (adev->powerplay.pp_funcs->get_current_power_state) { 179 pm = amdgpu_dpm_get_current_power_state(adev); 180 } else { 181 pm = adev->pm.dpm.user_state; 182 } 183 184 pm_runtime_mark_last_busy(ddev->dev); 185 pm_runtime_put_autosuspend(ddev->dev); 186 187 return snprintf(buf, PAGE_SIZE, "%s\n", 188 (pm == POWER_STATE_TYPE_BATTERY) ? 
"battery" : 189 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 190} 191 192static ssize_t amdgpu_set_power_dpm_state(struct device *dev, 193 struct device_attribute *attr, 194 const char *buf, 195 size_t count) 196{ 197 struct drm_device *ddev = dev_get_drvdata(dev); 198 struct amdgpu_device *adev = ddev->dev_private; 199 enum amd_pm_state_type state; 200 int ret; 201 202 if (adev->in_gpu_reset) 203 return -EPERM; 204 205 if (strncmp("battery", buf, strlen("battery")) == 0) 206 state = POWER_STATE_TYPE_BATTERY; 207 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 208 state = POWER_STATE_TYPE_BALANCED; 209 else if (strncmp("performance", buf, strlen("performance")) == 0) 210 state = POWER_STATE_TYPE_PERFORMANCE; 211 else 212 return -EINVAL; 213 214 ret = pm_runtime_get_sync(ddev->dev); 215 if (ret < 0) 216 return ret; 217 218 if (is_support_sw_smu(adev)) { 219 mutex_lock(&adev->pm.mutex); 220 adev->pm.dpm.user_state = state; 221 mutex_unlock(&adev->pm.mutex); 222 } else if (adev->powerplay.pp_funcs->dispatch_tasks) { 223 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 224 } else { 225 mutex_lock(&adev->pm.mutex); 226 adev->pm.dpm.user_state = state; 227 mutex_unlock(&adev->pm.mutex); 228 229 amdgpu_pm_compute_clocks(adev); 230 } 231 pm_runtime_mark_last_busy(ddev->dev); 232 pm_runtime_put_autosuspend(ddev->dev); 233 234 return count; 235} 236 237 238/** 239 * DOC: power_dpm_force_performance_level 240 * 241 * The amdgpu driver provides a sysfs API for adjusting certain power 242 * related parameters. The file power_dpm_force_performance_level is 243 * used for this. 
It accepts the following arguments: 244 * 245 * - auto 246 * 247 * - low 248 * 249 * - high 250 * 251 * - manual 252 * 253 * - profile_standard 254 * 255 * - profile_min_sclk 256 * 257 * - profile_min_mclk 258 * 259 * - profile_peak 260 * 261 * auto 262 * 263 * When auto is selected, the driver will attempt to dynamically select 264 * the optimal power profile for current conditions in the driver. 265 * 266 * low 267 * 268 * When low is selected, the clocks are forced to the lowest power state. 269 * 270 * high 271 * 272 * When high is selected, the clocks are forced to the highest power state. 273 * 274 * manual 275 * 276 * When manual is selected, the user can manually adjust which power states 277 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 278 * and pp_dpm_pcie files and adjust the power state transition heuristics 279 * via the pp_power_profile_mode sysfs file. 280 * 281 * profile_standard 282 * profile_min_sclk 283 * profile_min_mclk 284 * profile_peak 285 * 286 * When the profiling modes are selected, clock and power gating are 287 * disabled and the clocks are set for different profiling cases. This 288 * mode is recommended for profiling specific work loads where you do 289 * not want clock or power gating for clock fluctuation to interfere 290 * with your results. profile_standard sets the clocks to a fixed clock 291 * level which varies from asic to asic. profile_min_sclk forces the sclk 292 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 293 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 
294 * 295 */ 296 297static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, 298 struct device_attribute *attr, 299 char *buf) 300{ 301 struct drm_device *ddev = dev_get_drvdata(dev); 302 struct amdgpu_device *adev = ddev->dev_private; 303 enum amd_dpm_forced_level level = 0xff; 304 int ret; 305 306 if (adev->in_gpu_reset) 307 return -EPERM; 308 309 ret = pm_runtime_get_sync(ddev->dev); 310 if (ret < 0) 311 return ret; 312 313 if (is_support_sw_smu(adev)) 314 level = smu_get_performance_level(&adev->smu); 315 else if (adev->powerplay.pp_funcs->get_performance_level) 316 level = amdgpu_dpm_get_performance_level(adev); 317 else 318 level = adev->pm.dpm.forced_level; 319 320 pm_runtime_mark_last_busy(ddev->dev); 321 pm_runtime_put_autosuspend(ddev->dev); 322 323 return snprintf(buf, PAGE_SIZE, "%s\n", 324 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 325 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 326 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 327 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 328 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 329 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 330 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 331 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? 
"profile_peak" : 332 "unknown"); 333} 334 335static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, 336 struct device_attribute *attr, 337 const char *buf, 338 size_t count) 339{ 340 struct drm_device *ddev = dev_get_drvdata(dev); 341 struct amdgpu_device *adev = ddev->dev_private; 342 enum amd_dpm_forced_level level; 343 enum amd_dpm_forced_level current_level = 0xff; 344 int ret = 0; 345 346 if (adev->in_gpu_reset) 347 return -EPERM; 348 349 if (strncmp("low", buf, strlen("low")) == 0) { 350 level = AMD_DPM_FORCED_LEVEL_LOW; 351 } else if (strncmp("high", buf, strlen("high")) == 0) { 352 level = AMD_DPM_FORCED_LEVEL_HIGH; 353 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 354 level = AMD_DPM_FORCED_LEVEL_AUTO; 355 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 356 level = AMD_DPM_FORCED_LEVEL_MANUAL; 357 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 358 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 359 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 360 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 361 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 362 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 363 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 364 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 365 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 366 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 367 } else { 368 return -EINVAL; 369 } 370 371 ret = pm_runtime_get_sync(ddev->dev); 372 if (ret < 0) 373 return ret; 374 375 if (is_support_sw_smu(adev)) 376 current_level = smu_get_performance_level(&adev->smu); 377 else if (adev->powerplay.pp_funcs->get_performance_level) 378 current_level = amdgpu_dpm_get_performance_level(adev); 379 380 if (current_level == level) { 381 pm_runtime_mark_last_busy(ddev->dev); 382 pm_runtime_put_autosuspend(ddev->dev); 383 return count; 384 } 385 
386 if (adev->asic_type == CHIP_RAVEN) { 387 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { 388 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL) 389 amdgpu_gfx_off_ctrl(adev, false); 390 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL) 391 amdgpu_gfx_off_ctrl(adev, true); 392 } 393 } 394 395 /* profile_exit setting is valid only when current mode is in profile mode */ 396 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 397 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 398 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 399 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && 400 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { 401 pr_err("Currently not in any profile mode!\n"); 402 pm_runtime_mark_last_busy(ddev->dev); 403 pm_runtime_put_autosuspend(ddev->dev); 404 return -EINVAL; 405 } 406 407 if (is_support_sw_smu(adev)) { 408 ret = smu_force_performance_level(&adev->smu, level); 409 if (ret) { 410 pm_runtime_mark_last_busy(ddev->dev); 411 pm_runtime_put_autosuspend(ddev->dev); 412 return -EINVAL; 413 } 414 } else if (adev->powerplay.pp_funcs->force_performance_level) { 415 mutex_lock(&adev->pm.mutex); 416 if (adev->pm.dpm.thermal_active) { 417 mutex_unlock(&adev->pm.mutex); 418 pm_runtime_mark_last_busy(ddev->dev); 419 pm_runtime_put_autosuspend(ddev->dev); 420 return -EINVAL; 421 } 422 ret = amdgpu_dpm_force_performance_level(adev, level); 423 if (ret) { 424 mutex_unlock(&adev->pm.mutex); 425 pm_runtime_mark_last_busy(ddev->dev); 426 pm_runtime_put_autosuspend(ddev->dev); 427 return -EINVAL; 428 } else { 429 adev->pm.dpm.forced_level = level; 430 } 431 mutex_unlock(&adev->pm.mutex); 432 } 433 pm_runtime_mark_last_busy(ddev->dev); 434 pm_runtime_put_autosuspend(ddev->dev); 435 436 return count; 437} 438 439static ssize_t amdgpu_get_pp_num_states(struct device *dev, 440 struct device_attribute *attr, 441 char *buf) 442{ 443 struct drm_device *ddev = dev_get_drvdata(dev); 444 struct 
amdgpu_device *adev = ddev->dev_private; 445 struct pp_states_info data; 446 int i, buf_len, ret; 447 448 if (adev->in_gpu_reset) 449 return -EPERM; 450 451 ret = pm_runtime_get_sync(ddev->dev); 452 if (ret < 0) 453 return ret; 454 455 if (is_support_sw_smu(adev)) { 456 ret = smu_get_power_num_states(&adev->smu, &data); 457 if (ret) 458 return ret; 459 } else if (adev->powerplay.pp_funcs->get_pp_num_states) { 460 amdgpu_dpm_get_pp_num_states(adev, &data); 461 } else { 462 memset(&data, 0, sizeof(data)); 463 } 464 465 pm_runtime_mark_last_busy(ddev->dev); 466 pm_runtime_put_autosuspend(ddev->dev); 467 468 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 469 for (i = 0; i < data.nums; i++) 470 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 471 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 472 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 473 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 474 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? 
"performance" : "default"); 475 476 return buf_len; 477} 478 479static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 480 struct device_attribute *attr, 481 char *buf) 482{ 483 struct drm_device *ddev = dev_get_drvdata(dev); 484 struct amdgpu_device *adev = ddev->dev_private; 485 struct pp_states_info data; 486 struct smu_context *smu = &adev->smu; 487 enum amd_pm_state_type pm = 0; 488 int i = 0, ret = 0; 489 490 if (adev->in_gpu_reset) 491 return -EPERM; 492 493 ret = pm_runtime_get_sync(ddev->dev); 494 if (ret < 0) 495 return ret; 496 497 if (is_support_sw_smu(adev)) { 498 pm = smu_get_current_power_state(smu); 499 ret = smu_get_power_num_states(smu, &data); 500 if (ret) 501 return ret; 502 } else if (adev->powerplay.pp_funcs->get_current_power_state 503 && adev->powerplay.pp_funcs->get_pp_num_states) { 504 pm = amdgpu_dpm_get_current_power_state(adev); 505 amdgpu_dpm_get_pp_num_states(adev, &data); 506 } 507 508 pm_runtime_mark_last_busy(ddev->dev); 509 pm_runtime_put_autosuspend(ddev->dev); 510 511 for (i = 0; i < data.nums; i++) { 512 if (pm == data.states[i]) 513 break; 514 } 515 516 if (i == data.nums) 517 i = -EINVAL; 518 519 return snprintf(buf, PAGE_SIZE, "%d\n", i); 520} 521 522static ssize_t amdgpu_get_pp_force_state(struct device *dev, 523 struct device_attribute *attr, 524 char *buf) 525{ 526 struct drm_device *ddev = dev_get_drvdata(dev); 527 struct amdgpu_device *adev = ddev->dev_private; 528 529 if (adev->in_gpu_reset) 530 return -EPERM; 531 532 if (adev->pp_force_state_enabled) 533 return amdgpu_get_pp_cur_state(dev, attr, buf); 534 else 535 return snprintf(buf, PAGE_SIZE, "\n"); 536} 537 538static ssize_t amdgpu_set_pp_force_state(struct device *dev, 539 struct device_attribute *attr, 540 const char *buf, 541 size_t count) 542{ 543 struct drm_device *ddev = dev_get_drvdata(dev); 544 struct amdgpu_device *adev = ddev->dev_private; 545 enum amd_pm_state_type state = 0; 546 unsigned long idx; 547 int ret; 548 549 if (adev->in_gpu_reset) 550 
return -EPERM; 551 552 if (strlen(buf) == 1) 553 adev->pp_force_state_enabled = false; 554 else if (is_support_sw_smu(adev)) 555 adev->pp_force_state_enabled = false; 556 else if (adev->powerplay.pp_funcs->dispatch_tasks && 557 adev->powerplay.pp_funcs->get_pp_num_states) { 558 struct pp_states_info data; 559 560 ret = kstrtoul(buf, 0, &idx); 561 if (ret || idx >= ARRAY_SIZE(data.states)) 562 return -EINVAL; 563 564 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 565 566 amdgpu_dpm_get_pp_num_states(adev, &data); 567 state = data.states[idx]; 568 569 ret = pm_runtime_get_sync(ddev->dev); 570 if (ret < 0) 571 return ret; 572 573 /* only set user selected power states */ 574 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 575 state != POWER_STATE_TYPE_DEFAULT) { 576 amdgpu_dpm_dispatch_task(adev, 577 AMD_PP_TASK_ENABLE_USER_STATE, &state); 578 adev->pp_force_state_enabled = true; 579 } 580 pm_runtime_mark_last_busy(ddev->dev); 581 pm_runtime_put_autosuspend(ddev->dev); 582 } 583 584 return count; 585} 586 587/** 588 * DOC: pp_table 589 * 590 * The amdgpu driver provides a sysfs API for uploading new powerplay 591 * tables. The file pp_table is used for this. Reading the file 592 * will dump the current power play table. Writing to the file 593 * will attempt to upload a new powerplay table and re-initialize 594 * powerplay using that new table. 
595 * 596 */ 597 598static ssize_t amdgpu_get_pp_table(struct device *dev, 599 struct device_attribute *attr, 600 char *buf) 601{ 602 struct drm_device *ddev = dev_get_drvdata(dev); 603 struct amdgpu_device *adev = ddev->dev_private; 604 char *table = NULL; 605 int size, ret; 606 607 if (adev->in_gpu_reset) 608 return -EPERM; 609 610 ret = pm_runtime_get_sync(ddev->dev); 611 if (ret < 0) 612 return ret; 613 614 if (is_support_sw_smu(adev)) { 615 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 616 pm_runtime_mark_last_busy(ddev->dev); 617 pm_runtime_put_autosuspend(ddev->dev); 618 if (size < 0) 619 return size; 620 } else if (adev->powerplay.pp_funcs->get_pp_table) { 621 size = amdgpu_dpm_get_pp_table(adev, &table); 622 pm_runtime_mark_last_busy(ddev->dev); 623 pm_runtime_put_autosuspend(ddev->dev); 624 if (size < 0) 625 return size; 626 } else { 627 pm_runtime_mark_last_busy(ddev->dev); 628 pm_runtime_put_autosuspend(ddev->dev); 629 return 0; 630 } 631 632 if (size >= PAGE_SIZE) 633 size = PAGE_SIZE - 1; 634 635 memcpy(buf, table, size); 636 637 return size; 638} 639 640static ssize_t amdgpu_set_pp_table(struct device *dev, 641 struct device_attribute *attr, 642 const char *buf, 643 size_t count) 644{ 645 struct drm_device *ddev = dev_get_drvdata(dev); 646 struct amdgpu_device *adev = ddev->dev_private; 647 int ret = 0; 648 649 if (adev->in_gpu_reset) 650 return -EPERM; 651 652 ret = pm_runtime_get_sync(ddev->dev); 653 if (ret < 0) 654 return ret; 655 656 if (is_support_sw_smu(adev)) { 657 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); 658 if (ret) { 659 pm_runtime_mark_last_busy(ddev->dev); 660 pm_runtime_put_autosuspend(ddev->dev); 661 return ret; 662 } 663 } else if (adev->powerplay.pp_funcs->set_pp_table) 664 amdgpu_dpm_set_pp_table(adev, buf, count); 665 666 pm_runtime_mark_last_busy(ddev->dev); 667 pm_runtime_put_autosuspend(ddev->dev); 668 669 return count; 670} 671 672/** 673 * DOC: pp_od_clk_voltage 674 * 675 * The amdgpu driver 
provides a sysfs API for adjusting the clocks and voltages 676 * in each power level within a power state. The pp_od_clk_voltage is used for 677 * this. 678 * 679 * < For Vega10 and previous ASICs > 680 * 681 * Reading the file will display: 682 * 683 * - a list of engine clock levels and voltages labeled OD_SCLK 684 * 685 * - a list of memory clock levels and voltages labeled OD_MCLK 686 * 687 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 688 * 689 * To manually adjust these settings, first select manual using 690 * power_dpm_force_performance_level. Enter a new value for each 691 * level by writing a string that contains "s/m level clock voltage" to 692 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 693 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 694 * 810 mV. When you have edited all of the states as needed, write 695 * "c" (commit) to the file to commit your changes. If you want to reset to the 696 * default power levels, write "r" (reset) to the file to reset them. 697 * 698 * 699 * < For Vega20 and newer ASICs > 700 * 701 * Reading the file will display: 702 * 703 * - minimum and maximum engine clock labeled OD_SCLK 704 * 705 * - maximum memory clock labeled OD_MCLK 706 * 707 * - three <frequency, voltage> points labeled OD_VDDC_CURVE. 708 * They can be used to calibrate the sclk voltage curve. 709 * 710 * - a list of valid ranges for sclk, mclk, and voltage curve points 711 * labeled OD_RANGE 712 * 713 * To manually adjust these settings: 714 * 715 * - First select manual using power_dpm_force_performance_level 716 * 717 * - For clock frequency setting, enter a new value by writing a 718 * string that contains "s/m index clock" to the file. The index 719 * should be 0 if to set minimum clock. And 1 if to set maximum 720 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. 721 * "m 1 800" will update maximum mclk to be 800Mhz. 
722 * 723 * For sclk voltage curve, enter the new values by writing a 724 * string that contains "vc point clock voltage" to the file. The 725 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will 726 * update point1 with clock set as 300Mhz and voltage as 727 * 600mV. "vc 2 1000 1000" will update point3 with clock set 728 * as 1000Mhz and voltage 1000mV. 729 * 730 * - When you have edited all of the states as needed, write "c" (commit) 731 * to the file to commit your changes 732 * 733 * - If you want to reset to the default power levels, write "r" (reset) 734 * to the file to reset them 735 * 736 */ 737 738static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 739 struct device_attribute *attr, 740 const char *buf, 741 size_t count) 742{ 743 struct drm_device *ddev = dev_get_drvdata(dev); 744 struct amdgpu_device *adev = ddev->dev_private; 745 int ret; 746 uint32_t parameter_size = 0; 747 long parameter[64]; 748 char buf_cpy[128]; 749 char *tmp_str; 750 char *sub_str; 751 const char delimiter[3] = {' ', '\n', '\0'}; 752 uint32_t type; 753 754 if (adev->in_gpu_reset) 755 return -EPERM; 756 757 if (count > 127) 758 return -EINVAL; 759 760 if (*buf == 's') 761 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 762 else if (*buf == 'm') 763 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 764 else if(*buf == 'r') 765 type = PP_OD_RESTORE_DEFAULT_TABLE; 766 else if (*buf == 'c') 767 type = PP_OD_COMMIT_DPM_TABLE; 768 else if (!strncmp(buf, "vc", 2)) 769 type = PP_OD_EDIT_VDDC_CURVE; 770 else 771 return -EINVAL; 772 773 memcpy(buf_cpy, buf, count+1); 774 775 tmp_str = buf_cpy; 776 777 if (type == PP_OD_EDIT_VDDC_CURVE) 778 tmp_str++; 779 while (isspace(*++tmp_str)); 780 781 while (tmp_str[0]) { 782 sub_str = strsep(&tmp_str, delimiter); 783 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 784 if (ret) 785 return -EINVAL; 786 parameter_size++; 787 788 while (isspace(*tmp_str)) 789 tmp_str++; 790 } 791 792 ret = pm_runtime_get_sync(ddev->dev); 793 if (ret < 0) 794 return ret; 
795 796 if (is_support_sw_smu(adev)) { 797 ret = smu_od_edit_dpm_table(&adev->smu, type, 798 parameter, parameter_size); 799 800 if (ret) { 801 pm_runtime_mark_last_busy(ddev->dev); 802 pm_runtime_put_autosuspend(ddev->dev); 803 return -EINVAL; 804 } 805 } else { 806 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { 807 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 808 parameter, parameter_size); 809 if (ret) { 810 pm_runtime_mark_last_busy(ddev->dev); 811 pm_runtime_put_autosuspend(ddev->dev); 812 return -EINVAL; 813 } 814 } 815 816 if (type == PP_OD_COMMIT_DPM_TABLE) { 817 if (adev->powerplay.pp_funcs->dispatch_tasks) { 818 amdgpu_dpm_dispatch_task(adev, 819 AMD_PP_TASK_READJUST_POWER_STATE, 820 NULL); 821 pm_runtime_mark_last_busy(ddev->dev); 822 pm_runtime_put_autosuspend(ddev->dev); 823 return count; 824 } else { 825 pm_runtime_mark_last_busy(ddev->dev); 826 pm_runtime_put_autosuspend(ddev->dev); 827 return -EINVAL; 828 } 829 } 830 } 831 pm_runtime_mark_last_busy(ddev->dev); 832 pm_runtime_put_autosuspend(ddev->dev); 833 834 return count; 835} 836 837static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 838 struct device_attribute *attr, 839 char *buf) 840{ 841 struct drm_device *ddev = dev_get_drvdata(dev); 842 struct amdgpu_device *adev = ddev->dev_private; 843 ssize_t size; 844 int ret; 845 846 if (adev->in_gpu_reset) 847 return -EPERM; 848 849 ret = pm_runtime_get_sync(ddev->dev); 850 if (ret < 0) 851 return ret; 852 853 if (is_support_sw_smu(adev)) { 854 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 855 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); 856 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); 857 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); 858 } else if (adev->powerplay.pp_funcs->print_clock_levels) { 859 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 860 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 861 size += 
amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); 862 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 863 } else { 864 size = snprintf(buf, PAGE_SIZE, "\n"); 865 } 866 pm_runtime_mark_last_busy(ddev->dev); 867 pm_runtime_put_autosuspend(ddev->dev); 868 869 return size; 870} 871 872/** 873 * DOC: pp_features 874 * 875 * The amdgpu driver provides a sysfs API for adjusting what powerplay 876 * features to be enabled. The file pp_features is used for this. And 877 * this is only available for Vega10 and later dGPUs. 878 * 879 * Reading back the file will show you the followings: 880 * - Current ppfeature masks 881 * - List of the all supported powerplay features with their naming, 882 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). 883 * 884 * To manually enable or disable a specific feature, just set or clear 885 * the corresponding bit from original ppfeature masks and input the 886 * new ppfeature masks. 887 */ 888static ssize_t amdgpu_set_pp_features(struct device *dev, 889 struct device_attribute *attr, 890 const char *buf, 891 size_t count) 892{ 893 struct drm_device *ddev = dev_get_drvdata(dev); 894 struct amdgpu_device *adev = ddev->dev_private; 895 uint64_t featuremask; 896 int ret; 897 898 if (adev->in_gpu_reset) 899 return -EPERM; 900 901 ret = kstrtou64(buf, 0, &featuremask); 902 if (ret) 903 return -EINVAL; 904 905 pr_debug("featuremask = 0x%llx\n", featuremask); 906 907 ret = pm_runtime_get_sync(ddev->dev); 908 if (ret < 0) 909 return ret; 910 911 if (is_support_sw_smu(adev)) { 912 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); 913 if (ret) { 914 pm_runtime_mark_last_busy(ddev->dev); 915 pm_runtime_put_autosuspend(ddev->dev); 916 return -EINVAL; 917 } 918 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { 919 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 920 if (ret) { 921 pm_runtime_mark_last_busy(ddev->dev); 922 pm_runtime_put_autosuspend(ddev->dev); 923 return 
-EINVAL; 924 } 925 } 926 pm_runtime_mark_last_busy(ddev->dev); 927 pm_runtime_put_autosuspend(ddev->dev); 928 929 return count; 930} 931 932static ssize_t amdgpu_get_pp_features(struct device *dev, 933 struct device_attribute *attr, 934 char *buf) 935{ 936 struct drm_device *ddev = dev_get_drvdata(dev); 937 struct amdgpu_device *adev = ddev->dev_private; 938 ssize_t size; 939 int ret; 940 941 if (adev->in_gpu_reset) 942 return -EPERM; 943 944 ret = pm_runtime_get_sync(ddev->dev); 945 if (ret < 0) 946 return ret; 947 948 if (is_support_sw_smu(adev)) 949 size = smu_sys_get_pp_feature_mask(&adev->smu, buf); 950 else if (adev->powerplay.pp_funcs->get_ppfeature_status) 951 size = amdgpu_dpm_get_ppfeature_status(adev, buf); 952 else 953 size = snprintf(buf, PAGE_SIZE, "\n"); 954 955 pm_runtime_mark_last_busy(ddev->dev); 956 pm_runtime_put_autosuspend(ddev->dev); 957 958 return size; 959} 960 961/** 962 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie 963 * 964 * The amdgpu driver provides a sysfs API for adjusting what power levels 965 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 966 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for 967 * this. 968 * 969 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for 970 * Vega10 and later ASICs. 971 * pp_dpm_fclk interface is only available for Vega20 and later ASICs. 972 * 973 * Reading back the files will show you the available power levels within 974 * the power state and the clock information for those levels. 975 * 976 * To manually adjust these states, first select manual using 977 * power_dpm_force_performance_level. 978 * Secondly, enter a new value for each level by inputing a string that 979 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 980 * E.g., 981 * 982 * .. code-block:: bash 983 * 984 * echo "4 5 6" > pp_dpm_sclk 985 * 986 * will enable sclk levels 4, 5, and 6. 
987 * 988 * NOTE: change to the dcefclk max dpm level is not supported now 989 */ 990 991static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 992 struct device_attribute *attr, 993 char *buf) 994{ 995 struct drm_device *ddev = dev_get_drvdata(dev); 996 struct amdgpu_device *adev = ddev->dev_private; 997 ssize_t size; 998 int ret; 999 1000 if (adev->in_gpu_reset) 1001 return -EPERM; 1002 1003 ret = pm_runtime_get_sync(ddev->dev); 1004 if (ret < 0) 1005 return ret; 1006 1007 if (is_support_sw_smu(adev)) 1008 size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 1009 else if (adev->powerplay.pp_funcs->print_clock_levels) 1010 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 1011 else 1012 size = snprintf(buf, PAGE_SIZE, "\n"); 1013 1014 pm_runtime_mark_last_busy(ddev->dev); 1015 pm_runtime_put_autosuspend(ddev->dev); 1016 1017 return size; 1018} 1019 1020/* 1021 * Worst case: 32 bits individually specified, in octal at 12 characters 1022 * per line (+1 for \n). 1023 */ 1024#define AMDGPU_MASK_BUF_MAX (32 * 13) 1025 1026static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 1027{ 1028 int ret; 1029 long level; 1030 char *sub_str = NULL; 1031 char *tmp; 1032 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 1033 const char delimiter[3] = {' ', '\n', '\0'}; 1034 size_t bytes; 1035 1036 *mask = 0; 1037 1038 bytes = min(count, sizeof(buf_cpy) - 1); 1039 memcpy(buf_cpy, buf, bytes); 1040 buf_cpy[bytes] = '\0'; 1041 tmp = buf_cpy; 1042 while (tmp[0]) { 1043 sub_str = strsep(&tmp, delimiter); 1044 if (strlen(sub_str)) { 1045 ret = kstrtol(sub_str, 0, &level); 1046 if (ret) 1047 return -EINVAL; 1048 *mask |= 1 << level; 1049 } else 1050 break; 1051 } 1052 1053 return 0; 1054} 1055 1056static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 1057 struct device_attribute *attr, 1058 const char *buf, 1059 size_t count) 1060{ 1061 struct drm_device *ddev = dev_get_drvdata(dev); 1062 struct amdgpu_device *adev = ddev->dev_private; 1063 int ret; 1064 
uint32_t mask = 0; 1065 1066 if (adev->in_gpu_reset) 1067 return -EPERM; 1068 1069 ret = amdgpu_read_mask(buf, count, &mask); 1070 if (ret) 1071 return ret; 1072 1073 ret = pm_runtime_get_sync(ddev->dev); 1074 if (ret < 0) 1075 return ret; 1076 1077 if (is_support_sw_smu(adev)) 1078 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); 1079 else if (adev->powerplay.pp_funcs->force_clock_level) 1080 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 1081 1082 pm_runtime_mark_last_busy(ddev->dev); 1083 pm_runtime_put_autosuspend(ddev->dev); 1084 1085 if (ret) 1086 return -EINVAL; 1087 1088 return count; 1089} 1090 1091static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 1092 struct device_attribute *attr, 1093 char *buf) 1094{ 1095 struct drm_device *ddev = dev_get_drvdata(dev); 1096 struct amdgpu_device *adev = ddev->dev_private; 1097 ssize_t size; 1098 int ret; 1099 1100 if (adev->in_gpu_reset) 1101 return -EPERM; 1102 1103 ret = pm_runtime_get_sync(ddev->dev); 1104 if (ret < 0) 1105 return ret; 1106 1107 if (is_support_sw_smu(adev)) 1108 size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 1109 else if (adev->powerplay.pp_funcs->print_clock_levels) 1110 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 1111 else 1112 size = snprintf(buf, PAGE_SIZE, "\n"); 1113 1114 pm_runtime_mark_last_busy(ddev->dev); 1115 pm_runtime_put_autosuspend(ddev->dev); 1116 1117 return size; 1118} 1119 1120static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 1121 struct device_attribute *attr, 1122 const char *buf, 1123 size_t count) 1124{ 1125 struct drm_device *ddev = dev_get_drvdata(dev); 1126 struct amdgpu_device *adev = ddev->dev_private; 1127 uint32_t mask = 0; 1128 int ret; 1129 1130 if (adev->in_gpu_reset) 1131 return -EPERM; 1132 1133 ret = amdgpu_read_mask(buf, count, &mask); 1134 if (ret) 1135 return ret; 1136 1137 ret = pm_runtime_get_sync(ddev->dev); 1138 if (ret < 0) 1139 return ret; 1140 1141 if (is_support_sw_smu(adev)) 1142 ret = 
smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); 1143 else if (adev->powerplay.pp_funcs->force_clock_level) 1144 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 1145 1146 pm_runtime_mark_last_busy(ddev->dev); 1147 pm_runtime_put_autosuspend(ddev->dev); 1148 1149 if (ret) 1150 return -EINVAL; 1151 1152 return count; 1153} 1154 1155static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 1156 struct device_attribute *attr, 1157 char *buf) 1158{ 1159 struct drm_device *ddev = dev_get_drvdata(dev); 1160 struct amdgpu_device *adev = ddev->dev_private; 1161 ssize_t size; 1162 int ret; 1163 1164 if (adev->in_gpu_reset) 1165 return -EPERM; 1166 1167 ret = pm_runtime_get_sync(ddev->dev); 1168 if (ret < 0) 1169 return ret; 1170 1171 if (is_support_sw_smu(adev)) 1172 size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 1173 else if (adev->powerplay.pp_funcs->print_clock_levels) 1174 size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 1175 else 1176 size = snprintf(buf, PAGE_SIZE, "\n"); 1177 1178 pm_runtime_mark_last_busy(ddev->dev); 1179 pm_runtime_put_autosuspend(ddev->dev); 1180 1181 return size; 1182} 1183 1184static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 1185 struct device_attribute *attr, 1186 const char *buf, 1187 size_t count) 1188{ 1189 struct drm_device *ddev = dev_get_drvdata(dev); 1190 struct amdgpu_device *adev = ddev->dev_private; 1191 int ret; 1192 uint32_t mask = 0; 1193 1194 if (adev->in_gpu_reset) 1195 return -EPERM; 1196 1197 ret = amdgpu_read_mask(buf, count, &mask); 1198 if (ret) 1199 return ret; 1200 1201 ret = pm_runtime_get_sync(ddev->dev); 1202 if (ret < 0) 1203 return ret; 1204 1205 if (is_support_sw_smu(adev)) 1206 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); 1207 else if (adev->powerplay.pp_funcs->force_clock_level) 1208 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); 1209 else 1210 ret = 0; 1211 1212 pm_runtime_mark_last_busy(ddev->dev); 1213 
pm_runtime_put_autosuspend(ddev->dev); 1214 1215 if (ret) 1216 return -EINVAL; 1217 1218 return count; 1219} 1220 1221static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1222 struct device_attribute *attr, 1223 char *buf) 1224{ 1225 struct drm_device *ddev = dev_get_drvdata(dev); 1226 struct amdgpu_device *adev = ddev->dev_private; 1227 ssize_t size; 1228 int ret; 1229 1230 if (adev->in_gpu_reset) 1231 return -EPERM; 1232 1233 ret = pm_runtime_get_sync(ddev->dev); 1234 if (ret < 0) 1235 return ret; 1236 1237 if (is_support_sw_smu(adev)) 1238 size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1239 else if (adev->powerplay.pp_funcs->print_clock_levels) 1240 size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1241 else 1242 size = snprintf(buf, PAGE_SIZE, "\n"); 1243 1244 pm_runtime_mark_last_busy(ddev->dev); 1245 pm_runtime_put_autosuspend(ddev->dev); 1246 1247 return size; 1248} 1249 1250static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1251 struct device_attribute *attr, 1252 const char *buf, 1253 size_t count) 1254{ 1255 struct drm_device *ddev = dev_get_drvdata(dev); 1256 struct amdgpu_device *adev = ddev->dev_private; 1257 int ret; 1258 uint32_t mask = 0; 1259 1260 if (adev->in_gpu_reset) 1261 return -EPERM; 1262 1263 ret = amdgpu_read_mask(buf, count, &mask); 1264 if (ret) 1265 return ret; 1266 1267 ret = pm_runtime_get_sync(ddev->dev); 1268 if (ret < 0) 1269 return ret; 1270 1271 if (is_support_sw_smu(adev)) 1272 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); 1273 else if (adev->powerplay.pp_funcs->force_clock_level) 1274 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); 1275 else 1276 ret = 0; 1277 1278 pm_runtime_mark_last_busy(ddev->dev); 1279 pm_runtime_put_autosuspend(ddev->dev); 1280 1281 if (ret) 1282 return -EINVAL; 1283 1284 return count; 1285} 1286 1287static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1288 struct device_attribute *attr, 1289 char *buf) 1290{ 1291 struct drm_device *ddev = 
dev_get_drvdata(dev); 1292 struct amdgpu_device *adev = ddev->dev_private; 1293 ssize_t size; 1294 int ret; 1295 1296 if (adev->in_gpu_reset) 1297 return -EPERM; 1298 1299 ret = pm_runtime_get_sync(ddev->dev); 1300 if (ret < 0) 1301 return ret; 1302 1303 if (is_support_sw_smu(adev)) 1304 size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1305 else if (adev->powerplay.pp_funcs->print_clock_levels) 1306 size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1307 else 1308 size = snprintf(buf, PAGE_SIZE, "\n"); 1309 1310 pm_runtime_mark_last_busy(ddev->dev); 1311 pm_runtime_put_autosuspend(ddev->dev); 1312 1313 return size; 1314} 1315 1316static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1317 struct device_attribute *attr, 1318 const char *buf, 1319 size_t count) 1320{ 1321 struct drm_device *ddev = dev_get_drvdata(dev); 1322 struct amdgpu_device *adev = ddev->dev_private; 1323 int ret; 1324 uint32_t mask = 0; 1325 1326 if (adev->in_gpu_reset) 1327 return -EPERM; 1328 1329 ret = amdgpu_read_mask(buf, count, &mask); 1330 if (ret) 1331 return ret; 1332 1333 ret = pm_runtime_get_sync(ddev->dev); 1334 if (ret < 0) 1335 return ret; 1336 1337 if (is_support_sw_smu(adev)) 1338 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); 1339 else if (adev->powerplay.pp_funcs->force_clock_level) 1340 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); 1341 else 1342 ret = 0; 1343 1344 pm_runtime_mark_last_busy(ddev->dev); 1345 pm_runtime_put_autosuspend(ddev->dev); 1346 1347 if (ret) 1348 return -EINVAL; 1349 1350 return count; 1351} 1352 1353static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1354 struct device_attribute *attr, 1355 char *buf) 1356{ 1357 struct drm_device *ddev = dev_get_drvdata(dev); 1358 struct amdgpu_device *adev = ddev->dev_private; 1359 ssize_t size; 1360 int ret; 1361 1362 if (adev->in_gpu_reset) 1363 return -EPERM; 1364 1365 ret = pm_runtime_get_sync(ddev->dev); 1366 if (ret < 0) 1367 return ret; 1368 1369 if 
(is_support_sw_smu(adev)) 1370 size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1371 else if (adev->powerplay.pp_funcs->print_clock_levels) 1372 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1373 else 1374 size = snprintf(buf, PAGE_SIZE, "\n"); 1375 1376 pm_runtime_mark_last_busy(ddev->dev); 1377 pm_runtime_put_autosuspend(ddev->dev); 1378 1379 return size; 1380} 1381 1382static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1383 struct device_attribute *attr, 1384 const char *buf, 1385 size_t count) 1386{ 1387 struct drm_device *ddev = dev_get_drvdata(dev); 1388 struct amdgpu_device *adev = ddev->dev_private; 1389 int ret; 1390 uint32_t mask = 0; 1391 1392 if (adev->in_gpu_reset) 1393 return -EPERM; 1394 1395 ret = amdgpu_read_mask(buf, count, &mask); 1396 if (ret) 1397 return ret; 1398 1399 ret = pm_runtime_get_sync(ddev->dev); 1400 if (ret < 0) 1401 return ret; 1402 1403 if (is_support_sw_smu(adev)) 1404 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); 1405 else if (adev->powerplay.pp_funcs->force_clock_level) 1406 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 1407 else 1408 ret = 0; 1409 1410 pm_runtime_mark_last_busy(ddev->dev); 1411 pm_runtime_put_autosuspend(ddev->dev); 1412 1413 if (ret) 1414 return -EINVAL; 1415 1416 return count; 1417} 1418 1419static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1420 struct device_attribute *attr, 1421 char *buf) 1422{ 1423 struct drm_device *ddev = dev_get_drvdata(dev); 1424 struct amdgpu_device *adev = ddev->dev_private; 1425 uint32_t value = 0; 1426 int ret; 1427 1428 if (adev->in_gpu_reset) 1429 return -EPERM; 1430 1431 ret = pm_runtime_get_sync(ddev->dev); 1432 if (ret < 0) 1433 return ret; 1434 1435 if (is_support_sw_smu(adev)) 1436 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); 1437 else if (adev->powerplay.pp_funcs->get_sclk_od) 1438 value = amdgpu_dpm_get_sclk_od(adev); 1439 1440 pm_runtime_mark_last_busy(ddev->dev); 1441 
pm_runtime_put_autosuspend(ddev->dev); 1442 1443 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1444} 1445 1446static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1447 struct device_attribute *attr, 1448 const char *buf, 1449 size_t count) 1450{ 1451 struct drm_device *ddev = dev_get_drvdata(dev); 1452 struct amdgpu_device *adev = ddev->dev_private; 1453 int ret; 1454 long int value; 1455 1456 if (adev->in_gpu_reset) 1457 return -EPERM; 1458 1459 ret = kstrtol(buf, 0, &value); 1460 1461 if (ret) 1462 return -EINVAL; 1463 1464 ret = pm_runtime_get_sync(ddev->dev); 1465 if (ret < 0) 1466 return ret; 1467 1468 if (is_support_sw_smu(adev)) { 1469 value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); 1470 } else { 1471 if (adev->powerplay.pp_funcs->set_sclk_od) 1472 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1473 1474 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1475 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1476 } else { 1477 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1478 amdgpu_pm_compute_clocks(adev); 1479 } 1480 } 1481 1482 pm_runtime_mark_last_busy(ddev->dev); 1483 pm_runtime_put_autosuspend(ddev->dev); 1484 1485 return count; 1486} 1487 1488static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 1489 struct device_attribute *attr, 1490 char *buf) 1491{ 1492 struct drm_device *ddev = dev_get_drvdata(dev); 1493 struct amdgpu_device *adev = ddev->dev_private; 1494 uint32_t value = 0; 1495 int ret; 1496 1497 if (adev->in_gpu_reset) 1498 return -EPERM; 1499 1500 ret = pm_runtime_get_sync(ddev->dev); 1501 if (ret < 0) 1502 return ret; 1503 1504 if (is_support_sw_smu(adev)) 1505 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); 1506 else if (adev->powerplay.pp_funcs->get_mclk_od) 1507 value = amdgpu_dpm_get_mclk_od(adev); 1508 1509 pm_runtime_mark_last_busy(ddev->dev); 1510 pm_runtime_put_autosuspend(ddev->dev); 1511 1512 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1513} 1514 1515static 
ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 1516 struct device_attribute *attr, 1517 const char *buf, 1518 size_t count) 1519{ 1520 struct drm_device *ddev = dev_get_drvdata(dev); 1521 struct amdgpu_device *adev = ddev->dev_private; 1522 int ret; 1523 long int value; 1524 1525 if (adev->in_gpu_reset) 1526 return -EPERM; 1527 1528 ret = kstrtol(buf, 0, &value); 1529 1530 if (ret) 1531 return -EINVAL; 1532 1533 ret = pm_runtime_get_sync(ddev->dev); 1534 if (ret < 0) 1535 return ret; 1536 1537 if (is_support_sw_smu(adev)) { 1538 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value); 1539 } else { 1540 if (adev->powerplay.pp_funcs->set_mclk_od) 1541 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 1542 1543 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1544 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1545 } else { 1546 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1547 amdgpu_pm_compute_clocks(adev); 1548 } 1549 } 1550 1551 pm_runtime_mark_last_busy(ddev->dev); 1552 pm_runtime_put_autosuspend(ddev->dev); 1553 1554 return count; 1555} 1556 1557/** 1558 * DOC: pp_power_profile_mode 1559 * 1560 * The amdgpu driver provides a sysfs API for adjusting the heuristics 1561 * related to switching between power levels in a power state. The file 1562 * pp_power_profile_mode is used for this. 1563 * 1564 * Reading this file outputs a list of all of the predefined power profiles 1565 * and the relevant heuristics settings for that profile. 1566 * 1567 * To select a profile or create a custom profile, first select manual using 1568 * power_dpm_force_performance_level. Writing the number of a predefined 1569 * profile to pp_power_profile_mode will enable those heuristics. To 1570 * create a custom set of heuristics, write a string of numbers to the file 1571 * starting with the number of the custom profile along with a setting 1572 * for each heuristic parameter. 
Due to differences across asic families 1573 * the heuristic parameters vary from family to family. 1574 * 1575 */ 1576 1577static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 1578 struct device_attribute *attr, 1579 char *buf) 1580{ 1581 struct drm_device *ddev = dev_get_drvdata(dev); 1582 struct amdgpu_device *adev = ddev->dev_private; 1583 ssize_t size; 1584 int ret; 1585 1586 if (adev->in_gpu_reset) 1587 return -EPERM; 1588 1589 ret = pm_runtime_get_sync(ddev->dev); 1590 if (ret < 0) 1591 return ret; 1592 1593 if (is_support_sw_smu(adev)) 1594 size = smu_get_power_profile_mode(&adev->smu, buf); 1595 else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1596 size = amdgpu_dpm_get_power_profile_mode(adev, buf); 1597 else 1598 size = snprintf(buf, PAGE_SIZE, "\n"); 1599 1600 pm_runtime_mark_last_busy(ddev->dev); 1601 pm_runtime_put_autosuspend(ddev->dev); 1602 1603 return size; 1604} 1605 1606 1607static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 1608 struct device_attribute *attr, 1609 const char *buf, 1610 size_t count) 1611{ 1612 int ret = 0xff; 1613 struct drm_device *ddev = dev_get_drvdata(dev); 1614 struct amdgpu_device *adev = ddev->dev_private; 1615 uint32_t parameter_size = 0; 1616 long parameter[64]; 1617 char *sub_str, buf_cpy[128]; 1618 char *tmp_str; 1619 uint32_t i = 0; 1620 char tmp[2]; 1621 long int profile_mode = 0; 1622 const char delimiter[3] = {' ', '\n', '\0'}; 1623 1624 if (adev->in_gpu_reset) 1625 return -EPERM; 1626 1627 tmp[0] = *(buf); 1628 tmp[1] = '\0'; 1629 ret = kstrtol(tmp, 0, &profile_mode); 1630 if (ret) 1631 return -EINVAL; 1632 1633 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1634 if (count < 2 || count > 127) 1635 return -EINVAL; 1636 while (isspace(*++buf)) 1637 i++; 1638 memcpy(buf_cpy, buf, count-i); 1639 tmp_str = buf_cpy; 1640 while (tmp_str[0]) { 1641 sub_str = strsep(&tmp_str, delimiter); 1642 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 1643 if (ret) 1644 return 
-EINVAL; 1645 parameter_size++; 1646 while (isspace(*tmp_str)) 1647 tmp_str++; 1648 } 1649 } 1650 parameter[parameter_size] = profile_mode; 1651 1652 ret = pm_runtime_get_sync(ddev->dev); 1653 if (ret < 0) 1654 return ret; 1655 1656 if (is_support_sw_smu(adev)) 1657 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); 1658 else if (adev->powerplay.pp_funcs->set_power_profile_mode) 1659 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1660 1661 pm_runtime_mark_last_busy(ddev->dev); 1662 pm_runtime_put_autosuspend(ddev->dev); 1663 1664 if (!ret) 1665 return count; 1666 1667 return -EINVAL; 1668} 1669 1670/** 1671 * DOC: gpu_busy_percent 1672 * 1673 * The amdgpu driver provides a sysfs API for reading how busy the GPU 1674 * is as a percentage. The file gpu_busy_percent is used for this. 1675 * The SMU firmware computes a percentage of load based on the 1676 * aggregate activity level in the IP cores. 1677 */ 1678static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, 1679 struct device_attribute *attr, 1680 char *buf) 1681{ 1682 struct drm_device *ddev = dev_get_drvdata(dev); 1683 struct amdgpu_device *adev = ddev->dev_private; 1684 int r, value, size = sizeof(value); 1685 1686 if (adev->in_gpu_reset) 1687 return -EPERM; 1688 1689 r = pm_runtime_get_sync(ddev->dev); 1690 if (r < 0) 1691 return r; 1692 1693 /* read the IP busy sensor */ 1694 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1695 (void *)&value, &size); 1696 1697 pm_runtime_mark_last_busy(ddev->dev); 1698 pm_runtime_put_autosuspend(ddev->dev); 1699 1700 if (r) 1701 return r; 1702 1703 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1704} 1705 1706/** 1707 * DOC: mem_busy_percent 1708 * 1709 * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1710 * is as a percentage. The file mem_busy_percent is used for this. 
1711 * The SMU firmware computes a percentage of load based on the 1712 * aggregate activity level in the IP cores. 1713 */ 1714static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, 1715 struct device_attribute *attr, 1716 char *buf) 1717{ 1718 struct drm_device *ddev = dev_get_drvdata(dev); 1719 struct amdgpu_device *adev = ddev->dev_private; 1720 int r, value, size = sizeof(value); 1721 1722 if (adev->in_gpu_reset) 1723 return -EPERM; 1724 1725 r = pm_runtime_get_sync(ddev->dev); 1726 if (r < 0) 1727 return r; 1728 1729 /* read the IP busy sensor */ 1730 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, 1731 (void *)&value, &size); 1732 1733 pm_runtime_mark_last_busy(ddev->dev); 1734 pm_runtime_put_autosuspend(ddev->dev); 1735 1736 if (r) 1737 return r; 1738 1739 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1740} 1741 1742/** 1743 * DOC: pcie_bw 1744 * 1745 * The amdgpu driver provides a sysfs API for estimating how much data 1746 * has been received and sent by the GPU in the last second through PCIe. 1747 * The file pcie_bw is used for this. 1748 * The Perf counters count the number of received and sent messages and return 1749 * those values, as well as the maximum payload size of a PCIe packet (mps). 
1750 * Note that it is not possible to easily and quickly obtain the size of each 1751 * packet transmitted, so we output the max payload size (mps) to allow for 1752 * quick estimation of the PCIe bandwidth usage 1753 */ 1754static ssize_t amdgpu_get_pcie_bw(struct device *dev, 1755 struct device_attribute *attr, 1756 char *buf) 1757{ 1758 struct drm_device *ddev = dev_get_drvdata(dev); 1759 struct amdgpu_device *adev = ddev->dev_private; 1760 uint64_t count0 = 0, count1 = 0; 1761 int ret; 1762 1763 if (adev->in_gpu_reset) 1764 return -EPERM; 1765 1766 if (adev->flags & AMD_IS_APU) 1767 return -ENODATA; 1768 1769 if (!adev->asic_funcs->get_pcie_usage) 1770 return -ENODATA; 1771 1772 ret = pm_runtime_get_sync(ddev->dev); 1773 if (ret < 0) 1774 return ret; 1775 1776 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1777 1778 pm_runtime_mark_last_busy(ddev->dev); 1779 pm_runtime_put_autosuspend(ddev->dev); 1780 1781 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 1782 count0, count1, pcie_get_mps(adev->pdev)); 1783} 1784 1785/** 1786 * DOC: unique_id 1787 * 1788 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU 1789 * The file unique_id is used for this. 1790 * This will provide a Unique ID that will persist from machine to machine 1791 * 1792 * NOTE: This will only work for GFX9 and newer. 
This file will be absent 1793 * on unsupported ASICs (GFX8 and older) 1794 */ 1795static ssize_t amdgpu_get_unique_id(struct device *dev, 1796 struct device_attribute *attr, 1797 char *buf) 1798{ 1799 struct drm_device *ddev = dev_get_drvdata(dev); 1800 struct amdgpu_device *adev = ddev->dev_private; 1801 1802 if (adev->in_gpu_reset) 1803 return -EPERM; 1804 1805 if (adev->unique_id) 1806 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); 1807 1808 return 0; 1809} 1810 1811static struct amdgpu_device_attr amdgpu_device_attrs[] = { 1812 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1813 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1814 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC), 1815 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC), 1816 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC), 1817 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC), 1818 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1819 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1820 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1821 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1822 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC), 1823 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC), 1824 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), 1825 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), 1826 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC), 1827 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), 1828 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC), 1829 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC), 1830 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), 1831 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC), 1832 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC), 1833}; 1834 1835static int default_attr_update(struct amdgpu_device *adev, struct 
amdgpu_device_attr *attr, 1836 uint32_t mask, enum amdgpu_device_attr_states *states) 1837{ 1838 struct device_attribute *dev_attr = &attr->dev_attr; 1839 const char *attr_name = dev_attr->attr.name; 1840 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1841 enum amd_asic_type asic_type = adev->asic_type; 1842 1843 if (!(attr->flags & mask)) { 1844 *states = ATTR_STATE_UNSUPPORTED; 1845 return 0; 1846 } 1847 1848#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name)) 1849 1850 if (DEVICE_ATTR_IS(pp_dpm_socclk)) { 1851 if (asic_type < CHIP_VEGA10) 1852 *states = ATTR_STATE_UNSUPPORTED; 1853 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { 1854 if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS) 1855 *states = ATTR_STATE_UNSUPPORTED; 1856 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { 1857 if (asic_type < CHIP_VEGA20) 1858 *states = ATTR_STATE_UNSUPPORTED; 1859 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { 1860 if (asic_type == CHIP_ARCTURUS) 1861 *states = ATTR_STATE_UNSUPPORTED; 1862 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) { 1863 *states = ATTR_STATE_UNSUPPORTED; 1864 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 1865 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 1866 *states = ATTR_STATE_SUPPORTED; 1867 } else if (DEVICE_ATTR_IS(mem_busy_percent)) { 1868 if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10) 1869 *states = ATTR_STATE_UNSUPPORTED; 1870 } else if (DEVICE_ATTR_IS(pcie_bw)) { 1871 /* PCIe Perf counters won't work on APU nodes */ 1872 if (adev->flags & AMD_IS_APU) 1873 *states = ATTR_STATE_UNSUPPORTED; 1874 } else if (DEVICE_ATTR_IS(unique_id)) { 1875 if (!adev->unique_id) 1876 *states = ATTR_STATE_UNSUPPORTED; 1877 } else if (DEVICE_ATTR_IS(pp_features)) { 1878 if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10) 1879 *states = ATTR_STATE_UNSUPPORTED; 1880 } 1881 1882 if (asic_type == CHIP_ARCTURUS) { 1883 /* Arcturus does not support standalone mclk/socclk/fclk level setting */ 1884 if (DEVICE_ATTR_IS(pp_dpm_mclk) || 1885 
DEVICE_ATTR_IS(pp_dpm_socclk) || 1886 DEVICE_ATTR_IS(pp_dpm_fclk)) { 1887 dev_attr->attr.mode &= ~S_IWUGO; 1888 dev_attr->store = NULL; 1889 } 1890 } 1891 1892#undef DEVICE_ATTR_IS 1893 1894 return 0; 1895} 1896 1897 1898static int amdgpu_device_attr_create(struct amdgpu_device *adev, 1899 struct amdgpu_device_attr *attr, 1900 uint32_t mask, struct list_head *attr_list) 1901{ 1902 int ret = 0; 1903 struct device_attribute *dev_attr = &attr->dev_attr; 1904 const char *name = dev_attr->attr.name; 1905 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; 1906 struct amdgpu_device_attr_entry *attr_entry; 1907 1908 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 1909 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; 1910 1911 BUG_ON(!attr); 1912 1913 attr_update = attr->attr_update ? attr_update : default_attr_update; 1914 1915 ret = attr_update(adev, attr, mask, &attr_states); 1916 if (ret) { 1917 dev_err(adev->dev, "failed to update device file %s, ret = %d\n", 1918 name, ret); 1919 return ret; 1920 } 1921 1922 if (attr_states == ATTR_STATE_UNSUPPORTED) 1923 return 0; 1924 1925 ret = device_create_file(adev->dev, dev_attr); 1926 if (ret) { 1927 dev_err(adev->dev, "failed to create device file %s, ret = %d\n", 1928 name, ret); 1929 } 1930 1931 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL); 1932 if (!attr_entry) 1933 return -ENOMEM; 1934 1935 attr_entry->attr = attr; 1936 INIT_LIST_HEAD(&attr_entry->entry); 1937 1938 list_add_tail(&attr_entry->entry, attr_list); 1939 1940 return ret; 1941} 1942 1943static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr) 1944{ 1945 struct device_attribute *dev_attr = &attr->dev_attr; 1946 1947 device_remove_file(adev->dev, dev_attr); 1948} 1949 1950static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 1951 struct list_head *attr_list); 1952 1953static int amdgpu_device_attr_create_groups(struct 
amdgpu_device *adev, 1954 struct amdgpu_device_attr *attrs, 1955 uint32_t counts, 1956 uint32_t mask, 1957 struct list_head *attr_list) 1958{ 1959 int ret = 0; 1960 uint32_t i = 0; 1961 1962 for (i = 0; i < counts; i++) { 1963 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list); 1964 if (ret) 1965 goto failed; 1966 } 1967 1968 return 0; 1969 1970failed: 1971 amdgpu_device_attr_remove_groups(adev, attr_list); 1972 1973 return ret; 1974} 1975 1976static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 1977 struct list_head *attr_list) 1978{ 1979 struct amdgpu_device_attr_entry *entry, *entry_tmp; 1980 1981 if (list_empty(attr_list)) 1982 return ; 1983 1984 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) { 1985 amdgpu_device_attr_remove(adev, entry->attr); 1986 list_del(&entry->entry); 1987 kfree(entry); 1988 } 1989} 1990 1991static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 1992 struct device_attribute *attr, 1993 char *buf) 1994{ 1995 struct amdgpu_device *adev = dev_get_drvdata(dev); 1996 int channel = to_sensor_dev_attr(attr)->index; 1997 int r, temp = 0, size = sizeof(temp); 1998 1999 if (adev->in_gpu_reset) 2000 return -EPERM; 2001 2002 if (channel >= PP_TEMP_MAX) 2003 return -EINVAL; 2004 2005 r = pm_runtime_get_sync(adev->ddev->dev); 2006 if (r < 0) 2007 return r; 2008 2009 switch (channel) { 2010 case PP_TEMP_JUNCTION: 2011 /* get current junction temperature */ 2012 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 2013 (void *)&temp, &size); 2014 break; 2015 case PP_TEMP_EDGE: 2016 /* get current edge temperature */ 2017 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 2018 (void *)&temp, &size); 2019 break; 2020 case PP_TEMP_MEM: 2021 /* get current memory temperature */ 2022 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 2023 (void *)&temp, &size); 2024 break; 2025 default: 2026 r = -EINVAL; 2027 break; 2028 } 2029 2030 pm_runtime_mark_last_busy(adev->ddev->dev); 
2031 pm_runtime_put_autosuspend(adev->ddev->dev); 2032 2033 if (r) 2034 return r; 2035 2036 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2037} 2038 2039static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 2040 struct device_attribute *attr, 2041 char *buf) 2042{ 2043 struct amdgpu_device *adev = dev_get_drvdata(dev); 2044 int hyst = to_sensor_dev_attr(attr)->index; 2045 int temp; 2046 2047 if (hyst) 2048 temp = adev->pm.dpm.thermal.min_temp; 2049 else 2050 temp = adev->pm.dpm.thermal.max_temp; 2051 2052 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2053} 2054 2055static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 2056 struct device_attribute *attr, 2057 char *buf) 2058{ 2059 struct amdgpu_device *adev = dev_get_drvdata(dev); 2060 int hyst = to_sensor_dev_attr(attr)->index; 2061 int temp; 2062 2063 if (hyst) 2064 temp = adev->pm.dpm.thermal.min_hotspot_temp; 2065 else 2066 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 2067 2068 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2069} 2070 2071static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 2072 struct device_attribute *attr, 2073 char *buf) 2074{ 2075 struct amdgpu_device *adev = dev_get_drvdata(dev); 2076 int hyst = to_sensor_dev_attr(attr)->index; 2077 int temp; 2078 2079 if (hyst) 2080 temp = adev->pm.dpm.thermal.min_mem_temp; 2081 else 2082 temp = adev->pm.dpm.thermal.max_mem_crit_temp; 2083 2084 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2085} 2086 2087static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 2088 struct device_attribute *attr, 2089 char *buf) 2090{ 2091 int channel = to_sensor_dev_attr(attr)->index; 2092 2093 if (channel >= PP_TEMP_MAX) 2094 return -EINVAL; 2095 2096 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); 2097} 2098 2099static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 2100 struct device_attribute *attr, 2101 char *buf) 2102{ 2103 struct amdgpu_device *adev = 
dev_get_drvdata(dev); 2104 int channel = to_sensor_dev_attr(attr)->index; 2105 int temp = 0; 2106 2107 if (channel >= PP_TEMP_MAX) 2108 return -EINVAL; 2109 2110 switch (channel) { 2111 case PP_TEMP_JUNCTION: 2112 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 2113 break; 2114 case PP_TEMP_EDGE: 2115 temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 2116 break; 2117 case PP_TEMP_MEM: 2118 temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 2119 break; 2120 } 2121 2122 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 2123} 2124 2125static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 2126 struct device_attribute *attr, 2127 char *buf) 2128{ 2129 struct amdgpu_device *adev = dev_get_drvdata(dev); 2130 u32 pwm_mode = 0; 2131 int ret; 2132 2133 if (adev->in_gpu_reset) 2134 return -EPERM; 2135 2136 ret = pm_runtime_get_sync(adev->ddev->dev); 2137 if (ret < 0) 2138 return ret; 2139 2140 if (is_support_sw_smu(adev)) { 2141 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2142 } else { 2143 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 2144 pm_runtime_mark_last_busy(adev->ddev->dev); 2145 pm_runtime_put_autosuspend(adev->ddev->dev); 2146 return -EINVAL; 2147 } 2148 2149 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2150 } 2151 2152 pm_runtime_mark_last_busy(adev->ddev->dev); 2153 pm_runtime_put_autosuspend(adev->ddev->dev); 2154 2155 return sprintf(buf, "%i\n", pwm_mode); 2156} 2157 2158static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 2159 struct device_attribute *attr, 2160 const char *buf, 2161 size_t count) 2162{ 2163 struct amdgpu_device *adev = dev_get_drvdata(dev); 2164 int err, ret; 2165 int value; 2166 2167 if (adev->in_gpu_reset) 2168 return -EPERM; 2169 2170 err = kstrtoint(buf, 10, &value); 2171 if (err) 2172 return err; 2173 2174 ret = pm_runtime_get_sync(adev->ddev->dev); 2175 if (ret < 0) 2176 return ret; 2177 2178 if (is_support_sw_smu(adev)) { 2179 smu_set_fan_control_mode(&adev->smu, value); 2180 } else { 
2181 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2182 pm_runtime_mark_last_busy(adev->ddev->dev); 2183 pm_runtime_put_autosuspend(adev->ddev->dev); 2184 return -EINVAL; 2185 } 2186 2187 amdgpu_dpm_set_fan_control_mode(adev, value); 2188 } 2189 2190 pm_runtime_mark_last_busy(adev->ddev->dev); 2191 pm_runtime_put_autosuspend(adev->ddev->dev); 2192 2193 return count; 2194} 2195 2196static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 2197 struct device_attribute *attr, 2198 char *buf) 2199{ 2200 return sprintf(buf, "%i\n", 0); 2201} 2202 2203static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 2204 struct device_attribute *attr, 2205 char *buf) 2206{ 2207 return sprintf(buf, "%i\n", 255); 2208} 2209 2210static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 2211 struct device_attribute *attr, 2212 const char *buf, size_t count) 2213{ 2214 struct amdgpu_device *adev = dev_get_drvdata(dev); 2215 int err; 2216 u32 value; 2217 u32 pwm_mode; 2218 2219 if (adev->in_gpu_reset) 2220 return -EPERM; 2221 2222 err = pm_runtime_get_sync(adev->ddev->dev); 2223 if (err < 0) 2224 return err; 2225 2226 if (is_support_sw_smu(adev)) 2227 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2228 else 2229 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2230 2231 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2232 pr_info("manual fan speed control should be enabled first\n"); 2233 pm_runtime_mark_last_busy(adev->ddev->dev); 2234 pm_runtime_put_autosuspend(adev->ddev->dev); 2235 return -EINVAL; 2236 } 2237 2238 err = kstrtou32(buf, 10, &value); 2239 if (err) { 2240 pm_runtime_mark_last_busy(adev->ddev->dev); 2241 pm_runtime_put_autosuspend(adev->ddev->dev); 2242 return err; 2243 } 2244 2245 value = (value * 100) / 255; 2246 2247 if (is_support_sw_smu(adev)) 2248 err = smu_set_fan_speed_percent(&adev->smu, value); 2249 else if (adev->powerplay.pp_funcs->set_fan_speed_percent) 2250 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 2251 else 2252 err = -EINVAL; 2253 
2254 pm_runtime_mark_last_busy(adev->ddev->dev); 2255 pm_runtime_put_autosuspend(adev->ddev->dev); 2256 2257 if (err) 2258 return err; 2259 2260 return count; 2261} 2262 2263static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 2264 struct device_attribute *attr, 2265 char *buf) 2266{ 2267 struct amdgpu_device *adev = dev_get_drvdata(dev); 2268 int err; 2269 u32 speed = 0; 2270 2271 if (adev->in_gpu_reset) 2272 return -EPERM; 2273 2274 err = pm_runtime_get_sync(adev->ddev->dev); 2275 if (err < 0) 2276 return err; 2277 2278 if (is_support_sw_smu(adev)) 2279 err = smu_get_fan_speed_percent(&adev->smu, &speed); 2280 else if (adev->powerplay.pp_funcs->get_fan_speed_percent) 2281 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 2282 else 2283 err = -EINVAL; 2284 2285 pm_runtime_mark_last_busy(adev->ddev->dev); 2286 pm_runtime_put_autosuspend(adev->ddev->dev); 2287 2288 if (err) 2289 return err; 2290 2291 speed = (speed * 255) / 100; 2292 2293 return sprintf(buf, "%i\n", speed); 2294} 2295 2296static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 2297 struct device_attribute *attr, 2298 char *buf) 2299{ 2300 struct amdgpu_device *adev = dev_get_drvdata(dev); 2301 int err; 2302 u32 speed = 0; 2303 2304 if (adev->in_gpu_reset) 2305 return -EPERM; 2306 2307 err = pm_runtime_get_sync(adev->ddev->dev); 2308 if (err < 0) 2309 return err; 2310 2311 if (is_support_sw_smu(adev)) 2312 err = smu_get_fan_speed_rpm(&adev->smu, &speed); 2313 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2314 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 2315 else 2316 err = -EINVAL; 2317 2318 pm_runtime_mark_last_busy(adev->ddev->dev); 2319 pm_runtime_put_autosuspend(adev->ddev->dev); 2320 2321 if (err) 2322 return err; 2323 2324 return sprintf(buf, "%i\n", speed); 2325} 2326 2327static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 2328 struct device_attribute *attr, 2329 char *buf) 2330{ 2331 struct amdgpu_device *adev = dev_get_drvdata(dev); 2332 u32 min_rpm = 0; 
2333 u32 size = sizeof(min_rpm); 2334 int r; 2335 2336 if (adev->in_gpu_reset) 2337 return -EPERM; 2338 2339 r = pm_runtime_get_sync(adev->ddev->dev); 2340 if (r < 0) 2341 return r; 2342 2343 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2344 (void *)&min_rpm, &size); 2345 2346 pm_runtime_mark_last_busy(adev->ddev->dev); 2347 pm_runtime_put_autosuspend(adev->ddev->dev); 2348 2349 if (r) 2350 return r; 2351 2352 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); 2353} 2354 2355static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 2356 struct device_attribute *attr, 2357 char *buf) 2358{ 2359 struct amdgpu_device *adev = dev_get_drvdata(dev); 2360 u32 max_rpm = 0; 2361 u32 size = sizeof(max_rpm); 2362 int r; 2363 2364 if (adev->in_gpu_reset) 2365 return -EPERM; 2366 2367 r = pm_runtime_get_sync(adev->ddev->dev); 2368 if (r < 0) 2369 return r; 2370 2371 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2372 (void *)&max_rpm, &size); 2373 2374 pm_runtime_mark_last_busy(adev->ddev->dev); 2375 pm_runtime_put_autosuspend(adev->ddev->dev); 2376 2377 if (r) 2378 return r; 2379 2380 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); 2381} 2382 2383static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 2384 struct device_attribute *attr, 2385 char *buf) 2386{ 2387 struct amdgpu_device *adev = dev_get_drvdata(dev); 2388 int err; 2389 u32 rpm = 0; 2390 2391 if (adev->in_gpu_reset) 2392 return -EPERM; 2393 2394 err = pm_runtime_get_sync(adev->ddev->dev); 2395 if (err < 0) 2396 return err; 2397 2398 if (is_support_sw_smu(adev)) 2399 err = smu_get_fan_speed_rpm(&adev->smu, &rpm); 2400 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2401 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 2402 else 2403 err = -EINVAL; 2404 2405 pm_runtime_mark_last_busy(adev->ddev->dev); 2406 pm_runtime_put_autosuspend(adev->ddev->dev); 2407 2408 if (err) 2409 return err; 2410 2411 return sprintf(buf, "%i\n", rpm); 2412} 2413 2414static ssize_t 
amdgpu_hwmon_set_fan1_target(struct device *dev, 2415 struct device_attribute *attr, 2416 const char *buf, size_t count) 2417{ 2418 struct amdgpu_device *adev = dev_get_drvdata(dev); 2419 int err; 2420 u32 value; 2421 u32 pwm_mode; 2422 2423 if (adev->in_gpu_reset) 2424 return -EPERM; 2425 2426 err = pm_runtime_get_sync(adev->ddev->dev); 2427 if (err < 0) 2428 return err; 2429 2430 if (is_support_sw_smu(adev)) 2431 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2432 else 2433 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2434 2435 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2436 pm_runtime_mark_last_busy(adev->ddev->dev); 2437 pm_runtime_put_autosuspend(adev->ddev->dev); 2438 return -ENODATA; 2439 } 2440 2441 err = kstrtou32(buf, 10, &value); 2442 if (err) { 2443 pm_runtime_mark_last_busy(adev->ddev->dev); 2444 pm_runtime_put_autosuspend(adev->ddev->dev); 2445 return err; 2446 } 2447 2448 if (is_support_sw_smu(adev)) 2449 err = smu_set_fan_speed_rpm(&adev->smu, value); 2450 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) 2451 err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 2452 else 2453 err = -EINVAL; 2454 2455 pm_runtime_mark_last_busy(adev->ddev->dev); 2456 pm_runtime_put_autosuspend(adev->ddev->dev); 2457 2458 if (err) 2459 return err; 2460 2461 return count; 2462} 2463 2464static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 2465 struct device_attribute *attr, 2466 char *buf) 2467{ 2468 struct amdgpu_device *adev = dev_get_drvdata(dev); 2469 u32 pwm_mode = 0; 2470 int ret; 2471 2472 if (adev->in_gpu_reset) 2473 return -EPERM; 2474 2475 ret = pm_runtime_get_sync(adev->ddev->dev); 2476 if (ret < 0) 2477 return ret; 2478 2479 if (is_support_sw_smu(adev)) { 2480 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2481 } else { 2482 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 2483 pm_runtime_mark_last_busy(adev->ddev->dev); 2484 pm_runtime_put_autosuspend(adev->ddev->dev); 2485 return -EINVAL; 2486 } 2487 2488 pwm_mode = 
amdgpu_dpm_get_fan_control_mode(adev); 2489 } 2490 2491 pm_runtime_mark_last_busy(adev->ddev->dev); 2492 pm_runtime_put_autosuspend(adev->ddev->dev); 2493 2494 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); 2495} 2496 2497static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 2498 struct device_attribute *attr, 2499 const char *buf, 2500 size_t count) 2501{ 2502 struct amdgpu_device *adev = dev_get_drvdata(dev); 2503 int err; 2504 int value; 2505 u32 pwm_mode; 2506 2507 if (adev->in_gpu_reset) 2508 return -EPERM; 2509 2510 err = kstrtoint(buf, 10, &value); 2511 if (err) 2512 return err; 2513 2514 if (value == 0) 2515 pwm_mode = AMD_FAN_CTRL_AUTO; 2516 else if (value == 1) 2517 pwm_mode = AMD_FAN_CTRL_MANUAL; 2518 else 2519 return -EINVAL; 2520 2521 err = pm_runtime_get_sync(adev->ddev->dev); 2522 if (err < 0) 2523 return err; 2524 2525 if (is_support_sw_smu(adev)) { 2526 smu_set_fan_control_mode(&adev->smu, pwm_mode); 2527 } else { 2528 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2529 pm_runtime_mark_last_busy(adev->ddev->dev); 2530 pm_runtime_put_autosuspend(adev->ddev->dev); 2531 return -EINVAL; 2532 } 2533 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2534 } 2535 2536 pm_runtime_mark_last_busy(adev->ddev->dev); 2537 pm_runtime_put_autosuspend(adev->ddev->dev); 2538 2539 return count; 2540} 2541 2542static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 2543 struct device_attribute *attr, 2544 char *buf) 2545{ 2546 struct amdgpu_device *adev = dev_get_drvdata(dev); 2547 u32 vddgfx; 2548 int r, size = sizeof(vddgfx); 2549 2550 if (adev->in_gpu_reset) 2551 return -EPERM; 2552 2553 r = pm_runtime_get_sync(adev->ddev->dev); 2554 if (r < 0) 2555 return r; 2556 2557 /* get the voltage */ 2558 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, 2559 (void *)&vddgfx, &size); 2560 2561 pm_runtime_mark_last_busy(adev->ddev->dev); 2562 pm_runtime_put_autosuspend(adev->ddev->dev); 2563 2564 if (r) 2565 return r; 2566 
2567 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); 2568} 2569 2570static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 2571 struct device_attribute *attr, 2572 char *buf) 2573{ 2574 return snprintf(buf, PAGE_SIZE, "vddgfx\n"); 2575} 2576 2577static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 2578 struct device_attribute *attr, 2579 char *buf) 2580{ 2581 struct amdgpu_device *adev = dev_get_drvdata(dev); 2582 u32 vddnb; 2583 int r, size = sizeof(vddnb); 2584 2585 if (adev->in_gpu_reset) 2586 return -EPERM; 2587 2588 /* only APUs have vddnb */ 2589 if (!(adev->flags & AMD_IS_APU)) 2590 return -EINVAL; 2591 2592 r = pm_runtime_get_sync(adev->ddev->dev); 2593 if (r < 0) 2594 return r; 2595 2596 /* get the voltage */ 2597 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, 2598 (void *)&vddnb, &size); 2599 2600 pm_runtime_mark_last_busy(adev->ddev->dev); 2601 pm_runtime_put_autosuspend(adev->ddev->dev); 2602 2603 if (r) 2604 return r; 2605 2606 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); 2607} 2608 2609static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 2610 struct device_attribute *attr, 2611 char *buf) 2612{ 2613 return snprintf(buf, PAGE_SIZE, "vddnb\n"); 2614} 2615 2616static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 2617 struct device_attribute *attr, 2618 char *buf) 2619{ 2620 struct amdgpu_device *adev = dev_get_drvdata(dev); 2621 u32 query = 0; 2622 int r, size = sizeof(u32); 2623 unsigned uw; 2624 2625 if (adev->in_gpu_reset) 2626 return -EPERM; 2627 2628 r = pm_runtime_get_sync(adev->ddev->dev); 2629 if (r < 0) 2630 return r; 2631 2632 /* get the voltage */ 2633 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, 2634 (void *)&query, &size); 2635 2636 pm_runtime_mark_last_busy(adev->ddev->dev); 2637 pm_runtime_put_autosuspend(adev->ddev->dev); 2638 2639 if (r) 2640 return r; 2641 2642 /* convert to microwatts */ 2643 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 2644 2645 return 
snprintf(buf, PAGE_SIZE, "%u\n", uw); 2646} 2647 2648static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 2649 struct device_attribute *attr, 2650 char *buf) 2651{ 2652 return sprintf(buf, "%i\n", 0); 2653} 2654 2655static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 2656 struct device_attribute *attr, 2657 char *buf) 2658{ 2659 struct amdgpu_device *adev = dev_get_drvdata(dev); 2660 uint32_t limit = 0; 2661 ssize_t size; 2662 int r; 2663 2664 if (adev->in_gpu_reset) 2665 return -EPERM; 2666 2667 r = pm_runtime_get_sync(adev->ddev->dev); 2668 if (r < 0) 2669 return r; 2670 2671 if (is_support_sw_smu(adev)) { 2672 smu_get_power_limit(&adev->smu, &limit, true, true); 2673 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2674 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2675 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); 2676 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2677 } else { 2678 size = snprintf(buf, PAGE_SIZE, "\n"); 2679 } 2680 2681 pm_runtime_mark_last_busy(adev->ddev->dev); 2682 pm_runtime_put_autosuspend(adev->ddev->dev); 2683 2684 return size; 2685} 2686 2687static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 2688 struct device_attribute *attr, 2689 char *buf) 2690{ 2691 struct amdgpu_device *adev = dev_get_drvdata(dev); 2692 uint32_t limit = 0; 2693 ssize_t size; 2694 int r; 2695 2696 if (adev->in_gpu_reset) 2697 return -EPERM; 2698 2699 r = pm_runtime_get_sync(adev->ddev->dev); 2700 if (r < 0) 2701 return r; 2702 2703 if (is_support_sw_smu(adev)) { 2704 smu_get_power_limit(&adev->smu, &limit, false, true); 2705 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2706 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2707 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); 2708 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2709 } 
else { 2710 size = snprintf(buf, PAGE_SIZE, "\n"); 2711 } 2712 2713 pm_runtime_mark_last_busy(adev->ddev->dev); 2714 pm_runtime_put_autosuspend(adev->ddev->dev); 2715 2716 return size; 2717} 2718 2719 2720static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 2721 struct device_attribute *attr, 2722 const char *buf, 2723 size_t count) 2724{ 2725 struct amdgpu_device *adev = dev_get_drvdata(dev); 2726 int err; 2727 u32 value; 2728 2729 if (adev->in_gpu_reset) 2730 return -EPERM; 2731 2732 if (amdgpu_sriov_vf(adev)) 2733 return -EINVAL; 2734 2735 err = kstrtou32(buf, 10, &value); 2736 if (err) 2737 return err; 2738 2739 value = value / 1000000; /* convert to Watt */ 2740 2741 2742 err = pm_runtime_get_sync(adev->ddev->dev); 2743 if (err < 0) 2744 return err; 2745 2746 if (is_support_sw_smu(adev)) 2747 err = smu_set_power_limit(&adev->smu, value); 2748 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) 2749 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); 2750 else 2751 err = -EINVAL; 2752 2753 pm_runtime_mark_last_busy(adev->ddev->dev); 2754 pm_runtime_put_autosuspend(adev->ddev->dev); 2755 2756 if (err) 2757 return err; 2758 2759 return count; 2760} 2761 2762static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 2763 struct device_attribute *attr, 2764 char *buf) 2765{ 2766 struct amdgpu_device *adev = dev_get_drvdata(dev); 2767 uint32_t sclk; 2768 int r, size = sizeof(sclk); 2769 2770 if (adev->in_gpu_reset) 2771 return -EPERM; 2772 2773 r = pm_runtime_get_sync(adev->ddev->dev); 2774 if (r < 0) 2775 return r; 2776 2777 /* get the sclk */ 2778 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 2779 (void *)&sclk, &size); 2780 2781 pm_runtime_mark_last_busy(adev->ddev->dev); 2782 pm_runtime_put_autosuspend(adev->ddev->dev); 2783 2784 if (r) 2785 return r; 2786 2787 return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000); 2788} 2789 2790static ssize_t 
amdgpu_hwmon_show_sclk_label(struct device *dev, 2791 struct device_attribute *attr, 2792 char *buf) 2793{ 2794 return snprintf(buf, PAGE_SIZE, "sclk\n"); 2795} 2796 2797static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 2798 struct device_attribute *attr, 2799 char *buf) 2800{ 2801 struct amdgpu_device *adev = dev_get_drvdata(dev); 2802 uint32_t mclk; 2803 int r, size = sizeof(mclk); 2804 2805 if (adev->in_gpu_reset) 2806 return -EPERM; 2807 2808 r = pm_runtime_get_sync(adev->ddev->dev); 2809 if (r < 0) 2810 return r; 2811 2812 /* get the sclk */ 2813 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 2814 (void *)&mclk, &size); 2815 2816 pm_runtime_mark_last_busy(adev->ddev->dev); 2817 pm_runtime_put_autosuspend(adev->ddev->dev); 2818 2819 if (r) 2820 return r; 2821 2822 return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000); 2823} 2824 2825static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 2826 struct device_attribute *attr, 2827 char *buf) 2828{ 2829 return snprintf(buf, PAGE_SIZE, "mclk\n"); 2830} 2831 2832/** 2833 * DOC: hwmon 2834 * 2835 * The amdgpu driver exposes the following sensor interfaces: 2836 * 2837 * - GPU temperature (via the on-die sensor) 2838 * 2839 * - GPU voltage 2840 * 2841 * - Northbridge voltage (APUs only) 2842 * 2843 * - GPU power 2844 * 2845 * - GPU fan 2846 * 2847 * - GPU gfx/compute engine clock 2848 * 2849 * - GPU memory clock (dGPU only) 2850 * 2851 * hwmon interfaces for GPU temperature: 2852 * 2853 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius 2854 * - temp2_input and temp3_input are supported on SOC15 dGPUs only 2855 * 2856 * - temp[1-3]_label: temperature channel label 2857 * - temp2_label and temp3_label are supported on SOC15 dGPUs only 2858 * 2859 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 2860 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 2861 * 2862 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit 
in millidegrees Celsius 2863 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only 2864 * 2865 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius 2866 * - these are supported on SOC15 dGPUs only 2867 * 2868 * hwmon interfaces for GPU voltage: 2869 * 2870 * - in0_input: the voltage on the GPU in millivolts 2871 * 2872 * - in1_input: the voltage on the Northbridge in millivolts 2873 * 2874 * hwmon interfaces for GPU power: 2875 * 2876 * - power1_average: average power used by the GPU in microWatts 2877 * 2878 * - power1_cap_min: minimum cap supported in microWatts 2879 * 2880 * - power1_cap_max: maximum cap supported in microWatts 2881 * 2882 * - power1_cap: selected power cap in microWatts 2883 * 2884 * hwmon interfaces for GPU fan: 2885 * 2886 * - pwm1: pulse width modulation fan level (0-255) 2887 * 2888 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 2889 * 2890 * - pwm1_min: pulse width modulation fan control minimum level (0) 2891 * 2892 * - pwm1_max: pulse width modulation fan control maximum level (255) 2893 * 2894 * - fan1_min: an minimum value Unit: revolution/min (RPM) 2895 * 2896 * - fan1_max: an maxmum value Unit: revolution/max (RPM) 2897 * 2898 * - fan1_input: fan speed in RPM 2899 * 2900 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) 2901 * 2902 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable 2903 * 2904 * hwmon interfaces for GPU clocks: 2905 * 2906 * - freq1_input: the gfx/compute clock in hertz 2907 * 2908 * - freq2_input: the memory clock in hertz 2909 * 2910 * You can use hwmon tools like sensors to view this information on your system. 
2911 * 2912 */ 2913 2914static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 2915static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 2916static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 2917static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 2918static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 2919static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 2920static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 2921static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 2922static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 2923static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 2924static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 2925static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 2926static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 2927static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 2928static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 2929static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 2930static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 2931static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 2932static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 2933static SENSOR_DEVICE_ATTR(fan1_input, 
S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 2934static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 2935static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 2936static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 2937static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 2938static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 2939static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 2940static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 2941static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 2942static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 2943static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 2944static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 2945static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 2946static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 2947static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 2948static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 2949static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 2950 2951static struct attribute *hwmon_attributes[] = { 2952 &sensor_dev_attr_temp1_input.dev_attr.attr, 2953 &sensor_dev_attr_temp1_crit.dev_attr.attr, 2954 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 2955 &sensor_dev_attr_temp2_input.dev_attr.attr, 2956 &sensor_dev_attr_temp2_crit.dev_attr.attr, 2957 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 2958 &sensor_dev_attr_temp3_input.dev_attr.attr, 
2959 &sensor_dev_attr_temp3_crit.dev_attr.attr, 2960 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 2961 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 2962 &sensor_dev_attr_temp2_emergency.dev_attr.attr, 2963 &sensor_dev_attr_temp3_emergency.dev_attr.attr, 2964 &sensor_dev_attr_temp1_label.dev_attr.attr, 2965 &sensor_dev_attr_temp2_label.dev_attr.attr, 2966 &sensor_dev_attr_temp3_label.dev_attr.attr, 2967 &sensor_dev_attr_pwm1.dev_attr.attr, 2968 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 2969 &sensor_dev_attr_pwm1_min.dev_attr.attr, 2970 &sensor_dev_attr_pwm1_max.dev_attr.attr, 2971 &sensor_dev_attr_fan1_input.dev_attr.attr, 2972 &sensor_dev_attr_fan1_min.dev_attr.attr, 2973 &sensor_dev_attr_fan1_max.dev_attr.attr, 2974 &sensor_dev_attr_fan1_target.dev_attr.attr, 2975 &sensor_dev_attr_fan1_enable.dev_attr.attr, 2976 &sensor_dev_attr_in0_input.dev_attr.attr, 2977 &sensor_dev_attr_in0_label.dev_attr.attr, 2978 &sensor_dev_attr_in1_input.dev_attr.attr, 2979 &sensor_dev_attr_in1_label.dev_attr.attr, 2980 &sensor_dev_attr_power1_average.dev_attr.attr, 2981 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 2982 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 2983 &sensor_dev_attr_power1_cap.dev_attr.attr, 2984 &sensor_dev_attr_freq1_input.dev_attr.attr, 2985 &sensor_dev_attr_freq1_label.dev_attr.attr, 2986 &sensor_dev_attr_freq2_input.dev_attr.attr, 2987 &sensor_dev_attr_freq2_label.dev_attr.attr, 2988 NULL 2989}; 2990 2991static umode_t hwmon_attributes_visible(struct kobject *kobj, 2992 struct attribute *attr, int index) 2993{ 2994 struct device *dev = kobj_to_dev(kobj); 2995 struct amdgpu_device *adev = dev_get_drvdata(dev); 2996 umode_t effective_mode = attr->mode; 2997 2998 /* under multi-vf mode, the hwmon attributes are all not supported */ 2999 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 3000 return 0; 3001 3002 /* there is no fan under pp one vf mode */ 3003 if (amdgpu_sriov_is_pp_one_vf(adev) && 3004 (attr == 
&sensor_dev_attr_pwm1.dev_attr.attr || 3005 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3006 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3007 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3008 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3009 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3010 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3011 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3012 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3013 return 0; 3014 3015 /* Skip fan attributes if fan is not present */ 3016 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3017 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3018 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3019 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3020 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3021 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3022 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3023 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3024 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3025 return 0; 3026 3027 /* Skip fan attributes on APU */ 3028 if ((adev->flags & AMD_IS_APU) && 3029 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3030 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3031 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3032 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3033 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3034 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3035 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3036 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3037 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3038 return 0; 3039 3040 /* Skip limit attributes if DPM is not enabled */ 3041 if (!adev->pm.dpm_enabled && 3042 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3043 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 3044 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3045 
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3046 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3047 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3048 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3049 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3050 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3051 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3052 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3053 return 0; 3054 3055 if (!is_support_sw_smu(adev)) { 3056 /* mask fan attributes if we have no bindings for this asic to expose */ 3057 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 3058 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 3059 (!adev->powerplay.pp_funcs->get_fan_control_mode && 3060 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 3061 effective_mode &= ~S_IRUGO; 3062 3063 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 3064 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 3065 (!adev->powerplay.pp_funcs->set_fan_control_mode && 3066 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 3067 effective_mode &= ~S_IWUSR; 3068 } 3069 3070 if (((adev->flags & AMD_IS_APU) || 3071 adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 3072 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 3073 (attr == &sensor_dev_attr_power1_average.dev_attr.attr || 3074 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 3075 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 3076 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 3077 return 0; 3078 3079 if (!is_support_sw_smu(adev)) { 3080 /* hide max/min values if we can't both query and manage the fan */ 3081 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 3082 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 3083 (!adev->powerplay.pp_funcs->set_fan_speed_rpm && 3084 !adev->powerplay.pp_funcs->get_fan_speed_rpm) 
&& 3085 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3086 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 3087 return 0; 3088 3089 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && 3090 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 3091 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3092 attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 3093 return 0; 3094 } 3095 3096 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 3097 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 3098 (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 3099 attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 3100 return 0; 3101 3102 /* only APUs have vddnb */ 3103 if (!(adev->flags & AMD_IS_APU) && 3104 (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 3105 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 3106 return 0; 3107 3108 /* no mclk on APUs */ 3109 if ((adev->flags & AMD_IS_APU) && 3110 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 3111 attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 3112 return 0; 3113 3114 /* only SOC15 dGPUs support hotspot and mem temperatures */ 3115 if (((adev->flags & AMD_IS_APU) || 3116 adev->asic_type < CHIP_VEGA10) && 3117 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 3118 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 3119 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || 3120 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 3121 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 3122 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 3123 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || 3124 attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 3125 attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 3126 attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 3127 attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) 3128 return 0; 3129 3130 return effective_mode; 3131} 3132 3133static const struct attribute_group hwmon_attrgroup = { 
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated list of hwmon attribute groups registered below. */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

/*
 * amdgpu_dpm_thermal_work_handler - react to a thermal interrupt/event.
 *
 * Reads the current GPU temperature and, while it exceeds
 * pm.dpm.thermal.min_temp (or on a high-to-low transition when the sensor
 * read fails), forces the internal THERMAL power state; otherwise restores
 * the user-selected state.  The state change itself is applied by
 * amdgpu_pm_compute_clocks() at the end.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		/* sensor read failed; fall back to the edge direction flag */
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

/*
 * amdgpu_dpm_pick_power_state - select the best matching power state.
 *
 * Scans adev->pm.dpm.ps[] for a state whose classification matches the
 * requested @dpm_state.  States flagged SINGLE_DISPLAY_ONLY are only
 * eligible when at most one CRTC is active and the vblank period is long
 * enough for mclk switching.  If no state matches, the request is demoted
 * along a fallback chain (e.g. UVD_SD -> UVD_HD -> PERFORMANCE) and the
 * search restarts.  Returns NULL only if the fallback chain is exhausted.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separare 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

/*
 * amdgpu_dpm_change_power_state_locked - apply the currently requested
 * DPM power state.
 *
 * Picks the best state for adev->pm.dpm.state, skips the transition when
 * the hardware reports the requested state equals the current one, and
 * otherwise performs the pre/set/post power-state sequence.  When thermal
 * throttling is active the forced performance level is temporarily dropped
 * to LOW while the user's level is preserved in pm.dpm.forced_level.
 * Caller context: invoked from amdgpu_pm_compute_clocks() with
 * adev->pm.mutex held (hence "_locked").
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* verbose state dump when dpm debugging is requested via amdgpu_dpm */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		/* on comparison failure, assume the states differ and switch */
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

/*
 * amdgpu_dpm_enable_uvd - power gate/ungate the UVD block via the SMU.
 * @enable: true to power the block up (ungate), false to gate it.
 *
 * Note the inversion: set_powergating_by_smu() takes a "gate" flag, so
 * !enable is passed.  On Stoney APUs decoding 4K streams, also toggles
 * the low-memory NB PState through the hwmgr callback.
 */
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);

	/* enable/disable Low Memory PState for UVD (4k videos) */
	if (adev->asic_type == CHIP_STONEY &&
	    adev->uvd.decode_image_width >= WIDTH_4K) {
		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

		if (hwmgr && hwmgr->hwmgr_func &&
		    hwmgr->hwmgr_func->update_nbdpm_pstate)
			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
							       !enable,
							       true);
	}
}

/*
 * amdgpu_dpm_enable_vce - power gate/ungate the VCE block via the SMU.
 * @enable: true to ungate, false to gate (inverted into the SMU call).
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

/*
 * amdgpu_pm_print_power_states - dump every DPM power state, if the
 * powerplay backend provides a print callback.
 */
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);

}

/*
 * amdgpu_dpm_enable_jpeg - power gate/ungate the JPEG block via the SMU.
 * @enable: true to ungate, false to gate (inverted into the SMU call).
 */
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

/*
 * amdgpu_pm_load_smu_firmware - load SMU firmware via the powerplay
 * backend, if it provides a loader.
 * @smu_version: out parameter; set to adev->pm.fw_version on success.
 *
 * Returns 0 on success (or when no loader exists), negative errno from
 * the backend otherwise.
 */
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}
		*smu_version = adev->pm.fw_version;
	}
	return 0;
}

/*
 * amdgpu_pm_sysfs_init - register the hwmon device and the pm sysfs
 * attribute groups.
 *
 * The attribute mask depends on the SR-IOV mode: one-VF exposes only the
 * ONEVF-flagged attributes, multi-VF exposes none, bare metal exposes all.
 * Idempotent: returns early once pm.sysfs_initialized is set.
 * Returns 0 on success or a negative errno.
 */
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;
	uint32_t mask = 0;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask,
					       &adev->pm.pm_attr_list);
	if (ret)
		return ret;

	adev->pm.sysfs_initialized = true;

	return 0;
}

/*
 * amdgpu_pm_sysfs_fini - tear down what amdgpu_pm_sysfs_init() registered.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}

/*
 * amdgpu_pm_compute_clocks - recompute clocks after a display or load
 * change.
 *
 * Updates display bandwidth, waits for all ready rings to drain, then
 * notifies the PM backend: the SW-SMU path issues a DISPLAY_CONFIG_CHANGE
 * task; the powerplay path pushes the display config (forcing
 * min_vblank_time to 0 above 120 Hz on non-DC, where mclk switching is
 * problematic) and dispatches the same task; the legacy dpm path falls
 * back to amdgpu_dpm_change_power_state_locked() under pm.mutex.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
				true);
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

/*
 * amdgpu_debugfs_pm_info_pp - print clocks, power, temperature, load and
 * engine states for the powerplay/SW-SMU path into the seq_file.
 *
 * Each sensor read is best-effort: a failing amdgpu_dpm_read_sensor()
 * simply omits that line.  ASICs newer than Vega20 report VCN state;
 * older ones report UVD and VCE.  Clock sensor values are in 10 kHz
 * units, hence the /100 to MHz; temperature is in millidegrees C.
 */
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	/* power is packed as 8.8 fixed point: integer watts in the high
	 * bits, fraction in the low byte */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}

/*
 * amdgpu_parse_cg_state - print an On/Off line for every clock-gating
 * feature in the clocks[] table, based on @flags.
 */
static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

/*
 * amdgpu_debugfs_pm_info - debugfs show callback for "amdgpu_pm_info".
 *
 * Refuses access during a GPU reset, wakes the device via runtime PM,
 * prints the clock-gating mask, and then either the legacy performance
 * level dump or the full powerplay/SW-SMU report.
 */
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 flags = 0;
	int r;

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	amdgpu_device_ip_get_clockgating_state(adev, &flags);
	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		/* NOTE(review): this inner check duplicates the outer
		 * condition above, so the else branch looks unreachable —
		 * verify before simplifying. */
		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
		r = 0;
	} else {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

/*
 * amdgpu_debugfs_pm_init - register the pm debugfs entries; a no-op
 * returning 0 when CONFIG_DEBUG_FS is disabled.
 */
int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}