Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.7 3676 lines 107 kB view raw
1/* 2 * Copyright 2017 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Rafał Miłecki <zajec5@gmail.com> 23 * Alex Deucher <alexdeucher@gmail.com> 24 */ 25 26#include <drm/drm_debugfs.h> 27 28#include "amdgpu.h" 29#include "amdgpu_drv.h" 30#include "amdgpu_pm.h" 31#include "amdgpu_dpm.h" 32#include "amdgpu_display.h" 33#include "amdgpu_smu.h" 34#include "atom.h" 35#include <linux/power_supply.h> 36#include <linux/pci.h> 37#include <linux/hwmon.h> 38#include <linux/hwmon-sysfs.h> 39#include <linux/nospec.h> 40#include <linux/pm_runtime.h> 41#include "hwmgr.h" 42#define WIDTH_4K 3840 43 44static const struct cg_flag_name clocks[] = { 45 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 46 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 47 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 48 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 49 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 50 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 51 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 52 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 53 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 54 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 55 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 56 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 57 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 58 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 59 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 60 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 61 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 62 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 63 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 64 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 65 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 66 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 67 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 68 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 69 70 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, 71 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, 72 {0, NULL}, 73}; 74 75static const struct hwmon_temp_label { 76 enum PP_HWMON_TEMP channel; 77 const char *label; 78} temp_label[] = { 79 {PP_TEMP_EDGE, "edge"}, 80 {PP_TEMP_JUNCTION, "junction"}, 81 {PP_TEMP_MEM, "mem"}, 82}; 83 84void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 85{ 86 if (adev->pm.dpm_enabled) { 87 mutex_lock(&adev->pm.mutex); 88 if (power_supply_is_system_supplied() > 0) 89 adev->pm.ac_power = true; 90 else 91 adev->pm.ac_power = false; 92 if (adev->powerplay.pp_funcs && 93 adev->powerplay.pp_funcs->enable_bapm) 94 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 95 mutex_unlock(&adev->pm.mutex); 96 97 if (is_support_sw_smu(adev)) 98 smu_set_ac_dc(&adev->smu); 99 } 100} 101 102int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 103 void *data, uint32_t *size) 104{ 105 int ret = 0; 106 107 if (!data || !size) 108 return -EINVAL; 109 110 if (is_support_sw_smu(adev)) 111 ret = smu_read_sensor(&adev->smu, sensor, data, size); 112 else { 113 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) 114 ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, 115 sensor, data, size); 116 else 117 ret = -EINVAL; 118 } 119 120 return ret; 121} 122 123/** 124 * DOC: power_dpm_state 125 * 126 * The power_dpm_state file is a legacy interface and is only provided for 127 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting 128 * certain power related parameters. The file power_dpm_state is used for this. 129 * It accepts the following arguments: 130 * 131 * - battery 132 * 133 * - balanced 134 * 135 * - performance 136 * 137 * battery 138 * 139 * On older GPUs, the vbios provided a special power state for battery 140 * operation. Selecting battery switched to this state. This is no 141 * longer provided on newer GPUs so the option does nothing in that case. 142 * 143 * balanced 144 * 145 * On older GPUs, the vbios provided a special power state for balanced 146 * operation. Selecting balanced switched to this state. This is no 147 * longer provided on newer GPUs so the option does nothing in that case. 148 * 149 * performance 150 * 151 * On older GPUs, the vbios provided a special power state for performance 152 * operation. Selecting performance switched to this state. This is no 153 * longer provided on newer GPUs so the option does nothing in that case. 154 * 155 */ 156 157static ssize_t amdgpu_get_dpm_state(struct device *dev, 158 struct device_attribute *attr, 159 char *buf) 160{ 161 struct drm_device *ddev = dev_get_drvdata(dev); 162 struct amdgpu_device *adev = ddev->dev_private; 163 enum amd_pm_state_type pm; 164 int ret; 165 166 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 167 return 0; 168 169 ret = pm_runtime_get_sync(ddev->dev); 170 if (ret < 0) 171 return ret; 172 173 if (is_support_sw_smu(adev)) { 174 if (adev->smu.ppt_funcs->get_current_power_state) 175 pm = smu_get_current_power_state(&adev->smu); 176 else 177 pm = adev->pm.dpm.user_state; 178 } else if (adev->powerplay.pp_funcs->get_current_power_state) { 179 pm = amdgpu_dpm_get_current_power_state(adev); 180 } else { 181 pm = adev->pm.dpm.user_state; 182 } 183 184 pm_runtime_mark_last_busy(ddev->dev); 185 pm_runtime_put_autosuspend(ddev->dev); 186 187 return snprintf(buf, PAGE_SIZE, "%s\n", 188 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 189 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 190} 191 192static ssize_t amdgpu_set_dpm_state(struct device *dev, 193 struct device_attribute *attr, 194 const char *buf, 195 size_t count) 196{ 197 struct drm_device *ddev = dev_get_drvdata(dev); 198 struct amdgpu_device *adev = ddev->dev_private; 199 enum amd_pm_state_type state; 200 int ret; 201 202 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 203 return -EINVAL; 204 205 if (strncmp("battery", buf, strlen("battery")) == 0) 206 state = POWER_STATE_TYPE_BATTERY; 207 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 208 state = POWER_STATE_TYPE_BALANCED; 209 else if (strncmp("performance", buf, strlen("performance")) == 0) 210 state = POWER_STATE_TYPE_PERFORMANCE; 211 else 212 return -EINVAL; 213 214 ret = pm_runtime_get_sync(ddev->dev); 215 if (ret < 0) 216 return ret; 217 218 if (is_support_sw_smu(adev)) { 219 mutex_lock(&adev->pm.mutex); 220 adev->pm.dpm.user_state = state; 221 mutex_unlock(&adev->pm.mutex); 222 } else if (adev->powerplay.pp_funcs->dispatch_tasks) { 223 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 224 } else { 225 mutex_lock(&adev->pm.mutex); 226 adev->pm.dpm.user_state = state; 227 mutex_unlock(&adev->pm.mutex); 228 229 amdgpu_pm_compute_clocks(adev); 230 } 231 pm_runtime_mark_last_busy(ddev->dev); 232 pm_runtime_put_autosuspend(ddev->dev); 233 234 return count; 235} 236 237 238/** 239 * DOC: power_dpm_force_performance_level 240 * 241 * The amdgpu driver provides a sysfs API for adjusting certain power 242 * related parameters. The file power_dpm_force_performance_level is 243 * used for this. It accepts the following arguments: 244 * 245 * - auto 246 * 247 * - low 248 * 249 * - high 250 * 251 * - manual 252 * 253 * - profile_standard 254 * 255 * - profile_min_sclk 256 * 257 * - profile_min_mclk 258 * 259 * - profile_peak 260 * 261 * auto 262 * 263 * When auto is selected, the driver will attempt to dynamically select 264 * the optimal power profile for current conditions in the driver. 265 * 266 * low 267 * 268 * When low is selected, the clocks are forced to the lowest power state. 269 * 270 * high 271 * 272 * When high is selected, the clocks are forced to the highest power state. 273 * 274 * manual 275 * 276 * When manual is selected, the user can manually adjust which power states 277 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 278 * and pp_dpm_pcie files and adjust the power state transition heuristics 279 * via the pp_power_profile_mode sysfs file. 280 * 281 * profile_standard 282 * profile_min_sclk 283 * profile_min_mclk 284 * profile_peak 285 * 286 * When the profiling modes are selected, clock and power gating are 287 * disabled and the clocks are set for different profiling cases. This 288 * mode is recommended for profiling specific work loads where you do 289 * not want clock or power gating for clock fluctuation to interfere 290 * with your results. profile_standard sets the clocks to a fixed clock 291 * level which varies from asic to asic. profile_min_sclk forces the sclk 292 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 293 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 294 * 295 */ 296 297static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 298 struct device_attribute *attr, 299 char *buf) 300{ 301 struct drm_device *ddev = dev_get_drvdata(dev); 302 struct amdgpu_device *adev = ddev->dev_private; 303 enum amd_dpm_forced_level level = 0xff; 304 int ret; 305 306 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 307 return 0; 308 309 ret = pm_runtime_get_sync(ddev->dev); 310 if (ret < 0) 311 return ret; 312 313 if (is_support_sw_smu(adev)) 314 level = smu_get_performance_level(&adev->smu); 315 else if (adev->powerplay.pp_funcs->get_performance_level) 316 level = amdgpu_dpm_get_performance_level(adev); 317 else 318 level = adev->pm.dpm.forced_level; 319 320 pm_runtime_mark_last_busy(ddev->dev); 321 pm_runtime_put_autosuspend(ddev->dev); 322 323 return snprintf(buf, PAGE_SIZE, "%s\n", 324 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 325 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 326 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 327 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 328 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 329 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 330 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 331 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 332 "unknown"); 333} 334 335static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 336 struct device_attribute *attr, 337 const char *buf, 338 size_t count) 339{ 340 struct drm_device *ddev = dev_get_drvdata(dev); 341 struct amdgpu_device *adev = ddev->dev_private; 342 enum amd_dpm_forced_level level; 343 enum amd_dpm_forced_level current_level = 0xff; 344 int ret = 0; 345 346 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 347 return -EINVAL; 348 349 if (strncmp("low", buf, strlen("low")) == 0) { 350 level = AMD_DPM_FORCED_LEVEL_LOW; 351 } else if (strncmp("high", buf, strlen("high")) == 0) { 352 level = AMD_DPM_FORCED_LEVEL_HIGH; 353 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 354 level = AMD_DPM_FORCED_LEVEL_AUTO; 355 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 356 level = AMD_DPM_FORCED_LEVEL_MANUAL; 357 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 358 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 359 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 360 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 361 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 362 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 363 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 364 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 365 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 366 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 367 } else { 368 return -EINVAL; 369 } 370 371 ret = pm_runtime_get_sync(ddev->dev); 372 if (ret < 0) 373 return ret; 374 375 if (is_support_sw_smu(adev)) 376 current_level = smu_get_performance_level(&adev->smu); 377 else if (adev->powerplay.pp_funcs->get_performance_level) 378 current_level = amdgpu_dpm_get_performance_level(adev); 379 380 if (current_level == level) { 381 pm_runtime_mark_last_busy(ddev->dev); 382 pm_runtime_put_autosuspend(ddev->dev); 383 return count; 384 } 385 386 /* profile_exit setting is valid only when current mode is in profile mode */ 387 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 388 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 389 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 390 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && 391 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { 392 pr_err("Currently not in any profile mode!\n"); 393 pm_runtime_mark_last_busy(ddev->dev); 394 pm_runtime_put_autosuspend(ddev->dev); 395 return -EINVAL; 396 } 397 398 if (is_support_sw_smu(adev)) { 399 ret = smu_force_performance_level(&adev->smu, level); 400 if (ret) { 401 pm_runtime_mark_last_busy(ddev->dev); 402 pm_runtime_put_autosuspend(ddev->dev); 403 return -EINVAL; 404 } 405 } else if (adev->powerplay.pp_funcs->force_performance_level) { 406 mutex_lock(&adev->pm.mutex); 407 if (adev->pm.dpm.thermal_active) { 408 mutex_unlock(&adev->pm.mutex); 409 pm_runtime_mark_last_busy(ddev->dev); 410 pm_runtime_put_autosuspend(ddev->dev); 411 return -EINVAL; 412 } 413 ret = amdgpu_dpm_force_performance_level(adev, level); 414 if (ret) { 415 mutex_unlock(&adev->pm.mutex); 416 pm_runtime_mark_last_busy(ddev->dev); 417 pm_runtime_put_autosuspend(ddev->dev); 418 return -EINVAL; 419 } else { 420 adev->pm.dpm.forced_level = level; 421 } 422 mutex_unlock(&adev->pm.mutex); 423 } 424 pm_runtime_mark_last_busy(ddev->dev); 425 pm_runtime_put_autosuspend(ddev->dev); 426 427 return count; 428} 429 430static ssize_t amdgpu_get_pp_num_states(struct device *dev, 431 struct device_attribute *attr, 432 char *buf) 433{ 434 struct drm_device *ddev = dev_get_drvdata(dev); 435 struct amdgpu_device *adev = ddev->dev_private; 436 struct pp_states_info data; 437 int i, buf_len, ret; 438 439 ret = pm_runtime_get_sync(ddev->dev); 440 if (ret < 0) 441 return ret; 442 443 if (is_support_sw_smu(adev)) { 444 ret = smu_get_power_num_states(&adev->smu, &data); 445 if (ret) 446 return ret; 447 } else if (adev->powerplay.pp_funcs->get_pp_num_states) 448 amdgpu_dpm_get_pp_num_states(adev, &data); 449 450 pm_runtime_mark_last_busy(ddev->dev); 451 pm_runtime_put_autosuspend(ddev->dev); 452 453 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 454 for (i = 0; i < data.nums; i++) 455 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 456 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 457 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 458 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 459 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 460 461 return buf_len; 462} 463 464static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 465 struct device_attribute *attr, 466 char *buf) 467{ 468 struct drm_device *ddev = dev_get_drvdata(dev); 469 struct amdgpu_device *adev = ddev->dev_private; 470 struct pp_states_info data; 471 struct smu_context *smu = &adev->smu; 472 enum amd_pm_state_type pm = 0; 473 int i = 0, ret = 0; 474 475 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 476 return 0; 477 478 ret = pm_runtime_get_sync(ddev->dev); 479 if (ret < 0) 480 return ret; 481 482 if (is_support_sw_smu(adev)) { 483 pm = smu_get_current_power_state(smu); 484 ret = smu_get_power_num_states(smu, &data); 485 if (ret) 486 return ret; 487 } else if (adev->powerplay.pp_funcs->get_current_power_state 488 && adev->powerplay.pp_funcs->get_pp_num_states) { 489 pm = amdgpu_dpm_get_current_power_state(adev); 490 amdgpu_dpm_get_pp_num_states(adev, &data); 491 } 492 493 pm_runtime_mark_last_busy(ddev->dev); 494 pm_runtime_put_autosuspend(ddev->dev); 495 496 for (i = 0; i < data.nums; i++) { 497 if (pm == data.states[i]) 498 break; 499 } 500 501 if (i == data.nums) 502 i = -EINVAL; 503 504 return snprintf(buf, PAGE_SIZE, "%d\n", i); 505} 506 507static ssize_t amdgpu_get_pp_force_state(struct device *dev, 508 struct device_attribute *attr, 509 char *buf) 510{ 511 struct drm_device *ddev = dev_get_drvdata(dev); 512 struct amdgpu_device *adev = ddev->dev_private; 513 514 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 515 return 0; 516 517 if (adev->pp_force_state_enabled) 518 return amdgpu_get_pp_cur_state(dev, attr, buf); 519 else 520 return snprintf(buf, PAGE_SIZE, "\n"); 521} 522 523static ssize_t amdgpu_set_pp_force_state(struct device *dev, 524 struct device_attribute *attr, 525 const char *buf, 526 size_t count) 527{ 528 struct drm_device *ddev = dev_get_drvdata(dev); 529 struct amdgpu_device *adev = ddev->dev_private; 530 enum amd_pm_state_type state = 0; 531 unsigned long idx; 532 int ret; 533 534 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 535 return -EINVAL; 536 537 if (strlen(buf) == 1) 538 adev->pp_force_state_enabled = false; 539 else if (is_support_sw_smu(adev)) 540 adev->pp_force_state_enabled = false; 541 else if (adev->powerplay.pp_funcs->dispatch_tasks && 542 adev->powerplay.pp_funcs->get_pp_num_states) { 543 struct pp_states_info data; 544 545 ret = kstrtoul(buf, 0, &idx); 546 if (ret || idx >= ARRAY_SIZE(data.states)) 547 return -EINVAL; 548 549 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 550 551 amdgpu_dpm_get_pp_num_states(adev, &data); 552 state = data.states[idx]; 553 554 ret = pm_runtime_get_sync(ddev->dev); 555 if (ret < 0) 556 return ret; 557 558 /* only set user selected power states */ 559 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 560 state != POWER_STATE_TYPE_DEFAULT) { 561 amdgpu_dpm_dispatch_task(adev, 562 AMD_PP_TASK_ENABLE_USER_STATE, &state); 563 adev->pp_force_state_enabled = true; 564 } 565 pm_runtime_mark_last_busy(ddev->dev); 566 pm_runtime_put_autosuspend(ddev->dev); 567 } 568 569 return count; 570} 571 572/** 573 * DOC: pp_table 574 * 575 * The amdgpu driver provides a sysfs API for uploading new powerplay 576 * tables. The file pp_table is used for this. Reading the file 577 * will dump the current power play table. Writing to the file 578 * will attempt to upload a new powerplay table and re-initialize 579 * powerplay using that new table. 580 * 581 */ 582 583static ssize_t amdgpu_get_pp_table(struct device *dev, 584 struct device_attribute *attr, 585 char *buf) 586{ 587 struct drm_device *ddev = dev_get_drvdata(dev); 588 struct amdgpu_device *adev = ddev->dev_private; 589 char *table = NULL; 590 int size, ret; 591 592 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 593 return 0; 594 595 ret = pm_runtime_get_sync(ddev->dev); 596 if (ret < 0) 597 return ret; 598 599 if (is_support_sw_smu(adev)) { 600 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 601 pm_runtime_mark_last_busy(ddev->dev); 602 pm_runtime_put_autosuspend(ddev->dev); 603 if (size < 0) 604 return size; 605 } else if (adev->powerplay.pp_funcs->get_pp_table) { 606 size = amdgpu_dpm_get_pp_table(adev, &table); 607 pm_runtime_mark_last_busy(ddev->dev); 608 pm_runtime_put_autosuspend(ddev->dev); 609 if (size < 0) 610 return size; 611 } else { 612 pm_runtime_mark_last_busy(ddev->dev); 613 pm_runtime_put_autosuspend(ddev->dev); 614 return 0; 615 } 616 617 if (size >= PAGE_SIZE) 618 size = PAGE_SIZE - 1; 619 620 memcpy(buf, table, size); 621 622 return size; 623} 624 625static ssize_t amdgpu_set_pp_table(struct device *dev, 626 struct device_attribute *attr, 627 const char *buf, 628 size_t count) 629{ 630 struct drm_device *ddev = dev_get_drvdata(dev); 631 struct amdgpu_device *adev = ddev->dev_private; 632 int ret = 0; 633 634 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 635 return -EINVAL; 636 637 ret = pm_runtime_get_sync(ddev->dev); 638 if (ret < 0) 639 return ret; 640 641 if (is_support_sw_smu(adev)) { 642 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); 643 if (ret) { 644 pm_runtime_mark_last_busy(ddev->dev); 645 pm_runtime_put_autosuspend(ddev->dev); 646 return ret; 647 } 648 } else if (adev->powerplay.pp_funcs->set_pp_table) 649 amdgpu_dpm_set_pp_table(adev, buf, count); 650 651 pm_runtime_mark_last_busy(ddev->dev); 652 pm_runtime_put_autosuspend(ddev->dev); 653 654 return count; 655} 656 657/** 658 * DOC: pp_od_clk_voltage 659 * 660 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 661 * in each power level within a power state. The pp_od_clk_voltage is used for 662 * this. 663 * 664 * < For Vega10 and previous ASICs > 665 * 666 * Reading the file will display: 667 * 668 * - a list of engine clock levels and voltages labeled OD_SCLK 669 * 670 * - a list of memory clock levels and voltages labeled OD_MCLK 671 * 672 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 673 * 674 * To manually adjust these settings, first select manual using 675 * power_dpm_force_performance_level. Enter a new value for each 676 * level by writing a string that contains "s/m level clock voltage" to 677 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 678 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 679 * 810 mV. When you have edited all of the states as needed, write 680 * "c" (commit) to the file to commit your changes. If you want to reset to the 681 * default power levels, write "r" (reset) to the file to reset them. 682 * 683 * 684 * < For Vega20 > 685 * 686 * Reading the file will display: 687 * 688 * - minimum and maximum engine clock labeled OD_SCLK 689 * 690 * - maximum memory clock labeled OD_MCLK 691 * 692 * - three <frequency, voltage> points labeled OD_VDDC_CURVE. 693 * They can be used to calibrate the sclk voltage curve. 694 * 695 * - a list of valid ranges for sclk, mclk, and voltage curve points 696 * labeled OD_RANGE 697 * 698 * To manually adjust these settings: 699 * 700 * - First select manual using power_dpm_force_performance_level 701 * 702 * - For clock frequency setting, enter a new value by writing a 703 * string that contains "s/m index clock" to the file. The index 704 * should be 0 if to set minimum clock. And 1 if to set maximum 705 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. 706 * "m 1 800" will update maximum mclk to be 800Mhz. 707 * 708 * For sclk voltage curve, enter the new values by writing a 709 * string that contains "vc point clock voltage" to the file. The 710 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will 711 * update point1 with clock set as 300Mhz and voltage as 712 * 600mV. "vc 2 1000 1000" will update point3 with clock set 713 * as 1000Mhz and voltage 1000mV. 714 * 715 * - When you have edited all of the states as needed, write "c" (commit) 716 * to the file to commit your changes 717 * 718 * - If you want to reset to the default power levels, write "r" (reset) 719 * to the file to reset them 720 * 721 */ 722 723static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 724 struct device_attribute *attr, 725 const char *buf, 726 size_t count) 727{ 728 struct drm_device *ddev = dev_get_drvdata(dev); 729 struct amdgpu_device *adev = ddev->dev_private; 730 int ret; 731 uint32_t parameter_size = 0; 732 long parameter[64]; 733 char buf_cpy[128]; 734 char *tmp_str; 735 char *sub_str; 736 const char delimiter[3] = {' ', '\n', '\0'}; 737 uint32_t type; 738 739 if (amdgpu_sriov_vf(adev)) 740 return -EINVAL; 741 742 if (count > 127) 743 return -EINVAL; 744 745 if (*buf == 's') 746 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 747 else if (*buf == 'm') 748 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 749 else if(*buf == 'r') 750 type = PP_OD_RESTORE_DEFAULT_TABLE; 751 else if (*buf == 'c') 752 type = PP_OD_COMMIT_DPM_TABLE; 753 else if (!strncmp(buf, "vc", 2)) 754 type = PP_OD_EDIT_VDDC_CURVE; 755 else 756 return -EINVAL; 757 758 memcpy(buf_cpy, buf, count+1); 759 760 tmp_str = buf_cpy; 761 762 if (type == PP_OD_EDIT_VDDC_CURVE) 763 tmp_str++; 764 while (isspace(*++tmp_str)); 765 766 while (tmp_str[0]) { 767 sub_str = strsep(&tmp_str, delimiter); 768 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 769 if (ret) 770 return -EINVAL; 771 parameter_size++; 772 773 while (isspace(*tmp_str)) 774 tmp_str++; 775 } 776 777 ret = pm_runtime_get_sync(ddev->dev); 778 if (ret < 0) 779 return ret; 780 781 if (is_support_sw_smu(adev)) { 782 ret = smu_od_edit_dpm_table(&adev->smu, type, 783 parameter, parameter_size); 784 785 if (ret) { 786 pm_runtime_mark_last_busy(ddev->dev); 787 pm_runtime_put_autosuspend(ddev->dev); 788 return -EINVAL; 789 } 790 } else { 791 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { 792 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 793 parameter, parameter_size); 794 if (ret) { 795 pm_runtime_mark_last_busy(ddev->dev); 796 pm_runtime_put_autosuspend(ddev->dev); 797 return -EINVAL; 798 } 799 } 800 801 if (type == PP_OD_COMMIT_DPM_TABLE) { 802 if (adev->powerplay.pp_funcs->dispatch_tasks) { 803 amdgpu_dpm_dispatch_task(adev, 804 AMD_PP_TASK_READJUST_POWER_STATE, 805 NULL); 806 pm_runtime_mark_last_busy(ddev->dev); 807 pm_runtime_put_autosuspend(ddev->dev); 808 return count; 809 } else { 810 pm_runtime_mark_last_busy(ddev->dev); 811 pm_runtime_put_autosuspend(ddev->dev); 812 return -EINVAL; 813 } 814 } 815 } 816 pm_runtime_mark_last_busy(ddev->dev); 817 pm_runtime_put_autosuspend(ddev->dev); 818 819 return count; 820} 821 822static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 823 struct device_attribute *attr, 824 char *buf) 825{ 826 struct drm_device *ddev = dev_get_drvdata(dev); 827 struct amdgpu_device *adev = ddev->dev_private; 828 ssize_t size; 829 int ret; 830 831 if (amdgpu_sriov_vf(adev)) 832 return 0; 833 834 ret = pm_runtime_get_sync(ddev->dev); 835 if (ret < 0) 836 return ret; 837 838 if (is_support_sw_smu(adev)) { 839 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 840 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); 841 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); 842 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); 843 } else if (adev->powerplay.pp_funcs->print_clock_levels) { 844 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 845 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 846 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); 847 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 848 } else { 849 size = snprintf(buf, PAGE_SIZE, "\n"); 850 } 851 pm_runtime_mark_last_busy(ddev->dev); 852 pm_runtime_put_autosuspend(ddev->dev); 853 854 return size; 855} 856 857/** 858 * DOC: pp_features 859 * 860 * The amdgpu driver provides a sysfs API for adjusting what powerplay 861 * features to be enabled. The file pp_features is used for this. And 862 * this is only available for Vega10 and later dGPUs. 863 * 864 * Reading back the file will show you the followings: 865 * - Current ppfeature masks 866 * - List of the all supported powerplay features with their naming, 867 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). 868 * 869 * To manually enable or disable a specific feature, just set or clear 870 * the corresponding bit from original ppfeature masks and input the 871 * new ppfeature masks. 872 */ 873static ssize_t amdgpu_set_pp_feature_status(struct device *dev, 874 struct device_attribute *attr, 875 const char *buf, 876 size_t count) 877{ 878 struct drm_device *ddev = dev_get_drvdata(dev); 879 struct amdgpu_device *adev = ddev->dev_private; 880 uint64_t featuremask; 881 int ret; 882 883 if (amdgpu_sriov_vf(adev)) 884 return -EINVAL; 885 886 ret = kstrtou64(buf, 0, &featuremask); 887 if (ret) 888 return -EINVAL; 889 890 pr_debug("featuremask = 0x%llx\n", featuremask); 891 892 ret = pm_runtime_get_sync(ddev->dev); 893 if (ret < 0) 894 return ret; 895 896 if (is_support_sw_smu(adev)) { 897 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); 898 if (ret) { 899 pm_runtime_mark_last_busy(ddev->dev); 900 pm_runtime_put_autosuspend(ddev->dev); 901 return -EINVAL; 902 } 903 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { 904 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 905 if (ret) { 906 pm_runtime_mark_last_busy(ddev->dev); 907 pm_runtime_put_autosuspend(ddev->dev); 908 return -EINVAL; 909 } 910 } 911 pm_runtime_mark_last_busy(ddev->dev); 912 pm_runtime_put_autosuspend(ddev->dev); 913 914 return count; 915} 916 917static ssize_t amdgpu_get_pp_feature_status(struct device *dev, 918 struct device_attribute *attr, 919 char *buf) 920{ 921 struct drm_device *ddev = dev_get_drvdata(dev); 922 struct amdgpu_device *adev = ddev->dev_private; 923 ssize_t size; 924 int ret; 925 926 if (amdgpu_sriov_vf(adev)) 927 return 0; 928 929 ret = pm_runtime_get_sync(ddev->dev); 930 if (ret < 0) 931 return ret; 932 933 if (is_support_sw_smu(adev)) 934 size = smu_sys_get_pp_feature_mask(&adev->smu, buf); 935 else if (adev->powerplay.pp_funcs->get_ppfeature_status) 936 size = amdgpu_dpm_get_ppfeature_status(adev, buf); 937 else 938 size = snprintf(buf, PAGE_SIZE, "\n"); 939 940 pm_runtime_mark_last_busy(ddev->dev); 941 pm_runtime_put_autosuspend(ddev->dev); 942 943 return size; 944} 945 946/** 947 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie 948 * 949 * The amdgpu driver provides a sysfs API for adjusting what power levels 950 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 951 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for 952 * this. 953 * 954 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for 955 * Vega10 and later ASICs. 956 * pp_dpm_fclk interface is only available for Vega20 and later ASICs. 957 * 958 * Reading back the files will show you the available power levels within 959 * the power state and the clock information for those levels. 960 * 961 * To manually adjust these states, first select manual using 962 * power_dpm_force_performance_level. 963 * Secondly, enter a new value for each level by inputing a string that 964 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 965 * E.g., 966 * 967 * .. code-block:: bash 968 * 969 * echo "4 5 6" > pp_dpm_sclk 970 * 971 * will enable sclk levels 4, 5, and 6. 972 * 973 * NOTE: change to the dcefclk max dpm level is not supported now 974 */ 975 976static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 977 struct device_attribute *attr, 978 char *buf) 979{ 980 struct drm_device *ddev = dev_get_drvdata(dev); 981 struct amdgpu_device *adev = ddev->dev_private; 982 ssize_t size; 983 int ret; 984 985 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 986 return 0; 987 988 ret = pm_runtime_get_sync(ddev->dev); 989 if (ret < 0) 990 return ret; 991 992 if (is_support_sw_smu(adev)) 993 size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 994 else if (adev->powerplay.pp_funcs->print_clock_levels) 995 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 996 else 997 size = snprintf(buf, PAGE_SIZE, "\n"); 998 999 pm_runtime_mark_last_busy(ddev->dev); 1000 pm_runtime_put_autosuspend(ddev->dev); 1001 1002 return size; 1003} 1004 1005/* 1006 * Worst case: 32 bits individually specified, in octal at 12 characters 1007 * per line (+1 for \n). 1008 */ 1009#define AMDGPU_MASK_BUF_MAX (32 * 13) 1010 1011static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 1012{ 1013 int ret; 1014 long level; 1015 char *sub_str = NULL; 1016 char *tmp; 1017 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 1018 const char delimiter[3] = {' ', '\n', '\0'}; 1019 size_t bytes; 1020 1021 *mask = 0; 1022 1023 bytes = min(count, sizeof(buf_cpy) - 1); 1024 memcpy(buf_cpy, buf, bytes); 1025 buf_cpy[bytes] = '\0'; 1026 tmp = buf_cpy; 1027 while (tmp[0]) { 1028 sub_str = strsep(&tmp, delimiter); 1029 if (strlen(sub_str)) { 1030 ret = kstrtol(sub_str, 0, &level); 1031 if (ret) 1032 return -EINVAL; 1033 *mask |= 1 << level; 1034 } else 1035 break; 1036 } 1037 1038 return 0; 1039} 1040 1041static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 1042 struct device_attribute *attr, 1043 const char *buf, 1044 size_t count) 1045{ 1046 struct drm_device *ddev = dev_get_drvdata(dev); 1047 struct amdgpu_device *adev = ddev->dev_private; 1048 int ret; 1049 uint32_t mask = 0; 1050 1051 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1052 return -EINVAL; 1053 1054 ret = amdgpu_read_mask(buf, count, &mask); 1055 if (ret) 1056 return ret; 1057 1058 ret = pm_runtime_get_sync(ddev->dev); 1059 if (ret < 0) 1060 return ret; 1061 1062 if (is_support_sw_smu(adev)) 1063 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); 1064 else if (adev->powerplay.pp_funcs->force_clock_level) 1065 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 1066 1067 pm_runtime_mark_last_busy(ddev->dev); 1068 pm_runtime_put_autosuspend(ddev->dev); 1069 1070 if (ret) 1071 return -EINVAL; 1072 1073 return count; 1074} 1075 1076static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 1077 struct device_attribute *attr, 1078 char *buf) 1079{ 1080 struct drm_device *ddev = dev_get_drvdata(dev); 1081 struct amdgpu_device *adev = ddev->dev_private; 1082 ssize_t size; 1083 int ret; 1084 1085 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1086 return 0; 1087 1088 ret = pm_runtime_get_sync(ddev->dev); 1089 if (ret < 0) 1090 return ret; 1091 1092 if (is_support_sw_smu(adev)) 1093 size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 1094 else if (adev->powerplay.pp_funcs->print_clock_levels) 1095 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 1096 else 1097 size = snprintf(buf, PAGE_SIZE, "\n"); 1098 1099 pm_runtime_mark_last_busy(ddev->dev); 1100 pm_runtime_put_autosuspend(ddev->dev); 1101 1102 return size; 1103} 1104 1105static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 1106 struct device_attribute *attr, 1107 const char *buf, 1108 size_t count) 1109{ 1110 struct drm_device *ddev = dev_get_drvdata(dev); 1111 struct amdgpu_device *adev = ddev->dev_private; 1112 uint32_t mask = 0; 1113 int ret; 1114 1115 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1116 return -EINVAL; 1117 1118 ret = amdgpu_read_mask(buf, count, &mask); 1119 if (ret) 1120 return ret; 1121 1122 ret = pm_runtime_get_sync(ddev->dev); 1123 if (ret < 0) 1124 return ret; 1125 1126 if (is_support_sw_smu(adev)) 1127 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); 1128 else if (adev->powerplay.pp_funcs->force_clock_level) 1129 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 1130 1131 pm_runtime_mark_last_busy(ddev->dev); 1132 pm_runtime_put_autosuspend(ddev->dev); 1133 1134 if (ret) 1135 return -EINVAL; 1136 1137 return count; 1138} 1139 1140static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 1141 struct device_attribute *attr, 1142 char *buf) 1143{ 1144 struct drm_device *ddev = dev_get_drvdata(dev); 1145 struct amdgpu_device *adev = ddev->dev_private; 1146 ssize_t size; 1147 int ret; 1148 1149 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1150 return 0; 1151 1152 ret = pm_runtime_get_sync(ddev->dev); 1153 if (ret < 0) 1154 return ret; 1155 1156 if (is_support_sw_smu(adev)) 1157 size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 1158 else if (adev->powerplay.pp_funcs->print_clock_levels) 1159 size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 1160 else 1161 size = snprintf(buf, PAGE_SIZE, "\n"); 1162 1163 pm_runtime_mark_last_busy(ddev->dev); 1164 pm_runtime_put_autosuspend(ddev->dev); 1165 1166 return size; 1167} 1168 1169static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 1170 struct device_attribute *attr, 1171 const char *buf, 1172 size_t count) 1173{ 1174 struct drm_device *ddev = dev_get_drvdata(dev); 1175 struct amdgpu_device *adev = ddev->dev_private; 1176 int ret; 1177 uint32_t mask = 0; 1178 1179 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1180 return -EINVAL; 1181 1182 ret = amdgpu_read_mask(buf, count, &mask); 1183 if (ret) 1184 return ret; 1185 1186 ret = pm_runtime_get_sync(ddev->dev); 1187 if (ret < 0) 1188 return ret; 1189 1190 if (is_support_sw_smu(adev)) 1191 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); 1192 else if (adev->powerplay.pp_funcs->force_clock_level) 1193 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); 1194 else 1195 ret = 0; 1196 1197 pm_runtime_mark_last_busy(ddev->dev); 1198 pm_runtime_put_autosuspend(ddev->dev); 1199 1200 if (ret) 1201 return -EINVAL; 1202 1203 return count; 1204} 1205 1206static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1207 struct device_attribute *attr, 1208 char *buf) 1209{ 1210 struct drm_device *ddev = dev_get_drvdata(dev); 1211 struct amdgpu_device *adev = ddev->dev_private; 1212 ssize_t size; 1213 int ret; 1214 1215 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1216 return 0; 1217 1218 ret = pm_runtime_get_sync(ddev->dev); 1219 if (ret < 0) 1220 return ret; 1221 1222 if (is_support_sw_smu(adev)) 1223 size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1224 else if (adev->powerplay.pp_funcs->print_clock_levels) 1225 size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1226 else 1227 size = snprintf(buf, PAGE_SIZE, "\n"); 1228 1229 pm_runtime_mark_last_busy(ddev->dev); 1230 pm_runtime_put_autosuspend(ddev->dev); 1231 1232 return size; 1233} 1234 1235static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1236 struct device_attribute *attr, 1237 const char *buf, 1238 size_t count) 1239{ 1240 struct drm_device *ddev = dev_get_drvdata(dev); 1241 struct amdgpu_device *adev = ddev->dev_private; 1242 int ret; 1243 uint32_t mask = 0; 1244 1245 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1246 return -EINVAL; 1247 1248 ret = amdgpu_read_mask(buf, count, &mask); 1249 if (ret) 1250 return ret; 1251 1252 ret = pm_runtime_get_sync(ddev->dev); 1253 if (ret < 0) 1254 return ret; 1255 1256 if (is_support_sw_smu(adev)) 1257 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); 1258 else if (adev->powerplay.pp_funcs->force_clock_level) 1259 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); 1260 else 1261 ret = 0; 1262 1263 pm_runtime_mark_last_busy(ddev->dev); 1264 pm_runtime_put_autosuspend(ddev->dev); 1265 1266 if (ret) 1267 return -EINVAL; 1268 1269 return count; 1270} 1271 1272static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1273 struct device_attribute *attr, 1274 char *buf) 1275{ 1276 struct drm_device *ddev = dev_get_drvdata(dev); 1277 struct amdgpu_device *adev = ddev->dev_private; 1278 ssize_t size; 1279 int ret; 1280 1281 if (amdgpu_sriov_vf(adev)) 1282 return 0; 1283 1284 ret = pm_runtime_get_sync(ddev->dev); 1285 if (ret < 0) 1286 return ret; 1287 1288 if (is_support_sw_smu(adev)) 1289 size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1290 else if (adev->powerplay.pp_funcs->print_clock_levels) 1291 size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1292 else 1293 size = snprintf(buf, PAGE_SIZE, "\n"); 1294 1295 pm_runtime_mark_last_busy(ddev->dev); 1296 pm_runtime_put_autosuspend(ddev->dev); 1297 1298 return size; 1299} 1300 1301static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1302 struct device_attribute *attr, 1303 const char *buf, 1304 size_t count) 1305{ 1306 struct drm_device *ddev = dev_get_drvdata(dev); 1307 struct amdgpu_device *adev = ddev->dev_private; 1308 int ret; 1309 uint32_t mask = 0; 1310 1311 if (amdgpu_sriov_vf(adev)) 1312 return -EINVAL; 1313 1314 ret = amdgpu_read_mask(buf, count, &mask); 1315 if (ret) 1316 return ret; 1317 1318 ret = pm_runtime_get_sync(ddev->dev); 1319 if (ret < 0) 1320 return ret; 1321 1322 if (is_support_sw_smu(adev)) 1323 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); 1324 else if (adev->powerplay.pp_funcs->force_clock_level) 1325 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); 1326 else 1327 ret = 0; 1328 1329 pm_runtime_mark_last_busy(ddev->dev); 1330 pm_runtime_put_autosuspend(ddev->dev); 1331 1332 if (ret) 1333 return -EINVAL; 1334 1335 return count; 1336} 1337 1338static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1339 struct device_attribute *attr, 1340 char *buf) 1341{ 1342 struct drm_device *ddev = dev_get_drvdata(dev); 1343 struct amdgpu_device *adev = ddev->dev_private; 1344 ssize_t size; 1345 int ret; 1346 1347 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1348 return 0; 1349 1350 ret = pm_runtime_get_sync(ddev->dev); 1351 if (ret < 0) 1352 return ret; 1353 1354 if (is_support_sw_smu(adev)) 1355 size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1356 else if (adev->powerplay.pp_funcs->print_clock_levels) 1357 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1358 else 1359 size = snprintf(buf, PAGE_SIZE, "\n"); 1360 1361 pm_runtime_mark_last_busy(ddev->dev); 1362 pm_runtime_put_autosuspend(ddev->dev); 1363 1364 return size; 1365} 1366 1367static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1368 struct device_attribute *attr, 1369 const char *buf, 1370 size_t count) 1371{ 1372 struct drm_device *ddev = dev_get_drvdata(dev); 1373 struct amdgpu_device *adev = ddev->dev_private; 1374 int ret; 1375 uint32_t mask = 0; 1376 1377 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1378 return -EINVAL; 1379 1380 ret = amdgpu_read_mask(buf, count, &mask); 1381 if (ret) 1382 return ret; 1383 1384 ret = pm_runtime_get_sync(ddev->dev); 1385 if (ret < 0) 1386 return ret; 1387 1388 if (is_support_sw_smu(adev)) 1389 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); 1390 else if (adev->powerplay.pp_funcs->force_clock_level) 1391 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 1392 else 1393 ret = 0; 1394 1395 pm_runtime_mark_last_busy(ddev->dev); 1396 pm_runtime_put_autosuspend(ddev->dev); 1397 1398 if (ret) 1399 return -EINVAL; 1400 1401 return count; 1402} 1403 1404static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1405 struct device_attribute *attr, 1406 char *buf) 1407{ 1408 struct drm_device *ddev = dev_get_drvdata(dev); 1409 struct amdgpu_device *adev = ddev->dev_private; 1410 uint32_t value = 0; 1411 int ret; 1412 1413 if (amdgpu_sriov_vf(adev)) 1414 return 0; 1415 1416 ret = pm_runtime_get_sync(ddev->dev); 1417 if (ret < 0) 1418 return ret; 1419 1420 if (is_support_sw_smu(adev)) 1421 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); 1422 else if (adev->powerplay.pp_funcs->get_sclk_od) 1423 value = amdgpu_dpm_get_sclk_od(adev); 1424 1425 pm_runtime_mark_last_busy(ddev->dev); 1426 pm_runtime_put_autosuspend(ddev->dev); 1427 1428 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1429} 1430 1431static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1432 struct device_attribute *attr, 1433 const char *buf, 1434 size_t count) 1435{ 1436 struct drm_device *ddev = dev_get_drvdata(dev); 1437 struct amdgpu_device *adev = ddev->dev_private; 1438 int ret; 1439 long int value; 1440 1441 if (amdgpu_sriov_vf(adev)) 1442 return -EINVAL; 1443 1444 ret = kstrtol(buf, 0, &value); 1445 1446 if (ret) 1447 return -EINVAL; 1448 1449 ret = pm_runtime_get_sync(ddev->dev); 1450 if (ret < 0) 1451 return ret; 1452 1453 if (is_support_sw_smu(adev)) { 1454 value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); 1455 } else { 1456 if (adev->powerplay.pp_funcs->set_sclk_od) 1457 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1458 1459 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1460 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1461 } else { 1462 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1463 amdgpu_pm_compute_clocks(adev); 1464 } 1465 } 1466 1467 pm_runtime_mark_last_busy(ddev->dev); 1468 pm_runtime_put_autosuspend(ddev->dev); 1469 1470 return count; 1471} 1472 1473static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 1474 struct device_attribute *attr, 1475 char *buf) 1476{ 1477 struct drm_device *ddev = dev_get_drvdata(dev); 1478 struct amdgpu_device *adev = ddev->dev_private; 1479 uint32_t value = 0; 1480 int ret; 1481 1482 if (amdgpu_sriov_vf(adev)) 1483 return 0; 1484 1485 ret = pm_runtime_get_sync(ddev->dev); 1486 if (ret < 0) 1487 return ret; 1488 1489 if (is_support_sw_smu(adev)) 1490 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); 1491 else if (adev->powerplay.pp_funcs->get_mclk_od) 1492 value = amdgpu_dpm_get_mclk_od(adev); 1493 1494 pm_runtime_mark_last_busy(ddev->dev); 1495 pm_runtime_put_autosuspend(ddev->dev); 1496 1497 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1498} 1499 1500static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 1501 struct device_attribute *attr, 1502 const char *buf, 1503 size_t count) 1504{ 1505 struct drm_device *ddev = dev_get_drvdata(dev); 1506 struct amdgpu_device *adev = ddev->dev_private; 1507 int ret; 1508 long int value; 1509 1510 if (amdgpu_sriov_vf(adev)) 1511 return 0; 1512 1513 ret = kstrtol(buf, 0, &value); 1514 1515 if (ret) 1516 return -EINVAL; 1517 1518 ret = pm_runtime_get_sync(ddev->dev); 1519 if (ret < 0) 1520 return ret; 1521 1522 if (is_support_sw_smu(adev)) { 1523 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value); 1524 } else { 1525 if (adev->powerplay.pp_funcs->set_mclk_od) 1526 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 1527 1528 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1529 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1530 } else { 1531 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1532 amdgpu_pm_compute_clocks(adev); 1533 } 1534 } 1535 1536 pm_runtime_mark_last_busy(ddev->dev); 1537 pm_runtime_put_autosuspend(ddev->dev); 1538 1539 return count; 1540} 1541 1542/** 1543 * DOC: pp_power_profile_mode 1544 * 1545 * The amdgpu driver provides a sysfs API for adjusting the heuristics 1546 * related to switching between power levels in a power state. The file 1547 * pp_power_profile_mode is used for this. 1548 * 1549 * Reading this file outputs a list of all of the predefined power profiles 1550 * and the relevant heuristics settings for that profile. 1551 * 1552 * To select a profile or create a custom profile, first select manual using 1553 * power_dpm_force_performance_level. Writing the number of a predefined 1554 * profile to pp_power_profile_mode will enable those heuristics. To 1555 * create a custom set of heuristics, write a string of numbers to the file 1556 * starting with the number of the custom profile along with a setting 1557 * for each heuristic parameter. Due to differences across asic families 1558 * the heuristic parameters vary from family to family. 1559 * 1560 */ 1561 1562static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 1563 struct device_attribute *attr, 1564 char *buf) 1565{ 1566 struct drm_device *ddev = dev_get_drvdata(dev); 1567 struct amdgpu_device *adev = ddev->dev_private; 1568 ssize_t size; 1569 int ret; 1570 1571 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1572 return 0; 1573 1574 ret = pm_runtime_get_sync(ddev->dev); 1575 if (ret < 0) 1576 return ret; 1577 1578 if (is_support_sw_smu(adev)) 1579 size = smu_get_power_profile_mode(&adev->smu, buf); 1580 else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1581 size = amdgpu_dpm_get_power_profile_mode(adev, buf); 1582 else 1583 size = snprintf(buf, PAGE_SIZE, "\n"); 1584 1585 pm_runtime_mark_last_busy(ddev->dev); 1586 pm_runtime_put_autosuspend(ddev->dev); 1587 1588 return size; 1589} 1590 1591 1592static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 1593 struct device_attribute *attr, 1594 const char *buf, 1595 size_t count) 1596{ 1597 int ret = 0xff; 1598 struct drm_device *ddev = dev_get_drvdata(dev); 1599 struct amdgpu_device *adev = ddev->dev_private; 1600 uint32_t parameter_size = 0; 1601 long parameter[64]; 1602 char *sub_str, buf_cpy[128]; 1603 char *tmp_str; 1604 uint32_t i = 0; 1605 char tmp[2]; 1606 long int profile_mode = 0; 1607 const char delimiter[3] = {' ', '\n', '\0'}; 1608 1609 tmp[0] = *(buf); 1610 tmp[1] = '\0'; 1611 ret = kstrtol(tmp, 0, &profile_mode); 1612 if (ret) 1613 return -EINVAL; 1614 1615 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1616 return -EINVAL; 1617 1618 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1619 if (count < 2 || count > 127) 1620 return -EINVAL; 1621 while (isspace(*++buf)) 1622 i++; 1623 memcpy(buf_cpy, buf, count-i); 1624 tmp_str = buf_cpy; 1625 while (tmp_str[0]) { 1626 sub_str = strsep(&tmp_str, delimiter); 1627 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 1628 if (ret) 1629 return -EINVAL; 1630 parameter_size++; 1631 while (isspace(*tmp_str)) 1632 tmp_str++; 1633 } 1634 } 1635 parameter[parameter_size] = profile_mode; 1636 1637 ret = pm_runtime_get_sync(ddev->dev); 1638 if (ret < 0) 1639 return ret; 1640 1641 if (is_support_sw_smu(adev)) 1642 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); 1643 else if (adev->powerplay.pp_funcs->set_power_profile_mode) 1644 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1645 1646 pm_runtime_mark_last_busy(ddev->dev); 1647 pm_runtime_put_autosuspend(ddev->dev); 1648 1649 if (!ret) 1650 return count; 1651 1652 return -EINVAL; 1653} 1654 1655/** 1656 * DOC: busy_percent 1657 * 1658 * The amdgpu driver provides a sysfs API for reading how busy the GPU 1659 * is as a percentage. The file gpu_busy_percent is used for this. 1660 * The SMU firmware computes a percentage of load based on the 1661 * aggregate activity level in the IP cores. 1662 */ 1663static ssize_t amdgpu_get_busy_percent(struct device *dev, 1664 struct device_attribute *attr, 1665 char *buf) 1666{ 1667 struct drm_device *ddev = dev_get_drvdata(dev); 1668 struct amdgpu_device *adev = ddev->dev_private; 1669 int r, value, size = sizeof(value); 1670 1671 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1672 return 0; 1673 1674 r = pm_runtime_get_sync(ddev->dev); 1675 if (r < 0) 1676 return r; 1677 1678 /* read the IP busy sensor */ 1679 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1680 (void *)&value, &size); 1681 1682 pm_runtime_mark_last_busy(ddev->dev); 1683 pm_runtime_put_autosuspend(ddev->dev); 1684 1685 if (r) 1686 return r; 1687 1688 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1689} 1690 1691/** 1692 * DOC: mem_busy_percent 1693 * 1694 * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1695 * is as a percentage. The file mem_busy_percent is used for this. 1696 * The SMU firmware computes a percentage of load based on the 1697 * aggregate activity level in the IP cores. 1698 */ 1699static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, 1700 struct device_attribute *attr, 1701 char *buf) 1702{ 1703 struct drm_device *ddev = dev_get_drvdata(dev); 1704 struct amdgpu_device *adev = ddev->dev_private; 1705 int r, value, size = sizeof(value); 1706 1707 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1708 return 0; 1709 1710 r = pm_runtime_get_sync(ddev->dev); 1711 if (r < 0) 1712 return r; 1713 1714 /* read the IP busy sensor */ 1715 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, 1716 (void *)&value, &size); 1717 1718 pm_runtime_mark_last_busy(ddev->dev); 1719 pm_runtime_put_autosuspend(ddev->dev); 1720 1721 if (r) 1722 return r; 1723 1724 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1725} 1726 1727/** 1728 * DOC: pcie_bw 1729 * 1730 * The amdgpu driver provides a sysfs API for estimating how much data 1731 * has been received and sent by the GPU in the last second through PCIe. 1732 * The file pcie_bw is used for this. 1733 * The Perf counters count the number of received and sent messages and return 1734 * those values, as well as the maximum payload size of a PCIe packet (mps). 1735 * Note that it is not possible to easily and quickly obtain the size of each 1736 * packet transmitted, so we output the max payload size (mps) to allow for 1737 * quick estimation of the PCIe bandwidth usage 1738 */ 1739static ssize_t amdgpu_get_pcie_bw(struct device *dev, 1740 struct device_attribute *attr, 1741 char *buf) 1742{ 1743 struct drm_device *ddev = dev_get_drvdata(dev); 1744 struct amdgpu_device *adev = ddev->dev_private; 1745 uint64_t count0, count1; 1746 int ret; 1747 1748 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1749 return 0; 1750 1751 ret = pm_runtime_get_sync(ddev->dev); 1752 if (ret < 0) 1753 return ret; 1754 1755 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1756 1757 pm_runtime_mark_last_busy(ddev->dev); 1758 pm_runtime_put_autosuspend(ddev->dev); 1759 1760 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 1761 count0, count1, pcie_get_mps(adev->pdev)); 1762} 1763 1764/** 1765 * DOC: unique_id 1766 * 1767 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU 1768 * The file unique_id is used for this. 1769 * This will provide a Unique ID that will persist from machine to machine 1770 * 1771 * NOTE: This will only work for GFX9 and newer. This file will be absent 1772 * on unsupported ASICs (GFX8 and older) 1773 */ 1774static ssize_t amdgpu_get_unique_id(struct device *dev, 1775 struct device_attribute *attr, 1776 char *buf) 1777{ 1778 struct drm_device *ddev = dev_get_drvdata(dev); 1779 struct amdgpu_device *adev = ddev->dev_private; 1780 1781 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1782 return 0; 1783 1784 if (adev->unique_id) 1785 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); 1786 1787 return 0; 1788} 1789 1790static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 1791static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 1792 amdgpu_get_dpm_forced_performance_level, 1793 amdgpu_set_dpm_forced_performance_level); 1794static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 1795static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 1796static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 1797 amdgpu_get_pp_force_state, 1798 amdgpu_set_pp_force_state); 1799static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 1800 amdgpu_get_pp_table, 1801 amdgpu_set_pp_table); 1802static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 1803 amdgpu_get_pp_dpm_sclk, 1804 amdgpu_set_pp_dpm_sclk); 1805static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 1806 amdgpu_get_pp_dpm_mclk, 1807 amdgpu_set_pp_dpm_mclk); 1808static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR, 1809 amdgpu_get_pp_dpm_socclk, 1810 amdgpu_set_pp_dpm_socclk); 1811static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR, 1812 amdgpu_get_pp_dpm_fclk, 1813 amdgpu_set_pp_dpm_fclk); 1814static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR, 1815 amdgpu_get_pp_dpm_dcefclk, 1816 amdgpu_set_pp_dpm_dcefclk); 1817static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 1818 amdgpu_get_pp_dpm_pcie, 1819 amdgpu_set_pp_dpm_pcie); 1820static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, 1821 amdgpu_get_pp_sclk_od, 1822 amdgpu_set_pp_sclk_od); 1823static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 1824 amdgpu_get_pp_mclk_od, 1825 amdgpu_set_pp_mclk_od); 1826static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR, 1827 amdgpu_get_pp_power_profile_mode, 1828 amdgpu_set_pp_power_profile_mode); 1829static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR, 1830 amdgpu_get_pp_od_clk_voltage, 1831 amdgpu_set_pp_od_clk_voltage); 1832static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, 1833 amdgpu_get_busy_percent, NULL); 1834static DEVICE_ATTR(mem_busy_percent, S_IRUGO, 1835 amdgpu_get_memory_busy_percent, NULL); 1836static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL); 1837static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR, 1838 amdgpu_get_pp_feature_status, 1839 amdgpu_set_pp_feature_status); 1840static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL); 1841 1842static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 1843 struct device_attribute *attr, 1844 char *buf) 1845{ 1846 struct amdgpu_device *adev = dev_get_drvdata(dev); 1847 int channel = to_sensor_dev_attr(attr)->index; 1848 int r, temp = 0, size = sizeof(temp); 1849 1850 if (channel >= PP_TEMP_MAX) 1851 return -EINVAL; 1852 1853 r = pm_runtime_get_sync(adev->ddev->dev); 1854 if (r < 0) 1855 return r; 1856 1857 switch (channel) { 1858 case PP_TEMP_JUNCTION: 1859 /* get current junction temperature */ 1860 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 1861 (void *)&temp, &size); 1862 break; 1863 case PP_TEMP_EDGE: 1864 /* get current edge temperature */ 1865 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 1866 (void *)&temp, &size); 1867 break; 1868 case PP_TEMP_MEM: 1869 /* get current memory temperature */ 1870 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 1871 (void *)&temp, &size); 1872 break; 1873 default: 1874 r = -EINVAL; 1875 break; 1876 } 1877 1878 pm_runtime_mark_last_busy(adev->ddev->dev); 1879 pm_runtime_put_autosuspend(adev->ddev->dev); 1880 1881 if (r) 1882 return r; 1883 1884 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1885} 1886 1887static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 1888 struct device_attribute *attr, 1889 char *buf) 1890{ 1891 struct amdgpu_device *adev = dev_get_drvdata(dev); 1892 int hyst = to_sensor_dev_attr(attr)->index; 1893 int temp; 1894 1895 if (hyst) 1896 temp = adev->pm.dpm.thermal.min_temp; 1897 else 1898 temp = adev->pm.dpm.thermal.max_temp; 1899 1900 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1901} 1902 1903static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 1904 struct device_attribute *attr, 1905 char *buf) 1906{ 1907 struct amdgpu_device *adev = dev_get_drvdata(dev); 1908 int hyst = to_sensor_dev_attr(attr)->index; 1909 int temp; 1910 1911 if (hyst) 1912 temp = adev->pm.dpm.thermal.min_hotspot_temp; 1913 else 1914 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 1915 1916 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1917} 1918 1919static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 1920 struct device_attribute *attr, 1921 char *buf) 1922{ 1923 struct amdgpu_device *adev = dev_get_drvdata(dev); 1924 int hyst = to_sensor_dev_attr(attr)->index; 1925 int temp; 1926 1927 if (hyst) 1928 temp = adev->pm.dpm.thermal.min_mem_temp; 1929 else 1930 temp = adev->pm.dpm.thermal.max_mem_crit_temp; 1931 1932 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1933} 1934 1935static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 1936 struct device_attribute *attr, 1937 char *buf) 1938{ 1939 int channel = to_sensor_dev_attr(attr)->index; 1940 1941 if (channel >= PP_TEMP_MAX) 1942 return -EINVAL; 1943 1944 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); 1945} 1946 1947static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 1948 struct device_attribute *attr, 1949 char *buf) 1950{ 1951 struct amdgpu_device *adev = dev_get_drvdata(dev); 1952 int channel = to_sensor_dev_attr(attr)->index; 1953 int temp = 0; 1954 1955 if (channel >= PP_TEMP_MAX) 1956 return -EINVAL; 1957 1958 switch (channel) { 1959 case PP_TEMP_JUNCTION: 1960 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 1961 break; 1962 case PP_TEMP_EDGE: 1963 temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 1964 break; 1965 case PP_TEMP_MEM: 1966 temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 1967 break; 1968 } 1969 1970 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1971} 1972 1973static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 1974 struct device_attribute *attr, 1975 char *buf) 1976{ 1977 struct amdgpu_device *adev = dev_get_drvdata(dev); 1978 u32 pwm_mode = 0; 1979 int ret; 1980 1981 ret = pm_runtime_get_sync(adev->ddev->dev); 1982 if (ret < 0) 1983 return ret; 1984 1985 if (is_support_sw_smu(adev)) { 1986 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1987 } else { 1988 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 1989 pm_runtime_mark_last_busy(adev->ddev->dev); 1990 pm_runtime_put_autosuspend(adev->ddev->dev); 1991 return -EINVAL; 1992 } 1993 1994 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 1995 } 1996 1997 pm_runtime_mark_last_busy(adev->ddev->dev); 1998 pm_runtime_put_autosuspend(adev->ddev->dev); 1999 2000 return sprintf(buf, "%i\n", pwm_mode); 2001} 2002 2003static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 2004 struct device_attribute *attr, 2005 const char *buf, 2006 size_t count) 2007{ 2008 struct amdgpu_device *adev = dev_get_drvdata(dev); 2009 int err, ret; 2010 int value; 2011 2012 err = kstrtoint(buf, 10, &value); 2013 if (err) 2014 return err; 2015 2016 ret = pm_runtime_get_sync(adev->ddev->dev); 2017 if (ret < 0) 2018 return ret; 2019 2020 if (is_support_sw_smu(adev)) { 2021 smu_set_fan_control_mode(&adev->smu, value); 2022 } else { 2023 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2024 pm_runtime_mark_last_busy(adev->ddev->dev); 2025 pm_runtime_put_autosuspend(adev->ddev->dev); 2026 return -EINVAL; 2027 } 2028 2029 amdgpu_dpm_set_fan_control_mode(adev, value); 2030 } 2031 2032 pm_runtime_mark_last_busy(adev->ddev->dev); 2033 pm_runtime_put_autosuspend(adev->ddev->dev); 2034 2035 return count; 2036} 2037 2038static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 2039 struct device_attribute *attr, 2040 char *buf) 2041{ 2042 return sprintf(buf, "%i\n", 0); 2043} 2044 2045static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 2046 struct device_attribute *attr, 2047 char *buf) 2048{ 2049 return sprintf(buf, "%i\n", 255); 2050} 2051 2052static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 2053 struct device_attribute *attr, 2054 const char *buf, size_t count) 2055{ 2056 struct amdgpu_device *adev = dev_get_drvdata(dev); 2057 int err; 2058 u32 value; 2059 u32 pwm_mode; 2060 2061 err = pm_runtime_get_sync(adev->ddev->dev); 2062 if (err < 0) 2063 return err; 2064 2065 if (is_support_sw_smu(adev)) 2066 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2067 else 2068 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2069 2070 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2071 pr_info("manual fan speed control should be enabled first\n"); 2072 pm_runtime_mark_last_busy(adev->ddev->dev); 2073 pm_runtime_put_autosuspend(adev->ddev->dev); 2074 return -EINVAL; 2075 } 2076 2077 err = kstrtou32(buf, 10, &value); 2078 if (err) { 2079 pm_runtime_mark_last_busy(adev->ddev->dev); 2080 pm_runtime_put_autosuspend(adev->ddev->dev); 2081 return err; 2082 } 2083 2084 value = (value * 100) / 255; 2085 2086 if (is_support_sw_smu(adev)) 2087 err = smu_set_fan_speed_percent(&adev->smu, value); 2088 else if (adev->powerplay.pp_funcs->set_fan_speed_percent) 2089 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 2090 else 2091 err = -EINVAL; 2092 2093 pm_runtime_mark_last_busy(adev->ddev->dev); 2094 pm_runtime_put_autosuspend(adev->ddev->dev); 2095 2096 if (err) 2097 return err; 2098 2099 return count; 2100} 2101 2102static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 2103 struct device_attribute *attr, 2104 char *buf) 2105{ 2106 struct amdgpu_device *adev = dev_get_drvdata(dev); 2107 int err; 2108 u32 speed = 0; 2109 2110 err = pm_runtime_get_sync(adev->ddev->dev); 2111 if (err < 0) 2112 return err; 2113 2114 if (is_support_sw_smu(adev)) 2115 err = smu_get_fan_speed_percent(&adev->smu, &speed); 2116 else if (adev->powerplay.pp_funcs->get_fan_speed_percent) 2117 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 2118 else 2119 err = -EINVAL; 2120 2121 pm_runtime_mark_last_busy(adev->ddev->dev); 2122 pm_runtime_put_autosuspend(adev->ddev->dev); 2123 2124 if (err) 2125 return err; 2126 2127 speed = (speed * 255) / 100; 2128 2129 return sprintf(buf, "%i\n", speed); 2130} 2131 2132static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 2133 struct device_attribute *attr, 2134 char *buf) 2135{ 2136 struct amdgpu_device *adev = dev_get_drvdata(dev); 2137 int err; 2138 u32 speed = 0; 2139 2140 err = pm_runtime_get_sync(adev->ddev->dev); 2141 if (err < 0) 2142 return err; 2143 2144 if (is_support_sw_smu(adev)) 2145 err = smu_get_fan_speed_rpm(&adev->smu, &speed); 2146 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2147 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 2148 else 2149 err = -EINVAL; 2150 2151 pm_runtime_mark_last_busy(adev->ddev->dev); 2152 pm_runtime_put_autosuspend(adev->ddev->dev); 2153 2154 if (err) 2155 return err; 2156 2157 return sprintf(buf, "%i\n", speed); 2158} 2159 2160static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 2161 struct device_attribute *attr, 2162 char *buf) 2163{ 2164 struct amdgpu_device *adev = dev_get_drvdata(dev); 2165 u32 min_rpm = 0; 2166 u32 size = sizeof(min_rpm); 2167 int r; 2168 2169 r = pm_runtime_get_sync(adev->ddev->dev); 2170 if (r < 0) 2171 return r; 2172 2173 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2174 (void *)&min_rpm, &size); 2175 2176 pm_runtime_mark_last_busy(adev->ddev->dev); 2177 pm_runtime_put_autosuspend(adev->ddev->dev); 2178 2179 if (r) 2180 return r; 2181 2182 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); 2183} 2184 2185static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 2186 struct device_attribute *attr, 2187 char *buf) 2188{ 2189 struct amdgpu_device *adev = dev_get_drvdata(dev); 2190 u32 max_rpm = 0; 2191 u32 size = sizeof(max_rpm); 2192 int r; 2193 2194 r = pm_runtime_get_sync(adev->ddev->dev); 2195 if (r < 0) 2196 return r; 2197 2198 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2199 (void *)&max_rpm, &size); 2200 2201 pm_runtime_mark_last_busy(adev->ddev->dev); 2202 pm_runtime_put_autosuspend(adev->ddev->dev); 2203 2204 if (r) 2205 return r; 2206 2207 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); 2208} 2209 2210static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 2211 struct device_attribute *attr, 2212 char *buf) 2213{ 2214 struct amdgpu_device *adev = dev_get_drvdata(dev); 2215 int err; 2216 u32 rpm = 0; 2217 2218 err = pm_runtime_get_sync(adev->ddev->dev); 2219 if (err < 0) 2220 return err; 2221 2222 if (is_support_sw_smu(adev)) 2223 err = smu_get_fan_speed_rpm(&adev->smu, &rpm); 2224 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2225 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 2226 else 2227 err = -EINVAL; 2228 2229 pm_runtime_mark_last_busy(adev->ddev->dev); 2230 pm_runtime_put_autosuspend(adev->ddev->dev); 2231 2232 if (err) 2233 return err; 2234 2235 return sprintf(buf, "%i\n", rpm); 2236} 2237 2238static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, 2239 struct device_attribute *attr, 2240 const char *buf, size_t count) 2241{ 2242 struct amdgpu_device *adev = dev_get_drvdata(dev); 2243 int err; 2244 u32 value; 2245 u32 pwm_mode; 2246 2247 err = pm_runtime_get_sync(adev->ddev->dev); 2248 if (err < 0) 2249 return err; 2250 2251 if (is_support_sw_smu(adev)) 2252 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2253 else 2254 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2255 2256 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2257 pm_runtime_mark_last_busy(adev->ddev->dev); 2258 pm_runtime_put_autosuspend(adev->ddev->dev); 2259 return -ENODATA; 2260 } 2261 2262 err = kstrtou32(buf, 10, &value); 2263 if (err) { 2264 pm_runtime_mark_last_busy(adev->ddev->dev); 2265 pm_runtime_put_autosuspend(adev->ddev->dev); 2266 return err; 2267 } 2268 2269 if (is_support_sw_smu(adev)) 2270 err = smu_set_fan_speed_rpm(&adev->smu, value); 2271 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) 2272 err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 2273 else 2274 err = -EINVAL; 2275 2276 pm_runtime_mark_last_busy(adev->ddev->dev); 2277 pm_runtime_put_autosuspend(adev->ddev->dev); 2278 2279 if (err) 2280 return err; 2281 2282 return count; 2283} 2284 2285static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 2286 struct device_attribute *attr, 2287 char *buf) 2288{ 2289 struct amdgpu_device *adev = dev_get_drvdata(dev); 2290 u32 pwm_mode = 0; 2291 int ret; 2292 2293 ret = pm_runtime_get_sync(adev->ddev->dev); 2294 if (ret < 0) 2295 return ret; 2296 2297 if (is_support_sw_smu(adev)) { 2298 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2299 } else { 2300 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 2301 pm_runtime_mark_last_busy(adev->ddev->dev); 2302 pm_runtime_put_autosuspend(adev->ddev->dev); 2303 return -EINVAL; 2304 } 2305 2306 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2307 } 2308 2309 pm_runtime_mark_last_busy(adev->ddev->dev); 2310 pm_runtime_put_autosuspend(adev->ddev->dev); 2311 2312 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); 2313} 2314 2315static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 2316 struct device_attribute *attr, 2317 const char *buf, 2318 size_t count) 2319{ 2320 struct amdgpu_device *adev = dev_get_drvdata(dev); 2321 int err; 2322 int value; 2323 u32 pwm_mode; 2324 2325 err = kstrtoint(buf, 10, &value); 2326 if (err) 2327 return err; 2328 2329 if (value == 0) 2330 pwm_mode = AMD_FAN_CTRL_AUTO; 2331 else if (value == 1) 2332 pwm_mode = AMD_FAN_CTRL_MANUAL; 2333 else 2334 return -EINVAL; 2335 2336 err = pm_runtime_get_sync(adev->ddev->dev); 2337 if (err < 0) 2338 return err; 2339 2340 if (is_support_sw_smu(adev)) { 2341 smu_set_fan_control_mode(&adev->smu, pwm_mode); 2342 } else { 2343 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2344 pm_runtime_mark_last_busy(adev->ddev->dev); 2345 pm_runtime_put_autosuspend(adev->ddev->dev); 2346 return -EINVAL; 2347 } 2348 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2349 } 2350 2351 pm_runtime_mark_last_busy(adev->ddev->dev); 2352 pm_runtime_put_autosuspend(adev->ddev->dev); 2353 2354 return count; 2355} 2356 2357static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 2358 struct device_attribute *attr, 2359 char *buf) 2360{ 2361 struct amdgpu_device *adev = dev_get_drvdata(dev); 2362 u32 vddgfx; 2363 int r, size = sizeof(vddgfx); 2364 2365 r = pm_runtime_get_sync(adev->ddev->dev); 2366 if (r < 0) 2367 return r; 2368 2369 /* get the voltage */ 2370 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, 2371 (void *)&vddgfx, &size); 2372 2373 pm_runtime_mark_last_busy(adev->ddev->dev); 2374 pm_runtime_put_autosuspend(adev->ddev->dev); 2375 2376 if (r) 2377 return r; 2378 2379 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); 2380} 2381 2382static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 2383 struct device_attribute *attr, 2384 char *buf) 2385{ 2386 return snprintf(buf, PAGE_SIZE, "vddgfx\n"); 2387} 2388 2389static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 2390 struct device_attribute *attr, 2391 char *buf) 2392{ 2393 struct amdgpu_device *adev = dev_get_drvdata(dev); 2394 u32 vddnb; 2395 int r, size = sizeof(vddnb); 2396 2397 /* only APUs have vddnb */ 2398 if (!(adev->flags & AMD_IS_APU)) 2399 return -EINVAL; 2400 2401 r = pm_runtime_get_sync(adev->ddev->dev); 2402 if (r < 0) 2403 return r; 2404 2405 /* get the voltage */ 2406 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, 2407 (void *)&vddnb, &size); 2408 2409 pm_runtime_mark_last_busy(adev->ddev->dev); 2410 pm_runtime_put_autosuspend(adev->ddev->dev); 2411 2412 if (r) 2413 return r; 2414 2415 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); 2416} 2417 2418static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 2419 struct device_attribute *attr, 2420 char *buf) 2421{ 2422 return snprintf(buf, PAGE_SIZE, "vddnb\n"); 2423} 2424 2425static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 2426 struct device_attribute *attr, 2427 char *buf) 2428{ 2429 struct amdgpu_device *adev = dev_get_drvdata(dev); 2430 u32 query = 0; 2431 int r, size = sizeof(u32); 2432 unsigned uw; 2433 2434 r = pm_runtime_get_sync(adev->ddev->dev); 2435 if (r < 0) 2436 return r; 2437 2438 /* get the voltage */ 2439 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, 2440 (void *)&query, &size); 2441 2442 pm_runtime_mark_last_busy(adev->ddev->dev); 2443 pm_runtime_put_autosuspend(adev->ddev->dev); 2444 2445 if (r) 2446 return r; 2447 2448 /* convert to microwatts */ 2449 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 2450 2451 return snprintf(buf, PAGE_SIZE, "%u\n", uw); 2452} 2453 2454static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 2455 struct device_attribute *attr, 2456 char *buf) 2457{ 2458 return sprintf(buf, "%i\n", 0); 2459} 2460 2461static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 2462 struct device_attribute *attr, 2463 char *buf) 2464{ 2465 struct amdgpu_device *adev = dev_get_drvdata(dev); 2466 uint32_t limit = 0; 2467 ssize_t size; 2468 int r; 2469 2470 r = pm_runtime_get_sync(adev->ddev->dev); 2471 if (r < 0) 2472 return r; 2473 2474 if (is_support_sw_smu(adev)) { 2475 smu_get_power_limit(&adev->smu, &limit, true, true); 2476 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2477 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2478 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); 2479 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2480 } else { 2481 size = snprintf(buf, PAGE_SIZE, "\n"); 2482 } 2483 2484 pm_runtime_mark_last_busy(adev->ddev->dev); 2485 pm_runtime_put_autosuspend(adev->ddev->dev); 2486 2487 return size; 2488} 2489 2490static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 2491 struct device_attribute *attr, 2492 char *buf) 2493{ 2494 struct amdgpu_device *adev = dev_get_drvdata(dev); 2495 uint32_t limit = 0; 2496 ssize_t size; 2497 int r; 2498 2499 r = pm_runtime_get_sync(adev->ddev->dev); 2500 if (r < 0) 2501 return r; 2502 2503 if (is_support_sw_smu(adev)) { 2504 smu_get_power_limit(&adev->smu, &limit, false, true); 2505 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2506 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2507 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); 2508 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2509 } else { 2510 size = snprintf(buf, PAGE_SIZE, "\n"); 2511 } 2512 2513 pm_runtime_mark_last_busy(adev->ddev->dev); 2514 pm_runtime_put_autosuspend(adev->ddev->dev); 2515 2516 return size; 2517} 2518 2519 2520static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 2521 struct device_attribute *attr, 2522 const char *buf, 2523 size_t count) 2524{ 2525 struct amdgpu_device *adev = dev_get_drvdata(dev); 2526 int err; 2527 u32 value; 2528 2529 if (amdgpu_sriov_vf(adev)) 2530 return -EINVAL; 2531 2532 err = kstrtou32(buf, 10, &value); 2533 if (err) 2534 return err; 2535 2536 value = value / 1000000; /* convert to Watt */ 2537 2538 2539 err = pm_runtime_get_sync(adev->ddev->dev); 2540 if (err < 0) 2541 return err; 2542 2543 if (is_support_sw_smu(adev)) 2544 err = smu_set_power_limit(&adev->smu, value); 2545 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) 2546 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); 2547 else 2548 err = -EINVAL; 2549 2550 pm_runtime_mark_last_busy(adev->ddev->dev); 2551 pm_runtime_put_autosuspend(adev->ddev->dev); 2552 2553 if (err) 2554 return err; 2555 2556 return count; 2557} 2558 2559static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 2560 struct device_attribute *attr, 2561 char *buf) 2562{ 2563 struct amdgpu_device *adev = dev_get_drvdata(dev); 2564 uint32_t sclk; 2565 int r, size = sizeof(sclk); 2566 2567 r = pm_runtime_get_sync(adev->ddev->dev); 2568 if (r < 0) 2569 return r; 2570 2571 /* get the sclk */ 2572 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 2573 (void *)&sclk, &size); 2574 2575 pm_runtime_mark_last_busy(adev->ddev->dev); 2576 pm_runtime_put_autosuspend(adev->ddev->dev); 2577 2578 if (r) 2579 return r; 2580 2581 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); 2582} 2583 2584static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, 2585 struct device_attribute *attr, 2586 char *buf) 2587{ 2588 return snprintf(buf, PAGE_SIZE, "sclk\n"); 2589} 2590 2591static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 2592 struct device_attribute *attr, 2593 char *buf) 2594{ 2595 struct amdgpu_device *adev = dev_get_drvdata(dev); 2596 uint32_t mclk; 2597 int r, size = sizeof(mclk); 2598 2599 r = pm_runtime_get_sync(adev->ddev->dev); 2600 if (r < 0) 2601 return r; 2602 2603 /* get the sclk */ 2604 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 2605 (void *)&mclk, &size); 2606 2607 pm_runtime_mark_last_busy(adev->ddev->dev); 2608 pm_runtime_put_autosuspend(adev->ddev->dev); 2609 2610 if (r) 2611 return r; 2612 2613 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); 2614} 2615 2616static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 2617 struct device_attribute *attr, 2618 char *buf) 2619{ 2620 return snprintf(buf, PAGE_SIZE, "mclk\n"); 2621} 2622 2623/** 2624 * DOC: hwmon 2625 * 2626 * The amdgpu driver exposes the following sensor interfaces: 2627 * 2628 * - GPU temperature (via the on-die sensor) 2629 * 2630 * - GPU voltage 2631 * 2632 * - Northbridge voltage (APUs only) 2633 * 2634 * - GPU power 2635 * 2636 * - GPU fan 2637 * 2638 * - GPU gfx/compute engine clock 2639 * 2640 * - GPU memory clock (dGPU only) 2641 * 2642 * hwmon interfaces for GPU temperature: 2643 * 2644 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius 2645 * - temp2_input and temp3_input are supported on SOC15 dGPUs only 2646 * 2647 * - temp[1-3]_label: temperature channel label 2648 * - temp2_label and temp3_label are supported on SOC15 dGPUs only 2649 * 2650 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 2651 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 2652 * 2653 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 2654 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only 2655 * 2656 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius 2657 * - these are supported on SOC15 dGPUs only 2658 * 2659 * hwmon interfaces for GPU voltage: 2660 * 2661 * - in0_input: the voltage on the GPU in millivolts 2662 * 2663 * - in1_input: the voltage on the Northbridge in millivolts 2664 * 2665 * hwmon interfaces for GPU power: 2666 * 2667 * - power1_average: average power used by the GPU in microWatts 2668 * 2669 * - power1_cap_min: minimum cap supported in microWatts 2670 * 2671 * - power1_cap_max: maximum cap supported in microWatts 2672 * 2673 * - power1_cap: selected power cap in microWatts 2674 * 2675 * hwmon interfaces for GPU fan: 2676 * 2677 * - pwm1: pulse width modulation fan level (0-255) 2678 * 2679 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 2680 * 2681 * - pwm1_min: pulse width modulation fan control minimum level (0) 2682 * 2683 * - pwm1_max: pulse width modulation fan control maximum level (255) 2684 * 2685 * - fan1_min: an minimum value Unit: revolution/min (RPM) 2686 * 2687 * - fan1_max: an maxmum value Unit: revolution/max (RPM) 2688 * 2689 * - fan1_input: fan speed in RPM 2690 * 2691 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) 2692 * 2693 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable 2694 * 2695 * hwmon interfaces for GPU clocks: 2696 * 2697 * - freq1_input: the gfx/compute clock in hertz 2698 * 2699 * - freq2_input: the memory clock in hertz 2700 * 2701 * You can use hwmon tools like sensors to view this information on your system. 2702 * 2703 */ 2704 2705static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 2706static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 2707static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 2708static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 2709static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 2710static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 2711static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 2712static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 2713static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 2714static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 2715static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 2716static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 2717static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 2718static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 2719static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 2720static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 2721static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 2722static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 2723static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 2724static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 2725static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 2726static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 2727static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 2728static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 2729static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 2730static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 2731static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 2732static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 2733static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 2734static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 2735static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 2736static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 2737static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 2738static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 2739static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 2740static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 2741 2742static struct attribute *hwmon_attributes[] = { 2743 &sensor_dev_attr_temp1_input.dev_attr.attr, 2744 &sensor_dev_attr_temp1_crit.dev_attr.attr, 2745 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 2746 &sensor_dev_attr_temp2_input.dev_attr.attr, 2747 &sensor_dev_attr_temp2_crit.dev_attr.attr, 2748 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 2749 &sensor_dev_attr_temp3_input.dev_attr.attr, 2750 &sensor_dev_attr_temp3_crit.dev_attr.attr, 2751 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 2752 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 2753 &sensor_dev_attr_temp2_emergency.dev_attr.attr, 2754 &sensor_dev_attr_temp3_emergency.dev_attr.attr, 2755 &sensor_dev_attr_temp1_label.dev_attr.attr, 2756 &sensor_dev_attr_temp2_label.dev_attr.attr, 2757 &sensor_dev_attr_temp3_label.dev_attr.attr, 2758 &sensor_dev_attr_pwm1.dev_attr.attr, 2759 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 2760 &sensor_dev_attr_pwm1_min.dev_attr.attr, 2761 &sensor_dev_attr_pwm1_max.dev_attr.attr, 2762 &sensor_dev_attr_fan1_input.dev_attr.attr, 2763 &sensor_dev_attr_fan1_min.dev_attr.attr, 2764 &sensor_dev_attr_fan1_max.dev_attr.attr, 2765 &sensor_dev_attr_fan1_target.dev_attr.attr, 2766 &sensor_dev_attr_fan1_enable.dev_attr.attr, 2767 &sensor_dev_attr_in0_input.dev_attr.attr, 2768 &sensor_dev_attr_in0_label.dev_attr.attr, 2769 &sensor_dev_attr_in1_input.dev_attr.attr, 2770 &sensor_dev_attr_in1_label.dev_attr.attr, 2771 &sensor_dev_attr_power1_average.dev_attr.attr, 2772 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 2773 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 2774 &sensor_dev_attr_power1_cap.dev_attr.attr, 2775 &sensor_dev_attr_freq1_input.dev_attr.attr, 2776 &sensor_dev_attr_freq1_label.dev_attr.attr, 2777 &sensor_dev_attr_freq2_input.dev_attr.attr, 2778 &sensor_dev_attr_freq2_label.dev_attr.attr, 2779 NULL 2780}; 2781 2782static umode_t hwmon_attributes_visible(struct kobject *kobj, 2783 struct attribute *attr, int index) 2784{ 2785 struct device *dev = kobj_to_dev(kobj); 2786 struct amdgpu_device *adev = dev_get_drvdata(dev); 2787 umode_t effective_mode = attr->mode; 2788 2789 /* under multi-vf mode, the hwmon attributes are all not supported */ 2790 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 2791 return 0; 2792 2793 /* there is no fan under pp one vf mode */ 2794 if (amdgpu_sriov_is_pp_one_vf(adev) && 2795 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2796 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2797 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2798 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2799 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2800 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2801 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2802 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2803 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2804 return 0; 2805 2806 /* Skip fan attributes if fan is not present */ 2807 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2808 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2809 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2810 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2811 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2812 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2813 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2814 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2815 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2816 return 0; 2817 2818 /* Skip fan attributes on APU */ 2819 if ((adev->flags & AMD_IS_APU) && 2820 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2821 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2822 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2823 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2824 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2825 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2826 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2827 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2828 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2829 return 0; 2830 2831 /* Skip limit attributes if DPM is not enabled */ 2832 if (!adev->pm.dpm_enabled && 2833 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 2834 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 2835 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2836 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2837 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2838 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2839 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2840 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2841 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2842 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2843 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2844 return 0; 2845 2846 if (!is_support_sw_smu(adev)) { 2847 /* mask fan attributes if we have no bindings for this asic to expose */ 2848 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 2849 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 2850 (!adev->powerplay.pp_funcs->get_fan_control_mode && 2851 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 2852 effective_mode &= ~S_IRUGO; 2853 2854 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 2855 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 2856 (!adev->powerplay.pp_funcs->set_fan_control_mode && 2857 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 2858 effective_mode &= ~S_IWUSR; 2859 } 2860 2861 if (((adev->flags & AMD_IS_APU) || 2862 adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 2863 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 2864 (attr == &sensor_dev_attr_power1_average.dev_attr.attr || 2865 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 2866 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 2867 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 2868 return 0; 2869 2870 if (!is_support_sw_smu(adev)) { 2871 /* hide max/min values if we can't both query and manage the fan */ 2872 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 2873 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 2874 (!adev->powerplay.pp_funcs->set_fan_speed_rpm && 2875 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 2876 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2877 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 2878 return 0; 2879 2880 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && 2881 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 2882 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2883 attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 2884 return 0; 2885 } 2886 2887 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 2888 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 2889 (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 2890 attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 2891 return 0; 2892 2893 /* only APUs have vddnb */ 2894 if (!(adev->flags & AMD_IS_APU) && 2895 (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 2896 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 2897 return 0; 2898 2899 /* no mclk on APUs */ 2900 if ((adev->flags & AMD_IS_APU) && 2901 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 2902 attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 2903 return 0; 2904 2905 /* only SOC15 dGPUs support hotspot and mem temperatures */ 2906 if (((adev->flags & AMD_IS_APU) || 2907 adev->asic_type < CHIP_VEGA10) && 2908 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 2909 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 2910 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || 2911 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 2912 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 2913 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 2914 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || 2915 attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 2916 attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 2917 attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 2918 attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) 2919 return 0; 2920 2921 return effective_mode; 2922} 2923 2924static const struct attribute_group hwmon_attrgroup = { 2925 .attrs = hwmon_attributes, 2926 .is_visible = hwmon_attributes_visible, 2927}; 2928 2929static const struct attribute_group *hwmon_groups[] = { 2930 &hwmon_attrgroup, 2931 NULL 2932}; 2933 2934void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 2935{ 2936 struct amdgpu_device *adev = 2937 container_of(work, struct amdgpu_device, 2938 pm.dpm.thermal.work); 2939 /* switch to the thermal state */ 2940 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 2941 int temp, size = sizeof(temp); 2942 2943 if (!adev->pm.dpm_enabled) 2944 return; 2945 2946 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 2947 (void *)&temp, &size)) { 2948 if (temp < adev->pm.dpm.thermal.min_temp) 2949 /* switch back the user state */ 2950 dpm_state = adev->pm.dpm.user_state; 2951 } else { 2952 if (adev->pm.dpm.thermal.high_to_low) 2953 /* switch back the user state */ 2954 dpm_state = adev->pm.dpm.user_state; 2955 } 2956 mutex_lock(&adev->pm.mutex); 2957 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 2958 adev->pm.dpm.thermal_active = true; 2959 else 2960 adev->pm.dpm.thermal_active = false; 2961 adev->pm.dpm.state = dpm_state; 2962 mutex_unlock(&adev->pm.mutex); 2963 2964 amdgpu_pm_compute_clocks(adev); 2965} 2966 2967static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 2968 enum amd_pm_state_type dpm_state) 2969{ 2970 int i; 2971 struct amdgpu_ps *ps; 2972 u32 ui_class; 2973 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 2974 true : false; 2975 2976 /* check if the vblank period is too short to adjust the mclk */ 2977 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { 2978 if (amdgpu_dpm_vblank_too_short(adev)) 2979 single_display = false; 2980 } 2981 2982 /* certain older asics have a separare 3D performance state, 2983 * so try that first if the user selected performance 2984 */ 2985 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 2986 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 2987 /* balanced states don't exist at the moment */ 2988 if (dpm_state == POWER_STATE_TYPE_BALANCED) 2989 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 2990 2991restart_search: 2992 /* Pick the best power state based on current conditions */ 2993 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 2994 ps = &adev->pm.dpm.ps[i]; 2995 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 2996 switch (dpm_state) { 2997 /* user states */ 2998 case POWER_STATE_TYPE_BATTERY: 2999 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 3000 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 3001 if (single_display) 3002 return ps; 3003 } else 3004 return ps; 3005 } 3006 break; 3007 case POWER_STATE_TYPE_BALANCED: 3008 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 3009 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 3010 if (single_display) 3011 return ps; 3012 } else 3013 return ps; 3014 } 3015 break; 3016 case POWER_STATE_TYPE_PERFORMANCE: 3017 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 3018 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 3019 if (single_display) 3020 return ps; 3021 } else 3022 return ps; 3023 } 3024 break; 3025 /* internal states */ 3026 case POWER_STATE_TYPE_INTERNAL_UVD: 3027 if (adev->pm.dpm.uvd_ps) 3028 return adev->pm.dpm.uvd_ps; 3029 else 3030 break; 3031 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 3032 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 3033 return ps; 3034 break; 3035 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 3036 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 3037 return ps; 3038 break; 3039 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 3040 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 3041 return ps; 3042 break; 3043 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 3044 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 3045 return ps; 3046 break; 3047 case POWER_STATE_TYPE_INTERNAL_BOOT: 3048 return adev->pm.dpm.boot_ps; 3049 case POWER_STATE_TYPE_INTERNAL_THERMAL: 3050 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 3051 return ps; 3052 break; 3053 case POWER_STATE_TYPE_INTERNAL_ACPI: 3054 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 3055 return ps; 3056 break; 3057 case POWER_STATE_TYPE_INTERNAL_ULV: 3058 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 3059 return ps; 3060 break; 3061 case POWER_STATE_TYPE_INTERNAL_3DPERF: 3062 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 3063 return ps; 3064 break; 3065 default: 3066 break; 3067 } 3068 } 3069 /* use a fallback state if we didn't match */ 3070 switch (dpm_state) { 3071 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 3072 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 3073 goto restart_search; 3074 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 3075 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 3076 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 3077 if (adev->pm.dpm.uvd_ps) { 3078 return adev->pm.dpm.uvd_ps; 3079 } else { 3080 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 3081 goto restart_search; 3082 } 3083 case POWER_STATE_TYPE_INTERNAL_THERMAL: 3084 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 3085 goto restart_search; 3086 case POWER_STATE_TYPE_INTERNAL_ACPI: 3087 dpm_state = POWER_STATE_TYPE_BATTERY; 3088 goto restart_search; 3089 case POWER_STATE_TYPE_BATTERY: 3090 case POWER_STATE_TYPE_BALANCED: 3091 case POWER_STATE_TYPE_INTERNAL_3DPERF: 3092 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 3093 goto restart_search; 3094 default: 3095 break; 3096 } 3097 3098 return NULL; 3099} 3100 3101static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 3102{ 3103 struct amdgpu_ps *ps; 3104 enum amd_pm_state_type dpm_state; 3105 int ret; 3106 bool equal = false; 3107 3108 /* if dpm init failed */ 3109 if (!adev->pm.dpm_enabled) 3110 return; 3111 3112 if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 3113 /* add other state override checks here */ 3114 if ((!adev->pm.dpm.thermal_active) && 3115 (!adev->pm.dpm.uvd_active)) 3116 adev->pm.dpm.state = adev->pm.dpm.user_state; 3117 } 3118 dpm_state = adev->pm.dpm.state; 3119 3120 ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 3121 if (ps) 3122 adev->pm.dpm.requested_ps = ps; 3123 else 3124 return; 3125 3126 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { 3127 printk("switching from power state:\n"); 3128 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 3129 printk("switching to power state:\n"); 3130 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 3131 } 3132 3133 /* update whether vce is active */ 3134 ps->vce_active = adev->pm.dpm.vce_active; 3135 if (adev->powerplay.pp_funcs->display_configuration_changed) 3136 amdgpu_dpm_display_configuration_changed(adev); 3137 3138 ret = amdgpu_dpm_pre_set_power_state(adev); 3139 if (ret) 3140 return; 3141 3142 if (adev->powerplay.pp_funcs->check_state_equal) { 3143 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) 3144 equal = false; 3145 } 3146 3147 if (equal) 3148 return; 3149 3150 amdgpu_dpm_set_power_state(adev); 3151 amdgpu_dpm_post_set_power_state(adev); 3152 3153 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 3154 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 3155 3156 if (adev->powerplay.pp_funcs->force_performance_level) { 3157 if (adev->pm.dpm.thermal_active) { 3158 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 3159 /* force low perf level for thermal */ 3160 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 3161 /* save the user's level */ 3162 adev->pm.dpm.forced_level = level; 3163 } else { 3164 /* otherwise, user selected level */ 3165 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 3166 } 3167 } 3168} 3169 3170void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 3171{ 3172 int ret = 0; 3173 3174 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 3175 if (ret) 3176 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 3177 enable ? "enable" : "disable", ret); 3178 3179 /* enable/disable Low Memory PState for UVD (4k videos) */ 3180 if (adev->asic_type == CHIP_STONEY && 3181 adev->uvd.decode_image_width >= WIDTH_4K) { 3182 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 3183 3184 if (hwmgr && hwmgr->hwmgr_func && 3185 hwmgr->hwmgr_func->update_nbdpm_pstate) 3186 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 3187 !enable, 3188 true); 3189 } 3190} 3191 3192void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 3193{ 3194 int ret = 0; 3195 3196 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 3197 if (ret) 3198 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 3199 enable ? "enable" : "disable", ret); 3200} 3201 3202void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 3203{ 3204 int i; 3205 3206 if (adev->powerplay.pp_funcs->print_power_state == NULL) 3207 return; 3208 3209 for (i = 0; i < adev->pm.dpm.num_ps; i++) 3210 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 3211 3212} 3213 3214void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 3215{ 3216 int ret = 0; 3217 3218 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 3219 if (ret) 3220 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 3221 enable ? "enable" : "disable", ret); 3222} 3223 3224int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 3225{ 3226 int r; 3227 3228 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { 3229 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); 3230 if (r) { 3231 pr_err("smu firmware loading failed\n"); 3232 return r; 3233 } 3234 *smu_version = adev->pm.fw_version; 3235 } 3236 return 0; 3237} 3238 3239int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 3240{ 3241 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 3242 int ret; 3243 3244 if (adev->pm.sysfs_initialized) 3245 return 0; 3246 3247 if (adev->pm.dpm_enabled == 0) 3248 return 0; 3249 3250 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 3251 DRIVER_NAME, adev, 3252 hwmon_groups); 3253 if (IS_ERR(adev->pm.int_hwmon_dev)) { 3254 ret = PTR_ERR(adev->pm.int_hwmon_dev); 3255 dev_err(adev->dev, 3256 "Unable to register hwmon device: %d\n", ret); 3257 return ret; 3258 } 3259 3260 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); 3261 if (ret) { 3262 DRM_ERROR("failed to create device file for dpm state\n"); 3263 return ret; 3264 } 3265 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 3266 if (ret) { 3267 DRM_ERROR("failed to create device file for dpm state\n"); 3268 return ret; 3269 } 3270 3271 3272 ret = device_create_file(adev->dev, &dev_attr_pp_num_states); 3273 if (ret) { 3274 DRM_ERROR("failed to create device file pp_num_states\n"); 3275 return ret; 3276 } 3277 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); 3278 if (ret) { 3279 DRM_ERROR("failed to create device file pp_cur_state\n"); 3280 return ret; 3281 } 3282 ret = device_create_file(adev->dev, &dev_attr_pp_force_state); 3283 if (ret) { 3284 DRM_ERROR("failed to create device file pp_force_state\n"); 3285 return ret; 3286 } 3287 ret = device_create_file(adev->dev, &dev_attr_pp_table); 3288 if (ret) { 3289 DRM_ERROR("failed to create device file pp_table\n"); 3290 return ret; 3291 } 3292 3293 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 3294 if (ret) { 3295 DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 3296 return ret; 3297 } 3298 3299 /* Arcturus does not support standalone mclk/socclk/fclk level setting */ 3300 if (adev->asic_type == CHIP_ARCTURUS) { 3301 dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO; 3302 dev_attr_pp_dpm_mclk.store = NULL; 3303 3304 dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO; 3305 dev_attr_pp_dpm_socclk.store = NULL; 3306 3307 dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO; 3308 dev_attr_pp_dpm_fclk.store = NULL; 3309 } 3310 3311 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 3312 if (ret) { 3313 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 3314 return ret; 3315 } 3316 if (adev->asic_type >= CHIP_VEGA10) { 3317 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); 3318 if (ret) { 3319 DRM_ERROR("failed to create device file pp_dpm_socclk\n"); 3320 return ret; 3321 } 3322 if (adev->asic_type != CHIP_ARCTURUS) { 3323 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); 3324 if (ret) { 3325 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n"); 3326 return ret; 3327 } 3328 } 3329 } 3330 if (adev->asic_type >= CHIP_VEGA20) { 3331 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk); 3332 if (ret) { 3333 DRM_ERROR("failed to create device file pp_dpm_fclk\n"); 3334 return ret; 3335 } 3336 } 3337 if (adev->asic_type != CHIP_ARCTURUS) { 3338 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 3339 if (ret) { 3340 DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 3341 return ret; 3342 } 3343 } 3344 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); 3345 if (ret) { 3346 DRM_ERROR("failed to create device file pp_sclk_od\n"); 3347 return ret; 3348 } 3349 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); 3350 if (ret) { 3351 DRM_ERROR("failed to create device file pp_mclk_od\n"); 3352 return ret; 3353 } 3354 ret = device_create_file(adev->dev, 3355 &dev_attr_pp_power_profile_mode); 3356 if (ret) { 3357 DRM_ERROR("failed to create device file " 3358 "pp_power_profile_mode\n"); 3359 return ret; 3360 } 3361 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 3362 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) { 3363 ret = device_create_file(adev->dev, 3364 &dev_attr_pp_od_clk_voltage); 3365 if (ret) { 3366 DRM_ERROR("failed to create device file " 3367 "pp_od_clk_voltage\n"); 3368 return ret; 3369 } 3370 } 3371 ret = device_create_file(adev->dev, 3372 &dev_attr_gpu_busy_percent); 3373 if (ret) { 3374 DRM_ERROR("failed to create device file " 3375 "gpu_busy_level\n"); 3376 return ret; 3377 } 3378 /* APU does not have its own dedicated memory */ 3379 if (!(adev->flags & AMD_IS_APU) && 3380 (adev->asic_type != CHIP_VEGA10)) { 3381 ret = device_create_file(adev->dev, 3382 &dev_attr_mem_busy_percent); 3383 if (ret) { 3384 DRM_ERROR("failed to create device file " 3385 "mem_busy_percent\n"); 3386 return ret; 3387 } 3388 } 3389 /* PCIe Perf counters won't work on APU nodes */ 3390 if (!(adev->flags & AMD_IS_APU)) { 3391 ret = device_create_file(adev->dev, &dev_attr_pcie_bw); 3392 if (ret) { 3393 DRM_ERROR("failed to create device file pcie_bw\n"); 3394 return ret; 3395 } 3396 } 3397 if (adev->unique_id) 3398 ret = device_create_file(adev->dev, &dev_attr_unique_id); 3399 if (ret) { 3400 DRM_ERROR("failed to create device file unique_id\n"); 3401 return ret; 3402 } 3403 3404 if ((adev->asic_type >= CHIP_VEGA10) && 3405 !(adev->flags & AMD_IS_APU)) { 3406 ret = device_create_file(adev->dev, 3407 &dev_attr_pp_features); 3408 if (ret) { 3409 DRM_ERROR("failed to create device file " 3410 "pp_features\n"); 3411 return ret; 3412 } 3413 } 3414 3415 adev->pm.sysfs_initialized = true; 3416 3417 return 0; 3418} 3419 3420void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 3421{ 3422 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 3423 3424 if (adev->pm.dpm_enabled == 0) 3425 return; 3426 3427 if (adev->pm.int_hwmon_dev) 3428 hwmon_device_unregister(adev->pm.int_hwmon_dev); 3429 device_remove_file(adev->dev, &dev_attr_power_dpm_state); 3430 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 3431 3432 device_remove_file(adev->dev, &dev_attr_pp_num_states); 3433 device_remove_file(adev->dev, &dev_attr_pp_cur_state); 3434 device_remove_file(adev->dev, &dev_attr_pp_force_state); 3435 device_remove_file(adev->dev, &dev_attr_pp_table); 3436 3437 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 3438 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 3439 if (adev->asic_type >= CHIP_VEGA10) { 3440 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk); 3441 if (adev->asic_type != CHIP_ARCTURUS) 3442 device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk); 3443 } 3444 if (adev->asic_type != CHIP_ARCTURUS) 3445 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 3446 if (adev->asic_type >= CHIP_VEGA20) 3447 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk); 3448 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 3449 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 3450 device_remove_file(adev->dev, 3451 &dev_attr_pp_power_profile_mode); 3452 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 3453 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 3454 device_remove_file(adev->dev, 3455 &dev_attr_pp_od_clk_voltage); 3456 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 3457 if (!(adev->flags & AMD_IS_APU) && 3458 (adev->asic_type != CHIP_VEGA10)) 3459 device_remove_file(adev->dev, &dev_attr_mem_busy_percent); 3460 if (!(adev->flags & AMD_IS_APU)) 3461 device_remove_file(adev->dev, &dev_attr_pcie_bw); 3462 if (adev->unique_id) 3463 device_remove_file(adev->dev, &dev_attr_unique_id); 3464 if ((adev->asic_type >= CHIP_VEGA10) && 3465 !(adev->flags & AMD_IS_APU)) 3466 device_remove_file(adev->dev, &dev_attr_pp_features); 3467} 3468 3469void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 3470{ 3471 int i = 0; 3472 3473 if (!adev->pm.dpm_enabled) 3474 return; 3475 3476 if (adev->mode_info.num_crtc) 3477 amdgpu_display_bandwidth_update(adev); 3478 3479 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3480 struct amdgpu_ring *ring = adev->rings[i]; 3481 if (ring && ring->sched.ready) 3482 amdgpu_fence_wait_empty(ring); 3483 } 3484 3485 if (is_support_sw_smu(adev)) { 3486 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; 3487 smu_handle_task(&adev->smu, 3488 smu_dpm->dpm_level, 3489 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, 3490 true); 3491 } else { 3492 if (adev->powerplay.pp_funcs->dispatch_tasks) { 3493 if (!amdgpu_device_has_dc_support(adev)) { 3494 mutex_lock(&adev->pm.mutex); 3495 amdgpu_dpm_get_active_displays(adev); 3496 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 3497 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 3498 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 3499 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ 3500 if (adev->pm.pm_display_cfg.vrefresh > 120) 3501 adev->pm.pm_display_cfg.min_vblank_time = 0; 3502 if (adev->powerplay.pp_funcs->display_configuration_change) 3503 adev->powerplay.pp_funcs->display_configuration_change( 3504 adev->powerplay.pp_handle, 3505 &adev->pm.pm_display_cfg); 3506 mutex_unlock(&adev->pm.mutex); 3507 } 3508 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 3509 } else { 3510 mutex_lock(&adev->pm.mutex); 3511 amdgpu_dpm_get_active_displays(adev); 3512 amdgpu_dpm_change_power_state_locked(adev); 3513 mutex_unlock(&adev->pm.mutex); 3514 } 3515 } 3516} 3517 3518/* 3519 * Debugfs info 3520 */ 3521#if defined(CONFIG_DEBUG_FS) 3522 3523static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 3524{ 3525 uint32_t value; 3526 uint64_t value64; 3527 uint32_t query = 0; 3528 int size; 3529 3530 /* GPU Clocks */ 3531 size = sizeof(value); 3532 seq_printf(m, "GFX Clocks and Power:\n"); 3533 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 3534 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 3535 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 3536 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 3537 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 3538 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 3539 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 3540 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 3541 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 3542 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 3543 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 3544 seq_printf(m, "\t%u mV (VDDNB)\n", value); 3545 size = sizeof(uint32_t); 3546 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) 3547 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); 3548 size = sizeof(value); 3549 seq_printf(m, "\n"); 3550 3551 /* GPU Temp */ 3552 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 3553 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 3554 3555 /* GPU Load */ 3556 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 3557 seq_printf(m, "GPU Load: %u %%\n", value); 3558 /* MEM Load */ 3559 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) 3560 seq_printf(m, "MEM Load: %u %%\n", value); 3561 3562 seq_printf(m, "\n"); 3563 3564 /* SMC feature mask */ 3565 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 3566 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 3567 3568 if (adev->asic_type > CHIP_VEGA20) { 3569 /* VCN clocks */ 3570 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { 3571 if (!value) { 3572 seq_printf(m, "VCN: Disabled\n"); 3573 } else { 3574 seq_printf(m, "VCN: Enabled\n"); 3575 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3576 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3577 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3578 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3579 } 3580 } 3581 seq_printf(m, "\n"); 3582 } else { 3583 /* UVD clocks */ 3584 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 3585 if (!value) { 3586 seq_printf(m, "UVD: Disabled\n"); 3587 } else { 3588 seq_printf(m, "UVD: Enabled\n"); 3589 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3590 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3591 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3592 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3593 } 3594 } 3595 seq_printf(m, "\n"); 3596 3597 /* VCE clocks */ 3598 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 3599 if (!value) { 3600 seq_printf(m, "VCE: Disabled\n"); 3601 } else { 3602 seq_printf(m, "VCE: Enabled\n"); 3603 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 3604 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 3605 } 3606 } 3607 } 3608 3609 return 0; 3610} 3611 3612static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 3613{ 3614 int i; 3615 3616 for (i = 0; clocks[i].flag; i++) 3617 seq_printf(m, "\t%s: %s\n", clocks[i].name, 3618 (flags & clocks[i].flag) ? "On" : "Off"); 3619} 3620 3621static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 3622{ 3623 struct drm_info_node *node = (struct drm_info_node *) m->private; 3624 struct drm_device *dev = node->minor->dev; 3625 struct amdgpu_device *adev = dev->dev_private; 3626 u32 flags = 0; 3627 int r; 3628 3629 r = pm_runtime_get_sync(dev->dev); 3630 if (r < 0) 3631 return r; 3632 3633 amdgpu_device_ip_get_clockgating_state(adev, &flags); 3634 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 3635 amdgpu_parse_cg_state(m, flags); 3636 seq_printf(m, "\n"); 3637 3638 if (!adev->pm.dpm_enabled) { 3639 seq_printf(m, "dpm not enabled\n"); 3640 pm_runtime_mark_last_busy(dev->dev); 3641 pm_runtime_put_autosuspend(dev->dev); 3642 return 0; 3643 } 3644 3645 if (!is_support_sw_smu(adev) && 3646 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3647 mutex_lock(&adev->pm.mutex); 3648 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 3649 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 3650 else 3651 seq_printf(m, "Debugfs support not implemented for this asic\n"); 3652 mutex_unlock(&adev->pm.mutex); 3653 r = 0; 3654 } else { 3655 r = amdgpu_debugfs_pm_info_pp(m, adev); 3656 } 3657 3658 pm_runtime_mark_last_busy(dev->dev); 3659 pm_runtime_put_autosuspend(dev->dev); 3660 3661 return r; 3662} 3663 3664static const struct drm_info_list amdgpu_pm_info_list[] = { 3665 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 3666}; 3667#endif 3668 3669int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 3670{ 3671#if defined(CONFIG_DEBUG_FS) 3672 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 3673#else 3674 return 0; 3675#endif 3676}