Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.15-rc6 1626 lines 47 kB view raw
1/* 2 * Copyright 2017 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Rafał Miłecki <zajec5@gmail.com> 23 * Alex Deucher <alexdeucher@gmail.com> 24 */ 25#include <drm/drmP.h> 26#include "amdgpu.h" 27#include "amdgpu_drv.h" 28#include "amdgpu_pm.h" 29#include "amdgpu_dpm.h" 30#include "atom.h" 31#include <linux/power_supply.h> 32#include <linux/hwmon.h> 33#include <linux/hwmon-sysfs.h> 34 35#include "amd_powerplay.h" 36 37static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); 38 39static const struct cg_flag_name clocks[] = { 40 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 41 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 42 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 43 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 44 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 45 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 46 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 47 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 48 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 49 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 50 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 51 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 52 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 53 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 54 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 55 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 56 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 57 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 58 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 59 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 60 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 61 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 62 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 63 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 64 {0, NULL}, 65}; 66 67void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 68{ 69 if (adev->pm.dpm_enabled) { 70 mutex_lock(&adev->pm.mutex); 71 if (power_supply_is_system_supplied() > 0) 72 adev->pm.dpm.ac_power = true; 73 else 74 adev->pm.dpm.ac_power = false; 75 if (adev->powerplay.pp_funcs->enable_bapm) 76 amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power); 77 mutex_unlock(&adev->pm.mutex); 78 } 79} 80 81static ssize_t amdgpu_get_dpm_state(struct device *dev, 82 struct device_attribute *attr, 83 char *buf) 84{ 85 struct drm_device *ddev = dev_get_drvdata(dev); 86 struct amdgpu_device *adev = ddev->dev_private; 87 enum amd_pm_state_type pm; 88 89 if (adev->powerplay.pp_funcs->get_current_power_state) 90 pm = amdgpu_dpm_get_current_power_state(adev); 91 else 92 pm = adev->pm.dpm.user_state; 93 94 return snprintf(buf, PAGE_SIZE, "%s\n", 95 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 96 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 97} 98 99static ssize_t amdgpu_set_dpm_state(struct device *dev, 100 struct device_attribute *attr, 101 const char *buf, 102 size_t count) 103{ 104 struct drm_device *ddev = dev_get_drvdata(dev); 105 struct amdgpu_device *adev = ddev->dev_private; 106 enum amd_pm_state_type state; 107 108 if (strncmp("battery", buf, strlen("battery")) == 0) 109 state = POWER_STATE_TYPE_BATTERY; 110 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 111 state = POWER_STATE_TYPE_BALANCED; 112 else if (strncmp("performance", buf, strlen("performance")) == 0) 113 state = POWER_STATE_TYPE_PERFORMANCE; 114 else { 115 count = -EINVAL; 116 goto fail; 117 } 118 119 if (adev->powerplay.pp_funcs->dispatch_tasks) { 120 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); 121 } else { 122 mutex_lock(&adev->pm.mutex); 123 adev->pm.dpm.user_state = state; 124 mutex_unlock(&adev->pm.mutex); 125 126 /* Can't set dpm state when the card is off */ 127 if (!(adev->flags & AMD_IS_PX) || 128 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 129 amdgpu_pm_compute_clocks(adev); 130 } 131fail: 132 return count; 133} 134 135static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 136 struct device_attribute *attr, 137 char *buf) 138{ 139 struct drm_device *ddev = dev_get_drvdata(dev); 140 struct amdgpu_device *adev = ddev->dev_private; 141 enum amd_dpm_forced_level level = 0xff; 142 143 if ((adev->flags & AMD_IS_PX) && 144 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 145 return snprintf(buf, PAGE_SIZE, "off\n"); 146 147 if (adev->powerplay.pp_funcs->get_performance_level) 148 level = amdgpu_dpm_get_performance_level(adev); 149 else 150 level = adev->pm.dpm.forced_level; 151 152 return snprintf(buf, PAGE_SIZE, "%s\n", 153 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 154 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 155 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 156 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 157 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 158 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 159 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 160 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 161 "unknown"); 162} 163 164static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 165 struct device_attribute *attr, 166 const char *buf, 167 size_t count) 168{ 169 struct drm_device *ddev = dev_get_drvdata(dev); 170 struct amdgpu_device *adev = ddev->dev_private; 171 enum amd_dpm_forced_level level; 172 enum amd_dpm_forced_level current_level = 0xff; 173 int ret = 0; 174 175 /* Can't force performance level when the card is off */ 176 if ((adev->flags & AMD_IS_PX) && 177 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 178 return -EINVAL; 179 180 if (adev->powerplay.pp_funcs->get_performance_level) 181 current_level = amdgpu_dpm_get_performance_level(adev); 182 183 if (strncmp("low", buf, strlen("low")) == 0) { 184 level = AMD_DPM_FORCED_LEVEL_LOW; 185 } else if (strncmp("high", buf, strlen("high")) == 0) { 186 level = AMD_DPM_FORCED_LEVEL_HIGH; 187 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 188 level = AMD_DPM_FORCED_LEVEL_AUTO; 189 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 190 level = AMD_DPM_FORCED_LEVEL_MANUAL; 191 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 192 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 193 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 194 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 195 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 196 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 197 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 198 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 199 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 200 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 201 } else { 202 count = -EINVAL; 203 goto fail; 204 } 205 206 if (current_level == level) 207 return count; 208 209 if (adev->powerplay.pp_funcs->force_performance_level) { 210 mutex_lock(&adev->pm.mutex); 211 if (adev->pm.dpm.thermal_active) { 212 count = -EINVAL; 213 mutex_unlock(&adev->pm.mutex); 214 goto fail; 215 } 216 ret = amdgpu_dpm_force_performance_level(adev, level); 217 if (ret) 218 count = -EINVAL; 219 else 220 adev->pm.dpm.forced_level = level; 221 mutex_unlock(&adev->pm.mutex); 222 } 223 224fail: 225 return count; 226} 227 228static ssize_t amdgpu_get_pp_num_states(struct device *dev, 229 struct device_attribute *attr, 230 char *buf) 231{ 232 struct drm_device *ddev = dev_get_drvdata(dev); 233 struct amdgpu_device *adev = ddev->dev_private; 234 struct pp_states_info data; 235 int i, buf_len; 236 237 if (adev->powerplay.pp_funcs->get_pp_num_states) 238 amdgpu_dpm_get_pp_num_states(adev, &data); 239 240 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 241 for (i = 0; i < data.nums; i++) 242 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 243 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 244 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 245 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 246 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 247 248 return buf_len; 249} 250 251static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 252 struct device_attribute *attr, 253 char *buf) 254{ 255 struct drm_device *ddev = dev_get_drvdata(dev); 256 struct amdgpu_device *adev = ddev->dev_private; 257 struct pp_states_info data; 258 enum amd_pm_state_type pm = 0; 259 int i = 0; 260 261 if (adev->powerplay.pp_funcs->get_current_power_state 262 && adev->powerplay.pp_funcs->get_pp_num_states) { 263 pm = amdgpu_dpm_get_current_power_state(adev); 264 amdgpu_dpm_get_pp_num_states(adev, &data); 265 266 for (i = 0; i < data.nums; i++) { 267 if (pm == data.states[i]) 268 break; 269 } 270 271 if (i == data.nums) 272 i = -EINVAL; 273 } 274 275 return snprintf(buf, PAGE_SIZE, "%d\n", i); 276} 277 278static ssize_t amdgpu_get_pp_force_state(struct device *dev, 279 struct device_attribute *attr, 280 char *buf) 281{ 282 struct drm_device *ddev = dev_get_drvdata(dev); 283 struct amdgpu_device *adev = ddev->dev_private; 284 285 if (adev->pp_force_state_enabled) 286 return amdgpu_get_pp_cur_state(dev, attr, buf); 287 else 288 return snprintf(buf, PAGE_SIZE, "\n"); 289} 290 291static ssize_t amdgpu_set_pp_force_state(struct device *dev, 292 struct device_attribute *attr, 293 const char *buf, 294 size_t count) 295{ 296 struct drm_device *ddev = dev_get_drvdata(dev); 297 struct amdgpu_device *adev = ddev->dev_private; 298 enum amd_pm_state_type state = 0; 299 unsigned long idx; 300 int ret; 301 302 if (strlen(buf) == 1) 303 adev->pp_force_state_enabled = false; 304 else if (adev->powerplay.pp_funcs->dispatch_tasks && 305 adev->powerplay.pp_funcs->get_pp_num_states) { 306 struct pp_states_info data; 307 308 ret = kstrtoul(buf, 0, &idx); 309 if (ret || idx >= ARRAY_SIZE(data.states)) { 310 count = -EINVAL; 311 goto fail; 312 } 313 314 amdgpu_dpm_get_pp_num_states(adev, &data); 315 state = data.states[idx]; 316 /* only set user selected power states */ 317 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 318 state != POWER_STATE_TYPE_DEFAULT) { 319 amdgpu_dpm_dispatch_task(adev, 320 AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); 321 adev->pp_force_state_enabled = true; 322 } 323 } 324fail: 325 return count; 326} 327 328static ssize_t amdgpu_get_pp_table(struct device *dev, 329 struct device_attribute *attr, 330 char *buf) 331{ 332 struct drm_device *ddev = dev_get_drvdata(dev); 333 struct amdgpu_device *adev = ddev->dev_private; 334 char *table = NULL; 335 int size; 336 337 if (adev->powerplay.pp_funcs->get_pp_table) 338 size = amdgpu_dpm_get_pp_table(adev, &table); 339 else 340 return 0; 341 342 if (size >= PAGE_SIZE) 343 size = PAGE_SIZE - 1; 344 345 memcpy(buf, table, size); 346 347 return size; 348} 349 350static ssize_t amdgpu_set_pp_table(struct device *dev, 351 struct device_attribute *attr, 352 const char *buf, 353 size_t count) 354{ 355 struct drm_device *ddev = dev_get_drvdata(dev); 356 struct amdgpu_device *adev = ddev->dev_private; 357 358 if (adev->powerplay.pp_funcs->set_pp_table) 359 amdgpu_dpm_set_pp_table(adev, buf, count); 360 361 return count; 362} 363 364static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 365 struct device_attribute *attr, 366 char *buf) 367{ 368 struct drm_device *ddev = dev_get_drvdata(dev); 369 struct amdgpu_device *adev = ddev->dev_private; 370 371 if (adev->powerplay.pp_funcs->print_clock_levels) 372 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 373 else 374 return snprintf(buf, PAGE_SIZE, "\n"); 375} 376 377static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 378 struct device_attribute *attr, 379 const char *buf, 380 size_t count) 381{ 382 struct drm_device *ddev = dev_get_drvdata(dev); 383 struct amdgpu_device *adev = ddev->dev_private; 384 int ret; 385 long level; 386 uint32_t i, mask = 0; 387 char sub_str[2]; 388 389 for (i = 0; i < strlen(buf); i++) { 390 if (*(buf + i) == '\n') 391 continue; 392 sub_str[0] = *(buf + i); 393 sub_str[1] = '\0'; 394 ret = kstrtol(sub_str, 0, &level); 395 396 if (ret) { 397 count = -EINVAL; 398 goto fail; 399 } 400 mask |= 1 << level; 401 } 402 403 if (adev->powerplay.pp_funcs->force_clock_level) 404 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 405 406fail: 407 return count; 408} 409 410static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 411 struct device_attribute *attr, 412 char *buf) 413{ 414 struct drm_device *ddev = dev_get_drvdata(dev); 415 struct amdgpu_device *adev = ddev->dev_private; 416 417 if (adev->powerplay.pp_funcs->print_clock_levels) 418 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 419 else 420 return snprintf(buf, PAGE_SIZE, "\n"); 421} 422 423static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 424 struct device_attribute *attr, 425 const char *buf, 426 size_t count) 427{ 428 struct drm_device *ddev = dev_get_drvdata(dev); 429 struct amdgpu_device *adev = ddev->dev_private; 430 int ret; 431 long level; 432 uint32_t i, mask = 0; 433 char sub_str[2]; 434 435 for (i = 0; i < strlen(buf); i++) { 436 if (*(buf + i) == '\n') 437 continue; 438 sub_str[0] = *(buf + i); 439 sub_str[1] = '\0'; 440 ret = kstrtol(sub_str, 0, &level); 441 442 if (ret) { 443 count = -EINVAL; 444 goto fail; 445 } 446 mask |= 1 << level; 447 } 448 if (adev->powerplay.pp_funcs->force_clock_level) 449 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 450 451fail: 452 return count; 453} 454 455static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 456 struct device_attribute *attr, 457 char *buf) 458{ 459 struct drm_device *ddev = dev_get_drvdata(dev); 460 struct amdgpu_device *adev = ddev->dev_private; 461 462 if (adev->powerplay.pp_funcs->print_clock_levels) 463 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 464 else 465 return snprintf(buf, PAGE_SIZE, "\n"); 466} 467 468static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 469 struct device_attribute *attr, 470 const char *buf, 471 size_t count) 472{ 473 struct drm_device *ddev = dev_get_drvdata(dev); 474 struct amdgpu_device *adev = ddev->dev_private; 475 int ret; 476 long level; 477 uint32_t i, mask = 0; 478 char sub_str[2]; 479 480 for (i = 0; i < strlen(buf); i++) { 481 if (*(buf + i) == '\n') 482 continue; 483 sub_str[0] = *(buf + i); 484 sub_str[1] = '\0'; 485 ret = kstrtol(sub_str, 0, &level); 486 487 if (ret) { 488 count = -EINVAL; 489 goto fail; 490 } 491 mask |= 1 << level; 492 } 493 if (adev->powerplay.pp_funcs->force_clock_level) 494 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 495 496fail: 497 return count; 498} 499 500static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 501 struct device_attribute *attr, 502 char *buf) 503{ 504 struct drm_device *ddev = dev_get_drvdata(dev); 505 struct amdgpu_device *adev = ddev->dev_private; 506 uint32_t value = 0; 507 508 if (adev->powerplay.pp_funcs->get_sclk_od) 509 value = amdgpu_dpm_get_sclk_od(adev); 510 511 return snprintf(buf, PAGE_SIZE, "%d\n", value); 512} 513 514static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 515 struct device_attribute *attr, 516 const char *buf, 517 size_t count) 518{ 519 struct drm_device *ddev = dev_get_drvdata(dev); 520 struct amdgpu_device *adev = ddev->dev_private; 521 int ret; 522 long int value; 523 524 ret = kstrtol(buf, 0, &value); 525 526 if (ret) { 527 count = -EINVAL; 528 goto fail; 529 } 530 if (adev->powerplay.pp_funcs->set_sclk_od) 531 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 532 533 if (adev->powerplay.pp_funcs->dispatch_tasks) { 534 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); 535 } else { 536 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 537 amdgpu_pm_compute_clocks(adev); 538 } 539 540fail: 541 return count; 542} 543 544static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 545 struct device_attribute *attr, 546 char *buf) 547{ 548 struct drm_device *ddev = dev_get_drvdata(dev); 549 struct amdgpu_device *adev = ddev->dev_private; 550 uint32_t value = 0; 551 552 if (adev->powerplay.pp_funcs->get_mclk_od) 553 value = amdgpu_dpm_get_mclk_od(adev); 554 555 return snprintf(buf, PAGE_SIZE, "%d\n", value); 556} 557 558static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 559 struct device_attribute *attr, 560 const char *buf, 561 size_t count) 562{ 563 struct drm_device *ddev = dev_get_drvdata(dev); 564 struct amdgpu_device *adev = ddev->dev_private; 565 int ret; 566 long int value; 567 568 ret = kstrtol(buf, 0, &value); 569 570 if (ret) { 571 count = -EINVAL; 572 goto fail; 573 } 574 if (adev->powerplay.pp_funcs->set_mclk_od) 575 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 576 577 if (adev->powerplay.pp_funcs->dispatch_tasks) { 578 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); 579 } else { 580 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 581 amdgpu_pm_compute_clocks(adev); 582 } 583 584fail: 585 return count; 586} 587 588static ssize_t amdgpu_get_pp_power_profile(struct device *dev, 589 char *buf, struct amd_pp_profile *query) 590{ 591 struct drm_device *ddev = dev_get_drvdata(dev); 592 struct amdgpu_device *adev = ddev->dev_private; 593 int ret = 0xff; 594 595 if (adev->powerplay.pp_funcs->get_power_profile_state) 596 ret = amdgpu_dpm_get_power_profile_state( 597 adev, query); 598 599 if (ret) 600 return ret; 601 602 return snprintf(buf, PAGE_SIZE, 603 "%d %d %d %d %d\n", 604 query->min_sclk / 100, 605 query->min_mclk / 100, 606 query->activity_threshold, 607 query->up_hyst, 608 query->down_hyst); 609} 610 611static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev, 612 struct device_attribute *attr, 613 char *buf) 614{ 615 struct amd_pp_profile query = {0}; 616 617 query.type = AMD_PP_GFX_PROFILE; 618 619 return amdgpu_get_pp_power_profile(dev, buf, &query); 620} 621 622static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev, 623 struct device_attribute *attr, 624 char *buf) 625{ 626 struct amd_pp_profile query = {0}; 627 628 query.type = AMD_PP_COMPUTE_PROFILE; 629 630 return amdgpu_get_pp_power_profile(dev, buf, &query); 631} 632 633static ssize_t amdgpu_set_pp_power_profile(struct device *dev, 634 const char *buf, 635 size_t count, 636 struct amd_pp_profile *request) 637{ 638 struct drm_device *ddev = dev_get_drvdata(dev); 639 struct amdgpu_device *adev = ddev->dev_private; 640 uint32_t loop = 0; 641 char *sub_str, buf_cpy[128], *tmp_str; 642 const char delimiter[3] = {' ', '\n', '\0'}; 643 long int value; 644 int ret = 0xff; 645 646 if (strncmp("reset", buf, strlen("reset")) == 0) { 647 if (adev->powerplay.pp_funcs->reset_power_profile_state) 648 ret = amdgpu_dpm_reset_power_profile_state( 649 adev, request); 650 if (ret) { 651 count = -EINVAL; 652 goto fail; 653 } 654 return count; 655 } 656 657 if (strncmp("set", buf, strlen("set")) == 0) { 658 if (adev->powerplay.pp_funcs->set_power_profile_state) 659 ret = amdgpu_dpm_set_power_profile_state( 660 adev, request); 661 662 if (ret) { 663 count = -EINVAL; 664 goto fail; 665 } 666 return count; 667 } 668 669 if (count + 1 >= 128) { 670 count = -EINVAL; 671 goto fail; 672 } 673 674 memcpy(buf_cpy, buf, count + 1); 675 tmp_str = buf_cpy; 676 677 while (tmp_str[0]) { 678 sub_str = strsep(&tmp_str, delimiter); 679 ret = kstrtol(sub_str, 0, &value); 680 if (ret) { 681 count = -EINVAL; 682 goto fail; 683 } 684 685 switch (loop) { 686 case 0: 687 /* input unit MHz convert to dpm table unit 10KHz*/ 688 request->min_sclk = (uint32_t)value * 100; 689 break; 690 case 1: 691 /* input unit MHz convert to dpm table unit 10KHz*/ 692 request->min_mclk = (uint32_t)value * 100; 693 break; 694 case 2: 695 request->activity_threshold = (uint16_t)value; 696 break; 697 case 3: 698 request->up_hyst = (uint8_t)value; 699 break; 700 case 4: 701 request->down_hyst = (uint8_t)value; 702 break; 703 default: 704 break; 705 } 706 707 loop++; 708 } 709 if (adev->powerplay.pp_funcs->set_power_profile_state) 710 ret = amdgpu_dpm_set_power_profile_state(adev, request); 711 712 if (ret) 713 count = -EINVAL; 714 715fail: 716 return count; 717} 718 719static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev, 720 struct device_attribute *attr, 721 const char *buf, 722 size_t count) 723{ 724 struct amd_pp_profile request = {0}; 725 726 request.type = AMD_PP_GFX_PROFILE; 727 728 return amdgpu_set_pp_power_profile(dev, buf, count, &request); 729} 730 731static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev, 732 struct device_attribute *attr, 733 const char *buf, 734 size_t count) 735{ 736 struct amd_pp_profile request = {0}; 737 738 request.type = AMD_PP_COMPUTE_PROFILE; 739 740 return amdgpu_set_pp_power_profile(dev, buf, count, &request); 741} 742 743static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 744static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 745 amdgpu_get_dpm_forced_performance_level, 746 amdgpu_set_dpm_forced_performance_level); 747static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 748static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 749static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 750 amdgpu_get_pp_force_state, 751 amdgpu_set_pp_force_state); 752static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 753 amdgpu_get_pp_table, 754 amdgpu_set_pp_table); 755static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 756 amdgpu_get_pp_dpm_sclk, 757 amdgpu_set_pp_dpm_sclk); 758static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 759 amdgpu_get_pp_dpm_mclk, 760 amdgpu_set_pp_dpm_mclk); 761static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 762 amdgpu_get_pp_dpm_pcie, 763 amdgpu_set_pp_dpm_pcie); 764static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, 765 amdgpu_get_pp_sclk_od, 766 amdgpu_set_pp_sclk_od); 767static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 768 amdgpu_get_pp_mclk_od, 769 amdgpu_set_pp_mclk_od); 770static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR, 771 amdgpu_get_pp_gfx_power_profile, 772 amdgpu_set_pp_gfx_power_profile); 773static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR, 774 amdgpu_get_pp_compute_power_profile, 775 amdgpu_set_pp_compute_power_profile); 776 777static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 778 struct device_attribute *attr, 779 char *buf) 780{ 781 struct amdgpu_device *adev = dev_get_drvdata(dev); 782 struct drm_device *ddev = adev->ddev; 783 int temp; 784 785 /* Can't get temperature when the card is off */ 786 if ((adev->flags & AMD_IS_PX) && 787 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 788 return -EINVAL; 789 790 if (!adev->powerplay.pp_funcs->get_temperature) 791 temp = 0; 792 else 793 temp = amdgpu_dpm_get_temperature(adev); 794 795 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 796} 797 798static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 799 struct device_attribute *attr, 800 char *buf) 801{ 802 struct amdgpu_device *adev = dev_get_drvdata(dev); 803 int hyst = to_sensor_dev_attr(attr)->index; 804 int temp; 805 806 if (hyst) 807 temp = adev->pm.dpm.thermal.min_temp; 808 else 809 temp = adev->pm.dpm.thermal.max_temp; 810 811 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 812} 813 814static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 815 struct device_attribute *attr, 816 char *buf) 817{ 818 struct amdgpu_device *adev = dev_get_drvdata(dev); 819 u32 pwm_mode = 0; 820 821 if (!adev->powerplay.pp_funcs->get_fan_control_mode) 822 return -EINVAL; 823 824 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 825 826 return sprintf(buf, "%i\n", pwm_mode); 827} 828 829static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 830 struct device_attribute *attr, 831 const char *buf, 832 size_t count) 833{ 834 struct amdgpu_device *adev = dev_get_drvdata(dev); 835 int err; 836 int value; 837 838 if (!adev->powerplay.pp_funcs->set_fan_control_mode) 839 return -EINVAL; 840 841 err = kstrtoint(buf, 10, &value); 842 if (err) 843 return err; 844 845 amdgpu_dpm_set_fan_control_mode(adev, value); 846 847 return count; 848} 849 850static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 851 struct device_attribute *attr, 852 char *buf) 853{ 854 return sprintf(buf, "%i\n", 0); 855} 856 857static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 858 struct device_attribute *attr, 859 char *buf) 860{ 861 return sprintf(buf, "%i\n", 255); 862} 863 864static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 865 struct device_attribute *attr, 866 const char *buf, size_t count) 867{ 868 struct amdgpu_device *adev = dev_get_drvdata(dev); 869 int err; 870 u32 value; 871 872 err = kstrtou32(buf, 10, &value); 873 if (err) 874 return err; 875 876 value = (value * 100) / 255; 877 878 if (adev->powerplay.pp_funcs->set_fan_speed_percent) { 879 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 880 if (err) 881 return err; 882 } 883 884 return count; 885} 886 887static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 888 struct device_attribute *attr, 889 char *buf) 890{ 891 struct amdgpu_device *adev = dev_get_drvdata(dev); 892 int err; 893 u32 speed = 0; 894 895 if (adev->powerplay.pp_funcs->get_fan_speed_percent) { 896 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 897 if (err) 898 return err; 899 } 900 901 speed = (speed * 255) / 100; 902 903 return sprintf(buf, "%i\n", speed); 904} 905 906static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 907 struct device_attribute *attr, 908 char *buf) 909{ 910 struct amdgpu_device *adev = dev_get_drvdata(dev); 911 int err; 912 u32 speed = 0; 913 914 if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { 915 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 916 if (err) 917 return err; 918 } 919 920 return sprintf(buf, "%i\n", speed); 921} 922 923static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); 924static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 925static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 926static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 927static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 928static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 929static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 930static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 931 932static struct attribute *hwmon_attributes[] = { 933 &sensor_dev_attr_temp1_input.dev_attr.attr, 934 &sensor_dev_attr_temp1_crit.dev_attr.attr, 935 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 936 &sensor_dev_attr_pwm1.dev_attr.attr, 937 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 938 &sensor_dev_attr_pwm1_min.dev_attr.attr, 939 &sensor_dev_attr_pwm1_max.dev_attr.attr, 940 &sensor_dev_attr_fan1_input.dev_attr.attr, 941 NULL 942}; 943 944static umode_t hwmon_attributes_visible(struct kobject *kobj, 945 struct attribute *attr, int index) 946{ 947 struct device *dev = kobj_to_dev(kobj); 948 struct amdgpu_device *adev = dev_get_drvdata(dev); 949 umode_t effective_mode = attr->mode; 950 951 /* no skipping for powerplay */ 952 if (adev->powerplay.cgs_device) 953 return effective_mode; 954 955 /* Skip limit attributes if DPM is not enabled */ 956 if (!adev->pm.dpm_enabled && 957 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 958 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 959 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 960 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 961 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 962 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 963 return 0; 964 965 /* Skip fan attributes if fan is not present */ 966 if (adev->pm.no_fan && 967 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 968 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 969 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 970 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 971 return 0; 972 973 /* mask fan attributes if we have no bindings for this asic to expose */ 974 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 975 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 976 (!adev->powerplay.pp_funcs->get_fan_control_mode && 977 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 978 effective_mode &= ~S_IRUGO; 979 980 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 981 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 982 (!adev->powerplay.pp_funcs->set_fan_control_mode && 983 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 984 effective_mode &= ~S_IWUSR; 985 986 /* hide max/min values if we can't both query and manage the fan */ 987 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 988 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 989 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 990 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 991 return 0; 992 993 /* requires powerplay */ 994 if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr) 995 return 0; 996 997 return effective_mode; 998} 999 1000static const struct attribute_group hwmon_attrgroup = { 1001 .attrs = hwmon_attributes, 1002 .is_visible = hwmon_attributes_visible, 1003}; 1004 1005static const struct attribute_group *hwmon_groups[] = { 1006 &hwmon_attrgroup, 1007 NULL 1008}; 1009 1010void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 1011{ 1012 struct amdgpu_device *adev = 1013 container_of(work, struct amdgpu_device, 1014 pm.dpm.thermal.work); 1015 /* switch to the thermal state */ 1016 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1017 1018 if (!adev->pm.dpm_enabled) 1019 return; 1020 1021 if (adev->powerplay.pp_funcs->get_temperature) { 1022 int temp = amdgpu_dpm_get_temperature(adev); 1023 1024 if (temp < adev->pm.dpm.thermal.min_temp) 1025 /* switch back the user state */ 1026 dpm_state = adev->pm.dpm.user_state; 1027 } else { 1028 if (adev->pm.dpm.thermal.high_to_low) 1029 /* switch back the user state */ 1030 dpm_state = adev->pm.dpm.user_state; 1031 } 1032 mutex_lock(&adev->pm.mutex); 1033 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 1034 adev->pm.dpm.thermal_active = true; 1035 else 1036 adev->pm.dpm.thermal_active = false; 1037 adev->pm.dpm.state = dpm_state; 1038 mutex_unlock(&adev->pm.mutex); 1039 1040 amdgpu_pm_compute_clocks(adev); 1041} 1042 1043static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 1044 enum amd_pm_state_type dpm_state) 1045{ 1046 int i; 1047 struct amdgpu_ps *ps; 1048 u32 ui_class; 1049 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 1050 true : false; 1051 1052 /* check if the vblank period is too short to adjust the mclk */ 1053 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { 1054 if (amdgpu_dpm_vblank_too_short(adev)) 1055 single_display = false; 1056 } 1057 1058 /* certain older asics have a separare 3D performance state, 1059 * so try that first if the user selected performance 1060 */ 1061 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 1062 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 1063 /* balanced states don't exist at the moment */ 1064 if (dpm_state == POWER_STATE_TYPE_BALANCED) 1065 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1066 1067restart_search: 1068 /* Pick the best power state based on current conditions */ 1069 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 1070 ps = &adev->pm.dpm.ps[i]; 1071 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 1072 switch (dpm_state) { 1073 /* user states */ 1074 case POWER_STATE_TYPE_BATTERY: 1075 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 1076 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1077 if (single_display) 1078 return ps; 1079 } else 1080 return ps; 1081 } 1082 break; 1083 case POWER_STATE_TYPE_BALANCED: 1084 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 1085 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1086 if (single_display) 1087 return ps; 1088 } else 1089 return ps; 1090 } 1091 break; 1092 case POWER_STATE_TYPE_PERFORMANCE: 1093 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 1094 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1095 if (single_display) 1096 return ps; 1097 } else 1098 return ps; 1099 } 1100 break; 1101 /* internal states */ 1102 case POWER_STATE_TYPE_INTERNAL_UVD: 1103 if (adev->pm.dpm.uvd_ps) 1104 return adev->pm.dpm.uvd_ps; 1105 else 1106 break; 1107 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1108 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 1109 return ps; 1110 break; 1111 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1112 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 1113 return ps; 1114 break; 1115 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1116 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 1117 return ps; 1118 break; 1119 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1120 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 1121 return ps; 1122 break; 1123 case POWER_STATE_TYPE_INTERNAL_BOOT: 1124 return adev->pm.dpm.boot_ps; 1125 case POWER_STATE_TYPE_INTERNAL_THERMAL: 1126 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 1127 return ps; 1128 break; 1129 case POWER_STATE_TYPE_INTERNAL_ACPI: 1130 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 1131 return ps; 1132 break; 1133 case POWER_STATE_TYPE_INTERNAL_ULV: 1134 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 1135 return ps; 1136 break; 1137 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1138 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 1139 return ps; 1140 break; 1141 default: 1142 break; 1143 } 1144 } 1145 /* use a fallback state if we didn't match */ 1146 switch (dpm_state) { 1147 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1148 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1149 goto restart_search; 1150 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1151 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1152 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1153 if (adev->pm.dpm.uvd_ps) { 1154 return adev->pm.dpm.uvd_ps; 1155 } else { 1156 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1157 goto restart_search; 1158 } 1159 case POWER_STATE_TYPE_INTERNAL_THERMAL: 1160 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 1161 goto restart_search; 1162 case POWER_STATE_TYPE_INTERNAL_ACPI: 1163 dpm_state = POWER_STATE_TYPE_BATTERY; 1164 goto restart_search; 1165 case POWER_STATE_TYPE_BATTERY: 1166 case POWER_STATE_TYPE_BALANCED: 1167 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1168 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1169 goto restart_search; 1170 default: 1171 break; 1172 } 1173 1174 return NULL; 1175} 1176 1177static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 1178{ 1179 struct amdgpu_ps *ps; 1180 enum amd_pm_state_type dpm_state; 1181 int ret; 1182 bool equal = false; 1183 1184 /* if dpm init failed */ 1185 if (!adev->pm.dpm_enabled) 1186 return; 1187 1188 if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 1189 /* add other state override checks here */ 1190 if ((!adev->pm.dpm.thermal_active) && 1191 (!adev->pm.dpm.uvd_active)) 1192 adev->pm.dpm.state = adev->pm.dpm.user_state; 1193 } 1194 dpm_state = adev->pm.dpm.state; 1195 1196 ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 1197 if (ps) 1198 adev->pm.dpm.requested_ps = ps; 1199 else 1200 return; 1201 1202 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { 1203 printk("switching from power state:\n"); 1204 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 1205 printk("switching to power state:\n"); 1206 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 1207 } 1208 1209 /* update whether vce is active */ 1210 ps->vce_active = adev->pm.dpm.vce_active; 1211 if (adev->powerplay.pp_funcs->display_configuration_changed) 1212 amdgpu_dpm_display_configuration_changed(adev); 1213 1214 ret = amdgpu_dpm_pre_set_power_state(adev); 1215 if (ret) 1216 return; 1217 1218 if (adev->powerplay.pp_funcs->check_state_equal) { 1219 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) 1220 equal = false; 1221 } 1222 1223 if (equal) 1224 return; 1225 1226 amdgpu_dpm_set_power_state(adev); 1227 amdgpu_dpm_post_set_power_state(adev); 1228 1229 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 1230 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 1231 1232 if (adev->powerplay.pp_funcs->force_performance_level) { 1233 if (adev->pm.dpm.thermal_active) { 1234 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 1235 /* force low perf level for thermal */ 1236 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 1237 /* save the user's level */ 1238 adev->pm.dpm.forced_level = level; 1239 } else { 1240 /* otherwise, user selected level */ 1241 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 1242 } 1243 } 1244} 1245 1246void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 1247{ 1248 if (adev->powerplay.pp_funcs->powergate_uvd) { 1249 /* enable/disable UVD */ 1250 mutex_lock(&adev->pm.mutex); 1251 amdgpu_dpm_powergate_uvd(adev, !enable); 1252 mutex_unlock(&adev->pm.mutex); 1253 } else { 1254 if (enable) { 1255 mutex_lock(&adev->pm.mutex); 1256 adev->pm.dpm.uvd_active = true; 1257 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 1258 mutex_unlock(&adev->pm.mutex); 1259 } else { 1260 mutex_lock(&adev->pm.mutex); 1261 adev->pm.dpm.uvd_active = false; 1262 mutex_unlock(&adev->pm.mutex); 1263 } 1264 amdgpu_pm_compute_clocks(adev); 1265 } 1266} 1267 1268void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 1269{ 1270 if (adev->powerplay.pp_funcs->powergate_vce) { 1271 /* enable/disable VCE */ 1272 mutex_lock(&adev->pm.mutex); 1273 amdgpu_dpm_powergate_vce(adev, !enable); 1274 mutex_unlock(&adev->pm.mutex); 1275 } else { 1276 if (enable) { 1277 mutex_lock(&adev->pm.mutex); 1278 adev->pm.dpm.vce_active = true; 1279 /* XXX select vce level based on ring/task */ 1280 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 1281 mutex_unlock(&adev->pm.mutex); 1282 amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1283 AMD_CG_STATE_UNGATE); 1284 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1285 AMD_PG_STATE_UNGATE); 1286 amdgpu_pm_compute_clocks(adev); 1287 } else { 1288 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1289 AMD_PG_STATE_GATE); 1290 amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1291 AMD_CG_STATE_GATE); 1292 mutex_lock(&adev->pm.mutex); 1293 adev->pm.dpm.vce_active = false; 1294 mutex_unlock(&adev->pm.mutex); 1295 amdgpu_pm_compute_clocks(adev); 1296 } 1297 1298 } 1299} 1300 1301void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 1302{ 1303 int i; 1304 1305 if (adev->powerplay.pp_funcs->print_power_state == NULL) 1306 return; 1307 1308 for (i = 0; i < adev->pm.dpm.num_ps; i++) 1309 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 1310 1311} 1312 1313int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 1314{ 1315 int ret; 1316 1317 if (adev->pm.sysfs_initialized) 1318 return 0; 1319 1320 if (adev->pm.dpm_enabled == 0) 1321 return 0; 1322 1323 if (adev->powerplay.pp_funcs->get_temperature == NULL) 1324 return 0; 1325 1326 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 1327 DRIVER_NAME, adev, 1328 hwmon_groups); 1329 if (IS_ERR(adev->pm.int_hwmon_dev)) { 1330 ret = PTR_ERR(adev->pm.int_hwmon_dev); 1331 dev_err(adev->dev, 1332 "Unable to register hwmon device: %d\n", ret); 1333 return ret; 1334 } 1335 1336 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); 1337 if (ret) { 1338 DRM_ERROR("failed to create device file for dpm state\n"); 1339 return ret; 1340 } 1341 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 1342 if (ret) { 1343 DRM_ERROR("failed to create device file for dpm state\n"); 1344 return ret; 1345 } 1346 1347 1348 ret = device_create_file(adev->dev, &dev_attr_pp_num_states); 1349 if (ret) { 1350 DRM_ERROR("failed to create device file pp_num_states\n"); 1351 return ret; 1352 } 1353 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); 1354 if (ret) { 1355 DRM_ERROR("failed to create device file pp_cur_state\n"); 1356 return ret; 1357 } 1358 ret = device_create_file(adev->dev, &dev_attr_pp_force_state); 1359 if (ret) { 1360 DRM_ERROR("failed to create device file pp_force_state\n"); 1361 return ret; 1362 } 1363 ret = device_create_file(adev->dev, &dev_attr_pp_table); 1364 if (ret) { 1365 DRM_ERROR("failed to create device file pp_table\n"); 1366 return ret; 1367 } 1368 1369 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 1370 if (ret) { 1371 DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 1372 return ret; 1373 } 1374 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 1375 if (ret) { 1376 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 1377 return ret; 1378 } 1379 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 1380 if (ret) { 1381 DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 1382 return ret; 1383 } 1384 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); 1385 if (ret) { 1386 DRM_ERROR("failed to create device file pp_sclk_od\n"); 1387 return ret; 1388 } 1389 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); 1390 if (ret) { 1391 DRM_ERROR("failed to create device file pp_mclk_od\n"); 1392 return ret; 1393 } 1394 ret = device_create_file(adev->dev, 1395 &dev_attr_pp_gfx_power_profile); 1396 if (ret) { 1397 DRM_ERROR("failed to create device file " 1398 "pp_gfx_power_profile\n"); 1399 return ret; 1400 } 1401 ret = device_create_file(adev->dev, 1402 &dev_attr_pp_compute_power_profile); 1403 if (ret) { 1404 DRM_ERROR("failed to create device file " 1405 "pp_compute_power_profile\n"); 1406 return ret; 1407 } 1408 1409 ret = amdgpu_debugfs_pm_init(adev); 1410 if (ret) { 1411 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1412 return ret; 1413 } 1414 1415 adev->pm.sysfs_initialized = true; 1416 1417 return 0; 1418} 1419 1420void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 1421{ 1422 if (adev->pm.dpm_enabled == 0) 1423 return; 1424 1425 if (adev->pm.int_hwmon_dev) 1426 hwmon_device_unregister(adev->pm.int_hwmon_dev); 1427 device_remove_file(adev->dev, &dev_attr_power_dpm_state); 1428 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 1429 1430 device_remove_file(adev->dev, &dev_attr_pp_num_states); 1431 device_remove_file(adev->dev, &dev_attr_pp_cur_state); 1432 device_remove_file(adev->dev, &dev_attr_pp_force_state); 1433 device_remove_file(adev->dev, &dev_attr_pp_table); 1434 1435 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 1436 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 1437 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 1438 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 1439 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 1440 device_remove_file(adev->dev, 1441 &dev_attr_pp_gfx_power_profile); 1442 device_remove_file(adev->dev, 1443 &dev_attr_pp_compute_power_profile); 1444} 1445 1446void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 1447{ 1448 struct drm_device *ddev = adev->ddev; 1449 struct drm_crtc *crtc; 1450 struct amdgpu_crtc *amdgpu_crtc; 1451 int i = 0; 1452 1453 if (!adev->pm.dpm_enabled) 1454 return; 1455 1456 if (adev->mode_info.num_crtc) 1457 amdgpu_display_bandwidth_update(adev); 1458 1459 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1460 struct amdgpu_ring *ring = adev->rings[i]; 1461 if (ring && ring->ready) 1462 amdgpu_fence_wait_empty(ring); 1463 } 1464 1465 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1466 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL); 1467 } else { 1468 mutex_lock(&adev->pm.mutex); 1469 adev->pm.dpm.new_active_crtcs = 0; 1470 adev->pm.dpm.new_active_crtc_count = 0; 1471 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 1472 list_for_each_entry(crtc, 1473 &ddev->mode_config.crtc_list, head) { 1474 amdgpu_crtc = to_amdgpu_crtc(crtc); 1475 if (amdgpu_crtc->enabled) { 1476 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 1477 adev->pm.dpm.new_active_crtc_count++; 1478 } 1479 } 1480 } 1481 /* update battery/ac status */ 1482 if (power_supply_is_system_supplied() > 0) 1483 adev->pm.dpm.ac_power = true; 1484 else 1485 adev->pm.dpm.ac_power = false; 1486 1487 amdgpu_dpm_change_power_state_locked(adev); 1488 1489 mutex_unlock(&adev->pm.mutex); 1490 } 1491} 1492 1493/* 1494 * Debugfs info 1495 */ 1496#if defined(CONFIG_DEBUG_FS) 1497 1498static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 1499{ 1500 uint32_t value; 1501 struct pp_gpu_power query = {0}; 1502 int size; 1503 1504 /* sanity check PP is enabled */ 1505 if (!(adev->powerplay.pp_funcs && 1506 adev->powerplay.pp_funcs->read_sensor)) 1507 return -EINVAL; 1508 1509 /* GPU Clocks */ 1510 size = sizeof(value); 1511 seq_printf(m, "GFX Clocks and Power:\n"); 1512 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 1513 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 1514 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 1515 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 1516 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 1517 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 1518 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 1519 seq_printf(m, "\t%u mV (VDDNB)\n", value); 1520 size = sizeof(query); 1521 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) { 1522 seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8, 1523 query.vddc_power & 0xff); 1524 seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8, 1525 query.vddci_power & 0xff); 1526 seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8, 1527 query.max_gpu_power & 0xff); 1528 seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8, 1529 query.average_gpu_power & 0xff); 1530 } 1531 size = sizeof(value); 1532 seq_printf(m, "\n"); 1533 1534 /* GPU Temp */ 1535 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 1536 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 1537 1538 /* GPU Load */ 1539 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 1540 seq_printf(m, "GPU Load: %u %%\n", value); 1541 seq_printf(m, "\n"); 1542 1543 /* UVD clocks */ 1544 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 1545 if (!value) { 1546 seq_printf(m, "UVD: Disabled\n"); 1547 } else { 1548 seq_printf(m, "UVD: Enabled\n"); 1549 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 1550 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 1551 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 1552 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 1553 } 1554 } 1555 seq_printf(m, "\n"); 1556 1557 /* VCE clocks */ 1558 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 1559 if (!value) { 1560 seq_printf(m, "VCE: Disabled\n"); 1561 } else { 1562 seq_printf(m, "VCE: Enabled\n"); 1563 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 1564 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 1565 } 1566 } 1567 1568 return 0; 1569} 1570 1571static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 1572{ 1573 int i; 1574 1575 for (i = 0; clocks[i].flag; i++) 1576 seq_printf(m, "\t%s: %s\n", clocks[i].name, 1577 (flags & clocks[i].flag) ? "On" : "Off"); 1578} 1579 1580static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 1581{ 1582 struct drm_info_node *node = (struct drm_info_node *) m->private; 1583 struct drm_device *dev = node->minor->dev; 1584 struct amdgpu_device *adev = dev->dev_private; 1585 struct drm_device *ddev = adev->ddev; 1586 u32 flags = 0; 1587 1588 amdgpu_get_clockgating_state(adev, &flags); 1589 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 1590 amdgpu_parse_cg_state(m, flags); 1591 seq_printf(m, "\n"); 1592 1593 if (!adev->pm.dpm_enabled) { 1594 seq_printf(m, "dpm not enabled\n"); 1595 return 0; 1596 } 1597 if ((adev->flags & AMD_IS_PX) && 1598 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1599 seq_printf(m, "PX asic powered off\n"); 1600 } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 1601 mutex_lock(&adev->pm.mutex); 1602 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 1603 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 1604 else 1605 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1606 mutex_unlock(&adev->pm.mutex); 1607 } else { 1608 return amdgpu_debugfs_pm_info_pp(m, adev); 1609 } 1610 1611 return 0; 1612} 1613 1614static const struct drm_info_list amdgpu_pm_info_list[] = { 1615 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 1616}; 1617#endif 1618 1619static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 1620{ 1621#if defined(CONFIG_DEBUG_FS) 1622 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 1623#else 1624 return 0; 1625#endif 1626}