Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.14-rc6 1659 lines 47 kB view raw
1/* 2 * Permission is hereby granted, free of charge, to any person obtaining a 3 * copy of this software and associated documentation files (the "Software"), 4 * to deal in the Software without restriction, including without limitation 5 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 6 * and/or sell copies of the Software, and to permit persons to whom the 7 * Software is furnished to do so, subject to the following conditions: 8 * 9 * The above copyright notice and this permission notice shall be included in 10 * all copies or substantial portions of the Software. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 18 * OTHER DEALINGS IN THE SOFTWARE. 19 * 20 * Authors: Rafał Miłecki <zajec5@gmail.com> 21 * Alex Deucher <alexdeucher@gmail.com> 22 */ 23#include <drm/drmP.h> 24#include "amdgpu.h" 25#include "amdgpu_drv.h" 26#include "amdgpu_pm.h" 27#include "amdgpu_dpm.h" 28#include "atom.h" 29#include <linux/power_supply.h> 30#include <linux/hwmon.h> 31#include <linux/hwmon-sysfs.h> 32 33#include "amd_powerplay.h" 34 35static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); 36 37static const struct cg_flag_name clocks[] = { 38 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 39 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 40 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 41 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 42 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 43 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 44 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 45 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 46 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 47 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 48 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 49 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 50 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 51 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 52 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 53 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 54 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 55 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 56 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 57 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 58 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 59 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 60 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 61 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 62 {0, NULL}, 63}; 64 65void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 66{ 67 if (adev->pp_enabled) 68 /* TODO */ 69 return; 70 71 if (adev->pm.dpm_enabled) { 72 mutex_lock(&adev->pm.mutex); 73 if (power_supply_is_system_supplied() > 0) 74 adev->pm.dpm.ac_power = true; 75 else 76 adev->pm.dpm.ac_power = false; 77 if (adev->pm.funcs->enable_bapm) 78 amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power); 79 mutex_unlock(&adev->pm.mutex); 80 } 81} 82 83static ssize_t amdgpu_get_dpm_state(struct device *dev, 84 struct device_attribute *attr, 85 char *buf) 86{ 87 struct drm_device *ddev = dev_get_drvdata(dev); 88 struct amdgpu_device *adev = ddev->dev_private; 89 enum amd_pm_state_type pm; 90 91 if (adev->pp_enabled) { 92 pm = amdgpu_dpm_get_current_power_state(adev); 93 } else 94 pm = adev->pm.dpm.user_state; 95 96 return snprintf(buf, PAGE_SIZE, "%s\n", 97 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 98 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 99} 100 101static ssize_t amdgpu_set_dpm_state(struct device *dev, 102 struct device_attribute *attr, 103 const char *buf, 104 size_t count) 105{ 106 struct drm_device *ddev = dev_get_drvdata(dev); 107 struct amdgpu_device *adev = ddev->dev_private; 108 enum amd_pm_state_type state; 109 110 if (strncmp("battery", buf, strlen("battery")) == 0) 111 state = POWER_STATE_TYPE_BATTERY; 112 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 113 state = POWER_STATE_TYPE_BALANCED; 114 else if (strncmp("performance", buf, strlen("performance")) == 0) 115 state = POWER_STATE_TYPE_PERFORMANCE; 116 else { 117 count = -EINVAL; 118 goto fail; 119 } 120 121 if (adev->pp_enabled) { 122 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); 123 } else { 124 mutex_lock(&adev->pm.mutex); 125 adev->pm.dpm.user_state = state; 126 mutex_unlock(&adev->pm.mutex); 127 128 /* Can't set dpm state when the card is off */ 129 if (!(adev->flags & AMD_IS_PX) || 130 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 131 amdgpu_pm_compute_clocks(adev); 132 } 133fail: 134 return count; 135} 136 137static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 138 struct device_attribute *attr, 139 char *buf) 140{ 141 struct drm_device *ddev = dev_get_drvdata(dev); 142 struct amdgpu_device *adev = ddev->dev_private; 143 enum amd_dpm_forced_level level; 144 145 if ((adev->flags & AMD_IS_PX) && 146 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 147 return snprintf(buf, PAGE_SIZE, "off\n"); 148 149 level = amdgpu_dpm_get_performance_level(adev); 150 return snprintf(buf, PAGE_SIZE, "%s\n", 151 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 152 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 153 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 154 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 155 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 156 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 157 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 158 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 159 "unknown"); 160} 161 162static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 163 struct device_attribute *attr, 164 const char *buf, 165 size_t count) 166{ 167 struct drm_device *ddev = dev_get_drvdata(dev); 168 struct amdgpu_device *adev = ddev->dev_private; 169 enum amd_dpm_forced_level level; 170 enum amd_dpm_forced_level current_level; 171 int ret = 0; 172 173 /* Can't force performance level when the card is off */ 174 if ((adev->flags & AMD_IS_PX) && 175 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 176 return -EINVAL; 177 178 current_level = amdgpu_dpm_get_performance_level(adev); 179 180 if (strncmp("low", buf, strlen("low")) == 0) { 181 level = AMD_DPM_FORCED_LEVEL_LOW; 182 } else if (strncmp("high", buf, strlen("high")) == 0) { 183 level = AMD_DPM_FORCED_LEVEL_HIGH; 184 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 185 level = AMD_DPM_FORCED_LEVEL_AUTO; 186 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 187 level = AMD_DPM_FORCED_LEVEL_MANUAL; 188 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 189 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 190 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 191 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 192 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 193 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 194 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 195 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 196 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 197 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 198 } else { 199 count = -EINVAL; 200 goto fail; 201 } 202 203 if (current_level == level) 204 return count; 205 206 if (adev->pp_enabled) 207 amdgpu_dpm_force_performance_level(adev, level); 208 else { 209 mutex_lock(&adev->pm.mutex); 210 if (adev->pm.dpm.thermal_active) { 211 count = -EINVAL; 212 mutex_unlock(&adev->pm.mutex); 213 goto fail; 214 } 215 ret = amdgpu_dpm_force_performance_level(adev, level); 216 if (ret) 217 count = -EINVAL; 218 else 219 adev->pm.dpm.forced_level = level; 220 mutex_unlock(&adev->pm.mutex); 221 } 222 223fail: 224 return count; 225} 226 227static ssize_t amdgpu_get_pp_num_states(struct device *dev, 228 struct device_attribute *attr, 229 char *buf) 230{ 231 struct drm_device *ddev = dev_get_drvdata(dev); 232 struct amdgpu_device *adev = ddev->dev_private; 233 struct pp_states_info data; 234 int i, buf_len; 235 236 if (adev->pp_enabled) 237 amdgpu_dpm_get_pp_num_states(adev, &data); 238 239 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 240 for (i = 0; i < data.nums; i++) 241 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 242 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 243 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 244 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 245 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 246 247 return buf_len; 248} 249 250static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 251 struct device_attribute *attr, 252 char *buf) 253{ 254 struct drm_device *ddev = dev_get_drvdata(dev); 255 struct amdgpu_device *adev = ddev->dev_private; 256 struct pp_states_info data; 257 enum amd_pm_state_type pm = 0; 258 int i = 0; 259 260 if (adev->pp_enabled) { 261 262 pm = amdgpu_dpm_get_current_power_state(adev); 263 amdgpu_dpm_get_pp_num_states(adev, &data); 264 265 for (i = 0; i < data.nums; i++) { 266 if (pm == data.states[i]) 267 break; 268 } 269 270 if (i == data.nums) 271 i = -EINVAL; 272 } 273 274 return snprintf(buf, PAGE_SIZE, "%d\n", i); 275} 276 277static ssize_t amdgpu_get_pp_force_state(struct device *dev, 278 struct device_attribute *attr, 279 char *buf) 280{ 281 struct drm_device *ddev = dev_get_drvdata(dev); 282 struct amdgpu_device *adev = ddev->dev_private; 283 struct pp_states_info data; 284 enum amd_pm_state_type pm = 0; 285 int i; 286 287 if (adev->pp_force_state_enabled && adev->pp_enabled) { 288 pm = amdgpu_dpm_get_current_power_state(adev); 289 amdgpu_dpm_get_pp_num_states(adev, &data); 290 291 for (i = 0; i < data.nums; i++) { 292 if (pm == data.states[i]) 293 break; 294 } 295 296 if (i == data.nums) 297 i = -EINVAL; 298 299 return snprintf(buf, PAGE_SIZE, "%d\n", i); 300 301 } else 302 return snprintf(buf, PAGE_SIZE, "\n"); 303} 304 305static ssize_t amdgpu_set_pp_force_state(struct device *dev, 306 struct device_attribute *attr, 307 const char *buf, 308 size_t count) 309{ 310 struct drm_device *ddev = dev_get_drvdata(dev); 311 struct amdgpu_device *adev = ddev->dev_private; 312 enum amd_pm_state_type state = 0; 313 unsigned long idx; 314 int ret; 315 316 if (strlen(buf) == 1) 317 adev->pp_force_state_enabled = false; 318 else if (adev->pp_enabled) { 319 struct pp_states_info data; 320 321 ret = kstrtoul(buf, 0, &idx); 322 if (ret || idx >= ARRAY_SIZE(data.states)) { 323 count = -EINVAL; 324 goto fail; 325 } 326 327 amdgpu_dpm_get_pp_num_states(adev, &data); 328 state = data.states[idx]; 329 /* only set user selected power states */ 330 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 331 state != POWER_STATE_TYPE_DEFAULT) { 332 amdgpu_dpm_dispatch_task(adev, 333 AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); 334 adev->pp_force_state_enabled = true; 335 } 336 } 337fail: 338 return count; 339} 340 341static ssize_t amdgpu_get_pp_table(struct device *dev, 342 struct device_attribute *attr, 343 char *buf) 344{ 345 struct drm_device *ddev = dev_get_drvdata(dev); 346 struct amdgpu_device *adev = ddev->dev_private; 347 char *table = NULL; 348 int size; 349 350 if (adev->pp_enabled) 351 size = amdgpu_dpm_get_pp_table(adev, &table); 352 else 353 return 0; 354 355 if (size >= PAGE_SIZE) 356 size = PAGE_SIZE - 1; 357 358 memcpy(buf, table, size); 359 360 return size; 361} 362 363static ssize_t amdgpu_set_pp_table(struct device *dev, 364 struct device_attribute *attr, 365 const char *buf, 366 size_t count) 367{ 368 struct drm_device *ddev = dev_get_drvdata(dev); 369 struct amdgpu_device *adev = ddev->dev_private; 370 371 if (adev->pp_enabled) 372 amdgpu_dpm_set_pp_table(adev, buf, count); 373 374 return count; 375} 376 377static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 378 struct device_attribute *attr, 379 char *buf) 380{ 381 struct drm_device *ddev = dev_get_drvdata(dev); 382 struct amdgpu_device *adev = ddev->dev_private; 383 ssize_t size = 0; 384 385 if (adev->pp_enabled) 386 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 387 else if (adev->pm.funcs->print_clock_levels) 388 size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); 389 390 return size; 391} 392 393static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 394 struct device_attribute *attr, 395 const char *buf, 396 size_t count) 397{ 398 struct drm_device *ddev = dev_get_drvdata(dev); 399 struct amdgpu_device *adev = ddev->dev_private; 400 int ret; 401 long level; 402 uint32_t i, mask = 0; 403 char sub_str[2]; 404 405 for (i = 0; i < strlen(buf); i++) { 406 if (*(buf + i) == '\n') 407 continue; 408 sub_str[0] = *(buf + i); 409 sub_str[1] = '\0'; 410 ret = kstrtol(sub_str, 0, &level); 411 412 if (ret) { 413 count = -EINVAL; 414 goto fail; 415 } 416 mask |= 1 << level; 417 } 418 419 if (adev->pp_enabled) 420 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 421 else if (adev->pm.funcs->force_clock_level) 422 adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask); 423fail: 424 return count; 425} 426 427static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 428 struct device_attribute *attr, 429 char *buf) 430{ 431 struct drm_device *ddev = dev_get_drvdata(dev); 432 struct amdgpu_device *adev = ddev->dev_private; 433 ssize_t size = 0; 434 435 if (adev->pp_enabled) 436 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 437 else if (adev->pm.funcs->print_clock_levels) 438 size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); 439 440 return size; 441} 442 443static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 444 struct device_attribute *attr, 445 const char *buf, 446 size_t count) 447{ 448 struct drm_device *ddev = dev_get_drvdata(dev); 449 struct amdgpu_device *adev = ddev->dev_private; 450 int ret; 451 long level; 452 uint32_t i, mask = 0; 453 char sub_str[2]; 454 455 for (i = 0; i < strlen(buf); i++) { 456 if (*(buf + i) == '\n') 457 continue; 458 sub_str[0] = *(buf + i); 459 sub_str[1] = '\0'; 460 ret = kstrtol(sub_str, 0, &level); 461 462 if (ret) { 463 count = -EINVAL; 464 goto fail; 465 } 466 mask |= 1 << level; 467 } 468 469 if (adev->pp_enabled) 470 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 471 else if (adev->pm.funcs->force_clock_level) 472 adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask); 473fail: 474 return count; 475} 476 477static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 478 struct device_attribute *attr, 479 char *buf) 480{ 481 struct drm_device *ddev = dev_get_drvdata(dev); 482 struct amdgpu_device *adev = ddev->dev_private; 483 ssize_t size = 0; 484 485 if (adev->pp_enabled) 486 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 487 else if (adev->pm.funcs->print_clock_levels) 488 size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf); 489 490 return size; 491} 492 493static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 494 struct device_attribute *attr, 495 const char *buf, 496 size_t count) 497{ 498 struct drm_device *ddev = dev_get_drvdata(dev); 499 struct amdgpu_device *adev = ddev->dev_private; 500 int ret; 501 long level; 502 uint32_t i, mask = 0; 503 char sub_str[2]; 504 505 for (i = 0; i < strlen(buf); i++) { 506 if (*(buf + i) == '\n') 507 continue; 508 sub_str[0] = *(buf + i); 509 sub_str[1] = '\0'; 510 ret = kstrtol(sub_str, 0, &level); 511 512 if (ret) { 513 count = -EINVAL; 514 goto fail; 515 } 516 mask |= 1 << level; 517 } 518 519 if (adev->pp_enabled) 520 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 521 else if (adev->pm.funcs->force_clock_level) 522 adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask); 523fail: 524 return count; 525} 526 527static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 528 struct device_attribute *attr, 529 char *buf) 530{ 531 struct drm_device *ddev = dev_get_drvdata(dev); 532 struct amdgpu_device *adev = ddev->dev_private; 533 uint32_t value = 0; 534 535 if (adev->pp_enabled) 536 value = amdgpu_dpm_get_sclk_od(adev); 537 else if (adev->pm.funcs->get_sclk_od) 538 value = adev->pm.funcs->get_sclk_od(adev); 539 540 return snprintf(buf, PAGE_SIZE, "%d\n", value); 541} 542 543static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 544 struct device_attribute *attr, 545 const char *buf, 546 size_t count) 547{ 548 struct drm_device *ddev = dev_get_drvdata(dev); 549 struct amdgpu_device *adev = ddev->dev_private; 550 int ret; 551 long int value; 552 553 ret = kstrtol(buf, 0, &value); 554 555 if (ret) { 556 count = -EINVAL; 557 goto fail; 558 } 559 560 if (adev->pp_enabled) { 561 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 562 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); 563 } else if (adev->pm.funcs->set_sclk_od) { 564 adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); 565 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 566 amdgpu_pm_compute_clocks(adev); 567 } 568 569fail: 570 return count; 571} 572 573static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 574 struct device_attribute *attr, 575 char *buf) 576{ 577 struct drm_device *ddev = dev_get_drvdata(dev); 578 struct amdgpu_device *adev = ddev->dev_private; 579 uint32_t value = 0; 580 581 if (adev->pp_enabled) 582 value = amdgpu_dpm_get_mclk_od(adev); 583 else if (adev->pm.funcs->get_mclk_od) 584 value = adev->pm.funcs->get_mclk_od(adev); 585 586 return snprintf(buf, PAGE_SIZE, "%d\n", value); 587} 588 589static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 590 struct device_attribute *attr, 591 const char *buf, 592 size_t count) 593{ 594 struct drm_device *ddev = dev_get_drvdata(dev); 595 struct amdgpu_device *adev = ddev->dev_private; 596 int ret; 597 long int value; 598 599 ret = kstrtol(buf, 0, &value); 600 601 if (ret) { 602 count = -EINVAL; 603 goto fail; 604 } 605 606 if (adev->pp_enabled) { 607 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 608 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); 609 } else if (adev->pm.funcs->set_mclk_od) { 610 adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); 611 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 612 amdgpu_pm_compute_clocks(adev); 613 } 614 615fail: 616 return count; 617} 618 619static ssize_t amdgpu_get_pp_power_profile(struct device *dev, 620 char *buf, struct amd_pp_profile *query) 621{ 622 struct drm_device *ddev = dev_get_drvdata(dev); 623 struct amdgpu_device *adev = ddev->dev_private; 624 int ret = 0; 625 626 if (adev->pp_enabled) 627 ret = amdgpu_dpm_get_power_profile_state( 628 adev, query); 629 else if (adev->pm.funcs->get_power_profile_state) 630 ret = adev->pm.funcs->get_power_profile_state( 631 adev, query); 632 633 if (ret) 634 return ret; 635 636 return snprintf(buf, PAGE_SIZE, 637 "%d %d %d %d %d\n", 638 query->min_sclk / 100, 639 query->min_mclk / 100, 640 query->activity_threshold, 641 query->up_hyst, 642 query->down_hyst); 643} 644 645static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev, 646 struct device_attribute *attr, 647 char *buf) 648{ 649 struct amd_pp_profile query = {0}; 650 651 query.type = AMD_PP_GFX_PROFILE; 652 653 return amdgpu_get_pp_power_profile(dev, buf, &query); 654} 655 656static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev, 657 struct device_attribute *attr, 658 char *buf) 659{ 660 struct amd_pp_profile query = {0}; 661 662 query.type = AMD_PP_COMPUTE_PROFILE; 663 664 return amdgpu_get_pp_power_profile(dev, buf, &query); 665} 666 667static ssize_t amdgpu_set_pp_power_profile(struct device *dev, 668 const char *buf, 669 size_t count, 670 struct amd_pp_profile *request) 671{ 672 struct drm_device *ddev = dev_get_drvdata(dev); 673 struct amdgpu_device *adev = ddev->dev_private; 674 uint32_t loop = 0; 675 char *sub_str, buf_cpy[128], *tmp_str; 676 const char delimiter[3] = {' ', '\n', '\0'}; 677 long int value; 678 int ret = 0; 679 680 if (strncmp("reset", buf, strlen("reset")) == 0) { 681 if (adev->pp_enabled) 682 ret = amdgpu_dpm_reset_power_profile_state( 683 adev, request); 684 else if (adev->pm.funcs->reset_power_profile_state) 685 ret = adev->pm.funcs->reset_power_profile_state( 686 adev, request); 687 if (ret) { 688 count = -EINVAL; 689 goto fail; 690 } 691 return count; 692 } 693 694 if (strncmp("set", buf, strlen("set")) == 0) { 695 if (adev->pp_enabled) 696 ret = amdgpu_dpm_set_power_profile_state( 697 adev, request); 698 else if (adev->pm.funcs->set_power_profile_state) 699 ret = adev->pm.funcs->set_power_profile_state( 700 adev, request); 701 if (ret) { 702 count = -EINVAL; 703 goto fail; 704 } 705 return count; 706 } 707 708 if (count + 1 >= 128) { 709 count = -EINVAL; 710 goto fail; 711 } 712 713 memcpy(buf_cpy, buf, count + 1); 714 tmp_str = buf_cpy; 715 716 while (tmp_str[0]) { 717 sub_str = strsep(&tmp_str, delimiter); 718 ret = kstrtol(sub_str, 0, &value); 719 if (ret) { 720 count = -EINVAL; 721 goto fail; 722 } 723 724 switch (loop) { 725 case 0: 726 /* input unit MHz convert to dpm table unit 10KHz*/ 727 request->min_sclk = (uint32_t)value * 100; 728 break; 729 case 1: 730 /* input unit MHz convert to dpm table unit 10KHz*/ 731 request->min_mclk = (uint32_t)value * 100; 732 break; 733 case 2: 734 request->activity_threshold = (uint16_t)value; 735 break; 736 case 3: 737 request->up_hyst = (uint8_t)value; 738 break; 739 case 4: 740 request->down_hyst = (uint8_t)value; 741 break; 742 default: 743 break; 744 } 745 746 loop++; 747 } 748 749 if (adev->pp_enabled) 750 ret = amdgpu_dpm_set_power_profile_state( 751 adev, request); 752 else if (adev->pm.funcs->set_power_profile_state) 753 ret = adev->pm.funcs->set_power_profile_state( 754 adev, request); 755 756 if (ret) 757 count = -EINVAL; 758 759fail: 760 return count; 761} 762 763static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev, 764 struct device_attribute *attr, 765 const char *buf, 766 size_t count) 767{ 768 struct amd_pp_profile request = {0}; 769 770 request.type = AMD_PP_GFX_PROFILE; 771 772 return amdgpu_set_pp_power_profile(dev, buf, count, &request); 773} 774 775static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev, 776 struct device_attribute *attr, 777 const char *buf, 778 size_t count) 779{ 780 struct amd_pp_profile request = {0}; 781 782 request.type = AMD_PP_COMPUTE_PROFILE; 783 784 return amdgpu_set_pp_power_profile(dev, buf, count, &request); 785} 786 787static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 788static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 789 amdgpu_get_dpm_forced_performance_level, 790 amdgpu_set_dpm_forced_performance_level); 791static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 792static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 793static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 794 amdgpu_get_pp_force_state, 795 amdgpu_set_pp_force_state); 796static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 797 amdgpu_get_pp_table, 798 amdgpu_set_pp_table); 799static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 800 amdgpu_get_pp_dpm_sclk, 801 amdgpu_set_pp_dpm_sclk); 802static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 803 amdgpu_get_pp_dpm_mclk, 804 amdgpu_set_pp_dpm_mclk); 805static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 806 amdgpu_get_pp_dpm_pcie, 807 amdgpu_set_pp_dpm_pcie); 808static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, 809 amdgpu_get_pp_sclk_od, 810 amdgpu_set_pp_sclk_od); 811static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 812 amdgpu_get_pp_mclk_od, 813 amdgpu_set_pp_mclk_od); 814static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR, 815 amdgpu_get_pp_gfx_power_profile, 816 amdgpu_set_pp_gfx_power_profile); 817static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR, 818 amdgpu_get_pp_compute_power_profile, 819 amdgpu_set_pp_compute_power_profile); 820 821static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 822 struct device_attribute *attr, 823 char *buf) 824{ 825 struct amdgpu_device *adev = dev_get_drvdata(dev); 826 struct drm_device *ddev = adev->ddev; 827 int temp; 828 829 /* Can't get temperature when the card is off */ 830 if ((adev->flags & AMD_IS_PX) && 831 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 832 return -EINVAL; 833 834 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) 835 temp = 0; 836 else 837 temp = amdgpu_dpm_get_temperature(adev); 838 839 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 840} 841 842static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 843 struct device_attribute *attr, 844 char *buf) 845{ 846 struct amdgpu_device *adev = dev_get_drvdata(dev); 847 int hyst = to_sensor_dev_attr(attr)->index; 848 int temp; 849 850 if (hyst) 851 temp = adev->pm.dpm.thermal.min_temp; 852 else 853 temp = adev->pm.dpm.thermal.max_temp; 854 855 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 856} 857 858static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 859 struct device_attribute *attr, 860 char *buf) 861{ 862 struct amdgpu_device *adev = dev_get_drvdata(dev); 863 u32 pwm_mode = 0; 864 865 if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode) 866 return -EINVAL; 867 868 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 869 870 return sprintf(buf, "%i\n", pwm_mode); 871} 872 873static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 874 struct device_attribute *attr, 875 const char *buf, 876 size_t count) 877{ 878 struct amdgpu_device *adev = dev_get_drvdata(dev); 879 int err; 880 int value; 881 882 if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode) 883 return -EINVAL; 884 885 err = kstrtoint(buf, 10, &value); 886 if (err) 887 return err; 888 889 amdgpu_dpm_set_fan_control_mode(adev, value); 890 891 return count; 892} 893 894static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 895 struct device_attribute *attr, 896 char *buf) 897{ 898 return sprintf(buf, "%i\n", 0); 899} 900 901static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 902 struct device_attribute *attr, 903 char *buf) 904{ 905 return sprintf(buf, "%i\n", 255); 906} 907 908static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 909 struct device_attribute *attr, 910 const char *buf, size_t count) 911{ 912 struct amdgpu_device *adev = dev_get_drvdata(dev); 913 int err; 914 u32 value; 915 916 err = kstrtou32(buf, 10, &value); 917 if (err) 918 return err; 919 920 value = (value * 100) / 255; 921 922 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 923 if (err) 924 return err; 925 926 return count; 927} 928 929static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 930 struct device_attribute *attr, 931 char *buf) 932{ 933 struct amdgpu_device *adev = dev_get_drvdata(dev); 934 int err; 935 u32 speed; 936 937 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 938 if (err) 939 return err; 940 941 speed = (speed * 255) / 100; 942 943 return sprintf(buf, "%i\n", speed); 944} 945 946static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 947 struct device_attribute *attr, 948 char *buf) 949{ 950 struct amdgpu_device *adev = dev_get_drvdata(dev); 951 int err; 952 u32 speed; 953 954 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 955 if (err) 956 return err; 957 958 return sprintf(buf, "%i\n", speed); 959} 960 961static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); 962static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 963static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 964static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 965static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 966static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 967static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 968static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 969 970static struct attribute *hwmon_attributes[] = { 971 &sensor_dev_attr_temp1_input.dev_attr.attr, 972 &sensor_dev_attr_temp1_crit.dev_attr.attr, 973 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 974 &sensor_dev_attr_pwm1.dev_attr.attr, 975 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 976 &sensor_dev_attr_pwm1_min.dev_attr.attr, 977 &sensor_dev_attr_pwm1_max.dev_attr.attr, 978 &sensor_dev_attr_fan1_input.dev_attr.attr, 979 NULL 980}; 981 982static umode_t hwmon_attributes_visible(struct kobject *kobj, 983 struct attribute *attr, int index) 984{ 985 struct device *dev = kobj_to_dev(kobj); 986 struct amdgpu_device *adev = dev_get_drvdata(dev); 987 umode_t effective_mode = attr->mode; 988 989 /* Skip limit attributes if DPM is not enabled */ 990 if (!adev->pm.dpm_enabled && 991 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 992 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 993 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 994 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 995 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 996 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 997 return 0; 998 999 if (adev->pp_enabled) 1000 return effective_mode; 1001 1002 /* Skip fan attributes if fan is not present */ 1003 if (adev->pm.no_fan && 1004 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 1005 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 1006 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 1007 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 1008 return 0; 1009 1010 /* mask fan attributes if we have no bindings for this asic to expose */ 1011 if ((!adev->pm.funcs->get_fan_speed_percent && 1012 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 1013 (!adev->pm.funcs->get_fan_control_mode && 1014 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 1015 effective_mode &= ~S_IRUGO; 1016 1017 if ((!adev->pm.funcs->set_fan_speed_percent && 1018 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 1019 (!adev->pm.funcs->set_fan_control_mode && 1020 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 1021 effective_mode &= ~S_IWUSR; 1022 1023 /* hide max/min values if we can't both query and manage the fan */ 1024 if ((!adev->pm.funcs->set_fan_speed_percent && 1025 !adev->pm.funcs->get_fan_speed_percent) && 1026 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 1027 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 1028 return 0; 1029 1030 /* requires powerplay */ 1031 if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr) 1032 return 0; 1033 1034 return effective_mode; 1035} 1036 1037static const struct attribute_group hwmon_attrgroup = { 1038 .attrs = hwmon_attributes, 1039 .is_visible = hwmon_attributes_visible, 1040}; 1041 1042static const struct attribute_group *hwmon_groups[] = { 1043 &hwmon_attrgroup, 1044 NULL 1045}; 1046 1047void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 1048{ 1049 struct amdgpu_device *adev = 1050 container_of(work, struct amdgpu_device, 1051 pm.dpm.thermal.work); 1052 /* switch to the thermal state */ 1053 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1054 1055 if (!adev->pm.dpm_enabled) 1056 return; 1057 1058 if (adev->pm.funcs->get_temperature) { 1059 int temp = amdgpu_dpm_get_temperature(adev); 1060 1061 if (temp < adev->pm.dpm.thermal.min_temp) 1062 /* switch back the user state */ 1063 dpm_state = adev->pm.dpm.user_state; 1064 } else { 1065 if (adev->pm.dpm.thermal.high_to_low) 1066 /* switch back the user state */ 1067 dpm_state = adev->pm.dpm.user_state; 1068 } 1069 mutex_lock(&adev->pm.mutex); 1070 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 1071 adev->pm.dpm.thermal_active = true; 1072 else 1073 adev->pm.dpm.thermal_active = false; 1074 adev->pm.dpm.state = dpm_state; 1075 mutex_unlock(&adev->pm.mutex); 1076 1077 amdgpu_pm_compute_clocks(adev); 1078} 1079 1080static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 1081 enum amd_pm_state_type dpm_state) 1082{ 1083 int i; 1084 struct amdgpu_ps *ps; 1085 u32 ui_class; 1086 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 1087 true : false; 1088 1089 /* check if the vblank period is too short to adjust the mclk */ 1090 if (single_display && adev->pm.funcs->vblank_too_short) { 1091 if (amdgpu_dpm_vblank_too_short(adev)) 1092 single_display = false; 1093 } 1094 1095 /* certain older asics have a separare 3D performance state, 1096 * so try that first if the user selected performance 1097 */ 1098 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 1099 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 1100 /* balanced states don't exist at the moment */ 1101 if (dpm_state == POWER_STATE_TYPE_BALANCED) 1102 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1103 1104restart_search: 1105 /* Pick the best power state based on current conditions */ 1106 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 1107 ps = &adev->pm.dpm.ps[i]; 1108 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 1109 switch (dpm_state) { 1110 /* user states */ 1111 case POWER_STATE_TYPE_BATTERY: 1112 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 1113 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1114 if (single_display) 1115 return ps; 1116 } else 1117 return ps; 1118 } 1119 break; 1120 case POWER_STATE_TYPE_BALANCED: 1121 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 1122 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1123 if (single_display) 1124 return ps; 1125 } else 1126 return ps; 1127 } 1128 break; 1129 case POWER_STATE_TYPE_PERFORMANCE: 1130 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 1131 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 1132 if (single_display) 1133 return ps; 1134 } else 1135 return ps; 1136 } 1137 break; 1138 /* internal states */ 1139 case POWER_STATE_TYPE_INTERNAL_UVD: 1140 if (adev->pm.dpm.uvd_ps) 1141 return adev->pm.dpm.uvd_ps; 1142 else 1143 break; 1144 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1145 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 1146 return ps; 1147 break; 1148 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1149 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 1150 return ps; 1151 break; 1152 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1153 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 1154 return ps; 1155 break; 1156 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1157 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 1158 return ps; 1159 break; 1160 case POWER_STATE_TYPE_INTERNAL_BOOT: 1161 return adev->pm.dpm.boot_ps; 1162 case POWER_STATE_TYPE_INTERNAL_THERMAL: 1163 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 1164 return ps; 1165 break; 1166 case POWER_STATE_TYPE_INTERNAL_ACPI: 1167 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 1168 return ps; 1169 break; 1170 case POWER_STATE_TYPE_INTERNAL_ULV: 1171 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 1172 return ps; 1173 break; 1174 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1175 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 1176 return ps; 1177 break; 1178 default: 1179 break; 1180 } 1181 } 1182 /* use a fallback state if we didn't match */ 1183 switch (dpm_state) { 1184 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1185 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1186 goto restart_search; 1187 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1188 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1189 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1190 if (adev->pm.dpm.uvd_ps) { 1191 return adev->pm.dpm.uvd_ps; 1192 } else { 1193 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1194 goto restart_search; 1195 } 1196 case POWER_STATE_TYPE_INTERNAL_THERMAL: 1197 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 1198 goto restart_search; 1199 case POWER_STATE_TYPE_INTERNAL_ACPI: 1200 dpm_state = POWER_STATE_TYPE_BATTERY; 1201 goto restart_search; 1202 case POWER_STATE_TYPE_BATTERY: 1203 case POWER_STATE_TYPE_BALANCED: 1204 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1205 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1206 goto restart_search; 1207 default: 1208 break; 1209 } 1210 1211 return NULL; 1212} 1213 1214static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 1215{ 1216 struct amdgpu_ps *ps; 1217 enum amd_pm_state_type dpm_state; 1218 int ret; 1219 bool equal; 1220 1221 /* if dpm init failed */ 1222 if (!adev->pm.dpm_enabled) 1223 return; 1224 1225 if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 1226 /* add other state override checks here */ 1227 if ((!adev->pm.dpm.thermal_active) && 1228 (!adev->pm.dpm.uvd_active)) 1229 adev->pm.dpm.state = adev->pm.dpm.user_state; 1230 } 1231 dpm_state = adev->pm.dpm.state; 1232 1233 ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 1234 if (ps) 1235 adev->pm.dpm.requested_ps = ps; 1236 else 1237 return; 1238 1239 if (amdgpu_dpm == 1) { 1240 printk("switching from power state:\n"); 1241 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 1242 printk("switching to power state:\n"); 1243 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 1244 } 1245 1246 /* update whether vce is active */ 1247 ps->vce_active = adev->pm.dpm.vce_active; 1248 1249 amdgpu_dpm_display_configuration_changed(adev); 1250 1251 ret = amdgpu_dpm_pre_set_power_state(adev); 1252 if (ret) 1253 return; 1254 1255 if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))) 1256 equal = false; 1257 1258 if (equal) 1259 return; 1260 1261 amdgpu_dpm_set_power_state(adev); 1262 amdgpu_dpm_post_set_power_state(adev); 1263 1264 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 1265 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 1266 1267 if (adev->pm.funcs->force_performance_level) { 1268 if (adev->pm.dpm.thermal_active) { 1269 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 1270 /* force low perf level for thermal */ 1271 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 1272 /* save the user's level */ 1273 adev->pm.dpm.forced_level = level; 1274 } else { 1275 /* otherwise, user selected level */ 1276 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 1277 } 1278 } 1279} 1280 1281void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 1282{ 1283 if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) { 1284 /* enable/disable UVD */ 1285 mutex_lock(&adev->pm.mutex); 1286 amdgpu_dpm_powergate_uvd(adev, !enable); 1287 mutex_unlock(&adev->pm.mutex); 1288 } else { 1289 if (enable) { 1290 mutex_lock(&adev->pm.mutex); 1291 adev->pm.dpm.uvd_active = true; 1292 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 1293 mutex_unlock(&adev->pm.mutex); 1294 } else { 1295 mutex_lock(&adev->pm.mutex); 1296 adev->pm.dpm.uvd_active = false; 1297 mutex_unlock(&adev->pm.mutex); 1298 } 1299 amdgpu_pm_compute_clocks(adev); 1300 } 1301} 1302 1303void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 1304{ 1305 if (adev->pp_enabled || adev->pm.funcs->powergate_vce) { 1306 /* enable/disable VCE */ 1307 mutex_lock(&adev->pm.mutex); 1308 amdgpu_dpm_powergate_vce(adev, !enable); 1309 mutex_unlock(&adev->pm.mutex); 1310 } else { 1311 if (enable) { 1312 mutex_lock(&adev->pm.mutex); 1313 adev->pm.dpm.vce_active = true; 1314 /* XXX select vce level based on ring/task */ 1315 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 1316 mutex_unlock(&adev->pm.mutex); 1317 amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1318 AMD_CG_STATE_UNGATE); 1319 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1320 AMD_PG_STATE_UNGATE); 1321 amdgpu_pm_compute_clocks(adev); 1322 } else { 1323 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1324 AMD_PG_STATE_GATE); 1325 amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, 1326 AMD_CG_STATE_GATE); 1327 mutex_lock(&adev->pm.mutex); 1328 adev->pm.dpm.vce_active = false; 1329 mutex_unlock(&adev->pm.mutex); 1330 amdgpu_pm_compute_clocks(adev); 1331 } 1332 1333 } 1334} 1335 1336void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 1337{ 1338 int i; 1339 1340 if (adev->pp_enabled) 1341 /* TO DO */ 1342 return; 1343 1344 for (i = 0; i < adev->pm.dpm.num_ps; i++) 1345 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 1346 1347} 1348 1349int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 1350{ 1351 int ret; 1352 1353 if (adev->pm.sysfs_initialized) 1354 return 0; 1355 1356 if (!adev->pp_enabled) { 1357 if (adev->pm.funcs->get_temperature == NULL) 1358 return 0; 1359 } 1360 1361 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 1362 DRIVER_NAME, adev, 1363 hwmon_groups); 1364 if (IS_ERR(adev->pm.int_hwmon_dev)) { 1365 ret = PTR_ERR(adev->pm.int_hwmon_dev); 1366 dev_err(adev->dev, 1367 "Unable to register hwmon device: %d\n", ret); 1368 return ret; 1369 } 1370 1371 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); 1372 if (ret) { 1373 DRM_ERROR("failed to create device file for dpm state\n"); 1374 return ret; 1375 } 1376 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 1377 if (ret) { 1378 DRM_ERROR("failed to create device file for dpm state\n"); 1379 return ret; 1380 } 1381 1382 if (adev->pp_enabled) { 1383 ret = device_create_file(adev->dev, &dev_attr_pp_num_states); 1384 if (ret) { 1385 DRM_ERROR("failed to create device file pp_num_states\n"); 1386 return ret; 1387 } 1388 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); 1389 if (ret) { 1390 DRM_ERROR("failed to create device file pp_cur_state\n"); 1391 return ret; 1392 } 1393 ret = device_create_file(adev->dev, &dev_attr_pp_force_state); 1394 if (ret) { 1395 DRM_ERROR("failed to create device file pp_force_state\n"); 1396 return ret; 1397 } 1398 ret = device_create_file(adev->dev, &dev_attr_pp_table); 1399 if (ret) { 1400 DRM_ERROR("failed to create device file pp_table\n"); 1401 return ret; 1402 } 1403 } 1404 1405 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 1406 if (ret) { 1407 DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 1408 return ret; 1409 } 1410 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 1411 if (ret) { 1412 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 1413 return ret; 1414 } 1415 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 1416 if (ret) { 1417 DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 1418 return ret; 1419 } 1420 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); 1421 if (ret) { 1422 DRM_ERROR("failed to create device file pp_sclk_od\n"); 1423 return ret; 1424 } 1425 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); 1426 if (ret) { 1427 DRM_ERROR("failed to create device file pp_mclk_od\n"); 1428 return ret; 1429 } 1430 ret = device_create_file(adev->dev, 1431 &dev_attr_pp_gfx_power_profile); 1432 if (ret) { 1433 DRM_ERROR("failed to create device file " 1434 "pp_gfx_power_profile\n"); 1435 return ret; 1436 } 1437 ret = device_create_file(adev->dev, 1438 &dev_attr_pp_compute_power_profile); 1439 if (ret) { 1440 DRM_ERROR("failed to create device file " 1441 "pp_compute_power_profile\n"); 1442 return ret; 1443 } 1444 1445 ret = amdgpu_debugfs_pm_init(adev); 1446 if (ret) { 1447 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1448 return ret; 1449 } 1450 1451 adev->pm.sysfs_initialized = true; 1452 1453 return 0; 1454} 1455 1456void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 1457{ 1458 if (adev->pm.int_hwmon_dev) 1459 hwmon_device_unregister(adev->pm.int_hwmon_dev); 1460 device_remove_file(adev->dev, &dev_attr_power_dpm_state); 1461 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 1462 if (adev->pp_enabled) { 1463 device_remove_file(adev->dev, &dev_attr_pp_num_states); 1464 device_remove_file(adev->dev, &dev_attr_pp_cur_state); 1465 device_remove_file(adev->dev, &dev_attr_pp_force_state); 1466 device_remove_file(adev->dev, &dev_attr_pp_table); 1467 } 1468 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 1469 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 1470 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 1471 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 1472 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 1473 device_remove_file(adev->dev, 1474 &dev_attr_pp_gfx_power_profile); 1475 device_remove_file(adev->dev, 1476 &dev_attr_pp_compute_power_profile); 1477} 1478 1479void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 1480{ 1481 struct drm_device *ddev = adev->ddev; 1482 struct drm_crtc *crtc; 1483 struct amdgpu_crtc *amdgpu_crtc; 1484 int i = 0; 1485 1486 if (!adev->pm.dpm_enabled) 1487 return; 1488 1489 if (adev->mode_info.num_crtc) 1490 amdgpu_display_bandwidth_update(adev); 1491 1492 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1493 struct amdgpu_ring *ring = adev->rings[i]; 1494 if (ring && ring->ready) 1495 amdgpu_fence_wait_empty(ring); 1496 } 1497 1498 if (adev->pp_enabled) { 1499 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); 1500 } else { 1501 mutex_lock(&adev->pm.mutex); 1502 adev->pm.dpm.new_active_crtcs = 0; 1503 adev->pm.dpm.new_active_crtc_count = 0; 1504 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 1505 list_for_each_entry(crtc, 1506 &ddev->mode_config.crtc_list, head) { 1507 amdgpu_crtc = to_amdgpu_crtc(crtc); 1508 if (crtc->enabled) { 1509 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 1510 adev->pm.dpm.new_active_crtc_count++; 1511 } 1512 } 1513 } 1514 /* update battery/ac status */ 1515 if (power_supply_is_system_supplied() > 0) 1516 adev->pm.dpm.ac_power = true; 1517 else 1518 adev->pm.dpm.ac_power = false; 1519 1520 amdgpu_dpm_change_power_state_locked(adev); 1521 1522 mutex_unlock(&adev->pm.mutex); 1523 } 1524} 1525 1526/* 1527 * Debugfs info 1528 */ 1529#if defined(CONFIG_DEBUG_FS) 1530 1531static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 1532{ 1533 uint32_t value; 1534 struct pp_gpu_power query = {0}; 1535 int size; 1536 1537 /* sanity check PP is enabled */ 1538 if (!(adev->powerplay.pp_funcs && 1539 adev->powerplay.pp_funcs->read_sensor)) 1540 return -EINVAL; 1541 1542 /* GPU Clocks */ 1543 size = sizeof(value); 1544 seq_printf(m, "GFX Clocks and Power:\n"); 1545 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 1546 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 1547 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 1548 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 1549 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 1550 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 1551 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 1552 seq_printf(m, "\t%u mV (VDDNB)\n", value); 1553 size = sizeof(query); 1554 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) { 1555 seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8, 1556 query.vddc_power & 0xff); 1557 seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8, 1558 query.vddci_power & 0xff); 1559 seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8, 1560 query.max_gpu_power & 0xff); 1561 seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8, 1562 query.average_gpu_power & 0xff); 1563 } 1564 size = sizeof(value); 1565 seq_printf(m, "\n"); 1566 1567 /* GPU Temp */ 1568 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 1569 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 1570 1571 /* GPU Load */ 1572 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 1573 seq_printf(m, "GPU Load: %u %%\n", value); 1574 seq_printf(m, "\n"); 1575 1576 /* UVD clocks */ 1577 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 1578 if (!value) { 1579 seq_printf(m, "UVD: Disabled\n"); 1580 } else { 1581 seq_printf(m, "UVD: Enabled\n"); 1582 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 1583 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 1584 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 1585 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 1586 } 1587 } 1588 seq_printf(m, "\n"); 1589 1590 /* VCE clocks */ 1591 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 1592 if (!value) { 1593 seq_printf(m, "VCE: Disabled\n"); 1594 } else { 1595 seq_printf(m, "VCE: Enabled\n"); 1596 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 1597 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 1598 } 1599 } 1600 1601 return 0; 1602} 1603 1604static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 1605{ 1606 int i; 1607 1608 for (i = 0; clocks[i].flag; i++) 1609 seq_printf(m, "\t%s: %s\n", clocks[i].name, 1610 (flags & clocks[i].flag) ? "On" : "Off"); 1611} 1612 1613static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 1614{ 1615 struct drm_info_node *node = (struct drm_info_node *) m->private; 1616 struct drm_device *dev = node->minor->dev; 1617 struct amdgpu_device *adev = dev->dev_private; 1618 struct drm_device *ddev = adev->ddev; 1619 u32 flags = 0; 1620 1621 amdgpu_get_clockgating_state(adev, &flags); 1622 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 1623 amdgpu_parse_cg_state(m, flags); 1624 seq_printf(m, "\n"); 1625 1626 if (!adev->pm.dpm_enabled) { 1627 seq_printf(m, "dpm not enabled\n"); 1628 return 0; 1629 } 1630 if ((adev->flags & AMD_IS_PX) && 1631 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1632 seq_printf(m, "PX asic powered off\n"); 1633 } else if (adev->pp_enabled) { 1634 return amdgpu_debugfs_pm_info_pp(m, adev); 1635 } else { 1636 mutex_lock(&adev->pm.mutex); 1637 if (adev->pm.funcs->debugfs_print_current_performance_level) 1638 adev->pm.funcs->debugfs_print_current_performance_level(adev, m); 1639 else 1640 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1641 mutex_unlock(&adev->pm.mutex); 1642 } 1643 1644 return 0; 1645} 1646 1647static const struct drm_info_list amdgpu_pm_info_list[] = { 1648 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 1649}; 1650#endif 1651 1652static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 1653{ 1654#if defined(CONFIG_DEBUG_FS) 1655 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 1656#else 1657 return 0; 1658#endif 1659}