/*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "amd_powerplay.h"

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pp_enabled)
		/* TODO */
		return;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;
		if (adev->pm.funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
		mutex_unlock(&adev->pm.mutex);
	}
}

/* power_dpm_state: reads and accepts "battery", "balanced" or "performance" */
static ssize_t amdgpu_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else
		pm = adev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		/* Can't set dpm state when the card is off */
		if (!(adev->flags & AMD_IS_PX) ||
		    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
			amdgpu_pm_compute_clocks(adev);
	}
fail:
	return count;
}

static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return snprintf(buf, PAGE_SIZE, "off\n");

	if (adev->pp_enabled) {
		enum amd_dpm_forced_level level;

		level = amdgpu_dpm_get_performance_level(adev);
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
				(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
				(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : "unknown");
	} else {
		enum amdgpu_dpm_forced_level level;

		level = adev->pm.dpm.forced_level;
		return snprintf(buf, PAGE_SIZE, "%s\n",
				(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
				(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
	}
}

static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
						       struct device_attribute *attr,
						       const char *buf,
						       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amdgpu_dpm_forced_level level;
	int ret = 0;

	/* Can't force performance level when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMDGPU_DPM_FORCED_LEVEL_MANUAL;
	} else {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_performance_level(adev, level);
	else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			count = -EINVAL;
			mutex_unlock(&adev->pm.mutex);
			goto fail;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret)
			count = -EINVAL;
		else
			adev->pm.dpm.forced_level = level;
		mutex_unlock(&adev->pm.mutex);
	}
fail:
	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len;

	/* keep data.nums sane when powerplay is not enabled */
	memset(&data, 0, sizeof(data));

	if (adev->pp_enabled)
		amdgpu_dpm_get_pp_num_states(adev, &data);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i = 0;

	if (adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	enum amd_pm_state_type pm = 0;
	int i;

	if (adev->pp_force_state_enabled && adev->pp_enabled) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);

		for (i = 0; i < data.nums; i++) {
			if (pm == data.states[i])
				break;
		}

		if (i == data.nums)
			i = -EINVAL;

		return snprintf(buf, PAGE_SIZE, "%d\n", i);

	} else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->pp_enabled) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];
		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}

static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size;

	if (adev->pp_enabled)
		size = amdgpu_dpm_get_pp_table(adev, &table);
	else
		return 0;

	/* don't let an error from the callback reach memcpy() as a size */
	if (size < 0)
		return size;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_enabled)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	return count;
}
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else if (adev->pm.funcs->print_clock_levels)
		size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;
		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
	else if (adev->pm.funcs->force_clock_level)
		adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else if (adev->pm.funcs->print_clock_levels)
		size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;
		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
	else if (adev->pm.funcs->force_clock_level)
		adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size = 0;

	if (adev->pp_enabled)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else if (adev->pm.funcs->print_clock_levels)
		size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long level;
	uint32_t i, mask = 0;
	char sub_str[2];

	for (i = 0; i < strlen(buf); i++) {
		if (*(buf + i) == '\n')
			continue;
		sub_str[0] = *(buf + i);
		sub_str[1] = '\0';
		ret = kstrtol(sub_str, 0, &level);

		if (ret) {
			count = -EINVAL;
			goto fail;
		}
		mask |= 1 << level;
	}

	if (adev->pp_enabled)
		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else if (adev->pm.funcs->force_clock_level)
		adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
fail:
	return count;
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->pp_enabled)
		value = amdgpu_dpm_get_sclk_od(adev);
	else if (adev->pm.funcs->get_sclk_od)
		value = adev->pm.funcs->get_sclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
	} else if (adev->pm.funcs->set_sclk_od) {
		adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;

	if (adev->pp_enabled)
		value = amdgpu_dpm_get_mclk_od(adev);
	else if (adev->pm.funcs->get_mclk_od)
		value = adev->pm.funcs->get_mclk_od(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret) {
		count = -EINVAL;
		goto fail;
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
	} else if (adev->pm.funcs->set_mclk_od) {
		adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_pm_compute_clocks(adev);
	}

fail:
	return count;
}
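/*
 * The DEVICE_ATTR() declarations below generate the dev_attr_* objects
 * that amdgpu_pm_sysfs_init() registers with device_create_file(); only
 * the pp_num_states/pp_cur_state/pp_force_state/pp_table files are
 * gated on adev->pp_enabled at registration time.
 */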
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_force_state,
		   amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_table,
		   amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_sclk,
		   amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_mclk,
		   amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_pcie,
		   amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_sclk_od,
		   amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_mclk_od,
		   amdgpu_set_pp_mclk_od);

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	int temp;

	/* Can't get temperature when the card is off */
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
		temp = 0;
	else
		temp = amdgpu_dpm_get_temperature(adev);

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;

	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
		return -EINVAL;

	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	/* never 0 (full-speed), fuse or smc-controlled always */
	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;

	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
		return -EINVAL;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	switch (value) {
	case 1: /* manual, percent-based */
		amdgpu_dpm_set_fan_control_mode(adev, FDO_PWM_MODE_STATIC);
		break;
	default: /* disable */
		amdgpu_dpm_set_fan_control_mode(adev, 0);
		break;
	}

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = (value * 100) / 255;

	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
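/*
 * fan1_input below reports RPM directly; pwm1 uses the standard hwmon
 * 0-255 range, which the handlers above map to the percent-based dpm
 * fan interface (value * 100 / 255 on write, speed * 255 / 100 on read).
 */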
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed;

	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
	if (err)
		return err;

	return sprintf(buf, "%i\n", speed);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if (adev->pp_enabled)
		return effective_mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->pm.funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->pm.funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->pm.funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->pm.funcs->set_fan_speed_percent &&
	     !adev->pm.funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* requires powerplay */
	if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
		return 0;

	return effective_mode;
}
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.funcs->get_temperature) {
		int temp = amdgpu_dpm_get_temperature(adev);

		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
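/*
 * Brief orientation (an added summary, not original documentation):
 * amdgpu_dpm_pick_power_state() below matches the requested state type
 * against the ATOM powerplay table classifications, and if nothing
 * matches it walks the fallback chain in the second switch, e.g.
 * thermal -> ACPI -> battery and ultimately performance, before giving
 * up and returning NULL.
 */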
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->pm.funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;

	amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* "amgdpu" is not a typo here; it matches the (misspelled) macro of
	 * the same name declared in the dpm headers
	 */
	if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
		equal = false;

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
	if (adev->pm.funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMDGPU_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
		/* enable/disable UVD */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_powergate_uvd(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.uvd_active = false;
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_pm_compute_clocks(adev);
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
		/* enable/disable VCE */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_powergate_vce(adev, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
		} else {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_pm_compute_clocks(adev);
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->pp_enabled)
		/* TODO */
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (!adev->pp_enabled) {
		if (adev->pm.funcs->get_temperature == NULL)
			return 0;
	}

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm force performance level\n");
		return ret;
	}

	if (adev->pp_enabled) {
		ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
		if (ret) {
			DRM_ERROR("failed to create device file pp_num_states\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_cur_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
		if (ret) {
			DRM_ERROR("failed to create device file pp_force_state\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_table);
		if (ret) {
			DRM_ERROR("failed to create device file pp_table\n");
			return ret;
		}
	}

	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}

	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}

void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->pp_enabled) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
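/*
 * When CONFIG_DEBUG_FS is set, the amdgpu_pm_info file defined below is
 * typically found under the DRM debugfs root, e.g. (path assumed):
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_pm_info
 */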
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	int32_t value;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* GPU Clocks */
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
		seq_printf(m, "GPU Load: %u %%\n", value);
	seq_printf(m, "\n");

	/* UVD clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n");
		} else {
			seq_printf(m, "UVD: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
		}
	}
	seq_printf(m, "\n");

	/* VCE clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n");
		} else {
			seq_printf(m, "VCE: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
		}
	}

	return 0;
}

static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		return amdgpu_debugfs_pm_info_pp(m, adev);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}