/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>

#define MAX_NUM_OF_FEATURES_PER_SUBSET		8
#define MAX_NUM_OF_SUBSETS			8

#define DEVICE_ATTR_IS(_name)	(attr_id == device_attr_id__##_name)

struct od_attribute {
	struct kobj_attribute	attribute;
	struct list_head	entry;
};

struct od_kobj {
	struct kobject		kobj;
	struct list_head	entry;
	struct list_head	attribute;
	void			*priv;
};

struct od_feature_ops {
	umode_t (*is_visible)(struct amdgpu_device *adev);
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};

struct od_feature_item {
	const char		*name;
	struct od_feature_ops	ops;
};

struct od_feature_container {
	char				*name;
	struct od_feature_ops		ops;
	struct od_feature_item		sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};

struct od_feature_set {
	struct od_feature_container	containers[MAX_NUM_OF_SUBSETS];
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM",
	"WINDOW_3D",
	"CAPPED",
	"UNCAPPED",
};

/**
 * amdgpu_pm_dev_state_check - Check if device can be accessed.
 * @adev: Target device.
 * @runpm: Check runpm status for suspend state checks.
 *
 * Checks the state of the @adev for access. Return 0 if the device is
 * accessible or a negative error code otherwise.
 */
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
	bool runpm_check = runpm ? adev->in_runpm : false;
	bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);

	if (amdgpu_in_reset(adev) || !full_init)
		return -EBUSY;

	if (adev->in_suspend && !runpm_check)
		return -EBUSY;

	return 0;
}

/**
 * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to resume if
 * needed. Return 0 if the device is accessible or a negative error code
 * otherwise.
 */
static int amdgpu_pm_get_access(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	return pm_runtime_resume_and_get(adev->dev);
}

/**
 * amdgpu_pm_get_access_if_active - Check if device is active for access.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to determine
 * if device is active. Allow access only if device is active. Return 0 if the
 * device is accessible or a negative error code otherwise.
 */
static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
{
	int ret;

	/* Ignore runpm status. If device is in suspended state, deny access */
	ret = amdgpu_pm_dev_state_check(adev, false);
	if (ret)
		return ret;

	/*
	 * Allow only if device is active. If runpm is disabled also, as in
	 * kernels without CONFIG_PM, allow access.
	 */
	ret = pm_runtime_get_if_active(adev->dev);
	if (!ret)
		return -EPERM;

	return 0;
}

/**
 * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
 * @adev: Target device.
 *
 * Should be paired with amdgpu_pm_get_access* calls
 */
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
	pm_runtime_put_autosuspend(adev->dev);
}

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 */
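
/*
 * Illustrative usage (not part of the original documentation; assumes
 * the GPU is card0 and the shell has root privileges):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 */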

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_dpm_get_current_power_state(adev, &pm);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_power_state(adev, state);

	amdgpu_pm_put_access(adev);

	return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating, or clock fluctuation, to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
 */
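
/*
 * Illustrative usage (hypothetical session; assumes card0 and root).
 * manual must be selected before the pp_dpm_* and pp_power_profile_mode
 * files accept writes:
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 */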

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	level = amdgpu_dpm_get_performance_level(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     const char *buf,
							     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		amdgpu_pm_put_access(adev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
		return -EINVAL;
	}
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	amdgpu_pm_put_access(adev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	if (amdgpu_dpm_get_pp_num_states(adev, &data))
		memset(&data, 0, sizeof(data));

	amdgpu_pm_put_access(adev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_dpm_get_current_power_state(adev, &pm);

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->pm.pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	unsigned long idx;
	int ret;

	adev->pm.pp_force_state_enabled = false;

	if (strlen(buf) == 1)
		return count;

	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
		return -EINVAL;

	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	if (ret)
		goto err_out;

	state = data.states[idx];

	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		if (ret)
			goto err_out;

		adev->pm.pp_force_state_enabled = true;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return ret;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current power play table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
 */
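
/*
 * Illustrative usage (hypothetical session; assumes card0 and root):
 * dump the active table, modify it offline, then upload the result.
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > pp_table.bin
 *	# ... edit pp_table.bin with a vendor-specific tool ...
 *	cat pp_table.bin > /sys/class/drm/card0/device/pp_table
 */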

static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_pp_table(adev, &table);

	amdgpu_pm_put_access(adev);

	if (size <= 0)
		return size;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The pp_od_clk_voltage is used for
 * this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve. This is
 *   available for Vega20 and NV1X.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
 *   voltage calculation can be illustrated by "voltage = voltage
 *   calculated from v/f curve + overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, voltage curve points
 *   or voltage offset labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz.
 *   "m 1 800" will update the maximum mclk to 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 *   For the sclk voltage curve supported by Vega20 and NV1X, enter the
 *   new values by writing a string that contains "vc point clock voltage"
 *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
 *   600" will update the first point (index 0) with clock set to 300 MHz
 *   and voltage to 600 mV. "vc 2 1000 1000" will update the third point
 *   (index 2) with clock set to 1000 MHz and voltage to 1000 mV.
 *
 *   For the voltage offset supported by Sienna Cichlid, Navy Flounder,
 *   Dimgrey Cavefish and some later SMU13 ASICs, enter the new value by
 *   writing a string that contains "vo offset". E.g., "vo -10" will update
 *   the extra voltage offset applied to the whole v/f curve line to -10 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
 */
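
/*
 * Illustrative pre-Vega20 session (hypothetical values; assumes card0,
 * root and the manual performance level selected beforehand):
 *
 * .. code-block:: bash
 *
 *	echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	# revert to the default power levels
 *	echo "r" > /sys/class/drm/card0/device/pp_od_clk_voltage
 */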

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (count > 127 || count == 0)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = 0;

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
					      type,
					      parameter,
					      parameter_size))
		goto err_out;

	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
					  parameter, parameter_size))
		goto err_out;

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (amdgpu_dpm_dispatch_task(adev,
					     AMD_PP_TASK_READJUST_POWER_STATE,
					     NULL))
			goto err_out;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret;
	enum pp_clock_type od_clocks[6] = {
		OD_SCLK,
		OD_MCLK,
		OD_VDDC_CURVE,
		OD_RANGE,
		OD_VDDGFX_OFFSET,
		OD_CCLK,
	};
	uint clk_index;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
		if (ret)
			break;
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this. It is
 * only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - The current ppfeature mask
 * - A list of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and input the
 * new ppfeature mask.
 */
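
/*
 * Illustrative usage (hypothetical mask values; assumes card0 and root).
 * If the current mask were 0xff, writing 0xfb would clear bit 2 and
 * disable the corresponding feature:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0xfb > /sys/class/drm/card0/device/pp_features
 */
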
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels. If deep sleep is
 * applied to a clock, the level will be denoted by a special level 'S:'
 * E.g., ::
 *
 *	S: 19Mhz *
 *	0: 615Mhz
 *	1: 800Mhz
 *	2: 888Mhz
 *	3: 1000Mhz
 *
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter the new values by writing a string containing the
 * indices of the levels to enable to the corresponding file
 * (pp_dpm_sclk/mclk/pcie). E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */
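
/*
 * Illustrative read (hypothetical output; assumes card0). The asterisk
 * marks the currently selected level:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_dpm_mclk
 *	# 0: 300Mhz *
 *	# 1: 1000Mhz
 */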

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret = 0;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
	if (ret) {
		/* balance the access taken above before bailing out */
		amdgpu_pm_put_access(adev);
		return ret;
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}
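
/*
 * For illustration (not in the original source): writing "4 5 6" to one
 * of the pp_dpm_* files makes amdgpu_read_mask() return
 * mask = BIT(4) | BIT(5) | BIT(6) = 0x70, which is then handed to
 * amdgpu_dpm_force_clock_level() by amdgpu_set_pp_dpm_clock() below.
 */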

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       const char *buf,
				       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_force_clock_level(adev, type, mask);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_sclk_od(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

	amdgpu_pm_put_access(adev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_mclk_od(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	amdgpu_pm_put_access(adev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family. Additionally,
 * you can apply the custom heuristics to different clock domains. Each
 * clock domain is considered a distinct operation, so if you modify the
 * gfxclk heuristics and then the memclk heuristics, all of the
 * custom heuristics will be retained until you switch to another profile.
 *
 */
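
/*
 * Illustrative usage (hypothetical; assumes card0, root and the manual
 * performance level). Profile numbers and custom parameter lists are
 * asic-specific; consult the output of a prior read:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo 3 > /sys/class/drm/card0/device/pp_power_profile_mode
 */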

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			if (!tmp_str)
				break;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	amdgpu_pm_put_access(adev);

	if (!ret)
		return count;

	return -EINVAL;
}

static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
					enum amd_pp_sensors sensor,
					void *query)
{
	int r, size = sizeof(uint32_t);

	r = amdgpu_pm_get_access_if_active(adev);
	if (r)
		return r;

	/* get the sensor value */
	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);

	amdgpu_pm_put_access(adev);

	return r;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
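
/*
 * Illustrative read (hypothetical output; assumes card0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_busy_percent
 *	# 37
 */
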
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: vcn_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VCN
 * is as a percentage. The file vcn_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
 */
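
/*
 * Illustrative estimate (hypothetical numbers; assumes card0). With an
 * output of "count0 count1 mps", the traffic received plus sent over the
 * last second is bounded by roughly (count0 + count1) * mps bytes:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pcie_bw
 *	# 4096 8192 256  ->  (4096 + 8192) * 256 = ~3 MB upper bound
 */
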
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%llu %llu %i\n",
			  count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a Unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);

	return 0;
}

/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and, if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
 */
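
/*
 * Illustrative usage (hypothetical; assumes card0 and root). Log at most
 * once per minute, or disable the logging entirely:
 *
 * .. code-block:: bash
 *
 *	echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *	echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging
 */
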
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			  adev_to_drm(adev)->unique,
			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			  adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		ratelimit_state_reset_interval(&adev->throttling_logging_rs,
					       (throttling_logging_interval - 1) * HZ);
		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

/**
 * DOC: apu_thermal_cap
 *
 * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
 * limit temperature in millidegrees Celsius.
 *
 * Reading back the file shows you the current core limit value.
 *
 * Writing an integer to the file sets a new thermal limit. The value
 * should be between 0 and 100; writes outside that range are rejected.
 */
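
/*
 * Illustrative usage (hypothetical; assumes card0 and root):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/apu_thermal_cap
 *	echo 95 > /sys/class/drm/card0/device/apu_thermal_cap
 */
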
static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	int ret, size;
	u32 limit;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
	if (!ret)
		size = sysfs_emit(buf, "%u\n", limit);
	else
		size = sysfs_emit(buf, "failed to get thermal limit\n");

	amdgpu_pm_put_access(adev);

	return size;
}

static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	int ret;
	u32 value;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	ret = kstrtou32(buf, 10, &value);
	if (ret)
		return ret;

	if (value > 100) {
		dev_err(dev, "Invalid argument!\n");
		return -EINVAL;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
	if (ret) {
		amdgpu_pm_put_access(adev);
		dev_err(dev, "failed to update thermal limit\n");
		return ret;
	}

	amdgpu_pm_put_access(adev);

	return count;
}

static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
					 struct amdgpu_device_attr *attr,
					 uint32_t mask,
					 enum amdgpu_device_attr_states *states)
{
	if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static ssize_t amdgpu_get_pm_metrics(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * These data include temperature, frequency, engine utilization,
 * power consumption, throttler status, fan speed and CPU core statistics
 * (available for APUs only). That is, it gives a snapshot of all sensors
 * at the same time.
 */
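
/*
 * Illustrative read (assumes card0). The file is a binary, versioned
 * metrics structure, so pipe it through a hex dumper rather than cat:
 *
 * .. code-block:: bash
 *
 *	xxd /sys/class/drm/card0/device/gpu_metrics | head
 */
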
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	amdgpu_pm_put_access(adev);

	return size;
}

static int amdgpu_show_powershift_percent(struct device *dev,
					  char *buf, enum amd_pp_sensors sensor)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power;
	int r = 0, i;

	r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
	if (r == -EOPNOTSUPP) {
		/* sensor not available on dGPU, try to read from APU */
		adev = NULL;
		mutex_lock(&mgpu_info.mutex);
		for (i = 0; i < mgpu_info.num_gpu; i++) {
			if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
				adev = mgpu_info.gpu_ins[i].adev;
				break;
			}
		}
		mutex_unlock(&mgpu_info.mutex);
		if (adev)
			r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
	}

	if (r)
		return r;

	return sysfs_emit(buf, "%u%%\n", ss_power);
}

/**
 * DOC: smartshift_apu_power
 *
 * The amdgpu driver provides a sysfs API for reporting the APU power
 * shift in percentage if the platform supports smartshift. Value 0 means that
 * there is no powershift and values between [1-100] mean that the power
 * is shifted to the APU; the percentage of boost is with respect to the APU
 * power limit on the platform.
 */

static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
					       char *buf)
{
	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
}

/**
 * DOC: smartshift_dgpu_power
 *
 * The amdgpu driver provides a sysfs API for reporting the dGPU power
 * shift in percentage if the platform supports smartshift. Value 0 means that
 * there is no powershift and values between [1-100] mean that the power
 * is shifted to the dGPU; the percentage of boost is with respect to the dGPU
 * power limit on the platform.
 */

static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
						char *buf)
{
	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
}

/**
 * DOC: smartshift_bias
 *
 * The amdgpu driver provides a sysfs API for reporting the
 * smartshift (SS2.0) bias level. The value ranges from -100 to 100
 * and the default is 0. -100 sets maximum preference to APU
 * and 100 sets maximum preference to dGPU.
 */
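
/*
 * Illustrative usage (hypothetical; assumes card0 and root). Bias power
 * moderately toward the APU:
 *
 * .. code-block:: bash
 *
 *	echo -30 > /sys/class/drm/card0/device/smartshift_bias
 *	cat /sys/class/drm/card0/device/smartshift_bias
 */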

static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	int r = 0;

	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);

	return r;
}

static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r = 0;
	int bias = 0;

	/*
	 * Parse before taking a pm reference so a malformed value cannot
	 * trigger an unbalanced amdgpu_pm_put_access().
	 */
	r = kstrtoint(buf, 10, &bias);
	if (r)
		return r;

	r = amdgpu_pm_get_access(adev);
	if (r < 0)
		return r;

	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;

	amdgpu_smartshift_bias = bias;
	r = count;

	/* TODO: update bias level with SMU message */

	amdgpu_pm_put_access(adev);

	return r;
}
1884
1885static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1886 uint32_t mask, enum amdgpu_device_attr_states *states)
1887{
1888 if (!amdgpu_device_supports_smart_shift(adev))
1889 *states = ATTR_STATE_UNSUPPORTED;
1890
1891 return 0;
1892}
1893
1894static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1895 uint32_t mask, enum amdgpu_device_attr_states *states)
1896{
1897 uint32_t ss_power;
1898
1899 if (!amdgpu_device_supports_smart_shift(adev))
1900 *states = ATTR_STATE_UNSUPPORTED;
1901 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1902 (void *)&ss_power))
1903 *states = ATTR_STATE_UNSUPPORTED;
1904 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1905 (void *)&ss_power))
1906 *states = ATTR_STATE_UNSUPPORTED;
1907
1908 return 0;
1909}
1910
1911static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1912 uint32_t mask, enum amdgpu_device_attr_states *states)
1913{
1914 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1915
1916 *states = ATTR_STATE_SUPPORTED;
1917
1918 if (!amdgpu_dpm_is_overdrive_supported(adev)) {
1919 *states = ATTR_STATE_UNSUPPORTED;
1920 return 0;
1921 }
1922
1923 /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
1924 if (gc_ver == IP_VERSION(9, 4, 3) ||
1925 gc_ver == IP_VERSION(9, 4, 4) ||
1926 gc_ver == IP_VERSION(9, 5, 0)) {
1927 if (amdgpu_sriov_multi_vf_mode(adev))
1928 *states = ATTR_STATE_UNSUPPORTED;
1929 return 0;
1930 }
1931
1932 if (!(attr->flags & mask))
1933 *states = ATTR_STATE_UNSUPPORTED;
1934
1935 return 0;
1936}
1937
1938static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1939 uint32_t mask, enum amdgpu_device_attr_states *states)
1940{
1941 struct device_attribute *dev_attr = &attr->dev_attr;
1942 uint32_t gc_ver;
1943
1944 *states = ATTR_STATE_SUPPORTED;
1945
1946 if (!(attr->flags & mask)) {
1947 *states = ATTR_STATE_UNSUPPORTED;
1948 return 0;
1949 }
1950
1951 gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1952 /* dcefclk node is not available on gfx 11.0.3 sriov */
1953 if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
1954 gc_ver < IP_VERSION(9, 0, 0) ||
1955 !amdgpu_device_has_display_hardware(adev))
1956 *states = ATTR_STATE_UNSUPPORTED;
1957
	/* SMU MP1 does not support dcefclk level setting;
	 * setting should also not be allowed from a VF if not in one VF mode.
	 */
1961 if (gc_ver >= IP_VERSION(10, 0, 0) ||
1962 (amdgpu_sriov_multi_vf_mode(adev))) {
1963 dev_attr->attr.mode &= ~S_IWUGO;
1964 dev_attr->store = NULL;
1965 }
1966
1967 return 0;
1968}
1969
1970static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1971 uint32_t mask, enum amdgpu_device_attr_states *states)
1972{
1973 struct device_attribute *dev_attr = &attr->dev_attr;
1974 enum amdgpu_device_attr_id attr_id = attr->attr_id;
1975 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
1976 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1977
1978 *states = ATTR_STATE_SUPPORTED;
1979
1980 if (!(attr->flags & mask)) {
1981 *states = ATTR_STATE_UNSUPPORTED;
1982 return 0;
1983 }
1984
1985 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1986 if (gc_ver < IP_VERSION(9, 0, 0))
1987 *states = ATTR_STATE_UNSUPPORTED;
1988 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1989 if (mp1_ver < IP_VERSION(10, 0, 0))
1990 *states = ATTR_STATE_UNSUPPORTED;
1991 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
1992 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
1993 gc_ver == IP_VERSION(10, 3, 3) ||
1994 gc_ver == IP_VERSION(10, 3, 6) ||
1995 gc_ver == IP_VERSION(10, 3, 7) ||
1996 gc_ver == IP_VERSION(10, 3, 0) ||
1997 gc_ver == IP_VERSION(10, 1, 2) ||
1998 gc_ver == IP_VERSION(11, 0, 0) ||
1999 gc_ver == IP_VERSION(11, 0, 1) ||
2000 gc_ver == IP_VERSION(11, 0, 4) ||
2001 gc_ver == IP_VERSION(11, 5, 0) ||
2002 gc_ver == IP_VERSION(11, 0, 2) ||
2003 gc_ver == IP_VERSION(11, 0, 3) ||
2004 gc_ver == IP_VERSION(9, 4, 3) ||
2005 gc_ver == IP_VERSION(9, 4, 4) ||
2006 gc_ver == IP_VERSION(9, 5, 0)))
2007 *states = ATTR_STATE_UNSUPPORTED;
2008 } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2009 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2010 gc_ver == IP_VERSION(10, 3, 0) ||
2011 gc_ver == IP_VERSION(11, 0, 2) ||
2012 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2013 *states = ATTR_STATE_UNSUPPORTED;
2014 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2015 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2016 gc_ver == IP_VERSION(10, 3, 3) ||
2017 gc_ver == IP_VERSION(10, 3, 6) ||
2018 gc_ver == IP_VERSION(10, 3, 7) ||
2019 gc_ver == IP_VERSION(10, 3, 0) ||
2020 gc_ver == IP_VERSION(10, 1, 2) ||
2021 gc_ver == IP_VERSION(11, 0, 0) ||
2022 gc_ver == IP_VERSION(11, 0, 1) ||
2023 gc_ver == IP_VERSION(11, 0, 4) ||
2024 gc_ver == IP_VERSION(11, 5, 0) ||
2025 gc_ver == IP_VERSION(11, 0, 2) ||
2026 gc_ver == IP_VERSION(11, 0, 3) ||
2027 gc_ver == IP_VERSION(9, 4, 3) ||
2028 gc_ver == IP_VERSION(9, 4, 4) ||
2029 gc_ver == IP_VERSION(9, 5, 0)))
2030 *states = ATTR_STATE_UNSUPPORTED;
2031 } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2032 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2033 gc_ver == IP_VERSION(10, 3, 0) ||
2034 gc_ver == IP_VERSION(11, 0, 2) ||
2035 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2036 *states = ATTR_STATE_UNSUPPORTED;
2037 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2038 if (gc_ver == IP_VERSION(9, 4, 2) ||
2039 gc_ver == IP_VERSION(9, 4, 3) ||
2040 gc_ver == IP_VERSION(9, 4, 4) ||
2041 gc_ver == IP_VERSION(9, 5, 0))
2042 *states = ATTR_STATE_UNSUPPORTED;
2043 }
2044
2045 switch (gc_ver) {
2046 case IP_VERSION(9, 4, 1):
2047 case IP_VERSION(9, 4, 2):
		/* MI series cards do not support standalone mclk/socclk/fclk level setting */
2049 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2050 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2051 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2052 dev_attr->attr.mode &= ~S_IWUGO;
2053 dev_attr->store = NULL;
2054 }
2055 break;
2056 default:
2057 break;
2058 }
2059
2060 /* setting should not be allowed from VF if not in one VF mode */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2062 dev_attr->attr.mode &= ~S_IWUGO;
2063 dev_attr->store = NULL;
2064 }
2065
2066 return 0;
2067}
2068
2069/**
2070 * DOC: board
2071 *
 * Certain SOCs can support reporting of various board attributes. This is
 * useful for user applications to monitor various board related attributes.
 *
 * The amdgpu driver provides a sysfs API for reporting board attributes.
 * Presently, nine types of attributes are reported. Baseboard temperature and
 * GPU board temperature are reported as binary files. NPM status, current node
 * power limit, max node power limit, node power, global PPT residency,
 * baseboard_power and baseboard_power_limit are reported as ASCII text files.
2080 *
 * .. code-block:: console
2082 *
2083 * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
2084 *
2085 * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
2086 *
 * cat /sys/bus/pci/devices/.../board/npm_status
 *
 * cat /sys/bus/pci/devices/.../board/cur_node_power_limit
 *
 * cat /sys/bus/pci/devices/.../board/max_node_power_limit
 *
 * cat /sys/bus/pci/devices/.../board/node_power
 *
 * cat /sys/bus/pci/devices/.../board/global_ppt_resid
 *
 * cat /sys/bus/pci/devices/.../board/baseboard_power
 *
 * cat /sys/bus/pci/devices/.../board/baseboard_power_limit
2100 */
2101
2102/**
2103 * DOC: baseboard_temp
2104 *
2105 * The amdgpu driver provides a sysfs API for retrieving current baseboard
2106 * temperature metrics data. The file baseboard_temp is used for this.
2107 * Reading the file will dump all the current baseboard temperature metrics data.
2108 */
2109static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
2110 struct device_attribute *attr, char *buf)
2111{
2112 struct drm_device *ddev = dev_get_drvdata(dev);
2113 struct amdgpu_device *adev = drm_to_adev(ddev);
2114 ssize_t size;
2115 int ret;
2116
2117 ret = amdgpu_pm_get_access_if_active(adev);
2118 if (ret)
2119 return ret;
2120
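	/* a NULL buffer first queries the size of the metrics data */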
2121 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
2122 if (size <= 0)
2123 goto out;
2124 if (size >= PAGE_SIZE) {
2125 ret = -ENOSPC;
2126 goto out;
2127 }
2128
2129 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
2130
2131out:
2132 amdgpu_pm_put_access(adev);
2133
2134 if (ret)
2135 return ret;
2136
2137 return size;
2138}
2139
2140/**
2141 * DOC: gpuboard_temp
2142 *
2143 * The amdgpu driver provides a sysfs API for retrieving current gpuboard
2144 * temperature metrics data. The file gpuboard_temp is used for this.
2145 * Reading the file will dump all the current gpuboard temperature metrics data.
2146 */
2147static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
2148 struct device_attribute *attr, char *buf)
2149{
2150 struct drm_device *ddev = dev_get_drvdata(dev);
2151 struct amdgpu_device *adev = drm_to_adev(ddev);
2152 ssize_t size;
2153 int ret;
2154
2155 ret = amdgpu_pm_get_access_if_active(adev);
2156 if (ret)
2157 return ret;
2158
2159 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
2160 if (size <= 0)
2161 goto out;
2162 if (size >= PAGE_SIZE) {
2163 ret = -ENOSPC;
2164 goto out;
2165 }
2166
2167 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
2168
2169out:
2170 amdgpu_pm_put_access(adev);
2171
2172 if (ret)
2173 return ret;
2174
2175 return size;
2176}
2177
2178/**
2179 * DOC: cur_node_power_limit
2180 *
 * The amdgpu driver provides a sysfs API for retrieving the current node power limit.
2182 * The file cur_node_power_limit is used for this.
2183 */
2184static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
2185 struct device_attribute *attr, char *buf)
2186{
2187 struct drm_device *ddev = dev_get_drvdata(dev);
2188 struct amdgpu_device *adev = drm_to_adev(ddev);
2189 u32 nplimit;
2190 int r;
2191
2192 /* get the current node power limit */
2193 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
2194 (void *)&nplimit);
2195 if (r)
2196 return r;
2197
2198 return sysfs_emit(buf, "%u\n", nplimit);
2199}
2200
2201/**
2202 * DOC: node_power
2203 *
 * The amdgpu driver provides a sysfs API for retrieving the current node power.
2205 * The file node_power is used for this.
2206 */
2207static ssize_t amdgpu_show_node_power(struct device *dev,
2208 struct device_attribute *attr, char *buf)
2209{
2210 struct drm_device *ddev = dev_get_drvdata(dev);
2211 struct amdgpu_device *adev = drm_to_adev(ddev);
2212 u32 npower;
2213 int r;
2214
2215 /* get the node power */
2216 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2217 (void *)&npower);
2218 if (r)
2219 return r;
2220
2221 return sysfs_emit(buf, "%u\n", npower);
2222}
2223
2224/**
2225 * DOC: npm_status
2226 *
 * The amdgpu driver provides a sysfs API for retrieving the current node power
 * management status. The file npm_status is used for this. It shows the status
 * as enabled or disabled based on the current node power value: if node power
 * is zero, the status is disabled, otherwise it is enabled.
2230 */
2231static ssize_t amdgpu_show_npm_status(struct device *dev,
2232 struct device_attribute *attr, char *buf)
2233{
2234 struct drm_device *ddev = dev_get_drvdata(dev);
2235 struct amdgpu_device *adev = drm_to_adev(ddev);
2236 u32 npower;
2237 int r;
2238
2239 /* get the node power */
2240 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2241 (void *)&npower);
2242 if (r)
2243 return r;
2244
2245 return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
2246}
2247
2248/**
2249 * DOC: global_ppt_resid
2250 *
 * The amdgpu driver provides a sysfs API for retrieving the global PPT residency.
2252 * The file global_ppt_resid is used for this.
2253 */
2254static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
2255 struct device_attribute *attr, char *buf)
2256{
2257 struct drm_device *ddev = dev_get_drvdata(dev);
2258 struct amdgpu_device *adev = drm_to_adev(ddev);
2259 u32 gpptresid;
2260 int r;
2261
2262 /* get the global ppt residency */
2263 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
2264 (void *)&gpptresid);
2265 if (r)
2266 return r;
2267
2268 return sysfs_emit(buf, "%u\n", gpptresid);
2269}
2270
2271/**
2272 * DOC: max_node_power_limit
2273 *
 * The amdgpu driver provides a sysfs API for retrieving the maximum node power limit.
2275 * The file max_node_power_limit is used for this.
2276 */
2277static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
2278 struct device_attribute *attr, char *buf)
2279{
2280 struct drm_device *ddev = dev_get_drvdata(dev);
2281 struct amdgpu_device *adev = drm_to_adev(ddev);
2282 u32 max_nplimit;
2283 int r;
2284
2285 /* get the max node power limit */
2286 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
2287 (void *)&max_nplimit);
2288 if (r)
2289 return r;
2290
2291 return sysfs_emit(buf, "%u\n", max_nplimit);
2292}
2293
2294/**
2295 * DOC: baseboard_power
2296 *
 * The amdgpu driver provides a sysfs API for retrieving the current UBB power in watts.
2298 * The file baseboard_power is used for this.
2299 */
2300static ssize_t amdgpu_show_baseboard_power(struct device *dev,
2301 struct device_attribute *attr, char *buf)
2302{
2303 struct drm_device *ddev = dev_get_drvdata(dev);
2304 struct amdgpu_device *adev = drm_to_adev(ddev);
2305 u32 ubbpower;
2306 int r;
2307
2308 /* get the ubb power */
2309 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER,
2310 (void *)&ubbpower);
2311 if (r)
2312 return r;
2313
2314 return sysfs_emit(buf, "%u\n", ubbpower);
2315}
2316
2317/**
2318 * DOC: baseboard_power_limit
2319 *
 * The amdgpu driver provides a sysfs API for retrieving the UBB power limit in watts.
2321 * The file baseboard_power_limit is used for this.
2322 */
2323static ssize_t amdgpu_show_baseboard_power_limit(struct device *dev,
2324 struct device_attribute *attr, char *buf)
2325{
2326 struct drm_device *ddev = dev_get_drvdata(dev);
2327 struct amdgpu_device *adev = drm_to_adev(ddev);
2328 u32 ubbpowerlimit;
2329 int r;
2330
2331 /* get the ubb power limit */
2332 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT,
2333 (void *)&ubbpowerlimit);
2334 if (r)
2335 return r;
2336
2337 return sysfs_emit(buf, "%u\n", ubbpowerlimit);
2338}
2339
2340static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
2341static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
2342static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
2343static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
2344static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
2345static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
2346static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
2347static DEVICE_ATTR(baseboard_power, 0444, amdgpu_show_baseboard_power, NULL);
2348static DEVICE_ATTR(baseboard_power_limit, 0444, amdgpu_show_baseboard_power_limit, NULL);
2349
2350static struct attribute *board_attrs[] = {
2351 &dev_attr_baseboard_temp.attr,
2352 &dev_attr_gpuboard_temp.attr,
2353 NULL
2354};
2355
2356static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
2357{
2358 struct device *dev = kobj_to_dev(kobj);
2359 struct drm_device *ddev = dev_get_drvdata(dev);
2360 struct amdgpu_device *adev = drm_to_adev(ddev);
2361
2362 if (attr == &dev_attr_baseboard_temp.attr) {
2363 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
2364 return 0;
2365 }
2366
2367 if (attr == &dev_attr_gpuboard_temp.attr) {
2368 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
2369 return 0;
2370 }
2371
2372 return attr->mode;
2373}
2374
2375const struct attribute_group amdgpu_board_attr_group = {
2376 .name = "board",
2377 .attrs = board_attrs,
2378 .is_visible = amdgpu_board_attr_visible,
2379};
2380
2381/* pm policy attributes */
2382struct amdgpu_pm_policy_attr {
2383 struct device_attribute dev_attr;
2384 enum pp_pm_policy id;
2385};
2386
2387/**
2388 * DOC: pm_policy
2389 *
 * Certain SOCs can support different power policies to optimize application
 * performance. However, this policy is provided only at the SOC level and not
 * at a per-process level. This is especially useful when the entire SOC is
 * utilized for a dedicated workload.
2394 *
2395 * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2396 * only two types of policies are supported through this interface.
2397 *
2398 * Pstate Policy Selection - This is to select different Pstate profiles which
2399 * decides clock/throttling preferences.
2400 *
2401 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2402 * this helps to select policy to be applied for per link power down.
2403 *
 * The list of available policies and policy levels varies between SOCs. They
 * can be viewed under the pm_policy node directory. If the SOC doesn't support
 * any policy, this node won't be available. The different policies supported
 * will be available as separate nodes under pm_policy.
2408 *
2409 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2410 *
2411 * Reading the policy file shows the different levels supported. The level which
2412 * is applied presently is denoted by * (asterisk). E.g.,
2413 *
2414 * .. code-block:: console
2415 *
2416 * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2417 * 0 : soc_pstate_default
2418 * 1 : soc_pstate_0
2419 * 2 : soc_pstate_1*
2420 * 3 : soc_pstate_2
2421 *
2422 * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2423 * 0 : plpd_disallow
2424 * 1 : plpd_default
2425 * 2 : plpd_optimized*
2426 *
 * To apply a specific policy:
 *
 * .. code-block:: console
 *
 * echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>
2430 *
 * For the levels listed in the example above, to select "plpd_optimized" for
 * XGMI and "soc_pstate_2" for the soc pstate policy:
2433 *
2434 * .. code-block:: console
2435 *
2436 * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2437 * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2438 *
2439 */
2440static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2441 struct device_attribute *attr,
2442 char *buf)
2443{
2444 struct drm_device *ddev = dev_get_drvdata(dev);
2445 struct amdgpu_device *adev = drm_to_adev(ddev);
2446 struct amdgpu_pm_policy_attr *policy_attr;
2447
2448 policy_attr =
2449 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2450
2451 return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2452}
2453
2454static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
2455 struct device_attribute *attr,
2456 const char *buf, size_t count)
2457{
2458 struct drm_device *ddev = dev_get_drvdata(dev);
2459 struct amdgpu_device *adev = drm_to_adev(ddev);
2460 struct amdgpu_pm_policy_attr *policy_attr;
2461 int ret, num_params = 0;
2462 char delimiter[] = " \n\t";
2463 char tmp_buf[128];
2464 char *tmp, *param;
2465 long val;
2466
2467 count = min(count, sizeof(tmp_buf));
2468 memcpy(tmp_buf, buf, count);
2469 tmp_buf[count - 1] = '\0';
2470 tmp = tmp_buf;
2471
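	/* accept exactly one integer token; extra tokens are rejected */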
2472 tmp = skip_spaces(tmp);
2473 while ((param = strsep(&tmp, delimiter))) {
2474 if (!strlen(param)) {
2475 tmp = skip_spaces(tmp);
2476 continue;
2477 }
2478 ret = kstrtol(param, 0, &val);
2479 if (ret)
2480 return -EINVAL;
2481 num_params++;
2482 if (num_params > 1)
2483 return -EINVAL;
2484 }
2485
2486 if (num_params != 1)
2487 return -EINVAL;
2488
2489 policy_attr =
2490 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2491
2492 ret = amdgpu_pm_get_access(adev);
2493 if (ret < 0)
2494 return ret;
2495
2496 ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
2497
2498 amdgpu_pm_put_access(adev);
2499
2500 if (ret)
2501 return ret;
2502
2503 return count;
2504}
2505
2506#define AMDGPU_PM_POLICY_ATTR(_name, _id) \
2507 static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \
2508 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
2509 amdgpu_set_pm_policy_attr), \
2510 .id = PP_PM_POLICY_##_id, \
2511 };
2512
2513#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
2514
2515AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
2516AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
2517
2518static struct attribute *pm_policy_attrs[] = {
2519 &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
2520 &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
2521 NULL
2522};
2523
2524static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2525 struct attribute *attr, int n)
2526{
2527 struct device *dev = kobj_to_dev(kobj);
2528 struct drm_device *ddev = dev_get_drvdata(dev);
2529 struct amdgpu_device *adev = drm_to_adev(ddev);
2530 struct amdgpu_pm_policy_attr *policy_attr;
2531
2532 policy_attr =
2533 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2534
2535 if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2536 -ENOENT)
2537 return 0;
2538
2539 return attr->mode;
2540}
2541
2542const struct attribute_group amdgpu_pm_policy_attr_group = {
2543 .name = "pm_policy",
2544 .attrs = pm_policy_attrs,
2545 .is_visible = amdgpu_pm_policy_attr_visible,
2546};
2547
2548static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2549 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2550 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2551 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2552 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2553 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2554 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
2555 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2556 .attr_update = pp_dpm_clk_default_attr_update),
2557 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2558 .attr_update = pp_dpm_clk_default_attr_update),
2559 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2560 .attr_update = pp_dpm_clk_default_attr_update),
2561 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2562 .attr_update = pp_dpm_clk_default_attr_update),
2563 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2564 .attr_update = pp_dpm_clk_default_attr_update),
2565 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2566 .attr_update = pp_dpm_clk_default_attr_update),
2567 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2568 .attr_update = pp_dpm_clk_default_attr_update),
2569 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2570 .attr_update = pp_dpm_clk_default_attr_update),
2571 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2572 .attr_update = pp_dpm_dcefclk_attr_update),
2573 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2574 .attr_update = pp_dpm_clk_default_attr_update),
2575 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2576 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2577 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2578 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
2579 .attr_update = pp_od_clk_voltage_attr_update),
2580 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2581 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2582 AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2583 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2584 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2585 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2586 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2587 AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2588 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2589 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
2590 .attr_update = ss_power_attr_update),
2591 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
2592 .attr_update = ss_power_attr_update),
2593 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
2594 .attr_update = ss_bias_attr_update),
2595 AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
2596 .attr_update = amdgpu_pm_metrics_attr_update),
2597};
2598
2599static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2600 uint32_t mask, enum amdgpu_device_attr_states *states)
2601{
2602 struct device_attribute *dev_attr = &attr->dev_attr;
2603 enum amdgpu_device_attr_id attr_id = attr->attr_id;
2604 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2605
2606 if (!(attr->flags & mask)) {
2607 *states = ATTR_STATE_UNSUPPORTED;
2608 return 0;
2609 }
2610
2611 if (DEVICE_ATTR_IS(mem_busy_percent)) {
2612 if ((adev->flags & AMD_IS_APU &&
2613 gc_ver != IP_VERSION(9, 4, 3)) ||
2614 gc_ver == IP_VERSION(9, 0, 1))
2615 *states = ATTR_STATE_UNSUPPORTED;
2616 } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
2617 if (!(gc_ver == IP_VERSION(9, 3, 0) ||
2618 gc_ver == IP_VERSION(10, 3, 1) ||
2619 gc_ver == IP_VERSION(10, 3, 3) ||
2620 gc_ver == IP_VERSION(10, 3, 6) ||
2621 gc_ver == IP_VERSION(10, 3, 7) ||
2622 gc_ver == IP_VERSION(11, 0, 0) ||
2623 gc_ver == IP_VERSION(11, 0, 1) ||
2624 gc_ver == IP_VERSION(11, 0, 2) ||
2625 gc_ver == IP_VERSION(11, 0, 3) ||
2626 gc_ver == IP_VERSION(11, 0, 4) ||
2627 gc_ver == IP_VERSION(11, 5, 0) ||
2628 gc_ver == IP_VERSION(11, 5, 1) ||
2629 gc_ver == IP_VERSION(11, 5, 2) ||
2630 gc_ver == IP_VERSION(11, 5, 3) ||
2631 gc_ver == IP_VERSION(12, 0, 0) ||
2632 gc_ver == IP_VERSION(12, 0, 1)))
2633 *states = ATTR_STATE_UNSUPPORTED;
2634 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2635 /* PCIe Perf counters won't work on APU nodes */
2636 if (adev->flags & AMD_IS_APU ||
2637 !adev->asic_funcs->get_pcie_usage)
2638 *states = ATTR_STATE_UNSUPPORTED;
2639 } else if (DEVICE_ATTR_IS(unique_id)) {
2640 switch (gc_ver) {
2641 case IP_VERSION(9, 0, 1):
2642 case IP_VERSION(9, 4, 0):
2643 case IP_VERSION(9, 4, 1):
2644 case IP_VERSION(9, 4, 2):
2645 case IP_VERSION(9, 4, 3):
2646 case IP_VERSION(9, 4, 4):
2647 case IP_VERSION(9, 5, 0):
2648 case IP_VERSION(10, 3, 0):
2649 case IP_VERSION(11, 0, 0):
2650 case IP_VERSION(11, 0, 1):
2651 case IP_VERSION(11, 0, 2):
2652 case IP_VERSION(11, 0, 3):
2653 case IP_VERSION(12, 0, 0):
2654 case IP_VERSION(12, 0, 1):
2655 *states = ATTR_STATE_SUPPORTED;
2656 break;
2657 default:
2658 *states = ATTR_STATE_UNSUPPORTED;
2659 }
2660 } else if (DEVICE_ATTR_IS(pp_features)) {
2661 if ((adev->flags & AMD_IS_APU &&
2662 gc_ver != IP_VERSION(9, 4, 3)) ||
2663 gc_ver < IP_VERSION(9, 0, 0))
2664 *states = ATTR_STATE_UNSUPPORTED;
2665 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2666 if (gc_ver < IP_VERSION(9, 1, 0))
2667 *states = ATTR_STATE_UNSUPPORTED;
2668 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2669 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2670 *states = ATTR_STATE_UNSUPPORTED;
2671 else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2672 gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2673 *states = ATTR_STATE_UNSUPPORTED;
2674 } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2675 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2676 *states = ATTR_STATE_UNSUPPORTED;
2677 } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2678 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2679 *states = ATTR_STATE_UNSUPPORTED;
2680 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2681 u32 limit;
2682
2683 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2684 -EOPNOTSUPP)
2685 *states = ATTR_STATE_UNSUPPORTED;
2686 } else if (DEVICE_ATTR_IS(pp_table)) {
2687 int ret;
2688 char *tmp = NULL;
2689
2690 ret = amdgpu_dpm_get_pp_table(adev, &tmp);
2691 if (ret == -EOPNOTSUPP || !tmp)
2692 *states = ATTR_STATE_UNSUPPORTED;
2693 else
2694 *states = ATTR_STATE_SUPPORTED;
2695 }
2696
2697 switch (gc_ver) {
2698 case IP_VERSION(10, 3, 0):
2699 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2700 amdgpu_sriov_vf(adev)) {
2701 dev_attr->attr.mode &= ~0222;
2702 dev_attr->store = NULL;
2703 }
2704 break;
2705 default:
2706 break;
2707 }
2708
2709 return 0;
2710}
2711
2712
2713static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2714 struct amdgpu_device_attr *attr,
2715 uint32_t mask, struct list_head *attr_list)
2716{
2717 int ret = 0;
2718 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2719 struct amdgpu_device_attr_entry *attr_entry;
2720 struct device_attribute *dev_attr;
2721 const char *name;
2722
2723 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2724 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2725
2726 if (!attr)
2727 return -EINVAL;
2728
2729 dev_attr = &attr->dev_attr;
2730 name = dev_attr->attr.name;
2731
2732 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2733
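	/* let the per-attribute callback decide if the device supports it */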
2734 ret = attr_update(adev, attr, mask, &attr_states);
2735 if (ret) {
2736 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2737 name, ret);
2738 return ret;
2739 }
2740
2741 if (attr_states == ATTR_STATE_UNSUPPORTED)
2742 return 0;
2743
2744 ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
		return ret;
	}
2749
2750 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2751 if (!attr_entry)
2752 return -ENOMEM;
2753
2754 attr_entry->attr = attr;
2755 INIT_LIST_HEAD(&attr_entry->entry);
2756
2757 list_add_tail(&attr_entry->entry, attr_list);
2758
2759 return ret;
2760}
2761
2762static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2763{
2764 struct device_attribute *dev_attr = &attr->dev_attr;
2765
2766 device_remove_file(adev->dev, dev_attr);
2767}
2768
2769static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2770 struct list_head *attr_list);
2771
2772static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2773 struct amdgpu_device_attr *attrs,
2774 uint32_t counts,
2775 uint32_t mask,
2776 struct list_head *attr_list)
2777{
2778 int ret = 0;
2779 uint32_t i = 0;
2780
2781 for (i = 0; i < counts; i++) {
2782 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2783 if (ret)
2784 goto failed;
2785 }
2786
2787 return 0;
2788
2789failed:
2790 amdgpu_device_attr_remove_groups(adev, attr_list);
2791
2792 return ret;
2793}
2794
2795static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2796 struct list_head *attr_list)
2797{
2798 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2799
2800 if (list_empty(attr_list))
		return;
2802
2803 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2804 amdgpu_device_attr_remove(adev, entry->attr);
2805 list_del(&entry->entry);
2806 kfree(entry);
2807 }
2808}
2809
2810static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2811 struct device_attribute *attr,
2812 char *buf)
2813{
2814 struct amdgpu_device *adev = dev_get_drvdata(dev);
2815 int channel = to_sensor_dev_attr(attr)->index;
2816 int r, temp = 0;
2817
2818 if (channel >= PP_TEMP_MAX)
2819 return -EINVAL;
2820
2821 switch (channel) {
2822 case PP_TEMP_JUNCTION:
2823 /* get current junction temperature */
2824 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2825 (void *)&temp);
2826 break;
2827 case PP_TEMP_EDGE:
2828 /* get current edge temperature */
2829 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2830 (void *)&temp);
2831 break;
2832 case PP_TEMP_MEM:
2833 /* get current memory temperature */
2834 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2835 (void *)&temp);
2836 break;
2837 default:
2838 r = -EINVAL;
2839 break;
2840 }
2841
2842 if (r)
2843 return r;
2844
2845 return sysfs_emit(buf, "%d\n", temp);
2846}
2847
2848static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2849 struct device_attribute *attr,
2850 char *buf)
2851{
2852 struct amdgpu_device *adev = dev_get_drvdata(dev);
2853 int hyst = to_sensor_dev_attr(attr)->index;
2854 int temp;
2855
2856 if (hyst)
2857 temp = adev->pm.dpm.thermal.min_temp;
2858 else
2859 temp = adev->pm.dpm.thermal.max_temp;
2860
2861 return sysfs_emit(buf, "%d\n", temp);
2862}
2863
2864static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2865 struct device_attribute *attr,
2866 char *buf)
2867{
2868 struct amdgpu_device *adev = dev_get_drvdata(dev);
2869 int hyst = to_sensor_dev_attr(attr)->index;
2870 int temp;
2871
2872 if (hyst)
2873 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2874 else
2875 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2876
2877 return sysfs_emit(buf, "%d\n", temp);
2878}
2879
2880static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2881 struct device_attribute *attr,
2882 char *buf)
2883{
2884 struct amdgpu_device *adev = dev_get_drvdata(dev);
2885 int hyst = to_sensor_dev_attr(attr)->index;
2886 int temp;
2887
2888 if (hyst)
2889 temp = adev->pm.dpm.thermal.min_mem_temp;
2890 else
2891 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2892
2893 return sysfs_emit(buf, "%d\n", temp);
2894}
2895
2896static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2897 struct device_attribute *attr,
2898 char *buf)
2899{
2900 int channel = to_sensor_dev_attr(attr)->index;
2901
2902 if (channel >= PP_TEMP_MAX)
2903 return -EINVAL;
2904
2905 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2906}
2907
2908static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2909 struct device_attribute *attr,
2910 char *buf)
2911{
2912 struct amdgpu_device *adev = dev_get_drvdata(dev);
2913 int channel = to_sensor_dev_attr(attr)->index;
2914 int temp = 0;
2915
2916 if (channel >= PP_TEMP_MAX)
2917 return -EINVAL;
2918
2919 switch (channel) {
2920 case PP_TEMP_JUNCTION:
2921 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2922 break;
2923 case PP_TEMP_EDGE:
2924 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2925 break;
2926 case PP_TEMP_MEM:
2927 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2928 break;
2929 }
2930
2931 return sysfs_emit(buf, "%d\n", temp);
2932}
2933
2934static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2935 struct device_attribute *attr,
2936 char *buf)
2937{
2938 struct amdgpu_device *adev = dev_get_drvdata(dev);
2939 u32 pwm_mode = 0;
2940 int ret;
2941
2942 ret = amdgpu_pm_get_access_if_active(adev);
2943 if (ret)
2944 return ret;
2945
2946 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2947
2948 amdgpu_pm_put_access(adev);
2949
2950 if (ret)
2951 return -EINVAL;
2952
2953 return sysfs_emit(buf, "%u\n", pwm_mode);
2954}
2955
2956static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2957 struct device_attribute *attr,
2958 const char *buf,
2959 size_t count)
2960{
2961 struct amdgpu_device *adev = dev_get_drvdata(dev);
2962 int err, ret;
2963 u32 pwm_mode;
2964 int value;
2965
2966 err = kstrtoint(buf, 10, &value);
2967 if (err)
2968 return err;
2969
2970 if (value == 0)
2971 pwm_mode = AMD_FAN_CTRL_NONE;
2972 else if (value == 1)
2973 pwm_mode = AMD_FAN_CTRL_MANUAL;
2974 else if (value == 2)
2975 pwm_mode = AMD_FAN_CTRL_AUTO;
2976 else
2977 return -EINVAL;
2978
2979 ret = amdgpu_pm_get_access(adev);
2980 if (ret < 0)
2981 return ret;
2982
2983 ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2984
2985 amdgpu_pm_put_access(adev);
2986
2987 if (ret)
2988 return -EINVAL;
2989
2990 return count;
2991}
2992
2993static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2994 struct device_attribute *attr,
2995 char *buf)
2996{
2997 return sysfs_emit(buf, "%i\n", 0);
2998}
2999
3000static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
3001 struct device_attribute *attr,
3002 char *buf)
3003{
3004 return sysfs_emit(buf, "%i\n", 255);
3005}
3006
3007static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
3008 struct device_attribute *attr,
3009 const char *buf, size_t count)
3010{
3011 struct amdgpu_device *adev = dev_get_drvdata(dev);
3012 int err;
3013 u32 value;
3014 u32 pwm_mode;
3015
3016 err = kstrtou32(buf, 10, &value);
3017 if (err)
3018 return err;
3019
3020 err = amdgpu_pm_get_access(adev);
3021 if (err < 0)
3022 return err;
3023
3024 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3025 if (err)
3026 goto out;
3027
3028 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3029 pr_info("manual fan speed control should be enabled first\n");
3030 err = -EINVAL;
3031 goto out;
3032 }
3033
3034 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
3035
3036out:
3037 amdgpu_pm_put_access(adev);
3038
3039 if (err)
3040 return err;
3041
3042 return count;
3043}
3044
3045static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
3046 struct device_attribute *attr,
3047 char *buf)
3048{
3049 struct amdgpu_device *adev = dev_get_drvdata(dev);
3050 int err;
3051 u32 speed = 0;
3052
3053 err = amdgpu_pm_get_access_if_active(adev);
3054 if (err)
3055 return err;
3056
3057 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
3058
3059 amdgpu_pm_put_access(adev);
3060
3061 if (err)
3062 return err;
3063
3064 return sysfs_emit(buf, "%i\n", speed);
3065}
3066
3067static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
3068 struct device_attribute *attr,
3069 char *buf)
3070{
3071 struct amdgpu_device *adev = dev_get_drvdata(dev);
3072 int err;
3073 u32 speed = 0;
3074
3075 err = amdgpu_pm_get_access_if_active(adev);
3076 if (err)
3077 return err;
3078
3079 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
3080
3081 amdgpu_pm_put_access(adev);
3082
3083 if (err)
3084 return err;
3085
3086 return sysfs_emit(buf, "%i\n", speed);
3087}
3088
3089static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
3090 struct device_attribute *attr,
3091 char *buf)
3092{
3093 struct amdgpu_device *adev = dev_get_drvdata(dev);
3094 u32 min_rpm = 0;
3095 int r;
3096
3097 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
3098 (void *)&min_rpm);
3099
3100 if (r)
3101 return r;
3102
3103 return sysfs_emit(buf, "%d\n", min_rpm);
3104}
3105
3106static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
3107 struct device_attribute *attr,
3108 char *buf)
3109{
3110 struct amdgpu_device *adev = dev_get_drvdata(dev);
3111 u32 max_rpm = 0;
3112 int r;
3113
3114 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
3115 (void *)&max_rpm);
3116
3117 if (r)
3118 return r;
3119
3120 return sysfs_emit(buf, "%d\n", max_rpm);
3121}
3122
3123static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
3124 struct device_attribute *attr,
3125 char *buf)
3126{
3127 struct amdgpu_device *adev = dev_get_drvdata(dev);
3128 int err;
3129 u32 rpm = 0;
3130
3131 err = amdgpu_pm_get_access_if_active(adev);
3132 if (err)
3133 return err;
3134
3135 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
3136
3137 amdgpu_pm_put_access(adev);
3138
3139 if (err)
3140 return err;
3141
3142 return sysfs_emit(buf, "%i\n", rpm);
3143}
3144
3145static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
3146 struct device_attribute *attr,
3147 const char *buf, size_t count)
3148{
3149 struct amdgpu_device *adev = dev_get_drvdata(dev);
3150 int err;
3151 u32 value;
3152 u32 pwm_mode;
3153
3154 err = kstrtou32(buf, 10, &value);
3155 if (err)
3156 return err;
3157
3158 err = amdgpu_pm_get_access(adev);
3159 if (err < 0)
3160 return err;
3161
3162 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3163 if (err)
3164 goto out;
3165
3166 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3167 err = -ENODATA;
3168 goto out;
3169 }
3170
3171 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
3172
3173out:
3174 amdgpu_pm_put_access(adev);
3175
3176 if (err)
3177 return err;
3178
3179 return count;
3180}
3181
3182static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
3183 struct device_attribute *attr,
3184 char *buf)
3185{
3186 struct amdgpu_device *adev = dev_get_drvdata(dev);
3187 u32 pwm_mode = 0;
3188 int ret;
3189
3190 ret = amdgpu_pm_get_access_if_active(adev);
3191 if (ret)
3192 return ret;
3193
3194 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3195
3196 amdgpu_pm_put_access(adev);
3197
3198 if (ret)
3199 return -EINVAL;
3200
3201 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3202}
3203
3204static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3205 struct device_attribute *attr,
3206 const char *buf,
3207 size_t count)
3208{
3209 struct amdgpu_device *adev = dev_get_drvdata(dev);
3210 int err;
3211 int value;
3212 u32 pwm_mode;
3213
3214 err = kstrtoint(buf, 10, &value);
3215 if (err)
3216 return err;
3217
3218 if (value == 0)
3219 pwm_mode = AMD_FAN_CTRL_AUTO;
3220 else if (value == 1)
3221 pwm_mode = AMD_FAN_CTRL_MANUAL;
3222 else
3223 return -EINVAL;
3224
3225 err = amdgpu_pm_get_access(adev);
3226 if (err < 0)
3227 return err;
3228
3229 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3230
3231 amdgpu_pm_put_access(adev);
3232
3233 if (err)
3234 return -EINVAL;
3235
3236 return count;
3237}
3238
3239static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3240 struct device_attribute *attr,
3241 char *buf)
3242{
3243 struct amdgpu_device *adev = dev_get_drvdata(dev);
3244 u32 vddgfx;
3245 int r;
3246
3247 /* get the voltage */
3248 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3249 (void *)&vddgfx);
3250 if (r)
3251 return r;
3252
3253 return sysfs_emit(buf, "%d\n", vddgfx);
3254}
3255
3256static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
3257 struct device_attribute *attr,
3258 char *buf)
3259{
3260 struct amdgpu_device *adev = dev_get_drvdata(dev);
3261 u32 vddboard;
3262 int r;
3263
3264 /* get the voltage */
3265 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3266 (void *)&vddboard);
3267 if (r)
3268 return r;
3269
3270 return sysfs_emit(buf, "%d\n", vddboard);
3271}
3272
3273static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
3274 struct device_attribute *attr,
3275 char *buf)
3276{
3277 return sysfs_emit(buf, "vddgfx\n");
3278}
3279
3280static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
3281 struct device_attribute *attr,
3282 char *buf)
3283{
3284 return sysfs_emit(buf, "vddboard\n");
}

3286static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3287 struct device_attribute *attr,
3288 char *buf)
3289{
3290 struct amdgpu_device *adev = dev_get_drvdata(dev);
3291 u32 vddnb;
3292 int r;
3293
3294 /* only APUs have vddnb */
3295 if (!(adev->flags & AMD_IS_APU))
3296 return -EINVAL;
3297
3298 /* get the voltage */
3299 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3300 (void *)&vddnb);
3301 if (r)
3302 return r;
3303
3304 return sysfs_emit(buf, "%d\n", vddnb);
3305}
3306
3307static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3308 struct device_attribute *attr,
3309 char *buf)
3310{
3311 return sysfs_emit(buf, "vddnb\n");
3312}
3313
3314static int amdgpu_hwmon_get_power(struct device *dev,
3315 enum amd_pp_sensors sensor)
3316{
3317 struct amdgpu_device *adev = dev_get_drvdata(dev);
3318 unsigned int uw;
3319 u32 query = 0;
3320 int r;
3321
3322 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
3323 if (r)
3324 return r;
3325
3326 /* convert to microwatts */
3327 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3328
3329 return uw;
3330}
3331
3332static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3333 struct device_attribute *attr,
3334 char *buf)
3335{
3336 ssize_t val;
3337
3338 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3339 if (val < 0)
3340 return val;
3341
3342 return sysfs_emit(buf, "%zd\n", val);
3343}
3344
3345static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3346 struct device_attribute *attr,
3347 char *buf)
3348{
3349 ssize_t val;
3350
3351 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3352 if (val < 0)
3353 return val;
3354
3355 return sysfs_emit(buf, "%zd\n", val);
3356}
3357
3358static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3359 struct device_attribute *attr,
3360 char *buf,
3361 enum pp_power_limit_level pp_limit_level)
3362{
3363 struct amdgpu_device *adev = dev_get_drvdata(dev);
3364 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3365 uint32_t limit;
3366 ssize_t size;
3367 int r;
3368
3369 r = amdgpu_pm_get_access_if_active(adev);
3370 if (r)
3371 return r;
3372
3373 r = amdgpu_dpm_get_power_limit(adev, &limit,
3374 pp_limit_level, power_type);
3375
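	/* the limit is reported in watts; hwmon expects microwatts */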
3376 if (!r)
3377 size = sysfs_emit(buf, "%u\n", limit * 1000000);
3378 else
3379 size = sysfs_emit(buf, "\n");
3380
3381 amdgpu_pm_put_access(adev);
3382
3383 return size;
3384}
3385
3386static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3387 struct device_attribute *attr,
3388 char *buf)
3389{
3390 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3391}
3392
3393static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3394 struct device_attribute *attr,
3395 char *buf)
3396{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
}
3400
3401static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3402 struct device_attribute *attr,
3403 char *buf)
3404{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
}
3408
3409static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3410 struct device_attribute *attr,
3411 char *buf)
3412{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
}
3416
3417static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3418 struct device_attribute *attr,
3419 char *buf)
3420{
3421 struct amdgpu_device *adev = dev_get_drvdata(dev);
3422 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3423
3424 if (gc_ver == IP_VERSION(10, 3, 1))
3425 return sysfs_emit(buf, "%s\n",
3426 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3427 "fastPPT" : "slowPPT");
3428 else
3429 return sysfs_emit(buf, "%s\n",
3430 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3431 "PPT1" : "PPT");
3432}
3433
3434static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3435 struct device_attribute *attr,
3436 const char *buf,
3437 size_t count)
3438{
3439 struct amdgpu_device *adev = dev_get_drvdata(dev);
3440 int limit_type = to_sensor_dev_attr(attr)->index;
3441 int err;
3442 u32 value;
3443
3444 err = kstrtou32(buf, 10, &value);
3445 if (err)
3446 return err;
3447
	value = value / 1000000; /* convert microwatts to watts */
3449
3450 err = amdgpu_pm_get_access(adev);
3451 if (err < 0)
3452 return err;
3453
3454 err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
3455
3456 amdgpu_pm_put_access(adev);
3457
3458 if (err)
3459 return err;
3460
3461 return count;
3462}
3463
3464static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3465 struct device_attribute *attr,
3466 char *buf)
3467{
3468 struct amdgpu_device *adev = dev_get_drvdata(dev);
3469 uint32_t sclk;
3470 int r;
3471
3472 /* get the sclk */
3473 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3474 (void *)&sclk);
3475 if (r)
3476 return r;
3477
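	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */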
3478 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3479}
3480
3481static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3482 struct device_attribute *attr,
3483 char *buf)
3484{
3485 return sysfs_emit(buf, "sclk\n");
3486}
3487
3488static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3489 struct device_attribute *attr,
3490 char *buf)
3491{
3492 struct amdgpu_device *adev = dev_get_drvdata(dev);
3493 uint32_t mclk;
3494 int r;
3495
	/* get the mclk */
3497 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3498 (void *)&mclk);
3499 if (r)
3500 return r;
3501
3502 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3503}
3504
3505static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3506 struct device_attribute *attr,
3507 char *buf)
3508{
3509 return sysfs_emit(buf, "mclk\n");
3510}
3511
3512/**
3513 * DOC: hwmon
3514 *
3515 * The amdgpu driver exposes the following sensor interfaces:
3516 *
3517 * - GPU temperature (via the on-die sensor)
3518 *
3519 * - GPU voltage
3520 *
3521 * - Northbridge voltage (APUs only)
3522 *
3523 * - GPU power
3524 *
3525 * - GPU fan
3526 *
3527 * - GPU gfx/compute engine clock
3528 *
3529 * - GPU memory clock (dGPU only)
3530 *
3531 * hwmon interfaces for GPU temperature:
3532 *
 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3534 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3535 *
3536 * - temp[1-3]_label: temperature channel label
3537 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3538 *
3539 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3540 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3541 *
3542 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3543 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3544 *
 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3546 * - these are supported on SOC15 dGPUs only
3547 *
3548 * hwmon interfaces for GPU voltage:
3549 *
3550 * - in0_input: the voltage on the GPU in millivolts
3551 *
3552 * - in1_input: the voltage on the Northbridge in millivolts
3553 *
3554 * hwmon interfaces for GPU power:
3555 *
3556 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
3557 *
3558 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
3559 *
3560 * - power1_cap_min: minimum cap supported in microWatts
3561 *
3562 * - power1_cap_max: maximum cap supported in microWatts
3563 *
3564 * - power1_cap: selected power cap in microWatts
3565 *
3566 * hwmon interfaces for GPU fan:
3567 *
3568 * - pwm1: pulse width modulation fan level (0-255)
3569 *
3570 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3571 *
3572 * - pwm1_min: pulse width modulation fan control minimum level (0)
3573 *
3574 * - pwm1_max: pulse width modulation fan control maximum level (255)
3575 *
 * - fan1_min: minimum fan speed in revolutions/min (RPM)
 *
 * - fan1_max: maximum fan speed in revolutions/min (RPM)
 *
 * - fan1_input: fan speed in RPM
 *
 * - fan[1-\*]_target: desired fan speed in revolutions/min (RPM)
 *
 * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
3585 *
 * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces
 * at the same time, as the later write will override the earlier one.
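 *
 * As a usage sketch, to switch the fan to manual control and set it
 * to half duty cycle (the hwmon index X below is illustrative and
 * varies per system):
 *
 * .. code-block:: console
 *
 * echo 1 > /sys/class/hwmon/hwmonX/pwm1_enable
 * echo 128 > /sys/class/hwmon/hwmonX/pwm1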
3588 *
3589 * hwmon interfaces for GPU clocks:
3590 *
3591 * - freq1_input: the gfx/compute clock in hertz
3592 *
3593 * - freq2_input: the memory clock in hertz
3594 *
3595 * You can use hwmon tools like sensors to view this information on your system.
3596 *
3597 */
3598
3599static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3600static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3601static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3602static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3603static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3604static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3605static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3606static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3607static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3608static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3609static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3610static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3611static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3612static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3613static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3614static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3615static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3616static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3617static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3618static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3619static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3620static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3621static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3622static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3623static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3624static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3625static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3626static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3627static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
3628static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
3629static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3630static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3631static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3632static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3633static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3634static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3635static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3636static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3637static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3638static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3639static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3640static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3641static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3642static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3643static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3644static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3645
3646static struct attribute *hwmon_attributes[] = {
3647 &sensor_dev_attr_temp1_input.dev_attr.attr,
3648 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3649 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3650 &sensor_dev_attr_temp2_input.dev_attr.attr,
3651 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3652 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3653 &sensor_dev_attr_temp3_input.dev_attr.attr,
3654 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3655 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3656 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3657 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3658 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3659 &sensor_dev_attr_temp1_label.dev_attr.attr,
3660 &sensor_dev_attr_temp2_label.dev_attr.attr,
3661 &sensor_dev_attr_temp3_label.dev_attr.attr,
3662 &sensor_dev_attr_pwm1.dev_attr.attr,
3663 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3664 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3665 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3666 &sensor_dev_attr_fan1_input.dev_attr.attr,
3667 &sensor_dev_attr_fan1_min.dev_attr.attr,
3668 &sensor_dev_attr_fan1_max.dev_attr.attr,
3669 &sensor_dev_attr_fan1_target.dev_attr.attr,
3670 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3671 &sensor_dev_attr_in0_input.dev_attr.attr,
3672 &sensor_dev_attr_in0_label.dev_attr.attr,
3673 &sensor_dev_attr_in1_input.dev_attr.attr,
3674 &sensor_dev_attr_in1_label.dev_attr.attr,
3675 &sensor_dev_attr_in2_input.dev_attr.attr,
3676 &sensor_dev_attr_in2_label.dev_attr.attr,
3677 &sensor_dev_attr_power1_average.dev_attr.attr,
3678 &sensor_dev_attr_power1_input.dev_attr.attr,
3679 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3680 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3681 &sensor_dev_attr_power1_cap.dev_attr.attr,
3682 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3683 &sensor_dev_attr_power1_label.dev_attr.attr,
3684 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3685 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3686 &sensor_dev_attr_power2_cap.dev_attr.attr,
3687 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3688 &sensor_dev_attr_power2_label.dev_attr.attr,
3689 &sensor_dev_attr_freq1_input.dev_attr.attr,
3690 &sensor_dev_attr_freq1_label.dev_attr.attr,
3691 &sensor_dev_attr_freq2_input.dev_attr.attr,
3692 &sensor_dev_attr_freq2_label.dev_attr.attr,
3693 NULL
3694};
3695
3696static umode_t hwmon_attributes_visible(struct kobject *kobj,
3697 struct attribute *attr, int index)
3698{
3699 struct device *dev = kobj_to_dev(kobj);
3700 struct amdgpu_device *adev = dev_get_drvdata(dev);
3701 umode_t effective_mode = attr->mode;
3702 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3703 uint32_t tmp;
3704
3705	/* under pp one vf mode, management of hwmon attributes is not supported */
3706 if (amdgpu_sriov_is_pp_one_vf(adev))
3707 effective_mode &= ~S_IWUSR;
3708
3709 /* Skip fan attributes if fan is not present */
3710 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3711 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3712 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3713 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3714 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3715 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3716 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3717 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3718 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3719 return 0;
3720
3721 /* Skip fan attributes on APU */
3722 if ((adev->flags & AMD_IS_APU) &&
3723 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3724 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3725 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3726 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3727 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3728 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3729 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3730 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3731 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3732 return 0;
3733
3734	/* Skip crit temp on APUs and on GC 9.4.3/9.4.4/9.5.0 */
3735 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3736 (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
3737 gc_ver == IP_VERSION(9, 5, 0))) &&
3738 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3739 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3740 return 0;
3741
3742 /* Skip limit attributes if DPM is not enabled */
3743 if (!adev->pm.dpm_enabled &&
3744 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3745 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3746 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3747 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3748 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3749 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3750 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3751 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3752 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3753 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3754 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3755 return 0;
3756
3757	/* mask fan attributes if we have no bindings for this ASIC to expose */
3758 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3759 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3760 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3761 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3762 effective_mode &= ~S_IRUGO;
3763
3764 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3765 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3766 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3767 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3768 effective_mode &= ~S_IWUSR;
3769
3770	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and GC 9.4.3/9.4.4 */
3771 if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3772 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3773 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3774 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
3775 if (adev->family == AMDGPU_FAMILY_SI ||
3776 ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
3777 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
3778 (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
3779 return 0;
3780 }
3781
3782 if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
3783 amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
3784 effective_mode |= S_IWUSR;
3785
3786	/* not implemented yet for APUs with GC IP older than 9.3.0 (Renoir) */
3787 if (((adev->family == AMDGPU_FAMILY_SI) ||
3788 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3789 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3790 return 0;
3791
3792 /* not all products support both average and instantaneous */
3793 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3794 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
3795 (void *)&tmp) == -EOPNOTSUPP)
3796 return 0;
3797 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3798 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
3799 (void *)&tmp) == -EOPNOTSUPP)
3800 return 0;
3801
3802	/* hide max/min values if we can neither query nor manage the fan */
3803 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3804 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3805 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3806 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3807 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3808 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3809 return 0;
3810
3811 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3812 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3813 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3814 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3815 return 0;
3816
3817 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3818 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
3819 (gc_ver == IP_VERSION(9, 4, 3) ||
3820 gc_ver == IP_VERSION(9, 4, 4) ||
3821 gc_ver == IP_VERSION(9, 5, 0))) &&
3822 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3823 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3824 return 0;
3825
3826	/* only APUs other than GC 9.4.3/9.4.4/9.5.0 have vddnb */
3827 if ((!(adev->flags & AMD_IS_APU) ||
3828 (gc_ver == IP_VERSION(9, 4, 3) ||
3829 gc_ver == IP_VERSION(9, 4, 4) ||
3830 gc_ver == IP_VERSION(9, 5, 0))) &&
3831 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3832 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3833 return 0;
3834
3835	/* only a few boards support vddboard */
3836 if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
3837 attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
3838 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3839 (void *)&tmp) == -EOPNOTSUPP)
3840 return 0;
3841
3842	/* no mclk on APUs other than GC 9.4.3 */
3843 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3844 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3845 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3846 return 0;
3847
3848 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3849 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
3850 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3851 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3852 attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3853 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3854 attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3855 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3856 return 0;
3857
3858	/* GC 9.4.3/9.4.4/9.5.0: no edge temp; hotspot/mem emergency temps are valid */
3859 if (gc_ver == IP_VERSION(9, 4, 3) ||
3860 gc_ver == IP_VERSION(9, 4, 4) ||
3861 gc_ver == IP_VERSION(9, 5, 0)) {
3862 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3863 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3864 attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3865 return 0;
3866
3867 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3868 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3869 return attr->mode;
3870 }
3871
3872 /* only SOC15 dGPUs support hotspot and mem temperatures */
3873 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3874 (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3875 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3876 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3877 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3878 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3879 return 0;
3880
3881 /* only a few GPUs have fast PPT limit and power labels */
3882 if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3883 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3884 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3885 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3886 attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
3887 (amdgpu_dpm_get_power_limit(adev, &tmp,
3888 PP_PWR_LIMIT_MAX,
3889 PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
3890 return 0;
3891
3892 return effective_mode;
3893}
3894
3895static const struct attribute_group hwmon_attrgroup = {
3896 .attrs = hwmon_attributes,
3897 .is_visible = hwmon_attributes_visible,
3898};
3899
3900static const struct attribute_group *hwmon_groups[] = {
3901 &hwmon_attrgroup,
3902 NULL
3903};
3904
3905static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3906 enum pp_clock_type od_type,
3907 char *buf)
3908{
3909 int size = 0;
3910 int ret;
3911
3912 ret = amdgpu_pm_get_access_if_active(adev);
3913 if (ret)
3914 return ret;
3915
	ret = amdgpu_dpm_emit_clock_levels(adev, od_type, buf, &size);
	if (ret) {
		amdgpu_pm_put_access(adev);
		return ret;
	}
	if (size == 0)
		size = sysfs_emit(buf, "\n");
3921
3922 amdgpu_pm_put_access(adev);
3923
3924 return size;
3925}
3926
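/*
 * A worked example of the parsing below (values illustrative): an input of
 * "1 45 120\n" yields params = {1, 45, 120} and *num_of_params = 3, with
 * *type left as passed in. A leading "c" selects PP_OD_COMMIT_DPM_TABLE
 * (no parameters); a leading "r" records the original *type in params[0]
 * and selects PP_OD_RESTORE_DEFAULT_TABLE.
 */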
3927static int parse_input_od_command_lines(const char *buf,
3928 size_t count,
3929 u32 *type,
3930 long *params,
3931 uint32_t *num_of_params)
3932{
3933 const char delimiter[3] = {' ', '\n', '\0'};
3934 uint32_t parameter_size = 0;
3935 char buf_cpy[128] = {0};
3936 char *tmp_str, *sub_str;
3937 int ret;
3938
3939 if (count > sizeof(buf_cpy) - 1)
3940 return -EINVAL;
3941
3942 memcpy(buf_cpy, buf, count);
3943 tmp_str = buf_cpy;
3944
3945	/* skip leading spaces */
3946 while (isspace(*tmp_str))
3947 tmp_str++;
3948
3949 switch (*tmp_str) {
3950 case 'c':
3951 *type = PP_OD_COMMIT_DPM_TABLE;
3952 return 0;
3953 case 'r':
3954 params[parameter_size] = *type;
3955 *num_of_params = 1;
3956 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3957 return 0;
3958 default:
3959 break;
3960 }
3961
3962 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3963 if (strlen(sub_str) == 0)
3964 continue;
3965
3966		ret = kstrtol(sub_str, 0, &params[parameter_size]);
3967 if (ret)
3968 return -EINVAL;
3969 parameter_size++;
3970
3971 if (!tmp_str)
3972 break;
3973
3974 while (isspace(*tmp_str))
3975 tmp_str++;
3976 }
3977
3978 *num_of_params = parameter_size;
3979
3980 return 0;
3981}
3982
3983static int
3984amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3985 enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3986 const char *in_buf,
3987 size_t count)
3988{
3989 uint32_t parameter_size = 0;
3990 long parameter[64];
3991 int ret;
3992
3993 ret = parse_input_od_command_lines(in_buf,
3994 count,
3995 &cmd_type,
3996 parameter,
3997					&parameter_size);
3998 if (ret)
3999 return ret;
4000
4001 ret = amdgpu_pm_get_access(adev);
4002 if (ret < 0)
4003 return ret;
4004
4005 ret = amdgpu_dpm_odn_edit_dpm_table(adev,
4006 cmd_type,
4007 parameter,
4008 parameter_size);
4009 if (ret)
4010 goto err_out;
4011
4012 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
4013 ret = amdgpu_dpm_dispatch_task(adev,
4014 AMD_PP_TASK_READJUST_POWER_STATE,
4015 NULL);
4016 if (ret)
4017 goto err_out;
4018 }
4019
4020 amdgpu_pm_put_access(adev);
4021
4022 return count;
4023
4024err_out:
4025 amdgpu_pm_put_access(adev);
4026
4027 return ret;
4028}
4029
4030/**
4031 * DOC: fan_curve
4032 *
4033 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
4034 * control curve line.
4035 *
4036 * Reading back the file shows you the current settings (temperature in
4037 * degrees Celsius and fan speed in PWM) applied to every anchor point of
4038 * the curve line and their permitted ranges if changeable.
4039 *
4040 * Writing a string of the form "anchor_point_index temperature
4041 * fan_speed_in_pwm" to the file changes the settings for that specific
4042 * anchor point accordingly.
4043 *
4044 * When you have finished the editing, write "c" (commit) to the file to commit
4045 * your changes.
4046 *
4047 * If you want to reset to the default values, write "r" (reset) to the
4048 * file to reset them.
4049 *
4050 * Two fan control modes are supported: auto and manual. In auto mode, the
4051 * PMFW handles the fan speed control (how the fan speed reacts to ASIC
4052 * temperature), while in manual mode, users can set their own fan curve
4053 * line as described here. Normally the ASIC boots up in auto mode. Any
4054 * settings via this interface will switch the fan control to manual mode
4055 * implicitly.
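 *
 * A typical manual-mode edit might look like the following shell session;
 * the sysfs path and the anchor point values are illustrative only:
 *
 *    $ cd /sys/class/drm/card0/device/gpu_od/fan_ctrl
 *    $ cat fan_curve                # inspect anchor points and ranges
 *    $ echo "2 45 120" > fan_curve  # anchor 2: 45 degrees Celsius -> PWM 120
 *    $ echo "c" > fan_curve         # commit the change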
4056 */
4057static ssize_t fan_curve_show(struct kobject *kobj,
4058 struct kobj_attribute *attr,
4059 char *buf)
4060{
4061 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4062 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4063
4064 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
4065}
4066
4067static ssize_t fan_curve_store(struct kobject *kobj,
4068 struct kobj_attribute *attr,
4069 const char *buf,
4070 size_t count)
4071{
4072 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4073 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4074
4075 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4076 PP_OD_EDIT_FAN_CURVE,
4077 buf,
4078 count);
4079}
4080
4081static umode_t fan_curve_visible(struct amdgpu_device *adev)
4082{
4083 umode_t umode = 0000;
4084
4085 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
4086 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4087
4088 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
4089 umode |= S_IWUSR;
4090
4091 return umode;
4092}
4093
4094/**
4095 * DOC: acoustic_limit_rpm_threshold
4096 *
4097 * The amdgpu driver provides a sysfs API for checking and adjusting the
4098 * acoustic limit in RPM for fan control.
4099 *
4100 * Reading back the file shows you the current setting and the permitted
4101 * ranges if changeable.
4102 *
4103 * Writing an integer to the file changes the setting accordingly.
4104 *
4105 * When you have finished the editing, write "c" (commit) to the file to commit
4106 * your changes.
4107 *
4108 * If you want to reset to the default value, write "r" (reset) to the file to
4109 * reset it.
4110 *
4111 * This setting works under auto fan control mode only. It adjusts the
4112 * maximum speed in RPM the fan is allowed to spin as enforced by the PMFW.
4113 * Setting via this interface will switch the fan control to auto mode implicitly.
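 *
 * For example (path relative to the device sysfs directory, value
 * illustrative):
 *
 *    $ echo 2900 > gpu_od/fan_ctrl/acoustic_limit_rpm_threshold
 *    $ echo "c" > gpu_od/fan_ctrl/acoustic_limit_rpm_threshold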
4114 */
4115static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
4116 struct kobj_attribute *attr,
4117 char *buf)
4118{
4119 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4120 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4121
4122 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
4123}
4124
4125static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
4126 struct kobj_attribute *attr,
4127 const char *buf,
4128 size_t count)
4129{
4130 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4131 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4132
4133 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4134 PP_OD_EDIT_ACOUSTIC_LIMIT,
4135 buf,
4136 count);
4137}
4138
4139static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
4140{
4141 umode_t umode = 0000;
4142
4143 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
4144 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4145
4146 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
4147 umode |= S_IWUSR;
4148
4149 return umode;
4150}
4151
4152/**
4153 * DOC: acoustic_target_rpm_threshold
4154 *
4155 * The amdgpu driver provides a sysfs API for checking and adjusting the
4156 * acoustic target in RPM for fan control.
4157 *
4158 * Reading back the file shows you the current setting and the permitted
4159 * ranges if changeable.
4160 *
4161 * Writing an integer to the file changes the setting accordingly.
4162 *
4163 * When you have finished the editing, write "c" (commit) to the file to commit
4164 * your changes.
4165 *
4166 * If you want to reset to the default value, write "r" (reset) to the file to
4167 * reset it.
4168 *
4169 * This setting works under auto fan control mode only. It can coexist with
4170 * other settings that also work under auto mode. It adjusts the maximum
4171 * speed in RPM the fan can spin while the ASIC temperature does not
4172 * exceed the target temperature. Setting via this
4173 * interface will switch the fan control to auto mode implicitly.
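 *
 * For example (path relative to the device sysfs directory, value
 * illustrative):
 *
 *    $ echo 1800 > gpu_od/fan_ctrl/acoustic_target_rpm_threshold
 *    $ echo "c" > gpu_od/fan_ctrl/acoustic_target_rpm_threshold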
4174 */
4175static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
4176 struct kobj_attribute *attr,
4177 char *buf)
4178{
4179 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4180 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4181
4182 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
4183}
4184
4185static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
4186 struct kobj_attribute *attr,
4187 const char *buf,
4188 size_t count)
4189{
4190 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4191 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4192
4193 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4194 PP_OD_EDIT_ACOUSTIC_TARGET,
4195 buf,
4196 count);
4197}
4198
4199static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
4200{
4201 umode_t umode = 0000;
4202
4203 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
4204 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4205
4206 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
4207 umode |= S_IWUSR;
4208
4209 return umode;
4210}
4211
4212/**
4213 * DOC: fan_target_temperature
4214 *
4215 * The amdgpu driver provides a sysfs API for checking and adjusting the
4216 * target temperature in degrees Celsius for fan control.
4217 *
4218 * Reading back the file shows you the current setting and the permitted
4219 * ranges if changeable.
4220 *
4221 * Writing an integer to the file changes the setting accordingly.
4222 *
4223 * When you have finished the editing, write "c" (commit) to the file to commit
4224 * your changes.
4225 *
4226 * If you want to reset to the default value, write "r" (reset) to the file to
4227 * reset it.
4228 *
4229 * This setting works under auto fan control mode only. It can coexist with
4230 * other settings that also work under auto mode. Paired with the
4231 * acoustic_target_rpm_threshold setting, it defines the maximum speed in
4232 * RPM the fan can spin while the ASIC temperature does not exceed the
4233 * target temperature. Setting via this interface will switch the fan
4234 * control to auto mode implicitly.
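 *
 * For example, targeting 80 degrees Celsius (path relative to the device
 * sysfs directory, value illustrative):
 *
 *    $ echo 80 > gpu_od/fan_ctrl/fan_target_temperature
 *    $ echo "c" > gpu_od/fan_ctrl/fan_target_temperature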
4235 */
4236static ssize_t fan_target_temperature_show(struct kobject *kobj,
4237 struct kobj_attribute *attr,
4238 char *buf)
4239{
4240 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4241 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4242
4243 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4244}
4245
4246static ssize_t fan_target_temperature_store(struct kobject *kobj,
4247 struct kobj_attribute *attr,
4248 const char *buf,
4249 size_t count)
4250{
4251 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4252 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4253
4254 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4255 PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4256 buf,
4257 count);
4258}
4259
4260static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4261{
4262 umode_t umode = 0000;
4263
4264 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4265 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4266
4267 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4268 umode |= S_IWUSR;
4269
4270 return umode;
4271}
4272
4273/**
4274 * DOC: fan_minimum_pwm
4275 *
4276 * The amdgpu driver provides a sysfs API for checking and adjusting the
4277 * minimum fan speed in PWM.
4278 *
4279 * Reading back the file shows you the current setting and the permitted
4280 * ranges if changeable.
4281 *
4282 * Writing an integer to the file changes the setting accordingly.
4283 *
4284 * When you have finished the editing, write "c" (commit) to the file to commit
4285 * your changes.
4286 *
4287 * If you want to reset to the default value, write "r" (reset) to the file to
4288 * reset it.
4289 *
4290 * This setting works under auto fan control mode only. It can coexist with
4291 * other settings that also work under auto mode. It adjusts the minimum
4292 * fan speed in PWM at which the fan should spin. Setting
4293 * via this interface will switch the fan control to auto mode implicitly.
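 *
 * For example (path relative to the device sysfs directory, value
 * illustrative):
 *
 *    $ echo 48 > gpu_od/fan_ctrl/fan_minimum_pwm
 *    $ echo "c" > gpu_od/fan_ctrl/fan_minimum_pwm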
4294 */
4295static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4296 struct kobj_attribute *attr,
4297 char *buf)
4298{
4299 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4300 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4301
4302 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4303}
4304
4305static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4306 struct kobj_attribute *attr,
4307 const char *buf,
4308 size_t count)
4309{
4310 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4311 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4312
4313 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4314 PP_OD_EDIT_FAN_MINIMUM_PWM,
4315 buf,
4316 count);
4317}
4318
4319static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4320{
4321 umode_t umode = 0000;
4322
4323 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4324 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4325
4326 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4327 umode |= S_IWUSR;
4328
4329 return umode;
4330}
4331
4332/**
4333 * DOC: fan_zero_rpm_enable
4334 *
4335 * The amdgpu driver provides a sysfs API for checking and adjusting the
4336 * zero RPM feature.
4337 *
4338 * Reading back the file shows you the current setting and the permitted
4339 * ranges if changeable.
4340 *
4341 * Writing an integer to the file changes the setting accordingly.
4342 *
4343 * When you have finished the editing, write "c" (commit) to the file to commit
4344 * your changes.
4345 *
4346 * If you want to reset to the default value, write "r" (reset) to the file to
4347 * reset them.
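 *
 * For example, enabling zero RPM (path relative to the device sysfs
 * directory):
 *
 *    $ echo 1 > gpu_od/fan_ctrl/fan_zero_rpm_enable
 *    $ echo "c" > gpu_od/fan_ctrl/fan_zero_rpm_enable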
4348 */
4349static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4350 struct kobj_attribute *attr,
4351 char *buf)
4352{
4353 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4354 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4355
4356 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4357}
4358
4359static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4360 struct kobj_attribute *attr,
4361 const char *buf,
4362 size_t count)
4363{
4364 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4365 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4366
4367 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4368 PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4369 buf,
4370 count);
4371}
4372
4373static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4374{
4375 umode_t umode = 0000;
4376
4377 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4378 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4379
4380 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4381 umode |= S_IWUSR;
4382
4383 return umode;
4384}
4385
4386/**
4387 * DOC: fan_zero_rpm_stop_temperature
4388 *
4389 * The amdgpu driver provides a sysfs API for checking and adjusting the
4390 * zero RPM stop temperature feature.
4391 *
4392 * Reading back the file shows you the current setting and the permitted
4393 * ranges if changeable.
4394 *
4395 * Writing an integer to the file changes the setting accordingly.
4396 *
4397 * When you have finished the editing, write "c" (commit) to the file to commit
4398 * your changes.
4399 *
4400 * If you want to reset to the default value, write "r" (reset) to the file to
4401 * reset them.
4402 *
4403 * This setting works only if the Zero RPM setting is enabled. It adjusts the
4404 * temperature below which the fan can stop.
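 *
 * For example, letting the fan stop below 50 degrees Celsius (path relative
 * to the device sysfs directory, value illustrative):
 *
 *    $ echo 50 > gpu_od/fan_ctrl/fan_zero_rpm_stop_temperature
 *    $ echo "c" > gpu_od/fan_ctrl/fan_zero_rpm_stop_temperature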
4405 */
4406static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4407 struct kobj_attribute *attr,
4408 char *buf)
4409{
4410 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4411 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4412
4413 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4414}
4415
4416static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4417 struct kobj_attribute *attr,
4418 const char *buf,
4419 size_t count)
4420{
4421 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4422 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4423
4424 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4425 PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4426 buf,
4427 count);
4428}
4429
4430static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4431{
4432 umode_t umode = 0000;
4433
4434 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4435 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4436
4437 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4438 umode |= S_IWUSR;
4439
4440 return umode;
4441}
4442
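/*
 * On ASICs where the corresponding features are supported, the table below
 * materializes as the following sysfs layout (device path illustrative):
 *
 *   /sys/class/drm/card0/device/gpu_od/fan_ctrl/
 *       fan_curve
 *       acoustic_limit_rpm_threshold
 *       acoustic_target_rpm_threshold
 *       fan_target_temperature
 *       fan_minimum_pwm
 *       fan_zero_rpm_enable
 *       fan_zero_rpm_stop_temperature
 */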
4443static struct od_feature_set amdgpu_od_set = {
4444 .containers = {
4445 [0] = {
4446 .name = "fan_ctrl",
4447 .sub_feature = {
4448 [0] = {
4449 .name = "fan_curve",
4450 .ops = {
4451 .is_visible = fan_curve_visible,
4452 .show = fan_curve_show,
4453 .store = fan_curve_store,
4454 },
4455 },
4456 [1] = {
4457 .name = "acoustic_limit_rpm_threshold",
4458 .ops = {
4459 .is_visible = acoustic_limit_threshold_visible,
4460 .show = acoustic_limit_threshold_show,
4461 .store = acoustic_limit_threshold_store,
4462 },
4463 },
4464 [2] = {
4465 .name = "acoustic_target_rpm_threshold",
4466 .ops = {
4467 .is_visible = acoustic_target_threshold_visible,
4468 .show = acoustic_target_threshold_show,
4469 .store = acoustic_target_threshold_store,
4470 },
4471 },
4472 [3] = {
4473 .name = "fan_target_temperature",
4474 .ops = {
4475 .is_visible = fan_target_temperature_visible,
4476 .show = fan_target_temperature_show,
4477 .store = fan_target_temperature_store,
4478 },
4479 },
4480 [4] = {
4481 .name = "fan_minimum_pwm",
4482 .ops = {
4483 .is_visible = fan_minimum_pwm_visible,
4484 .show = fan_minimum_pwm_show,
4485 .store = fan_minimum_pwm_store,
4486 },
4487 },
4488 [5] = {
4489 .name = "fan_zero_rpm_enable",
4490 .ops = {
4491 .is_visible = fan_zero_rpm_enable_visible,
4492 .show = fan_zero_rpm_enable_show,
4493 .store = fan_zero_rpm_enable_store,
4494 },
4495 },
4496 [6] = {
4497 .name = "fan_zero_rpm_stop_temperature",
4498 .ops = {
4499 .is_visible = fan_zero_rpm_stop_temp_visible,
4500 .show = fan_zero_rpm_stop_temp_show,
4501 .store = fan_zero_rpm_stop_temp_store,
4502 },
4503 },
4504 },
4505 },
4506 },
4507};
4508
4509static void od_kobj_release(struct kobject *kobj)
4510{
4511 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4512
4513 kfree(od_kobj);
4514}
4515
4516static const struct kobj_type od_ktype = {
4517 .release = od_kobj_release,
4518 .sysfs_ops = &kobj_sysfs_ops,
4519};
4520
4521static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4522{
4523 struct od_kobj *container, *container_next;
4524 struct od_attribute *attribute, *attribute_next;
4525
4526 if (list_empty(&adev->pm.od_kobj_list))
4527 return;
4528
4529 list_for_each_entry_safe(container, container_next,
4530 &adev->pm.od_kobj_list, entry) {
4531 list_del(&container->entry);
4532
4533 list_for_each_entry_safe(attribute, attribute_next,
4534 &container->attribute, entry) {
4535 list_del(&attribute->entry);
4536 sysfs_remove_file(&container->kobj,
4537 &attribute->attribute.attr);
4538 kfree(attribute);
4539 }
4540
4541 kobject_put(&container->kobj);
4542 }
4543}
4544
4545static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4546 struct od_feature_ops *feature_ops)
4547{
4548 umode_t mode;
4549
4550 if (!feature_ops->is_visible)
4551 return false;
4552
4553 /*
4554	 * If the feature has neither user read nor user write mode set,
4555	 * we can assume the feature is actually not supported,
4556	 * and the relevant sysfs interface should not be exposed.
4557 */
4558 mode = feature_ops->is_visible(adev);
4559 if (mode & (S_IRUSR | S_IWUSR))
4560 return true;
4561
4562 return false;
4563}
4564
4565static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4566 struct od_feature_container *container)
4567{
4568 int i;
4569
4570 /*
4571 * If there is no valid entry within the container, the container
4572	 * is recognized as a self-contained container. A valid entry here
4573	 * means one that has a valid name and is visible/supported by
4574	 * the ASIC.
4575 */
4576 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4577 if (container->sub_feature[i].name &&
4578 amdgpu_is_od_feature_supported(adev,
4579 &container->sub_feature[i].ops))
4580 return false;
4581 }
4582
4583 return true;
4584}
4585
4586static int amdgpu_od_set_init(struct amdgpu_device *adev)
4587{
4588 struct od_kobj *top_set, *sub_set;
4589 struct od_attribute *attribute;
4590 struct od_feature_container *container;
4591 struct od_feature_item *feature;
4592 int i, j;
4593 int ret;
4594
4595 /* Setup the top `gpu_od` directory which holds all other OD interfaces */
4596 top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4597 if (!top_set)
4598 return -ENOMEM;
4599 list_add(&top_set->entry, &adev->pm.od_kobj_list);
4600
4601 ret = kobject_init_and_add(&top_set->kobj,
4602 &od_ktype,
4603 &adev->dev->kobj,
4604 "%s",
4605 "gpu_od");
4606 if (ret)
4607 goto err_out;
4608 INIT_LIST_HEAD(&top_set->attribute);
4609 top_set->priv = adev;
4610
4611 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4612 container = &amdgpu_od_set.containers[i];
4613
4614 if (!container->name)
4615 continue;
4616
4617 /*
4618		 * If there are valid entries within the container, the container
4619		 * will be presented as a sub directory, and all the entries it
4620		 * holds will be presented as plain files under it. If there is
4621		 * no valid entry within the container, the container itself will
4622		 * be presented as a plain file under the top `gpu_od` directory.
4623 */
4624 if (amdgpu_od_is_self_contained(adev, container)) {
4625 if (!amdgpu_is_od_feature_supported(adev,
4626 &container->ops))
4627 continue;
4628
4629 /*
4630 * The container is presented as a plain file under top `gpu_od`
4631 * directory.
4632 */
4633 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4634 if (!attribute) {
4635 ret = -ENOMEM;
4636 goto err_out;
4637 }
4638 list_add(&attribute->entry, &top_set->attribute);
4639
4640 attribute->attribute.attr.mode =
4641 container->ops.is_visible(adev);
4642 attribute->attribute.attr.name = container->name;
4643 attribute->attribute.show =
4644 container->ops.show;
4645 attribute->attribute.store =
4646 container->ops.store;
4647 ret = sysfs_create_file(&top_set->kobj,
4648 &attribute->attribute.attr);
4649 if (ret)
4650 goto err_out;
4651 } else {
4652 /* The container is presented as a sub directory. */
4653 sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4654 if (!sub_set) {
4655 ret = -ENOMEM;
4656 goto err_out;
4657 }
4658 list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4659
4660 ret = kobject_init_and_add(&sub_set->kobj,
4661 &od_ktype,
4662 &top_set->kobj,
4663 "%s",
4664 container->name);
4665 if (ret)
4666 goto err_out;
4667 INIT_LIST_HEAD(&sub_set->attribute);
4668 sub_set->priv = adev;
4669
4670 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4671 feature = &container->sub_feature[j];
4672 if (!feature->name)
4673 continue;
4674
4675 if (!amdgpu_is_od_feature_supported(adev,
4676 &feature->ops))
4677 continue;
4678
4679 /*
4680 * With the container presented as a sub directory, the entry within
4681 * it is presented as a plain file under the sub directory.
4682 */
4683 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4684 if (!attribute) {
4685 ret = -ENOMEM;
4686 goto err_out;
4687 }
4688 list_add(&attribute->entry, &sub_set->attribute);
4689
4690 attribute->attribute.attr.mode =
4691 feature->ops.is_visible(adev);
4692 attribute->attribute.attr.name = feature->name;
4693 attribute->attribute.show =
4694 feature->ops.show;
4695 attribute->attribute.store =
4696 feature->ops.store;
4697 ret = sysfs_create_file(&sub_set->kobj,
4698 &attribute->attribute.attr);
4699 if (ret)
4700 goto err_out;
4701 }
4702 }
4703 }
4704
4705 /*
4706 * If gpu_od is the only member in the list, that means gpu_od is an
4707 * empty directory, so remove it.
4708 */
4709 if (list_is_singular(&adev->pm.od_kobj_list))
4710 goto err_out;
4711
4712 return 0;
4713
4714err_out:
4715 amdgpu_od_set_fini(adev);
4716
4717 return ret;
4718}
4719
4720int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4721{
4722 enum amdgpu_sriov_vf_mode mode;
4723 uint32_t mask = 0;
4724 uint32_t tmp;
4725 int ret;
4726
4727 if (adev->pm.sysfs_initialized)
4728 return 0;
4729
4730 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4731
4732 if (adev->pm.dpm_enabled == 0)
4733 return 0;
4734
4735 mode = amdgpu_virt_get_sriov_vf_mode(adev);
4736
4737	/* under multi-vf mode, none of the hwmon attributes are supported */
4738 if (mode != SRIOV_VF_MODE_MULTI_VF) {
4739 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4740 DRIVER_NAME, adev,
4741 hwmon_groups);
4742 if (IS_ERR(adev->pm.int_hwmon_dev)) {
4743 ret = PTR_ERR(adev->pm.int_hwmon_dev);
4744 dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4745 return ret;
4746 }
4747 }
4748
4749 switch (mode) {
4750 case SRIOV_VF_MODE_ONE_VF:
4751 mask = ATTR_FLAG_ONEVF;
4752 break;
4753 case SRIOV_VF_MODE_MULTI_VF:
4754 mask = 0;
4755 break;
4756 case SRIOV_VF_MODE_BARE_METAL:
4757 default:
4758 mask = ATTR_FLAG_MASK_ALL;
4759 break;
4760 }
4761
4762 ret = amdgpu_device_attr_create_groups(adev,
4763 amdgpu_device_attrs,
4764 ARRAY_SIZE(amdgpu_device_attrs),
4765 mask,
4766 &adev->pm.pm_attr_list);
4767 if (ret)
4768 goto err_out0;
4769
4770 if (amdgpu_dpm_is_overdrive_supported(adev)) {
4771 ret = amdgpu_od_set_init(adev);
4772 if (ret)
4773 goto err_out1;
4774 } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
4775 dev_info(adev->dev, "overdrive feature is not supported\n");
4776 }
4777
4778 if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
4779 -EOPNOTSUPP) {
4780 ret = devm_device_add_group(adev->dev,
4781 &amdgpu_pm_policy_attr_group);
4782 if (ret)
4783 goto err_out1;
4784 }
4785
4786 if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
4787 ret = devm_device_add_group(adev->dev,
4788 &amdgpu_board_attr_group);
4789 if (ret)
4790 goto err_out1;
4791 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
4792 (void *)&tmp) != -EOPNOTSUPP) {
4793 sysfs_add_file_to_group(&adev->dev->kobj,
4794 &dev_attr_cur_node_power_limit.attr,
4795 amdgpu_board_attr_group.name);
4796 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
4797 amdgpu_board_attr_group.name);
4798 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
4799 amdgpu_board_attr_group.name);
4800 sysfs_add_file_to_group(&adev->dev->kobj,
4801 &dev_attr_max_node_power_limit.attr,
4802 amdgpu_board_attr_group.name);
4803 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
4804 amdgpu_board_attr_group.name);
4805 }
4806 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT,
4807 (void *)&tmp) != -EOPNOTSUPP) {
4808 sysfs_add_file_to_group(&adev->dev->kobj,
4809 &dev_attr_baseboard_power_limit.attr,
4810 amdgpu_board_attr_group.name);
4811 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_baseboard_power.attr,
4812 amdgpu_board_attr_group.name);
4813 }
4814 }
4815
4816 adev->pm.sysfs_initialized = true;
4817
4818 return 0;
4819
4820err_out1:
4821 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4822err_out0:
4823 if (adev->pm.int_hwmon_dev)
4824 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4825
4826 return ret;
4827}
4828
4829void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4830{
4831 amdgpu_od_set_fini(adev);
4832
4833 if (adev->pm.int_hwmon_dev)
4834 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4835
4836 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4837}
4838
4839/*
4840 * Debugfs info
4841 */
4842#if defined(CONFIG_DEBUG_FS)
4843
4844static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4845 struct amdgpu_device *adev)
4846{
4847 uint16_t *p_val;
4848 uint32_t size;
4849 int i;
4850 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4851
4852 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

4856 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4857 (void *)p_val, &size)) {
4858 for (i = 0; i < num_cpu_cores; i++)
4859 seq_printf(m, "\t%u MHz (CPU%d)\n",
4860 *(p_val + i), i);
4861 }
4862
4863 kfree(p_val);
4864 }
4865}
4866
4867static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4868{
4869 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4870 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4871 uint32_t value;
4872 uint64_t value64 = 0;
4873 uint32_t query = 0;
4874 int size;
4875
4876 /* GPU Clocks */
4877 size = sizeof(value);
4878 seq_printf(m, "GFX Clocks and Power:\n");
4879
4880 amdgpu_debugfs_prints_cpu_info(m, adev);
4881
4882 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4883 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4884 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4885 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4886 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4887 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4888 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4889 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4890 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4891 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4892 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4893 seq_printf(m, "\t%u mV (VDDNB)\n", value);
4894 size = sizeof(uint32_t);
4895 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4896 if (adev->flags & AMD_IS_APU)
4897 seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4898 else
4899 seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4900 }
4901 size = sizeof(uint32_t);
4902 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4903 if (adev->flags & AMD_IS_APU)
4904 seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4905 else
4906 seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4907 }
4908 size = sizeof(value);
4909 seq_printf(m, "\n");
4910
4911 /* GPU Temp */
4912 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4913 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4914
4915 /* GPU Load */
4916 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4917 seq_printf(m, "GPU Load: %u %%\n", value);
4918 /* MEM Load */
4919 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4920 seq_printf(m, "MEM Load: %u %%\n", value);
4921 /* VCN Load */
4922 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
4923 seq_printf(m, "VCN Load: %u %%\n", value);
4924
4925 seq_printf(m, "\n");
4926
4927 /* SMC feature mask */
4928 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4929 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4930
4931	/* ASICs greater than CHIP_VEGA20 support these sensors */
4932 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
4933 /* VCN clocks */
4934 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4935 if (!value) {
4936 seq_printf(m, "VCN: Powered down\n");
4937 } else {
4938 seq_printf(m, "VCN: Powered up\n");
4939 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4940 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4941 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4942 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4943 }
4944 }
4945 seq_printf(m, "\n");
4946 } else {
4947 /* UVD clocks */
4948 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4949 if (!value) {
4950 seq_printf(m, "UVD: Powered down\n");
4951 } else {
4952 seq_printf(m, "UVD: Powered up\n");
4953 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4954 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4955 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4956 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4957 }
4958 }
4959 seq_printf(m, "\n");
4960
4961 /* VCE clocks */
4962 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4963 if (!value) {
4964 seq_printf(m, "VCE: Powered down\n");
4965 } else {
4966 seq_printf(m, "VCE: Powered up\n");
4967 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4968 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4969 }
4970 }
4971 }
4972
4973 return 0;
4974}
4975
4976static const struct cg_flag_name clocks[] = {
4977 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
4978 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
4979 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
4980 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
4981 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
4982 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
4983 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
4984 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
4985 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
4986 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
4987 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
4988 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
4989 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
4990 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
4991 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
4992 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
4993 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
4994 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
4995 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
4996 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
4997 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
4998	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
4999	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
5000	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
5001 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
5002 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
5003 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
5004 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
5005 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
5006 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
5007 {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
5008 {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
5009 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
5010 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
5011 {0, NULL},
5012};
5013
5014static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
5015{
5016 int i;
5017
5018 for (i = 0; clocks[i].flag; i++)
5019 seq_printf(m, "\t%s: %s\n", clocks[i].name,
5020 (flags & clocks[i].flag) ? "On" : "Off");
5021}
5022
5023static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
5024{
5025 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
5026 u64 flags = 0;
5027 int r;
5028
5029 r = amdgpu_pm_get_access(adev);
5030 if (r < 0)
5031 return r;
5032
5033 if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
5034 r = amdgpu_debugfs_pm_info_pp(m, adev);
5035 if (r)
5036 goto out;
5037 }
5038
5039 amdgpu_device_ip_get_clockgating_state(adev, &flags);
5040
5041 seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
5042 amdgpu_parse_cg_state(m, flags);
5043 seq_printf(m, "\n");
5044
5045out:
5046 amdgpu_pm_put_access(adev);
5047
5048 return r;
5049}
5050
5051DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
5052
5053/*
5054 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
5055 *
5056 * Reads debug memory region allocated to PMFW
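 *
 * A minimal usage sketch from userspace (the DRM debugfs minor number is
 * illustrative):
 *
 *    $ hexdump -C /sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer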
5057 */
5058static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
5059 size_t size, loff_t *pos)
5060{
5061 struct amdgpu_device *adev = file_inode(f)->i_private;
5062 size_t smu_prv_buf_size;
5063 void *smu_prv_buf;
5064 int ret = 0;
5065
5066 ret = amdgpu_pm_dev_state_check(adev, true);
5067 if (ret)
5068 return ret;
5069
5070 ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
5071 if (ret)
5072 return ret;
5073
5074 if (!smu_prv_buf || !smu_prv_buf_size)
5075 return -EINVAL;
5076
5077 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
5078 smu_prv_buf_size);
5079}
5080
5081static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
5082 .owner = THIS_MODULE,
5083 .open = simple_open,
5084 .read = amdgpu_pm_prv_buffer_read,
5085 .llseek = default_llseek,
5086};
5087
5088#endif
5089
5090void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
5091{
5092#if defined(CONFIG_DEBUG_FS)
5093 struct drm_minor *minor = adev_to_drm(adev)->primary;
5094 struct dentry *root = minor->debugfs_root;
5095
5096 if (!adev->pm.dpm_enabled)
5097 return;
5098
5099 debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
5100 &amdgpu_debugfs_pm_info_fops);
5101
5102 if (adev->pm.smu_prv_buffer_size > 0)
5103 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
5104 adev,
5105 &amdgpu_debugfs_pm_prv_buffer_fops,
5106 adev->pm.smu_prv_buffer_size);
5107
5108 amdgpu_dpm_stb_debug_fs_init(adev);
5109#endif
5110}