/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
#include "amdgpu_smu.h"

#define amdgpu_dpm_enable_bapm(adev, e) \
		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))

#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))

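/*
 * Note: on the legacy dpm paths (e.g. SI/KV parts) the driver stores the
 * amdgpu_device pointer itself in powerplay.pp_handle, which is what the
 * amdgpu_dpm_is_legacy_dpm() check above relies on.
 */
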
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
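
/*
 * Usage sketch (hypothetical caller): query the lowest and the highest
 * supported engine clock levels via the helper above; the return value is
 * whatever the backend's ->get_sclk reports (0 when the hook is absent).
 *
 *	uint32_t sclk_min = amdgpu_dpm_get_sclk(adev, true);
 *	uint32_t sclk_max = amdgpu_dpm_get_sclk(adev, false);
 */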

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
				 low);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block %d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	mutex_lock(&adev->pm.mutex);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
	case AMD_IP_BLOCK_TYPE_VPE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu)
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}
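
/*
 * Usage sketch (hypothetical caller): gate VCN while it is idle and ungate
 * it again before submitting new work; VCN is one of the block types
 * handled by the switch above.
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, true);
 *	...
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, false);
 */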

int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_gfx_power_up_by_imu(smu);
	mutex_unlock(&adev->pm.mutex);

	msleep(10);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->notify_rlc_state) {
		mutex_lock(&adev->pm.mutex);

		ret = pp_funcs->notify_rlc_state(
				adev->powerplay.pp_handle,
				en);

		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
	 * This is a workaround for some platforms
	 * where entering BACO during suspend
	 * seems to cause reboots or hangs.
	 * This might be related to the fact that BACO controls
	 * power to the whole GPU including devices like audio and USB.
	 * Powering down/up everything may adversely affect these other
	 * devices. Needs more investigation.
	 */
	if (adev->in_s3)
		return false;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->get_asic_baco_capability(pp_handle,
						 &baco_cap);

	mutex_unlock(&adev->pm.mutex);

	return ret ? false : baco_cap;
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_mode_2(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);

	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	if (ret)
		goto out;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

out:
	mutex_unlock(&adev->pm.mutex);
	return ret;
}
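
/*
 * Usage sketch (hypothetical reset path): only attempt a BACO reset when
 * the platform reports support for it.
 *
 *	if (amdgpu_dpm_is_baco_supported(adev)) {
 *		int r = amdgpu_dpm_baco_reset(adev);
 *
 *		if (r)
 *			dev_err(adev->dev, "BACO reset failed: %d\n", r);
 *	}
 */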

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	bool support_mode1_reset = false;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		support_mode1_reset = smu_mode1_reset_is_support(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return support_mode1_reset;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_mode1_reset(smu);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (pp_funcs && pp_funcs->switch_power_profile) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
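
/*
 * Usage sketch (hypothetical caller): request the COMPUTE profile for the
 * duration of a compute job, then drop the request afterwards.
 * PP_SMC_POWER_PROFILE_COMPUTE is one of the PP_SMC_POWER_PROFILE_* values
 * this helper accepts.
 *
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	... run the job ...
 *	amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, false);
 */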

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_xgmi_pstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
						pstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;

	if (pp_funcs && pp_funcs->set_df_cstate) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_get_xgmi_plpd_mode(struct amdgpu_device *adev, char **mode_desc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int mode = XGMI_PLPD_NONE;

	if (is_support_sw_smu(adev)) {
		mode = smu->plpd_mode;
		if (mode_desc == NULL)
			return mode;
		switch (smu->plpd_mode) {
		case XGMI_PLPD_DISALLOW:
			*mode_desc = "disallow";
			break;
		case XGMI_PLPD_DEFAULT:
			*mode_desc = "default";
			break;
		case XGMI_PLPD_OPTIMIZED:
			*mode_desc = "optimized";
			break;
		case XGMI_PLPD_NONE:
		default:
			*mode_desc = "none";
			break;
		}
	}

	return mode;
}

int amdgpu_dpm_set_xgmi_plpd_mode(struct amdgpu_device *adev, int mode)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = -EOPNOTSUPP;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_set_xgmi_plpd_mode(smu, mode);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;

		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(adev->powerplay.pp_handle);

		mutex_unlock(&adev->pm.mutex);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EINVAL;

	if (!data || !size)
		return -EINVAL;

	if (pp_funcs && pp_funcs->read_sensor) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor,
					    data,
					    size);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}
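
/*
 * Usage sketch (hypothetical caller): read the current GPU temperature,
 * which AMDGPU_PP_SENSOR_GPU_TEMP reports in millidegrees Celsius.
 *
 *	uint32_t temp;
 *	uint32_t size = sizeof(temp);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 *				    (void *)&temp, &size))
 *		dev_info(adev->dev, "GPU temperature: %u millidegrees\n", temp);
 */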

int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->get_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_apu_thermal_limit(adev->powerplay.pp_handle, limit);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int i;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	mutex_lock(&adev->pm.mutex);
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_dpm_compute_clocks(adev);
		return;
	}

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int r = 0;

	if (!pp_funcs || !pp_funcs->load_firmware)
		return 0;

	mutex_lock(&adev->pm.mutex);
	r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
	if (r) {
		pr_err("smu firmware loading failed\n");
		goto out;
	}

	if (smu_version)
		*smu_version = adev->pm.fw_version;

out:
	mutex_unlock(&adev->pm.mutex);
	return r;
}

int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
						 enable);
		mutex_unlock(&adev->pm.mutex);
	}

	return ret;
}

int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_pages_num(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_send_hbm_bad_channel_flag(smu, size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  uint32_t *min,
				  uint32_t *max)
{
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
				     SMU_SCLK,
				     min,
				     max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
				   enum pp_clock_type type,
				   uint32_t min,
				   uint32_t max)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (type != PP_SCLK)
		return -EINVAL;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_soft_freq_range(smu,
				      SMU_SCLK,
				      min,
				      max);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
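
/*
 * Usage sketch (hypothetical caller): read back the supported SCLK range
 * and then clamp the soft limits to it. Both helpers above currently
 * accept PP_SCLK only.
 *
 *	uint32_t min_clk, max_clk;
 *
 *	if (!amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_clk, &max_clk))
 *		amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, min_clk, max_clk);
 */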

int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = smu_write_watermarks_table(smu);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
			      enum smu_event_type event,
			      uint64_t event_arg)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_wait_for_event(smu, event, event_arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_residency_gfxoff(struct amdgpu_device *adev, bool value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_set_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_residency_gfxoff(struct amdgpu_device *adev, u32 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_residency_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_entrycount_gfxoff(struct amdgpu_device *adev, u64 *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_entrycount_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_status_gfxoff(smu, value);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return 0;

	return atomic64_read(&smu->throttle_int_counter);
}

/**
 * amdgpu_dpm_gfx_state_change - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
				 enum gfx_change_state state)
{
	mutex_lock(&adev->pm.mutex);
	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->gfx_state_change_set)
		((adev)->powerplay.pp_funcs->gfx_state_change_set(
			(adev)->powerplay.pp_handle, state));
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
			    void *umc_ecc)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = smu_get_ecc_info(smu, umc_ecc);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
						     uint32_t idx)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amd_vce_state *vstate = NULL;

	if (!pp_funcs->get_vce_clock_state)
		return NULL;

	mutex_lock(&adev->pm.mutex);
	vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
					       idx);
	mutex_unlock(&adev->pm.mutex);

	return vstate;
}

void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
					enum amd_pm_state_type *state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	mutex_lock(&adev->pm.mutex);

	if (!pp_funcs->get_current_power_state) {
		*state = adev->pm.dpm.user_state;
		goto out;
	}

	*state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
	if (*state < POWER_STATE_TYPE_DEFAULT ||
	    *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
		*state = adev->pm.dpm.user_state;

out:
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
				enum amd_pm_state_type state)
{
	mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.user_state = state;
	mutex_unlock(&adev->pm.mutex);

	if (is_support_sw_smu(adev))
		return;

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_ENABLE_USER_STATE,
				     &state) == -EOPNOTSUPP)
		amdgpu_dpm_compute_clocks(adev);
}

enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;

	if (!pp_funcs)
		return AMD_DPM_FORCED_LEVEL_AUTO;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->get_performance_level)
		level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
	else
		level = adev->pm.dpm.forced_level;
	mutex_unlock(&adev->pm.mutex);

	return level;
}

int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
				       enum amd_dpm_forced_level level)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level current_level;
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!pp_funcs || !pp_funcs->force_performance_level)
		return 0;

	if (adev->pm.dpm.thermal_active)
		return -EINVAL;

	current_level = amdgpu_dpm_get_performance_level(adev);
	if (current_level == level)
		return 0;

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
			    level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
				 level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & profile_mode_mask) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
		return -EINVAL;

	if (!(current_level & profile_mode_mask) &&
	    (level & profile_mode_mask)) {
		/* enter UMD Pstate */
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_UNGATE);
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_UNGATE);
	} else if ((current_level & profile_mode_mask) &&
		   !(level & profile_mode_mask)) {
		/* exit UMD Pstate */
		amdgpu_device_ip_set_clockgating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_CG_STATE_GATE);
		amdgpu_device_ip_set_powergating_state(adev,
						       AMD_IP_BLOCK_TYPE_GFX,
						       AMD_PG_STATE_GATE);
	}

	mutex_lock(&adev->pm.mutex);

	if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
					      level)) {
		mutex_unlock(&adev->pm.mutex);
		return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	mutex_unlock(&adev->pm.mutex);

	return 0;
}
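
/*
 * Usage sketch (hypothetical caller): pin the asic to its highest dpm
 * levels, then restore automatic selection afterwards.
 *
 *	amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_HIGH);
 *	... benchmark or debug at fixed clocks ...
 *	amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_AUTO);
 */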

int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
				 struct pp_states_info *states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_num_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
					  states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
			     enum amd_pp_task task_id,
			     enum amd_pm_state_type *user_state)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->dispatch_tasks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
				       task_id,
				       user_state);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_pp_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
				     table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
				      uint32_t type,
				      long *input,
				      uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fine_grain_clk_vol)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
					       type,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
				  uint32_t type,
				  long *input,
				  uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->odn_edit_dpm_table)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
					   type,
					   input,
					   size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
				  enum pp_clock_type type,
				  char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->print_clock_levels)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
					   type,
					   buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 char *buf,
				 int *offset)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->emit_clock_levels)
		return -ENOENT;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
					  type,
					  buf,
					  offset);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
				    uint64_t ppfeature_masks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
					     ppfeature_masks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_ppfeature_status)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
					     buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
				 enum pp_clock_type type,
				 uint32_t mask)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->force_clock_level)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
					  type,
					  mask);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_sclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_sclk_od)
		pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_mclk_od)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (is_support_sw_smu(adev))
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	if (pp_funcs->set_mclk_od)
		pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
	mutex_unlock(&adev->pm.mutex);

	if (amdgpu_dpm_dispatch_task(adev,
				     AMD_PP_TASK_READJUST_POWER_STATE,
				     NULL) == -EOPNOTSUPP) {
		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
		amdgpu_dpm_compute_clocks(adev);
	}

	return 0;
}

int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
				      char *buf)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
					       buf);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
				      long *input, uint32_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_profile_mode)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
					       input,
					       size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_gpu_metrics)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
					table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t *fan_mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
					     fan_mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t *speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
				 uint32_t speed)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
					  speed);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
				    uint32_t mode)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
					     mode);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
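
/*
 * Usage sketch (hypothetical caller): take manual control of the fan and
 * set it to roughly half speed, then hand control back to the firmware.
 * AMD_FAN_CTRL_MANUAL/AMD_FAN_CTRL_AUTO come from enum amd_fan_ctrl_mode;
 * PWM values are in the 0-255 range.
 *
 *	if (!amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL))
 *		amdgpu_dpm_set_fan_speed_pwm(adev, 128);
 *	...
 *	amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_AUTO);
 */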

int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
			       uint32_t *limit,
			       enum pp_power_limit_level pp_limit_level,
			       enum pp_power_type power_type)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_power_limit)
		return -ENODATA;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
					limit,
					pp_limit_level,
					power_type);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
			       uint32_t limit)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_power_limit)
		return -EINVAL;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
					limit);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
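
/*
 * Usage sketch (hypothetical caller): query the maximum sustained power
 * limit and apply it. PP_PWR_LIMIT_MAX and PP_PWR_TYPE_SUSTAINED come from
 * the pp_power_limit_level/pp_power_type enums used above.
 *
 *	uint32_t limit;
 *
 *	if (!amdgpu_dpm_get_power_limit(adev, &limit,
 *					PP_PWR_LIMIT_MAX,
 *					PP_PWR_TYPE_SUSTAINED))
 *		amdgpu_dpm_set_power_limit(adev, limit);
 */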

int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
{
	bool cclk_dpm_supported = false;

	if (!is_support_sw_smu(adev))
		return false;

	mutex_lock(&adev->pm.mutex);
	cclk_dpm_supported = is_support_cclk_dpm(adev);
	mutex_unlock(&adev->pm.mutex);

	return (int)cclk_dpm_supported;
}

int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
						       struct seq_file *m)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->debugfs_print_current_performance_level)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
							  m);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
				       void **addr,
				       size_t *size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_smu_prv_buf_details)
		return -ENOSYS;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
						addr,
						size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
{
	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = adev->powerplay.pp_handle;

		return (smu->od_enabled || smu->is_apu);
	} else {
		struct pp_hwmgr *hwmgr;

		/*
		 * dpm on some legacy asics doesn't carry an od_enabled member
		 * as its pp_handle is cast directly from adev.
		 */
		if (amdgpu_dpm_is_legacy_dpm(adev))
			return false;

		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;

		return hwmgr->od_enabled;
	}
}

int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
			    const char *buf,
			    size_t size)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_pp_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
				     buf,
				     size);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!is_support_sw_smu(adev))
		return INT_MAX;

	return smu->cpu_core_num;
}

void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return;

	amdgpu_smu_stb_debug_fs_init(adev);
}

int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
					    const struct amd_pp_display_configuration *input)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_configuration_change)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
						     input);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
				 enum amd_pp_clock_type type,
				 struct amd_pp_clocks *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
					  type,
					  clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
						struct amd_pp_simple_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_display_mode_validation_clocks)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
							   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_latency)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_voltage *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_clock_by_type_with_voltage)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
						       type,
						       clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
						void *clock_ranges)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_watermarks_for_clocks_ranges)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
							 clock_ranges);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
					     struct pp_display_clock_request *clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_clock_voltage_request)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
						      clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
				  struct amd_pp_clock_info *clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_current_clocks)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
					   clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->notify_smu_enable_pwe)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
					uint32_t count)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_active_display_count)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
						 count);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->set_min_deep_sleep_dcefclk)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
						   clock);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
					     uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_dcefclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
					       clock);
	mutex_unlock(&adev->pm.mutex);
}

void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
					  uint32_t clock)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!pp_funcs->set_hard_min_fclk_by_freq)
		return;

	mutex_lock(&adev->pm.mutex);
	pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
					    clock);
	mutex_unlock(&adev->pm.mutex);
}

int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
						   bool disable_memory_clock_switch)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->display_disable_memory_clock_switch)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
							    disable_memory_clock_switch);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
						struct pp_smu_nv_clock_table *max_clocks)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_max_sustainable_clocks_by_dc)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
							 max_clocks);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
						  unsigned int *clock_values_in_khz,
						  unsigned int *num_states)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_uclk_dpm_states)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
					    clock_values_in_khz,
					    num_states);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
				   struct dpm_clocks *clock_table)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret = 0;

	if (!pp_funcs->get_dpm_clock_table)
		return -EOPNOTSUPP;

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
					    clock_table);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}