Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Processor Performance Control
 * (CPPC) feature, which works with the AMD SMU firmware to provide a finer
 * grained frequency control range. It replaces the legacy ACPI P-States
 * control and offers a flexible, low-latency interface for the Linux kernel
 * to communicate performance hints directly to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including
 * some Zen2 and Zen3 processors. _CPC must be present in the ACPI tables of
 * an AMD P-State supported system. There are two types of hardware
 * implementations for AMD P-State: 1) Full MSR Solution and 2) Shared Memory
 * Solution. The X86_FEATURE_CPPC CPU feature flag is used to distinguish
 * between the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
#include <linux/amd-pstate.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"

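/*
 * The transition latency is reported to cpufreq in nanoseconds and the
 * transition delay (minimum interval between frequency updates) in
 * microseconds; see amd_pstate_cpu_init().
 */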
#define AMD_PSTATE_TRANSITION_LATENCY	20000
#define AMD_PSTATE_TRANSITION_DELAY	1000

/*
 * TODO: We need more time to fine tune processors with the shared memory
 * solution together with the community.
 *
 * Some performance drops on CPU benchmarks were reported by SUSE. We are
 * working with them to fine tune the shared memory solution, so it is
 * disabled by default; those processors fall back to acpi-cpufreq, and a
 * module parameter allows enabling it manually for debugging.
 */
static struct cpufreq_driver amd_pstate_driver;
static int cppc_load __initdata;

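/*
 * The *_enable(), *_init_perf() and *_update_perf() helpers come in two
 * flavours: pstate_* for the full MSR solution and cppc_* for the shared
 * memory (ACPI CPPC) solution. The active flavour is selected at init time
 * via static calls.
 */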
static inline int pstate_enable(bool enable)
{
	return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
}

static int cppc_enable(bool enable)
{
	int cpu, ret = 0;

	for_each_present_cpu(cpu) {
		ret = cppc_set_enable(cpu, enable);
		if (ret)
			return ret;
	}

	return ret;
}

DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
	return static_call(amd_pstate_enable)(enable);
}

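/*
 * Read the per-CPU performance capabilities (highest, nominal, lowest
 * nonlinear and lowest perf) and cache them in cpudata: from
 * MSR_AMD_CPPC_CAP1 on the MSR path, or via the ACPI CPPC interface on the
 * shared memory path.
 */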
static int pstate_init_perf(struct amd_cpudata *cpudata)
{
	u64 cap1;
	u32 highest_perf;

	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
				     &cap1);
	if (ret)
		return ret;

	/*
	 * TODO: Introduce AMD specific power feature.
	 *
	 * CPPC entry doesn't indicate the highest performance in some ASICs.
	 */
	highest_perf = amd_get_highest_perf();
	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);

	WRITE_ONCE(cpudata->highest_perf, highest_perf);

	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));

	return 0;
}

static int cppc_init_perf(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 highest_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	highest_perf = amd_get_highest_perf();
	if (highest_perf > cppc_perf.highest_perf)
		highest_perf = cppc_perf.highest_perf;

	WRITE_ONCE(cpudata->highest_perf, highest_perf);

	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
		   cppc_perf.lowest_nonlinear_perf);
	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);

	return 0;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
	return static_call(amd_pstate_init_perf)(cpudata);
}

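/*
 * Write the cached CPPC request (cppc_req_cached) to MSR_AMD_CPPC_REQ. The
 * min/des/max_perf arguments are already encoded in the cached value by
 * amd_pstate_update(); fast_switch selects a direct write on the local CPU
 * instead of a cross-CPU write.
 */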
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			       u32 des_perf, u32 max_perf, bool fast_switch)
{
	if (fast_switch)
		wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
	else
		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
			      READ_ONCE(cpudata->cppc_req_cached));
}

static void cppc_update_perf(struct amd_cpudata *cpudata,
			     u32 min_perf, u32 des_perf,
			     u32 max_perf, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
					  u32 min_perf, u32 des_perf,
					  u32 max_perf, bool fast_switch)
{
	static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
					    max_perf, fast_switch);
}

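/*
 * Sample APERF, MPERF and the TSC, compute the deltas since the previous
 * sample and derive the effective frequency as aperf_delta * cpu_khz /
 * mperf_delta. Returns false if the counters have not advanced.
 */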
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
	u64 aperf, mperf, tsc;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();

	if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}

	local_irq_restore(flags);

	cpudata->cur.aperf = aperf;
	cpudata->cur.mperf = mperf;
	cpudata->cur.tsc = tsc;
	cpudata->cur.aperf -= cpudata->prev.aperf;
	cpudata->cur.mperf -= cpudata->prev.mperf;
	cpudata->cur.tsc -= cpudata->prev.tsc;

	cpudata->prev.aperf = aperf;
	cpudata->prev.mperf = mperf;
	cpudata->prev.tsc = tsc;

	cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

	return true;
}

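/*
 * Clamp des_perf to [min_perf, max_perf], encode the three values into the
 * CPPC request word and, if the word changed, cache it and push it to the
 * hardware via amd_pstate_update_perf().
 */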
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
			      u32 des_perf, u32 max_perf, bool fast_switch)
{
	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
	u64 value = prev;

	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
	value &= ~AMD_CPPC_MIN_PERF(~0L);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	value &= ~AMD_CPPC_DES_PERF(~0L);
	value |= AMD_CPPC_DES_PERF(des_perf);

	value &= ~AMD_CPPC_MAX_PERF(~0L);
	value |= AMD_CPPC_MAX_PERF(max_perf);

	if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
		trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
				      cpudata->cur.mperf, cpudata->cur.aperf,
				      cpudata->cur.tsc, cpudata->cpu,
				      (value != prev), fast_switch);
	}

	if (value == prev)
		return;

	WRITE_ONCE(cpudata->cppc_req_cached, value);

	amd_pstate_update_perf(cpudata, min_perf, des_perf,
			       max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	return 0;
}

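/*
 * cpufreq ->target() callback: translate the requested frequency into a
 * desired perf level, scaled by the highest perf over the maximum frequency.
 */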
static int amd_pstate_target(struct cpufreq_policy *policy,
			     unsigned int target_freq,
			     unsigned int relation)
{
	struct cpufreq_freqs freqs;
	struct amd_cpudata *cpudata = policy->driver_data;
	unsigned long max_perf, min_perf, des_perf, cap_perf;

	if (!cpudata->max_freq)
		return -ENODEV;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	min_perf = READ_ONCE(cpudata->lowest_perf);
	max_perf = cap_perf;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
				     cpudata->max_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	amd_pstate_update(cpudata, min_perf, des_perf,
			  max_perf, false);
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

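/*
 * cpufreq ->adjust_perf() fast path, used on full MSR systems: scale the
 * capacity-relative target and minimum values to the CPPC performance range,
 * bound the minimum by the lowest nonlinear perf, and apply the result
 * without a cpufreq frequency transition.
 */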
static void amd_pstate_adjust_perf(unsigned int cpu,
				   unsigned long _min_perf,
				   unsigned long target_perf,
				   unsigned long capacity)
{
	unsigned long max_perf, min_perf, des_perf,
		      cap_perf, lowest_nonlinear_perf;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct amd_cpudata *cpudata = policy->driver_data;

	cap_perf = READ_ONCE(cpudata->highest_perf);
	lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

	des_perf = cap_perf;
	if (target_perf < capacity)
		des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

	min_perf = READ_ONCE(cpudata->lowest_perf);
	if (_min_perf < capacity)
		min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

	if (min_perf < lowest_nonlinear_perf)
		min_perf = lowest_nonlinear_perf;

	max_perf = cap_perf;
	if (max_perf < min_perf)
		max_perf = min_perf;

	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
	cpufreq_cpu_put(policy);
}

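/*
 * The amd_get_*_freq() helpers query the ACPI CPPC capabilities and convert
 * the reported frequencies from MHz to kHz. The maximum and lowest nonlinear
 * frequencies are derived by scaling the nominal frequency with the
 * corresponding perf ratio.
 */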
static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.lowest_freq * 1000;
}

static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 max_perf, max_freq, nominal_freq, nominal_perf;
	u64 boost_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);
	max_perf = READ_ONCE(cpudata->highest_perf);

	boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
			      nominal_perf);

	max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return max_freq * 1000;
}

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	/* Switch to khz */
	return cppc_perf.nominal_freq * 1000;
}

static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
	struct cppc_perf_caps cppc_perf;
	u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
	    nominal_freq, nominal_perf;
	u64 lowest_nonlinear_ratio;

	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
	if (ret)
		return ret;

	nominal_freq = cppc_perf.nominal_freq;
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

	lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
					 nominal_perf);

	lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

	/* Switch to khz */
	return lowest_nonlinear_freq * 1000;
}

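/*
 * cpufreq ->set_boost() callback: cap cpuinfo.max_freq at the nominal
 * frequency when boost is disabled and update the max-frequency QoS request
 * accordingly.
 */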
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
{
	struct amd_cpudata *cpudata = policy->driver_data;
	int ret;

	if (!cpudata->boost_supported) {
		pr_err("Boost mode is not supported by this processor or SBIOS\n");
		return -EINVAL;
	}

	if (state)
		policy->cpuinfo.max_freq = cpudata->max_freq;
	else
		policy->cpuinfo.max_freq = cpudata->nominal_freq;

	policy->max = policy->cpuinfo.max_freq;

	ret = freq_qos_update_request(&cpudata->req[1],
				      policy->cpuinfo.max_freq);
	if (ret < 0)
		return ret;

	return 0;
}

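/* Boost is supported whenever the highest perf exceeds the nominal perf. */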
static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
{
	u32 highest_perf, nominal_perf;

	highest_perf = READ_ONCE(cpudata->highest_perf);
	nominal_perf = READ_ONCE(cpudata->nominal_perf);

	if (highest_perf <= nominal_perf)
		return;

	cpudata->boost_supported = true;
	amd_pstate_driver.boost_enabled = true;
}

static void amd_perf_ctl_reset(unsigned int cpu)
{
	wrmsrl_on_cpu(cpu, MSR_AMD_PERF_CTL, 0);
}

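/*
 * cpufreq ->init() callback: reset PERF_CTL, read the CPPC capabilities,
 * derive the frequency limits and register min/max frequency QoS requests.
 */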
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
	struct device *dev;
	struct amd_cpudata *cpudata;

	/*
	 * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
	 * which is ideal for the initialization process.
	 */
	amd_perf_ctl_reset(policy->cpu);
	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
	if (!cpudata)
		return -ENOMEM;

	cpudata->cpu = policy->cpu;

	ret = amd_pstate_init_perf(cpudata);
	if (ret)
		goto free_cpudata1;

	min_freq = amd_get_min_freq(cpudata);
	max_freq = amd_get_max_freq(cpudata);
	nominal_freq = amd_get_nominal_freq(cpudata);
	lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

	if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
		dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
			min_freq, max_freq);
		ret = -EINVAL;
		goto free_cpudata1;
	}

	policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
	policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

	policy->min = min_freq;
	policy->max = max_freq;

	policy->cpuinfo.min_freq = min_freq;
	policy->cpuinfo.max_freq = max_freq;

	/* It will be updated by governor */
	policy->cur = policy->cpuinfo.min_freq;

	if (boot_cpu_has(X86_FEATURE_CPPC))
		policy->fast_switch_possible = true;

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
				   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
		goto free_cpudata1;
	}

	ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
				   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
	if (ret < 0) {
		dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
		goto free_cpudata2;
	}

	/* Initial processor data capability frequencies */
	cpudata->max_freq = max_freq;
	cpudata->min_freq = min_freq;
	cpudata->nominal_freq = nominal_freq;
	cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

	policy->driver_data = cpudata;

	amd_pstate_boost_init(cpudata);

	return 0;

free_cpudata2:
	freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
	kfree(cpudata);
	return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	struct amd_cpudata *cpudata = policy->driver_data;

	freq_qos_remove_request(&cpudata->req[1]);
	freq_qos_remove_request(&cpudata->req[0]);
	kfree(cpudata);

	return 0;
}

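/* CPPC is disabled across system suspend and re-enabled on resume. */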
static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(true);
	if (ret)
		pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

	return ret;
}

static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
	int ret;

	ret = amd_pstate_enable(false);
	if (ret)
		pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

	return ret;
}

/* Sysfs attributes */

/*
 * This frequency indicates the maximum hardware frequency. If boost is not
 * active but supported, the frequency will be larger than the one in
 * cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
					char *buf)
{
	int max_freq;
	struct amd_cpudata *cpudata = policy->driver_data;

	max_freq = amd_get_max_freq(cpudata);
	if (max_freq < 0)
		return max_freq;

	return sprintf(&buf[0], "%u\n", max_freq);
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
						     char *buf)
{
	int freq;
	struct amd_cpudata *cpudata = policy->driver_data;

	freq = amd_get_lowest_nonlinear_freq(cpudata);
	if (freq < 0)
		return freq;

	return sprintf(&buf[0], "%u\n", freq);
}

/*
 * In some ASICs, the highest_perf is not the one reported in the _CPC table,
 * so we need to expose it through sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
					    char *buf)
{
	u32 perf;
	struct amd_cpudata *cpudata = policy->driver_data;

	perf = READ_ONCE(cpudata->highest_perf);

	return sprintf(&buf[0], "%u\n", perf);
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);

static struct freq_attr *amd_pstate_attr[] = {
	&amd_pstate_max_freq,
	&amd_pstate_lowest_nonlinear_freq,
	&amd_pstate_highest_perf,
	NULL,
};

static struct cpufreq_driver amd_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
	.verify		= amd_pstate_verify,
	.target		= amd_pstate_target,
	.init		= amd_pstate_cpu_init,
	.exit		= amd_pstate_cpu_exit,
	.suspend	= amd_pstate_cpu_suspend,
	.resume		= amd_pstate_cpu_resume,
	.set_boost	= amd_pstate_set_boost,
	.name		= "amd-pstate",
	.attr		= amd_pstate_attr,
};

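/*
 * Driver init: check for an AMD CPU and a valid _CPC object, select the MSR
 * or shared memory callbacks, enable CPPC and register the cpufreq driver.
 */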
static int __init amd_pstate_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;
	/*
	 * By default the pstate driver is disabled and will not load.
	 * Enable the amd-pstate passive mode driver explicitly with
	 * amd_pstate=passive on the kernel command line.
	 */
	if (!cppc_load) {
		pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
		return -ENODEV;
	}

	if (!acpi_cpc_valid()) {
		pr_warn_once("the _CPC object is not present in SBIOS or ACPI disabled\n");
		return -ENODEV;
	}

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	/* capability check */
	if (boot_cpu_has(X86_FEATURE_CPPC)) {
		pr_debug("AMD CPPC MSR based functionality is supported\n");
		amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
	} else {
		pr_debug("AMD CPPC shared memory based functionality is supported\n");
		static_call_update(amd_pstate_enable, cppc_enable);
		static_call_update(amd_pstate_init_perf, cppc_init_perf);
		static_call_update(amd_pstate_update_perf, cppc_update_perf);
	}

	/* enable amd pstate feature */
	ret = amd_pstate_enable(true);
	if (ret) {
		pr_err("failed to enable amd-pstate with return %d\n", ret);
		return ret;
	}

	ret = cpufreq_register_driver(&amd_pstate_driver);
	if (ret)
		pr_err("failed to register amd_pstate_driver with return %d\n",
		       ret);

	return ret;
}
device_initcall(amd_pstate_init);

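/*
 * Early "amd_pstate=" parameter parsing: "disable" prevents the driver from
 * loading, "passive" enables the passive mode driver.
 */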
static int __init amd_pstate_param(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		cppc_load = 0;
		pr_info("driver is explicitly disabled\n");
	} else if (!strcmp(str, "passive"))
		cppc_load = 1;

	return 0;
}
early_param("amd_pstate", amd_pstate_param);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
MODULE_LICENSE("GPL");