Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cpufreq: intel_pstate: hybrid: Fix build with CONFIG_ACPI unset

One of the previous commits introducing hybrid processor support to
intel_pstate broke build with CONFIG_ACPI unset.

Fix that and, while at it, make empty stubs of two functions related
to ACPI CPPC static inline, and fix a spelling mistake in the name of
one of them.

Fixes: eb3693f0521e ("cpufreq: intel_pstate: hybrid: CPU-specific scaling factor")
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reported-by: Randy Dunlap <rdunlap@infradead.org>
Acked-by: Randy Dunlap <rdunlap@infradead.org> # build-tested

+46 -41
+46 -41
drivers/cpufreq/intel_pstate.c
··· 369 369 } 370 370 } 371 371 372 - static int intel_pstate_get_cppc_guranteed(int cpu) 372 + static int intel_pstate_get_cppc_guaranteed(int cpu) 373 373 { 374 374 struct cppc_perf_caps cppc_perf; 375 375 int ret; ··· 385 385 } 386 386 387 387 #else /* CONFIG_ACPI_CPPC_LIB */ 388 - static void intel_pstate_set_itmt_prio(int cpu) 388 + static inline void intel_pstate_set_itmt_prio(int cpu) 389 389 { 390 390 } 391 391 #endif /* CONFIG_ACPI_CPPC_LIB */ ··· 470 470 471 471 acpi_processor_unregister_performance(policy->cpu); 472 472 } 473 + 474 + static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps) 475 + { 476 + return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf; 477 + } 478 + 479 + static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu, 480 + struct cppc_perf_caps *caps) 481 + { 482 + if (cppc_get_perf_caps(cpu->cpu, caps)) 483 + return false; 484 + 485 + return caps->highest_perf && caps->lowest_perf <= caps->highest_perf; 486 + } 473 487 #else /* CONFIG_ACPI */ 474 488 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) 475 489 { ··· 500 486 #endif /* CONFIG_ACPI */ 501 487 502 488 #ifndef CONFIG_ACPI_CPPC_LIB 503 - static int intel_pstate_get_cppc_guranteed(int cpu) 489 + static inline int intel_pstate_get_cppc_guaranteed(int cpu) 504 490 { 505 491 return -ENOTSUPP; 506 492 } 507 493 #endif /* CONFIG_ACPI_CPPC_LIB */ 508 - 509 - static bool intel_pstate_cppc_perf_valid(u32 perf, struct cppc_perf_caps *caps) 510 - { 511 - return perf && perf <= caps->highest_perf && perf >= caps->lowest_perf; 512 - } 513 - 514 - static bool intel_pstate_cppc_perf_caps(struct cpudata *cpu, 515 - struct cppc_perf_caps *caps) 516 - { 517 - if (cppc_get_perf_caps(cpu->cpu, caps)) 518 - return false; 519 - 520 - return caps->highest_perf && caps->lowest_perf <= caps->highest_perf; 521 - } 522 494 523 495 static void intel_pstate_hybrid_hwp_perf_ctl_parity(struct cpudata *cpu) 524 496 { ··· 530 530 */ 
531 531 static void intel_pstate_hybrid_hwp_calibrate(struct cpudata *cpu) 532 532 { 533 - struct cppc_perf_caps caps; 534 533 int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; 535 534 int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; 536 535 int perf_ctl_turbo = pstate_funcs.get_turbo(); ··· 547 548 pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); 548 549 pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); 549 550 550 - if (intel_pstate_cppc_perf_caps(cpu, &caps)) { 551 - if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) { 552 - pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu); 551 + #ifdef CONFIG_ACPI 552 + if (IS_ENABLED(CONFIG_ACPI_CPPC_LIB)) { 553 + struct cppc_perf_caps caps; 553 554 554 - /* 555 - * If the CPPC nominal performance is valid, it can be 556 - * assumed to correspond to cpu_khz. 557 - */ 558 - if (caps.nominal_perf == perf_ctl_max_phys) { 559 - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); 560 - return; 561 - } 562 - scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf); 563 - } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) { 564 - pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu); 555 + if (intel_pstate_cppc_perf_caps(cpu, &caps)) { 556 + if (intel_pstate_cppc_perf_valid(caps.nominal_perf, &caps)) { 557 + pr_debug("CPU%d: Using CPPC nominal\n", cpu->cpu); 565 558 566 - /* 567 - * If the CPPC guaranteed performance is valid, it can 568 - * be assumed to correspond to max_freq. 569 - */ 570 - if (caps.guaranteed_perf == perf_ctl_max) { 571 - intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); 572 - return; 559 + /* 560 + * If the CPPC nominal performance is valid, it 561 + * can be assumed to correspond to cpu_khz. 
562 + */ 563 + if (caps.nominal_perf == perf_ctl_max_phys) { 564 + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); 565 + return; 566 + } 567 + scaling = DIV_ROUND_UP(cpu_khz, caps.nominal_perf); 568 + } else if (intel_pstate_cppc_perf_valid(caps.guaranteed_perf, &caps)) { 569 + pr_debug("CPU%d: Using CPPC guaranteed\n", cpu->cpu); 570 + 571 + /* 572 + * If the CPPC guaranteed performance is valid, 573 + * it can be assumed to correspond to max_freq. 574 + */ 575 + if (caps.guaranteed_perf == perf_ctl_max) { 576 + intel_pstate_hybrid_hwp_perf_ctl_parity(cpu); 577 + return; 578 + } 579 + scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf); 573 580 } 574 - scaling = DIV_ROUND_UP(max_freq, caps.guaranteed_perf); 575 581 } 576 582 } 583 + #endif 577 584 /* 578 585 * If using the CPPC data to compute the HWP-to-frequency scaling factor 579 586 * doesn't work, use the HWP_CAP gauranteed perf for this purpose with ··· 949 944 struct cpudata *cpu = all_cpu_data[policy->cpu]; 950 945 int ratio, freq; 951 946 952 - ratio = intel_pstate_get_cppc_guranteed(policy->cpu); 947 + ratio = intel_pstate_get_cppc_guaranteed(policy->cpu); 953 948 if (ratio <= 0) { 954 949 u64 cap; 955 950