// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf),
	 * we need to take the read_lock and check if the channel belongs
	 * to OSPM before reading or writing to the PCC subspace, and take
	 * the write_lock before transferring channel ownership to the
	 * platform via a doorbell. This allows us to batch a number of
	 * CPPC requests if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init), take the
	 * write_lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					struct kobj_attribute *attr,	\
					char *buf)			\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return sysfs_emit(buf, "%llu\n",			\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, guaranteed_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

/* Check for valid access_width, otherwise, fall back to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)

/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) &	\
					GENMASK(((reg)->bit_width) - 1, 0))
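
/*
 * Worked example (illustrative, not part of the original file): for a
 * SystemMemory register described with access_width = 2 (a 16-bit access,
 * per GET_BIT_WIDTH: 8 << (2 - 1) = 16), bit_offset = 8 and bit_width = 8,
 * a raw readw_relaxed() of 0xBEEF yields
 *
 *	MASK_VAL(reg, 0xBEEF) = (0xBEEF >> 8) & GENMASK(7, 0) = 0xBE
 *
 * i.e. the 8 significant bits are shifted down and masked out of the
 * wider hardware access.
 */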

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return sysfs_emit(buf, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&guaranteed_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
ATTRIBUTE_GROUPS(cppc);

static const struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll the PCC status register every 3us (delay_us) for a maximum of
	 * deadline_us (timeout_us) until the PCC command complete bit is set (cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	if (acpi_disabled)
		return false;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
		    !CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given CPU.
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *  Name (_CPC, Package() {
 *      17,							// NumEntries
 *      1,							// Revision
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *      ...
 *      ...
 *      ...
 *  })
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *  Register (
 *      PCC,	// AddressSpaceKeyword
 *      8,	// RegisterBitWidth
 *      8,	// RegisterBitOffset
 *      0x30,	// RegisterAddress
 *      9,	// AccessSize (subspace ID)
 *  )
 */

#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (!osc_sb_cppc2_support_acked) {
		pr_debug("CPPC v2 _OSC not acked\n");
		if (!cpc_supported_by_cpu()) {
			pr_debug("CPPC is not supported by the CPU\n");
			return -ENODEV;
		}
	}

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the returned package is
	 * not as expected, but support future revisions being proper supersets
	 * of v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;
					size_t access_width;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						if (!cpc_supported_by_cpu())
							goto out_free;
					}

					access_width = GET_BIT_WIDTH(gas_t) / 8;
					addr = ioremap(gas_t->address, access_width);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					if (!cpc_supported_by_cpu())
						goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
				 i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	arch_init_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	placeholder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code on failure
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code on failure
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int size;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;
	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	}
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, size);

	switch (size) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		return -EFAULT;
	}

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		*val = MASK_VAL(reg, *val);

	return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	int size;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	size = GET_BIT_WIDTH(reg);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, size);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
		/*
		 * For registers in PCC space, the register size is determined
		 * by the bit width field; the access size is used to indicate
		 * the PCC subspace id.
		 */
		size = reg->bit_width;
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	}
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, size);

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		val = MASK_VAL(reg, val);

	switch (size) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
			pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
				 size, reg->address);
		} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
			pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
				 size, pcc_ss_id);
		}
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
	return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);

/**
 * cppc_get_epp_perf - Get the EPP register value.
 * @cpunum: CPU from which to get the EPP preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
	return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
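
/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): how a cpufreq-style driver might query a CPU's
 * capabilities and convert the nominal perf level to a frequency with
 * cppc_perf_to_khz(), defined later in this file.
 */
static int __maybe_unused cppc_example_report_caps(int cpu)
{
	struct cppc_perf_caps caps;
	int ret;

	/* Single doorbell transaction if any of the caps regs live in PCC */
	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	pr_debug("CPU%d: perf range [%u..%u], nominal %u (~%u kHz)\n",
		 cpu, caps.lowest_perf, caps.highest_perf, caps.nominal_perf,
		 cppc_perf_to_khz(&caps, caps.nominal_perf));

	return 0;
}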

/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency. This
 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct cpc_register_resource *ref_perf_reg;
		struct cpc_desc *cpc_desc;

		cpc_desc = per_cpu(cpc_desc_ptr, cpu);

		if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
		    CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
			return true;

		ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];

		/*
		 * If reference perf register is not supported then we should
		 * use the nominal perf value
		 */
		if (!CPC_SUPPORTED(ref_perf_reg))
			ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

		if (CPC_IN_PCC(ref_perf_reg))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
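
/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): deriving the average delivered performance over an
 * interval from two cppc_get_perf_ctrs() snapshots, using the usual
 * CPPC feedback-counter arithmetic:
 *
 *	avg perf = reference_perf * delta(delivered) / delta(reference)
 *
 * Counter wraparound handling (wraparound_time) is omitted for brevity.
 */
static u64 __maybe_unused cppc_example_avg_perf(struct cppc_perf_fb_ctrs *t0,
						struct cppc_perf_fb_ctrs *t1)
{
	u64 delta_ref = t1->reference - t0->reference;
	u64 delta_del = t1->delivered - t0->delivered;

	/* No elapsed reference cycles; avoid dividing by zero */
	if (!delta_ref)
		return 0;

	return div64_u64(t1->reference_perf * delta_del, delta_ref);
}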

/*
 * Set Energy Performance Preference Register value through
 * Performance Controls Interface
 */
int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *epp_set_reg;
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
	epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];

	if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		if (CPC_SUPPORTED(epp_set_reg)) {
			ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC is not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_epp_perf);

/**
 * cppc_get_auto_sel_caps - Read autonomous selection register.
 * @cpunum : CPU from which to read register.
 * @perf_caps : struct where autonomous selection register value is updated.
 */
int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *auto_sel_reg;
	u64 auto_sel;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (!CPC_SUPPORTED(auto_sel_reg))
		pr_warn_once("Autonomous mode is not supported!\n");

	if (CPC_IN_PCC(auto_sel_reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -ENODEV;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
			cpc_read(cpunum, auto_sel_reg, &auto_sel);
			perf_caps->auto_sel = (bool)auto_sel;
		} else {
			ret = -EIO;
		}

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);

/**
 * cppc_set_auto_sel - Write autonomous selection register.
 * @cpu    : CPU to which to write register.
 * @enable : the desired value of the autonomous selection register.
 */
int cppc_set_auto_sel(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *auto_sel_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];

	if (CPC_IN_PCC(auto_sel_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}

		if (CPC_SUPPORTED(auto_sel_reg)) {
			ret = cpc_write(cpu, auto_sel_reg, enable);
			if (ret)
				return ret;
		}

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
	} else {
		ret = -ENOTSUPP;
		pr_debug("_CPC in PCC is not supported\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_auto_sel);

/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Collaborative Processor Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {
		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
	max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	/*
	 * Only write if min_perf and max_perf are not zero. Some drivers pass
	 * zero to min and max perf, but they don't mean to set the zero value;
	 * they just don't want to write to those registers.
	 */
	if (perf_ctrls->min_perf)
		cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
	if (perf_ctrls->max_perf)
		cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);

	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short Summary: If we think of a group of cppc_set_perf requests that
	 * happened in a short overlapping interval, the last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 *     currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 *     entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 *     to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at least one CPU in Phase-I which will later execute
	 *     write_trylock, so the CPUs in Phase-I will be responsible for
	 *     executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 *     write_trylock and has already acquired the write_lock. We know for a
	 *     fact it (other CPU acquiring the write_lock) couldn't have happened
	 *     before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 *     down_write, in which case, send_pcc_cmd will check for pending
	 *     CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
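
/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): issuing a bounded performance request as described in the
 * comment at the top of this file - clamp the desired level into the
 * [lowest, highest] range from _CPC and let the platform optimize within
 * those bounds.
 */
static int __maybe_unused cppc_example_request_perf(int cpu, u32 desired)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = {};
	int ret;

	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	/* Provide min and max bounds along with the desired level */
	ctrls.min_perf = caps.lowest_perf;
	ctrls.max_perf = caps.highest_perf;
	ctrls.desired_perf = clamp(desired, caps.lowest_perf,
				   caps.highest_perf);

	return cppc_set_perf(cpu, &ctrls);
}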

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for per_cpu().
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the subspace
	 *               channel can support, reported in commands per minute. 0
	 *               indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after the
	 *               completion of a command before issuing the next command,
	 *               in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
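
/*
 * Worked example (illustrative, with made-up PCCT values): for
 * pcc_mpar = 600 commands/minute, pcc_nominal = 500 us and
 * pcc_mrtt = 100 us, the MPAR term gives
 * 60 * (10^9 / 600) ~= 100,000,000 ns (one command per 100 ms), which
 * dominates the nominal (500,000 ns) and MRTT (100,000 ns) terms, so
 * cppc_get_transition_latency() would report roughly 100 ms here.
 */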

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = umax(val, *mhz);
	}
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real stupid fallback value, just in case there is no
	 * actual value set.
	 */
	mhz = mhz ? mhz : 1;

	return KHZ_PER_MHZ * mhz;
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa. The conversion is
 * extrapolated as an affine function passing through the two points:
 * - (Low perf, Low freq)
 * - (Nominal perf, Nominal freq)
 */
unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_freq - caps->lowest_freq;
		mul *= KHZ_PER_MHZ;
		div = caps->nominal_perf - caps->lowest_perf;
		offset = caps->nominal_freq * KHZ_PER_MHZ -
			 div64_u64(caps->nominal_perf * mul, div);
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}

	retval = offset + div64_u64(perf * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
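
/*
 * Worked example (illustrative, with made-up _CPC values): with
 * lowest = (perf 10, 500 MHz) and nominal = (perf 50, 2500 MHz),
 * the slope is
 *	mul / div = (2500 - 500) * 1000 kHz / (50 - 10) = 50,000 kHz per perf unit
 * and the offset is
 *	2500 * 1000 - 50 * 50,000 = 0 kHz,
 * so cppc_perf_to_khz() maps perf 30 to 1,500,000 kHz (1.5 GHz), and
 * cppc_khz_to_perf() below inverts the same affine function.
 */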

unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
{
	s64 retval, offset = 0;
	static u64 max_khz;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		mul = caps->nominal_perf - caps->lowest_perf;
		div = caps->nominal_freq - caps->lowest_freq;
		/*
		 * We don't need to convert to kHz for computing offset and can
		 * directly use nominal_freq and lowest_freq as the div64_u64
		 * will remove the frequency unit.
		 */
		offset = caps->nominal_perf -
			 div64_u64(caps->nominal_freq * mul, div);
		/* But we need it for computing the perf level. */
		div *= KHZ_PER_MHZ;
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	retval = offset + div64_u64(freq * mul, div);
	if (retval >= 0)
		return retval;
	return 0;
}
EXPORT_SYMBOL_GPL(cppc_khz_to_perf);