// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */
	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical use cases (currently cppc_set_perf), we
	 * need to take the read lock and check whether the channel belongs
	 * to the OSPM before reading from or writing to the PCC subspace,
	 * and take the write lock before transferring channel ownership to
	 * the platform via a doorbell. This allows us to batch a number of
	 * CPPC requests if they happen to originate at about the same time.
	 *
	 * For non-performance-critical use cases (init), take the write
	 * lock for all purposes, which gives exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))
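/*
 * The 0x8 above skips the generic PCC shared-memory region header, i.e. the
 * Signature (4 bytes), Command (2 bytes) and Status (2 bytes) fields of
 * struct acpi_pcct_shared_memory.
 */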

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,	\
					struct kobj_attribute *attr,	\
					char *buf)			\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

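/*
 * For example, show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf)
 * expands to a show_highest_perf() handler that fills a struct cppc_perf_caps
 * via cppc_get_perf_caps() and prints its highest_perf member, plus a
 * read-only "highest_perf" kobj_attribute bound to that handler.
 */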
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
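/* ATTRIBUTE_GROUPS(cppc) generates the cppc_groups table from cppc_attrs. */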
ATTRIBUTE_GROUPS(cppc);

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};

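/*
 * Poll the PCC channel until the platform hands ownership back to OSPM.
 * Returns 0 if OSPM already owns the channel or once the command-complete
 * bit is set, -ETIMEDOUT if the platform does not respond within
 * deadline_us, and -EIO if chk_err_bit is set and the platform reported an
 * error.
 */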
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					 status & PCC_CMD_COMPLETE_MASK, 3,
					 pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding the write lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds."
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough so that it can
	 * handle the maximum number of requests that all the cores in the system
	 * can collectively generate. If it is not, we will follow the spec and
	 * just not send the request to the platform after hitting the MPAR
	 * limit in any 60s window.
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit\n",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

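/*
 * Parse the optional _PSD package to obtain the P-state dependency domain
 * (domain number, coordination type, number of processors) for this CPU.
 * Returns 0 if _PSD is absent or valid, -ENODEV or -EFAULT otherwise.
 */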
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}

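/**
 * acpi_cpc_valid - Check that a _CPC object was parsed for every CPU.
 *
 * Return: true if every present CPU has a valid cpc_desc, false otherwise.
 */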
bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

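/**
 * cppc_allow_fast_switch - Check if fast performance switching is possible.
 *
 * Fast switching requires the Desired Performance register of every CPU to
 * be in SystemMemory or SystemIo space, so that it can be written without
 * a (potentially sleeping) PCC transaction.
 *
 * Return: true if all Desired Performance registers qualify.
 */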
bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
		    !CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

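/*
 * Request the PCC mailbox channel for the given subspace, derive the polling
 * deadline and the MRTT/MPAR rate limits from the PCCT timing values, and
 * map the shared communication region. This runs once per subspace; CPUs
 * sharing the subspace reuse the acquired channel.
 */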
static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}


/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name (_CPC, Package() {
 *	17,	// NumEntries
 *	1,	// Revision
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *	ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *	ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *	...
 *	...
 *	...
 *	}
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9,	// AccessSize (subspace ID)
 *	)
 */

#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (osc_sb_cppc_not_supported)
		return -ENODEV;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
			 cpc_obj->type, pr->id);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
						 pr->id);
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						goto out_free;
					}

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type (%d) in _CPC\n",
						 gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Invalid entry type for entry %d in _CPC for CPU:%d\n",
				 i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	arch_init_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
 * be as fast as possible. We have already mapped the PCC subspace during init,
 * so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
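		/* access_width 1, 2 and 3 encode 8-, 16- and 32-bit port accesses. */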
		u32 width = 8 << (reg->access_width - 1);
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, width);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		return -EFAULT;
	}

	return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 width = 8 << (reg->access_width - 1);
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, width);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

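/*
 * Read a single performance register for @cpunum. If the register lives in
 * PCC space, take the channel write lock and ring the doorbell (CMD_READ)
 * so the platform refreshes the subspace before the read.
 */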
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value.
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the optional ctr_wrap_time register is unsupported,
	 * then the performance counters are assumed to never wrap during
	 * the lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
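
/*
 * Note: callers typically sample the feedback counters twice and estimate
 * the average delivered performance over the interval as
 *
 *	perf = reference_perf * delta(delivered) / delta(reference)
 *
 * (see the cppc_cpufreq driver for an example).
 */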

/**
 * cppc_set_enable - Enable or disable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {
		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* After writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel.
	 *
	 * Since the read lock can be acquired by multiple CPUs simultaneously,
	 * we achieve that goal here.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch
		 * to the write lock.
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform.
	 *
	 * Short Summary: If we think of a group of cppc_set_perf requests that
	 * happened in a short overlapping interval, the last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I.
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II.
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE:
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (the other CPU acquiring the write_lock) couldn't have
	 * happened before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing a pcc CMD_READ has stolen the
	 * down_write, in which case send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd, so this
	 * CPU can be certain that its request will be delivered.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by some other CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 * @cpu_num: CPU number for which the latency is evaluated.
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values.
	 * Below are the definitions from the ACPI spec:
	 * pcc_nominal - Expected latency to process a command, in microseconds
	 * pcc_mpar    - The maximum number of periodic requests that the subspace
	 *               channel can support, reported in commands per minute. 0
	 *               indicates no limitation.
	 * pcc_mrtt    - The minimum amount of time that OSPM must wait after the
	 *               completion of a command before issuing the next command,
	 *               in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
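	/*
	 * MPAR is in commands/min, so the per-command budget is 60/MPAR
	 * seconds, e.g. 100 ms at 600 commands/min.
	 */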
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);