// SPDX-License-Identifier: GPL-2.0-only
/*
 * Performance events - AMD Processor Power Reporting Mechanism
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 */
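
/*
 * Example (sketch): once loaded, the package power can be read system-wide
 * via perf, e.g. "perf stat -a -e power/power-pkg/ sleep 1" (the "power"
 * PMU and its power-pkg event are registered below).
 */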

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "../perf_event.h"

/* Event code: LSB 8 bits, passed in attr->config; any other bit is reserved. */
#define AMD_POWER_EVENT_MASK	0xFFULL

/*
 * Accumulated power status counters.
 */
#define AMD_POWER_EVENTSEL_PKG	1

/*
 * The ratio of the compute unit power accumulator sample period to the
 * PTSC period.
 */
static unsigned int cpu_pwr_sample_ratio;

/* Maximum accumulated power of a compute unit. */
static u64 max_cu_acc_power;

static struct pmu pmu_class;

/*
 * Accumulated power represents the sum of each compute unit's (CU) power
 * consumption. On any core of each CU we read the total accumulated power
 * from MSR_F15H_CU_PWR_ACCUMULATOR. cpu_mask is the bitmap of the cores
 * picked to measure the power for the CUs they belong to.
 */
static cpumask_t cpu_mask;

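/*
 * Read the current accumulator and PTSC values and fold the power consumed
 * since the last snapshot into the event count.
 */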
static void event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_pwr_acc, new_pwr_acc, prev_ptsc, new_ptsc;
	u64 delta, tdelta;

	prev_pwr_acc = hwc->pwr_acc;
	prev_ptsc = hwc->ptsc;
	rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
	rdmsrq(MSR_F15H_PTSC, new_ptsc);

	/*
	 * Calculate the CU power consumption over a time period; the unit of
	 * the final value (delta) is micro-Watts. Then add it to the event
	 * count.
	 */
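	/* A smaller new value means the accumulator wrapped past its maximum. */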
	if (new_pwr_acc < prev_pwr_acc) {
		delta = max_cu_acc_power + new_pwr_acc;
		delta -= prev_pwr_acc;
	} else {
		delta = new_pwr_acc - prev_pwr_acc;
	}

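	/* delta * ratio / tdelta yields milli-Watts; the extra 1000 gives micro-Watts. */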
	delta *= cpu_pwr_sample_ratio * 1000;
	tdelta = new_ptsc - prev_ptsc;

	do_div(delta, tdelta);
	local64_add(delta, &event->count);
}

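/*
 * Snapshot the current PTSC and power accumulator values; event_update()
 * computes deltas against this baseline.
 */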
static void __pmu_event_start(struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	rdmsrq(MSR_F15H_PTSC, event->hw.ptsc);
	rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
}

static void pmu_event_start(struct perf_event *event, int mode)
{
	__pmu_event_start(event);
}

static void pmu_event_stop(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Mark event as deactivated and stopped. */
	if (!(hwc->state & PERF_HES_STOPPED))
		hwc->state |= PERF_HES_STOPPED;

	/* Check if software counter update is necessary. */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

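/* Events start stopped and up to date; PERF_EF_START begins counting at once. */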
static int pmu_event_add(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__pmu_event_start(event);

	return 0;
}

static void pmu_event_del(struct perf_event *event, int flags)
{
	pmu_event_stop(event, PERF_EF_UPDATE);
}

static int pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & AMD_POWER_EVENT_MASK;

	/* Only look at AMD power events. */
	if (event->attr.type != pmu_class.type)
		return -ENOENT;

	/* Unsupported modes and filters. */
	if (event->attr.sample_period)
		return -EINVAL;

	if (cfg != AMD_POWER_EVENTSEL_PKG)
		return -EINVAL;

	return 0;
}

static void pmu_event_read(struct perf_event *event)
{
	event_update(event);
}

static ssize_t
get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, get_attr_cpumask, NULL);

static struct attribute *pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

/*
 * Currently the PMU only supports reporting the power of each
 * processor/package.
 */
EVENT_ATTR_STR(power-pkg, power_pkg, "event=0x01");

EVENT_ATTR_STR(power-pkg.unit, power_pkg_unit, "mWatts");

/* Convert the count from micro-Watts to milli-Watts. */
EVENT_ATTR_STR(power-pkg.scale, power_pkg_scale, "1.000000e-3");

static struct attribute *events_attr[] = {
	EVENT_PTR(power_pkg),
	EVENT_PTR(power_pkg_unit),
	EVENT_PTR(power_pkg_scale),
	NULL,
};

static struct attribute_group pmu_events_group = {
	.name = "events",
	.attrs = events_attr,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group pmu_format_group = {
	.name = "format",
	.attrs = formats_attr,
};

static const struct attribute_group *attr_groups[] = {
	&pmu_attr_group,
	&pmu_format_group,
	&pmu_events_group,
	NULL,
};

static struct pmu pmu_class = {
	.attr_groups = attr_groups,
	/* system-wide only */
	.task_ctx_nr = perf_invalid_context,
	.event_init = pmu_event_init,
	.add = pmu_event_add,
	.del = pmu_event_del,
	.start = pmu_event_start,
	.stop = pmu_event_stop,
	.read = pmu_event_read,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	.module = THIS_MODULE,
};

static int power_cpu_exit(unsigned int cpu)
{
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
		return 0;

	/*
	 * This CPU was the designated reader for its compute unit. If other
	 * CPUs of that unit are still online, pick one of them and migrate
	 * the events and context to it.
	 */
	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
	if (target < nr_cpumask_bits) {
		cpumask_set_cpu(target, &cpu_mask);
		perf_pmu_migrate_context(&pmu_class, cpu, target);
	}
	return 0;
}

static int power_cpu_init(unsigned int cpu)
{
	int target;

	/*
	 * 1) If any CPU of the same compute unit is already in cpu_mask,
	 *    do nothing.
	 * 2) If no CPU of the same compute unit is in cpu_mask, designate
	 *    the current (newly online) CPU.
	 *
	 * Note: if there is a CPU aside of the new one already in the
	 * sibling mask, then it is also in cpu_mask.
	 */
	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
	if (target >= nr_cpumask_bits)
		cpumask_set_cpu(cpu, &cpu_mask);
	return 0;
}

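/* Match all AMD Family 15h parts; the ACC_POWER feature is checked at init. */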
static const struct x86_cpu_id cpu_match[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
	{},
};

static int __init amd_power_pmu_init(void)
{
	int ret;

	if (!x86_match_cpu(cpu_match))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
		return -ENODEV;

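	/* CPUID Fn8000_0007_ECX: compute unit power sample time ratio. */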
	cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);

	if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
		pr_err("Failed to read max compute unit power accumulator MSR\n");
		return -ENODEV;
	}

	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
			  "perf/x86/amd/power:online",
			  power_cpu_init, power_cpu_exit);

	ret = perf_pmu_register(&pmu_class, "power", -1);
	if (WARN_ON(ret)) {
		pr_warn("AMD Power PMU registration failed\n");
		cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
		return ret;
	}

	pr_info("AMD Power PMU detected\n");
	return ret;
}
module_init(amd_power_pmu_init);

static void __exit amd_power_pmu_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
	perf_pmu_unregister(&pmu_class);
}
module_exit(amd_power_pmu_exit);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor Power Reporting Mechanism");
MODULE_LICENSE("GPL v2");