/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/**
 * struct em_perf_state - Performance state of a performance domain
 * @performance: CPU performance (capacity) at a given frequency
 * @frequency: The frequency in KHz, for consistency with CPUFreq
 * @power: The power consumed at this level (by 1 CPU or by a registered
 *         device). It can be a total power: static and dynamic.
 * @cost: The cost coefficient associated with this level, used during
 *        energy calculation. Equal to: power * max_frequency / frequency
 * @flags: see "em_perf_state flags" description below.
 */
struct em_perf_state {
        unsigned long performance;
        unsigned long frequency;
        unsigned long power;
        unsigned long cost;
        unsigned long flags;
};

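/*
 * Illustrative sketch (not part of the kernel API): the @cost coefficient
 * documented above follows directly from @power and @frequency. Assuming a
 * table whose highest state holds the maximum frequency, each state's cost
 * could be derived as below; this is roughly what em_dev_compute_costs()
 * does when no get_cost() callback is supplied:
 *
 *      static void example_fill_costs(struct em_perf_state *table,
 *                                     int nr_states)
 *      {
 *              unsigned long max_freq = table[nr_states - 1].frequency;
 *              int i;
 *
 *              for (i = 0; i < nr_states; i++)
 *                      table[i].cost = div64_u64((u64)max_freq * table[i].power,
 *                                                table[i].frequency);
 *      }
 */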
/*
 * em_perf_state flags:
 *
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is,
 * in this em_perf_domain, another performance state with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
 */
#define EM_PERF_STATE_INEFFICIENT BIT(0)

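/*
 * Illustrative sketch (an assumption, not the kernel's internal routine): a
 * state is inefficient when some higher-frequency state in the same table is
 * not more expensive. Scanning from the highest state downwards while
 * tracking the lowest cost seen so far makes that check a single pass:
 *
 *      static void example_mark_inefficient(struct em_perf_state *table,
 *                                           int nr_states)
 *      {
 *              unsigned long min_cost = ULONG_MAX;
 *              int i;
 *
 *              for (i = nr_states - 1; i >= 0; i--) {
 *                      if (table[i].cost >= min_cost)
 *                              table[i].flags |= EM_PERF_STATE_INEFFICIENT;
 *                      else
 *                              min_cost = table[i].cost;
 *              }
 *      }
 */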
/**
 * struct em_perf_table - Performance states table
 * @rcu: RCU used for safe access and destruction
 * @kref: Reference counter to track the users
 * @state: List of performance states, in ascending order
 */
struct em_perf_table {
        struct rcu_head rcu;
        struct kref kref;
        struct em_perf_state state[];
};

/**
 * struct em_perf_domain - Performance domain
 * @em_table: Pointer to the runtime modifiable em_perf_table
 * @node: node in em_pd_list (in energy_model.c)
 * @id: A unique ID number for each performance domain
 * @nr_perf_states: Number of performance states
 * @min_perf_state: Minimum allowed Performance State index
 * @max_perf_state: Maximum allowed Performance State index
 * @flags: See "em_perf_domain flags"
 * @cpus: Cpumask covering the CPUs of the domain. It's here
 *        for performance reasons to avoid potential cache
 *        misses during energy calculations in the scheduler
 *        and simplifies allocating/freeing that memory region.
 *
 * In the case of a CPU device, a "performance domain" represents a group of
 * CPUs whose performance is scaled together. All CPUs of a performance domain
 * must have the same micro-architecture. Performance domains often have
 * a 1-to-1 mapping with CPUFreq policies. For other devices the @cpus
 * field is unused.
 */
struct em_perf_domain {
        struct em_perf_table __rcu *em_table;
        struct list_head node;
        int id;
        int nr_perf_states;
        int min_perf_state;
        int max_perf_state;
        unsigned long flags;
        unsigned long cpus[];
};

/*
 * em_perf_domain flags:
 *
 * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some
 * other scale.
 *
 * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
 * energy consumption.
 *
 * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
 * created by a platform missing real power information.
 */
#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)

#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)

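/*
 * Illustrative usage sketch (an assumption, not mandated by this header):
 * given a CPU's performance domain, em_span_cpus() yields a cpumask suitable
 * for the regular cpumask iterators, e.g.:
 *
 *      struct em_perf_domain *pd = em_cpu_get(cpu);
 *      int i;
 *
 *      if (pd)
 *              for_each_cpu(i, em_span_cpus(pd))
 *                      pr_debug("CPU%d shares the perf domain\n", i);
 */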
#ifdef CONFIG_ENERGY_MODEL
/*
 * The max power value in micro-Watts. The limit of 64 Watts is set as
 * a safety net to avoid overflowing multiplications on 32-bit platforms.
 * The 32-bit limit on the total Perf Domain power implies a limit of at
 * most 64 CPUs in such a domain.
 */
#define EM_MAX_POWER (64000000) /* 64 Watts */

/*
 * To avoid possible energy estimation overflow on 32-bit machines, limit
 * the number of CPUs in the Perf Domain.
 * 64-bit machines are safe, so a much larger number is used there.
 */
#ifdef CONFIG_64BIT
#define EM_MAX_NUM_CPUS 4096
#else
#define EM_MAX_NUM_CPUS 16
#endif

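/*
 * Worked example of the bound above (illustrative arithmetic only): with
 * every CPU drawing EM_MAX_POWER, a 64-CPU domain sums to
 * 64 * 64,000,000 uW = 4,096,000,000, which still fits below
 * U32_MAX = 4,294,967,295, so a per-domain power sum cannot overflow a
 * 32-bit accumulator.
 */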
struct em_data_callback {
        /**
         * active_power() - Provide power at the next performance state of
         *                  a device
         * @dev   : Device for which we do this operation (can be a CPU)
         * @power : Active power at the performance state
         *          (modified)
         * @freq  : Frequency at the performance state in kHz
         *          (modified)
         *
         * active_power() must find the lowest performance state of 'dev' above
         * 'freq' and update 'power' and 'freq' to the matching active power
         * and frequency.
         *
         * In case of CPUs, the power is the one of a single CPU in the domain,
         * expressed in micro-Watts or an abstract scale. It is expected to
         * fit in the [0, EM_MAX_POWER] range.
         *
         * Return 0 on success.
         */
        int (*active_power)(struct device *dev, unsigned long *power,
                            unsigned long *freq);

        /**
         * get_cost() - Provide the cost at the given performance state of
         *              a device
         * @dev  : Device for which we do this operation (can be a CPU)
         * @freq : Frequency at the performance state in kHz
         * @cost : The cost value for the performance state
         *         (modified)
         *
         * In case of CPUs, the cost is the one of a single CPU in the domain.
         * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
         * usage in EAS calculation.
         *
         * Return 0 on success, or appropriate error value in case of failure.
         */
        int (*get_cost)(struct device *dev, unsigned long freq,
                        unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb)      \
        { .active_power = _active_power_cb,             \
          .get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb) \
                EM_ADV_DATA_CB(_active_power_cb, NULL)

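/*
 * Illustrative sketch of a driver-side callback (hypothetical names, not part
 * of this header): an active_power() implementation is expected to round
 * 'freq' up to the next supported operating point and report the matching
 * per-CPU power, e.g. from a driver-private table:
 *
 *      static int example_active_power(struct device *dev, unsigned long *power,
 *                                      unsigned long *freq)
 *      {
 *              struct example_opp *opp = example_find_opp_ceil(dev, *freq);
 *
 *              if (!opp)
 *                      return -EINVAL;
 *
 *              *freq = opp->freq_khz;
 *              *power = opp->power_uw;
 *              return 0;
 *      }
 *
 *      static const struct em_data_callback em_cb = EM_DATA_CB(example_active_power);
 */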
struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
                              struct em_perf_table *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
                                const struct em_data_callback *cb,
                                const cpumask_t *cpus, bool microwatts);
int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
                                 const struct em_data_callback *cb,
                                 const cpumask_t *cpus, bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
                         int nr_states);
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
                                 unsigned long freq_min_khz, unsigned long freq_max_khz);
void em_adjust_cpu_capacity(unsigned int cpu);
void em_rebuild_sched_domains(void);

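/*
 * Illustrative registration sketch (hypothetical driver code, reusing the
 * callback from the example above): a CPUFreq-style driver would typically
 * register one perf domain per policy, passing true for @microwatts when its
 * power values are reported in micro-Watts:
 *
 *      static int example_register_em(struct device *cpu_dev,
 *                                     const struct cpumask *policy_cpus,
 *                                     unsigned int nr_opps)
 *      {
 *              static const struct em_data_callback em_cb =
 *                              EM_DATA_CB(example_active_power);
 *
 *              return em_dev_register_perf_domain(cpu_dev, nr_opps, &em_cb,
 *                                                 policy_cpus, true);
 *      }
 */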
/**
 * em_pd_get_efficient_state() - Get an efficient performance state from the EM
 * @table: List of performance states, in ascending order
 * @pd: performance domain for which this must be done
 * @max_util: Max utilization to map with the EM
 *
 * It is called from the scheduler code quite frequently and as a consequence
 * doesn't implement any check.
 *
 * Return: An efficient performance state id, high enough to meet @max_util
 * requirement.
 */
static inline int
em_pd_get_efficient_state(struct em_perf_state *table,
                          struct em_perf_domain *pd, unsigned long max_util)
{
        unsigned long pd_flags = pd->flags;
        int min_ps = pd->min_perf_state;
        int max_ps = pd->max_perf_state;
        struct em_perf_state *ps;
        int i;

        for (i = min_ps; i <= max_ps; i++) {
                ps = &table[i];
                if (ps->performance >= max_util) {
                        if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
                            ps->flags & EM_PERF_STATE_INEFFICIENT)
                                continue;
                        return i;
                }
        }

        return max_ps;
}

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
 *                   performance domain
 * @pd              : performance domain for which energy has to be estimated
 * @max_util        : highest utilization among CPUs of the domain
 * @sum_util        : sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
 *                    might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation,
 * i.e. no check that the EM is of CPU type and has a cpumask allocated. It is
 * called from the scheduler code quite frequently, which is why it performs
 * no checks.
 *
 * Return: the sum of the energy consumed by the CPUs of the domain assuming
 * a capacity state satisfying the max utilization of the domain.
 */
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
                                unsigned long max_util, unsigned long sum_util,
                                unsigned long allowed_cpu_cap)
{
        struct em_perf_table *em_table;
        struct em_perf_state *ps;
        int i;

        WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");

        if (!sum_util)
                return 0;

        /*
         * In order to predict the performance state, map the utilization of
         * the most utilized CPU of the performance domain to a requested
         * performance, like schedutil. Take also into account that the real
         * performance might be set lower (due to thermal capping). Thus, clamp
         * max utilization to the allowed CPU capacity before calculating
         * effective performance.
         */
        max_util = min(max_util, allowed_cpu_cap);

        /*
         * Find the lowest performance state of the Energy Model above the
         * requested performance.
         */
        em_table = rcu_dereference(pd->em_table);
        i = em_pd_get_efficient_state(em_table->state, pd, max_util);
        ps = &em_table->state[i];

        /*
         * The performance (capacity) of a CPU in the domain at the performance
         * state (ps) can be computed as:
         *
         *                       ps->freq * scale_cpu
         *   ps->performance  =  --------------------                   (1)
         *                           cpu_max_freq
         *
         * So, ignoring the costs of idle states (which are not available in
         * the EM), the energy consumed by this CPU at that performance state
         * is estimated as:
         *
         *              ps->power * cpu_util
         *   cpu_nrg  =  --------------------                           (2)
         *                 ps->performance
         *
         * since 'cpu_util / ps->performance' represents its percentage of busy
         * time.
         *
         *   NOTE: Although the result of this computation actually is in
         *         units of power, it can be manipulated as an energy value
         *         over a scheduling period, since it is assumed to be
         *         constant during that interval.
         *
         * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
         * of two terms:
         *
         *              ps->power * cpu_max_freq
         *   cpu_nrg  =  ------------------------ * cpu_util            (3)
         *                 ps->freq * scale_cpu
         *
         * The first term is static, and is stored in the em_perf_state struct
         * as 'ps->cost'.
         *
         * Since all CPUs of the domain have the same micro-architecture, they
         * share the same 'ps->cost', and the same CPU capacity. Hence, the
         * total energy of the domain (which is the simple sum of the energy of
         * all of its CPUs) can be factorized as:
         *
         *   pd_nrg = ps->cost * \Sum cpu_util                          (4)
         */
        return ps->cost * sum_util;
}

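/*
 * Illustrative call pattern (a sketch mirroring how an energy-aware caller
 * might use this helper; all names other than the EM API are hypothetical):
 * the RCU read lock must be held around the table access, as warned above:
 *
 *      unsigned long energy;
 *
 *      rcu_read_lock();
 *      energy = em_cpu_energy(pd, max_util, sum_util, allowed_cpu_cap);
 *      rcu_read_unlock();
 */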
/**
 * em_pd_nr_perf_states() - Get the number of performance states of a perf.
 *                          domain
 * @pd : performance domain for which this must be done
 *
 * Return: the number of performance states in the performance domain table
 */
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
        return pd->nr_perf_states;
}

/**
 * em_perf_state_from_pd() - Get the performance states table of perf.
 *                           domain
 * @pd : performance domain for which this must be done
 *
 * To use this function, rcu_read_lock() must be held. After use of the
 * performance states table is finished, rcu_read_unlock() should be called.
 *
 * Return: the pointer to the performance states table of the performance
 * domain
 */
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
        return rcu_dereference(pd->em_table)->state;
}

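/*
 * Illustrative RCU usage sketch (a hypothetical debugging snippet, not part
 * of this header): the returned pointer is only valid inside the RCU
 * read-side critical section:
 *
 *      struct em_perf_state *table;
 *      int i;
 *
 *      rcu_read_lock();
 *      table = em_perf_state_from_pd(pd);
 *      for (i = 0; i < em_pd_nr_perf_states(pd); i++)
 *              pr_debug("state %d: %lu kHz, %lu uW\n", i,
 *                       table[i].frequency, table[i].power);
 *      rcu_read_unlock();
 */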
#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
                                const struct em_data_callback *cb,
                                const cpumask_t *cpus, bool microwatts)
{
        return -EINVAL;
}
static inline
int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
                                 const struct em_data_callback *cb,
                                 const cpumask_t *cpus, bool microwatts)
{
        return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
        return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
        return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
                        unsigned long max_util, unsigned long sum_util,
                        unsigned long allowed_cpu_cap)
{
        return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
        return 0;
}
static inline
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
{
        return NULL;
}
static inline void em_table_free(struct em_perf_table *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
                              struct em_perf_table *new_table)
{
        return -EINVAL;
}
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
        return NULL;
}
static inline
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
                         int nr_states)
{
        return -EINVAL;
}
static inline int em_dev_update_chip_binning(struct device *dev)
{
        return -EINVAL;
}
static inline
int em_update_performance_limits(struct em_perf_domain *pd,
                                 unsigned long freq_min_khz, unsigned long freq_max_khz)
{
        return -EINVAL;
}
static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
static inline void em_rebuild_sched_domains(void) {}
#endif

#endif