/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_RESCTRL_H
#define _ASM_X86_RESCTRL_H

#ifdef CONFIG_X86_CPU_RESCTRL

#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/resctrl_types.h>
#include <linux/sched.h>

#include <asm/msr.h>

/*
 * This value can never be a valid CLOSID, and is used when mapping a
 * (closid, rmid) pair to an index and back. On x86 only the RMID is
 * needed. The index is a software defined value.
 */
#define X86_RESCTRL_EMPTY_CLOSID	((u32)~0)

/**
 * struct resctrl_pqr_state - State cache for the PQR MSR
 * @cur_rmid:		The cached Resource Monitoring ID
 * @cur_closid:		The cached Class Of Service ID
 * @default_rmid:	The user assigned Resource Monitoring ID
 * @default_closid:	The user assigned Class Of Service ID
 *
 * The upper 32 bits of MSR_IA32_PQR_ASSOC contain the closid and the
 * lower 10 bits the rmid. An update to MSR_IA32_PQR_ASSOC always
 * writes both parts, so both must be cached. This structure also
 * stores the user configured per-cpu CLOSID and RMID.
 *
 * The cache also helps to avoid pointless updates when the value
 * does not change.
 */
struct resctrl_pqr_state {
	u32			cur_rmid;
	u32			cur_closid;
	u32			default_rmid;
	u32			default_closid;
};

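/*
 * Illustrative sketch (not part of this header): the 64-bit value that
 * lands in MSR_IA32_PQR_ASSOC combines both cached fields, which is why
 * a change to either one requires rewriting the whole MSR:
 *
 *	u64 pqr = ((u64)cur_closid << 32) | cur_rmid;
 *
 * The wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid) in __resctrl_sched_in()
 * below produces exactly this layout: the low u32 (rmid) goes in EAX,
 * the high u32 (closid) in EDX.
 */
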
DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);

extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;
extern unsigned int rdt_mon_features;

DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);

static inline bool resctrl_arch_alloc_capable(void)
{
	return rdt_alloc_capable;
}

static inline void resctrl_arch_enable_alloc(void)
{
	static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_alloc(void)
{
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

static inline bool resctrl_arch_mon_capable(void)
{
	return rdt_mon_capable;
}

static inline void resctrl_arch_enable_mon(void)
{
	static_branch_enable_cpuslocked(&rdt_mon_enable_key);
	static_branch_inc_cpuslocked(&rdt_enable_key);
}

static inline void resctrl_arch_disable_mon(void)
{
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_dec_cpuslocked(&rdt_enable_key);
}

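/*
 * Note the asymmetry above: the alloc/mon keys are plain on/off
 * switches, while rdt_enable_key is reference counted via
 * static_branch_inc/dec_cpuslocked() so it stays enabled as long as
 * either allocation or monitoring is enabled. A minimal sketch of the
 * resulting states (illustrative only):
 *
 *	resctrl_arch_enable_alloc();	// rdt_enable_key count == 1
 *	resctrl_arch_enable_mon();	// rdt_enable_key count == 2
 *	resctrl_arch_disable_alloc();	// count == 1, still enabled
 *	resctrl_arch_disable_mon();	// count == 0, disabled
 */
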
static inline bool resctrl_arch_is_llc_occupancy_enabled(void)
{
	return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID));
}

static inline bool resctrl_arch_is_mbm_total_enabled(void)
{
	return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID));
}

static inline bool resctrl_arch_is_mbm_local_enabled(void)
{
	return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
}

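/*
 * rdt_mon_features is a bitmap indexed by enum resctrl_event_id from
 * <linux/resctrl_types.h>; the three helpers above are the same test
 * against different bits. A hypothetical sketch of how a bit could be
 * set during feature detection:
 *
 *	rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
 */
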
/*
 * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
 *
 * The following considerations are made to keep the impact on the
 * scheduler hot path minimal:
 * - This remains a no-op unless we are running on a CPU which supports
 *   resource control or monitoring, and the resctrl file system has
 *   been mounted to enable it.
 * - Caches the per cpu CLOSid/RMID values and writes the MSR only
 *   when a task with a different CLOSid/RMID is scheduled in.
 * - RMIDs/CLOSids are allocated globally in order to keep this as
 *   simple as possible.
 * Must be called with preemption disabled.
 */
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = READ_ONCE(state->default_closid);
	u32 rmid = READ_ONCE(state->default_rmid);
	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
		tmp = READ_ONCE(tsk->closid);
		if (tmp)
			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
		tmp = READ_ONCE(tsk->rmid);
		if (tmp)
			rmid = tmp;
	}

	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);
	}
}

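/*
 * Example of the fallback above (a sketch, not stated in this header):
 * tsk->closid == 0 and tsk->rmid == 0 mean "not assigned", as is the
 * case for tasks in the default resctrl group, so both reads of tmp
 * are zero and the per-cpu default_closid/default_rmid win. Only a
 * task moved to a non-default group overrides the CPU defaults with
 * its own IDs.
 */
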
static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
{
	unsigned int scale = boot_cpu_data.x86_cache_occ_scale;

	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
	val /= scale;
	return val * scale;
}

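/*
 * Worked example (illustrative; the scale value is an assumption):
 * with x86_cache_occ_scale == 64, a value of 1000 becomes
 * 1000 / 64 * 64 == 960, i.e. values are rounded down to the
 * hardware counter granularity.
 */
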
static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid,
							    u32 rmid)
{
	WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid);
	WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid);
}

static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
						u32 closid, u32 rmid)
{
	WRITE_ONCE(tsk->closid, closid);
	WRITE_ONCE(tsk->rmid, rmid);
}

static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
{
	return READ_ONCE(tsk->closid) == closid;
}

static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
					   u32 rmid)
{
	return READ_ONCE(tsk->rmid) == rmid;
}

static inline void resctrl_arch_sched_in(struct task_struct *tsk)
{
	if (static_branch_likely(&rdt_enable_key))
		__resctrl_sched_in(tsk);
}

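/*
 * Illustrative call site (a sketch; the real hook lives in the
 * context-switch path, e.g. __switch_to()):
 *
 *	resctrl_arch_sched_in(next_task);
 *
 * Because rdt_enable_key is a static branch, the check is patched at
 * runtime and costs roughly one NOP/jump while resctrl is unused.
 */
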
static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
{
	*rmid = idx;
	*closid = X86_RESCTRL_EMPTY_CLOSID;
}

static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid)
{
	return rmid;
}

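/*
 * Round-trip sketch of the helpers above: on x86 the index is simply
 * the RMID; the CLOSID is discarded on encode and reported as "empty"
 * on decode:
 *
 *	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); // == rmid
 *	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);
 *	// now rmid == idx and closid == X86_RESCTRL_EMPTY_CLOSID
 */
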
/* x86 can always read an rmid, nothing needs allocating */
struct rdt_resource;
static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r,
					       enum resctrl_event_id evtid)
{
	might_sleep();
	return NULL;
}

static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r,
					     enum resctrl_event_id evtid,
					     void *ctx) { }

void resctrl_cpu_detect(struct cpuinfo_x86 *c);

#else

static inline void resctrl_arch_sched_in(struct task_struct *tsk) {}
static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}

#endif /* CONFIG_X86_CPU_RESCTRL */

#endif /* _ASM_X86_RESCTRL_H */