#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
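
/*
 * Illustrative sketch (hypothetical caller, not part of this header) of
 * the failure mode described above. If retry() were patched in before
 * begin(), a loop running with local irqs disabled could never exit,
 * because begin() still returns 0 while retry() already compares the
 * live seqcount against it:
 *
 *	do {
 *		seq = read_mems_allowed_begin();	// patched out: 0
 *		use(current->mems_allowed);
 *	} while (read_mems_allowed_retry(seq));		// live count != 0
 */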
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

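/*
 * cpuset_inc()/cpuset_dec() flip the two keys in the order the comment
 * above requires: the pre_enable key (seen by begin()) goes on first and
 * off last.
 */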
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
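
/*
 * Illustrative sketch (hypothetical caller, not part of this header): the
 * typical user of the helpers above is a zonelist walk in the page
 * allocator that skips zones on nodes the current cpuset disallows:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (cpusets_enabled() &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */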

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
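
/*
 * Illustrative sketch of the retry loop described above (hypothetical
 * caller; try_alloc_from() is a made-up helper for the example):
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_from(cpuset_current_mems_allowed);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */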

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
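
/*
 * Note on the write side above: task_lock() serializes writers, and the
 * seqcount update runs with local irqs disabled, presumably so that a
 * reader entered from interrupt context on this CPU cannot spin against
 * a half-completed update.
 */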

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */