#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

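/*
 * Usage sketch (added commentary; cpuset_fastpath_example() is a
 * hypothetical name, not a kernel function): callers are expected to
 * test cpusets_enabled() before doing any cpuset work, so that with only
 * the root cpuset present the check compiles down to a patched no-op
 * branch via the jump label.
 */
static inline bool cpuset_fastpath_example(void)
{
	if (!cpusets_enabled())
		return true;	/* fast path: only the root cpuset exists */
	/* slow path: a real caller would consult per-task cpuset state */
	return nr_cpusets() > 1;
}
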
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

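/*
 * Usage sketch (added commentary; example_zone_permitted() is a
 * hypothetical helper): the page allocator's zonelist walk is the classic
 * softwall caller, skipping zones on nodes the current task's cpuset does
 * not permit.  The hardwall variant enforces the task's own cpuset
 * strictly, while the softwall variant may let GFP_KERNEL allocations
 * escape to a hardwalled ancestor cpuset.  The nr_cpusets() <= 1 shortcut
 * in the wrappers above makes either check nearly free when only the root
 * cpuset exists.
 */
static inline bool example_zone_permitted(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_zone_allowed_softwall(z, gfp_mask);
}
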
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

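/*
 * Added commentary: the do { } while (0) wrapper makes the macro behave
 * as a single statement (safe in an unbraced if/else), and testing
 * cpuset_memory_pressure_enabled inline keeps the common disabled case
 * to one global load, calling into __cpuset_memory_pressure_bump() only
 * when a user has enabled per-cpuset memory_pressure accounting.
 */
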
extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

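/*
 * Usage sketch (added commentary; example_spread_page_alloc() is a
 * hypothetical helper modeled on page cache allocation): when the task's
 * cpuset has memory spreading enabled, allocate on the rotating spread
 * node instead of following the default local-node policy.
 */
static inline struct page *example_spread_page_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread())
		return alloc_pages_exact_node(cpuset_mem_spread_node(),
					      gfp, 0);
	return alloc_pages(gfp, 0);	/* default NUMA policy */
}
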
extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

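/*
 * Illustrative retry loop (added commentary; example_alloc_with_retry()
 * is a hypothetical helper following the pattern used by page cache and
 * hugetlb allocation): if mems_allowed was updated while we allocated, a
 * NULL result may be an artifact of the rebind rather than genuine memory
 * exhaustion, so the allocation is retried.
 */
static inline struct page *example_alloc_with_retry(gfp_t gfp)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		page = alloc_pages(gfp, 0);
	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

	return page;
}
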
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	/*
	 * task_lock() serializes writers against each other; irqs are
	 * disabled so a reader on this CPU cannot interrupt the write
	 * section and spin forever on an odd sequence count.
	 */
	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */