#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in a single
 * 'int' actual argument. The MPOL_MODE_FLAGS macro determines the legal
 * set of optional mode flags.
 */

/* Policies */
enum {
        MPOL_DEFAULT,
        MPOL_PREFERRED,
        MPOL_BIND,
        MPOL_INTERLEAVE,
        MPOL_MAX,       /* always last member of enum */
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES     (1 << 15)
#define MPOL_F_RELATIVE_NODES   (1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)

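/*
 * Illustrative sketch, not part of this header: how userspace might
 * combine one mode with an optional mode flag in the single 'int'
 * argument of set_mempolicy(2) (prototype from <numaif.h>, libnuma):
 *
 *      unsigned long nodemask = (1UL << 0) | (1UL << 2); // nodes 0,2 (hypothetical)
 *      // Interleave across the given nodes; MPOL_F_STATIC_NODES pins the
 *      // mask so it is not remapped when the task's cpuset changes.
 *      set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *                    &nodemask, sizeof(nodemask) * 8);
 */
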
/* Flags for get_mempolicy */
#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

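/*
 * Illustrative sketch, not part of this header: querying the policy in
 * effect at an address, or the node currently backing it, via
 * get_mempolicy(2) ('addr' is a hypothetical mapped address):
 *
 *      int mode, node;
 *      get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR);
 *      get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */
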
/* Flags for mbind */
#define MPOL_MF_STRICT  (1<<0)  /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE    (1<<1)  /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */

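/*
 * Illustrative sketch, not part of this header: binding an anonymous
 * mapping to one node and migrating this process's existing pages;
 * with MPOL_MF_STRICT, mbind(2) fails if pages cannot be made to
 * conform ('buf' and 'len' are hypothetical):
 *
 *      unsigned long nodemask = 1UL << 1;      // node 1 (hypothetical)
 *      mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *            MPOL_MF_MOVE | MPOL_MF_STRICT);
 */
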
/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags". These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED   (1 << 0)        /* identify shared policies */
#define MPOL_F_LOCAL    (1 << 1)        /* preferred local allocation */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;          /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};

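/*
 * Illustrative sketch, not part of this header: how the unions above
 * are used for two common policies (real construction happens in
 * mm/mempolicy.c):
 *
 *      MPOL_PREFERRED:  mode = MPOL_PREFERRED,  v.preferred_node = <node>
 *      MPOL_INTERLEAVE: mode = MPOL_INTERLEAVE, v.nodes = <nodemask>
 *
 * If MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES was passed,
 * w.user_nodemask remembers the user's original mask; otherwise
 * w.cpuset_mems_allowed records the cpuset context so the policy can
 * be remapped when the allowed nodes change.
 */
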
/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}

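/*
 * Illustrative sketch, not part of this header: the conditional-ref
 * pattern the helpers above support. Lookups of shared policies
 * return an extra reference; other policies are pinned by their owner
 * (hypothetical caller):
 *
 *      struct mempolicy *pol = <policy lookup for a vma or task>;
 *      ... allocate pages according to pol ...
 *      mpol_cond_put(pol);     // drops the ref only if MPOL_F_SHARED
 */
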
extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
                                          struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
                                               struct mempolicy *frompol)
{
        if (!frompol)
                return frompol;
        return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}

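/*
 * Illustrative sketch, not part of this header: duplicating a vma's
 * policy when a mapping is copied, e.g. on fork (cf. kernel/fork.c);
 * 'old_vma' and 'new_vma' are hypothetical:
 *
 *      struct mempolicy *pol = mpol_dup(vma_policy(old_vma));
 *      if (IS_ERR(pol))
 *              return PTR_ERR(pol);    // __mpol_dup() returns ERR_PTR(-ENOMEM)
 *      vma_set_policy(new_vma, pol);   // new_vma now owns the reference
 */
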
#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return 1;
        return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);

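/*
 * Illustrative sketch, not part of this header: shmem-style lookup.
 * Per the comment above, the index is in pages (a pgoff), not bytes;
 * 'info' is a hypothetical per-inode structure embedding the tree:
 *
 *      struct mempolicy *pol;
 *      pol = mpol_shared_policy_lookup(&info->policy, pgoff);
 *      ... a non-NULL result carries a reference (MPOL_F_SHARED);
 *          release it with mpol_cond_put() when done ...
 */
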
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

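/*
 * Illustrative sketch, not part of this header: how the
 * migrate_pages(2) path drives this (cf. sys_migrate_pages in
 * mm/mempolicy.c); moves mm's pages resident on 'from' nodes over to
 * 'to' nodes:
 *
 *      nodemask_t from = nodemask_of_node(0);  // hypothetical source
 *      nodemask_t to   = nodemask_of_node(1);  // hypothetical target
 *      err = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */
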
#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
                        int no_context);
#endif
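
/*
 * Illustrative sketch, not part of this header: tmpfs hands its
 * 'mpol=' mount option here, e.g. "mount -o mpol=interleave:0-3".
 * Note that mpol_parse_str() modifies the string in place, so a
 * writable buffer is needed; it returns 0 on success:
 *
 *      char buf[] = "interleave:0-3";
 *      struct mempolicy *mpol;
 *      if (!mpol_parse_str(buf, &mpol, 1))
 *              ... mpol now describes MPOL_INTERLEAVE over nodes 0-3 ...
 */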
#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
                                                struct mempolicy *from)
{
        return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while (0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
                        const nodemask_t *from_nodes,
                        const nodemask_t *to_nodes, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
                                int no_context)
{
        return 1;       /* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
                                int no_context)
{
        return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif