#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int'
 * actual argument. The MPOL_MODE_FLAGS macro determines the legal set of
 * optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
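/*
 * Illustrative sketch (not part of this header): a userspace caller
 * packs one MPOL_* mode together with optional MPOL_F_* mode flags
 * into the single 'int' argument. The nodemask value is an assumption
 * chosen for the example; <numaif.h> is libnuma's syscall wrapper.
 *
 *	unsigned long nodemask = 0x3;	(nodes 0 and 1)
 *	long err = set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *				 &nodemask, sizeof(nodemask) * 8);
 */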

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
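/*
 * Illustrative sketch (not part of this header): querying the policy
 * that governs a particular address with MPOL_F_ADDR. 'addr' is an
 * assumed mapping created elsewhere.
 *
 *	int mode;
 *	unsigned long nodemask = 0;
 *	long err = get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
 *				 addr, MPOL_F_ADDR);
 */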

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
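/*
 * Illustrative sketch (not part of this header): binding an existing
 * mapping to node 0 and migrating its current pages there. 'buf' and
 * 'len' are assumed to describe a mapping created with mmap().
 *
 *	unsigned long nodemask = 0x1;	(node 0 only)
 *	long err = mbind(buf, len, MPOL_BIND, &nodemask,
 *			 sizeof(nodemask) * 8,
 *			 MPOL_MF_MOVE | MPOL_MF_STRICT);
 */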

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags". These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
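/*
 * Illustrative layout sketch: the internal flags grow up from bit 0
 * while the user-visible mode flags sit at the top of the word, so the
 * two sets cannot collide in the 16-bit flags field:
 *
 *	bit 15: MPOL_F_STATIC_NODES	(mode flag)
 *	bit 14: MPOL_F_RELATIVE_NODES	(mode flag)
 *	...
 *	bit  1: MPOL_F_LOCAL		(internal)
 *	bit  0: MPOL_F_SHARED		(internal)
 */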

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
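/*
 * Illustrative sketch (assumed helper, not in this header): how the
 * 'v' union is interpreted according to the mode field.
 *
 *	static inline int example_preferred_node(struct mempolicy *pol)
 *	{
 *		if (pol->mode == MPOL_PREFERRED)
 *			return pol->v.preferred_node;
 *		return -1;	(v.nodes is the valid member for
 *				 MPOL_BIND/MPOL_INTERLEAVE instead)
 *	}
 */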

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
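/*
 * Illustrative usage sketch (assumed caller, not in this header):
 * a lookup that may return a shared policy pairs naturally with
 * mpol_cond_put(), which drops the reference only when one was taken.
 *
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *	... allocate pages according to pol ...
 *	mpol_cond_put(pol);
 */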

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
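/*
 * Illustrative lifecycle sketch (assumed caller, not in this header):
 * duplicating a policy yields an object with refcnt initialized to 1
 * that the caller owns and must eventually release with mpol_put().
 *
 *	struct mempolicy *new = mpol_dup(old);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... use new ...
 *	mpol_put(new);
 */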

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
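/*
 * Illustrative sketch (assumed caller, not in this header): because the
 * shared policy tree is indexed in pages rather than bytes, a byte
 * offset into the segment is converted to a page index first.
 *
 *	pgoff_t idx = (pgoff_t)(offset >> PAGE_SHIFT);
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *	... use pol ...
 *	mpol_cond_put(pol);	(lookup returns a referenced shared policy)
 */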

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
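/*
 * Illustrative sketch (not part of this header): do_migrate_pages()
 * backs the migrate_pages(2) syscall; from userspace the equivalent
 * request moves a task's pages from one node set to another.
 *
 *	unsigned long old_nodes = 0x1, new_nodes = 0x2; (node 0 -> node 1)
 *	long err = migrate_pages(pid, sizeof(unsigned long) * 8,
 *				 &old_nodes, &new_nodes);
 */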


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);
#endif
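/*
 * Illustrative sketch (not part of this header): mpol_parse_str()
 * handles tmpfs mount-option strings such as:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt/tmp
 *
 * where "interleave:0-3" is parsed into an MPOL_INTERLEAVE policy over
 * nodes 0-3, and mpol_to_str() formats a policy back into that form.
 */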

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
			return 0;
	return 1;
}

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif