#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT 0
#define MPOL_PREFERRED 1
#define MPOL_BIND 2
#define MPOL_INTERLEAVE 3

#define MPOL_MAX MPOL_INTERLEAVE

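/*
 * Usage note (editor's sketch, not part of the original header): the MPOL_*
 * modes above are the values userspace passes to the set_mempolicy(2) and
 * mbind(2) system calls.  Assuming the libnuma <numaif.h> wrappers are
 * installed, interleaving the calling task's future allocations across
 * nodes 0 and 1 might look like this:
 *
 *        #include <numaif.h>
 *        #include <stdio.h>
 *
 *        unsigned long nodes = (1UL << 0) | (1UL << 1);
 *        if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
 *                perror("set_mempolicy");
 */
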
/* Flags for get_mem_policy */
#define MPOL_F_NODE (1<<0) /* return next IL mode instead of node mask */
#define MPOL_F_ADDR (1<<1) /* look up vma using address */

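/*
 * Usage note (editor's sketch): these flags go in the last argument of
 * get_mempolicy(2).  MPOL_F_ADDR reports the policy governing a specific
 * address rather than the calling task's policy; MPOL_F_NODE changes what
 * is returned in 'mode' as described above.  A hedged example, again
 * assuming <numaif.h> and a valid mapping 'p' (a placeholder here):
 *
 *        int mode;
 *        unsigned long nodes = 0;
 *        if (get_mempolicy(&mode, &nodes, 8 * sizeof(nodes), p, MPOL_F_ADDR))
 *                perror("get_mempolicy");
 */
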
/* Flags for mbind */
#define MPOL_MF_STRICT (1<<0) /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE (1<<1) /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */

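/*
 * Usage note (editor's sketch): the MPOL_MF_* flags qualify mbind(2).
 * MPOL_MF_STRICT makes the call fail with EIO when existing pages already
 * violate the requested policy; MPOL_MF_MOVE asks the kernel to migrate
 * this process' non-conforming pages instead, and MPOL_MF_MOVE_ALL also
 * moves pages shared with other processes (it requires CAP_SYS_NICE).
 * Binding an existing mapping 'addr'/'len' (placeholder variables) to
 * node 0 and pulling its pages over could look like:
 *
 *        unsigned long node0 = 1UL << 0;
 *        if (mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0),
 *                  MPOL_MF_STRICT | MPOL_MF_MOVE))
 *                perror("mbind");
 */
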
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;
struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
        atomic_t refcnt;
        short policy;   /* See MPOL_* above */
        union {
                struct zonelist *zonelist;      /* bind */
                short preferred_node;           /* preferred */
                nodemask_t nodes;               /* interleave */
                /* undefined for default */
        } v;
        nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
};

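/*
 * Illustrative reading of the union above (editor's sketch; this helper does
 * not exist in the kernel): which member of 'v' is meaningful is decided
 * entirely by 'policy', so consumers switch on it first.  MPOL_DEFAULT
 * leaves 'v' unused, so the sketch just falls back to the local node:
 *
 *        static int example_policy_node(struct mempolicy *pol)
 *        {
 *                switch (pol->policy) {
 *                case MPOL_PREFERRED:
 *                        return pol->v.preferred_node;
 *                case MPOL_INTERLEAVE:
 *                        return first_node(pol->v.nodes);
 *                case MPOL_BIND:
 *                        return pol->v.zonelist->zones[0]->zone_pgdat->node_id;
 *                default:
 *                        return numa_node_id();
 *                }
 *        }
 */
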
/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
        if (pol)
                __mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_copy(pol);
        return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

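/*
 * Lifecycle sketch (editor's addition, loosely modelled on what fork()'s
 * dup_mmap() has to do; 'orig' and 'new' are placeholder VMAs): mpol_copy()
 * returns a fresh object (duplicating the zonelist in the MPOL_BIND case)
 * or an ERR_PTR() on allocation failure, and mpol_free() drops a reference
 * when the copy goes away:
 *
 *        struct mempolicy *pol = mpol_copy(vma_policy(orig));
 *        if (IS_ERR(pol))
 *                return PTR_ERR(pol);
 *        vma_set_policy(new, pol);
 *        ...
 *        mpol_free(vma_policy(new));
 */
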
static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return 1;
        return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
                                nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);

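/*
 * Usage sketch (editor's addition): shared memory objects embed a
 * struct shared_policy in their per-inode info (the 'info' and 'pgoff'
 * below are placeholders) and index it by page offset, matching the
 * pseudo-mm comment above.  mpol_shared_policy_lookup() returns a
 * referenced policy, so the caller drops it with mpol_free() once the
 * allocation is done:
 *
 *        struct shared_policy *sp = &info->policy;
 *        struct mempolicy *pol;
 *
 *        mpol_shared_policy_init(sp, MPOL_DEFAULT, NULL);
 *
 *        pol = mpol_shared_policy_lookup(sp, pgoff);
 *        ... allocate the page at index pgoff under 'pol' ...
 *        mpol_free(pol);
 *
 *        mpol_free_shared_policy(sp);
 */
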
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
        (cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif

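/*
 * Rebind sketch (editor's addition; schematic, not the actual cpuset code):
 * when a cpuset is given a new set of allowed memory nodes, the helpers
 * above are what remap every affected mempolicy.  Roughly, with 'tsk' and
 * 'new_allowed_nodes' as placeholders:
 *
 *        mpol_rebind_task(tsk, &new_allowed_nodes);
 *        if (tsk->mm)
 *                mpol_rebind_mm(tsk->mm, &new_allowed_nodes);
 */
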
extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
        if (k > policy_zone)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

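/*
 * Usage note (editor's sketch): do_migrate_pages() backs the
 * migrate_pages(2) system call, which moves a target process' pages from
 * one node set to another.  From userspace, again assuming <numaif.h>
 * ('pid' is a placeholder):
 *
 *        unsigned long from = 1UL << 0, to = 1UL << 1;
 *        if (migrate_pages(pid, 8 * sizeof(from), &from, &to) < 0)
 *                perror("migrate_pages");
 */
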
extern void *cpuset_being_rebound;      /* Trigger mpol_copy vma rebind */

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
                                        int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                unsigned long addr)
{
        return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
                        const nodemask_t *from_nodes,
                        const nodemask_t *to_nodes, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif