/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
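
/*
 * Example (illustrative sketch only, not part of this header): pairing
 * mpol_dup() with mpol_put().  __mpol_dup() can return an ERR_PTR on
 * allocation failure, and use_policy() is a hypothetical consumer:
 *
 *	struct mempolicy *pol = mpol_dup(tsk->mempolicy);
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	use_policy(pol);
 *	mpol_put(pol);	-- drops the reference mpol_dup() initialized to 1
 */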

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	struct mutex mutex;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
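
/*
 * Example (illustrative sketch): shared policies are looked up by page
 * index, matching the pages-not-bytes indexing described above.  "info"
 * and "pgoff" are assumed names, e.g. from a shmem fault path:
 *
 *	-- the returned policy, if any, carries a reference; shared
 *	-- policies have MPOL_F_SHARED set, so mpol_cond_put() drops it
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, pgoff);
 *	if (pol) {
 *		... allocate pages according to pol ...
 *		mpol_cond_put(pol);
 *	}
 */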

struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
		return 0;
	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}

#endif /* CONFIG_NUMA */
#endif
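
/*
 * Example (illustrative sketch): the !CONFIG_NUMA stubs above let callers
 * manage policies without #ifdefs.  With NUMA, the pattern below duplicates
 * a VMA's policy; without it, mpol_dup() returns NULL and vma_set_policy()
 * expands to a no-op, so the same code compiles either way.  "new_vma" and
 * "old_vma" are hypothetical names, and handling of the possible ERR_PTR
 * from mpol_dup() is omitted:
 *
 *	vma_set_policy(new_vma, mpol_dup(vma_policy(old_vma)));
 *	...
 *	mpol_put(vma_policy(new_vma));
 */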