linux/mempolicy.h at v2.6.16
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT     0
#define MPOL_PREFERRED   1
#define MPOL_BIND        2
#define MPOL_INTERLEAVE  3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mempolicy() */
#define MPOL_F_NODE     (1<<0)  /* return next IL node instead of node mask */
#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT   (1<<0) /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE     (1<<1) /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
        atomic_t refcnt;
        short policy;                   /* See MPOL_* above */
        union {
                struct zonelist *zonelist;       /* bind */
                short            preferred_node; /* preferred */
                nodemask_t       nodes;          /* interleave */
                /* undefined for default */
        } v;
        nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
        if (pol)
                __mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_copy(pol);
        return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return 1;
        return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
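/*
 * A minimal sketch of the reference discipline above, assuming a
 * hypothetical caller that caches a task's policy pointer (names are
 * illustrative, not declared here):
 *
 *      struct mempolicy *pol = current->mempolicy;
 *
 *      mpol_get(pol);          pin the object while it is in use
 *      ...allocate pages according to pol...
 *      mpol_free(pol);         drop the reference; the object is freed
 *                              when refcnt reaches zero
 *
 * mpol_copy() is different: it hands back a duplicate object (including
 * a copy of the MPOL_BIND zonelist) with its own reference count, so
 * the copy is freed independently of the original.
 */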
/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in
 * pages, not bytes, so that we can work with shared memory segments
 * bigger than what an unsigned long can address in bytes.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
                                nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
                (cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
        if (k > policy_zone)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

extern void *cpuset_being_rebound;      /* Trigger mpol_copy vma rebind */
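/*
 * A sketch of how the shared policy tree is consulted, assuming an
 * object that embeds a struct shared_policy (tmpfs-style; "info" and
 * "pgoff" are hypothetical names) and a page index into the segment:
 *
 *      struct mempolicy *pol;
 *
 *      pol = mpol_shared_policy_lookup(&info->policy, pgoff);
 *      ...allocate the page at pgoff according to pol...
 *      mpol_free(pol);         the lookup returned a held reference
 *
 * Keying the rb-tree by page index rather than byte offset is what lets
 * it cover segments larger than an unsigned long of bytes.
 */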
#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
                                        int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                        const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
                        const nodemask_t *from_nodes,
                        const nodemask_t *to_nodes, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif
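/*
 * A userspace sketch of the mbind() system call whose ABI the MPOL_*
 * and MPOL_MF_* constants above define (hypothetical mapping and mask;
 * the raw syscall is used so as not to assume libnuma is installed):
 *
 *      #define _GNU_SOURCE
 *      #include <sys/mman.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      unsigned long nodemask = 0x3;   bits for nodes 0 and 1
 *      size_t len = 1UL << 20;
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *      syscall(__NR_mbind, p, len, MPOL_INTERLEAVE, &nodemask,
 *              sizeof(nodemask) * 8, MPOL_MF_STRICT | MPOL_MF_MOVE);
 *
 * MPOL_MF_STRICT makes the call fail if pages already present in the
 * mapping violate the policy and cannot be handled; MPOL_MF_MOVE asks
 * the kernel to migrate pages mapped only by this process instead
 * (MPOL_MF_MOVE_ALL also moves shared pages but needs CAP_SYS_NICE).
 */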