/* From Linux v2.6.15 — include/linux/mempolicy.h */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;	/* See MPOL_* above */
	union {
		struct zonelist  *zonelist;	/* bind */
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave */
		/* undefined for default */
	} v;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Hugetlb policy. i386 hugetlb so far works with node numbers
 * instead of zone lists, so give it special interfaces for now.
 */
extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
			unsigned long addr);

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);
}

int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
extern struct mempolicy default_policy;

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
{
	return numa_node_id();
}

static inline int
mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
{
	return 1;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void numa_policy_rebind(const nodemask_t *old,
				const nodemask_t *new)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif