#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * Reads of current->mems_allowed and mempolicy in the fastpath must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reads of mems_allowed and mempolicy happen after the
	 * update of ->mems_allowed_change_disable.
	 *
	 * The write-side task sees that ->mems_allowed_change_disable is
	 * non-zero, knows the read-side task is reading mems_allowed or
	 * mempolicy, and so clears old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that reads of mems_allowed and mempolicy complete before
	 * mems_allowed_change_disable is decremented.
	 *
	 * The write-side task then knows that the read-side task is still
	 * reading mems_allowed or mempolicy, and does not clear old bits
	 * from the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
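
/*
 * Usage sketch (illustrative only; the allocation shown is a hypothetical
 * caller, not part of this header): code that reads current->mems_allowed
 * or the task mempolicy on the fastpath brackets the access with the
 * helpers above, e.g.
 *
 *	get_mems_allowed();
 *	node = cpuset_mem_spread_node();
 *	page = alloc_pages_node(node, GFP_KERNEL, 0);
 *	put_mems_allowed();
 *
 * The paired smp_mb() calls let the cpuset write side observe that a
 * reader is in flight (->mems_allowed_change_disable != 0) and defer
 * clearing old bits from the nodemask.
 */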

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */