/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

struct memcg_scanrecord {
	struct mem_cgroup *mem;		/* scanned memory cgroup */
	struct mem_cgroup *root;	/* scan target hierarchy root */
	int context;			/* scanning context (see memcontrol.c) */
	unsigned long nr_scanned[2];	/* the number of scanned pages */
	unsigned long nr_rotated[2];	/* the number of rotated pages */
	unsigned long nr_freed[2];	/* the number of freed pages */
	unsigned long elapsed;		/* nsec of time elapsed while scanning */
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should be passed GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * does not allocate memory but reclaims memory from all available zones,
 * so the "where I want memory from" bits of gfp_mask have no meaning here.
 * Any of those bits would therefore work, but having a rule avoids
 * ambiguous code: a charge function's gfp_mask should be set to GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */
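/*
 * Illustration only, not part of the original header: a hypothetical
 * caller honouring the rule above. The helper name is an assumption;
 * only mem_cgroup_newpage_charge() and GFP_RECLAIM_MASK come from the
 * kernel itself (the charge rejects with a nonzero error, e.g. -ENOMEM,
 * when reclaim cannot make room under the limit).
 *
 *	static int example_charge_new_page(struct page *page,
 *					   struct mm_struct *mm,
 *					   gfp_t gfp_mask)
 *	{
 *		Keep only the reclaim-behaviour bits; the zone/placement
 *		bits are meaningless to memcg, as noted above.
 *
 *		return mem_cgroup_newpage_charge(page, mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	}
 */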
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);
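/*
 * Illustration only, not part of the original header: the swap-in charge
 * API above is a two-phase protocol: try, then either commit or cancel.
 * A hypothetical sketch of a caller (example_swapin_fault and
 * example_install_pte_fails are assumptions, not kernel code; the real
 * user of this sequence is the swap-in fault path):
 *
 *	static int example_swapin_fault(struct mm_struct *mm,
 *					struct page *page)
 *	{
 *		struct mem_cgroup *ptr;
 *		int ret;
 *
 *		Phase 1: try to charge; may sleep and trigger reclaim.
 *		ret = mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr);
 *		if (ret)
 *			return ret;
 *
 *		if (example_install_pte_fails(mm, page)) {
 *			Roll the charge back if the page never gets mapped.
 *			mem_cgroup_cancel_charge_swapin(ptr);
 *			return -ENOMEM;
 *		}
 *
 *		Phase 2: commit once the page is really in place.
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *		return 0;
 *	}
 */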
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap,
						  struct memcg_scanrecord *rec);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						struct zone *zone,
						struct memcg_scanrecord *rec,
						unsigned long *nr_scanned);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	return mem_cgroup_subsys.disabled;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
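/*
 * Illustration only, not part of the original header: how a hypothetical
 * caller might keep MEMCG_NR_FILE_MAPPED in sync via the inc/dec wrappers
 * above while mapping and unmapping a file page. The example_* name is an
 * assumption; the real updates of this counter live in the rmap code.
 *
 *	static void example_account_file_mapped(struct page *page,
 *						bool mapped)
 *	{
 *		if (mapped)
 *			mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *		else
 *			mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	}
 */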
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
		struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						    int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						      int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */
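/*
 * Illustration only, not part of the original header: because every API
 * above gains a no-op stub when CONFIG_CGROUP_MEM_RES_CTLR is not set,
 * callers can use the memcg hooks unconditionally, with no #ifdefs of
 * their own. A hypothetical sketch combining this with the uncharge
 * batching declared earlier (example_release_pages is an assumption,
 * not kernel code):
 *
 *	static void example_release_pages(struct page **pages, int nr)
 *	{
 *		int i;
 *
 *		mem_cgroup_uncharge_start();	open a coalescing batch
 *		for (i = 0; i < nr; i++)
 *			mem_cgroup_uncharge_page(pages[i]);
 *		mem_cgroup_uncharge_end();	flush the batch once
 *
 *		With the controller compiled out, all three calls are
 *		empty inline functions and vanish entirely.
 *	}
 */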