/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits could therefore be passed, but having a rule avoids ambiguous
 * code: a charge function's gfp_mask should be set to GFP_KERNEL or to
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg ever allocates memory itself, GFP_KERNEL is sane.)
 */
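/*
 * Illustrative sketch, not part of the original header: under the rule
 * above, a hypothetical caller charging a freshly faulted anonymous page
 * would pass either GFP_KERNEL or a masked copy of its own gfp flags:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		goto oom;
 *
 *	if (mem_cgroup_newpage_charge(page, mm, gfp_mask & GFP_RECLAIM_MASK))
 *		goto oom;
 *
 * Only the reclaim-related bits of gfp_mask matter to memcg; the placement
 * ("which zone") bits are ignored.
 */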

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead. */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
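/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * caller that uncharges many pages in one pass can bracket the loop with
 * the pair above so the per-page uncharges are coalesced into batched
 * counter updates instead of one update per page:
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry_safe(page, next, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 *
 * (pages_to_free and next are hypothetical names; the batching state is
 * kept on the current task, so the start/end pair may nest.)
 */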

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					struct zone *zone,
					enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
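/*
 * Illustrative sketch, not part of the original header: the inc/dec
 * helpers above are thin wrappers around mem_cgroup_update_page_stat().
 * Hypothetical call sites that map and later unmap a file page would keep
 * MEMCG_NR_FILE_MAPPED in sync like this:
 *
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);	(on map)
 *	...
 *	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);	(on unmap)
 */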

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
			     enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */