/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning. Any bits
 * of that field would therefore work, but having a rule avoids ambiguity:
 * a charge function's gfp_mask should be either GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * the sane choice.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
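
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * header): the swap-in charge is split into try/commit/cancel so the
 * charge can be backed out if installing the page fails. The shape
 * below is modeled on the do_swap_page() call site; locking and error
 * handling are elided, and GFP_KERNEL follows the rule above.
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out;	// charge failed, do not map the page
 *	// ... install the pte ...
 *	if (mapped_ok)
 *		mem_cgroup_commit_charge_swapin(page, ptr);
 *	else
 *		mem_cgroup_cancel_charge_swapin(ptr);
 */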

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_reclaimable_page(struct page *page);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				enum lru_list from, enum lru_list to);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
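
/*
 * Illustrative usage sketch (an assumption, not taken verbatim from a
 * call site): bulk uncharge paths such as truncation can bracket a
 * loop with uncharge_start()/uncharge_end() so accounting is flushed
 * once per batch instead of once per page.
 *
 *	mem_cgroup_uncharge_start();
 *	for (i = 0; i < nr_pages; i++)
 *		mem_cgroup_uncharge_cache_page(pages[i]);
 *	mem_cgroup_uncharge_end();
 */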

extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;
	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == mem;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok);

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lru_mask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
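
/*
 * Illustrative usage sketch (an assumption, modeled on the rmap code):
 * the inc/dec wrappers keep the per-memcg MEMCG_NR_FILE_MAPPED counter
 * in step with a page's mapcount.
 *
 *	// in a page_add_file_rmap()-style path:
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 */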

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *mem);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline void mem_cgroup_add_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head,
					       struct page *tail)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */