/* include/linux/memcontrol.h at v3.7-rc6 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field would do, but having a rule avoids ambiguous code: a charge
 * function's gfp_mask should be set to GFP_KERNEL or to
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is
 * sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
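/*
 * Illustrative sketch, not part of the original header: the swap-in charge
 * calls above form a try/commit/cancel protocol. A hypothetical caller
 * (do_example_swapin() and example_map_page() are made up for illustration)
 * would pair them roughly like this, masking gfp_mask down to its reclaim
 * bits per the convention above:
 *
 *	static int do_example_swapin(struct mm_struct *mm, struct page *page,
 *				     gfp_t gfp_mask)
 *	{
 *		struct mem_cgroup *memcg;
 *		int ret;
 *
 *		ret = mem_cgroup_try_charge_swapin(mm, page,
 *					gfp_mask & GFP_RECLAIM_MASK, &memcg);
 *		if (ret)
 *			return ret;
 *		if (example_map_page(mm, page)) {
 *			mem_cgroup_cancel_charge_swapin(memcg);
 *			return -ENOMEM;
 *		}
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 *		return 0;
 *	}
 */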
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
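/*
 * Illustrative sketch, not part of the original header: mem_cgroup_iter()
 * walks the hierarchy under a root, holding a reference on the returned
 * position; passing the previous position back in continues the walk, and
 * NULL marks the end. Breaking out early must go through
 * mem_cgroup_iter_break() so the reference on the last position is dropped.
 * example_visit() is made up for illustration; a NULL cookie requests a
 * plain full walk:
 *
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (example_visit(memcg) < 0) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}
 */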
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
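/*
 * Illustrative sketch, not part of the original header: page-stat updates
 * must be bracketed by the begin/end helpers above so a racing charge move
 * cannot misaccount the page. This roughly mirrors how rmap code accounts
 * file-mapped pages; the surrounding function is hypothetical:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (page_mapped(page))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */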
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
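/*
 * Illustrative sketch, not part of the original header: the two socket hooks
 * are paired over a socket's lifetime so kmem accounting of its buffers
 * follows the creating task's memcg; the stubs above keep call sites
 * unconditional when the feature is compiled out. The wrapper functions are
 * made up for illustration:
 *
 *	static void example_sock_create(struct sock *sk)
 *	{
 *		sock_update_memcg(sk);
 *	}
 *
 *	static void example_sock_destroy(struct sock *sk)
 *	{
 *		sock_release_memcg(sk);
 *	}
 */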
#endif /* _LINUX_MEMCONTROL_H */