/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should be called with
 * GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK).  In the current
 * implementation, memcg does not allocate memory itself; it only
 * reclaims memory from all available zones, so the "where do I want
 * memory from" bits of gfp_mask have no meaning.  Any bits of that
 * field would do, but having a rule avoids ambiguous code: pass either
 * GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK.  (And if memcg ever does
 * allocate memory in the future, GFP_KERNEL remains a sane choice.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
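
/*
 * A minimal usage sketch of the two-phase swap-in charge, modelled on
 * the do_swap_page() call sequence: try the charge, map the page, then
 * commit; cancel on the error path.  Per the rule above, the charge is
 * attempted with GFP_KERNEL:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto out;	(charge failed, page stays unmapped)
 *	... map the page into the page tables ...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * If mapping fails after a successful try-charge, the caller must use
 * mem_cgroup_cancel_charge_swapin(memcg) instead of committing.
 */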

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
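
/*
 * A minimal usage sketch of the coalescing pair above, as used by bulk
 * freeing paths such as page cache truncation (pages[] and nr_pages
 * stand in for the caller's batch): bracketing the loop lets memcg
 * coalesce the res_counter updates instead of paying for them one page
 * at a time.
 *
 *	mem_cgroup_uncharge_start();
 *	for (i = 0; i < nr_pages; i++)
 *		mem_cgroup_uncharge_cache_page(pages[i]);
 *	mem_cgroup_uncharge_end();
 */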

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
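
/*
 * A minimal sketch of the canonical hierarchy walk with the iterator
 * above ('root' and 'cookie' are the caller's walk state; this is the
 * pattern the reclaim code uses).  A NULL 'prev' starts the walk at
 * 'root'; a NULL return ends it:
 *
 *	struct mem_cgroup *iter;
 *
 *	iter = mem_cgroup_iter(root, NULL, &cookie);
 *	while (iter) {
 *		... scan this memcg ...
 *		iter = mem_cgroup_iter(root, iter, &cookie);
 *	}
 *
 * A caller that breaks out of the loop early must call
 * mem_cgroup_iter_break(root, iter) to drop the reference held on the
 * current position.
 */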

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}
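
/*
 * A minimal sketch of the stat-update bracket above, following the
 * page_add_file_rmap() pattern: updates to memcg-accounted page state
 * must happen inside the bracket so the page cannot be moved to another
 * memcg in the middle of the update
 * (mem_cgroup_inc_page_stat() is declared just below):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */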

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */