/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
 * these two lists must be kept in accord with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_DIRTY,		/* # of dirty pages in page cache */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
	/* default hierarchy events */
	MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_NR_EVENTS,
};

#ifdef CONFIG_MEMCG
extern struct cgroup_subsys_state *mem_cgroup_root_css;

void mem_cgroup_events(struct mem_cgroup *memcg,
		       enum mem_cgroup_events_index idx,
		       unsigned int nr);

bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare);

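/*
 * Illustrative sketch (editor's note, not part of the original header):
 * a typical caller charges a new page before publishing it, then either
 * commits or cancels the charge depending on whether the page was
 * successfully installed. install_page() is a hypothetical placeholder
 * and error handling is abbreviated.
 *
 *	struct mem_cgroup *memcg;
 *	int error;
 *
 *	error = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg);
 *	if (error)
 *		return error;
 *
 *	error = install_page(page);
 *	if (error) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return error;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */
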
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
			      struct mem_cgroup *root);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
extern struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

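/*
 * Illustrative sketch (editor's note): mem_cgroup_iter() is typically used
 * to walk a memcg hierarchy, feeding the previous position back in on each
 * step; mem_cgroup_iter_break() drops the reference when the walk is
 * abandoned early. done_early() stands in for caller-specific logic, and a
 * NULL reclaim cookie is assumed to be acceptable here.
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	while (memcg) {
 *		if (done_early()) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		memcg = mem_cgroup_iter(root, memcg, NULL);
 *	}
 */
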
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}

struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx, int val);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, -1);
}

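/*
 * Illustrative sketch (editor's note): the begin/end pair brackets updates
 * to per-memcg page statistics so the page cannot change memcg underneath
 * the caller. The returned memcg may be NULL when accounting is disabled;
 * the update helpers are assumed to tolerate that.
 *
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_begin_page_stat(page);
 *	mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_page_stat(memcg);
 */
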
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */
struct mem_cgroup;

#define mem_cgroup_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void mem_cgroup_events(struct mem_cgroup *memcg,
				     enum mem_cgroup_events_index idx,
				     unsigned int nr)
{
}

static inline bool mem_cgroup_low(struct mem_cgroup *root,
				  struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
				      struct page *newpage,
				      bool lrucare)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

#ifdef CONFIG_CGROUP_WRITEBACK

struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
			 unsigned long *pdirty, unsigned long *pwriteback);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pavail,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
extern void memcg_get_cache_ids(void);
extern void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

bool memcg_kmem_is_active(struct mem_cgroup *memcg);

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
				 int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
void __memcg_kmem_put_cache(struct kmem_cache *cachep);

struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);

int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
		      unsigned long nr_pages);
void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	if (gfp & __GFP_NOACCOUNT)
		return true;
	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it forcibly, but we hope
	 * those allocations are rare, and won't be worth the trouble.
	 */
	if (gfp & __GFP_NOFAIL)
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @page to @memcg.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}

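/*
 * Illustrative sketch (editor's note): the newpage_charge/commit_charge pair
 * brackets a page allocation that should be accounted to the current task's
 * memcg. Passing a NULL page to memcg_kmem_commit_charge() reverts the
 * charge when the allocation itself failed; further error handling is
 * elided.
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 */
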
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOACCOUNT)
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep);
}

static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_put_cache(cachep);
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	if (!memcg_kmem_enabled())
		return NULL;
	return __mem_cgroup_from_kmem(ptr);
}

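/*
 * Illustrative sketch (editor's note): slab allocation paths pick the
 * per-memcg variant of a cache before allocating and drop it again
 * afterwards, so the object is charged to the current task's memcg.
 * do_slab_alloc() is a hypothetical stand-in for the real allocation step.
 *
 *	struct kmem_cache *s;
 *	void *obj;
 *
 *	s = memcg_kmem_get_cache(cachep, gfp_flags);
 *	obj = do_slab_alloc(s, gfp_flags);
 *	memcg_kmem_put_cache(s);
 *	return obj;
 */
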
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}

static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	return NULL;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */