/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. The counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};
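
/*
 * Illustrative sketch (hedged, not an API contract of this header): the
 * point of the bitmap is that memcg-aware reclaim only invokes shrinkers
 * whose bit is set for this memcg, roughly as mm/vmscan.c does it. The
 * bound "shrinker_nr_max" below lives on the vmscan.c side and is named
 * here only for illustration:
 *
 *	rcu_read_lock();
 *	map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
 *	for_each_set_bit(i, map->map, shrinker_nr_max)
 *		...run shrinker i against this memcg and node...
 *	rcu_read_unlock();
 */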
/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu	*shrinker_map;
#endif
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which
						 * the soft limit is exceeded */
	bool			on_tree;
	bool			congested;	/* memcg has many dirty pages
						 * backed by a congested BDI */

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot
						 * use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif
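
/*
 * Illustrative sketch (hedged; the real logic lives in mm/memcontrol.c):
 * on a usage change, the threshold code signals every eventfd whose
 * threshold was crossed and then re-positions current_threshold, roughly:
 *
 *	for (i = t->current_threshold;
 *	     i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
 *		eventfd_signal(t->entries[i].eventfd, 1);
 *	for (i = t->current_threshold + 1;
 *	     i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
 *		eventfd_signal(t->entries[i].eventfd, 1);
 *	t->current_threshold = i - 1;
 */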
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	/* memory.stat */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t		stat[MEMCG_NR_STAT];
	atomic_long_t		events[NR_VM_EVENT_ITEMS];
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t scan_nodes;
	atomic_t numainfo_events;
	atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: a larger batch may be necessary on very large machines.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
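
/*
 * Illustrative sketch of the two-phase charge protocol declared above
 * (hedged; the canonical callers are the fault, shmem and swap paths in
 * mm/). A charge is first reserved, then either committed once the page
 * is fully set up, or cancelled on failure:
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		goto out;			// could not reserve charge
 *	...install the page (page table / page cache)...
 *	if (installed)
 *		mem_cgroup_commit_charge(page, memcg, false, false);
 *	else
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *
 * mem_cgroup_uncharge() runs much later, when the page is finally freed.
 */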
static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node and a memcg
 * @pgdat: node of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @pgdat and
 * @memcg. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
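
/*
 * Illustrative sketch: mem_cgroup_iter() implements a css_get()-protected
 * walk of @root's subtree. The usual calling pattern (see the users in
 * mm/vmscan.c and mm/memcontrol.c) is:
 *
 *	memcg = mem_cgroup_iter(root, NULL, NULL);
 *	do {
 *		...scan or account against memcg...
 *		if (need_to_stop_early) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 */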
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}
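
/*
 * Illustrative sketch: walking toward the root with parent_mem_cgroup()
 * terminates on NULL, so hierarchical checks are typically written as a
 * plain loop (mem_cgroup_under_socket_pressure() below uses this shape):
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
 *		if (some_condition(memcg))
 *			return true;
 *	}
 *	return false;
 */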
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
		int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *	lock_page(page) or lock_page_memcg(page)
 *	if (TestClearPageState(page))
 *		mod_memcg_page_state(page, state, -1);
 *	unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}
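
/*
 * Worked example of the MEMCG_CHARGE_BATCH scheme above (illustrative):
 * with a batch of 32, a CPU doing mod_memcg_state(memcg, idx, +5) seven
 * times accumulates x = 35 in its per-cpu counter on the last call; since
 * abs(35) > 32, the 35 is flushed into the atomic memcg->stat[idx] and the
 * per-cpu counter resets to 0. Readers of memcg_page_state() may therefore
 * lag by up to MEMCG_CHARGE_BATCH per CPU, and may transiently compute a
 * negative sum, which is clamped to 0 under SMP.
 */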
static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}
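
/*
 * Illustrative sketch (hedged): memcg_memory_event() is how the
 * "memory.events" counters are bumped and userspace pollers woken. For
 * example, the charge path in mm/memcontrol.c raises MEMCG_MAX when a
 * charge runs into memory.max, roughly:
 *
 *	if (charge_failed_against_hard_limit)
 *		memcg_memory_event(mem_over_limit, MEMCG_MAX);
 */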
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}
static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}
static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif
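
/*
 * Illustrative sketch (hedged; the real callers live in net/core and the
 * TCP code) of how the networking layer is expected to use the socket
 * hooks above when growing a socket's buffers:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		goto suppress_allocation;
 *	...
 *	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);  // on release
 */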
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg);

extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(page, order);
}

static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
					  int order, struct mem_cgroup *memcg)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
	return 0;
}
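
/*
 * Illustrative sketch (hedged): the page allocator charges __GFP_ACCOUNT
 * allocations through the wrappers above, roughly:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;		// charge failed, fail the allocation
 *	}
 *	...
 *	memcg_kmem_uncharge(page, order);	// in the free path
 */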
/*
 * Helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */