/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
        MEMCG_RSS,
        MEMCG_RSS_HUGE,
        MEMCG_SWAP,
        MEMCG_SOCK,
        /* XXX: why are these zone and not node counters? */
        MEMCG_KERNEL_STACK_KB,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
        MEMCG_PROT_NONE,
        MEMCG_PROT_LOW,
        MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        unsigned int generation;
};
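
/*
 * How reclaim consumes these protection levels (a sketch modeled on
 * mm/vmscan.c, not a definition of that code; sc->memcg_low_reclaim
 * and sc->memcg_low_skipped are that file's scan-control state, shown
 * only for illustration): MEMCG_PROT_MIN skips the cgroup outright,
 * while MEMCG_PROT_LOW skips it unless reclaim has already failed a
 * pass and is retrying with low protection overridden:
 *
 *	switch (mem_cgroup_protected(target_memcg, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;
 *	case MEMCG_PROT_LOW:
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;
 *	}
 */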

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better than
 * using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
        long stat[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

struct lruvec_stat {
        long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
        struct rcu_head rcu;
        unsigned long map[0];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;

        /* Legacy local VM stats */
        struct lruvec_stat __percpu *lruvec_stat_local;

        /* Subtree VM stats (batched updates) */
        struct lruvec_stat __percpu *lruvec_stat_cpu;
        atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter;

        struct memcg_shrinker_map __rcu *shrinker_map;

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which
                                         * the soft limit is exceeded */
        bool on_tree;
        struct mem_cgroup *memcg;       /* Back pointer, we cannot
                                         * use container_of */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* Index of the threshold just below or equal to the current usage */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};
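
/*
 * Why the spare array makes unregistration infallible (a sketch of the
 * idea; the authoritative code is mem_cgroup_usage_unregister_event()
 * in mm/memcontrol.c): removing a threshold needs a smaller array, and
 * allocating one at unregister time could fail. Instead the shrunken
 * array is assembled in ->spare, published over ->primary, and the old
 * primary becomes the next spare, so no allocation is ever needed:
 *
 *	new = thresholds->spare;
 *	...copy every entry except the removed one into new...
 *	rcu_assign_pointer(thresholds->primary, new);
 *	thresholds->spare = old;
 */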

enum memcg_kmem_state {
        KMEM_NONE,
        KMEM_ALLOCATED,
        KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;
        struct page_counter swap;

        /* Legacy consumer-oriented counters */
        struct page_counter memsw;
        struct page_counter kmem;
        struct page_counter tcpmem;

        /* Upper bound of normal memory consumption range */
        unsigned long high;

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;

        /*
         * Should the OOM killer kill all tasks belonging to this cgroup,
         * if it has to kill one?
         */
        bool oom_group;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when the task is moved into
         * this mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        unsigned long move_lock_flags;

        MEMCG_PADDING(_pad1_);

        /*
         * set > 0 if pages under this cgroup are moving to other cgroup.
         */
        atomic_t moving_account;
        struct task_struct *move_lock_task;

        /* Legacy local VM stats and events */
        struct memcg_vmstats_percpu __percpu *vmstats_local;

        /* Subtree VM stats and events (batched updates) */
        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

        MEMCG_PADDING(_pad2_);

        atomic_long_t vmstats[MEMCG_NR_STAT];
        atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

        /* memory.events */
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
        /* Index in the kmem_cache->memcg_params.memcg_caches array */
        int kmemcg_id;
        enum memcg_kmem_state kmem_state;
        struct list_head kmem_caches;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: it may be necessary to use larger numbers on big-iron systems.
 */
#define MEMCG_CHARGE_BATCH 32U
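
/*
 * A note on the batch size (an explanatory sketch of the rationale; the
 * real logic lives in try_charge()/consume_stock() in mm/memcontrol.c):
 * charging pages one at a time would hit the shared page_counter
 * atomics on every fault, so the charge path charges up to
 * MEMCG_CHARGE_BATCH pages at once and caches the surplus in a per-cpu
 * "stock" that subsequent single-page charges on that CPU can consume
 * without touching the shared counters.
 */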

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
                                                  bool in_low_reclaim)
{
        if (mem_cgroup_disabled())
                return 0;

        if (in_low_reclaim)
                return READ_ONCE(memcg->memory.emin);

        return max(READ_ONCE(memcg->memory.emin),
                   READ_ONCE(memcg->memory.elow));
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                                                struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp,
                          bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, struct mem_cgroup **memcgp,
                                bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
                              bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
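
/*
 * The two-step charge protocol, as used by e.g. the anonymous fault
 * path (a sketch; see the callers in mm/memory.c and the function
 * comments in mm/memcontrol.c for the authoritative sequence):
 * try_charge reserves the memory, commit makes the binding visible
 * once the page is established, cancel undoes the reservation:
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		goto oom;
 *	...map the page, or add it to the page cache...
 *	if (failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		goto out;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */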

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
        return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data of the wanted node
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);
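
/*
 * mem_cgroup_iter() walks the subtree under @root in pre-order. The
 * canonical loop shape, taken from the usage documented in
 * mm/memcontrol.c, is:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL))
 *		...
 *
 * Breaking out of the loop early requires mem_cgroup_iter_break(root,
 * iter) so the reference held on the current position is dropped.
 */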

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
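
/*
 * The private ID survives the cgroup itself, which is what lets e.g.
 * swap records store a 16-bit mem_cgroup_id() and resolve it later.
 * A sketch of the lookup side (mem_cgroup_from_id() must be called
 * under rcu_read_lock() and the result may be an offlined or even
 * released cgroup, hence the tryget):
 *
 *	unsigned short id = lookup_swap_cgroup_id(entry);
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */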

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->memory.parent)
                return NULL;
        return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        if (!root->use_hierarchy)
                return false;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                                            struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
                                                   int idx)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *	lock_page(page) or lock_page_memcg(page)
 *	if (TestClearPageState(page))
 *		mod_memcg_page_state(page, state, -1);
 *	unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
                                          int idx, int val)
{
        if (page->mem_cgroup)
                __mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
                                        int idx, int val)
{
        if (page->mem_cgroup)
                mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x = 0;
        int cpu;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        for_each_possible_cpu(cpu)
                x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
void mod_memcg_obj_state(void *p, int idx, int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
{
        pg_data_t *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        /* Untracked pages have no memcg, no lruvec. Update only the node */
        if (!page->mem_cgroup) {
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
        __mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_page_state(page, idx, val);
        local_irq_restore(flags);
}
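
/*
 * How these layers compose (a sketch based on the dirty-page accounting
 * in mm/page-writeback.c): a caller changes a page's state once, and
 * the page/lruvec helper fans the update out to the node, the memcg,
 * and the per-node-per-memcg lruvec statistics in a single call:
 *
 *	lock_page_memcg(page);
 *	if (!TestSetPageDirty(page)) {
 *		...
 *		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
 *		...
 *	}
 *	unlock_page_memcg(page);
 *
 * lock_page_memcg() stabilizes page->mem_cgroup against concurrent
 * charge moving, as described above mod_memcg_page_state().
 */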

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
                                          enum vm_event_item idx)
{
        if (page->mem_cgroup)
                count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                                        enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
        atomic_long_inc(&memcg->memory_events_local[event]);
        cgroup_file_notify(&memcg->events_local_file);

        do {
                atomic_long_inc(&memcg->memory_events[event]);
                cgroup_file_notify(&memcg->events_file);

                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}
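
/*
 * memcg_memory_event() is how the charge and reclaim paths surface
 * memory.events to userspace; for instance, hitting the hard limit is
 * reported against the limiting cgroup roughly as (a sketch of the
 * pattern in mm/memcontrol.c, not its exact code):
 *
 *	memcg_memory_event(mem_over_limit, MEMCG_MAX);
 *
 * Note the walk up the hierarchy above: an event is counted in every
 * ancestor's memory.events file unless the cgroup hierarchy was
 * mounted with memory_localevents.
 */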

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
                                                  bool in_low_reclaim)
{
        return 0;
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
        struct mem_cgroup *root, struct mem_cgroup *memcg)
{
        return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask,
                                        struct mem_cgroup **memcgp,
                                        bool compound)
{
        *memcgp = NULL;
        return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
                                              struct mm_struct *mm,
                                              gfp_t gfp_mask,
                                              struct mem_cgroup **memcgp,
                                              bool compound)
{
        *memcgp = NULL;
        return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
        return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
        return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
        struct task_struct *victim, struct mem_cgroup *oom_domain)
{
        return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
                                                   int idx)
{
        return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     int idx,
                                     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx,
                                   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
                                          int idx,
                                          int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
                                        int idx,
                                        int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx, int val)
{
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
{
        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
                                           int val)
{
        struct page *page = virt_to_head_page(p);

        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_memcg_obj_state(void *p, int idx, int val)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
                                        enum vm_event_item idx,
                                        unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
                                          int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
                                     int idx)
{
        __mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
                                     int idx)
{
        __mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
                                          int idx)
{
        __mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
                                          int idx)
{
        __mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx)
{
        __mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx)
{
        __mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx)
{
        __mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx)
{
        __mod_lruvec_page_state(page, idx, -1);
}

static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_slab_state(p, idx, 1);
}

static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_slab_state(p, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
{
        mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
{
        mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
                                        int idx)
{
        mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
                                        int idx)
{
        mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx)
{
        mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx)
{
        mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx)
{
        mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx)
{
        mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
                                             struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
{
        if (mem_cgroup_disabled())
                return;

        if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, memcg->socket_pressure))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
                                   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
                                          int nid, int shrinker_id)
{
}
#endif
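
/*
 * A typical consumer of mem_cgroup_under_socket_pressure() (a sketch
 * modeled on tcp_under_memory_pressure() in the networking code): the
 * per-memcg signal is checked before the global protocol one, so a
 * cgroup hitting its limits throttles its own sockets first:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;
 *	return READ_ONCE(tcp_memory_pressure);
 */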

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
                              struct mem_cgroup *memcg);
void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
                                 unsigned int nr_pages);

extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
        for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
        return static_branch_unlikely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
        if (memcg_kmem_enabled())
                return __memcg_kmem_charge(page, gfp, order);
        return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge(page, order);
}

static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
                                          int order, struct mem_cgroup *memcg)
{
        if (memcg_kmem_enabled())
                return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
        return 0;
}

static inline void memcg_kmem_uncharge_memcg(struct page *page, int order,
                                             struct mem_cgroup *memcg)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_memcg(memcg, 1 << order);
}

/*
 * A helper for accessing a memcg's index. It is used as an index into the
 * child cache array in kmem_cache, and also to derive the cache's name.
 * This function returns -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
        return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
        return 0;
}

static inline void __memcg_kmem_uncharge(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
        return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */