/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
        MEMCG_VMALLOC,
        MEMCG_KMEM,
        MEMCG_ZSWAP_B,
        MEMCG_ZSWAPPED,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_OOM_GROUP_KILL,
        MEMCG_SWAP_HIGH,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT 16

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu;
struct memcg_vmstats;

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
        struct rcu_head rcu;
        atomic_long_t *nr_deferred;
        unsigned long *map;
        int map_nr_max;
};

struct lruvec_stats_percpu {
        /* Local (CPU and cgroup) state */
        long state[NR_VM_NODE_STAT_ITEMS];

        /* Delta calculation for lockless upward propagation */
        long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
        /* Aggregated (CPU and subtree) state */
        long state[NR_VM_NODE_STAT_ITEMS];

        /* Non-hierarchical (CPU aggregated) state */
        long state_local[NR_VM_NODE_STAT_ITEMS];

        /* Pending child counts during tree propagation */
        long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;

        struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
        struct lruvec_stats lruvec_stats;

        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter;

        struct shrinker_info __rcu *shrinker_info;

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which */
                                        /* the soft limit is exceeded */
        bool on_tree;
        struct mem_cgroup *memcg;       /* Back pointer, we cannot */
                                        /* use container_of */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT 4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
        struct percpu_ref refcnt;
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
                struct list_head list; /* protected by objcg_lock */
                struct rcu_head rcu;
        };
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;     /* Both v1 & v2 */

        union {
                struct page_counter swap;       /* v2 only */
                struct page_counter memsw;      /* v1 only */
        };

        /* Legacy consumer-oriented counters */
        struct page_counter kmem;       /* v1 only */
        struct page_counter tcpmem;     /* v1 only */

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
        unsigned long zswap_max;
#endif

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the OOM killer kill all tasks belonging to this cgroup,
         * if it kills one of them?
         */
        bool oom_group;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        unsigned long move_lock_flags;

        CACHELINE_PADDING(_pad1_);

        /* memory.stat */
        struct memcg_vmstats *vmstats;

        /* memory.events */
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        /*
         * Hint of reclaim pressure for socket memory management. Note
         * that this indicator should NOT be used in legacy cgroup mode
         * where socket memory is accounted/charged separately.
         */
        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
        int kmemcg_id;
        struct obj_cgroup __rcu *objcg;
        /* list of inherited objcgs, protected by objcg_lock */
        struct list_head objcg_list;
#endif

        CACHELINE_PADDING(_pad2_);

        /*
         * set > 0 if pages under this cgroup are moving to other cgroup.
         */
        atomic_t moving_account;
        struct task_struct *move_lock_task;

        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN
        /* per-memcg mm_struct list */
        struct lru_gen_mm_list mm_list;
#endif

        struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial.
 * TODO: maybe necessary to use big numbers in big irons or dynamic based on
 * the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
        /* page->memcg_data is a pointer to an objcgs vector */
        MEMCG_DATA_OBJCGS = (1UL << 0),
        /* page has been accounted as a non-slab kernel page */
        MEMCG_DATA_KMEM = (1UL << 1),
        /* the next bit after the last actual flag */
        __NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
        return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
        unsigned long memcg_data = folio->memcg_data;

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
        VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

        return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        if (folio_memcg_kmem(folio))
                return obj_cgroup_memcg(__folio_objcg(folio));
        return __folio_memcg(folio);
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
        return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
        unsigned long memcg_data = READ_ONCE(folio->memcg_data);

        VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function unlike folio_memcg() can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
        /*
         * Because folio->memcg_data might be changed asynchronously
         * for slabs, READ_ONCE() should be used here.
         */
        unsigned long memcg_data = READ_ONCE(folio->memcg_data);

        if (memcg_data & MEMCG_DATA_OBJCGS)
                return NULL;

        if (memcg_data & MEMCG_DATA_KMEM) {
                struct obj_cgroup *objcg;

                objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
                return obj_cgroup_memcg(objcg);
        }

        return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        if (PageTail(page))
                return NULL;
        return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();
retry:
        memcg = obj_cgroup_memcg(objcg);
        if (unlikely(!css_tryget(&memcg->css)))
                goto retry;
        rcu_read_unlock();

        return memcg;
}
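/*
 * Illustrative sketch, not part of this header's API: one way a caller that
 * only holds an RCU read lock could pin the memcg behind a folio, following
 * the stability rules documented for folio_memcg() above. The helper name
 * memcg_example_get_folio_memcg() is hypothetical.
 */
static inline struct mem_cgroup *memcg_example_get_folio_memcg(struct folio *folio)
{
        struct mem_cgroup *memcg;

        rcu_read_lock();                        /* keeps objcg->memcg stable for kmem folios */
        memcg = folio_memcg(folio);             /* may be NULL for an uncharged folio */
        if (memcg && !css_tryget(&memcg->css))  /* pin it before leaving the RCU section */
                memcg = NULL;
        rcu_read_unlock();

        return memcg;                           /* caller drops the reference with mem_cgroup_put() */
}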
#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
        VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
        VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
        return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
        return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
        return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                struct mem_cgroup *memcg,
                unsigned long *min,
                unsigned long *low)
{
        *min = *low = 0;

        if (mem_cgroup_disabled())
                return;

        /*
         * There is no reclaim protection applied to a targeted reclaim.
         * We are special casing this specific case here because
         * mem_cgroup_calculate_protection is not robust enough to keep
         * the protection invariant for calculated effective values for
         * parallel reclaimers with different reclaim target. This is
         * especially a problem for tail memcgs (as they have pages on LRU)
         * which would want to have effective values 0 for targeted reclaim
         * but a different value for external reclaim.
         *
         * Example
         * Let's have global and A's reclaim in parallel:
         *  |
         *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
         *  |\
         *  | C (low = 1G, usage = 2.5G)
         *  B (low = 1G, usage = 0.5G)
         *
         * For the global reclaim
         * A.elow = A.low
         * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
         * C.elow = min(C.usage, C.low)
         *
         * With the effective values resetting we have A reclaim
         * A.elow = 0
         * B.elow = B.low
         * C.elow = C.low
         *
         * If the global reclaim races with A's reclaim then
         * B.elow = C.elow = 0 because children_low_usage > A.elow
         * is possible and reclaiming B would be violating the protection.
         */
        if (root == memcg)
                return;

        *min = READ_ONCE(memcg->memory.emin);
        *low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        /*
         * The root memcg doesn't account charges, and doesn't support
         * protection. The target memcg's protection is ignored, see
         * mem_cgroup_calculate_protection() and mem_cgroup_protection().
         */
        return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
                memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        if (mem_cgroup_unprotected(target, memcg))
                return false;

        return READ_ONCE(memcg->memory.elow) >=
                page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        if (mem_cgroup_unprotected(target, memcg))
                return false;

        return READ_ONCE(memcg->memory.emin) >=
                page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
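/*
 * Illustrative sketch, not part of this header's API: how a reclaim path
 * could consult the protection helpers above before scanning a memcg. The
 * helper name memcg_example_skip_reclaim() is hypothetical; real reclaim
 * (mm/vmscan.c) additionally tracks whether low protection was ignored.
 */
static inline bool memcg_example_skip_reclaim(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        if (mem_cgroup_below_min(target, memcg))
                return true;    /* hard protection: never reclaim from here */
        if (mem_cgroup_below_low(target, memcg))
                return true;    /* best-effort protection: skip on the first pass */
        return false;
}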
/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
                gfp_t gfp)
{
        if (mem_cgroup_disabled())
                return 0;
        return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
                gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = memcg->nodeinfo[pgdat->node_id];
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}
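/*
 * Illustrative sketch, not taken from the kernel sources: allocating an
 * order-0 folio and charging it with the helpers declared above, undoing
 * the allocation if the charge fails. folio_alloc() and folio_put() come
 * from the core MM headers already pulled in via <linux/mm.h>; the helper
 * name memcg_example_alloc_charged_folio() is hypothetical.
 */
static inline struct folio *memcg_example_alloc_charged_folio(struct mm_struct *mm,
                gfp_t gfp)
{
        struct folio *folio = folio_alloc(gfp, 0);

        if (!folio)
                return NULL;
        if (mem_cgroup_charge(folio, mm, gfp)) {        /* may reclaim according to @gfp */
                folio_put(folio);
                return NULL;
        }
        return folio;   /* charge is dropped via mem_cgroup_uncharge() or on final free */
}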
/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
        return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
        return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
        percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
                unsigned long nr)
{
        percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
        percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
        return !memcg || css_tryget(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)        \
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                struct mem_cgroup *,
                struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*)(struct task_struct *, void *), void *arg);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
        return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}
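/*
 * Illustrative usage note: the canonical way to walk a memcg subtree with
 * mem_cgroup_iter() is a loop of the following shape (a NULL cookie makes
 * it a full pre-order walk):
 *
 *      struct mem_cgroup *iter = mem_cgroup_iter(root, NULL, NULL);
 *
 *      do {
 *              ...operate on iter...
 *      } while ((iter = mem_cgroup_iter(root, iter, NULL)));
 *
 * Leaving the loop early must be paired with mem_cgroup_iter_break(root, iter)
 * so the css reference held by the iterator is dropped.
 */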
/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(gfp_t gfp_mask);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* try to stabilize folio_memcg() for all the pages in a memcg */
static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
        rcu_read_lock();

        if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
                return true;

        rcu_read_unlock();
        return false;
}

static inline void mem_cgroup_unlock_pages(void)
{
        rcu_read_unlock();
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
                int idx, int val)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = page_memcg(page);
        if (memcg)
                mod_memcg_state(memcg, idx, val);
        rcu_read_unlock();
}
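/*
 * Illustrative usage note: mem_cgroup_trylock_pages()/mem_cgroup_unlock_pages()
 * above are meant to bracket code that needs folio_memcg() to stay stable
 * while charges might be moving, e.g.:
 *
 *      if (!mem_cgroup_trylock_pages(memcg))
 *              return;         (charge moving in progress, back off)
 *      ...walk pages and use folio_memcg()...
 *      mem_cgroup_unlock_pages();
 *
 * On failure no unlock is needed; the helper has already dropped the RCU
 * read lock it took.
 */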
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x = 0;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void mem_cgroup_flush_stats(void);
void mem_cgroup_flush_stats_ratelimited(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_kmem_state(p, idx, val);
        local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
                enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
                enum vm_event_item idx,
                unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
                enum vm_event_item idx)
{
        struct mem_cgroup *memcg = page_memcg(page);

        if (memcg)
                count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
{
        struct mem_cgroup *memcg = folio_memcg(folio);

        if (memcg)
                count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                enum memcg_memory_event event)
{
        bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
                          event == MEMCG_SWAP_FAIL;

        atomic_long_inc(&memcg->memory_events_local[event]);
        if (!swap_event)
                cgroup_file_notify(&memcg->events_local_file);

        do {
                atomic_long_inc(&memcg->memory_events[event]);
                if (swap_event)
                        cgroup_file_notify(&memcg->swap_events_file);
                else
                        cgroup_file_notify(&memcg->events_file);

                if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                        break;
                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));
}
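/*
 * Illustrative usage note: a charge path that fails against memory.max
 * would report it as
 *
 *      memcg_memory_event(memcg, MEMCG_MAX);
 *
 * and the loop above then propagates the count up the hierarchy on the
 * default hierarchy, unless CGRP_ROOT_MEMORY_LOCAL_EVENTS restricts events
 * to the cgroup where they originated.
 */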
static inline void memcg_memory_event_mm(struct mm_struct *mm,
                enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                gfp_t gfp_mask,
                unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT 0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
        return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return NULL;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
        return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
        return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
        return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
        return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
                struct mem_cgroup *memcg,
                unsigned long *min,
                unsigned long *low)
{
        *min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
                struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
                struct mm_struct *mm, gfp_t gfp)
{
        return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
                struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
        return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);
        return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return NULL;
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
        return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock_irq(&pgdat->__lruvec.lru_lock);
        return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
                unsigned long *flagsp)
{
        struct pglist_data *pgdat = folio_pgdat(folio);

        spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
        return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
        return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
{
        /* to match folio_memcg_rcu() */
        rcu_read_lock();
        return true;
}

static inline void mem_cgroup_unlock_pages(void)
{
        rcu_read_unlock();
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
                struct task_struct *victim, struct mem_cgroup *oom_domain)
{
        return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                int idx,
                int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                int idx,
                int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
                int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
                enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                int val)
{
        struct page *page = virt_to_head_page(p);

        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
                int val)
{
        struct page *page = virt_to_head_page(p);

        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                enum vm_event_item idx,
                unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
                enum vm_event_item idx,
                unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
                int idx)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
                enum vm_event_item idx, unsigned long nr)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                gfp_t gfp_mask,
                unsigned long *total_scanned)
{
        return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
        struct mem_cgroup *memcg;

        memcg = lruvec_memcg(lruvec);
        if (!memcg)
                return NULL;
        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
        spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
        spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
                unsigned long flags)
{
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
                struct lruvec *lruvec)
{
        return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
               lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                struct lruvec *locked_lruvec)
{
        if (locked_lruvec) {
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irq(locked_lruvec);
        }

        return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
                struct lruvec *locked_lruvec, unsigned long *flags)
{
        if (locked_lruvec) {
                if (folio_matches_lruvec(folio, locked_lruvec))
                        return locked_lruvec;

                unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
        }

        return folio_lruvec_lock_irqsave(folio, flags);
}
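/*
 * Illustrative usage note: the relock helpers above are intended for batched
 * LRU walks where consecutive folios usually belong to the same lruvec, e.g.:
 *
 *      struct lruvec *lruvec = NULL;
 *      unsigned long flags;
 *
 *      list_for_each_entry(folio, &folios, lru) {
 *              lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *              ...operate on folio under lruvec->lru_lock...
 *      }
 *      if (lruvec)
 *              unlock_page_lruvec_irqrestore(lruvec, flags);
 */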
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                unsigned long *pheadroom, unsigned long *pdirty,
                unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
                struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
                struct bdi_writeback *wb)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        memcg = folio_memcg(folio);
        if (unlikely(memcg && &memcg->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                unsigned long *pfilepages,
                unsigned long *pheadroom,
                unsigned long *pdirty,
                unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
                struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
                gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return !!memcg->tcpmem_pressure;
        do {
                if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
                int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
        return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
        return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                int order)
{
        if (memcg_kmem_online())
                return __memcg_kmem_charge_page(page, gfp, order);
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
        if (memcg_kmem_online())
                __memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}
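/*
 * Illustrative sketch, not part of this header's API: charging a single
 * kernel page by hand with the helpers above; allocations that pass
 * __GFP_ACCOUNT get this done by the page allocator itself. The helper
 * name memcg_example_alloc_accounted_page() is hypothetical.
 */
static inline struct page *memcg_example_alloc_accounted_page(gfp_t gfp)
{
        struct page *page = alloc_page(gfp);

        if (!page)
                return NULL;
        if (memcg_kmem_charge_page(page, gfp, 0)) {     /* no-op when kmem accounting is offline */
                __free_page(page);
                return NULL;
        }
        return page;    /* memcg_kmem_uncharge_page(page, 0) undoes the charge */
}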
struct mem_cgroup *mem_cgroup_from_obj(void *p);
struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_event(struct obj_cgroup *objcg,
                enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (!memcg_kmem_online())
                return;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
        return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                int order)
{
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                int order)
{
        return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
        return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
        return false;
}

static inline bool memcg_kmem_online(void)
{
        return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
        return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
        return NULL;
}

static inline void count_objcg_event(struct obj_cgroup *objcg,
                enum vm_event_item idx)
{
}

#endif /* CONFIG_MEMCG_KMEM */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
        return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
                size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
                size_t size)
{
}
#endif

#endif /* _LINUX_MEMCONTROL_H */