/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/shrinker.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

struct memcg_vmstats_percpu;
struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	atomic_t generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	/* Keep the read-only fields at the start */
	struct mem_cgroup *memcg;	/* Back pointer, we cannot use container_of */

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats *lruvec_stats;
	struct shrinker_info __rcu *shrinker_info;

#ifdef CONFIG_MEMCG_V1
	/*
	 * Memcg-v1-only fields sit in the middle, as a buffer between the
	 * read-mostly fields above and the often-updated fields below, to
	 * avoid false sharing. If the v1 fields are not present, explicit
	 * padding is needed instead.
	 */

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which the
					 * soft limit is exceeded */
	bool on_tree;
#else
	CACHELINE_PADDING(_pad1_);
#endif

	/* Fields which get updated often at the end. */
	struct lruvec lruvec;
	CACHELINE_PADDING(_pad2_);
	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
	struct mem_cgroup_reclaim_iter iter;
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to the threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[] __counted_by(size);
};
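
/*
 * Illustrative example (not part of the API, numbers invented): with the
 * entries[] thresholds sorted in ascending order, current_threshold indexes
 * the largest threshold not exceeding usage. For thresholds {1G, 2G, 4G}
 * and a usage of 3G, current_threshold is 1 (the 2G entry); usage crossing
 * 4G would advance it to 2 and signal entries[2].eventfd.
 */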

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember the four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of the foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};
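
/*
 * Illustrative sketch (assumed caller context, not a kernel API excerpt):
 * byte-sized kernel allocations are charged against an obj_cgroup with
 * obj_cgroup_charge()/obj_cgroup_uncharge(), declared later in this file:
 *
 *	if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *		return -ENOMEM;
 *	...
 *	obj_cgroup_uncharge(objcg, size);
 *
 * Leftover sub-page charges accumulate in nr_charged_bytes.
 */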

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* registered local peak watchers */
	struct list_head memory_peaks;
	struct list_head swap_peaks;
	spinlock_t peaks_lock;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#ifdef CONFIG_ZSWAP
	unsigned long zswap_max;

	/*
	 * Prevent pages from this memcg from being written back from zswap to
	 * swap, and from being swapped out on zswap store failures.
	 */
	bool zswap_writeback;
#endif

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one of them?
	 */
	bool oom_group;

	int swappiness;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* memory.stat */
	struct memcg_vmstats *vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	/*
	 * Hint of reclaim pressure for socket memory management. Note
	 * that this indicator should NOT be used in legacy cgroup mode
	 * where socket memory is accounted/charged separately.
	 */
	unsigned long socket_pressure;

	int kmemcg_id;
	/*
	 * memcg->objcg is wiped out as part of the objcg reparenting
	 * process. memcg->orig_objcg preserves a pointer (and a reference)
	 * to the original objcg until the end of the memcg's life.
	 */
	struct obj_cgroup __rcu *objcg;
	struct obj_cgroup *orig_objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN_WALKS_MMU
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

#ifdef CONFIG_MEMCG_V1
	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	struct memcg1_events_percpu __percpu *events_percpu;

	unsigned long soft_limit;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	/* OOM-Killer disable */
	int oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial.
 * TODO: maybe bigger numbers are needed on large machines, or the value
 * could be set dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to a slabobj_ext vector */
	MEMCG_DATA_OBJEXTS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define __FIRST_OBJEXT_FLAG	__NR_MEMCG_DATA_FLAGS

#else /* CONFIG_MEMCG */

#define __FIRST_OBJEXT_FLAG	(1UL << 0)

#endif /* CONFIG_MEMCG */

enum objext_flags {
	/* slabobj_ext vector failed to allocate */
	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
	/* the next bit after the last actual flag */
	__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};

#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)

#ifdef CONFIG_MEMCG

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
	return READ_ONCE(objcg->memcg);
}
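
/*
 * Illustrative usage (modeled on get_mem_cgroup_from_objcg() below):
 * because objcg->memcg may be swapped to the parent at any time, look
 * it up under rcu_read_lock() and pin it before dropping the lock:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	if (css_tryget(&memcg->css))
 *		... memcg is now safe to use outside the RCU section ...
 *	rcu_read_unlock();
 */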

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the memcg
 * associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

/*
 * folio_memcg_charged - Check if a folio is charged to a memory cgroup.
 * @folio: Pointer to the folio.
 *
 * Returns true if the folio is charged to a memory cgroup, otherwise
 * returns false.
 */
static inline bool folio_memcg_charged(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return __folio_objcg(folio) != NULL;
	return __folio_memcg(folio) != NULL;
}
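
/*
 * Illustrative sketch (assumed caller context): the folio lock is one of
 * the conditions listed above that stabilizes the folio<->memcg binding,
 * so a locked folio can have its memcg read safely:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	... use memcg while the folio stays locked ...
 *	folio_unlock(folio);
 */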

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. Unlike folio_memcg(), this function can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer, a slabobj_ext vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the memcg
 * associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}

/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We special-case this here because
	 * mem_cgroup_calculate_protection() is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values of 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 *  A.elow = A.low
	 *  B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 *  C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 *  A.elow = 0
	 *  B.elow = B.low
	 *  C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 *  B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection().
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
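
/*
 * Illustrative reclaim-side usage (patterned after shrink_node_memcgs()
 * in mm/vmscan.c, where sc is vmscan's scan_control): memory.min is
 * honored unconditionally, memory.low only until reclaim gets desperate:
 *
 *	if (mem_cgroup_below_min(target_memcg, memcg)) {
 *		continue;	// hard protection, always skip
 *	} else if (mem_cgroup_below_low(target_memcg, memcg)) {
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;
 *			continue;	// soft protection, skip for now
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *	}
 */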

void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
				  long nr_pages);

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);

void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_folios(folios);
}

void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on the folio's memcg binding being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_current(void);

struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif
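
/*
 * Illustrative sketch (assumed caller context): the folio_lruvec_lock*()
 * family returns the folio's lruvec with its lru_lock held; pair each with
 * the matching unlock_page_lruvec*() helper defined later in this file:
 *
 *	struct lruvec *lruvec = folio_lruvec_lock_irq(folio);
 *	... manipulate the folio's LRU state ...
 *	unlock_page_lruvec_irq(lruvec);
 */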

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	if (objcg)
		percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget_online(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);
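
/*
 * Illustrative hierarchy walk (the documented usage pattern of
 * mem_cgroup_iter() in mm/memcontrol.c): each returned position holds a
 * reference, which the next call drops; an early break must call
 * mem_cgroup_iter_break() to release the last reference:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (done) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */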

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(gfp_t gfp_mask);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
		       int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = folio_memcg(page_folio(page));
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx);

void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
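
/*
 * Illustrative sketch (assumed reader context): memcg stats are updated
 * per-CPU and flushed lazily, so readers that need fresh numbers flush
 * first (cf. memory_stat_format() in mm/memcontrol.c):
 *
 *	mem_cgroup_flush_stats(memcg);
 *	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 */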

void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
					 enum vm_event_item idx, unsigned long count)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	count_memcg_events_mm(mm, idx, 1);
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, int old_order, int new_order);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline bool folio_memcg_charged(struct folio *folio)
{
	return false;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline void mem_cgroup_commit_charge(struct folio *folio,
					    struct mem_cgroup *memcg)
{
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
						gfp_t gfp, long nr_pages)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
		struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}

static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
					    unsigned int nr_pages)
{
}

static inline void mem_cgroup_replace_folio(struct folio *old,
					    struct folio *new)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
						       unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     enum memcg_stat_item idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
					 enum vm_event_item idx, unsigned long count)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, int old_order, int new_order)
{
}
#endif /* CONFIG_MEMCG */

/*
 * Extended information for slab objects stored as an array in page->memcg_data
 * if MEMCG_DATA_OBJEXTS is set.
 */
struct slabobj_ext {
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	union codetag_ref ref;
#endif
} __aligned(8);

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff the folio's lruvec is already locked */
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec **lruvecp, unsigned long *flags)
{
	if (*lruvecp) {
		if (folio_matches_lruvec(folio, *lruvecp))
			return;

		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
	}

	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}
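
/*
 * Illustrative batching pattern (cf. folios_put_refs() in mm/swap.c):
 * when processing a batch of folios, keep the lruvec lock across folios
 * that share a lruvec and only cycle it when the lruvec changes:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	for (i = 0; i < folio_batch_count(folios); i++) {
 *		struct folio *folio = folios->folios[i];
 *
 *		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 *		... operate on the folio's LRU state ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */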

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
#ifdef CONFIG_MEMCG_V1
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return !!memcg->tcpmem_pressure;
#endif /* CONFIG_MEMCG_V1 */
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif
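
/*
 * Illustrative consumer (patterned after sk_under_memory_pressure() in
 * include/net/sock.h): networking treats a socket as under pressure when
 * its memcg reports pressure anywhere up the hierarchy:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;
 */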

#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

/*
 * The returned objcg pointer is safe to use without additional
 * protection within a scope. The scope is defined either by
 * the current task (similar to the "current" global variable)
 * or by a set_active_memcg() pair.
 * Use obj_cgroup_get() to get a reference if the pointer
 * needs to be used outside of the local scope.
 */
struct obj_cgroup *current_obj_cgroup(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
	struct obj_cgroup *objcg = current_obj_cgroup();

	if (objcg)
		obj_cgroup_get(objcg);

	return objcg;
}
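
/*
 * Illustrative sketch (assumed caller context): to charge on behalf of a
 * memcg other than current's, bracket the lookup with set_active_memcg()
 * from linux/sched/mm.h:
 *
 *	struct mem_cgroup *old = set_active_memcg(memcg);
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	set_active_memcg(old);
 *	... charge against objcg, then obj_cgroup_put(objcg) ...
 */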

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing a memcg's kmemcg_id, used for getting the
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

#endif /* CONFIG_MEMCG */

#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	return true;
}
#endif

/* Cgroup v1-related declarations */

#ifdef CONFIG_MEMCG_V1
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned);

bool mem_cgroup_oom_synchronize(bool wait);

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned)
{
	return 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

#endif /* CONFIG_MEMCG_V1 */

#endif /* _LINUX_MEMCONTROL_H */