/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
#include <linux/shrinker.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_KMEM,
	MEMCG_ZSWAP_B,
	MEMCG_ZSWAPPED,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_SOCK_THROTTLED,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

struct memcg_vmstats_percpu;
struct memcg1_events_percpu;
struct memcg_vmstats;
struct lruvec_stats_percpu;
struct lruvec_stats;

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	atomic_t generation;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	/* Keep the read-only fields at the start */
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of	   */

	struct lruvec_stats_percpu __percpu	*lruvec_stats_percpu;
	struct lruvec_stats			*lruvec_stats;
	struct shrinker_info __rcu	*shrinker_info;

#ifdef CONFIG_MEMCG_V1
	/*
	 * Memcg-v1 only stuff in middle as buffer between read mostly fields
	 * and update often fields to avoid false sharing. If v1 stuff is
	 * not present, an explicit padding is needed.
	 */

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
#else
	CACHELINE_PADDING(_pad1_);
#endif

	/* Fields which get updated often at the end. */
	struct lruvec		lruvec;
	CACHELINE_PADDING(_pad2_);
	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
	struct mem_cgroup_reclaim_iter	iter;

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
	/* slab stats for nmi context */
	atomic_t	slab_reclaimable;
	atomic_t	slab_unreclaimable;
#endif
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[] __counted_by(size);
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* registered local peak watchers */
	struct list_head memory_peaks;
	struct list_head swap_peaks;
	spinlock_t	 peaks_lock;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

#ifdef CONFIG_ZSWAP
	unsigned long zswap_max;

	/*
	 * Prevent pages from this memcg from being written back from zswap to
	 * swap, and from being swapped out on zswap store failures.
	 */
	bool zswap_writeback;
#endif

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it kills one of them?
	 */
	bool oom_group;

	int swappiness;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* memory.stat */
	struct memcg_vmstats	*vmstats;

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
	/* MEMCG_KMEM for nmi context */
	atomic_t		kmem_stat;
#endif
	/*
	 * Hint of reclaim pressure for socket memory management. Note
	 * that this indicator should NOT be used in legacy cgroup mode
	 * where socket memory is accounted/charged separately.
	 */
	u64			socket_pressure;
#if BITS_PER_LONG < 64
	seqlock_t		socket_pressure_seqlock;
#endif
	int kmemcg_id;
	/*
	 * memcg->objcg is wiped out as a part of the objcg reparenting
	 * process. memcg->orig_objcg preserves a pointer (and a reference)
	 * to the original objcg until the end of the memcg's life.
	 */
	struct obj_cgroup __rcu	*objcg;
	struct obj_cgroup	*orig_objcg;
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;

	struct memcg_vmstats_percpu __percpu	*vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

#ifdef CONFIG_LRU_GEN_WALKS_MMU
	/* per-memcg mm_struct list */
	struct lru_gen_mm_list mm_list;
#endif

#ifdef CONFIG_MEMCG_V1
	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	struct memcg1_events_percpu __percpu *events_percpu;

	unsigned long soft_limit;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	/* OOM-Killer disable */
	int oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial.
 * TODO: it may be necessary to use larger numbers on big-iron machines, or to
 * size this dynamically based on the workload.
 */
#define MEMCG_CHARGE_BATCH 64U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to a slabobj_ext vector */
	MEMCG_DATA_OBJEXTS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS  = (1UL << 2),
};

#define __OBJEXTS_ALLOC_FAIL	MEMCG_DATA_OBJEXTS
#define __FIRST_OBJEXT_FLAG	__NR_MEMCG_DATA_FLAGS

#else /* CONFIG_MEMCG */

#define __OBJEXTS_ALLOC_FAIL	(1UL << 0)
#define __FIRST_OBJEXT_FLAG	(1UL << 0)

#endif /* CONFIG_MEMCG */

enum objext_flags {
	/*
	 * Use bit 0 with zero other bits to signal that slabobj_ext vector
	 * failed to allocate. The same bit 0 with valid upper bits means
	 * MEMCG_DATA_OBJEXTS.
	 */
	OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
	/* slabobj_ext vector allocated with kmalloc_nolock() */
	OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
	/* the next bit after the last actual flag */
	__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};

#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)

#ifdef CONFIG_MEMCG

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
	return READ_ONCE(objcg->memcg);
}

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

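/*
 * Illustrative sketch (editor-added, not part of the upstream header): using
 * folio_memcg() safely. The helper name "example_folio_in_memcg" is made up;
 * the point is that the caller must pin the folio/memcg binding first.
 */
static inline bool example_folio_in_memcg(struct folio *folio,
					  struct mem_cgroup *memcg)
{
	/*
	 * Caller is assumed to hold the folio lock, have the folio isolated
	 * from the LRU, or own the only reference, so that the binding read
	 * by folio_memcg() cannot change under us.
	 */
	return folio_memcg(folio) == memcg;
}
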
/*
 * folio_memcg_charged - If a folio is charged to a memory cgroup.
 * @folio: Pointer to the folio.
 *
 * Returns true if folio is charged to a memory cgroup, otherwise returns false.
 */
static inline bool folio_memcg_charged(struct folio *folio)
{
	return folio->memcg_data != 0;
}

/*
 * folio_memcg_check - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function unlike folio_memcg() can take any folio
 * as an argument. It has to be used in cases when it's not known if a folio
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	/*
	 * Because folio->memcg_data might be changed asynchronously
	 * for slabs, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJEXTS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	if (PageTail(page))
		return NULL;
	return folio_memcg_check((struct folio *)page);
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
retry:
	memcg = obj_cgroup_memcg(objcg);
	if (unlikely(!css_tryget(&memcg->css)))
		goto retry;
	rcu_read_unlock();

	return memcg;
}

/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_calculate_protection is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would be violating the protection.
	 *
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection. The target memcg's protection is ignored, see
	 * mem_cgroup_calculate_protection() and mem_cgroup_protection()
	 */
	return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
		memcg == target;
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	if (mem_cgroup_unprotected(target, memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);

int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
				   gfp_t gfp, swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_folios(folios);
}

void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_current(void);

struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	if (objcg)
		percpu_ref_put(&objcg->refcnt);
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget(&memcg->css);
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
	return !memcg || css_tryget_online(&memcg->css);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			   int (*)(struct task_struct *, void *), void *arg);

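/*
 * Illustrative sketch (editor-added, not part of the upstream header): the
 * canonical mem_cgroup_iter() walk over a hierarchy. Leaving the loop early
 * must go through mem_cgroup_iter_break() so the iterator's css reference is
 * released. "example_count_descendants" is a hypothetical helper.
 */
static inline unsigned int example_count_descendants(struct mem_cgroup *root,
						     unsigned int max)
{
	struct mem_cgroup *memcg;
	unsigned int nr = 0;

	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
		if (++nr >= max) {
			mem_cgroup_iter_break(root, memcg);
			break;
		}
	}

	return nr;
}
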
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
}

struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return mem_cgroup_from_css(memcg->css.parent);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void __mem_cgroup_handle_over_high(gfp_t gfp_mask);

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
	if (unlikely(current->memcg_nr_pages_over_high))
		__mem_cgroup_handle_over_high(gfp_mask);
}

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

/* idx can be of type enum memcg_stat_item or node_stat_item */
void mod_memcg_state(struct mem_cgroup *memcg,
		     enum memcg_stat_item idx, int val);

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = folio_memcg(page_folio(page));
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
unsigned long lruvec_page_state_local(struct lruvec *lruvec,
				      enum node_stat_item idx);

void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);

void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			unsigned long count);

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	if (memcg)
		count_memcg_events(memcg, idx, nr);
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
					 enum vm_event_item idx,
					 unsigned long count)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	count_memcg_events_mm(mm, idx, 1);
}

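/*
 * Illustrative sketch (editor-added, not part of the upstream header): vm
 * events are typically counted both globally and against the owning memcg.
 * "example_note_activation" is a made-up name; PGACTIVATE is just one of the
 * enum vm_event_item values.
 */
static inline void example_note_activation(struct folio *folio)
{
	count_vm_events(PGACTIVATE, folio_nr_pages(folio));
	count_memcg_folio_events(folio, PGACTIVATE, folio_nr_pages(folio));
}
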
void __memcg_memory_event(struct mem_cgroup *memcg,
			  enum memcg_memory_event event, bool allow_spinning);

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	__memcg_memory_event(memcg, event, true);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *first, unsigned order);
void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
			    unsigned new_order);

static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;
	u64 id;

	if (mem_cgroup_disabled())
		return 0;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (!memcg)
		memcg = root_mem_cgroup;
	id = cgroup_id(memcg->css.cgroup);
	rcu_read_unlock();
	return id;
}

extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0

#define root_mem_cgroup (NULL)

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline bool folio_memcg_charged(struct folio *folio)
{
	return false;
}

static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
					  struct mem_cgroup *memcg)
{
	return true;
}
static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
					struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}

static inline void mem_cgroup_replace_folio(struct folio *old,
					    struct folio *new)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
}

static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
						       unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

#ifdef CONFIG_SHRINKER_DEBUG
static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
{
	return NULL;
}
#endif

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   enum memcg_stat_item idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					enum memcg_stat_item idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_folio_events(struct folio *folio,
		enum vm_event_item idx, unsigned long nr)
{
}

static inline void count_memcg_events_mm(struct mm_struct *mm,
					 enum vm_event_item idx,
					 unsigned long count)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *first, unsigned order)
{
}

static inline void folio_split_memcg_refs(struct folio *folio,
					  unsigned old_order,
					  unsigned new_order)
{
}

static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
	return 0;
}

static inline int mem_cgroup_init(void) { return 0; }
#endif /* CONFIG_MEMCG */

/*
 * Extended information for slab objects stored as an array in page->memcg_data
 * if MEMCG_DATA_OBJEXTS is set.
 */
struct slabobj_ext {
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	union codetag_ref ref;
#endif
} __aligned(8);

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable folio->memcg binding, see folio_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
						     struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff folio's lruvec locked */
static inline void folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec **lruvecp, unsigned long *flags)
{
	if (*lruvecp) {
		if (folio_matches_lruvec(folio, *lruvecp))
			return;

		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
	}

	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}

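/*
 * Illustrative sketch (editor-added, not part of the upstream header): batch
 * processing with the relock helpers above. The lruvec lock is only cycled
 * when consecutive folios belong to different lruvecs. The function name and
 * the per-folio work are hypothetical.
 */
static inline void example_touch_folios(struct folio **folios, unsigned int nr)
{
	struct lruvec *lruvec = NULL;
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		/* Takes folios[i]'s lruvec lock unless it is already held. */
		folio_lruvec_relock_irqsave(folios[i], &lruvec, &flags);
		/* ... operate on folios[i] under lruvec->lru_lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}
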
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	memcg = folio_memcg(folio);
	if (unlikely(memcg && &memcg->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)

void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
			  gfp_t gfp_mask);
void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);

#if BITS_PER_LONG < 64
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
	u64 val = get_jiffies_64() + HZ;
	unsigned long flags;

	write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
	memcg->socket_pressure = val;
	write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
}

static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(&memcg->socket_pressure_seqlock);
		val = memcg->socket_pressure;
	} while (read_seqretry(&memcg->socket_pressure_seqlock, seq));

	return val;
}
#else
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
	WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
}

static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
{
	return READ_ONCE(memcg->socket_pressure);
}
#endif

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);

static inline int shrinker_id(struct shrinker *shrinker)
{
	return shrinker->id;
}
#else
#define mem_cgroup_sockets_enabled 0

static inline void mem_cgroup_sk_alloc(struct sock *sk)
{
}

static inline void mem_cgroup_sk_free(struct sock *sk)
{
}

static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
{
}

static inline bool mem_cgroup_sk_charge(const struct sock *sk,
					unsigned int nr_pages,
					gfp_t gfp_mask)
{
	return false;
}

static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
					  unsigned int nr_pages)
{
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}

static inline int shrinker_id(struct shrinker *shrinker)
{
	return -1;
}
#endif

#ifdef CONFIG_MEMCG
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

/*
 * The returned objcg pointer is safe to use without additional
 * protection within a scope. The scope is defined either by
 * the current task (similar to the "current" global variable)
 * or by a set_active_memcg() pair.
 * Please use obj_cgroup_get() to get a reference if the pointer
 * needs to be used outside of the local scope.
 */
struct obj_cgroup *current_obj_cgroup(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);

static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
{
	struct obj_cgroup *objcg = current_obj_cgroup();

	if (objcg)
		obj_cgroup_get(objcg);

	return objcg;
}

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_bpf_enabled_key;
static inline bool memcg_bpf_enabled(void)
{
	return static_branch_likely(&memcg_bpf_enabled_key);
}

extern struct static_key_false memcg_kmem_online_key;

static inline bool memcg_kmem_online(void)
{
	return static_branch_likely(&memcg_kmem_online_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_online())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_online())
		__memcg_kmem_uncharge_page(page, order);
}

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	struct mem_cgroup *memcg;

	if (!memcg_kmem_online())
		return;

	rcu_read_lock();
	memcg = obj_cgroup_memcg(objcg);
	count_memcg_events(memcg, idx, count);
	rcu_read_unlock();
}

bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);

void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);

static inline bool memcg_is_dying(struct mem_cgroup *memcg)
{
	return memcg ? css_is_dying(&memcg->css) : false;
}

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}

static inline bool memcg_bpf_enabled(void)
{
	return false;
}

static inline bool memcg_kmem_online(void)
{
	return false;
}

static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
{
	return NULL;
}

static inline void count_objcg_events(struct obj_cgroup *objcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline ino_t page_cgroup_ino(struct page *page)
{
	return 0;
}

static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
{
	return true;
}

static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
{
}

static inline bool memcg_is_dying(struct mem_cgroup *memcg)
{
	return false;
}
#endif /* CONFIG_MEMCG */

#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
#else
static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
{
	return true;
}
static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
					   size_t size)
{
}
static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
					     size_t size)
{
}
static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
{
	/* if zswap is disabled, do not block pages going to the swapping device */
	return true;
}
#endif


/* Cgroup v1-related declarations */

#ifdef CONFIG_MEMCG_V1
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned);

bool mem_cgroup_oom_synchronize(bool wait);

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

void memcg1_swapout(struct folio *folio, swp_entry_t entry);
void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);

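/*
 * Illustrative sketch (editor-added, not part of the upstream header): how
 * arch page-fault code is expected to bracket a user fault for cgroup1 OOM
 * handling. "example_user_fault" is hypothetical and heavily simplified
 * compared to the real fault handlers.
 */
static inline vm_fault_t example_user_fault(struct vm_area_struct *vma,
					    unsigned long address,
					    unsigned int flags,
					    struct pt_regs *regs)
{
	vm_fault_t ret;

	mem_cgroup_enter_user_fault();
	ret = handle_mm_fault(vma, address, flags, regs);
	mem_cgroup_exit_user_fault();

	/*
	 * If the charge path recorded a memcg OOM for this task, finish it
	 * now that no locks are held (the real kernel does this from
	 * pagefault_out_of_memory()).
	 */
	if ((ret & VM_FAULT_OOM) && task_in_memcg_oom(current))
		mem_cgroup_oom_synchronize(true);

	return ret;
}
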
#else /* CONFIG_MEMCG_V1 */
static inline
unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
					gfp_t gfp_mask,
					unsigned long *total_scanned)
{
	return 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
{
}

#endif /* CONFIG_MEMCG_V1 */

#endif /* _LINUX_MEMCONTROL_H */