include/linux/mm_inline.h at v6.7-rc1
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio, or an
 * otherwise RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
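/*
 * Illustrative note (not part of the original header): with the
 * enum lru_list layout in include/linux/mmzone.h, where LRU_ACTIVE == 1
 * and LRU_FILE == 2, the arithmetic in folio_lru_list() indexes the
 * per-lruvec lists as:
 *
 *	anon, inactive:	LRU_INACTIVE_ANON == 0
 *	anon, active:	LRU_INACTIVE_ANON + LRU_ACTIVE == 1
 *	file, inactive:	LRU_INACTIVE_FILE == 2
 *	file, active:	LRU_INACTIVE_FILE + LRU_ACTIVE == 3
 *
 * e.g. an active page cache folio sorts onto lruvec->lists[LRU_ACTIVE_FILE].
 */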
#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
	 * total number of accesses is N>1, since N=0,1 both map to the first
	 * tier. lru_tier_from_refs() will account for this off-by-one. Also see
	 * the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
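/*
 * Worked example (not part of the original header), assuming
 * MAX_NR_GENS == 4: with max_seq == 7 and min_seq[type] == 5, the live
 * generations are seqs 5..7, i.e. gens 1, 2 and 3 after the modulo in
 * lru_gen_from_seq(). Per lru_gen_is_active(), gens 3 (seq 7) and
 * 2 (seq 6) count as active. Moving a 1-page folio from gen 1 to gen 3
 * therefore takes the "promotion" branch above: nr_pages[1] and
 * nr_pages[3] are adjusted by -1 and +1, the inactive list size shrinks
 * by one page and the active list size grows by one.
 */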
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}
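/*
 * Sketch (not part of the original header) of the flag encoding that
 * lru_gen_add_folio() and lru_gen_del_folio() round-trip: the generation
 * is stored off by one in folio->flags, so an all-zero field means "not
 * on a multi-gen LRU list" and folio_lru_gen() returns -1:
 *
 *	store:	flags = (gen + 1UL) << LRU_GEN_PGOFF;
 *	load:	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
 *
 * On deletion outside of reclaim, PG_active is set again for folios in
 * an active generation, so folio_migrate_flags() can carry the activity
 * information over to a new folio.
 */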
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

#ifdef CONFIG_ANON_VMA_NAME
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}
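/*
 * Usage sketch (not part of the original header): these helpers back
 * userspace naming of anonymous mappings, e.g.:
 *
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, len, "my heap");
 *
 * On fork or VMA split, dup_anon_vma_name() shares the existing
 * struct anon_vma_name by reference count instead of copying the
 * string; anon_vma_name_reuse() falls back to a fresh allocation only
 * when the kref is about to saturate.
 */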
static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
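/*
 * Sketch (not part of the original header) of how a hypothetical unmap
 * path would pair these helpers, mirroring the ordering diagram in
 * inc_tlb_flush_pending():
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	... clear PTEs ...
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * A racing thread that takes the same PTL and observes the cleared PTEs
 * is then guaranteed to observe mm_tlb_flush_pending() != 0, and must
 * not rely on a possibly stale TLB entry.
 */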
#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & PTE_MARKER_POISONED;

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
#endif

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a TLB flush is not
 * needed; e.g., when the pte was cleared, the caller should have taken care
 * of the TLB flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte. There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}

#endif
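/*
 * Usage sketch (not part of the original header): copy_pte_marker() is
 * meant for fork-style copies of non-present entries. A hypothetical
 * caller copying entry from src_vma into dst_vma might do:
 *
 *	if (is_pte_marker_entry(entry)) {
 *		pte_marker marker = copy_pte_marker(entry, dst_vma);
 *
 *		if (marker)
 *			set_pte_at(dst_mm, addr, dst_pte,
 *				   make_pte_marker(marker));
 *	}
 *
 * so a poison marker is always propagated, while a uffd-wp marker is
 * dropped when dst_vma is not registered for uffd write-protect.
 */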