/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/khugepaged.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/pagewalk.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <linux/tracepoint-defs.h>

/* Internal core VMA manipulation functions. */
#include "vma.h"

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Unlike WARN_ON_ONCE(), no warning is issued when __GFP_NOWARN
 * is specified.
 */
#define WARN_ON_ONCE_GFP(cond, gfp) ({ \
	static bool __section(".data..once") __warned; \
	int __ret_warn_once = !!(cond); \
	\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true; \
		WARN_ON(1); \
	} \
	unlikely(__ret_warn_once); \
})
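
/*
 * Example (a sketch modeled on the page allocator's order sanity check;
 * the surrounding context is abbreviated):
 *
 *	if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
 *		return NULL;
 *
 * The warning fires at most once, and is suppressed entirely when the
 * caller passed __GFP_NOWARN.
 */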

void page_writeback_init(void);

/*
 * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
 * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit
 * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
 * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
 */
#define ENTIRELY_MAPPED		0x800000
#define FOLIO_PAGES_MAPPED	(ENTIRELY_MAPPED - 1)

/*
 * Flags passed to __show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

/*
 * How many individual pages have an elevated _mapcount. Excludes
 * the folio's entire_mapcount.
 *
 * Don't use this function outside of debugging code.
 */
static inline int folio_nr_pages_mapped(const struct folio *folio)
{
	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
}

/*
 * Retrieve the first entry of a folio based on a provided entry within the
 * folio. We cannot rely on folio->swap as there is no guarantee that it has
 * been initialized. Used for calling arch_swap_restore()
 */
static inline swp_entry_t folio_swap(swp_entry_t entry,
		const struct folio *folio)
{
	swp_entry_t swap = {
		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
	};

	return swap;
}
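
/*
 * Example (an illustrative sketch; the values assume a naturally aligned
 * order-2 folio occupying swap offsets 8-11):
 *
 *	swp_entry_t entry = { .val = 10 };
 *
 * folio_swap(entry, folio).val is ALIGN_DOWN(10, 4) == 8, i.e. the entry
 * of the folio's first page, whichever page @entry pointed at.
 */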

static inline void *folio_raw_mapping(const struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

/*
 * This is a file-backed mapping, and is about to be memory mapped - invoke its
 * mmap hook and safely handle error conditions. On error, the VMA's hooks are
 * replaced with safe dummy hooks, as the mapping is left inconsistent.
 *
 * @file: File which backs the mapping.
 * @vma: VMA which we are mapping.
 *
 * Returns: 0 if success, error otherwise.
 */
static inline int mmap_file(struct file *file, struct vm_area_struct *vma)
{
	int err = call_mmap(file, vma);

	if (likely(!err))
		return 0;

	/*
	 * OK, we tried to call the file hook for mmap(), but an error
	 * arose. The mapping is in an inconsistent state and we must not
	 * invoke any further hooks on it.
	 */
	vma->vm_ops = &vma_dummy_vm_ops;

	return err;
}
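
/*
 * Example (a simplified sketch of a caller in the mmap() path; the
 * unwind label is illustrative, error handling is abbreviated):
 *
 *	error = mmap_file(file, vma);
 *	if (error)
 *		goto unmap_and_free_vma;
 *
 * On failure the dummy vm_ops installed above make any later
 * vma_close() a safe no-op.
 */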

/*
 * If the VMA has a close hook then close it, and since closing it might leave
 * it in an inconsistent state which makes the use of any hooks suspect, clear
 * them down by installing dummy empty hooks.
 */
static inline void vma_close(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_ops->close(vma);

		/*
		 * The mapping is in an inconsistent state, and no further hooks
		 * may be invoked upon it.
		 */
		vma->vm_ops = &vma_dummy_vm_ops;
	}
}

#ifdef CONFIG_MMU

/* Flags for folio_pte_batch(). */
typedef int __bitwise fpb_t;

/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */
#define FPB_IGNORE_DIRTY		((__force fpb_t)BIT(0))

/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */
#define FPB_IGNORE_SOFT_DIRTY		((__force fpb_t)BIT(1))

static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
{
	if (flags & FPB_IGNORE_DIRTY)
		pte = pte_mkclean(pte);
	if (likely(flags & FPB_IGNORE_SOFT_DIRTY))
		pte = pte_clear_soft_dirty(pte);
	return pte_wrprotect(pte_mkold(pte));
}

/**
 * folio_pte_batch - detect a PTE batch for a large folio
 * @folio: The large folio to detect a PTE batch for.
 * @addr: The user virtual address the first page is mapped at.
 * @start_ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @max_nr: The maximum number of table entries to consider.
 * @flags: Flags to modify the PTE batch semantics.
 * @any_writable: Optional pointer to indicate whether any entry except the
 *		  first one is writable.
 * @any_young: Optional pointer to indicate whether any entry except the
 *		  first one is young.
 * @any_dirty: Optional pointer to indicate whether any entry except the
 *		  first one is dirty.
 *
 * Detect a PTE batch: consecutive (present) PTEs that map consecutive
 * pages of the same large folio.
 *
 * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN,
 * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and
 * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY).
 *
 * start_ptep must map any page of the folio. max_nr must be at least one and
 * must be limited by the caller so scanning cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
		bool *any_writable, bool *any_young, bool *any_dirty)
{
	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
	const pte_t *end_ptep = start_ptep + max_nr;
	pte_t expected_pte, *ptep;
	bool writable, young, dirty;
	int nr;

	if (any_writable)
		*any_writable = false;
	if (any_young)
		*any_young = false;
	if (any_dirty)
		*any_dirty = false;

	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);

	nr = pte_batch_hint(start_ptep, pte);
	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
	ptep = start_ptep + nr;

	while (ptep < end_ptep) {
		pte = ptep_get(ptep);
		if (any_writable)
			writable = !!pte_write(pte);
		if (any_young)
			young = !!pte_young(pte);
		if (any_dirty)
			dirty = !!pte_dirty(pte);
		pte = __pte_batch_clear_ignored(pte, flags);

		if (!pte_same(pte, expected_pte))
			break;

		/*
		 * Stop immediately once we have reached the end of the folio.
		 * In corner cases the next PFN might fall into a different
		 * folio.
		 */
		if (pte_pfn(pte) >= folio_end_pfn)
			break;

		if (any_writable)
			*any_writable |= writable;
		if (any_young)
			*any_young |= young;
		if (any_dirty)
			*any_dirty |= dirty;

		nr = pte_batch_hint(ptep, pte);
		expected_pte = pte_advance_pfn(expected_pte, nr);
		ptep += nr;
	}

	return min(ptep - start_ptep, max_nr);
}
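
/*
 * Example (a sketch of a typical caller, such as the madvise()/zap
 * paths; locking and the surrounding scan loop are omitted):
 *
 *	const fpb_t flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
 *	int max_nr = (end - addr) >> PAGE_SHIFT;
 *	int nr = folio_pte_batch(folio, addr, pte, ptent, max_nr,
 *				 flags, NULL, NULL, NULL);
 *
 * The caller can then operate on @nr PTEs at once instead of one at a
 * time, e.g. advancing with addr += nr * PAGE_SIZE and pte += nr.
 */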

/**
 * pte_move_swp_offset - Move the swap entry offset field of a swap pte
 *	 forward or backward by delta
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 * @delta: The direction and the offset we are moving; forward if delta
 *	 is positive; backward if delta is negative
 *
 * Moves the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_move_swp_offset(pte_t pte, long delta)
{
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry),
						   (swp_offset(entry) + delta)));

	if (pte_swp_soft_dirty(pte))
		new = pte_swp_mksoft_dirty(new);
	if (pte_swp_exclusive(pte))
		new = pte_swp_mkexclusive(new);
	if (pte_swp_uffd_wp(pte))
		new = pte_swp_mkuffd_wp(new);

	return new;
}


/**
 * pte_next_swp_offset - Increment the swap entry offset field of a swap pte.
 * @pte: The initial pte state; is_swap_pte(pte) must be true and
 *	 non_swap_entry() must be false.
 *
 * Increments the swap offset, while maintaining all other fields, including
 * swap type, and any swp pte bits. The resulting pte is returned.
 */
static inline pte_t pte_next_swp_offset(pte_t pte)
{
	return pte_move_swp_offset(pte, 1);
}

/**
 * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries
 * @start_ptep: Page table pointer for the first entry.
 * @max_nr: The maximum number of table entries to consider.
 * @pte: Page table entry for the first entry.
 *
 * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs
 * containing swap entries all with consecutive offsets and targeting the same
 * swap type, all with matching swp pte bits.
 *
 * max_nr must be at least one and must be limited by the caller so scanning
 * cannot exceed a single page table.
 *
 * Return: the number of table entries in the batch.
 */
static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte)
{
	pte_t expected_pte = pte_next_swp_offset(pte);
	const pte_t *end_ptep = start_ptep + max_nr;
	swp_entry_t entry = pte_to_swp_entry(pte);
	pte_t *ptep = start_ptep + 1;
	unsigned short cgroup_id;

	VM_WARN_ON(max_nr < 1);
	VM_WARN_ON(!is_swap_pte(pte));
	VM_WARN_ON(non_swap_entry(entry));

	cgroup_id = lookup_swap_cgroup_id(entry);
	while (ptep < end_ptep) {
		pte = ptep_get(ptep);

		if (!pte_same(pte, expected_pte))
			break;
		if (lookup_swap_cgroup_id(pte_to_swp_entry(pte)) != cgroup_id)
			break;
		expected_pte = pte_next_swp_offset(expected_pte);
		ptep++;
	}

	return ptep - start_ptep;
}
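
/*
 * Example (a sketch in the spirit of the MADV_FREE path, which frees a
 * whole batch of swap entries at once; details omitted):
 *
 *	if (!pte_present(ptent) && !non_swap_entry(entry)) {
 *		int max_nr = (end - addr) / PAGE_SIZE;
 *		int nr = swap_pte_batch(pte, max_nr, ptent);
 *
 *		free_swap_and_cache_nr(entry, nr);
 *		clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm);
 *	}
 */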
#endif /* CONFIG_MMU */

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
{
	vm_fault_t ret = __vmf_anon_prepare(vmf);

	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vmf->vma);
	return ret;
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *start_vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
			   gfp_t gfp);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long mapping_evict_folio(struct address_space *mapping, struct folio *folio);
unsigned long mapping_try_invalidate(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

/*
 * Return true if a folio needs ->release_folio() calling upon it.
 */
static inline bool folio_needs_release(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	return folio_has_private(folio) ||
		(mapping && mapping_release_always(mapping));
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
bool folio_isolate_lru(struct folio *folio);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */
#define K(x) ((x) << (PAGE_SHIFT-10))

extern char * const zone_names[MAX_NR_ZONES];

/* perform sanity checks on struct pages being allocated or freed */
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);

extern int min_free_kbytes;

void setup_per_zone_wmarks(void);
void calculate_min_free_kbytes(void);
int __meminit init_per_zone_wmark_min(void);
void page_alloc_sysctl_init(void);

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
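
/*
 * Example (a sketch in the spirit of the compaction scanner, which peeks
 * at free pages without the zone lock; the bounds check is essential):
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 *
 * The READ_ONCE() inside the macro guarantees freepage_order is read
 * exactly once, so the range check and the use see the same value.
 */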

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_PAGE_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be a non PageBuddy, out of @page's zone, or its order is
 * not the same as @page. The validation is necessary before using it.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
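
/*
 * Example (a simplified sketch of the merge loop in the freeing path;
 * freelist manipulation and guard-page handling are omitted):
 *
 *	while (order < MAX_PAGE_ORDER) {
 *		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
 *		if (!buddy)
 *			break;
 *
 *		combined_pfn = buddy_pfn & pfn;
 *		page = page + (combined_pfn - pfn);
 *		pfn = combined_pfn;
 *		order++;
 *	}
 */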

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

void set_zone_contiguous(struct zone *zone);

static inline void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
		enum meminit_context context);

/*
 * This will have no effect, other than possibly generating a warning, if the
 * caller passes in a non-large folio.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
	if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
		return;

	folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = 1U << order;
#endif
}

bool __folio_unqueue_deferred_split(struct folio *folio);
static inline bool folio_unqueue_deferred_split(struct folio *folio)
{
	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
		return false;

	/*
	 * At this point, there is no one trying to add the folio to
	 * deferred_list. If folio is not in deferred_list, it's safe
	 * to check without acquiring the split_queue_lock.
	 */
	if (data_race(list_empty(&folio->_deferred_list)))
		return false;

	return __folio_unqueue_deferred_split(folio);
}

static inline struct folio *page_rmappable_folio(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (folio && folio_test_large(folio))
		folio_set_large_rmappable(folio);
	return folio;
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
	struct folio *folio = (struct folio *)page;

	folio_set_order(folio, order);
	atomic_set(&folio->_large_mapcount, -1);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	if (order > 1)
		INIT_LIST_HEAD(&folio->_deferred_list);
}

static inline void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
	set_page_private(p, 0);
}

extern void prep_compound_page(struct page *page, unsigned int order);
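
/*
 * Sketch of how the two helpers above combine; the real
 * prep_compound_page() in mm/page_alloc.c follows this shape:
 *
 *	__SetPageHead(page);
 *	for (i = 1; i < (1 << order); i++)
 *		prep_compound_tail(page, i);
 *	prep_compound_head(page, order);
 */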

void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
extern bool free_pages_prepare(struct page *page, unsigned int order);

extern int user_min_free_kbytes;

struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid,
		nodemask_t *);
#define __alloc_frozen_pages(...) \
	alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__))
void free_frozen_pages(struct page *page, unsigned int order);
void free_unref_folios(struct folio_batch *fbatch);

#ifdef CONFIG_NUMA
struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order);
#else
static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order)
{
	return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL);
}
#endif

#define alloc_frozen_pages(...) \
	alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__))

extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
extern void zone_pcp_init(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
		unsigned long, enum meminit_context, struct vmem_altmap *, int);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages[NR_PAGE_ORDERS];	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool finish_pageblock;		/* Scan the remainder of a pageblock. Used
					 * when there are potentially transient
					 * isolation or migration failures to
					 * ensure forward progress.
					 */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}

/* mm/util.c */
struct anon_vma *folio_anon_vma(const struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, bool write, int *locked);
extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
			       unsigned long bytes);

/*
 * NOTE: This function can't tell whether the folio is "fully mapped" in the
 * range. "Fully mapped" means all the pages of the folio are associated with
 * the page tables of the range, while this function just checks whether the
 * folio range falls within [start, end). The caller needs to do a page table
 * check if it cares about the page table association.
 *
 * Typical usage (like mlock or madvise) is:
 * The caller knows at least one page of the folio is associated with the page
 * tables of the VMA and the range [start, end) intersects the VMA range. The
 * caller wants to know whether the folio is fully associated with the range.
 * It calls this function to check whether the folio is in the range first.
 * Then it checks the page tables to know whether the folio is fully mapped
 * to the range.
 */
static inline bool
folio_within_range(struct folio *folio, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgoff_t pgoff, addr;
	unsigned long vma_pglen = vma_pages(vma);

	VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio);
	if (start > end)
		return false;

	if (start < vma->vm_start)
		start = vma->vm_start;

	if (end > vma->vm_end)
		end = vma->vm_end;

	pgoff = folio_pgoff(folio);

	/* if folio start address is not in vma range */
	if (!in_range(pgoff, vma->vm_pgoff, vma_pglen))
		return false;

	addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	return !(addr < start || end - addr < folio_size(folio));
}
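
/*
 * Example (a sketch of the mlock-style usage described above):
 *
 *	bool maybe_fully_mapped =
 *		folio_within_range(folio, vma, start, end);
 *
 * A "true" result only says the folio's address range fits inside
 * [start, end); the caller must still walk the page table to confirm
 * every page of the folio is actually mapped there.
 */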

static inline bool
folio_within_vma(struct folio *folio, struct vm_area_struct *vma)
{
	return folio_within_range(folio, vma, vma->vm_start, vma->vm_end);
}

/*
 * mlock_vma_folio() and munlock_vma_folio():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at
 * the end of folio_remove_rmap_*(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
				struct vm_area_struct *vma)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED))
		mlock_folio(folio);
}

void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
					struct vm_area_struct *vma)
{
	/*
	 * Always munlock when this function is called. Ideally, we would
	 * only munlock if some page of the folio is being unmapped from
	 * the VMA, leaving the folio no longer fully mapped to it.
	 *
	 * But it's not easy to confirm that's the situation. So we
	 * always munlock the folio and page reclaim will correct it
	 * if it's wrong.
	 */
	if (unlikely(vma->vm_flags & VM_LOCKED))
		munlock_folio(folio);
}

void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/**
 * vma_address - Find the virtual address a page range is mapped at
 * @vma: The vma which maps this object.
 * @pgoff: The page offset within its object.
 * @nr_pages: The number of pages to consider.
 *
 * If any page in this range is mapped by this VMA, return the first address
 * where any of these pages appear. Otherwise, return -EFAULT.
 */
static inline unsigned long vma_address(const struct vm_area_struct *vma,
		pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
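
/*
 * Worked example (hypothetical numbers): a VMA with vm_start at
 * 0x7f0000000000 and vm_pgoff 0x10 maps file pages starting at file
 * page 0x10. For pgoff 0x12 and nr_pages 1:
 *
 *	address = 0x7f0000000000 + ((0x12 - 0x10) << PAGE_SHIFT)
 *		= 0x7f0000002000		(with 4KiB pages)
 *
 * provided that address still lies below vm_end; otherwise -EFAULT.
 */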

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		release_fault_lock(vmf);
	}
	return fpin;
}
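
/*
 * Example (a sketch of the filemap fault pattern; the actual I/O step
 * is abbreviated):
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start readahead or wait on the folio lock ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 *
 * A non-NULL return means the lock was dropped: the fault must be
 * retried, but the pinned file keeps the mapping alive meanwhile.
 */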
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/* Memory initialisation debug and verification */
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
DECLARE_STATIC_KEY_TRUE(deferred_pages);

bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
#ifdef CONFIG_MEMORY_FAILURE
void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu);
void shake_folio(struct folio *folio);
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
void SetPageHWPoisonTakenOff(struct page *page);
void ClearPageHWPoisonTakenOff(struct page *page);
bool take_page_off_buddy(struct page *page);
bool put_page_back_buddy(struct page *page);
struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
		     struct vm_area_struct *vma, struct list_head *to_kill,
		     unsigned long ksm_addr);
unsigned long page_mapped_in_vma(const struct page *page,
		struct vm_area_struct *vma);

#else
static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
{
}
#endif

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
struct folio *alloc_migrate_folio(struct folio *src, unsigned long private);
unsigned long reclaim_pages(struct list_head *folio_list);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *folio_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_NON_BLOCK		 0x10 /* Caller cannot block. Allow access
				       * to 25% of the min watermark or
				       * 62.5% if __GFP_HIGH is set.
				       */
#define ALLOC_MIN_RESERVE	 0x20 /* __GFP_HIGH set. Allow access to 50%
				       * of the min watermark.
				       */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

/* Flags that allow allocations below the min watermark. */
#define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
	enum migrate_reason reason;
};

/*
 * mm/filemap.c
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size);

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
unsigned int get_vm_area_page_order(struct vm_struct *vm);
#else
static inline void vmalloc_init(void)
{
}

static inline
int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

int __must_check __vmap_pages_range_noflush(unsigned long addr,
			       unsigned long end, pgprot_t prot,
			       struct page **pages, unsigned int page_shift);

void vunmap_range_noflush(unsigned long start, unsigned long end);

void __vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int *flags, bool writable,
		      int *last_cpupid);

void free_zone_device_folio(struct folio *folio);
int migrate_device_coherent_folio(struct folio *folio);

struct vm_struct *__get_vm_area_node(unsigned long size,
				     unsigned long align, unsigned long shift,
				     unsigned long flags, unsigned long start,
				     unsigned long end, int node, gfp_t gfp_mask,
				     const void *caller);

/*
 * mm/gup.c
 */
int __must_check try_grab_folio(struct folio *folio, int refs,
				unsigned int flags);

/*
 * mm/huge_memory.c
 */
void touch_pud(struct vm_area_struct *vma, unsigned long addr,
	       pud_t *pud, bool write);
void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
	       pmd_t *pmd, bool write);

/*
 * Parses a string with mem suffixes into its order. Useful to parse kernel
 * parameters.
 */
static inline int get_order_from_str(const char *size_str,
				     unsigned long valid_orders)
{
	unsigned long size;
	char *endptr;
	int order;

	size = memparse(size_str, &endptr);

	if (!is_power_of_2(size))
		return -EINVAL;
	order = get_order(size);
	if (BIT(order) & ~valid_orders)
		return -EINVAL;

	return order;
}
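
/*
 * Worked example: with 4KiB pages, get_order_from_str("2M", valid_orders)
 * parses "2M" as 0x200000 bytes, a power of two, giving order 9 (here
 * valid_orders would be e.g. a mask of permitted THP orders). A string
 * such as "3M" fails the is_power_of_2() check and returns -EINVAL, as
 * does an order missing from @valid_orders.
 */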

enum {
	/* mark page accessed */
	FOLL_TOUCH = 1 << 16,
	/* a retry, previous pass started an IO */
	FOLL_TRIED = 1 << 17,
	/* we are working on non-current tsk/mm */
	FOLL_REMOTE = 1 << 18,
	/* pages must be released via unpin_user_page */
	FOLL_PIN = 1 << 19,
	/* gup_fast: prevent fall-back to slow gup */
	FOLL_FAST_ONLY = 1 << 20,
	/* allow unlocking the mmap lock */
	FOLL_UNLOCKABLE = 1 << 21,
	/* VMA lookup+checks compatible with MADV_POPULATE_(READ|WRITE) */
	FOLL_MADV_POPULATE = 1 << 22,
};

#define INTERNAL_GUP_FLAGS (FOLL_TOUCH | FOLL_TRIED | FOLL_REMOTE | FOLL_PIN | \
			    FOLL_FAST_ONLY | FOLL_UNLOCKABLE | \
			    FOLL_MADV_POPULATE)

/*
 * Indicates for which pages that are write-protected in the page table,
 * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE such that the
 * GUP pin will remain consistent with the pages mapped into the page tables
 * of the MM.
 *
 * Temporary unmapping of PageAnonExclusive() pages or clearing of
 * PageAnonExclusive() has to protect against concurrent GUP:
 * * Ordinary GUP: Using the PT lock
 * * GUP-fast and fork(): mm->write_protect_seq
 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
 *   folio_try_share_anon_rmap_*()
 *
 * Must be called with the (sub)page that's actually referenced via the
 * page table entry, which might not necessarily be the head page for a
 * PTE-mapped THP.
 *
 * If the vma is NULL, we're coming from the GUP-fast path and might have
 * to fallback to the slow path just to lookup the vma.
 */
static inline bool gup_must_unshare(struct vm_area_struct *vma,
				    unsigned int flags, struct page *page)
{
	/*
	 * FOLL_WRITE is implicitly handled correctly as the page table entry
	 * has to be writable -- and if it references (part of) an anonymous
	 * folio, that part is required to be marked exclusive.
	 */
	if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
		return false;
	/*
	 * Note: PageAnon(page) is stable until the page is actually getting
	 * freed.
	 */
	if (!PageAnon(page)) {
		/*
		 * We only care about R/O long-term pinning: R/O short-term
		 * pinning does not have the semantics to observe successive
		 * changes through the process page tables.
		 */
		if (!(flags & FOLL_LONGTERM))
			return false;

		/* We really need the vma ... */
		if (!vma)
			return true;

		/*
		 * ... because we only care about writable private ("COW")
		 * mappings where we have to break COW early.
		 */
		return is_cow_mapping(vma->vm_flags);
	}

	/* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
	if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
		smp_rmb();

	/*
	 * Note that KSM pages cannot be exclusive, and consequently,
	 * cannot get pinned.
	 */
	return !PageAnonExclusive(page);
}
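
/*
 * Example (a sketch of how the slow GUP path consults this helper when
 * it finds a write-protected PTE; error handling is abbreviated):
 *
 *	if (!pte_write(pte) && gup_must_unshare(vma, flags, page))
 *		return ERR_PTR(-EMLINK);
 *
 * -EMLINK tells the caller to retry the fault with FAULT_FLAG_UNSHARE,
 * so the pin ends up on an exclusive anonymous page.
 */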

extern bool mirrored_kernelcore;
extern bool memblock_has_mirror(void);

static __always_inline void vma_set_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end,
					  pgoff_t pgoff)
{
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
}

static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
{
	/*
	 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty
	 * enablements, because when soft-dirty is not compiled in,
	 * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
	 * would be constantly true.
	 */
	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return false;

	/*
	 * Soft-dirty is kind of special: its tracking is enabled when the
	 * VM_SOFTDIRTY vma flag is *not* set.
	 */
	return !(vma->vm_flags & VM_SOFTDIRTY);
}

static inline bool pmd_needs_soft_dirty_wp(struct vm_area_struct *vma, pmd_t pmd)
{
	return vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd);
}

static inline bool pte_needs_soft_dirty_wp(struct vm_area_struct *vma, pte_t pte)
{
	return vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte);
}

void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid);

/* shrinker related functions */
unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
			  int priority);

#ifdef CONFIG_SHRINKER_DEBUG
static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
			struct shrinker *shrinker, const char *fmt, va_list ap)
{
	shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);

	return shrinker->name ? 0 : -ENOMEM;
}

static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
	kfree_const(shrinker->name);
	shrinker->name = NULL;
}

extern int shrinker_debugfs_add(struct shrinker *shrinker);
extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
					      int *debugfs_id);
extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
				    int debugfs_id);
#else /* CONFIG_SHRINKER_DEBUG */
static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
	return 0;
}
static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
					      const char *fmt, va_list ap)
{
	return 0;
}
static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
{
}
static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
						     int *debugfs_id)
{
	*debugfs_id = -1;
	return NULL;
}
static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
					   int debugfs_id)
{
}
#endif /* CONFIG_SHRINKER_DEBUG */

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {			\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {	\
		xas_set_update(xas, workingset_update_node);	\
		xas_set_lru(xas, &shadow_nodes);		\
	}							\
} while (0)
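
/*
 * Example (a sketch of the page-cache pattern; the store itself is
 * elided):
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 *	... xas_lock_irq(&xas), xas_store(&xas, folio), ...
 *
 * so that xarray nodes holding shadow entries are tracked on
 * shadow_nodes and can be reclaimed by the workingset shrinker.
 */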

/* mremap.c */
unsigned long move_page_tables(struct vm_area_struct *vma,
	unsigned long old_addr, struct vm_area_struct *new_vma,
	unsigned long new_addr, unsigned long len,
	bool need_rmap_locks, bool for_stack);

#ifdef CONFIG_UNACCEPTED_MEMORY
void accept_page(struct page *page);
#else /* CONFIG_UNACCEPTED_MEMORY */
static inline void accept_page(struct page *page)
{
}
#endif /* CONFIG_UNACCEPTED_MEMORY */

/* pagewalk.c */
int walk_page_range_mm(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);

/* pt_reclaim.c */
bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval);
void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb,
	      pmd_t pmdval);
void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
		     struct mmu_gather *tlb);

#ifdef CONFIG_PT_RECLAIM
bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
			   struct zap_details *details);
#else
static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end,
					 struct zap_details *details)
{
	return false;
}
#endif /* CONFIG_PT_RECLAIM */


#endif /* __MM_INTERNAL_H */