/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/pgalloc_tag.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/memremap.h>
#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/rcuwait.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct user_struct;
struct pt_regs;
struct folio_batch;

void arch_mm_preinit(void);
void mm_core_init(void);
void init_mm_internals(void);

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

extern void * high_memory;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern int mmap_rnd_bits_max __ro_after_init;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#ifndef DIRECT_MAP_PHYSMEM_END
# ifdef MAX_PHYSMEM_BITS
# define DIRECT_MAP_PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
# else
# define DIRECT_MAP_PHYSMEM_END	(((phys_addr_t)-1) & ~(1ULL << 63))
# endif
#endif

#include <asm/page.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * Define this in <asm/pgtable.h> to prevent the common memory management
 * code from establishing a zero-page mapping on a read fault.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * If an architecture decides to implement its own version of
 * mm_zero_struct_page, it should wrap the defines below in a #ifndef and
 * define its own version of this macro in <asm/pgtable.h>.
 */
#if BITS_PER_LONG == 64
/* This function must be updated when the size of struct page grows above 96
 * or shrinks below 56. The idea is that the compiler optimizes out the
 * switch() statement and leaves only move/store instructions. The compiler
 * can also combine write statements if they are both assignments and can be
 * reordered, which can result in several of the writes here being dropped.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 96);

	switch (sizeof(struct page)) {
	case 96:
		_pp[11] = 0;
		fallthrough;
	case 88:
		_pp[10] = 0;
		fallthrough;
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of active map areas; this limits the number of VMAs
 * per mm_struct. Users can override this number via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per VMA. In ELF, the number of sections is represented by an unsigned
 * short, so the number of sections must stay below 65535 at coredump time.
 * Because the kernel adds some informative sections to the program image
 * when generating a coredump, we need some margin. The number of extra
 * sections is currently 1-3 and depends on the arch; we use "5" as a safe
 * margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools can be
 * surprised by larger counts.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
#else
#define nth_page(page,n) ((page) + (n))
#define folio_page_idx(folio, p)	((p) - &(folio)->page)
#endif

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* to align the pointer to the (prev) page boundary */
#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
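
/*
 * Worked example (illustrative only, assuming a 4 KiB PAGE_SIZE):
 * PAGE_ALIGN(0x1001) == 0x2000, PAGE_ALIGN(0x1000) == 0x1000,
 * PAGE_ALIGN_DOWN(0x1fff) == 0x1000 and PAGE_ALIGNED(0x1000) is true.
 */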

static inline struct folio *lru_to_folio(struct list_head *head)
{
	return list_entry((head)->prev, struct folio, lru);
}

void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#ifdef CONFIG_MMU
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#else /* CONFIG_MMU */
#define VM_MAYOVERLAY	0x00000200	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
#define VM_UFFD_MISSING	0
#endif /* CONFIG_MMU */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_5	37	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_6	38	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
#define VM_HIGH_ARCH_6	BIT(VM_HIGH_ARCH_BIT_6)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
#if CONFIG_ARCH_PKEY_BITS > 3
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#else
# define VM_PKEY_BIT3	0
#endif
#if CONFIG_ARCH_PKEY_BITS > 4
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#ifdef CONFIG_X86_USER_SHADOW_STACK
/*
 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
 * support in the core mm.
 *
 * These VMAs will get a single end guard page. This helps userspace protect
 * itself from attacks. A single page is enough for current shadow stack archs
 * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
 * for more details on the guard size.
 */
# define VM_SHADOW_STACK	VM_HIGH_ARCH_5
#endif

#if defined(CONFIG_ARM64_GCS)
/*
 * arm64's Guarded Control Stack implements similar functionality and
 * has similar constraints to shadow stacks.
 */
# define VM_SHADOW_STACK	VM_HIGH_ARCH_6
#endif

#ifndef VM_SHADOW_STACK
# define VM_SHADOW_STACK	VM_NONE
#endif

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC64)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
# define VM_ARCH_CLEAR	VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_4	/* Use Tagged memory for access control */
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_5	/* Tagged memory permitted */
#else
# define VM_MTE		VM_NONE
# define VM_MTE_ALLOWED	VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define VM_UFFD_MINOR_BIT	38
# define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR		VM_NONE
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */

/*
 * This flag is used to connect VFIO to arch specific KVM code. It
 * indicates that the memory under this VMA is safe for use with any
 * non-cacheable memory type inside KVM. Some VFIO devices, on some
 * platforms, are thought to be unsafe and can cause machine crashes
 * if KVM does not lock down the memory type.
 */
#ifdef CONFIG_64BIT
#define VM_ALLOW_ANY_UNCACHED_BIT	39
#define VM_ALLOW_ANY_UNCACHED		BIT(VM_ALLOW_ANY_UNCACHED_BIT)
#else
#define VM_ALLOW_ANY_UNCACHED		VM_NONE
#endif

#ifdef CONFIG_64BIT
#define VM_DROPPABLE_BIT	40
#define VM_DROPPABLE		BIT(VM_DROPPABLE_BIT)
#elif defined(CONFIG_PPC32)
#define VM_DROPPABLE		VM_ARCH_1
#else
#define VM_DROPPABLE		VM_NONE
#endif

#ifdef CONFIG_64BIT
/* VM is sealed, in vm_flags */
#define VM_SEALED	_BITUL(63)
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#define VM_STACK_EARLY	VM_GROWSDOWN
#else
#define VM_STACK	VM_GROWSDOWN
#define VM_STACK_EARLY	0
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask prevents a VMA from being scanned by khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask represents all the VMA flag bits used by mlock */
#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */

/*
 * The default fault flags that should be used by most of the
 * arch-specific page fault handlers.
 */
#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
			     FAULT_FLAG_KILLABLE | \
			     FAULT_FLAG_INTERRUPTIBLE)

/**
 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 * @flags: Fault flags.
 *
 * This is mostly used in places where we want to avoid holding the
 * mmap_lock for too long while waiting for another condition to change,
 * in which case we can be polite and release the mmap_lock on the first
 * attempt, to avoid potential starvation of other processes that also
 * want the mmap_lock.
 *
 * Return: true if the page fault allows retry and this is the first
 * attempt of the fault handling; false otherwise.
 */
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
}
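
/*
 * Usage sketch (illustrative only, not taken from this header): a fault
 * handler that must wait for an external event can be polite and drop the
 * fault lock only on the first retryable attempt, e.g.:
 *
 *	if (fault_flag_allow_retry_first(vmf->flags) &&
 *	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 *		release_fault_lock(vmf);
 *		return VM_FAULT_RETRY;
 *	}
 */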

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }

/*
 * vm_fault is filled in by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * The MM layer fills in gfp_mask for page allocations, but the fault handler
 * might alter it if its implementation requires a different allocation
 * context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	const struct {
		struct vm_area_struct *vma;	/* Target VMA */
		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address - masked */
		unsigned long real_address;	/* Faulting virtual address - unmasked */
	};
	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
					 * XXX: should really be 'const' */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	union {
		pte_t orig_pte;		/* Value of PTE at the time of fault */
		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
					 * used by PMD fault only.
					 */
	};

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() sets up a page
					 * table from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	/**
	 * @close: Called when the VMA is being removed from the MM.
	 * Context: User context.  May sleep.  Caller holds mmap_lock.
	 */
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be modified.  Returns 0 if mprotect() can proceed.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* Notification that a previously read-only page is about to become
	 * writable. If an error is returned, it will cause a SIGBUS. */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* Called by access_process_vm() when get_user_pages() fails, typically
	 * for use by special VMAs. See also generic_access_phys() for a generic
	 * implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr, pgoff_t *ilx);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
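
/*
 * Minimal implementation sketch (illustrative only; "my_page" and
 * "my_vm_fault" are hypothetical names): a driver servicing faults from a
 * single preallocated page takes a reference and hands the page back.
 *
 *	static vm_fault_t my_vm_fault(struct vm_fault *vmf)
 *	{
 *		get_page(my_page);
 *		vmf->page = my_page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_vm_fault,
 *	};
 */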

#ifdef CONFIG_NUMA_BALANCING
static inline void vma_numab_state_init(struct vm_area_struct *vma)
{
	vma->numab_state = NULL;
}
static inline void vma_numab_state_free(struct vm_area_struct *vma)
{
	kfree(vma->numab_state);
}
#else
static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_PER_VMA_LOCK
static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	static struct lock_class_key lockdep_key;

	lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
#endif
	if (reset_refcnt)
		refcount_set(&vma->vm_refcnt, 0);
	vma->vm_lock_seq = UINT_MAX;
}

static inline bool is_vma_writer_only(int refcnt)
{
	/*
	 * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
	 * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
	 * a detached vma happens only in vma_mark_detached() and is a rare
	 * case, therefore most of the time there will be no unnecessary wakeup.
	 */
	return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1;
}

static inline void vma_refcount_put(struct vm_area_struct *vma)
{
	/* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
	struct mm_struct *mm = vma->vm_mm;
	int oldcnt;

	rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
	if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
		if (is_vma_writer_only(oldcnt - 1))
			rcuwait_wake_up(&mm->vma_writer_wait);
	}
}

/*
 * Try to read-lock a vma. The function is allowed to occasionally yield a
 * false locked result to avoid performance overhead, in which case we fall
 * back to using mmap_lock. The function should never yield a false unlocked
 * result. A false locked result is possible if mm_lock_seq overflows or if
 * the vma gets reused and attached to a different mm before we lock it.
 * Returns the vma on success, NULL on failure to lock and ERR_PTR(-EAGAIN)
 * if the vma got detached.
 */
static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
						    struct vm_area_struct *vma)
{
	int oldcnt;

	/*
	 * Check before locking. A race might cause a false locked result.
	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
	 * ACQUIRE semantics, because this is just a lockless check whose result
	 * we don't rely on for anything - the mm_lock_seq read against which we
	 * need ordering is below.
	 */
	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
		return NULL;

	/*
	 * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
	 * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET.
	 * Acquire fence is required here to avoid reordering against later
	 * vm_lock_seq check and checks inside lock_vma_under_rcu().
	 */
	if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
							      VMA_REF_LIMIT))) {
		/* return EAGAIN if vma got detached from under us */
		return oldcnt ? NULL : ERR_PTR(-EAGAIN);
	}

	rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
	/*
	 * Overflow of vm_lock_seq/mm_lock_seq might produce a false locked
	 * result. A false unlocked result is impossible because we modify and
	 * check vma->vm_lock_seq under vma->vm_refcnt protection and
	 * mm->mm_lock_seq modification invalidates all existing locks.
	 *
	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
	 * racing with vma_end_write_all(), we only start reading from the VMA
	 * after it has been unlocked.
	 * This pairs with RELEASE semantics in vma_end_write_all().
	 */
	if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
		vma_refcount_put(vma);
		return NULL;
	}

	return vma;
}
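
/*
 * Usage sketch (illustrative only): the typical consumer is a page fault
 * path that tries the per-VMA lock first and falls back to mmap_lock.
 *
 *	vma = lock_vma_under_rcu(mm, address);
 *	if (vma) {
 *		... handle the fault under the VMA read lock ...
 *		vma_end_read(vma);
 *	} else {
 *		mmap_read_lock(mm);
 *		... slow path under the mmap read lock ...
 *		mmap_read_unlock(mm);
 *	}
 */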

/*
 * Use only while holding the mmap read lock, which guarantees that locking
 * will not fail (nobody can concurrently write-lock the vma).
 * vma_start_read() should not be used in such cases because it might fail
 * due to mm_lock_seq overflow. This functionality is used to obtain a vma
 * read lock and then drop the mmap read lock.
 */
static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
{
	int oldcnt;

	mmap_assert_locked(vma->vm_mm);
	if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
							      VMA_REF_LIMIT)))
		return false;

	rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
	return true;
}

/*
 * Use only while holding the mmap read lock, which guarantees that locking
 * will not fail (nobody can concurrently write-lock the vma).
 * vma_start_read() should not be used in such cases because it might fail
 * due to mm_lock_seq overflow. This functionality is used to obtain a vma
 * read lock and then drop the mmap read lock.
 */
static inline bool vma_start_read_locked(struct vm_area_struct *vma)
{
	return vma_start_read_locked_nested(vma, 0);
}

static inline void vma_end_read(struct vm_area_struct *vma)
{
	vma_refcount_put(vma);
}

/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
{
	mmap_assert_write_locked(vma->vm_mm);

	/*
	 * The current task is holding mmap_write_lock, so neither
	 * vma->vm_lock_seq nor mm->mm_lock_seq can be concurrently modified.
	 */
	*mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
	return (vma->vm_lock_seq == *mm_lock_seq);
}

void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq);

/*
 * Begin writing to a VMA.
 * Exclude concurrent readers under the per-VMA lock until the currently
 * write-locked mmap_lock is dropped or downgraded.
 */
static inline void vma_start_write(struct vm_area_struct *vma)
{
	unsigned int mm_lock_seq;

	if (__is_vma_write_locked(vma, &mm_lock_seq))
		return;

	__vma_start_write(vma, mm_lock_seq);
}

static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
	unsigned int mm_lock_seq;

	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}

static inline void vma_assert_locked(struct vm_area_struct *vma)
{
	unsigned int mm_lock_seq;

	VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
		      !__is_vma_write_locked(vma, &mm_lock_seq), vma);
}

/*
 * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
 * assertions should be made either under mmap_write_lock or when the object
 * has been isolated under mmap_write_lock, ensuring no competing writers.
 */
static inline void vma_assert_attached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
}

static inline void vma_assert_detached(struct vm_area_struct *vma)
{
	WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
}

static inline void vma_mark_attached(struct vm_area_struct *vma)
{
	vma_assert_write_locked(vma);
	vma_assert_detached(vma);
	refcount_set_release(&vma->vm_refcnt, 1);
}

void vma_mark_detached(struct vm_area_struct *vma);

static inline void release_fault_lock(struct vm_fault *vmf)
{
	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
		vma_end_read(vmf->vma);
	else
		mmap_read_unlock(vmf->vma->vm_mm);
}

static inline void assert_fault_locked(struct vm_fault *vmf)
{
	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
		vma_assert_locked(vmf->vma);
	else
		mmap_assert_locked(vmf->vma->vm_mm);
}

struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
					  unsigned long address);

#else /* CONFIG_PER_VMA_LOCK */

static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {}
static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
						    struct vm_area_struct *vma)
		{ return NULL; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
		{ mmap_assert_write_locked(vma->vm_mm); }
static inline void vma_assert_attached(struct vm_area_struct *vma) {}
static inline void vma_assert_detached(struct vm_area_struct *vma) {}
static inline void vma_mark_attached(struct vm_area_struct *vma) {}
static inline void vma_mark_detached(struct vm_area_struct *vma) {}

static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
		unsigned long address)
{
	return NULL;
}

static inline void vma_assert_locked(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);
}

static inline void release_fault_lock(struct vm_fault *vmf)
{
	mmap_read_unlock(vmf->vma->vm_mm);
}

static inline void assert_fault_locked(struct vm_fault *vmf)
{
	mmap_assert_locked(vmf->vma->vm_mm);
}

#endif /* CONFIG_PER_VMA_LOCK */

extern const struct vm_operations_struct vma_dummy_vm_ops;

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &vma_dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma_lock_init(vma, false);
}

/* Use when VMA is not part of the VMA tree and needs no locking */
static inline void vm_flags_init(struct vm_area_struct *vma,
				 vm_flags_t flags)
{
	ACCESS_PRIVATE(vma, __vm_flags) = flags;
}

/*
 * Use when VMA is part of the VMA tree and modifications need coordination.
 * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
 * it should be locked explicitly beforehand.
 */
static inline void vm_flags_reset(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	vm_flags_init(vma, flags);
}

static inline void vm_flags_reset_once(struct vm_area_struct *vma,
				       vm_flags_t flags)
{
	vma_assert_write_locked(vma);
	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
}

static inline void vm_flags_set(struct vm_area_struct *vma,
				vm_flags_t flags)
{
	vma_start_write(vma);
	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
}

static inline void vm_flags_clear(struct vm_area_struct *vma,
				  vm_flags_t flags)
{
	vma_start_write(vma);
	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
}

/*
 * Use only if VMA is not part of the VMA tree or has no other users and
 * therefore needs no locking.
 */
static inline void __vm_flags_mod(struct vm_area_struct *vma,
				  vm_flags_t set, vm_flags_t clear)
{
	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
}

/*
 * Use only when the order of set/clear operations is unimportant, otherwise
 * use vm_flags_{set|clear} explicitly.
 */
static inline void vm_flags_mod(struct vm_area_struct *vma,
				vm_flags_t set, vm_flags_t clear)
{
	vma_start_write(vma);
	__vm_flags_mod(vma, set, clear);
}
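
/*
 * Usage sketch (illustrative only): mlock-style code updating a VMA that
 * is already in the VMA tree. vm_flags_set()/vm_flags_clear() write-lock
 * the VMA via vma_start_write() before touching the flags.
 *
 *	vm_flags_set(vma, VM_LOCKED);
 *	...
 *	vm_flags_clear(vma, VM_LOCKED_MASK);
 */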

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

/*
 * Indicate if the VMA is a heap for the given task; for
 * /proc/PID/maps that is the heap of the main task.
 */
static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
{
	return vma->vm_start < vma->vm_mm->brk &&
		vma->vm_end > vma->vm_mm->start_brk;
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack". It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
	       vma->vm_end >= vma->vm_mm->start_stack;
}

static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;

	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

static inline bool is_shared_maywrite(vm_flags_t vm_flags)
{
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
		(VM_SHARED | VM_MAYWRITE);
}

static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
{
	return is_shared_maywrite(vma->vm_flags);
}

static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
	return mas_find(&vmi->mas, max - 1);
}

static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
{
	/*
	 * Uses mas_find() to get the first VMA when the iterator starts.
	 * Calling mas_next() could skip the first entry.
	 */
	return mas_find(&vmi->mas, ULONG_MAX);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
	return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
	return mas_prev(&vmi->mas, 0);
}

static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
			unsigned long start, unsigned long end, gfp_t gfp)
{
	__mas_set_range(&vmi->mas, start, end - 1);
	mas_store_gfp(&vmi->mas, NULL, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	return 0;
}

/* Free any unused preallocations */
static inline void vma_iter_free(struct vma_iterator *vmi)
{
	mas_destroy(&vmi->mas);
}

static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vmi->mas.index = vma->vm_start;
	vmi->mas.last = vma->vm_end - 1;
	mas_store(&vmi->mas, vma);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}

static inline void vma_iter_invalidate(struct vma_iterator *vmi)
{
	mas_pause(&vmi->mas);
}

static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
{
	mas_set(&vmi->mas, addr);
}

#define for_each_vma(__vmi, __vma)					\
	while (((__vma) = vma_next(&(__vmi))) != NULL)

/* The MM code likes to work with exclusive end addresses */
#define for_each_vma_range(__vmi, __vma, __end)				\
	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
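
/*
 * Usage sketch (illustrative only), assuming the VMA_ITERATOR() helper
 * defined elsewhere in the mm headers: walk every VMA of an mm while
 * holding the mmap read lock.
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */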

#ifdef CONFIG_SHMEM
/*
 * vma_is_shmem() is not inline because it is used only by slow
 * paths in userfaultfd.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
bool vma_is_anon_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

extern void prep_compound_page(struct page *page, unsigned int order);

static inline unsigned int folio_large_order(const struct folio *folio)
{
	return folio->_flags_1 & 0xff;
}

#ifdef NR_PAGES_IN_LARGE_FOLIO
static inline long folio_large_nr_pages(const struct folio *folio)
{
	return folio->_nr_pages;
}
#else
static inline long folio_large_nr_pages(const struct folio *folio)
{
	return 1L << folio_large_order(folio);
}
#endif

/*
 * compound_order() can be called without holding a reference, which means
 * that niceties like page_folio() don't work.  These callers should be
 * prepared to handle wild return values.  For example, PG_head may be
 * set before the order is initialised, or this may be a tail page.
 * See compaction.c for some good examples.
 */
static inline unsigned int compound_order(struct page *page)
{
	struct folio *folio = (struct folio *)page;

	if (!test_bit(PG_head, &folio->flags))
		return 0;
	return folio_large_order(folio);
}

/**
 * folio_order - The allocation order of a folio.
 * @folio: The folio.
 *
 * A folio is composed of 2^order pages.  See get_order() for the definition
 * of order.
 *
 * Return: The order of the folio.
 */
static inline unsigned int folio_order(const struct folio *folio)
{
	if (!folio_test_large(folio))
		return 0;
	return folio_large_order(folio);
}
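
/*
 * Worked example (illustrative only, assuming a 4 KiB PAGE_SIZE): an
 * order-2 folio contains 1 << 2 == 4 pages, i.e. 16 KiB. folio_order()
 * returns 0 for all small (single-page) folios.
 */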

#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts as a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

static inline int folio_put_testzero(struct folio *folio)
{
	return put_page_testzero(&folio->page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when the MMU is off, so it must not access
 * any of the virtual mappings.
 */
static inline bool get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

static inline struct folio *folio_get_nontail_page(struct page *page)
{
	if (unlikely(!get_page_unless_zero(page)))
		return NULL;
	return (struct folio *)page;
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

/*
 * How many times the entire folio is mapped as a single unit (eg by a
 * PMD or PUD entry).  This is probably not what you want, except for
 * debugging purposes or implementation of other core folio_*() primitives.
 */
static inline int folio_entire_mapcount(const struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
	if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
		return 0;
	return atomic_read(&folio->_entire_mapcount) + 1;
}

static inline int folio_large_mapcount(const struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
	return atomic_read(&folio->_large_mapcount) + 1;
}

/**
 * folio_mapcount() - Number of mappings of this folio.
 * @folio: The folio.
 *
 * The folio mapcount corresponds to the number of present user page table
 * entries that reference any part of a folio. Each such present user page
 * table entry must be paired with exactly one folio reference.
 *
 * For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts
 * exactly once.
 *
 * For hugetlb folios, each abstracted "hugetlb" user page table entry that
 * references the entire folio counts exactly once, even when such special
 * page table entries are comprised of multiple ordinary page table entries.
 *
 * Will report 0 for pages which cannot be mapped into userspace, such as
 * slab, page tables and similar.
 *
 * Return: The number of times this folio is mapped.
 */
static inline int folio_mapcount(const struct folio *folio)
{
	int mapcount;

	if (likely(!folio_test_large(folio))) {
		mapcount = atomic_read(&folio->_mapcount) + 1;
		if (page_mapcount_is_type(mapcount))
			mapcount = 0;
		return mapcount;
	}
	return folio_large_mapcount(folio);
}
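
/*
 * Worked example (illustrative only): an order-0 anonymous folio mapped by
 * one PTE in each of two processes has folio_mapcount() == 2; an order-2
 * folio fully mapped by four PTEs in a single process reports 4, since
 * every present user page table entry counts once.
 */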

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
static inline bool folio_mapped(const struct folio *folio)
{
	return folio_mapcount(folio) >= 1;
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any sub-page of compound page is mapped,
 * even if this particular sub-page is not itself mapped by any PTE or PMD.
 */
static inline bool page_mapped(const struct page *page)
{
	return folio_mapped(page_folio(page));
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

static inline struct folio *virt_to_folio(const void *x)
{
	struct page *page = virt_to_page(x);

	return page_folio(page);
}

void __folio_put(struct folio *folio);

void split_page(struct page *page, unsigned int order);
void folio_copy(struct folio *dst, struct folio *src);
int folio_mc_copy(struct folio *dst, struct folio *src);

unsigned long nr_free_buffer_pages(void);

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return compound_order(page);
}

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages() can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm().
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte, vma);
	return pte;
}

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void set_pte_range(struct vm_fault *vmf, struct folio *folio,
		struct page *page, unsigned int nr, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/* 127: arbitrary random number, small enough to assemble well */
#define folio_ref_zero_or_close_to_overflow(folio) \
	((unsigned int) folio_ref_count(folio) + 127u <= 127u)

/**
 * folio_get - Increment the reference count on a folio.
 * @folio: The folio.
 *
 * Context: May be called in any context, as long as you know that
 * you have a refcount on the folio.  If you do not already have one,
 * folio_try_get() may be the right interface for you to use.
 */
static inline void folio_get(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
	folio_ref_inc(folio);
}

static inline void get_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	if (WARN_ON_ONCE(folio_test_slab(folio)))
		return;
	folio_get(folio);
}

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

/**
 * folio_put - Decrement the reference count on a folio.
 * @folio: The folio.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put() unless you can be sure that it wasn't the
 * last reference.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put(struct folio *folio)
{
	if (folio_put_testzero(folio))
		__folio_put(folio);
}
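
/*
 * Usage sketch (illustrative only): take an extra reference on a folio you
 * already safely hold, and drop it when done. After folio_put() the folio
 * must not be touched unless another reference is still held.
 *
 *	folio_get(folio);
 *	... use the folio without it being freed under us ...
 *	folio_put(folio);
 */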

/**
 * folio_put_refs - Reduce the reference count on a folio.
 * @folio: The folio.
 * @refs: The amount to subtract from the folio's reference count.
 *
 * If the folio's reference count reaches zero, the memory will be
 * released back to the page allocator and may be used by another
 * allocation immediately.  Do not access the memory or the struct folio
 * after calling folio_put_refs() unless you can be sure that these weren't
 * the last references.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folio_put_refs(struct folio *folio, int refs)
{
	if (folio_ref_sub_and_test(folio, refs))
		__folio_put(folio);
}

void folios_put_refs(struct folio_batch *folios, unsigned int *refs);

/*
 * union release_pages_arg - an array of pages or folios
 *
 * release_pages() releases a simple array of multiple pages, and
 * accepts various different forms of said page array: either
 * a regular old boring array of pages, an array of folios, or
 * an array of encoded page pointers.
 *
 * The transparent union syntax for this kind of "any of these
 * argument types" is all kinds of ugly, so look away.
 */
typedef union {
	struct page **pages;
	struct folio **folios;
	struct encoded_page **encoded_pages;
} release_pages_arg __attribute__ ((__transparent_union__));

void release_pages(release_pages_arg, int nr);

/**
 * folios_put - Decrement the reference count on an array of folios.
 * @folios: The folios.
 *
 * Like folio_put(), but for a batch of folios.  This is more efficient
 * than writing the loop yourself as it will optimise the locks which need
 * to be taken if the folios are freed.  The folios batch is returned
 * empty and ready to be reused for another batch; there is no need to
 * reinitialise it.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
static inline void folios_put(struct folio_batch *folios)
{
	folios_put_refs(folios, NULL);
}

static inline void put_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_slab(folio))
		return;

	folio_put(folio);
}

1635/*
1636 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1637 * the page's refcount so that two separate items are tracked: the original page
1638 * reference count, and also a new count of how many pin_user_pages() calls were
1639 * made against the page. ("gup-pinned" is another term for the latter).
1640 *
1641 * With this scheme, pin_user_pages() becomes special: such pages are marked as
1642 * distinct from normal pages. As such, the unpin_user_page() call (and its
1643 * variants) must be used in order to release gup-pinned pages.
1644 *
1645 * Choice of value:
1646 *
1647 * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1648 * counts with respect to pin_user_pages() and unpin_user_page() becomes
1649 * simpler, due to the fact that adding an even power of two to the page
1650 * refcount has the effect of using only the upper N bits, for the code that
1651 * counts up using the bias value. This means that the lower bits are left for
1652 * the exclusive use of the original code that increments and decrements by one
1653 * (or at least, by much smaller values than the bias value).
1654 *
1655 * Of course, once the lower bits overflow into the upper bits (and this is
1656 * OK, because subtraction recovers the original values), then visual inspection
1657 * no longer suffices to directly view the separate counts. However, for normal
1658 * applications that don't have huge page reference counts, this won't be an
1659 * issue.
1660 *
1661 * Locking: the lockless algorithm described in folio_try_get_rcu()
1662 * provides safe operation for get_user_pages(), folio_mkclean() and
1663 * other calls that race to set up page table entries.
1664 */
1665#define GUP_PIN_COUNTING_BIAS (1U << 10)
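
/*
 * Worked example (illustrative numbers; a small folio, so the bias scheme
 * above applies): a folio with refcount 3 that is pinned twice via
 * pin_user_pages() ends up with a refcount of
 * 3 + 2 * GUP_PIN_COUNTING_BIAS = 3 + 2048 = 2051. The upper bits carry
 * the pin count and the lower bits the ordinary references; two
 * unpin_user_page() calls subtract the bias back out and recover 3.
 */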
1666
1667void unpin_user_page(struct page *page);
1668void unpin_folio(struct folio *folio);
1669void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1670 bool make_dirty);
1671void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1672 bool make_dirty);
1673void unpin_user_pages(struct page **pages, unsigned long npages);
1674void unpin_user_folio(struct folio *folio, unsigned long npages);
1675void unpin_folios(struct folio **folios, unsigned long nfolios);
1676
1677static inline bool is_cow_mapping(vm_flags_t flags)
1678{
1679 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1680}
1681
1682#ifndef CONFIG_MMU
1683static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1684{
1685 /*
1686 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
1687 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
1688 * a file mapping. R/O MAP_PRIVATE mappings might still modify
1689 * underlying memory if ptrace is active, so this is only possible if
1690 * ptrace does not apply. Note that there is no mprotect() to upgrade
1691 * write permissions later.
1692 */
1693 return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1694}
1695#endif
1696
1697#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1698#define SECTION_IN_PAGE_FLAGS
1699#endif
1700
1701/*
1702 * The identification function is mainly used by the buddy allocator for
1703 * determining if two pages could be buddies. We are not really identifying
1704 * the zone since we could be using the section number id if we do not have
1705 * node id available in page flags.
1706 * We only guarantee that it will return the same value for two combinable
1707 * pages in a zone.
1708 */
1709static inline int page_zone_id(struct page *page)
1710{
1711 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1712}
1713
1714#ifdef NODE_NOT_IN_PAGE_FLAGS
1715int page_to_nid(const struct page *page);
1716#else
1717static inline int page_to_nid(const struct page *page)
1718{
1719 return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
1720}
1721#endif
1722
1723static inline int folio_nid(const struct folio *folio)
1724{
1725 return page_to_nid(&folio->page);
1726}
1727
1728#ifdef CONFIG_NUMA_BALANCING
1729/* page access time bits need to hold at least 4 seconds */
1730#define PAGE_ACCESS_TIME_MIN_BITS 12
1731#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1732#define PAGE_ACCESS_TIME_BUCKETS \
1733 (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1734#else
1735#define PAGE_ACCESS_TIME_BUCKETS 0
1736#endif
1737
1738#define PAGE_ACCESS_TIME_MASK \
1739 (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1740
1741static inline int cpu_pid_to_cpupid(int cpu, int pid)
1742{
1743 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1744}
1745
1746static inline int cpupid_to_pid(int cpupid)
1747{
1748 return cpupid & LAST__PID_MASK;
1749}
1750
1751static inline int cpupid_to_cpu(int cpupid)
1752{
1753 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1754}
1755
1756static inline int cpupid_to_nid(int cpupid)
1757{
1758 return cpu_to_node(cpupid_to_cpu(cpupid));
1759}
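
/*
 * Worked example (illustrative; assumes cpu 5 fits in LAST__CPU_MASK):
 * cpu_pid_to_cpupid(5, 1234) packs the pair as
 * (5 << LAST__PID_SHIFT) | (1234 & LAST__PID_MASK); cpupid_to_cpu() then
 * recovers 5 and cpupid_to_pid() recovers the masked pid. Only the low
 * LAST__PID_SHIFT bits of the pid survive the round trip, which is fine
 * because the cpupid is a NUMA-balancing hint, not an identifier.
 */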
1760
1761static inline bool cpupid_pid_unset(int cpupid)
1762{
1763 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1764}
1765
1766static inline bool cpupid_cpu_unset(int cpupid)
1767{
1768 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1769}
1770
1771static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1772{
1773 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1774}
1775
1776#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1777#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1778static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1779{
1780 return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1781}
1782
1783static inline int folio_last_cpupid(struct folio *folio)
1784{
1785 return folio->_last_cpupid;
1786}
1787static inline void page_cpupid_reset_last(struct page *page)
1788{
1789 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1790}
1791#else
1792static inline int folio_last_cpupid(struct folio *folio)
1793{
1794 return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1795}
1796
1797int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1798
1799static inline void page_cpupid_reset_last(struct page *page)
1800{
1801 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1802}
1803#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1804
1805static inline int folio_xchg_access_time(struct folio *folio, int time)
1806{
1807 int last_time;
1808
1809 last_time = folio_xchg_last_cpupid(folio,
1810 time >> PAGE_ACCESS_TIME_BUCKETS);
1811 return last_time << PAGE_ACCESS_TIME_BUCKETS;
1812}
1813
1814static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1815{
1816 unsigned int pid_bit;
1817
1818 pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1819 if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1820 __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1821 }
1822}
1823
1824bool folio_use_access_time(struct folio *folio);
1825#else /* !CONFIG_NUMA_BALANCING */
1826static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1827{
1828 return folio_nid(folio); /* XXX */
1829}
1830
1831static inline int folio_xchg_access_time(struct folio *folio, int time)
1832{
1833 return 0;
1834}
1835
1836static inline int folio_last_cpupid(struct folio *folio)
1837{
1838 return folio_nid(folio); /* XXX */
1839}
1840
1841static inline int cpupid_to_nid(int cpupid)
1842{
1843 return -1;
1844}
1845
1846static inline int cpupid_to_pid(int cpupid)
1847{
1848 return -1;
1849}
1850
1851static inline int cpupid_to_cpu(int cpupid)
1852{
1853 return -1;
1854}
1855
1856static inline int cpu_pid_to_cpupid(int nid, int pid)
1857{
1858 return -1;
1859}
1860
1861static inline bool cpupid_pid_unset(int cpupid)
1862{
1863 return true;
1864}
1865
1866static inline void page_cpupid_reset_last(struct page *page)
1867{
1868}
1869
1870static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1871{
1872 return false;
1873}
1874
1875static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1876{
1877}
1878static inline bool folio_use_access_time(struct folio *folio)
1879{
1880 return false;
1881}
1882#endif /* CONFIG_NUMA_BALANCING */
1883
1884#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1885
1886/*
1887 * KASAN per-page tags are stored xor'ed with 0xff. This makes it possible
1888 * to avoid setting the tags of all pages to the native kernel tag value
1889 * 0xff, as the default value 0x00 maps to 0xff.
1890 */
1891
1892static inline u8 page_kasan_tag(const struct page *page)
1893{
1894 u8 tag = KASAN_TAG_KERNEL;
1895
1896 if (kasan_enabled()) {
1897 tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1898 tag ^= 0xff;
1899 }
1900
1901 return tag;
1902}
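
/*
 * Worked example: a freshly allocated page has 0x00 in its tag bits, so
 * page_kasan_tag() returns 0x00 ^ 0xff = 0xff, i.e. KASAN_TAG_KERNEL.
 * Setting a tag of, say, 0x2a via page_kasan_tag_set() below stores
 * 0x2a ^ 0xff = 0xd5 in page->flags; reading it back xors with 0xff
 * again and yields 0x2a.
 */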
1903
1904static inline void page_kasan_tag_set(struct page *page, u8 tag)
1905{
1906 unsigned long old_flags, flags;
1907
1908 if (!kasan_enabled())
1909 return;
1910
1911 tag ^= 0xff;
1912 old_flags = READ_ONCE(page->flags);
1913 do {
1914 flags = old_flags;
1915 flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1916 flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1917 } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1918}
1919
1920static inline void page_kasan_tag_reset(struct page *page)
1921{
1922 if (kasan_enabled())
1923 page_kasan_tag_set(page, KASAN_TAG_KERNEL);
1924}
1925
1926#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1927
1928static inline u8 page_kasan_tag(const struct page *page)
1929{
1930 return 0xff;
1931}
1932
1933static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1934static inline void page_kasan_tag_reset(struct page *page) { }
1935
1936#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1937
1938static inline struct zone *page_zone(const struct page *page)
1939{
1940 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1941}
1942
1943static inline pg_data_t *page_pgdat(const struct page *page)
1944{
1945 return NODE_DATA(page_to_nid(page));
1946}
1947
1948static inline struct zone *folio_zone(const struct folio *folio)
1949{
1950 return page_zone(&folio->page);
1951}
1952
1953static inline pg_data_t *folio_pgdat(const struct folio *folio)
1954{
1955 return page_pgdat(&folio->page);
1956}
1957
1958#ifdef SECTION_IN_PAGE_FLAGS
1959static inline void set_page_section(struct page *page, unsigned long section)
1960{
1961 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1962 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1963}
1964
1965static inline unsigned long page_to_section(const struct page *page)
1966{
1967 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1968}
1969#endif
1970
1971/**
1972 * folio_pfn - Return the Page Frame Number of a folio.
1973 * @folio: The folio.
1974 *
1975 * A folio may contain multiple pages. The pages have consecutive
1976 * Page Frame Numbers.
1977 *
1978 * Return: The Page Frame Number of the first page in the folio.
1979 */
1980static inline unsigned long folio_pfn(const struct folio *folio)
1981{
1982 return page_to_pfn(&folio->page);
1983}
1984
1985static inline struct folio *pfn_folio(unsigned long pfn)
1986{
1987 return page_folio(pfn_to_page(pfn));
1988}
1989
1990static inline bool folio_has_pincount(const struct folio *folio)
1991{
1992 if (IS_ENABLED(CONFIG_64BIT))
1993 return folio_test_large(folio);
1994 return folio_order(folio) > 1;
1995}
1996
1997/**
1998 * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
1999 * @folio: The folio.
2000 *
2001 * This function checks if a folio has been pinned via a call to
2002 * a function in the pin_user_pages() family.
2003 *
2004 * For small folios, the return value is partially fuzzy: false is not fuzzy,
2005 * because it means "definitely not pinned for DMA", but true means "probably
2006 * pinned for DMA, but possibly a false positive due to having at least
2007 * GUP_PIN_COUNTING_BIAS worth of normal folio references".
2008 *
2009 * False positives are OK, because: a) it's unlikely for a folio to
2010 * get that many refcounts, and b) all the callers of this routine are
2011 * expected to be able to deal gracefully with a false positive.
2012 *
2013 * For most large folios, the result will be exactly correct. That's because
2014 * we have more tracking data available: the _pincount field is used
2015 * instead of the GUP_PIN_COUNTING_BIAS scheme.
2016 *
2017 * For more information, please see Documentation/core-api/pin_user_pages.rst.
2018 *
2019 * Return: True, if it is likely that the folio has been "dma-pinned".
2020 * False, if the folio is definitely not dma-pinned.
2021 */
2022static inline bool folio_maybe_dma_pinned(struct folio *folio)
2023{
2024 if (folio_has_pincount(folio))
2025 return atomic_read(&folio->_pincount) > 0;
2026
2027 /*
2028 * folio_ref_count() is signed. If that refcount overflows, then
2029 * folio_ref_count() returns a negative value, and callers will avoid
2030 * further incrementing the refcount.
2031 *
2032 * Here, for that overflow case, use the sign bit to count a little
2033 * bit higher via unsigned math, and thus still get an accurate result.
2034 */
2035 return ((unsigned int)folio_ref_count(folio)) >=
2036 GUP_PIN_COUNTING_BIAS;
2037}
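
/*
 * Example (hypothetical sketch): a caller that tolerates the false
 * positives described above, as every caller of this helper must.
 *
 *	static bool may_migrate_folio(struct folio *folio)
 *	{
 *		// A rare false positive merely costs a skipped
 *		// migration; a false negative cannot happen.
 *		return !folio_maybe_dma_pinned(folio);
 *	}
 */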
2038
2039/*
2040 * This should most likely only be called during fork() to see whether we
2041 * should break the COW immediately for an anon page on the src mm.
2042 *
2043 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
2044 */
2045static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
2046 struct folio *folio)
2047{
2048 VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
2049
2050 if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
2051 return false;
2052
2053 return folio_maybe_dma_pinned(folio);
2054}
2055
2056/**
2057 * is_zero_page - Query if a page is a zero page
2058 * @page: The page to query
2059 *
2060 * This returns true if @page is one of the permanent zero pages.
2061 */
2062static inline bool is_zero_page(const struct page *page)
2063{
2064 return is_zero_pfn(page_to_pfn(page));
2065}
2066
2067/**
2068 * is_zero_folio - Query if a folio is a zero page
2069 * @folio: The folio to query
2070 *
2071 * This returns true if @folio is one of the permanent zero pages.
2072 */
2073static inline bool is_zero_folio(const struct folio *folio)
2074{
2075 return is_zero_page(&folio->page);
2076}
2077
2078/* MIGRATE_CMA and ZONE_MOVABLE do not allow pinning folios */
2079#ifdef CONFIG_MIGRATION
2080static inline bool folio_is_longterm_pinnable(struct folio *folio)
2081{
2082#ifdef CONFIG_CMA
2083 int mt = folio_migratetype(folio);
2084
2085 if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
2086 return false;
2087#endif
2088 /* The zero page can be "pinned" but gets special handling. */
2089 if (is_zero_folio(folio))
2090 return true;
2091
2092 /* Coherent device memory must always allow eviction. */
2093 if (folio_is_device_coherent(folio))
2094 return false;
2095
2096 /*
2097 * Filesystems can only tolerate transient delays to truncate and
2098 * hole-punch operations.
2099 */
2100 if (folio_is_fsdax(folio))
2101 return false;
2102
2103 /* Otherwise, non-movable zone folios can be pinned. */
2104 return !folio_is_zone_movable(folio);
2106}
2107#else
2108static inline bool folio_is_longterm_pinnable(struct folio *folio)
2109{
2110 return true;
2111}
2112#endif
2113
2114static inline void set_page_zone(struct page *page, enum zone_type zone)
2115{
2116 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
2117 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2118}
2119
2120static inline void set_page_node(struct page *page, unsigned long node)
2121{
2122 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
2123 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
2124}
2125
2126static inline void set_page_links(struct page *page, enum zone_type zone,
2127 unsigned long node, unsigned long pfn)
2128{
2129 set_page_zone(page, zone);
2130 set_page_node(page, node);
2131#ifdef SECTION_IN_PAGE_FLAGS
2132 set_page_section(page, pfn_to_section_nr(pfn));
2133#endif
2134}
2135
2136/**
2137 * folio_nr_pages - The number of pages in the folio.
2138 * @folio: The folio.
2139 *
2140 * Return: A positive power of two.
2141 */
2142static inline long folio_nr_pages(const struct folio *folio)
2143{
2144 if (!folio_test_large(folio))
2145 return 1;
2146 return folio_large_nr_pages(folio);
2147}
2148
2149/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
2150#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
2151#define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER)
2152#else
2153#define MAX_FOLIO_NR_PAGES MAX_ORDER_NR_PAGES
2154#endif
2155
2156/*
2157 * compound_nr() returns the number of pages in this potentially compound
2158 * page. compound_nr() can be called on a tail page, and is defined to
2159 * return 1 in that case.
2160 */
2161static inline long compound_nr(struct page *page)
2162{
2163 struct folio *folio = (struct folio *)page;
2164
2165 if (!test_bit(PG_head, &folio->flags))
2166 return 1;
2167 return folio_large_nr_pages(folio);
2168}
2169
2170/**
2171 * thp_nr_pages - The number of regular pages in this huge page.
2172 * @page: The head page of a huge page.
2173 */
2174static inline long thp_nr_pages(struct page *page)
2175{
2176 return folio_nr_pages((struct folio *)page);
2177}
2178
2179/**
2180 * folio_next - Move to the next physical folio.
2181 * @folio: The folio we're currently operating on.
2182 *
2183 * If you have physically contiguous memory which may span more than
2184 * one folio (eg a &struct bio_vec), use this function to move from one
2185 * folio to the next. Do not use it if the memory is only virtually
2186 * contiguous as the folios are almost certainly not adjacent to each
2187 * other. This is the folio equivalent to writing ``page++``.
2188 *
2189 * Context: We assume that the folios are refcounted and/or locked at a
2190 * higher level and do not adjust the reference counts.
2191 * Return: The next struct folio.
2192 */
2193static inline struct folio *folio_next(struct folio *folio)
2194{
2195 return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2196}
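
/*
 * Example (hypothetical sketch): walking the folios of a physically
 * contiguous buffer, assuming @bytes is the exact total size of the
 * folios making it up.
 *
 *	static void dump_folio_sizes(struct folio *folio, size_t bytes)
 *	{
 *		while (bytes) {
 *			pr_info("folio of %zu bytes\n", folio_size(folio));
 *			bytes -= folio_size(folio);
 *			folio = folio_next(folio);
 *		}
 *	}
 */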
2197
2198/**
2199 * folio_shift - The size of the memory described by this folio.
2200 * @folio: The folio.
2201 *
2202 * A folio represents a number of bytes which is a power-of-two in size.
2203 * This function tells you which power-of-two the folio is. See also
2204 * folio_size() and folio_order().
2205 *
2206 * Context: The caller should have a reference on the folio to prevent
2207 * it from being split. It is not necessary for the folio to be locked.
2208 * Return: The base-2 logarithm of the size of this folio.
2209 */
2210static inline unsigned int folio_shift(const struct folio *folio)
2211{
2212 return PAGE_SHIFT + folio_order(folio);
2213}
2214
2215/**
2216 * folio_size - The number of bytes in a folio.
2217 * @folio: The folio.
2218 *
2219 * Context: The caller should have a reference on the folio to prevent
2220 * it from being split. It is not necessary for the folio to be locked.
2221 * Return: The number of bytes in this folio.
2222 */
2223static inline size_t folio_size(const struct folio *folio)
2224{
2225 return PAGE_SIZE << folio_order(folio);
2226}
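
/*
 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4KiB pages): an order-2
 * folio contains 4 pages, so folio_shift() returns 12 + 2 = 14 and
 * folio_size() returns 4096 << 2 = 16384 bytes.
 */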
2227
2228/**
2229 * folio_maybe_mapped_shared - Whether the folio is mapped into the page
2230 * tables of more than one MM
2231 * @folio: The folio.
2232 *
2233 * This function checks if the folio may currently be mapped into more than one
2234 * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
2235 * MM ("mapped exclusively").
2236 *
2237 * For KSM folios, this function also returns "mapped shared" when a folio is
2238 * mapped multiple times into the same MM, because the individual page mappings
2239 * are independent.
2240 *
2241 * For small anonymous folios and anonymous hugetlb folios, the return
2242 * value will be exactly correct: non-KSM folios can only be mapped at most once
2243 * into an MM, and they cannot be partially mapped. KSM folios are
2244 * considered shared even if mapped multiple times into the same MM.
2245 *
2246 * For other folios, the result can be fuzzy:
2247 * #. For partially-mappable large folios (THP), the return value can wrongly
2248 * indicate "mapped shared" (false positive) if a folio was mapped by
2249 * more than two MMs at one point in time.
2250 * #. For pagecache folios (including hugetlb), the return value can wrongly
2251 * indicate "mapped shared" (false positive) when two VMAs in the same MM
2252 * cover the same file range.
2253 *
2254 * Further, this function only considers current page table mappings that
2255 * are tracked using the folio mapcount(s).
2256 *
2257 * This function does not consider:
2258 * #. If the folio might get mapped in the (near) future (e.g., swapcache,
2259 * pagecache, temporary unmapping for migration).
2260 * #. If the folio is mapped differently (VM_PFNMAP).
2261 * #. If hugetlb page table sharing applies. Callers might want to check
2262 * hugetlb_pmd_shared().
2263 *
2264 * Return: Whether the folio is estimated to be mapped into more than one MM.
2265 */
2266static inline bool folio_maybe_mapped_shared(struct folio *folio)
2267{
2268 int mapcount = folio_mapcount(folio);
2269
2270 /* Only partially-mappable folios require more care. */
2271 if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2272 return mapcount > 1;
2273
2274 /*
2275 * For vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE,
2276 * simply assume "mapped shared"; nobody should really care
2277 * about this for arbitrary kernel allocations.
2278 */
2279 if (!IS_ENABLED(CONFIG_MM_ID))
2280 return true;
2281
2282 /*
2283 * A single mapping implies "mapped exclusively", even if the
2284 * folio flag says something different: it's easier to handle this
2285 * case here instead of on the RMAP hot path.
2286 */
2287 if (mapcount <= 1)
2288 return false;
2289 return folio_test_large_maybe_mapped_shared(folio);
2290}
2291
2292#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
2293static inline int arch_make_folio_accessible(struct folio *folio)
2294{
2295 return 0;
2296}
2297#endif
2298
2299/*
2300 * Some inline functions in vmstat.h depend on page_zone()
2301 */
2302#include <linux/vmstat.h>
2303
2304#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2305#define HASHED_PAGE_VIRTUAL
2306#endif
2307
2308#if defined(WANT_PAGE_VIRTUAL)
2309static inline void *page_address(const struct page *page)
2310{
2311 return page->virtual;
2312}
2313static inline void set_page_address(struct page *page, void *address)
2314{
2315 page->virtual = address;
2316}
2317#define page_address_init() do { } while(0)
2318#endif
2319
2320#if defined(HASHED_PAGE_VIRTUAL)
2321void *page_address(const struct page *page);
2322void set_page_address(struct page *page, void *virtual);
2323void page_address_init(void);
2324#endif
2325
2326static __always_inline void *lowmem_page_address(const struct page *page)
2327{
2328 return page_to_virt(page);
2329}
2330
2331#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2332#define page_address(page) lowmem_page_address(page)
2333#define set_page_address(page, address) do { } while(0)
2334#define page_address_init() do { } while(0)
2335#endif
2336
2337static inline void *folio_address(const struct folio *folio)
2338{
2339 return page_address(&folio->page);
2340}
2341
2342/*
2343 * Return true only if the page has been allocated with
2344 * ALLOC_NO_WATERMARKS and the low watermark was not
2345 * met, implying that the system is under some pressure.
2346 */
2347static inline bool page_is_pfmemalloc(const struct page *page)
2348{
2349 /*
2350 * lru.next has bit 1 set if the page is allocated from the
2351 * pfmemalloc reserves. Callers may simply overwrite it if
2352 * they do not need to preserve that information.
2353 */
2354 return (uintptr_t)page->lru.next & BIT(1);
2355}
2356
2357/*
2358 * Return true only if the folio has been allocated with
2359 * ALLOC_NO_WATERMARKS and the low watermark was not
2360 * met, implying that the system is under some pressure.
2361 */
2362static inline bool folio_is_pfmemalloc(const struct folio *folio)
2363{
2364 /*
2365 * lru.next has bit 1 set if the page is allocated from the
2366 * pfmemalloc reserves. Callers may simply overwrite it if
2367 * they do not need to preserve that information.
2368 */
2369 return (uintptr_t)folio->lru.next & BIT(1);
2370}
2371
2372/*
2373 * Only to be called by the page allocator on a freshly allocated
2374 * page.
2375 */
2376static inline void set_page_pfmemalloc(struct page *page)
2377{
2378 page->lru.next = (void *)BIT(1);
2379}
2380
2381static inline void clear_page_pfmemalloc(struct page *page)
2382{
2383 page->lru.next = NULL;
2384}
2385
2386/*
2387 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
2388 */
2389extern void pagefault_out_of_memory(void);
2390
2391#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
2392#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
2393#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
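
/*
 * Worked example (assuming 4KiB pages): for a pointer whose low 16 bits
 * are 0x5678, offset_in_page() keeps the low 12 bits and yields 0x678;
 * for an order-2 (16KiB) folio, offset_in_folio() keeps the low 14 bits
 * and yields 0x1678.
 */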
2394
2395/*
2396 * Parameter block passed down to zap_pte_range in exceptional cases.
2397 */
2398struct zap_details {
2399 struct folio *single_folio; /* Locked folio to be unmapped */
2400 bool even_cows; /* Zap COWed private pages too? */
2401 bool reclaim_pt; /* Need reclaim page tables? */
2402 zap_flags_t zap_flags; /* Extra flags for zapping */
2403};
2404
2405/*
2406 * Whether to drop the pte markers, for example, the uffd-wp information for
2407 * file-backed memory. This should only be specified when we will completely
2408 * drop the page in the mm, either by truncation or unmapping of the vma. By
2409 * default, the flag is not set.
2410 */
2411#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
2412/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
2413#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
2414
2415#ifdef CONFIG_SCHED_MM_CID
2416void sched_mm_cid_before_execve(struct task_struct *t);
2417void sched_mm_cid_after_execve(struct task_struct *t);
2418void sched_mm_cid_fork(struct task_struct *t);
2419void sched_mm_cid_exit_signals(struct task_struct *t);
2420static inline int task_mm_cid(struct task_struct *t)
2421{
2422 return t->mm_cid;
2423}
2424#else
2425static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
2426static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
2427static inline void sched_mm_cid_fork(struct task_struct *t) { }
2428static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
2429static inline int task_mm_cid(struct task_struct *t)
2430{
2431 /*
2432 * Use the processor id as a fall-back when the mm cid feature is
2433 * disabled. This provides functional per-cpu data structure accesses
2434 * in user-space, although it won't provide the memory usage benefits.
2435 */
2436 return raw_smp_processor_id();
2437}
2438#endif
2439
2440#ifdef CONFIG_MMU
2441extern bool can_do_mlock(void);
2442#else
2443static inline bool can_do_mlock(void) { return false; }
2444#endif
2445extern int user_shm_lock(size_t, struct ucounts *);
2446extern void user_shm_unlock(size_t, struct ucounts *);
2447
2448struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2449 pte_t pte);
2450struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2451 pte_t pte);
2452struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2453 unsigned long addr, pmd_t pmd);
2454struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2455 pmd_t pmd);
2456
2457void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2458 unsigned long size);
2459void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2460 unsigned long size, struct zap_details *details);
2461static inline void zap_vma_pages(struct vm_area_struct *vma)
2462{
2463 zap_page_range_single(vma, vma->vm_start,
2464 vma->vm_end - vma->vm_start, NULL);
2465}
2466void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2467 struct vm_area_struct *start_vma, unsigned long start,
2468 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
2469
2470struct mmu_notifier_range;
2471
2472void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2473 unsigned long end, unsigned long floor, unsigned long ceiling);
2474int
2475copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2476int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2477 void *buf, int len, int write);
2478
2479struct follow_pfnmap_args {
2480 /**
2481 * Inputs:
2482 * @vma: Pointer to the &struct vm_area_struct to walk
2483 * @address: the virtual address to walk
2484 */
2485 struct vm_area_struct *vma;
2486 unsigned long address;
2487 /**
2488 * Internals:
2489 *
2490 * The caller shouldn't touch any of these.
2491 */
2492 spinlock_t *lock;
2493 pte_t *ptep;
2494 /**
2495 * Outputs:
2496 *
2497 * @pfn: the PFN of the address
2498 * @addr_mask: address mask covering pfn
2499 * @pgprot: the pgprot_t of the mapping
2500 * @writable: whether the mapping is writable
2501 * @special: whether the mapping is a special mapping (real PFN maps)
2502 */
2503 unsigned long pfn;
2504 unsigned long addr_mask;
2505 pgprot_t pgprot;
2506 bool writable;
2507 bool special;
2508};
2509int follow_pfnmap_start(struct follow_pfnmap_args *args);
2510void follow_pfnmap_end(struct follow_pfnmap_args *args);
2511
2512extern void truncate_pagecache(struct inode *inode, loff_t new);
2513extern void truncate_setsize(struct inode *inode, loff_t newsize);
2514void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2515void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2516int generic_error_remove_folio(struct address_space *mapping,
2517 struct folio *folio);
2518
2519struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2520 unsigned long address, struct pt_regs *regs);
2521
2522#ifdef CONFIG_MMU
2523extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2524 unsigned long address, unsigned int flags,
2525 struct pt_regs *regs);
2526extern int fixup_user_fault(struct mm_struct *mm,
2527 unsigned long address, unsigned int fault_flags,
2528 bool *unlocked);
2529void unmap_mapping_pages(struct address_space *mapping,
2530 pgoff_t start, pgoff_t nr, bool even_cows);
2531void unmap_mapping_range(struct address_space *mapping,
2532 loff_t const holebegin, loff_t const holelen, int even_cows);
2533#else
2534static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2535 unsigned long address, unsigned int flags,
2536 struct pt_regs *regs)
2537{
2538 /* should never happen if there's no MMU */
2539 BUG();
2540 return VM_FAULT_SIGBUS;
2541}
2542static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2543 unsigned int fault_flags, bool *unlocked)
2544{
2545 /* should never happen if there's no MMU */
2546 BUG();
2547 return -EFAULT;
2548}
2549static inline void unmap_mapping_pages(struct address_space *mapping,
2550 pgoff_t start, pgoff_t nr, bool even_cows) { }
2551static inline void unmap_mapping_range(struct address_space *mapping,
2552 loff_t const holebegin, loff_t const holelen, int even_cows) { }
2553#endif
2554
2555static inline void unmap_shared_mapping_range(struct address_space *mapping,
2556 loff_t const holebegin, loff_t const holelen)
2557{
2558 unmap_mapping_range(mapping, holebegin, holelen, 0);
2559}
2560
2561static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2562 unsigned long addr);
2563
2564extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2565 void *buf, int len, unsigned int gup_flags);
2566extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2567 void *buf, int len, unsigned int gup_flags);
2568
2569#ifdef CONFIG_BPF_SYSCALL
2570extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
2571 void *buf, int len, unsigned int gup_flags);
2572#endif
2573
2574long get_user_pages_remote(struct mm_struct *mm,
2575 unsigned long start, unsigned long nr_pages,
2576 unsigned int gup_flags, struct page **pages,
2577 int *locked);
2578long pin_user_pages_remote(struct mm_struct *mm,
2579 unsigned long start, unsigned long nr_pages,
2580 unsigned int gup_flags, struct page **pages,
2581 int *locked);
2582
2583/*
2584 * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
2585 */
2586static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2587 unsigned long addr,
2588 int gup_flags,
2589 struct vm_area_struct **vmap)
2590{
2591 struct page *page;
2592 struct vm_area_struct *vma;
2593 int got;
2594
2595 if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2596 return ERR_PTR(-EINVAL);
2597
2598 got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2599
2600 if (got < 0)
2601 return ERR_PTR(got);
2602
2603 vma = vma_lookup(mm, addr);
2604 if (WARN_ON_ONCE(!vma)) {
2605 put_page(page);
2606 return ERR_PTR(-EINVAL);
2607 }
2608
2609 *vmap = vma;
2610 return page;
2611}
2612
2613long get_user_pages(unsigned long start, unsigned long nr_pages,
2614 unsigned int gup_flags, struct page **pages);
2615long pin_user_pages(unsigned long start, unsigned long nr_pages,
2616 unsigned int gup_flags, struct page **pages);
2617long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2618 struct page **pages, unsigned int gup_flags);
2619long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2620 struct page **pages, unsigned int gup_flags);
2621long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
2622 struct folio **folios, unsigned int max_folios,
2623 pgoff_t *offset);
2624int folio_add_pins(struct folio *folio, unsigned int pins);
2625
2626int get_user_pages_fast(unsigned long start, int nr_pages,
2627 unsigned int gup_flags, struct page **pages);
2628int pin_user_pages_fast(unsigned long start, int nr_pages,
2629 unsigned int gup_flags, struct page **pages);
2630void folio_add_pin(struct folio *folio);
2631
2632int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2633int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2634 struct task_struct *task, bool bypass_rlim);
2635
2636struct kvec;
2637struct page *get_dump_page(unsigned long addr, int *locked);
2638
2639bool folio_mark_dirty(struct folio *folio);
2640bool folio_mark_dirty_lock(struct folio *folio);
2641bool set_page_dirty(struct page *page);
2642int set_page_dirty_lock(struct page *page);
2643
2644int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2645
2646/*
2647 * Flags used by change_protection(). For now we make it a bitmap so
2648 * that we can pass in multiple flags just like parameters. However,
2649 * for now all the callers only use one of the flags at the same
2650 * time.
2651 */
2652/*
2653 * Whether we should manually check if we can map individual PTEs writable,
2654 * because something (e.g., COW, uffd-wp) blocks that from happening for all
2655 * PTEs automatically in a writable mapping.
2656 */
2657#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
2658/* Whether this protection change is for NUMA hints */
2659#define MM_CP_PROT_NUMA (1UL << 1)
2660/* Whether this change is for write protecting */
2661#define MM_CP_UFFD_WP (1UL << 2) /* do wp */
2662#define MM_CP_UFFD_WP_RESOLVE (1UL << 3) /* Resolve wp */
2663#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
2664 MM_CP_UFFD_WP_RESOLVE)
2665
2666bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2667 pte_t pte);
2668extern long change_protection(struct mmu_gather *tlb,
2669 struct vm_area_struct *vma, unsigned long start,
2670 unsigned long end, unsigned long cp_flags);
2671extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2672 struct vm_area_struct *vma, struct vm_area_struct **pprev,
2673 unsigned long start, unsigned long end, unsigned long newflags);
2674
2675/*
2676 * Like get_user_pages_fast(), but doesn't attempt to fault, so it may return short.
2677 */
2678int get_user_pages_fast_only(unsigned long start, int nr_pages,
2679 unsigned int gup_flags, struct page **pages);
2680
2681static inline bool get_user_page_fast_only(unsigned long addr,
2682 unsigned int gup_flags, struct page **pagep)
2683{
2684 return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2685}
2686/*
2687 * per-process(per-mm_struct) statistics.
2688 */
2689static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2690{
2691 return percpu_counter_read_positive(&mm->rss_stat[member]);
2692}
2693
2694void mm_trace_rss_stat(struct mm_struct *mm, int member);
2695
2696static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2697{
2698 percpu_counter_add(&mm->rss_stat[member], value);
2699
2700 mm_trace_rss_stat(mm, member);
2701}
2702
2703static inline void inc_mm_counter(struct mm_struct *mm, int member)
2704{
2705 percpu_counter_inc(&mm->rss_stat[member]);
2706
2707 mm_trace_rss_stat(mm, member);
2708}
2709
2710static inline void dec_mm_counter(struct mm_struct *mm, int member)
2711{
2712 percpu_counter_dec(&mm->rss_stat[member]);
2713
2714 mm_trace_rss_stat(mm, member);
2715}
2716
2717/* Optimized variant when folio is already known not to be anon */
2718static inline int mm_counter_file(struct folio *folio)
2719{
2720 if (folio_test_swapbacked(folio))
2721 return MM_SHMEMPAGES;
2722 return MM_FILEPAGES;
2723}
2724
2725static inline int mm_counter(struct folio *folio)
2726{
2727 if (folio_test_anon(folio))
2728 return MM_ANONPAGES;
2729 return mm_counter_file(folio);
2730}
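
/*
 * Example (hypothetical sketch): the usual accounting pattern when mapping
 * a folio picks the right counter with mm_counter() and bumps it by the
 * number of pages mapped.
 *
 *	static void account_folio_mapped(struct mm_struct *mm,
 *					 struct folio *folio)
 *	{
 *		add_mm_counter(mm, mm_counter(folio), folio_nr_pages(folio));
 *	}
 */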
2731
2732static inline unsigned long get_mm_rss(struct mm_struct *mm)
2733{
2734 return get_mm_counter(mm, MM_FILEPAGES) +
2735 get_mm_counter(mm, MM_ANONPAGES) +
2736 get_mm_counter(mm, MM_SHMEMPAGES);
2737}
2738
2739static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2740{
2741 return max(mm->hiwater_rss, get_mm_rss(mm));
2742}
2743
2744static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2745{
2746 return max(mm->hiwater_vm, mm->total_vm);
2747}
2748
2749static inline void update_hiwater_rss(struct mm_struct *mm)
2750{
2751 unsigned long _rss = get_mm_rss(mm);
2752
2753 if ((mm)->hiwater_rss < _rss)
2754 (mm)->hiwater_rss = _rss;
2755}
2756
2757static inline void update_hiwater_vm(struct mm_struct *mm)
2758{
2759 if (mm->hiwater_vm < mm->total_vm)
2760 mm->hiwater_vm = mm->total_vm;
2761}
2762
2763static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2764{
2765 mm->hiwater_rss = get_mm_rss(mm);
2766}
2767
2768static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2769 struct mm_struct *mm)
2770{
2771 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2772
2773 if (*maxrss < hiwater_rss)
2774 *maxrss = hiwater_rss;
2775}
2776
2777#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2778static inline int pte_special(pte_t pte)
2779{
2780 return 0;
2781}
2782
2783static inline pte_t pte_mkspecial(pte_t pte)
2784{
2785 return pte;
2786}
2787#endif
2788
2789#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
2790static inline bool pmd_special(pmd_t pmd)
2791{
2792 return false;
2793}
2794
2795static inline pmd_t pmd_mkspecial(pmd_t pmd)
2796{
2797 return pmd;
2798}
2799#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
2800
2801#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
2802static inline bool pud_special(pud_t pud)
2803{
2804 return false;
2805}
2806
2807static inline pud_t pud_mkspecial(pud_t pud)
2808{
2809 return pud;
2810}
2811#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
2812
2813#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2814static inline int pte_devmap(pte_t pte)
2815{
2816 return 0;
2817}
2818#endif
2819
2820extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2821 spinlock_t **ptl);
2822static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2823 spinlock_t **ptl)
2824{
2825 pte_t *ptep;
2826 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2827 return ptep;
2828}
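
/*
 * Example (hypothetical sketch): the expected pairing for get_locked_pte().
 * On success the PTE is mapped and its lock is held, so it must be released
 * with pte_unmap_unlock() (defined later in this header).
 *
 *	static bool addr_pte_present(struct mm_struct *mm, unsigned long addr)
 *	{
 *		spinlock_t *ptl;
 *		pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *		bool ret;
 *
 *		if (!pte)
 *			return false;
 *		ret = pte_present(ptep_get(pte));
 *		pte_unmap_unlock(pte, ptl);
 *		return ret;
 *	}
 */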
2829
2830#ifdef __PAGETABLE_P4D_FOLDED
2831static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2832 unsigned long address)
2833{
2834 return 0;
2835}
2836#else
2837int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2838#endif
2839
2840#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2841static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2842 unsigned long address)
2843{
2844 return 0;
2845}
2846static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2847static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2848
2849#else
2850int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2851
2852static inline void mm_inc_nr_puds(struct mm_struct *mm)
2853{
2854 if (mm_pud_folded(mm))
2855 return;
2856 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2857}
2858
2859static inline void mm_dec_nr_puds(struct mm_struct *mm)
2860{
2861 if (mm_pud_folded(mm))
2862 return;
2863 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2864}
2865#endif
2866
2867#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2868static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2869 unsigned long address)
2870{
2871 return 0;
2872}
2873
2874static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2875static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2876
2877#else
2878int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2879
2880static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2881{
2882 if (mm_pmd_folded(mm))
2883 return;
2884 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2885}
2886
2887static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2888{
2889 if (mm_pmd_folded(mm))
2890 return;
2891 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2892}
2893#endif
2894
2895#ifdef CONFIG_MMU
2896static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2897{
2898 atomic_long_set(&mm->pgtables_bytes, 0);
2899}
2900
2901static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2902{
2903 return atomic_long_read(&mm->pgtables_bytes);
2904}
2905
2906static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2907{
2908 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2909}
2910
2911static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2912{
2913 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2914}
2915#else
2916
2917static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2918static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2919{
2920 return 0;
2921}
2922
2923static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2924static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2925#endif
2926
2927int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2928int __pte_alloc_kernel(pmd_t *pmd);
2929
2930#if defined(CONFIG_MMU)
2931
2932static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2933 unsigned long address)
2934{
2935 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2936 NULL : p4d_offset(pgd, address);
2937}
2938
2939static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2940 unsigned long address)
2941{
2942 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2943 NULL : pud_offset(p4d, address);
2944}
2945
2946static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2947{
2948 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
2949 NULL : pmd_offset(pud, address);
2950}
2951#endif /* CONFIG_MMU */
2952
2953static inline struct ptdesc *virt_to_ptdesc(const void *x)
2954{
2955 return page_ptdesc(virt_to_page(x));
2956}
2957
2958static inline void *ptdesc_to_virt(const struct ptdesc *pt)
2959{
2960 return page_to_virt(ptdesc_page(pt));
2961}
2962
2963static inline void *ptdesc_address(const struct ptdesc *pt)
2964{
2965 return folio_address(ptdesc_folio(pt));
2966}
2967
2968static inline bool pagetable_is_reserved(struct ptdesc *pt)
2969{
2970 return folio_test_reserved(ptdesc_folio(pt));
2971}
2972
2973/**
2974 * pagetable_alloc - Allocate pagetables
2975 * @gfp: GFP flags
2976 * @order: desired pagetable order
2977 *
2978 * pagetable_alloc allocates memory for page tables as well as a page table
2979 * descriptor to describe that memory.
2980 *
2981 * Return: The ptdesc describing the allocated page tables.
2982 */
2983static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
2984{
2985 struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
2986
2987 return page_ptdesc(page);
2988}
2989#define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
2990
2991/**
2992 * pagetable_free - Free pagetables
2993 * @pt: The page table descriptor
2994 *
2995 * pagetable_free frees the memory of all page tables described by a page
2996 * table descriptor and the memory for the descriptor itself.
2997 */
2998static inline void pagetable_free(struct ptdesc *pt)
2999{
3000 struct page *page = ptdesc_page(pt);
3001
3002 __free_pages(page, compound_order(page));
3003}
3004
3005#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
3006#if ALLOC_SPLIT_PTLOCKS
3007void __init ptlock_cache_init(void);
3008bool ptlock_alloc(struct ptdesc *ptdesc);
3009void ptlock_free(struct ptdesc *ptdesc);
3010
3011static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
3012{
3013 return ptdesc->ptl;
3014}
3015#else /* ALLOC_SPLIT_PTLOCKS */
3016static inline void ptlock_cache_init(void)
3017{
3018}
3019
3020static inline bool ptlock_alloc(struct ptdesc *ptdesc)
3021{
3022 return true;
3023}
3024
3025static inline void ptlock_free(struct ptdesc *ptdesc)
3026{
3027}
3028
3029static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
3030{
3031 return &ptdesc->ptl;
3032}
3033#endif /* ALLOC_SPLIT_PTLOCKS */
3034
3035static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3036{
3037 return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
3038}
3039
3040static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3041{
3042 BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
3043 BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
3044 return ptlock_ptr(virt_to_ptdesc(pte));
3045}
3046
3047static inline bool ptlock_init(struct ptdesc *ptdesc)
3048{
3049 /*
3050 * prep_new_page() initializes page->private (and therefore page->ptl)
3051 * with 0. Make sure nobody has taken it into use in between.
3052 *
3053 * That can happen if an arch tries to use slab for page table allocation:
3054 * slab code uses page->slab_cache, which shares storage with page->ptl.
3055 */
3056 VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
3057 if (!ptlock_alloc(ptdesc))
3058 return false;
3059 spin_lock_init(ptlock_ptr(ptdesc));
3060 return true;
3061}
3062
3063#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3064/*
3065 * We use mm->page_table_lock to guard all pagetable pages of the mm.
3066 */
3067static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3068{
3069 return &mm->page_table_lock;
3070}
3071static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3072{
3073 return &mm->page_table_lock;
3074}
3075static inline void ptlock_cache_init(void) {}
3076static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
3077static inline void ptlock_free(struct ptdesc *ptdesc) {}
3078#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
3079
3080static inline void __pagetable_ctor(struct ptdesc *ptdesc)
3081{
3082 struct folio *folio = ptdesc_folio(ptdesc);
3083
3084 __folio_set_pgtable(folio);
3085 lruvec_stat_add_folio(folio, NR_PAGETABLE);
3086}
3087
3088static inline void pagetable_dtor(struct ptdesc *ptdesc)
3089{
3090 struct folio *folio = ptdesc_folio(ptdesc);
3091
3092 ptlock_free(ptdesc);
3093 __folio_clear_pgtable(folio);
3094 lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3095}
3096
3097static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
3098{
3099 pagetable_dtor(ptdesc);
3100 pagetable_free(ptdesc);
3101}
3102
3103static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
3104{
3105 if (!ptlock_init(ptdesc))
3106 return false;
3107 __pagetable_ctor(ptdesc);
3108 return true;
3109}
3110
3111pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
3112static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
3113 pmd_t *pmdvalp)
3114{
3115 pte_t *pte;
3116
3117 __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
3118 return pte;
3119}
3120static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
3121{
3122 return __pte_offset_map(pmd, addr, NULL);
3123}
3124
3125pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3126 unsigned long addr, spinlock_t **ptlp);
3127static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3128 unsigned long addr, spinlock_t **ptlp)
3129{
3130 pte_t *pte;
3131
3132 __cond_lock(RCU, __cond_lock(*ptlp,
3133 pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
3134 return pte;
3135}
3136
3137pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
3138 unsigned long addr, spinlock_t **ptlp);
3139pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
3140 unsigned long addr, pmd_t *pmdvalp,
3141 spinlock_t **ptlp);
3142
3143#define pte_unmap_unlock(pte, ptl) do { \
3144 spin_unlock(ptl); \
3145 pte_unmap(pte); \
3146} while (0)
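
/*
 * Example (hypothetical sketch): the canonical pairing of
 * pte_offset_map_lock() with pte_unmap_unlock(). The map can fail if the
 * page table was freed or replaced underneath us, so the NULL return must
 * be handled.
 *
 *	static void with_locked_pte(struct mm_struct *mm, pmd_t *pmd,
 *				    unsigned long addr)
 *	{
 *		spinlock_t *ptl;
 *		pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *		if (!pte)
 *			return;
 *		// ... operate on *pte while holding ptl ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */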
3147
3148#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3149
3150#define pte_alloc_map(mm, pmd, address) \
3151 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3152
3153#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
3154 (pte_alloc(mm, pmd) ? \
3155 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
3156
3157#define pte_alloc_kernel(pmd, address) \
3158 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd)) ? \
3159 NULL : pte_offset_kernel(pmd, address))
3160
3161#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
3162
3163static inline struct page *pmd_pgtable_page(pmd_t *pmd)
3164{
3165 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
3166 return virt_to_page((void *)((unsigned long) pmd & mask));
3167}
3168
3169static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
3170{
3171 return page_ptdesc(pmd_pgtable_page(pmd));
3172}
3173
3174static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3175{
3176 return ptlock_ptr(pmd_ptdesc(pmd));
3177}
3178
3179static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
3180{
3181#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3182 ptdesc->pmd_huge_pte = NULL;
3183#endif
3184 return ptlock_init(ptdesc);
3185}
3186
3187#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3188
3189#else
3190
3191static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3192{
3193 return &mm->page_table_lock;
3194}
3195
3196static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3197
3198#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3199
3200#endif
3201
3202static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3203{
3204 spinlock_t *ptl = pmd_lockptr(mm, pmd);
3205 spin_lock(ptl);
3206 return ptl;
3207}
3208
3209static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
3210{
3211 if (!pmd_ptlock_init(ptdesc))
3212 return false;
3213 ptdesc_pmd_pts_init(ptdesc);
3214 __pagetable_ctor(ptdesc);
3215 return true;
3216}
3217
3218/*
3219 * No scalability reason to split PUD locks yet, but follow the same pattern
3220 * as the PMD locks to make it easier if we decide to. The VM should not be
3221 * considered ready to switch to split PUD locks yet; there may be places
3222 * which need to be converted from page_table_lock.
3223 */
3224static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3225{
3226 return &mm->page_table_lock;
3227}
3228
3229static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3230{
3231 spinlock_t *ptl = pud_lockptr(mm, pud);
3232
3233 spin_lock(ptl);
3234 return ptl;
3235}
3236
3237static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3238{
3239 __pagetable_ctor(ptdesc);
3240}
3241
3242static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
3243{
3244 __pagetable_ctor(ptdesc);
3245}
3246
3247static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
3248{
3249 __pagetable_ctor(ptdesc);
3250}
3251
3252extern void __init pagecache_init(void);
3253extern void free_initmem(void);
3254
3255/*
3256 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
3257 * into the buddy system. The freed pages will be poisoned with pattern
3258 * "poison" if it's within range [0, UCHAR_MAX].
3259 * Return pages freed into the buddy system.
3260 */
3261extern unsigned long free_reserved_area(void *start, void *end,
3262 int poison, const char *s);
3263
3264extern void adjust_managed_page_count(struct page *page, long count);
3265
3266extern void reserve_bootmem_region(phys_addr_t start,
3267 phys_addr_t end, int nid);
3268
3269/* Free the reserved page into the buddy system, so it gets managed. */
3270void free_reserved_page(struct page *page);
3271
3272static inline void mark_page_reserved(struct page *page)
3273{
3274 SetPageReserved(page);
3275 adjust_managed_page_count(page, -1);
3276}
3277
3278static inline void free_reserved_ptdesc(struct ptdesc *pt)
3279{
3280 free_reserved_page(ptdesc_page(pt));
3281}
3282
3283/*
3284 * Default method to free all the __init memory into the buddy system.
3285 * The freed pages will be poisoned with pattern "poison" if it is within
3286 * the range [0, UCHAR_MAX].
3287 * Returns the number of pages freed into the buddy system.
3288 */
3289static inline unsigned long free_initmem_default(int poison)
3290{
3291 extern char __init_begin[], __init_end[];
3292
3293 return free_reserved_area(&__init_begin, &__init_end,
3294 poison, "unused kernel image (initmem)");
3295}
3296
3297static inline unsigned long get_num_physpages(void)
3298{
3299 int nid;
3300 unsigned long phys_pages = 0;
3301
3302 for_each_online_node(nid)
3303 phys_pages += node_present_pages(nid);
3304
3305 return phys_pages;
3306}
3307
3308/*
3309 * Using memblock node mappings, an architecture may initialise its
3310 * zones, allocate the backing mem_map and account for memory holes in an
3311 * architecture independent manner.
3312 *
3313 * An architecture is expected to register ranges of page frames backed by
3314 * physical memory with memblock_add[_node]() before calling
3315 * free_area_init(), passing in the PFN each zone ends at. For basic
3316 * usage, an architecture is expected to do something like
3317 *
3318 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3319 * max_highmem_pfn};
3320 * for_each_valid_physical_page_range()
3321 * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3322 * free_area_init(max_zone_pfns);
3323 */
3324void free_area_init(unsigned long *max_zone_pfn);
3325unsigned long node_map_pfn_alignment(void);
3326extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3327 unsigned long end_pfn);
3328extern void get_pfn_range_for_nid(unsigned int nid,
3329 unsigned long *start_pfn, unsigned long *end_pfn);
3330
3331#ifndef CONFIG_NUMA
3332static inline int early_pfn_to_nid(unsigned long pfn)
3333{
3334 return 0;
3335}
3336#else
3337/* please see mm/page_alloc.c */
3338extern int __meminit early_pfn_to_nid(unsigned long pfn);
3339#endif
3340
3341extern void mem_init(void);
3342extern void __init mmap_init(void);
3343
3344extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3345static inline void show_mem(void)
3346{
3347 __show_mem(0, NULL, MAX_NR_ZONES - 1);
3348}
3349extern long si_mem_available(void);
3350extern void si_meminfo(struct sysinfo * val);
3351extern void si_meminfo_node(struct sysinfo *val, int nid);
3352
3353extern __printf(3, 4)
3354void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
3355
3356extern void setup_per_cpu_pageset(void);
3357
3358/* nommu.c */
3359extern atomic_long_t mmap_pages_allocated;
3360extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
3361
3362/* interval_tree.c */
3363void vma_interval_tree_insert(struct vm_area_struct *node,
3364 struct rb_root_cached *root);
3365void vma_interval_tree_insert_after(struct vm_area_struct *node,
3366 struct vm_area_struct *prev,
3367 struct rb_root_cached *root);
3368void vma_interval_tree_remove(struct vm_area_struct *node,
3369 struct rb_root_cached *root);
3370struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
3371 unsigned long start, unsigned long last);
3372struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
3373 unsigned long start, unsigned long last);
3374
3375#define vma_interval_tree_foreach(vma, root, start, last) \
3376 for (vma = vma_interval_tree_iter_first(root, start, last); \
3377 vma; vma = vma_interval_tree_iter_next(vma, start, last))
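
/*
 * Example (hypothetical sketch): visiting every VMA in an address_space's
 * interval tree that overlaps a range of page offsets, in the style of the
 * rmap walkers.
 *
 *	static void log_mappings(struct address_space *mapping,
 *				 pgoff_t first, pgoff_t last)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
 *			pr_debug("vma spans [%lx, %lx)\n",
 *				 vma->vm_start, vma->vm_end);
 *	}
 */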
3378
3379void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
3380 struct rb_root_cached *root);
3381void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
3382 struct rb_root_cached *root);
3383struct anon_vma_chain *
3384anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
3385 unsigned long start, unsigned long last);
3386struct anon_vma_chain *anon_vma_interval_tree_iter_next(
3387 struct anon_vma_chain *node, unsigned long start, unsigned long last);
3388#ifdef CONFIG_DEBUG_VM_RB
3389void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
3390#endif
3391
3392#define anon_vma_interval_tree_foreach(avc, root, start, last) \
3393 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3394 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
3395
3396/* mmap.c */
3397extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
3398extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
3399extern void exit_mmap(struct mm_struct *);
3400int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
3401bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
3402 unsigned long addr, bool write);
3403
3404static inline int check_data_rlimit(unsigned long rlim,
3405 unsigned long new,
3406 unsigned long start,
3407 unsigned long end_data,
3408 unsigned long start_data)
3409{
3410 if (rlim < RLIM_INFINITY) {
3411 if (((new - start) + (end_data - start_data)) > rlim)
3412 return -ENOSPC;
3413 }
3414
3415 return 0;
3416}
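
/*
 * Example (illustrative sketch): the brk-style check this helper was written
 * for, roughly as sys_brk() uses it; 'newbrk' is a placeholder value:
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		return -ENOSPC;
 */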
3417
3418extern int mm_take_all_locks(struct mm_struct *mm);
3419extern void mm_drop_all_locks(struct mm_struct *mm);
3420
3421extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3422extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3423extern struct file *get_mm_exe_file(struct mm_struct *mm);
3424extern struct file *get_task_exe_file(struct task_struct *task);
3425
3426extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
3427extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
3428
3429extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3430 const struct vm_special_mapping *sm);
3431extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3432 unsigned long addr, unsigned long len,
3433 unsigned long flags,
3434 const struct vm_special_mapping *spec);
3435
3436unsigned long randomize_stack_top(unsigned long stack_top);
3437unsigned long randomize_page(unsigned long start, unsigned long range);
3438
3439unsigned long
3440__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3441 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
3442
3443static inline unsigned long
3444get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3445 unsigned long pgoff, unsigned long flags)
3446{
3447 return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
3448}
3449
3450extern unsigned long do_mmap(struct file *file, unsigned long addr,
3451 unsigned long len, unsigned long prot, unsigned long flags,
3452 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
3453 struct list_head *uf);
3454extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3455 unsigned long start, size_t len, struct list_head *uf,
3456 bool unlock);
3457int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3458 struct mm_struct *mm, unsigned long start,
3459 unsigned long end, struct list_head *uf, bool unlock);
3460extern int do_munmap(struct mm_struct *, unsigned long, size_t,
3461 struct list_head *uf);
3462extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3463
3464#ifdef CONFIG_MMU
3465extern int __mm_populate(unsigned long addr, unsigned long len,
3466 int ignore_errors);
3467static inline void mm_populate(unsigned long addr, unsigned long len)
3468{
3469 /* Ignore errors */
3470 (void) __mm_populate(addr, len, 1);
3471}
3472#else
3473static inline void mm_populate(unsigned long addr, unsigned long len) {}
3474#endif
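
/*
 * Example (illustrative sketch): the common mmap-then-populate pattern, in
 * the style of vm_mmap_pgoff(); locking and error handling are elided:
 *
 *	unsigned long populate = 0;
 *	unsigned long ret;
 *
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	if (populate)
 *		mm_populate(ret, populate);
 */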
3475
3476/* These take the mm semaphore themselves */
3477extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
3478extern int vm_munmap(unsigned long, size_t);
3479extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
3480 unsigned long, unsigned long,
3481 unsigned long, unsigned long);
3482
3483struct vm_unmapped_area_info {
3484#define VM_UNMAPPED_AREA_TOPDOWN 1
3485 unsigned long flags;
3486 unsigned long length;
3487 unsigned long low_limit;
3488 unsigned long high_limit;
3489 unsigned long align_mask;
3490 unsigned long align_offset;
3491 unsigned long start_gap;
3492};
3493
3494extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
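
/*
 * Example (illustrative sketch): a minimal bottom-up search as an arch
 * get_unmapped_area() might issue it; the limits here are assumptions:
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	return vm_unmapped_area(&info);
 */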
3495
3496/* truncate.c */
3497extern void truncate_inode_pages(struct address_space *, loff_t);
3498extern void truncate_inode_pages_range(struct address_space *,
3499 loff_t lstart, loff_t lend);
3500extern void truncate_inode_pages_final(struct address_space *);
3501
3502/* generic vm_area_ops exported for stackable file systems */
3503extern vm_fault_t filemap_fault(struct vm_fault *vmf);
3504extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3505 pgoff_t start_pgoff, pgoff_t end_pgoff);
3506extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
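
/*
 * Example (illustrative sketch): how a stackable filesystem might reuse the
 * generic handlers above; the ops name is a placeholder:
 *
 *	static const struct vm_operations_struct example_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= filemap_page_mkwrite,
 *	};
 */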
3507
3508extern unsigned long stack_guard_gap;
3509/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
3510int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3511struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3512
3513/* Look up the first VMA which satisfies addr < vm_end; NULL if none. */
3514extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3515extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3516 struct vm_area_struct **pprev);
3517
3518/*
3519 * Look up the first VMA which intersects the interval [start_addr, end_addr);
3520 * NULL if none. Assume start_addr < end_addr.
3521 */
3522struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3523 unsigned long start_addr, unsigned long end_addr);
3524
3525/**
3526 * vma_lookup() - Find a VMA at a specific address
3527 * @mm: The process address space.
3528 * @addr: The user address.
3529 *
3530 * Return: The vm_area_struct at the given address, %NULL otherwise.
3531 */
3532static inline
3533struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3534{
3535 return mtree_load(&mm->mm_mt, addr);
3536}
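
/*
 * Example (illustrative sketch): vma_lookup() must be called with the mmap
 * lock held so the returned VMA cannot go away; do_something() is a
 * placeholder:
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	if (vma)
 *		do_something(vma);
 *	mmap_read_unlock(mm);
 */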
3537
3538static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
3539{
3540 if (vma->vm_flags & VM_GROWSDOWN)
3541 return stack_guard_gap;
3542
3543 /* See reasoning around the VM_SHADOW_STACK definition */
3544 if (vma->vm_flags & VM_SHADOW_STACK)
3545 return PAGE_SIZE;
3546
3547 return 0;
3548}
3549
3550static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
3551{
3552 unsigned long gap = stack_guard_start_gap(vma);
3553 unsigned long vm_start = vma->vm_start;
3554
3555 vm_start -= gap;
3556 if (vm_start > vma->vm_start)
3557 vm_start = 0;
3558 return vm_start;
3559}
3560
3561static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
3562{
3563 unsigned long vm_end = vma->vm_end;
3564
3565 if (vma->vm_flags & VM_GROWSUP) {
3566 vm_end += stack_guard_gap;
3567 if (vm_end < vma->vm_end)
3568 vm_end = -PAGE_SIZE;
3569 }
3570 return vm_end;
3571}
3572
3573static inline unsigned long vma_pages(struct vm_area_struct *vma)
3574{
3575 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3576}
3577
3578/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
3579static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3580 unsigned long vm_start, unsigned long vm_end)
3581{
3582 struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3583
3584 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
3585 vma = NULL;
3586
3587 return vma;
3588}
3589
3590static inline bool range_in_vma(struct vm_area_struct *vma,
3591 unsigned long start, unsigned long end)
3592{
3593 return (vma && vma->vm_start <= start && end <= vma->vm_end);
3594}
3595
3596#ifdef CONFIG_MMU
3597pgprot_t vm_get_page_prot(unsigned long vm_flags);
3598void vma_set_page_prot(struct vm_area_struct *vma);
3599#else
3600static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
3601{
3602 return __pgprot(0);
3603}
3604static inline void vma_set_page_prot(struct vm_area_struct *vma)
3605{
3606 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3607}
3608#endif
3609
3610void vma_set_file(struct vm_area_struct *vma, struct file *file);
3611
3612#ifdef CONFIG_NUMA_BALANCING
3613unsigned long change_prot_numa(struct vm_area_struct *vma,
3614 unsigned long start, unsigned long end);
3615#endif
3616
3617struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
3618 unsigned long addr);
3619int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
3620 unsigned long pfn, unsigned long size, pgprot_t);
3621int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3622 unsigned long pfn, unsigned long size, pgprot_t prot);
3623int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3624int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3625 struct page **pages, unsigned long *num);
3626int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3627 unsigned long num);
3628int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3629 unsigned long num);
3630vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
3631 bool write);
3632vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3633 unsigned long pfn);
3634vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3635 unsigned long pfn, pgprot_t pgprot);
3636vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3637 pfn_t pfn);
3638vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3639 unsigned long addr, pfn_t pfn);
3640int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3641
3642static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3643 unsigned long addr, struct page *page)
3644{
3645 int err = vm_insert_page(vma, addr, page);
3646
3647 if (err == -ENOMEM)
3648 return VM_FAULT_OOM;
3649 if (err < 0 && err != -EBUSY)
3650 return VM_FAULT_SIGBUS;
3651
3652 return VM_FAULT_NOPAGE;
3653}
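
/*
 * Example (illustrative sketch): a driver ->fault handler built on the
 * wrapper above; lookup_example_page() is a hypothetical helper:
 *
 *	static vm_fault_t example_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = lookup_example_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */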
3654
3655#ifndef io_remap_pfn_range
3656static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3657 unsigned long addr, unsigned long pfn,
3658 unsigned long size, pgprot_t prot)
3659{
3660 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
3661}
3662#endif
3663
3664static inline vm_fault_t vmf_error(int err)
3665{
3666 if (err == -ENOMEM)
3667 return VM_FAULT_OOM;
3668 else if (err == -EHWPOISON)
3669 return VM_FAULT_HWPOISON;
3670 return VM_FAULT_SIGBUS;
3671}
3672
3673/*
3674 * Convert errno to return value for ->page_mkwrite() calls.
3675 *
3676 * This should eventually be merged with vmf_error() above, but will need a
3677 * careful audit of all vmf_error() callers.
3678 */
3679static inline vm_fault_t vmf_fs_error(int err)
3680{
3681 if (err == 0)
3682 return VM_FAULT_LOCKED;
3683 if (err == -EFAULT || err == -EAGAIN)
3684 return VM_FAULT_NOPAGE;
3685 if (err == -ENOMEM)
3686 return VM_FAULT_OOM;
3687 /* -ENOSPC, -EDQUOT, -EIO ... */
3688 return VM_FAULT_SIGBUS;
3689}
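
/*
 * Example (illustrative sketch): a filesystem ->page_mkwrite() funneling its
 * errno through the helper above; do_example_mkwrite() is hypothetical and
 * assumed to return 0 with the folio locked on success:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return vmf_fs_error(do_example_mkwrite(vmf));
 *	}
 */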
3690
3691static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
3692{
3693 if (vm_fault & VM_FAULT_OOM)
3694 return -ENOMEM;
3695 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3696 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
3697 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3698 return -EFAULT;
3699 return 0;
3700}
3701
3702/*
3703 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
3704 * a (NUMA hinting) fault is required.
3705 */
3706static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
3707 unsigned int flags)
3708{
3709 /*
3710 * If callers don't want to honor NUMA hinting faults, no need to
3711 * determine if we would actually have to trigger a NUMA hinting fault.
3712 */
3713 if (!(flags & FOLL_HONOR_NUMA_FAULT))
3714 return true;
3715
3716 /*
3717 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
3718 *
3719 * Requiring a fault here even for inaccessible VMAs would mean that
3720 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
3721 * refuses to process NUMA hinting faults in inaccessible VMAs.
3722 */
3723 return !vma_is_accessible(vma);
3724}
3725
3726typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3727extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3728 unsigned long size, pte_fn_t fn, void *data);
3729extern int apply_to_existing_page_range(struct mm_struct *mm,
3730 unsigned long address, unsigned long size,
3731 pte_fn_t fn, void *data);
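
/*
 * Example (illustrative sketch): a pte_fn_t callback that counts present
 * PTEs over an existing kernel mapping; all names are placeholders:
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(ptep_get(pte)))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	apply_to_existing_page_range(&init_mm, start, size,
 *				     count_present, &count);
 */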
3732
3733#ifdef CONFIG_PAGE_POISONING
3734extern void __kernel_poison_pages(struct page *page, int numpages);
3735extern void __kernel_unpoison_pages(struct page *page, int numpages);
3736extern bool _page_poisoning_enabled_early;
3737DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3738static inline bool page_poisoning_enabled(void)
3739{
3740 return _page_poisoning_enabled_early;
3741}
3742/*
3743 * For use in fast paths after mem_debugging_and_hardening_init() has run,
3744 * or when a false negative result is not harmful when called too early.
3745 */
3746static inline bool page_poisoning_enabled_static(void)
3747{
3748 return static_branch_unlikely(&_page_poisoning_enabled);
3749}
3750static inline void kernel_poison_pages(struct page *page, int numpages)
3751{
3752 if (page_poisoning_enabled_static())
3753 __kernel_poison_pages(page, numpages);
3754}
3755static inline void kernel_unpoison_pages(struct page *page, int numpages)
3756{
3757 if (page_poisoning_enabled_static())
3758 __kernel_unpoison_pages(page, numpages);
3759}
3760#else
3761static inline bool page_poisoning_enabled(void) { return false; }
3762static inline bool page_poisoning_enabled_static(void) { return false; }
3763static inline void __kernel_poison_pages(struct page *page, int numpages) { }
3764static inline void kernel_poison_pages(struct page *page, int numpages) { }
3765static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3766#endif
3767
3768DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3769static inline bool want_init_on_alloc(gfp_t flags)
3770{
3771 if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3772 &init_on_alloc))
3773 return true;
3774 return flags & __GFP_ZERO;
3775}
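
/*
 * Example (illustrative sketch): the allocator-side use of this predicate,
 * in the style of the page allocator's post-allocation hook:
 *
 *	if (want_init_on_alloc(gfp_flags)) {
 *		int i;
 *
 *		for (i = 0; i < (1 << order); i++)
 *			clear_highpage(page + i);
 *	}
 */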
3776
3777DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3778static inline bool want_init_on_free(void)
3779{
3780 return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3781 &init_on_free);
3782}
3783
3784extern bool _debug_pagealloc_enabled_early;
3785DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3786
3787static inline bool debug_pagealloc_enabled(void)
3788{
3789 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3790 _debug_pagealloc_enabled_early;
3791}
3792
3793/*
3794 * For use in fast paths after mem_debugging_and_hardening_init() has run,
3795 * or when a false negative result is not harmful when called too early.
3796 */
3797static inline bool debug_pagealloc_enabled_static(void)
3798{
3799 if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3800 return false;
3801
3802 return static_branch_unlikely(&_debug_pagealloc_enabled);
3803}
3804
3805/*
3806 * To support DEBUG_PAGEALLOC, the architecture must ensure that
3807 * __kernel_map_pages() never fails.
3808 */
3809extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3810#ifdef CONFIG_DEBUG_PAGEALLOC
3811static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3812{
3813 if (debug_pagealloc_enabled_static())
3814 __kernel_map_pages(page, numpages, 1);
3815}
3816
3817static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3818{
3819 if (debug_pagealloc_enabled_static())
3820 __kernel_map_pages(page, numpages, 0);
3821}
3822
3823extern unsigned int _debug_guardpage_minorder;
3824DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3825
3826static inline unsigned int debug_guardpage_minorder(void)
3827{
3828 return _debug_guardpage_minorder;
3829}
3830
3831static inline bool debug_guardpage_enabled(void)
3832{
3833 return static_branch_unlikely(&_debug_guardpage_enabled);
3834}
3835
3836static inline bool page_is_guard(struct page *page)
3837{
3838 if (!debug_guardpage_enabled())
3839 return false;
3840
3841 return PageGuard(page);
3842}
3843
3844bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
3845static inline bool set_page_guard(struct zone *zone, struct page *page,
3846 unsigned int order)
3847{
3848 if (!debug_guardpage_enabled())
3849 return false;
3850 return __set_page_guard(zone, page, order);
3851}
3852
3853void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
3854static inline void clear_page_guard(struct zone *zone, struct page *page,
3855 unsigned int order)
3856{
3857 if (!debug_guardpage_enabled())
3858 return;
3859 __clear_page_guard(zone, page, order);
3860}
3861
3862#else /* CONFIG_DEBUG_PAGEALLOC */
3863static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3864static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3865static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3866static inline bool debug_guardpage_enabled(void) { return false; }
3867static inline bool page_is_guard(struct page *page) { return false; }
3868static inline bool set_page_guard(struct zone *zone, struct page *page,
3869 unsigned int order) { return false; }
3870static inline void clear_page_guard(struct zone *zone, struct page *page,
3871 unsigned int order) {}
3872#endif /* CONFIG_DEBUG_PAGEALLOC */
3873
3874#ifdef __HAVE_ARCH_GATE_AREA
3875extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3876extern int in_gate_area_no_mm(unsigned long addr);
3877extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3878#else
3879static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3880{
3881 return NULL;
3882}
3883static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3884static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3885{
3886 return 0;
3887}
3888#endif /* __HAVE_ARCH_GATE_AREA */
3889
3890extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3891
3892void drop_slab(void);
3893
3894#ifndef CONFIG_MMU
3895#define randomize_va_space 0
3896#else
3897extern int randomize_va_space;
3898#endif
3899
3900const char * arch_vma_name(struct vm_area_struct *vma);
3901#ifdef CONFIG_MMU
3902void print_vma_addr(char *prefix, unsigned long rip);
3903#else
3904static inline void print_vma_addr(char *prefix, unsigned long rip)
3905{
3906}
3907#endif
3908
3909void *sparse_buffer_alloc(unsigned long size);
3910unsigned long section_map_size(void);
3911struct page * __populate_section_memmap(unsigned long pfn,
3912 unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3913 struct dev_pagemap *pgmap);
3914pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3915p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3916pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3917pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3918pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3919 struct vmem_altmap *altmap, unsigned long ptpfn,
3920 unsigned long flags);
3921void *vmemmap_alloc_block(unsigned long size, int node);
3922struct vmem_altmap;
3923void *vmemmap_alloc_block_buf(unsigned long size, int node,
3924 struct vmem_altmap *altmap);
3925void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3926void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
3927 unsigned long addr, unsigned long next);
3928int vmemmap_check_pmd(pmd_t *pmd, int node,
3929 unsigned long addr, unsigned long next);
3930int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3931 int node, struct vmem_altmap *altmap);
3932int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
3933 int node, struct vmem_altmap *altmap);
3934int vmemmap_populate(unsigned long start, unsigned long end, int node,
3935 struct vmem_altmap *altmap);
3936int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
3937 unsigned long headsize);
3938int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
3939 unsigned long headsize);
3940void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
3941 unsigned long headsize);
3942void vmemmap_populate_print_last(void);
3943#ifdef CONFIG_MEMORY_HOTPLUG
3944void vmemmap_free(unsigned long start, unsigned long end,
3945 struct vmem_altmap *altmap);
3946#endif
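
/*
 * Example (illustrative sketch): the simplest vmemmap_populate() an
 * architecture without huge-page vmemmap support might provide:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */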
3947
3948#ifdef CONFIG_SPARSEMEM_VMEMMAP
3949static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3950{
3951 /* number of pfns from base where pfn_to_page() is valid */
3952 if (altmap)
3953 return altmap->reserve + altmap->free;
3954 return 0;
3955}
3956
3957static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3958 unsigned long nr_pfns)
3959{
3960 altmap->alloc -= nr_pfns;
3961}
3962#else
3963static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3964{
3965 return 0;
3966}
3967
3968static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3969 unsigned long nr_pfns)
3970{
3971}
3972#endif
3973
3974#define VMEMMAP_RESERVE_NR 2
3975#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
3976static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
3977 struct dev_pagemap *pgmap)
3978{
3979 unsigned long nr_pages;
3980 unsigned long nr_vmemmap_pages;
3981
3982 if (!pgmap || !is_power_of_2(sizeof(struct page)))
3983 return false;
3984
3985 nr_pages = pgmap_vmemmap_nr(pgmap);
3986 nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
3987 /*
3988	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
3989	 * pages. See the layout diagram in Documentation/mm/vmemmap_dedup.rst.
3990 */
3991 return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
3992}
3993/*
3994 * If we don't have an architecture override, use the generic rule
3995 */
3996#ifndef vmemmap_can_optimize
3997#define vmemmap_can_optimize __vmemmap_can_optimize
3998#endif
3999
4000#else
4001static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
4002 struct dev_pagemap *pgmap)
4003{
4004 return false;
4005}
4006#endif
4007
4008enum mf_flags {
4009 MF_COUNT_INCREASED = 1 << 0,
4010 MF_ACTION_REQUIRED = 1 << 1,
4011 MF_MUST_KILL = 1 << 2,
4012 MF_SOFT_OFFLINE = 1 << 3,
4013 MF_UNPOISON = 1 << 4,
4014 MF_SW_SIMULATED = 1 << 5,
4015 MF_NO_RETRY = 1 << 6,
4016 MF_MEM_PRE_REMOVE = 1 << 7,
4017};
4018int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
4019 unsigned long count, int mf_flags);
4020extern int memory_failure(unsigned long pfn, int flags);
4021extern void memory_failure_queue_kick(int cpu);
4022extern int unpoison_memory(unsigned long pfn);
4023extern atomic_long_t num_poisoned_pages __read_mostly;
4024extern int soft_offline_page(unsigned long pfn, int flags);
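
/*
 * Example (illustrative sketch): how a machine-check handler might report a
 * corrupted page, in the style of the x86 MCE code:
 *
 *	if (memory_failure(pfn, MF_ACTION_REQUIRED) < 0)
 *		pr_err("Memory error not recovered\n");
 */
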
4025#ifdef CONFIG_MEMORY_FAILURE
4026/*
4027 * Sysfs entries for memory failure handling statistics.
4028 */
4029extern const struct attribute_group memory_failure_attr_group;
4030extern void memory_failure_queue(unsigned long pfn, int flags);
4031extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4032 bool *migratable_cleared);
4033void num_poisoned_pages_inc(unsigned long pfn);
4034void num_poisoned_pages_sub(unsigned long pfn, long i);
4035#else
4036static inline void memory_failure_queue(unsigned long pfn, int flags)
4037{
4038}
4039
4040static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
4041 bool *migratable_cleared)
4042{
4043 return 0;
4044}
4045
4046static inline void num_poisoned_pages_inc(unsigned long pfn)
4047{
4048}
4049
4050static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
4051{
4052}
4053#endif
4054
4055#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
4056extern void memblk_nr_poison_inc(unsigned long pfn);
4057extern void memblk_nr_poison_sub(unsigned long pfn, long i);
4058#else
4059static inline void memblk_nr_poison_inc(unsigned long pfn)
4060{
4061}
4062
4063static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
4064{
4065}
4066#endif
4067
4068#ifndef arch_memory_failure
4069static inline int arch_memory_failure(unsigned long pfn, int flags)
4070{
4071 return -ENXIO;
4072}
4073#endif
4074
4075#ifndef arch_is_platform_page
4076static inline bool arch_is_platform_page(u64 paddr)
4077{
4078 return false;
4079}
4080#endif
4081
4082/*
4083 * Error handlers for various types of pages.
4084 */
4085enum mf_result {
4086 MF_IGNORED, /* Error: cannot be handled */
4087 MF_FAILED, /* Error: handling failed */
4088 MF_DELAYED, /* Will be handled later */
4089 MF_RECOVERED, /* Successfully recovered */
4090};
4091
4092enum mf_action_page_type {
4093 MF_MSG_KERNEL,
4094 MF_MSG_KERNEL_HIGH_ORDER,
4095 MF_MSG_DIFFERENT_COMPOUND,
4096 MF_MSG_HUGE,
4097 MF_MSG_FREE_HUGE,
4098 MF_MSG_GET_HWPOISON,
4099 MF_MSG_UNMAP_FAILED,
4100 MF_MSG_DIRTY_SWAPCACHE,
4101 MF_MSG_CLEAN_SWAPCACHE,
4102 MF_MSG_DIRTY_MLOCKED_LRU,
4103 MF_MSG_CLEAN_MLOCKED_LRU,
4104 MF_MSG_DIRTY_UNEVICTABLE_LRU,
4105 MF_MSG_CLEAN_UNEVICTABLE_LRU,
4106 MF_MSG_DIRTY_LRU,
4107 MF_MSG_CLEAN_LRU,
4108 MF_MSG_TRUNCATED_LRU,
4109 MF_MSG_BUDDY,
4110 MF_MSG_DAX,
4111 MF_MSG_UNSPLIT_THP,
4112 MF_MSG_ALREADY_POISONED,
4113 MF_MSG_UNKNOWN,
4114};
4115
4116#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4117void folio_zero_user(struct folio *folio, unsigned long addr_hint);
4118int copy_user_large_folio(struct folio *dst, struct folio *src,
4119 unsigned long addr_hint,
4120 struct vm_area_struct *vma);
4121long copy_folio_from_user(struct folio *dst_folio,
4122 const void __user *usr_src,
4123 bool allow_pagefault);
4124
4125/**
4126 * vma_is_special_huge - Are transhuge page-table entries considered special?
4127 * @vma: Pointer to the struct vm_area_struct to consider
4128 *
4129 * Whether transhuge page-table entries are considered "special" following
4130 * the definition in vm_normal_page().
4131 *
4132 * Return: true if transhuge page-table entries should be considered special,
4133 * false otherwise.
4134 */
4135static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
4136{
4137 return vma_is_dax(vma) || (vma->vm_file &&
4138 (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
4139}
4140
4141#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4142
4143#if MAX_NUMNODES > 1
4144void __init setup_nr_node_ids(void);
4145#else
4146static inline void setup_nr_node_ids(void) {}
4147#endif
4148
4149extern int memcmp_pages(struct page *page1, struct page *page2);
4150
4151static inline int pages_identical(struct page *page1, struct page *page2)
4152{
4153 return !memcmp_pages(page1, page2);
4154}
4155
4156#ifdef CONFIG_MAPPING_DIRTY_HELPERS
4157unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
4158 pgoff_t first_index, pgoff_t nr,
4159 pgoff_t bitmap_pgoff,
4160 unsigned long *bitmap,
4161 pgoff_t *start,
4162 pgoff_t *end);
4163
4164unsigned long wp_shared_mapping_range(struct address_space *mapping,
4165 pgoff_t first_index, pgoff_t nr);
4166#endif
4167
4168#ifdef CONFIG_ANON_VMA_NAME
4169int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4170 unsigned long len_in,
4171 struct anon_vma_name *anon_name);
4172#else
4173static inline int
4174madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4175 unsigned long len_in, struct anon_vma_name *anon_name) {
4176 return 0;
4177}
4178#endif
4179
4180#ifdef CONFIG_UNACCEPTED_MEMORY
4181
4182bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
4183void accept_memory(phys_addr_t start, unsigned long size);
4184
4185#else
4186
4187static inline bool range_contains_unaccepted_memory(phys_addr_t start,
4188 unsigned long size)
4189{
4190 return false;
4191}
4192
4193static inline void accept_memory(phys_addr_t start, unsigned long size)
4194{
4195}
4196
4197#endif
4198
4199static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
4200{
4201 return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
4202}
4203
4204void vma_pgtable_walk_begin(struct vm_area_struct *vma);
4205void vma_pgtable_walk_end(struct vm_area_struct *vma);
4206
4207int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
4208int reserve_mem_release_by_name(const char *name);
4209
4210#ifdef CONFIG_64BIT
4211int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
4212#else
4213static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
4214{
4215	/* no-op on 32-bit */
4216 return 0;
4217}
4218#endif
4219
4220/*
4221 * user_alloc_needs_zeroing checks whether a user folio from the page
4222 * allocator needs to be zeroed.
4223 */
4224static inline bool user_alloc_needs_zeroing(void)
4225{
4226 /*
4227	 * For user folios, an architecture with cache aliasing requires a
4228	 * cache flush, and ARC changes folio->flags to make the icache
4229	 * coherent with the dcache, so always return false to make the
4230	 * caller use clear_user_page()/clear_user_highpage().
4231 */
4232 return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
4233 !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
4234 &init_on_alloc);
4235}
4236
4237int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
4238int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
4239int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
4240
4242/*
4243 * mseal of a userspace process's system mappings.
4244 */
4245#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
4246#define VM_SEALED_SYSMAP VM_SEALED
4247#else
4248#define VM_SEALED_SYSMAP VM_NONE
4249#endif
4250
4251#endif /* _LINUX_MM_H */