/* include/linux/mm.h, at v4.17 */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * Those architectures should provide their own implementation of "struct page"
 * zeroing by defining this macro in <asm/pgtable.h>.
 */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

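/*
 * Editor's illustrative sketch (not part of the original header): an
 * architecture with a cheaper clearing primitive would override the
 * default above from its own <asm/pgtable.h>, along the lines of
 * (my_arch_clear_struct_page is a hypothetical helper):
 */
#if 0
#define mm_zero_struct_page(pp)	my_arch_clear_struct_page(pp)
#endif
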
/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this limit via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, one section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short,
 * so the number of sections must stay below 65535 at coredump time. Because
 * the kernel adds some informative sections to the program image when
 * generating a coredump, we need some margin. The number of extra sections is
 * currently 1-3, depending on the arch; we use 5 as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is not a hard limit any more, although some userspace tools can be surprised
 * by that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

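/*
 * Editor's illustrative sketch (not part of the original header): with a
 * 4 KiB PAGE_SIZE, PAGE_ALIGN() rounds up to the next page boundary and
 * PAGE_ALIGNED() tests an existing boundary:
 *
 *   PAGE_ALIGN(0x1234)   == 0x2000
 *   PAGE_ALIGNED(0x2000) is true, PAGE_ALIGNED(0x1234) is false
 */
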
/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

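/*
 * Editor's illustrative sketch (not part of the original header): a
 * minimal ->fault handler for a driver that hands out pages from a
 * hypothetical preallocated buffer (my_buf_pages and my_buf_npages are
 * assumed names). It returns the page for the faulting offset, or
 * SIGBUS beyond the end of the buffer.
 */
#if 0
static vm_fault_t my_buf_fault(struct vm_fault *vmf)
{
	if (vmf->pgoff >= my_buf_npages)
		return VM_FAULT_SIGBUS;

	/* take a reference for the mapping; core mm installs the pte */
	get_page(my_buf_pages[vmf->pgoff]);
	vmf->page = my_buf_pages[vmf->pgoff];
	return 0;
}

static const struct vm_operations_struct my_buf_vm_ops = {
	.fault = my_buf_fault,
};
#endif
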
struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return kvmalloc(n * size, flags);
}

extern void kvfree(const void *addr);

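/*
 * Editor's illustrative sketch (not part of the original header): a
 * hypothetical caller sizing a table with kvmalloc_array(), which
 * returns NULL if n * size would overflow and otherwise falls back
 * from kmalloc to vmalloc for large requests; kvfree() handles either.
 */
#if 0
static struct page **example_alloc_page_table(size_t nr)
{
	return kvmalloc_array(nr, sizeof(struct page *), GFP_KERNEL);
}
/* ... later: kvfree(table); */
#endif
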
static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif

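/*
 * Editor's illustrative sketch (not part of the original header): when a
 * fault handler installs a new PTE it typically builds the entry like
 * this, so write permission is only granted if the VMA itself is
 * writable; 'page', 'vma' and 'vmf' come from the fault context.
 */
#if 0
pte_t entry = mk_pte(page, vma->vm_page_prot);
if (vmf->flags & FAULT_FLAG_WRITE)
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
#endif
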
/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->i_pages, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

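/*
 * Editor's illustrative sketch (not part of the original header): on a
 * hypothetical 64-bit config with SECTIONS_WIDTH == 0, NODES_WIDTH == 10
 * and ZONES_WIDTH == 2, the fields pack down from the top of page->flags:
 *
 *   SECTIONS_PGOFF = 64 - 0  = 64
 *   NODES_PGOFF    = 64 - 10 = 54   (node id in bits 54..63)
 *   ZONES_PGOFF    = 54 - 2  = 52   (zone id in bits 52..53)
 *
 * leaving the low bits free for the ordinary PG_* page flags.
 */
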
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(device_private_key);
#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
static inline bool is_device_private_page(const struct page *page);
static inline bool is_device_public_page(const struct page *page);
#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
static inline void put_zone_device_private_or_public_page(struct page *page)
{
}
#define IS_HMM_ENABLED 0
static inline bool is_device_private_page(const struct page *page)
{
	return false;
}
static inline bool is_device_public_page(const struct page *page)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */


static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For private device pages we need to catch the refcount transition
	 * from 2 to 1: when the refcount reaches one it means the private
	 * device page is free and we need to inform the device driver through
	 * a callback. See include/linux/memremap.h and HMM for details.
	 */
	if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
	    unlikely(is_device_public_page(page)))) {
		put_zone_device_private_or_public_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

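/*
 * Editor's illustrative sketch (not part of the original header): the
 * packed fields above let code recover a page's node and zone without
 * touching the memmap again, e.g. a hypothetical debug helper:
 */
#if 0
static void example_describe_page(struct page *page)
{
	int nid = page_to_nid(page);		/* decoded from page->flags */
	struct zone *zone = page_zone(page);	/* node_zones[page_zonenum()] */

	pr_debug("page on nid %d, zone %s\n", nid, zone->name);
}
#endif
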
static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */
#define VM_FAULT_NEEDDSYNC  0x2000	/* ->fault did not modify page tables
					 * and needs fsync() to complete (for
					 * synchronous page faults in DAX) */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,			"OOM" }, \
	{ VM_FAULT_SIGBUS,		"SIGBUS" }, \
	{ VM_FAULT_MAJOR,		"MAJOR" }, \
	{ VM_FAULT_WRITE,		"WRITE" }, \
	{ VM_FAULT_HWPOISON,		"HWPOISON" }, \
	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" }, \
	{ VM_FAULT_SIGSEGV,		"SIGSEGV" }, \
	{ VM_FAULT_NOPAGE,		"NOPAGE" }, \
	{ VM_FAULT_LOCKED,		"LOCKED" }, \
	{ VM_FAULT_RETRY,		"RETRY" }, \
	{ VM_FAULT_FALLBACK,		"FALLBACK" }, \
	{ VM_FAULT_DONE_COW,		"DONE_COW" }, \
	{ VM_FAULT_NEEDDSYNC,		"NEEDDSYNC" }

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

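/*
 * Editor's illustrative sketch (not part of the original header): the
 * hstate index for a poisoned huge page rides in bits 12..15 of the
 * fault code, so encode and decode round-trip:
 *
 *   VM_FAULT_SET_HINDEX(3)                      == 0x3000
 *   VM_FAULT_GET_HINDEX(VM_FAULT_SET_HINDEX(3)) == 3
 *
 * and VM_FAULT_HWPOISON_LARGE (0x0020) can still be or'ed in alongside.
 */
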
/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte, bool with_public_device);
#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)

struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 *	       this handler should only handle pud_trans_huge() puds.
 *	       the pmd_entry or pte_entry callbacks will be used for
 *	       regular PUDs.
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. Returning 0 means
 *             "do page table walk over the current vma," a negative
 *             value means "abort current page table walk right now,"
 *             and 1 means "skip the current vma."
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
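
/*
 * Editor's illustrative sketch (not part of the original header): counting
 * present PTEs in a range with walk_page_range(). Only .pte_entry is set,
 * so transparent huge pages are not handled here; the caller is assumed
 * to hold mm->mmap_sem for reading.
 */
#if 0
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry	= count_pte_entry,
		.mm		= mm,
		.private	= &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}
#endif
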
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
			     unsigned long *start, unsigned long *end,
			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline int handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
#ifdef CONFIG_FS_DAX
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
#else
static inline long get_user_pages_longterm(unsigned long start,
		unsigned long nr_pages, unsigned int gup_flags,
		struct page **pages, struct vm_area_struct **vmas)
{
	return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
}
#endif /* CONFIG_FS_DAX */

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

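/*
 * Editor's illustrative sketch (not part of the original header): the
 * usual pin/use/release pattern with get_user_pages_fast(). Each pinned
 * page must be released with put_page() once the kernel is done with it.
 */
#if 0
static int example_pin_user_buffer(unsigned long uaddr, struct page **pages,
				   int nr_pages)
{
	int pinned;

	/* write=1: we intend to dirty the pages */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;		/* e.g. -EFAULT */

	/* ... access the pages (kmap, DMA setup, etc.) ... */

	while (pinned--)
		put_page(pages[pinned]);
	return 0;
}
#endif
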
/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * pfns_vector_pages() or pfns_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative, but a negative value is never what users should see.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

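/*
 * Editor's illustrative sketch (not part of the original header): when a
 * page is mapped into or unmapped from an address space, rmap code picks
 * the right RSS bucket via mm_counter() and adjusts it, e.g.:
 */
#if 0
/* on mapping an existing page into 'mm' */
inc_mm_counter(mm, mm_counter(page));
/* on unmapping it again */
dec_mm_counter(mm, mm_counter(page));
#endif
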
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if (mm->hiwater_rss < _rss)
		mm->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
			      unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
			      unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
			      unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
			       unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
			       unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
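
/*
 * Example (illustrative sketch): allocating the intermediate levels on the
 * way down to a PTE, the usual pattern when populating a mapping. Each
 * *_alloc() helper above returns NULL on allocation failure; "addr" and
 * "ptl" are assumed to come from the caller, and pte_alloc_map_lock() is
 * defined further below.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud;
 *	pmd_t *pmd;
 *
 *	if (!p4d)
 *		return NULL;
 *	pud = pud_alloc(mm, p4d, addr);
 *	if (!pud)
 *		return NULL;
 *	pmd = pmd_alloc(mm, pud, addr);
 *	if (!pmd)
 *		return NULL;
 *	return pte_alloc_map_lock(mm, pmd, addr, ptl);
 */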
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore
	 * page->ptl) with 0. Make sure nobody took it into use in the
	 * meantime.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares
	 * storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address)) ? \
		NULL : pte_offset_kernel(pmd, address))
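
/*
 * Example (illustrative sketch): inspecting a PTE under its page table
 * lock. With split PTE locks this takes the per-page lock, otherwise it
 * falls back to mm->page_table_lock; the map/unlock pairing is the same
 * either way. "mm", "pmd" and "addr" are assumed from the caller.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte))
 *		... examine or update the entry ...
 *	pte_unmap_unlock(pte, ptl);
 */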
#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to. The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}
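
/*
 * Example (illustrative sketch): serializing against a huge-pmd update,
 * the pattern transparent-hugepage code uses. pmd_lock() picks the split
 * PMD lock when USE_SPLIT_PMD_PTLOCKS is enabled and falls back to
 * mm->page_table_lock otherwise.
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	if (pmd_trans_huge(*pmd))
 *		... operate on the huge entry ...
 *	spin_unlock(ptl);
 */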
extern void __init pagecache_init(void);
extern void free_area_init(unsigned long *zones_size);
extern void free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with the
 * pattern "poison" if it is within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with the pattern "poison" if it is
 * within the range [0, UCHAR_MAX].
 * Returns the number of pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}
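
/*
 * Example (illustrative, assumptions noted): a typical arch free_initmem()
 * simply hands the __init section back to the buddy allocator; passing -1
 * as "poison" skips poisoning of the freed pages.
 *
 *	void free_initmem(void)
 *	{
 *		free_initmem_default(-1);
 *	}
 */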
static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register ranges of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at. For basic
 * usage, an architecture is expected to do something like:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 *						     max_highmem_pfn};
 *	for_each_valid_physical_page_range()
 *		memblock_add_node(base, size, nid)
 *	free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range. Similarly,
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

#ifdef CONFIG_HAVE_MEMBLOCK
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
		enum memmap_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo *val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
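
/*
 * Example (illustrative sketch): walking every VMA that maps a given
 * page-cache range, the rmap pattern used when truncating or unmapping a
 * file. "mapping" is assumed to be an address_space whose i_mmap rwsem is
 * already held by the caller.
 *
 *	struct vm_area_struct *vma;
 *	pgoff_t first = start >> PAGE_SHIFT;
 *	pgoff_t last  = end >> PAGE_SHIFT;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
 *		... unmap or examine the overlapping part of vma ...
 */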
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
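
/*
 * Example (illustrative sketch): installing a vdso-style special mapping.
 * "demo_pages", "demo_spec" and the chosen flags are invented for the
 * example; real callers pick flags matching their access requirements.
 *
 *	static struct page *demo_pages[1];
 *	static const struct vm_special_mapping demo_spec = {
 *		.name  = "[demo]",
 *		.pages = demo_pages,
 *	};
 *	struct vm_area_struct *vma;
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
 *				       &demo_spec);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */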
/* This is an obsolete alternative to _install_special_mapping(). */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
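
/*
 * Example (illustrative sketch): how an arch_get_unmapped_area()
 * implementation typically fills in vm_unmapped_area_info for a bottom-up
 * search and hands it to vm_unmapped_area() below. The limits and the
 * zero alignment are placeholders.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;		(bottom-up; use VM_UNMAPPED_AREA_TOPDOWN
 *				 for a top-down search)
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */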
/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern int filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					    struct vm_area_struct **pprev);
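
/*
 * Example (illustrative sketch): the usual way to test whether an address
 * is covered by a mapping. find_vma() only guarantees addr < vm_end, so
 * the caller must also check vm_start; mmap_sem must be held for reading.
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		... addr lies inside vma ...
 *	up_read(&mm->mmap_sem);
 */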
/*
 * Look up the first VMA which intersects the interval
 * [start_addr, end_addr); NULL if none. Assumes start_addr < end_addr.
 */
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
		unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
				unsigned long addr, pfn_t pfn)
{
	int err = vm_insert_mixed(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
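
/*
 * Example (illustrative sketch): a driver ->fault() handler inserting a
 * preallocated page and letting the vmf_* wrapper above translate errno
 * into a VM_FAULT_* code. "my_dev" and its "page" member are invented
 * names for the example.
 *
 *	static int my_dev_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_page(vmf->vma, vmf->address, dev->page);
 *	}
 */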
static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	int err = vm_insert_pfn(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
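
/*
 * Example (illustrative sketch): combining FOLL_* flags for a remote GUP,
 * the pattern ptrace-style access uses. FOLL_FORCE overrides protections
 * the way ptrace requires, and FOLL_REMOTE notes that "mm" is not
 * current->mm. "tsk", "mm" and "addr" are assumed from the caller.
 *
 *	unsigned int gup_flags = FOLL_WRITE | FOLL_FORCE | FOLL_REMOTE;
 *	struct page *page;
 *	long got;
 *
 *	got = get_user_pages_remote(tsk, mm, addr, 1, gup_flags,
 *				    &page, NULL, NULL);
 *	if (got == 1) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */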
static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
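
/*
 * Example (illustrative sketch): using apply_to_page_range() to run a
 * callback on every PTE of a kernel virtual range. The callback name and
 * body are invented for the example; the signature is the pte_fn_t
 * declared above.
 *
 *	static int clear_one_pte(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		pte_clear(&init_mm, addr, pte);
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(&init_mm, start, size, clear_one_pte, NULL);
 */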
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
static inline bool page_is_poisoned(struct page *page) { return false; }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
static inline bool debug_pagealloc_enabled(void)
{
	return false;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif /* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
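
/*
 * Example (illustrative sketch): how a machine-check handler typically
 * reports a corrupted page. MF_ACTION_REQUIRED signals a synchronous
 * fault in the current context; memory_failure_queue() is the variant
 * for contexts that cannot call memory_failure() directly. "paddr" is
 * an assumed physical address from the hardware error record.
 *
 *	unsigned long pfn = paddr >> PAGE_SHIFT;
 *
 *	memory_failure_queue(pfn, MF_ACTION_REQUIRED);
 */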
/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */