/* include/linux/mm.h, at v4.6-rc2 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short,
 * so the number of sections must be smaller than 65535 at coredump time.
 * Because the kernel adds some informative sections to the program image when
 * generating the coredump, we need some margin. The number of extra sections
 * is currently 1-3 and depends on the arch. We use "5" as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is not a hard limit any more, although some userspace tools can be surprised
 * by that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* pte entry associated with ->pgoff */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
						pmd_t *, unsigned int flags);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
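/*
 * Illustrative sketch (not part of this header): a minimal ->fault
 * implementation for a driver that owns a single backing page.  The names
 * my_fault/my_dev are hypothetical; the general pattern is to take a
 * reference on the page, store it in vmf->page and return 0, or return a
 * VM_FAULT_* code on error.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vma->vm_private_data;
 *
 *		if (vmf->pgoff > 0)
 *			return VM_FAULT_SIGBUS;
 *		get_page(dev->page);
 *		vmf->page = dev->page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */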
struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	if (!PageCompound(page))
		return 0;
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
		struct page *page, pte_t *pte, bool write, bool anon);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
void get_zone_device_page(struct page *page);
void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline void get_zone_device_page(struct page *page)
{
}
static inline void put_zone_device_page(struct page *page)
{
}
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_count.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))
		__put_page(page);

	if (unlikely(is_zone_device_page(page)))
		put_zone_device_page(page);
}
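/*
 * Illustrative sketch (not part of this header): the usual reference
 * patterns around the helpers above.  A caller that already holds a
 * reference pins the page across a sleep or lock drop with get_page()
 * and releases it with put_page(); a speculative lookup instead uses
 * get_page_unless_zero() and must cope with it failing.
 *
 *	get_page(page);
 *	... use the page without any other reference or lock held ...
 *	put_page(page);
 *
 *	if (get_page_unless_zero(page)) {
 *		... the page was still live, we now hold a reference ...
 *		put_page(page);
 *	}
 */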
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound pages it returns true if any subpage is mapped.
 */
static inline bool page_mapped(struct page *page)
{
	int i;
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	bool ignore_dirty;			/* Ignore dirty pages */
	bool check_swap_entries;		/* Check also swap entries */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. A positive returned
 *             value means "do page table walk over the current vma,"
 *             and a negative one means "abort current page table walk
 *             right now." 0 means "skip the current vma."
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
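/*
 * Illustrative sketch (not part of this header): counting present ptes in
 * a range with the mm_walk callbacks above.  count_pte and nr_present are
 * hypothetical names; the caller must hold mmap_sem.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &nr_present,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */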
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages6(unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked6(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags);
long get_user_pages_unlocked5(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/* suppress warnings from use in EXPORT_SYMBOL() */
#ifndef __DISABLE_GUP_DEPRECATED
#define __gup_deprecated __deprecated
#else
#define __gup_deprecated
#endif
/*
 * These macros provide backward-compatibility with the old
 * get_user_pages() variants which took tsk/mm.  These functions/macros
 * provide a compile-time __deprecated warning so we can catch old-style
 * use without breaking the build.  The actual functions also have
 * WARN_ON()s to let us know at runtime if the get_user_pages() should
 * have been the "remote" variant.
 *
 * These are hideous, but temporary.
 *
 * If you run into one of these __deprecated warnings, look
 * at how you are calling get_user_pages().  If you are calling
 * it with current/current->mm as the first two arguments,
 * simply remove those arguments.  The behavior will be the same
 * as it is now.  If you are calling it on another task, use
 * get_user_pages_remote() instead.
 *
 * Any questions?  Ask Dave Hansen <dave@sr71.net>
 */
long
__gup_deprecated
get_user_pages8(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		int write, int force, struct page **pages,
		struct vm_area_struct **vmas);
#define GUP_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages, ...)	\
	get_user_pages
#define get_user_pages(...) GUP_MACRO(__VA_ARGS__,	\
		get_user_pages8, x,			\
		get_user_pages6, x, x, x, x, x)(__VA_ARGS__)

__gup_deprecated
long get_user_pages_locked8(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		int write, int force, struct page **pages,
		int *locked);
#define GUPL_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, get_user_pages_locked, ...) \
	get_user_pages_locked
#define get_user_pages_locked(...) GUPL_MACRO(__VA_ARGS__,	\
		get_user_pages_locked8, x,			\
		get_user_pages_locked6, x, x, x, x)(__VA_ARGS__)

__gup_deprecated
long get_user_pages_unlocked7(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		int write, int force, struct page **pages);
#define GUPU_MACRO(_1, _2, _3, _4, _5, _6, _7, get_user_pages_unlocked, ...)	\
	get_user_pages_unlocked
#define get_user_pages_unlocked(...) GUPU_MACRO(__VA_ARGS__,	\
		get_user_pages_unlocked7, x,			\
		get_user_pages_unlocked5, x, x, x, x)(__VA_ARGS__)
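/*
 * Illustrative examples of the conversion described above (not part of
 * this header).  An old-style call operating on the current task:
 *
 *	get_user_pages(current, current->mm, start, nr_pages,
 *		       write, force, pages, vmas);
 *
 * simply drops the first two arguments:
 *
 *	get_user_pages(start, nr_pages, write, force, pages, vmas);
 *
 * while a call operating on another task's mm becomes:
 *
 *	get_user_pages_remote(tsk, mm, start, nr_pages,
 *			      write, force, pages, vmas);
 */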
/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * pfns_vector_pages() or pfns_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     bool write, bool force, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative.  A negative value is never what users expect, so
	 * report it as zero.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}
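/*
 * Illustrative sketch (not part of this header): typical use of the per-mm
 * statistics helpers above when accounting a newly mapped page and
 * refreshing the RSS high-water mark.
 *
 *	add_mm_counter(mm, mm_counter(page), 1);
 *	update_hiwater_rss(mm);
 *	pr_debug("rss now %lu pages\n", get_mm_rss(mm));
 */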
static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_nr_pmds_init(struct mm_struct *mm) {}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->nr_pmds, 0);
}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);
}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0.  Make sure nobody took it into use in between.
	 *
	 * This can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
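/*
 * Illustrative sketch (not part of this header): examining one pte under
 * the (possibly split) page table lock using the helpers above.  The pmd
 * is assumed to have been located, and to remain stable, by the caller.
 *
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte))
 *		... examine or update the pte ...
 *	pte_unmap_unlock(pte, ptl);
 */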
\ 1751 NULL: pte_offset_kernel(pmd, address)) 1752 1753#if USE_SPLIT_PMD_PTLOCKS 1754 1755static struct page *pmd_to_page(pmd_t *pmd) 1756{ 1757 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 1758 return virt_to_page((void *)((unsigned long) pmd & mask)); 1759} 1760 1761static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1762{ 1763 return ptlock_ptr(pmd_to_page(pmd)); 1764} 1765 1766static inline bool pgtable_pmd_page_ctor(struct page *page) 1767{ 1768#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1769 page->pmd_huge_pte = NULL; 1770#endif 1771 return ptlock_init(page); 1772} 1773 1774static inline void pgtable_pmd_page_dtor(struct page *page) 1775{ 1776#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1777 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 1778#endif 1779 ptlock_free(page); 1780} 1781 1782#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) 1783 1784#else 1785 1786static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1787{ 1788 return &mm->page_table_lock; 1789} 1790 1791static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } 1792static inline void pgtable_pmd_page_dtor(struct page *page) {} 1793 1794#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) 1795 1796#endif 1797 1798static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) 1799{ 1800 spinlock_t *ptl = pmd_lockptr(mm, pmd); 1801 spin_lock(ptl); 1802 return ptl; 1803} 1804 1805extern void free_area_init(unsigned long * zones_size); 1806extern void free_area_init_node(int nid, unsigned long * zones_size, 1807 unsigned long zone_start_pfn, unsigned long *zholes_size); 1808extern void free_initmem(void); 1809 1810/* 1811 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) 1812 * into the buddy system. The freed pages will be poisoned with pattern 1813 * "poison" if it's within range [0, UCHAR_MAX]. 1814 * Return pages freed into the buddy system. 1815 */ 1816extern unsigned long free_reserved_area(void *start, void *end, 1817 int poison, char *s); 1818 1819#ifdef CONFIG_HIGHMEM 1820/* 1821 * Free a highmem page into the buddy system, adjusting totalhigh_pages 1822 * and totalram_pages. 1823 */ 1824extern void free_highmem_page(struct page *page); 1825#endif 1826 1827extern void adjust_managed_page_count(struct page *page, long count); 1828extern void mem_init_print_info(const char *str); 1829 1830extern void reserve_bootmem_region(unsigned long start, unsigned long end); 1831 1832/* Free the reserved page into the buddy system, so it gets managed. */ 1833static inline void __free_reserved_page(struct page *page) 1834{ 1835 ClearPageReserved(page); 1836 init_page_count(page); 1837 __free_page(page); 1838} 1839 1840static inline void free_reserved_page(struct page *page) 1841{ 1842 __free_reserved_page(page); 1843 adjust_managed_page_count(page, 1); 1844} 1845 1846static inline void mark_page_reserved(struct page *page) 1847{ 1848 SetPageReserved(page); 1849 adjust_managed_page_count(page, -1); 1850} 1851 1852/* 1853 * Default method to free all the __init memory into the buddy system. 1854 * The freed pages will be poisoned with pattern "poison" if it's within 1855 * range [0, UCHAR_MAX]. 1856 * Return pages freed into the buddy system. 
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture-independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register the ranges of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at. For basic
 * usage, an architecture is expected to do something like:
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 						 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range. Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function.
*/ 1930extern int __meminit __early_pfn_to_nid(unsigned long pfn, 1931 struct mminit_pfnnid_cache *state); 1932#endif 1933 1934extern void set_dma_reserve(unsigned long new_dma_reserve); 1935extern void memmap_init_zone(unsigned long, int, unsigned long, 1936 unsigned long, enum memmap_context); 1937extern void setup_per_zone_wmarks(void); 1938extern int __meminit init_per_zone_wmark_min(void); 1939extern void mem_init(void); 1940extern void __init mmap_init(void); 1941extern void show_mem(unsigned int flags); 1942extern long si_mem_available(void); 1943extern void si_meminfo(struct sysinfo * val); 1944extern void si_meminfo_node(struct sysinfo *val, int nid); 1945 1946extern __printf(3, 4) 1947void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, 1948 const char *fmt, ...); 1949 1950extern void setup_per_cpu_pageset(void); 1951 1952extern void zone_pcp_update(struct zone *zone); 1953extern void zone_pcp_reset(struct zone *zone); 1954 1955/* page_alloc.c */ 1956extern int min_free_kbytes; 1957extern int watermark_scale_factor; 1958 1959/* nommu.c */ 1960extern atomic_long_t mmap_pages_allocated; 1961extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); 1962 1963/* interval_tree.c */ 1964void vma_interval_tree_insert(struct vm_area_struct *node, 1965 struct rb_root *root); 1966void vma_interval_tree_insert_after(struct vm_area_struct *node, 1967 struct vm_area_struct *prev, 1968 struct rb_root *root); 1969void vma_interval_tree_remove(struct vm_area_struct *node, 1970 struct rb_root *root); 1971struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root, 1972 unsigned long start, unsigned long last); 1973struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, 1974 unsigned long start, unsigned long last); 1975 1976#define vma_interval_tree_foreach(vma, root, start, last) \ 1977 for (vma = vma_interval_tree_iter_first(root, start, last); \ 1978 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 1979 1980void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 1981 struct rb_root *root); 1982void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 1983 struct rb_root *root); 1984struct anon_vma_chain *anon_vma_interval_tree_iter_first( 1985 struct rb_root *root, unsigned long start, unsigned long last); 1986struct anon_vma_chain *anon_vma_interval_tree_iter_next( 1987 struct anon_vma_chain *node, unsigned long start, unsigned long last); 1988#ifdef CONFIG_DEBUG_VM_RB 1989void anon_vma_interval_tree_verify(struct anon_vma_chain *node); 1990#endif 1991 1992#define anon_vma_interval_tree_foreach(avc, root, start, last) \ 1993 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ 1994 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) 1995 1996/* mmap.c */ 1997extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); 1998extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, 1999 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); 2000extern struct vm_area_struct *vma_merge(struct mm_struct *, 2001 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 2002 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 2003 struct mempolicy *, struct vm_userfaultfd_ctx); 2004extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 2005extern int split_vma(struct mm_struct *, 2006 struct vm_area_struct *, unsigned long addr, int new_below); 2007extern int insert_vm_struct(struct mm_struct *, struct 
vm_area_struct *); 2008extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, 2009 struct rb_node **, struct rb_node *); 2010extern void unlink_file_vma(struct vm_area_struct *); 2011extern struct vm_area_struct *copy_vma(struct vm_area_struct **, 2012 unsigned long addr, unsigned long len, pgoff_t pgoff, 2013 bool *need_rmap_locks); 2014extern void exit_mmap(struct mm_struct *); 2015 2016static inline int check_data_rlimit(unsigned long rlim, 2017 unsigned long new, 2018 unsigned long start, 2019 unsigned long end_data, 2020 unsigned long start_data) 2021{ 2022 if (rlim < RLIM_INFINITY) { 2023 if (((new - start) + (end_data - start_data)) > rlim) 2024 return -ENOSPC; 2025 } 2026 2027 return 0; 2028} 2029 2030extern int mm_take_all_locks(struct mm_struct *mm); 2031extern void mm_drop_all_locks(struct mm_struct *mm); 2032 2033extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 2034extern struct file *get_mm_exe_file(struct mm_struct *mm); 2035 2036extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); 2037extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); 2038 2039extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, 2040 unsigned long addr, unsigned long len, 2041 unsigned long flags, 2042 const struct vm_special_mapping *spec); 2043/* This is an obsolete alternative to _install_special_mapping. */ 2044extern int install_special_mapping(struct mm_struct *mm, 2045 unsigned long addr, unsigned long len, 2046 unsigned long flags, struct page **pages); 2047 2048extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 2049 2050extern unsigned long mmap_region(struct file *file, unsigned long addr, 2051 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); 2052extern unsigned long do_mmap(struct file *file, unsigned long addr, 2053 unsigned long len, unsigned long prot, unsigned long flags, 2054 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate); 2055extern int do_munmap(struct mm_struct *, unsigned long, size_t); 2056 2057static inline unsigned long 2058do_mmap_pgoff(struct file *file, unsigned long addr, 2059 unsigned long len, unsigned long prot, unsigned long flags, 2060 unsigned long pgoff, unsigned long *populate) 2061{ 2062 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate); 2063} 2064 2065#ifdef CONFIG_MMU 2066extern int __mm_populate(unsigned long addr, unsigned long len, 2067 int ignore_errors); 2068static inline void mm_populate(unsigned long addr, unsigned long len) 2069{ 2070 /* Ignore errors */ 2071 (void) __mm_populate(addr, len, 1); 2072} 2073#else 2074static inline void mm_populate(unsigned long addr, unsigned long len) {} 2075#endif 2076 2077/* These take the mm semaphore themselves */ 2078extern unsigned long vm_brk(unsigned long, unsigned long); 2079extern int vm_munmap(unsigned long, size_t); 2080extern unsigned long vm_mmap(struct file *, unsigned long, 2081 unsigned long, unsigned long, 2082 unsigned long, unsigned long); 2083 2084struct vm_unmapped_area_info { 2085#define VM_UNMAPPED_AREA_TOPDOWN 1 2086 unsigned long flags; 2087 unsigned long length; 2088 unsigned long low_limit; 2089 unsigned long high_limit; 2090 unsigned long align_mask; 2091 unsigned long align_offset; 2092}; 2093 2094extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); 2095extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); 2096 2097/* 2098 * 
Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ...
vm_end */ 2184static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, 2185 unsigned long vm_start, unsigned long vm_end) 2186{ 2187 struct vm_area_struct *vma = find_vma(mm, vm_start); 2188 2189 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) 2190 vma = NULL; 2191 2192 return vma; 2193} 2194 2195#ifdef CONFIG_MMU 2196pgprot_t vm_get_page_prot(unsigned long vm_flags); 2197void vma_set_page_prot(struct vm_area_struct *vma); 2198#else 2199static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 2200{ 2201 return __pgprot(0); 2202} 2203static inline void vma_set_page_prot(struct vm_area_struct *vma) 2204{ 2205 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 2206} 2207#endif 2208 2209#ifdef CONFIG_NUMA_BALANCING 2210unsigned long change_prot_numa(struct vm_area_struct *vma, 2211 unsigned long start, unsigned long end); 2212#endif 2213 2214struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 2215int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 2216 unsigned long pfn, unsigned long size, pgprot_t); 2217int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 2218int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2219 unsigned long pfn); 2220int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2221 unsigned long pfn, pgprot_t pgprot); 2222int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2223 pfn_t pfn); 2224int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 2225 2226 2227struct page *follow_page_mask(struct vm_area_struct *vma, 2228 unsigned long address, unsigned int foll_flags, 2229 unsigned int *page_mask); 2230 2231static inline struct page *follow_page(struct vm_area_struct *vma, 2232 unsigned long address, unsigned int foll_flags) 2233{ 2234 unsigned int unused_page_mask; 2235 return follow_page_mask(vma, address, foll_flags, &unused_page_mask); 2236} 2237 2238#define FOLL_WRITE 0x01 /* check pte is writable */ 2239#define FOLL_TOUCH 0x02 /* mark page accessed */ 2240#define FOLL_GET 0x04 /* do get_page on page */ 2241#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 2242#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 2243#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO 2244 * and return without waiting upon it */ 2245#define FOLL_POPULATE 0x40 /* fault in page */ 2246#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ 2247#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 2248#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 2249#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2250#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ 2251#define FOLL_MLOCK 0x1000 /* lock present pages */ 2252#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2253 2254typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2255 void *data); 2256extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 2257 unsigned long size, pte_fn_t fn, void *data); 2258 2259 2260#ifdef CONFIG_PAGE_POISONING 2261extern bool page_poisoning_enabled(void); 2262extern void kernel_poison_pages(struct page *page, int numpages, int enable); 2263extern bool page_is_poisoned(struct page *page); 2264#else 2265static inline bool page_poisoning_enabled(void) { return false; } 2266static inline void kernel_poison_pages(struct page *page, int 
numpages, 2267 int enable) { } 2268static inline bool page_is_poisoned(struct page *page) { return false; } 2269#endif 2270 2271#ifdef CONFIG_DEBUG_PAGEALLOC 2272extern bool _debug_pagealloc_enabled; 2273extern void __kernel_map_pages(struct page *page, int numpages, int enable); 2274 2275static inline bool debug_pagealloc_enabled(void) 2276{ 2277 return _debug_pagealloc_enabled; 2278} 2279 2280static inline void 2281kernel_map_pages(struct page *page, int numpages, int enable) 2282{ 2283 if (!debug_pagealloc_enabled()) 2284 return; 2285 2286 __kernel_map_pages(page, numpages, enable); 2287} 2288#ifdef CONFIG_HIBERNATION 2289extern bool kernel_page_present(struct page *page); 2290#endif /* CONFIG_HIBERNATION */ 2291#else /* CONFIG_DEBUG_PAGEALLOC */ 2292static inline void 2293kernel_map_pages(struct page *page, int numpages, int enable) {} 2294#ifdef CONFIG_HIBERNATION 2295static inline bool kernel_page_present(struct page *page) { return true; } 2296#endif /* CONFIG_HIBERNATION */ 2297static inline bool debug_pagealloc_enabled(void) 2298{ 2299 return false; 2300} 2301#endif /* CONFIG_DEBUG_PAGEALLOC */ 2302 2303#ifdef __HAVE_ARCH_GATE_AREA 2304extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 2305extern int in_gate_area_no_mm(unsigned long addr); 2306extern int in_gate_area(struct mm_struct *mm, unsigned long addr); 2307#else 2308static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 2309{ 2310 return NULL; 2311} 2312static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } 2313static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) 2314{ 2315 return 0; 2316} 2317#endif /* __HAVE_ARCH_GATE_AREA */ 2318 2319#ifdef CONFIG_SYSCTL 2320extern int sysctl_drop_caches; 2321int drop_caches_sysctl_handler(struct ctl_table *, int, 2322 void __user *, size_t *, loff_t *); 2323#endif 2324 2325void drop_slab(void); 2326void drop_slab_node(int nid); 2327 2328#ifndef CONFIG_MMU 2329#define randomize_va_space 0 2330#else 2331extern int randomize_va_space; 2332#endif 2333 2334const char * arch_vma_name(struct vm_area_struct *vma); 2335void print_vma_addr(char *prefix, unsigned long rip); 2336 2337void sparse_mem_maps_populate_node(struct page **map_map, 2338 unsigned long pnum_begin, 2339 unsigned long pnum_end, 2340 unsigned long map_count, 2341 int nodeid); 2342 2343struct page *sparse_mem_map_populate(unsigned long pnum, int nid); 2344pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 2345pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); 2346pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 2347pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); 2348void *vmemmap_alloc_block(unsigned long size, int node); 2349struct vmem_altmap; 2350void *__vmemmap_alloc_block_buf(unsigned long size, int node, 2351 struct vmem_altmap *altmap); 2352static inline void *vmemmap_alloc_block_buf(unsigned long size, int node) 2353{ 2354 return __vmemmap_alloc_block_buf(size, node, NULL); 2355} 2356 2357void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 2358int vmemmap_populate_basepages(unsigned long start, unsigned long end, 2359 int node); 2360int vmemmap_populate(unsigned long start, unsigned long end, int node); 2361void vmemmap_populate_print_last(void); 2362#ifdef CONFIG_MEMORY_HOTPLUG 2363void vmemmap_free(unsigned long start, unsigned long end); 2364#endif 2365void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, 2366 unsigned long size); 2367 
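/*
 * Illustrative sketch, not part of this header: an architecture using
 * CONFIG_SPARSEMEM_VMEMMAP that is content with PAGE_SIZE mappings of the
 * virtual memmap can implement vmemmap_populate() by simply delegating to
 * the generic base-page helper declared above, roughly:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 *
 * Architectures that prefer larger mappings instead walk the levels with
 * vmemmap_pgd_populate()/vmemmap_pud_populate()/vmemmap_pmd_populate() and
 * back the mappings with vmemmap_alloc_block_buf().
 */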
2368enum mf_flags { 2369 MF_COUNT_INCREASED = 1 << 0, 2370 MF_ACTION_REQUIRED = 1 << 1, 2371 MF_MUST_KILL = 1 << 2, 2372 MF_SOFT_OFFLINE = 1 << 3, 2373}; 2374extern int memory_failure(unsigned long pfn, int trapno, int flags); 2375extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2376extern int unpoison_memory(unsigned long pfn); 2377extern int get_hwpoison_page(struct page *page); 2378#define put_hwpoison_page(page) put_page(page) 2379extern int sysctl_memory_failure_early_kill; 2380extern int sysctl_memory_failure_recovery; 2381extern void shake_page(struct page *p, int access); 2382extern atomic_long_t num_poisoned_pages; 2383extern int soft_offline_page(struct page *page, int flags); 2384 2385 2386/* 2387 * Error handlers for various types of pages. 2388 */ 2389enum mf_result { 2390 MF_IGNORED, /* Error: cannot be handled */ 2391 MF_FAILED, /* Error: handling failed */ 2392 MF_DELAYED, /* Will be handled later */ 2393 MF_RECOVERED, /* Successfully recovered */ 2394}; 2395 2396enum mf_action_page_type { 2397 MF_MSG_KERNEL, 2398 MF_MSG_KERNEL_HIGH_ORDER, 2399 MF_MSG_SLAB, 2400 MF_MSG_DIFFERENT_COMPOUND, 2401 MF_MSG_POISONED_HUGE, 2402 MF_MSG_HUGE, 2403 MF_MSG_FREE_HUGE, 2404 MF_MSG_UNMAP_FAILED, 2405 MF_MSG_DIRTY_SWAPCACHE, 2406 MF_MSG_CLEAN_SWAPCACHE, 2407 MF_MSG_DIRTY_MLOCKED_LRU, 2408 MF_MSG_CLEAN_MLOCKED_LRU, 2409 MF_MSG_DIRTY_UNEVICTABLE_LRU, 2410 MF_MSG_CLEAN_UNEVICTABLE_LRU, 2411 MF_MSG_DIRTY_LRU, 2412 MF_MSG_CLEAN_LRU, 2413 MF_MSG_TRUNCATED_LRU, 2414 MF_MSG_BUDDY, 2415 MF_MSG_BUDDY_2ND, 2416 MF_MSG_UNKNOWN, 2417}; 2418 2419#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2420extern void clear_huge_page(struct page *page, 2421 unsigned long addr, 2422 unsigned int pages_per_huge_page); 2423extern void copy_user_huge_page(struct page *dst, struct page *src, 2424 unsigned long addr, struct vm_area_struct *vma, 2425 unsigned int pages_per_huge_page); 2426#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 2427 2428extern struct page_ext_operations debug_guardpage_ops; 2429extern struct page_ext_operations page_poisoning_ops; 2430 2431#ifdef CONFIG_DEBUG_PAGEALLOC 2432extern unsigned int _debug_guardpage_minorder; 2433extern bool _debug_guardpage_enabled; 2434 2435static inline unsigned int debug_guardpage_minorder(void) 2436{ 2437 return _debug_guardpage_minorder; 2438} 2439 2440static inline bool debug_guardpage_enabled(void) 2441{ 2442 return _debug_guardpage_enabled; 2443} 2444 2445static inline bool page_is_guard(struct page *page) 2446{ 2447 struct page_ext *page_ext; 2448 2449 if (!debug_guardpage_enabled()) 2450 return false; 2451 2452 page_ext = lookup_page_ext(page); 2453 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); 2454} 2455#else 2456static inline unsigned int debug_guardpage_minorder(void) { return 0; } 2457static inline bool debug_guardpage_enabled(void) { return false; } 2458static inline bool page_is_guard(struct page *page) { return false; } 2459#endif /* CONFIG_DEBUG_PAGEALLOC */ 2460 2461#if MAX_NUMNODES > 1 2462void __init setup_nr_node_ids(void); 2463#else 2464static inline void setup_nr_node_ids(void) {} 2465#endif 2466 2467#endif /* __KERNEL__ */ 2468#endif /* _LINUX_MM_H */
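/*
 * Illustrative sketch, not part of this header (identifiers are made up):
 * a hardware error handler that has resolved a corrupted physical address
 * "paddr" but runs in a context that cannot sleep would typically hand the
 * page to the hwpoison machinery through the deferred queue, e.g.
 *
 *	memory_failure_queue(paddr >> PAGE_SHIFT, 0, MF_ACTION_REQUIRED);
 *
 * Process-context callers may instead call memory_failure() directly and
 * act on its return value.
 */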