at v3.15-rc2 2102 lines 68 kB view raw
1#ifndef _LINUX_MM_H 2#define _LINUX_MM_H 3 4#include <linux/errno.h> 5 6#ifdef __KERNEL__ 7 8#include <linux/mmdebug.h> 9#include <linux/gfp.h> 10#include <linux/bug.h> 11#include <linux/list.h> 12#include <linux/mmzone.h> 13#include <linux/rbtree.h> 14#include <linux/atomic.h> 15#include <linux/debug_locks.h> 16#include <linux/mm_types.h> 17#include <linux/range.h> 18#include <linux/pfn.h> 19#include <linux/bit_spinlock.h> 20#include <linux/shrinker.h> 21 22struct mempolicy; 23struct anon_vma; 24struct anon_vma_chain; 25struct file_ra_state; 26struct user_struct; 27struct writeback_control; 28 29#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */ 30extern unsigned long max_mapnr; 31 32static inline void set_max_mapnr(unsigned long limit) 33{ 34 max_mapnr = limit; 35} 36#else 37static inline void set_max_mapnr(unsigned long limit) { } 38#endif 39 40extern unsigned long totalram_pages; 41extern void * high_memory; 42extern int page_cluster; 43 44#ifdef CONFIG_SYSCTL 45extern int sysctl_legacy_va_layout; 46#else 47#define sysctl_legacy_va_layout 0 48#endif 49 50#include <asm/page.h> 51#include <asm/pgtable.h> 52#include <asm/processor.h> 53 54#ifndef __pa_symbol 55#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 56#endif 57 58extern unsigned long sysctl_user_reserve_kbytes; 59extern unsigned long sysctl_admin_reserve_kbytes; 60 61extern int sysctl_overcommit_memory; 62extern int sysctl_overcommit_ratio; 63extern unsigned long sysctl_overcommit_kbytes; 64 65extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, 66 size_t *, loff_t *); 67extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, 68 size_t *, loff_t *); 69 70#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) 71 72/* to align the pointer to the (next) page boundary */ 73#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) 74 75/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ 76#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE) 77 78/* 79 * Linux kernel virtual memory manager primitives. 80 * The idea being to have a "virtual" mm in the same way 81 * we have a virtual fs - giving a cleaner interface to the 82 * mm details, and allowing different kinds of memory mappings 83 * (from shared memory to executable loading to arbitrary 84 * mmap() functions). 85 */ 86 87extern struct kmem_cache *vm_area_cachep; 88 89#ifndef CONFIG_MMU 90extern struct rb_root nommu_region_tree; 91extern struct rw_semaphore nommu_region_sem; 92 93extern unsigned int kobjsize(const void *objp); 94#endif 95 96/* 97 * vm_flags in vm_area_struct, see mm_types.h. 98 */ 99#define VM_NONE 0x00000000 100 101#define VM_READ 0x00000001 /* currently active flags */ 102#define VM_WRITE 0x00000002 103#define VM_EXEC 0x00000004 104#define VM_SHARED 0x00000008 105 106/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */ 107#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */ 108#define VM_MAYWRITE 0x00000020 109#define VM_MAYEXEC 0x00000040 110#define VM_MAYSHARE 0x00000080 111 112#define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 113#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 114#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 115 116#define VM_LOCKED 0x00002000 117#define VM_IO 0x00004000 /* Memory mapped I/O or similar */ 118 119 /* Used by sys_madvise() */ 120#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */ 121#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */ 122 123#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */ 124#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ 125#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */ 126#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */ 127#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 128#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ 129#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ 130#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ 131 132#ifdef CONFIG_MEM_SOFT_DIRTY 133# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */ 134#else 135# define VM_SOFTDIRTY 0 136#endif 137 138#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ 139#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */ 140#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */ 141#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ 142 143#if defined(CONFIG_X86) 144# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ 145#elif defined(CONFIG_PPC) 146# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ 147#elif defined(CONFIG_PARISC) 148# define VM_GROWSUP VM_ARCH_1 149#elif defined(CONFIG_METAG) 150# define VM_GROWSUP VM_ARCH_1 151#elif defined(CONFIG_IA64) 152# define VM_GROWSUP VM_ARCH_1 153#elif !defined(CONFIG_MMU) 154# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ 155#endif 156 157#ifndef VM_GROWSUP 158# define VM_GROWSUP VM_NONE 159#endif 160 161/* Bits set in the VMA until the stack is in its final location */ 162#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ) 163 164#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 165#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 166#endif 167 168#ifdef CONFIG_STACK_GROWSUP 169#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 170#else 171#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) 172#endif 173 174/* 175 * Special vmas that are non-mergable, non-mlock()able. 176 * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 177 */ 178#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) 179 180/* This mask defines which mm->def_flags a process can inherit its parent */ 181#define VM_INIT_DEF_MASK VM_NOHUGEPAGE 182 183/* 184 * mapping from the currently active vm_flags protection bits (the 185 * low four bits) to a page protection mask.. 186 */ 187extern pgprot_t protection_map[16]; 188 189#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ 190#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 191#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ 192#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ 193#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */ 194#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */ 195#define FAULT_FLAG_TRIED 0x40 /* second try */ 196#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */ 197 198/* 199 * vm_fault is filled by the the pagefault handler and passed to the vma's 200 * ->fault function. The vma's ->fault is responsible for returning a bitmask 201 * of VM_FAULT_xxx flags that give details about how the fault was handled. 202 * 203 * pgoff should be used in favour of virtual_address, if possible. If pgoff 204 * is used, one may implement ->remap_pages to get nonlinear mapping support. 205 */ 206struct vm_fault { 207 unsigned int flags; /* FAULT_FLAG_xxx flags */ 208 pgoff_t pgoff; /* Logical page offset based on vma */ 209 void __user *virtual_address; /* Faulting virtual address */ 210 211 struct page *page; /* ->fault handlers should return a 212 * page here, unless VM_FAULT_NOPAGE 213 * is set (which is also implied by 214 * VM_FAULT_ERROR). 215 */ 216 /* for ->map_pages() only */ 217 pgoff_t max_pgoff; /* map pages for offset from pgoff till 218 * max_pgoff inclusive */ 219 pte_t *pte; /* pte entry associated with ->pgoff */ 220}; 221 222/* 223 * These are the virtual MM functions - opening of an area, closing and 224 * unmapping it (needed to keep files on disk up-to-date etc), pointer 225 * to the functions called when a no-page or a wp-page exception occurs. 226 */ 227struct vm_operations_struct { 228 void (*open)(struct vm_area_struct * area); 229 void (*close)(struct vm_area_struct * area); 230 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); 231 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); 232 233 /* notification that a previously read-only page is about to become 234 * writable, if an error is returned it will cause a SIGBUS */ 235 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); 236 237 /* called by access_process_vm when get_user_pages() fails, typically 238 * for use by special VMAs that can switch between memory and hardware 239 */ 240 int (*access)(struct vm_area_struct *vma, unsigned long addr, 241 void *buf, int len, int write); 242#ifdef CONFIG_NUMA 243 /* 244 * set_policy() op must add a reference to any non-NULL @new mempolicy 245 * to hold the policy upon return. Caller should pass NULL @new to 246 * remove a policy and fall back to surrounding context--i.e. do not 247 * install a MPOL_DEFAULT policy, nor the task or system default 248 * mempolicy. 249 */ 250 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); 251 252 /* 253 * get_policy() op must add reference [mpol_get()] to any policy at 254 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure 255 * in mm/mempolicy.c will do this automatically. 256 * get_policy() must NOT add a ref if the policy at (vma,addr) is not 257 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. 258 * If no [shared/vma] mempolicy exists at the addr, get_policy() op 259 * must return NULL--i.e., do not "fallback" to task or system default 260 * policy. 261 */ 262 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 263 unsigned long addr); 264 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from, 265 const nodemask_t *to, unsigned long flags); 266#endif 267 /* called by sys_remap_file_pages() to populate non-linear mapping */ 268 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, 269 unsigned long size, pgoff_t pgoff); 270}; 271 272struct mmu_gather; 273struct inode; 274 275#define page_private(page) ((page)->private) 276#define set_page_private(page, v) ((page)->private = (v)) 277 278/* It's valid only if the page is free path or free_list */ 279static inline void set_freepage_migratetype(struct page *page, int migratetype) 280{ 281 page->index = migratetype; 282} 283 284/* It's valid only if the page is free path or free_list */ 285static inline int get_freepage_migratetype(struct page *page) 286{ 287 return page->index; 288} 289 290/* 291 * FIXME: take this include out, include page-flags.h in 292 * files which need it (119 of them) 293 */ 294#include <linux/page-flags.h> 295#include <linux/huge_mm.h> 296 297/* 298 * Methods to modify the page usage count. 299 * 300 * What counts for a page usage: 301 * - cache mapping (page->mapping) 302 * - private data (page->private) 303 * - page mapped in a task's page tables, each mapping 304 * is counted separately 305 * 306 * Also, many kernel routines increase the page count before a critical 307 * routine so they can be sure the page doesn't go away from under them. 308 */ 309 310/* 311 * Drop a ref, return true if the refcount fell to zero (the page has no users) 312 */ 313static inline int put_page_testzero(struct page *page) 314{ 315 VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); 316 return atomic_dec_and_test(&page->_count); 317} 318 319/* 320 * Try to grab a ref unless the page has a refcount of zero, return false if 321 * that is the case. 322 * This can be called when MMU is off so it must not access 323 * any of the virtual mappings. 324 */ 325static inline int get_page_unless_zero(struct page *page) 326{ 327 return atomic_inc_not_zero(&page->_count); 328} 329 330/* 331 * Try to drop a ref unless the page has a refcount of one, return false if 332 * that is the case. 333 * This is to make sure that the refcount won't become zero after this drop. 334 * This can be called when MMU is off so it must not access 335 * any of the virtual mappings. 336 */ 337static inline int put_page_unless_one(struct page *page) 338{ 339 return atomic_add_unless(&page->_count, -1, 1); 340} 341 342extern int page_is_ram(unsigned long pfn); 343 344/* Support for virtually mapped pages */ 345struct page *vmalloc_to_page(const void *addr); 346unsigned long vmalloc_to_pfn(const void *addr); 347 348/* 349 * Determine if an address is within the vmalloc range 350 * 351 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there 352 * is no special casing required. 353 */ 354static inline int is_vmalloc_addr(const void *x) 355{ 356#ifdef CONFIG_MMU 357 unsigned long addr = (unsigned long)x; 358 359 return addr >= VMALLOC_START && addr < VMALLOC_END; 360#else 361 return 0; 362#endif 363} 364#ifdef CONFIG_MMU 365extern int is_vmalloc_or_module_addr(const void *x); 366#else 367static inline int is_vmalloc_or_module_addr(const void *x) 368{ 369 return 0; 370} 371#endif 372 373static inline void compound_lock(struct page *page) 374{ 375#ifdef CONFIG_TRANSPARENT_HUGEPAGE 376 VM_BUG_ON_PAGE(PageSlab(page), page); 377 bit_spin_lock(PG_compound_lock, &page->flags); 378#endif 379} 380 381static inline void compound_unlock(struct page *page) 382{ 383#ifdef CONFIG_TRANSPARENT_HUGEPAGE 384 VM_BUG_ON_PAGE(PageSlab(page), page); 385 bit_spin_unlock(PG_compound_lock, &page->flags); 386#endif 387} 388 389static inline unsigned long compound_lock_irqsave(struct page *page) 390{ 391 unsigned long uninitialized_var(flags); 392#ifdef CONFIG_TRANSPARENT_HUGEPAGE 393 local_irq_save(flags); 394 compound_lock(page); 395#endif 396 return flags; 397} 398 399static inline void compound_unlock_irqrestore(struct page *page, 400 unsigned long flags) 401{ 402#ifdef CONFIG_TRANSPARENT_HUGEPAGE 403 compound_unlock(page); 404 local_irq_restore(flags); 405#endif 406} 407 408static inline struct page *compound_head(struct page *page) 409{ 410 if (unlikely(PageTail(page))) { 411 struct page *head = page->first_page; 412 413 /* 414 * page->first_page may be a dangling pointer to an old 415 * compound page, so recheck that it is still a tail 416 * page before returning. 417 */ 418 smp_rmb(); 419 if (likely(PageTail(page))) 420 return head; 421 } 422 return page; 423} 424 425/* 426 * The atomic page->_mapcount, starts from -1: so that transitions 427 * both from it and to it can be tracked, using atomic_inc_and_test 428 * and atomic_add_negative(-1). 429 */ 430static inline void page_mapcount_reset(struct page *page) 431{ 432 atomic_set(&(page)->_mapcount, -1); 433} 434 435static inline int page_mapcount(struct page *page) 436{ 437 return atomic_read(&(page)->_mapcount) + 1; 438} 439 440static inline int page_count(struct page *page) 441{ 442 return atomic_read(&compound_head(page)->_count); 443} 444 445#ifdef CONFIG_HUGETLB_PAGE 446extern int PageHeadHuge(struct page *page_head); 447#else /* CONFIG_HUGETLB_PAGE */ 448static inline int PageHeadHuge(struct page *page_head) 449{ 450 return 0; 451} 452#endif /* CONFIG_HUGETLB_PAGE */ 453 454static inline bool __compound_tail_refcounted(struct page *page) 455{ 456 return !PageSlab(page) && !PageHeadHuge(page); 457} 458 459/* 460 * This takes a head page as parameter and tells if the 461 * tail page reference counting can be skipped. 462 * 463 * For this to be safe, PageSlab and PageHeadHuge must remain true on 464 * any given page where they return true here, until all tail pins 465 * have been released. 466 */ 467static inline bool compound_tail_refcounted(struct page *page) 468{ 469 VM_BUG_ON_PAGE(!PageHead(page), page); 470 return __compound_tail_refcounted(page); 471} 472 473static inline void get_huge_page_tail(struct page *page) 474{ 475 /* 476 * __split_huge_page_refcount() cannot run from under us. 477 */ 478 VM_BUG_ON_PAGE(!PageTail(page), page); 479 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); 480 VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); 481 if (compound_tail_refcounted(page->first_page)) 482 atomic_inc(&page->_mapcount); 483} 484 485extern bool __get_page_tail(struct page *page); 486 487static inline void get_page(struct page *page) 488{ 489 if (unlikely(PageTail(page))) 490 if (likely(__get_page_tail(page))) 491 return; 492 /* 493 * Getting a normal page or the head of a compound page 494 * requires to already have an elevated page->_count. 495 */ 496 VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); 497 atomic_inc(&page->_count); 498} 499 500static inline struct page *virt_to_head_page(const void *x) 501{ 502 struct page *page = virt_to_page(x); 503 return compound_head(page); 504} 505 506/* 507 * Setup the page count before being freed into the page allocator for 508 * the first time (boot or memory hotplug) 509 */ 510static inline void init_page_count(struct page *page) 511{ 512 atomic_set(&page->_count, 1); 513} 514 515/* 516 * PageBuddy() indicate that the page is free and in the buddy system 517 * (see mm/page_alloc.c). 518 * 519 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to 520 * -2 so that an underflow of the page_mapcount() won't be mistaken 521 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very 522 * efficiently by most CPU architectures. 523 */ 524#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) 525 526static inline int PageBuddy(struct page *page) 527{ 528 return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; 529} 530 531static inline void __SetPageBuddy(struct page *page) 532{ 533 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); 534 atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); 535} 536 537static inline void __ClearPageBuddy(struct page *page) 538{ 539 VM_BUG_ON_PAGE(!PageBuddy(page), page); 540 atomic_set(&page->_mapcount, -1); 541} 542 543void put_page(struct page *page); 544void put_pages_list(struct list_head *pages); 545 546void split_page(struct page *page, unsigned int order); 547int split_free_page(struct page *page); 548 549/* 550 * Compound pages have a destructor function. Provide a 551 * prototype for that function and accessor functions. 552 * These are _only_ valid on the head of a PG_compound page. 553 */ 554typedef void compound_page_dtor(struct page *); 555 556static inline void set_compound_page_dtor(struct page *page, 557 compound_page_dtor *dtor) 558{ 559 page[1].lru.next = (void *)dtor; 560} 561 562static inline compound_page_dtor *get_compound_page_dtor(struct page *page) 563{ 564 return (compound_page_dtor *)page[1].lru.next; 565} 566 567static inline int compound_order(struct page *page) 568{ 569 if (!PageHead(page)) 570 return 0; 571 return (unsigned long)page[1].lru.prev; 572} 573 574static inline void set_compound_order(struct page *page, unsigned long order) 575{ 576 page[1].lru.prev = (void *)order; 577} 578 579#ifdef CONFIG_MMU 580/* 581 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when 582 * servicing faults for write access. In the normal case, do always want 583 * pte_mkwrite. But get_user_pages can cause write faults for mappings 584 * that do not have writing enabled, when used by access_process_vm. 585 */ 586static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 587{ 588 if (likely(vma->vm_flags & VM_WRITE)) 589 pte = pte_mkwrite(pte); 590 return pte; 591} 592 593void do_set_pte(struct vm_area_struct *vma, unsigned long address, 594 struct page *page, pte_t *pte, bool write, bool anon); 595#endif 596 597/* 598 * Multiple processes may "see" the same page. E.g. for untouched 599 * mappings of /dev/null, all processes see the same page full of 600 * zeroes, and text pages of executables and shared libraries have 601 * only one copy in memory, at most, normally. 602 * 603 * For the non-reserved pages, page_count(page) denotes a reference count. 604 * page_count() == 0 means the page is free. page->lru is then used for 605 * freelist management in the buddy allocator. 606 * page_count() > 0 means the page has been allocated. 607 * 608 * Pages are allocated by the slab allocator in order to provide memory 609 * to kmalloc and kmem_cache_alloc. In this case, the management of the 610 * page, and the fields in 'struct page' are the responsibility of mm/slab.c 611 * unless a particular usage is carefully commented. (the responsibility of 612 * freeing the kmalloc memory is the caller's, of course). 613 * 614 * A page may be used by anyone else who does a __get_free_page(). 615 * In this case, page_count still tracks the references, and should only 616 * be used through the normal accessor functions. The top bits of page->flags 617 * and page->virtual store page management information, but all other fields 618 * are unused and could be used privately, carefully. The management of this 619 * page is the responsibility of the one who allocated it, and those who have 620 * subsequently been given references to it. 621 * 622 * The other pages (we may call them "pagecache pages") are completely 623 * managed by the Linux memory manager: I/O, buffers, swapping etc. 624 * The following discussion applies only to them. 625 * 626 * A pagecache page contains an opaque `private' member, which belongs to the 627 * page's address_space. Usually, this is the address of a circular list of 628 * the page's disk buffers. PG_private must be set to tell the VM to call 629 * into the filesystem to release these pages. 630 * 631 * A page may belong to an inode's memory mapping. In this case, page->mapping 632 * is the pointer to the inode, and page->index is the file offset of the page, 633 * in units of PAGE_CACHE_SIZE. 634 * 635 * If pagecache pages are not associated with an inode, they are said to be 636 * anonymous pages. These may become associated with the swapcache, and in that 637 * case PG_swapcache is set, and page->private is an offset into the swapcache. 638 * 639 * In either case (swapcache or inode backed), the pagecache itself holds one 640 * reference to the page. Setting PG_private should also increment the 641 * refcount. The each user mapping also has a reference to the page. 642 * 643 * The pagecache pages are stored in a per-mapping radix tree, which is 644 * rooted at mapping->page_tree, and indexed by offset. 645 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space 646 * lists, we instead now tag pages as dirty/writeback in the radix tree. 647 * 648 * All pagecache pages may be subject to I/O: 649 * - inode pages may need to be read from disk, 650 * - inode pages which have been modified and are MAP_SHARED may need 651 * to be written back to the inode on disk, 652 * - anonymous pages (including MAP_PRIVATE file mappings) which have been 653 * modified may need to be swapped out to swap space and (later) to be read 654 * back into memory. 655 */ 656 657/* 658 * The zone field is never updated after free_area_init_core() 659 * sets it, so none of the operations on it need to be atomic. 660 */ 661 662/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ 663#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) 664#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) 665#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) 666#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) 667 668/* 669 * Define the bit shifts to access each section. For non-existent 670 * sections we define the shift as 0; that plus a 0 mask ensures 671 * the compiler will optimise away reference to them. 672 */ 673#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) 674#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) 675#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) 676#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) 677 678/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ 679#ifdef NODE_NOT_IN_PAGE_FLAGS 680#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) 681#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ 682 SECTIONS_PGOFF : ZONES_PGOFF) 683#else 684#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) 685#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \ 686 NODES_PGOFF : ZONES_PGOFF) 687#endif 688 689#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) 690 691#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS 692#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS 693#endif 694 695#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) 696#define NODES_MASK ((1UL << NODES_WIDTH) - 1) 697#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) 698#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) 699#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) 700 701static inline enum zone_type page_zonenum(const struct page *page) 702{ 703 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; 704} 705 706#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) 707#define SECTION_IN_PAGE_FLAGS 708#endif 709 710/* 711 * The identification function is mainly used by the buddy allocator for 712 * determining if two pages could be buddies. We are not really identifying 713 * the zone since we could be using the section number id if we do not have 714 * node id available in page flags. 715 * We only guarantee that it will return the same value for two combinable 716 * pages in a zone. 717 */ 718static inline int page_zone_id(struct page *page) 719{ 720 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; 721} 722 723static inline int zone_to_nid(struct zone *zone) 724{ 725#ifdef CONFIG_NUMA 726 return zone->node; 727#else 728 return 0; 729#endif 730} 731 732#ifdef NODE_NOT_IN_PAGE_FLAGS 733extern int page_to_nid(const struct page *page); 734#else 735static inline int page_to_nid(const struct page *page) 736{ 737 return (page->flags >> NODES_PGSHIFT) & NODES_MASK; 738} 739#endif 740 741#ifdef CONFIG_NUMA_BALANCING 742static inline int cpu_pid_to_cpupid(int cpu, int pid) 743{ 744 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK); 745} 746 747static inline int cpupid_to_pid(int cpupid) 748{ 749 return cpupid & LAST__PID_MASK; 750} 751 752static inline int cpupid_to_cpu(int cpupid) 753{ 754 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; 755} 756 757static inline int cpupid_to_nid(int cpupid) 758{ 759 return cpu_to_node(cpupid_to_cpu(cpupid)); 760} 761 762static inline bool cpupid_pid_unset(int cpupid) 763{ 764 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK); 765} 766 767static inline bool cpupid_cpu_unset(int cpupid) 768{ 769 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK); 770} 771 772static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) 773{ 774 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid); 775} 776 777#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) 778#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 779static inline int page_cpupid_xchg_last(struct page *page, int cpupid) 780{ 781 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); 782} 783 784static inline int page_cpupid_last(struct page *page) 785{ 786 return page->_last_cpupid; 787} 788static inline void page_cpupid_reset_last(struct page *page) 789{ 790 page->_last_cpupid = -1 & LAST_CPUPID_MASK; 791} 792#else 793static inline int page_cpupid_last(struct page *page) 794{ 795 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; 796} 797 798extern int page_cpupid_xchg_last(struct page *page, int cpupid); 799 800static inline void page_cpupid_reset_last(struct page *page) 801{ 802 int cpupid = (1 << LAST_CPUPID_SHIFT) - 1; 803 804 page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); 805 page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; 806} 807#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ 808#else /* !CONFIG_NUMA_BALANCING */ 809static inline int page_cpupid_xchg_last(struct page *page, int cpupid) 810{ 811 return page_to_nid(page); /* XXX */ 812} 813 814static inline int page_cpupid_last(struct page *page) 815{ 816 return page_to_nid(page); /* XXX */ 817} 818 819static inline int cpupid_to_nid(int cpupid) 820{ 821 return -1; 822} 823 824static inline int cpupid_to_pid(int cpupid) 825{ 826 return -1; 827} 828 829static inline int cpupid_to_cpu(int cpupid) 830{ 831 return -1; 832} 833 834static inline int cpu_pid_to_cpupid(int nid, int pid) 835{ 836 return -1; 837} 838 839static inline bool cpupid_pid_unset(int cpupid) 840{ 841 return 1; 842} 843 844static inline void page_cpupid_reset_last(struct page *page) 845{ 846} 847 848static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) 849{ 850 return false; 851} 852#endif /* CONFIG_NUMA_BALANCING */ 853 854static inline struct zone *page_zone(const struct page *page) 855{ 856 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; 857} 858 859#ifdef SECTION_IN_PAGE_FLAGS 860static inline void set_page_section(struct page *page, unsigned long section) 861{ 862 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); 863 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; 864} 865 866static inline unsigned long page_to_section(const struct page *page) 867{ 868 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; 869} 870#endif 871 872static inline void set_page_zone(struct page *page, enum zone_type zone) 873{ 874 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); 875 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; 876} 877 878static inline void set_page_node(struct page *page, unsigned long node) 879{ 880 page->flags &= ~(NODES_MASK << NODES_PGSHIFT); 881 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; 882} 883 884static inline void set_page_links(struct page *page, enum zone_type zone, 885 unsigned long node, unsigned long pfn) 886{ 887 set_page_zone(page, zone); 888 set_page_node(page, node); 889#ifdef SECTION_IN_PAGE_FLAGS 890 set_page_section(page, pfn_to_section_nr(pfn)); 891#endif 892} 893 894/* 895 * Some inline functions in vmstat.h depend on page_zone() 896 */ 897#include <linux/vmstat.h> 898 899static __always_inline void *lowmem_page_address(const struct page *page) 900{ 901 return __va(PFN_PHYS(page_to_pfn(page))); 902} 903 904#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) 905#define HASHED_PAGE_VIRTUAL 906#endif 907 908#if defined(WANT_PAGE_VIRTUAL) 909static inline void *page_address(const struct page *page) 910{ 911 return page->virtual; 912} 913static inline void set_page_address(struct page *page, void *address) 914{ 915 page->virtual = address; 916} 917#define page_address_init() do { } while(0) 918#endif 919 920#if defined(HASHED_PAGE_VIRTUAL) 921void *page_address(const struct page *page); 922void set_page_address(struct page *page, void *virtual); 923void page_address_init(void); 924#endif 925 926#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL) 927#define page_address(page) lowmem_page_address(page) 928#define set_page_address(page, address) do { } while(0) 929#define page_address_init() do { } while(0) 930#endif 931 932/* 933 * On an anonymous page mapped into a user virtual memory area, 934 * page->mapping points to its anon_vma, not to a struct address_space; 935 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. 936 * 937 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, 938 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; 939 * and then page->mapping points, not to an anon_vma, but to a private 940 * structure which KSM associates with that merged page. See ksm.h. 941 * 942 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. 943 * 944 * Please note that, confusingly, "page_mapping" refers to the inode 945 * address_space which maps the page from disk; whereas "page_mapped" 946 * refers to user virtual address space into which the page is mapped. 947 */ 948#define PAGE_MAPPING_ANON 1 949#define PAGE_MAPPING_KSM 2 950#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) 951 952extern struct address_space *page_mapping(struct page *page); 953 954/* Neutral page->mapping pointer to address_space or anon_vma or other */ 955static inline void *page_rmapping(struct page *page) 956{ 957 return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); 958} 959 960extern struct address_space *__page_file_mapping(struct page *); 961 962static inline 963struct address_space *page_file_mapping(struct page *page) 964{ 965 if (unlikely(PageSwapCache(page))) 966 return __page_file_mapping(page); 967 968 return page->mapping; 969} 970 971static inline int PageAnon(struct page *page) 972{ 973 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; 974} 975 976/* 977 * Return the pagecache index of the passed page. Regular pagecache pages 978 * use ->index whereas swapcache pages use ->private 979 */ 980static inline pgoff_t page_index(struct page *page) 981{ 982 if (unlikely(PageSwapCache(page))) 983 return page_private(page); 984 return page->index; 985} 986 987extern pgoff_t __page_file_index(struct page *page); 988 989/* 990 * Return the file index of the page. Regular pagecache pages use ->index 991 * whereas swapcache pages use swp_offset(->private) 992 */ 993static inline pgoff_t page_file_index(struct page *page) 994{ 995 if (unlikely(PageSwapCache(page))) 996 return __page_file_index(page); 997 998 return page->index; 999} 1000 1001/* 1002 * Return true if this page is mapped into pagetables. 1003 */ 1004static inline int page_mapped(struct page *page) 1005{ 1006 return atomic_read(&(page)->_mapcount) >= 0; 1007} 1008 1009/* 1010 * Different kinds of faults, as returned by handle_mm_fault(). 1011 * Used to decide whether a process gets delivered SIGBUS or 1012 * just gets major/minor fault counters bumped up. 1013 */ 1014 1015#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */ 1016 1017#define VM_FAULT_OOM 0x0001 1018#define VM_FAULT_SIGBUS 0x0002 1019#define VM_FAULT_MAJOR 0x0004 1020#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ 1021#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ 1022#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ 1023 1024#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ 1025#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ 1026#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ 1027#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ 1028 1029#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ 1030 1031#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ 1032 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE) 1033 1034/* Encode hstate index for a hwpoisoned large page */ 1035#define VM_FAULT_SET_HINDEX(x) ((x) << 12) 1036#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) 1037 1038/* 1039 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. 1040 */ 1041extern void pagefault_out_of_memory(void); 1042 1043#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) 1044 1045/* 1046 * Flags passed to show_mem() and show_free_areas() to suppress output in 1047 * various contexts. 1048 */ 1049#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ 1050 1051extern void show_free_areas(unsigned int flags); 1052extern bool skip_free_areas_node(unsigned int flags, int nid); 1053 1054int shmem_zero_setup(struct vm_area_struct *); 1055#ifdef CONFIG_SHMEM 1056bool shmem_mapping(struct address_space *mapping); 1057#else 1058static inline bool shmem_mapping(struct address_space *mapping) 1059{ 1060 return false; 1061} 1062#endif 1063 1064extern int can_do_mlock(void); 1065extern int user_shm_lock(size_t, struct user_struct *); 1066extern void user_shm_unlock(size_t, struct user_struct *); 1067 1068/* 1069 * Parameter block passed down to zap_pte_range in exceptional cases. 1070 */ 1071struct zap_details { 1072 struct vm_area_struct *nonlinear_vma; /* Check page->index if set */ 1073 struct address_space *check_mapping; /* Check page->mapping if set */ 1074 pgoff_t first_index; /* Lowest page->index to unmap */ 1075 pgoff_t last_index; /* Highest page->index to unmap */ 1076}; 1077 1078struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1079 pte_t pte); 1080 1081int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1082 unsigned long size); 1083void zap_page_range(struct vm_area_struct *vma, unsigned long address, 1084 unsigned long size, struct zap_details *); 1085void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, 1086 unsigned long start, unsigned long end); 1087 1088/** 1089 * mm_walk - callbacks for walk_page_range 1090 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry 1091 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry 1092 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry 1093 * this handler is required to be able to handle 1094 * pmd_trans_huge() pmds. They may simply choose to 1095 * split_huge_page() instead of handling it explicitly. 1096 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry 1097 * @pte_hole: if set, called for each hole at all levels 1098 * @hugetlb_entry: if set, called for each hugetlb entry 1099 * *Caution*: The caller must hold mmap_sem() if @hugetlb_entry 1100 * is used. 1101 * 1102 * (see walk_page_range for more details) 1103 */ 1104struct mm_walk { 1105 int (*pgd_entry)(pgd_t *pgd, unsigned long addr, 1106 unsigned long next, struct mm_walk *walk); 1107 int (*pud_entry)(pud_t *pud, unsigned long addr, 1108 unsigned long next, struct mm_walk *walk); 1109 int (*pmd_entry)(pmd_t *pmd, unsigned long addr, 1110 unsigned long next, struct mm_walk *walk); 1111 int (*pte_entry)(pte_t *pte, unsigned long addr, 1112 unsigned long next, struct mm_walk *walk); 1113 int (*pte_hole)(unsigned long addr, unsigned long next, 1114 struct mm_walk *walk); 1115 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask, 1116 unsigned long addr, unsigned long next, 1117 struct mm_walk *walk); 1118 struct mm_struct *mm; 1119 void *private; 1120}; 1121 1122int walk_page_range(unsigned long addr, unsigned long end, 1123 struct mm_walk *walk); 1124void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, 1125 unsigned long end, unsigned long floor, unsigned long ceiling); 1126int copy_page_range(struct mm_struct *dst, struct mm_struct *src, 1127 struct vm_area_struct *vma); 1128void unmap_mapping_range(struct address_space *mapping, 1129 loff_t const holebegin, loff_t const holelen, int even_cows); 1130int follow_pfn(struct vm_area_struct *vma, unsigned long address, 1131 unsigned long *pfn); 1132int follow_phys(struct vm_area_struct *vma, unsigned long address, 1133 unsigned int flags, unsigned long *prot, resource_size_t *phys); 1134int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 1135 void *buf, int len, int write); 1136 1137static inline void unmap_shared_mapping_range(struct address_space *mapping, 1138 loff_t const holebegin, loff_t const holelen) 1139{ 1140 unmap_mapping_range(mapping, holebegin, holelen, 0); 1141} 1142 1143extern void truncate_pagecache(struct inode *inode, loff_t new); 1144extern void truncate_setsize(struct inode *inode, loff_t newsize); 1145void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 1146int truncate_inode_page(struct address_space *mapping, struct page *page); 1147int generic_error_remove_page(struct address_space *mapping, struct page *page); 1148int invalidate_inode_page(struct page *page); 1149 1150#ifdef CONFIG_MMU 1151extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 1152 unsigned long address, unsigned int flags); 1153extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, 1154 unsigned long address, unsigned int fault_flags); 1155#else 1156static inline int handle_mm_fault(struct mm_struct *mm, 1157 struct vm_area_struct *vma, unsigned long address, 1158 unsigned int flags) 1159{ 1160 /* should never happen if there's no MMU */ 1161 BUG(); 1162 return VM_FAULT_SIGBUS; 1163} 1164static inline int fixup_user_fault(struct task_struct *tsk, 1165 struct mm_struct *mm, unsigned long address, 1166 unsigned int fault_flags) 1167{ 1168 /* should never happen if there's no MMU */ 1169 BUG(); 1170 return -EFAULT; 1171} 1172#endif 1173 1174extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); 1175extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, 1176 void *buf, int len, int write); 1177 1178long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1179 unsigned long start, unsigned long nr_pages, 1180 unsigned int foll_flags, struct page **pages, 1181 struct vm_area_struct **vmas, int *nonblocking); 1182long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 1183 unsigned long start, unsigned long nr_pages, 1184 int write, int force, struct page **pages, 1185 struct vm_area_struct **vmas); 1186int get_user_pages_fast(unsigned long start, int nr_pages, int write, 1187 struct page **pages); 1188struct kvec; 1189int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, 1190 struct page **pages); 1191int get_kernel_page(unsigned long start, int write, struct page **pages); 1192struct page *get_dump_page(unsigned long addr); 1193 1194extern int try_to_release_page(struct page * page, gfp_t gfp_mask); 1195extern void do_invalidatepage(struct page *page, unsigned int offset, 1196 unsigned int length); 1197 1198int __set_page_dirty_nobuffers(struct page *page); 1199int __set_page_dirty_no_writeback(struct page *page); 1200int redirty_page_for_writepage(struct writeback_control *wbc, 1201 struct page *page); 1202void account_page_dirtied(struct page *page, struct address_space *mapping); 1203void account_page_writeback(struct page *page); 1204int set_page_dirty(struct page *page); 1205int set_page_dirty_lock(struct page *page); 1206int clear_page_dirty_for_io(struct page *page); 1207int get_cmdline(struct task_struct *task, char *buffer, int buflen); 1208 1209/* Is the vma a continuation of the stack vma above it? */ 1210static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) 1211{ 1212 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1213} 1214 1215static inline int stack_guard_page_start(struct vm_area_struct *vma, 1216 unsigned long addr) 1217{ 1218 return (vma->vm_flags & VM_GROWSDOWN) && 1219 (vma->vm_start == addr) && 1220 !vma_growsdown(vma->vm_prev, addr); 1221} 1222 1223/* Is the vma a continuation of the stack vma below it? */ 1224static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) 1225{ 1226 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); 1227} 1228 1229static inline int stack_guard_page_end(struct vm_area_struct *vma, 1230 unsigned long addr) 1231{ 1232 return (vma->vm_flags & VM_GROWSUP) && 1233 (vma->vm_end == addr) && 1234 !vma_growsup(vma->vm_next, addr); 1235} 1236 1237extern pid_t 1238vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); 1239 1240extern unsigned long move_page_tables(struct vm_area_struct *vma, 1241 unsigned long old_addr, struct vm_area_struct *new_vma, 1242 unsigned long new_addr, unsigned long len, 1243 bool need_rmap_locks); 1244extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, 1245 unsigned long end, pgprot_t newprot, 1246 int dirty_accountable, int prot_numa); 1247extern int mprotect_fixup(struct vm_area_struct *vma, 1248 struct vm_area_struct **pprev, unsigned long start, 1249 unsigned long end, unsigned long newflags); 1250 1251/* 1252 * doesn't attempt to fault and will return short. 1253 */ 1254int __get_user_pages_fast(unsigned long start, int nr_pages, int write, 1255 struct page **pages); 1256/* 1257 * per-process(per-mm_struct) statistics. 1258 */ 1259static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) 1260{ 1261 long val = atomic_long_read(&mm->rss_stat.count[member]); 1262 1263#ifdef SPLIT_RSS_COUNTING 1264 /* 1265 * counter is updated in asynchronous manner and may go to minus. 1266 * But it's never be expected number for users. 1267 */ 1268 if (val < 0) 1269 val = 0; 1270#endif 1271 return (unsigned long)val; 1272} 1273 1274static inline void add_mm_counter(struct mm_struct *mm, int member, long value) 1275{ 1276 atomic_long_add(value, &mm->rss_stat.count[member]); 1277} 1278 1279static inline void inc_mm_counter(struct mm_struct *mm, int member) 1280{ 1281 atomic_long_inc(&mm->rss_stat.count[member]); 1282} 1283 1284static inline void dec_mm_counter(struct mm_struct *mm, int member) 1285{ 1286 atomic_long_dec(&mm->rss_stat.count[member]); 1287} 1288 1289static inline unsigned long get_mm_rss(struct mm_struct *mm) 1290{ 1291 return get_mm_counter(mm, MM_FILEPAGES) + 1292 get_mm_counter(mm, MM_ANONPAGES); 1293} 1294 1295static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) 1296{ 1297 return max(mm->hiwater_rss, get_mm_rss(mm)); 1298} 1299 1300static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) 1301{ 1302 return max(mm->hiwater_vm, mm->total_vm); 1303} 1304 1305static inline void update_hiwater_rss(struct mm_struct *mm) 1306{ 1307 unsigned long _rss = get_mm_rss(mm); 1308 1309 if ((mm)->hiwater_rss < _rss) 1310 (mm)->hiwater_rss = _rss; 1311} 1312 1313static inline void update_hiwater_vm(struct mm_struct *mm) 1314{ 1315 if (mm->hiwater_vm < mm->total_vm) 1316 mm->hiwater_vm = mm->total_vm; 1317} 1318 1319static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, 1320 struct mm_struct *mm) 1321{ 1322 unsigned long hiwater_rss = get_mm_hiwater_rss(mm); 1323 1324 if (*maxrss < hiwater_rss) 1325 *maxrss = hiwater_rss; 1326} 1327 1328#if defined(SPLIT_RSS_COUNTING) 1329void sync_mm_rss(struct mm_struct *mm); 1330#else 1331static inline void sync_mm_rss(struct mm_struct *mm) 1332{ 1333} 1334#endif 1335 1336int vma_wants_writenotify(struct vm_area_struct *vma); 1337 1338extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 1339 spinlock_t **ptl); 1340static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, 1341 spinlock_t **ptl) 1342{ 1343 pte_t *ptep; 1344 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); 1345 return ptep; 1346} 1347 1348#ifdef __PAGETABLE_PUD_FOLDED 1349static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, 1350 unsigned long address) 1351{ 1352 return 0; 1353} 1354#else 1355int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address); 1356#endif 1357 1358#ifdef __PAGETABLE_PMD_FOLDED 1359static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, 1360 unsigned long address) 1361{ 1362 return 0; 1363} 1364#else 1365int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); 1366#endif 1367 1368int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 1369 pmd_t *pmd, unsigned long address); 1370int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); 1371 1372/* 1373 * The following ifdef needed to get the 4level-fixup.h header to work. 1374 * Remove it when 4level-fixup.h has been removed. 1375 */ 1376#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) 1377static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 1378{ 1379 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))? 1380 NULL: pud_offset(pgd, address); 1381} 1382 1383static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 1384{ 1385 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? 1386 NULL: pmd_offset(pud, address); 1387} 1388#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */ 1389 1390#if USE_SPLIT_PTE_PTLOCKS 1391#if ALLOC_SPLIT_PTLOCKS 1392void __init ptlock_cache_init(void); 1393extern bool ptlock_alloc(struct page *page); 1394extern void ptlock_free(struct page *page); 1395 1396static inline spinlock_t *ptlock_ptr(struct page *page) 1397{ 1398 return page->ptl; 1399} 1400#else /* ALLOC_SPLIT_PTLOCKS */ 1401static inline void ptlock_cache_init(void) 1402{ 1403} 1404 1405static inline bool ptlock_alloc(struct page *page) 1406{ 1407 return true; 1408} 1409 1410static inline void ptlock_free(struct page *page) 1411{ 1412} 1413 1414static inline spinlock_t *ptlock_ptr(struct page *page) 1415{ 1416 return &page->ptl; 1417} 1418#endif /* ALLOC_SPLIT_PTLOCKS */ 1419 1420static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 1421{ 1422 return ptlock_ptr(pmd_page(*pmd)); 1423} 1424 1425static inline bool ptlock_init(struct page *page) 1426{ 1427 /* 1428 * prep_new_page() initialize page->private (and therefore page->ptl) 1429 * with 0. Make sure nobody took it in use in between. 1430 * 1431 * It can happen if arch try to use slab for page table allocation: 1432 * slab code uses page->slab_cache and page->first_page (for tail 1433 * pages), which share storage with page->ptl. 1434 */ 1435 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); 1436 if (!ptlock_alloc(page)) 1437 return false; 1438 spin_lock_init(ptlock_ptr(page)); 1439 return true; 1440} 1441 1442/* Reset page->mapping so free_pages_check won't complain. */ 1443static inline void pte_lock_deinit(struct page *page) 1444{ 1445 page->mapping = NULL; 1446 ptlock_free(page); 1447} 1448 1449#else /* !USE_SPLIT_PTE_PTLOCKS */ 1450/* 1451 * We use mm->page_table_lock to guard all pagetable pages of the mm. 1452 */ 1453static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) 1454{ 1455 return &mm->page_table_lock; 1456} 1457static inline void ptlock_cache_init(void) {} 1458static inline bool ptlock_init(struct page *page) { return true; } 1459static inline void pte_lock_deinit(struct page *page) {} 1460#endif /* USE_SPLIT_PTE_PTLOCKS */ 1461 1462static inline void pgtable_init(void) 1463{ 1464 ptlock_cache_init(); 1465 pgtable_cache_init(); 1466} 1467 1468static inline bool pgtable_page_ctor(struct page *page) 1469{ 1470 inc_zone_page_state(page, NR_PAGETABLE); 1471 return ptlock_init(page); 1472} 1473 1474static inline void pgtable_page_dtor(struct page *page) 1475{ 1476 pte_lock_deinit(page); 1477 dec_zone_page_state(page, NR_PAGETABLE); 1478} 1479 1480#define pte_offset_map_lock(mm, pmd, address, ptlp) \ 1481({ \ 1482 spinlock_t *__ptl = pte_lockptr(mm, pmd); \ 1483 pte_t *__pte = pte_offset_map(pmd, address); \ 1484 *(ptlp) = __ptl; \ 1485 spin_lock(__ptl); \ 1486 __pte; \ 1487}) 1488 1489#define pte_unmap_unlock(pte, ptl) do { \ 1490 spin_unlock(ptl); \ 1491 pte_unmap(pte); \ 1492} while (0) 1493 1494#define pte_alloc_map(mm, vma, pmd, address) \ 1495 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \ 1496 pmd, address))? \ 1497 NULL: pte_offset_map(pmd, address)) 1498 1499#define pte_alloc_map_lock(mm, pmd, address, ptlp) \ 1500 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \ 1501 pmd, address))? \ 1502 NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) 1503 1504#define pte_alloc_kernel(pmd, address) \ 1505 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ 1506 NULL: pte_offset_kernel(pmd, address)) 1507 1508#if USE_SPLIT_PMD_PTLOCKS 1509 1510static struct page *pmd_to_page(pmd_t *pmd) 1511{ 1512 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); 1513 return virt_to_page((void *)((unsigned long) pmd & mask)); 1514} 1515 1516static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1517{ 1518 return ptlock_ptr(pmd_to_page(pmd)); 1519} 1520 1521static inline bool pgtable_pmd_page_ctor(struct page *page) 1522{ 1523#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1524 page->pmd_huge_pte = NULL; 1525#endif 1526 return ptlock_init(page); 1527} 1528 1529static inline void pgtable_pmd_page_dtor(struct page *page) 1530{ 1531#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1532 VM_BUG_ON_PAGE(page->pmd_huge_pte, page); 1533#endif 1534 ptlock_free(page); 1535} 1536 1537#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) 1538 1539#else 1540 1541static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) 1542{ 1543 return &mm->page_table_lock; 1544} 1545 1546static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; } 1547static inline void pgtable_pmd_page_dtor(struct page *page) {} 1548 1549#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) 1550 1551#endif 1552 1553static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) 1554{ 1555 spinlock_t *ptl = pmd_lockptr(mm, pmd); 1556 spin_lock(ptl); 1557 return ptl; 1558} 1559 1560extern void free_area_init(unsigned long * zones_size); 1561extern void free_area_init_node(int nid, unsigned long * zones_size, 1562 unsigned long zone_start_pfn, unsigned long *zholes_size); 1563extern void free_initmem(void); 1564 1565/* 1566 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK) 1567 * into the buddy system. The freed pages will be poisoned with pattern 1568 * "poison" if it's within range [0, UCHAR_MAX]. 1569 * Return pages freed into the buddy system. 1570 */ 1571extern unsigned long free_reserved_area(void *start, void *end, 1572 int poison, char *s); 1573 1574#ifdef CONFIG_HIGHMEM 1575/* 1576 * Free a highmem page into the buddy system, adjusting totalhigh_pages 1577 * and totalram_pages. 1578 */ 1579extern void free_highmem_page(struct page *page); 1580#endif 1581 1582extern void adjust_managed_page_count(struct page *page, long count); 1583extern void mem_init_print_info(const char *str); 1584 1585/* Free the reserved page into the buddy system, so it gets managed. */ 1586static inline void __free_reserved_page(struct page *page) 1587{ 1588 ClearPageReserved(page); 1589 init_page_count(page); 1590 __free_page(page); 1591} 1592 1593static inline void free_reserved_page(struct page *page) 1594{ 1595 __free_reserved_page(page); 1596 adjust_managed_page_count(page, 1); 1597} 1598 1599static inline void mark_page_reserved(struct page *page) 1600{ 1601 SetPageReserved(page); 1602 adjust_managed_page_count(page, -1); 1603} 1604 1605/* 1606 * Default method to free all the __init memory into the buddy system. 1607 * The freed pages will be poisoned with pattern "poison" if it's within 1608 * range [0, UCHAR_MAX]. 1609 * Return pages freed into the buddy system. 1610 */ 1611static inline unsigned long free_initmem_default(int poison) 1612{ 1613 extern char __init_begin[], __init_end[]; 1614 1615 return free_reserved_area(&__init_begin, &__init_end, 1616 poison, "unused kernel"); 1617} 1618 1619static inline unsigned long get_num_physpages(void) 1620{ 1621 int nid; 1622 unsigned long phys_pages = 0; 1623 1624 for_each_online_node(nid) 1625 phys_pages += node_present_pages(nid); 1626 1627 return phys_pages; 1628} 1629 1630#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 1631/* 1632 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its 1633 * zones, allocate the backing mem_map and account for memory holes in a more 1634 * architecture independent manner. This is a substitute for creating the 1635 * zone_sizes[] and zholes_size[] arrays and passing them to 1636 * free_area_init_node() 1637 * 1638 * An architecture is expected to register range of page frames backed by 1639 * physical memory with memblock_add[_node]() before calling 1640 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic 1641 * usage, an architecture is expected to do something like 1642 * 1643 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, 1644 * max_highmem_pfn}; 1645 * for_each_valid_physical_page_range() 1646 * memblock_add_node(base, size, nid) 1647 * free_area_init_nodes(max_zone_pfns); 1648 * 1649 * free_bootmem_with_active_regions() calls free_bootmem_node() for each 1650 * registered physical page range. Similarly 1651 * sparse_memory_present_with_active_regions() calls memory_present() for 1652 * each range when SPARSEMEM is enabled. 1653 * 1654 * See mm/page_alloc.c for more information on each function exposed by 1655 * CONFIG_HAVE_MEMBLOCK_NODE_MAP. 1656 */ 1657extern void free_area_init_nodes(unsigned long *max_zone_pfn); 1658unsigned long node_map_pfn_alignment(void); 1659unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, 1660 unsigned long end_pfn); 1661extern unsigned long absent_pages_in_range(unsigned long start_pfn, 1662 unsigned long end_pfn); 1663extern void get_pfn_range_for_nid(unsigned int nid, 1664 unsigned long *start_pfn, unsigned long *end_pfn); 1665extern unsigned long find_min_pfn_with_active_regions(void); 1666extern void free_bootmem_with_active_regions(int nid, 1667 unsigned long max_low_pfn); 1668extern void sparse_memory_present_with_active_regions(int nid); 1669 1670#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 1671 1672#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ 1673 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) 1674static inline int __early_pfn_to_nid(unsigned long pfn) 1675{ 1676 return 0; 1677} 1678#else 1679/* please see mm/page_alloc.c */ 1680extern int __meminit early_pfn_to_nid(unsigned long pfn); 1681/* there is a per-arch backend function. */ 1682extern int __meminit __early_pfn_to_nid(unsigned long pfn); 1683#endif 1684 1685extern void set_dma_reserve(unsigned long new_dma_reserve); 1686extern void memmap_init_zone(unsigned long, int, unsigned long, 1687 unsigned long, enum memmap_context); 1688extern void setup_per_zone_wmarks(void); 1689extern int __meminit init_per_zone_wmark_min(void); 1690extern void mem_init(void); 1691extern void __init mmap_init(void); 1692extern void show_mem(unsigned int flags); 1693extern void si_meminfo(struct sysinfo * val); 1694extern void si_meminfo_node(struct sysinfo *val, int nid); 1695 1696extern __printf(3, 4) 1697void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); 1698 1699extern void setup_per_cpu_pageset(void); 1700 1701extern void zone_pcp_update(struct zone *zone); 1702extern void zone_pcp_reset(struct zone *zone); 1703 1704/* page_alloc.c */ 1705extern int min_free_kbytes; 1706 1707/* nommu.c */ 1708extern atomic_long_t mmap_pages_allocated; 1709extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); 1710 1711/* interval_tree.c */ 1712void vma_interval_tree_insert(struct vm_area_struct *node, 1713 struct rb_root *root); 1714void vma_interval_tree_insert_after(struct vm_area_struct *node, 1715 struct vm_area_struct *prev, 1716 struct rb_root *root); 1717void vma_interval_tree_remove(struct vm_area_struct *node, 1718 struct rb_root *root); 1719struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root, 1720 unsigned long start, unsigned long last); 1721struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node, 1722 unsigned long start, unsigned long last); 1723 1724#define vma_interval_tree_foreach(vma, root, start, last) \ 1725 for (vma = vma_interval_tree_iter_first(root, start, last); \ 1726 vma; vma = vma_interval_tree_iter_next(vma, start, last)) 1727 1728static inline void vma_nonlinear_insert(struct vm_area_struct *vma, 1729 struct list_head *list) 1730{ 1731 list_add_tail(&vma->shared.nonlinear, list); 1732} 1733 1734void anon_vma_interval_tree_insert(struct anon_vma_chain *node, 1735 struct rb_root *root); 1736void anon_vma_interval_tree_remove(struct anon_vma_chain *node, 1737 struct rb_root *root); 1738struct anon_vma_chain *anon_vma_interval_tree_iter_first( 1739 struct rb_root *root, unsigned long start, unsigned long last); 1740struct anon_vma_chain *anon_vma_interval_tree_iter_next( 1741 struct anon_vma_chain *node, unsigned long start, unsigned long last); 1742#ifdef CONFIG_DEBUG_VM_RB 1743void anon_vma_interval_tree_verify(struct anon_vma_chain *node); 1744#endif 1745 1746#define anon_vma_interval_tree_foreach(avc, root, start, last) \ 1747 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \ 1748 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last)) 1749 1750/* mmap.c */ 1751extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); 1752extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, 1753 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); 1754extern struct vm_area_struct *vma_merge(struct mm_struct *, 1755 struct vm_area_struct *prev, unsigned long addr, unsigned long end, 1756 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, 1757 struct mempolicy *); 1758extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); 1759extern int split_vma(struct mm_struct *, 1760 struct vm_area_struct *, unsigned long addr, int new_below); 1761extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); 1762extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, 1763 struct rb_node **, struct rb_node *); 1764extern void unlink_file_vma(struct vm_area_struct *); 1765extern struct vm_area_struct *copy_vma(struct vm_area_struct **, 1766 unsigned long addr, unsigned long len, pgoff_t pgoff, 1767 bool *need_rmap_locks); 1768extern void exit_mmap(struct mm_struct *); 1769 1770extern int mm_take_all_locks(struct mm_struct *mm); 1771extern void mm_drop_all_locks(struct mm_struct *mm); 1772 1773extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); 1774extern struct file *get_mm_exe_file(struct mm_struct *mm); 1775 1776extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); 1777extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, 1778 unsigned long addr, unsigned long len, 1779 unsigned long flags, struct page **pages); 1780extern int install_special_mapping(struct mm_struct *mm, 1781 unsigned long addr, unsigned long len, 1782 unsigned long flags, struct page **pages); 1783 1784extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); 1785 1786extern unsigned long mmap_region(struct file *file, unsigned long addr, 1787 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff); 1788extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 1789 unsigned long len, unsigned long prot, unsigned long flags, 1790 unsigned long pgoff, unsigned long *populate); 1791extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1792 1793#ifdef CONFIG_MMU 1794extern int __mm_populate(unsigned long addr, unsigned long len, 1795 int ignore_errors); 1796static inline void mm_populate(unsigned long addr, unsigned long len) 1797{ 1798 /* Ignore errors */ 1799 (void) __mm_populate(addr, len, 1); 1800} 1801#else 1802static inline void mm_populate(unsigned long addr, unsigned long len) {} 1803#endif 1804 1805/* These take the mm semaphore themselves */ 1806extern unsigned long vm_brk(unsigned long, unsigned long); 1807extern int vm_munmap(unsigned long, size_t); 1808extern unsigned long vm_mmap(struct file *, unsigned long, 1809 unsigned long, unsigned long, 1810 unsigned long, unsigned long); 1811 1812struct vm_unmapped_area_info { 1813#define VM_UNMAPPED_AREA_TOPDOWN 1 1814 unsigned long flags; 1815 unsigned long length; 1816 unsigned long low_limit; 1817 unsigned long high_limit; 1818 unsigned long align_mask; 1819 unsigned long align_offset; 1820}; 1821 1822extern unsigned long unmapped_area(struct vm_unmapped_area_info *info); 1823extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); 1824 1825/* 1826 * Search for an unmapped address range. 1827 * 1828 * We are looking for a range that: 1829 * - does not intersect with any VMA; 1830 * - is contained within the [low_limit, high_limit) interval; 1831 * - is at least the desired size. 1832 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) 1833 */ 1834static inline unsigned long 1835vm_unmapped_area(struct vm_unmapped_area_info *info) 1836{ 1837 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN)) 1838 return unmapped_area(info); 1839 else 1840 return unmapped_area_topdown(info); 1841} 1842 1843/* truncate.c */ 1844extern void truncate_inode_pages(struct address_space *, loff_t); 1845extern void truncate_inode_pages_range(struct address_space *, 1846 loff_t lstart, loff_t lend); 1847extern void truncate_inode_pages_final(struct address_space *); 1848 1849/* generic vm_area_ops exported for stackable file systems */ 1850extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); 1851extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); 1852extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 1853 1854/* mm/page-writeback.c */ 1855int write_one_page(struct page *page, int wait); 1856void task_dirty_inc(struct task_struct *tsk); 1857 1858/* readahead.c */ 1859#define VM_MAX_READAHEAD 128 /* kbytes */ 1860#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ 1861 1862int force_page_cache_readahead(struct address_space *mapping, struct file *filp, 1863 pgoff_t offset, unsigned long nr_to_read); 1864 1865void page_cache_sync_readahead(struct address_space *mapping, 1866 struct file_ra_state *ra, 1867 struct file *filp, 1868 pgoff_t offset, 1869 unsigned long size); 1870 1871void page_cache_async_readahead(struct address_space *mapping, 1872 struct file_ra_state *ra, 1873 struct file *filp, 1874 struct page *pg, 1875 pgoff_t offset, 1876 unsigned long size); 1877 1878unsigned long max_sane_readahead(unsigned long nr); 1879 1880/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 1881extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 1882 1883/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ 1884extern int expand_downwards(struct vm_area_struct *vma, 1885 unsigned long address); 1886#if VM_GROWSUP 1887extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1888#else 1889 #define expand_upwards(vma, address) do { } while (0) 1890#endif 1891 1892/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1893extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); 1894extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, 1895 struct vm_area_struct **pprev); 1896 1897/* Look up the first VMA which intersects the interval start_addr..end_addr-1, 1898 NULL if none. Assume start_addr < end_addr. */ 1899static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) 1900{ 1901 struct vm_area_struct * vma = find_vma(mm,start_addr); 1902 1903 if (vma && end_addr <= vma->vm_start) 1904 vma = NULL; 1905 return vma; 1906} 1907 1908static inline unsigned long vma_pages(struct vm_area_struct *vma) 1909{ 1910 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 1911} 1912 1913/* Look up the first VMA which exactly match the interval vm_start ... vm_end */ 1914static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, 1915 unsigned long vm_start, unsigned long vm_end) 1916{ 1917 struct vm_area_struct *vma = find_vma(mm, vm_start); 1918 1919 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) 1920 vma = NULL; 1921 1922 return vma; 1923} 1924 1925#ifdef CONFIG_MMU 1926pgprot_t vm_get_page_prot(unsigned long vm_flags); 1927#else 1928static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 1929{ 1930 return __pgprot(0); 1931} 1932#endif 1933 1934#ifdef CONFIG_NUMA_BALANCING 1935unsigned long change_prot_numa(struct vm_area_struct *vma, 1936 unsigned long start, unsigned long end); 1937#endif 1938 1939struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); 1940int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 1941 unsigned long pfn, unsigned long size, pgprot_t); 1942int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); 1943int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 1944 unsigned long pfn); 1945int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 1946 unsigned long pfn); 1947int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); 1948 1949 1950struct page *follow_page_mask(struct vm_area_struct *vma, 1951 unsigned long address, unsigned int foll_flags, 1952 unsigned int *page_mask); 1953 1954static inline struct page *follow_page(struct vm_area_struct *vma, 1955 unsigned long address, unsigned int foll_flags) 1956{ 1957 unsigned int unused_page_mask; 1958 return follow_page_mask(vma, address, foll_flags, &unused_page_mask); 1959} 1960 1961#define FOLL_WRITE 0x01 /* check pte is writable */ 1962#define FOLL_TOUCH 0x02 /* mark page accessed */ 1963#define FOLL_GET 0x04 /* do get_page on page */ 1964#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */ 1965#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ 1966#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO 1967 * and return without waiting upon it */ 1968#define FOLL_MLOCK 0x40 /* mark page as mlocked */ 1969#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */ 1970#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 1971#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 1972#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 1973 1974typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 1975 void *data); 1976extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, 1977 unsigned long size, pte_fn_t fn, void *data); 1978 1979#ifdef CONFIG_PROC_FS 1980void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); 1981#else 1982static inline void vm_stat_account(struct mm_struct *mm, 1983 unsigned long flags, struct file *file, long pages) 1984{ 1985 mm->total_vm += pages; 1986} 1987#endif /* CONFIG_PROC_FS */ 1988 1989#ifdef CONFIG_DEBUG_PAGEALLOC 1990extern void kernel_map_pages(struct page *page, int numpages, int enable); 1991#ifdef CONFIG_HIBERNATION 1992extern bool kernel_page_present(struct page *page); 1993#endif /* CONFIG_HIBERNATION */ 1994#else 1995static inline void 1996kernel_map_pages(struct page *page, int numpages, int enable) {} 1997#ifdef CONFIG_HIBERNATION 1998static inline bool kernel_page_present(struct page *page) { return true; } 1999#endif /* CONFIG_HIBERNATION */ 2000#endif 2001 2002extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 2003#ifdef __HAVE_ARCH_GATE_AREA 2004int in_gate_area_no_mm(unsigned long addr); 2005int in_gate_area(struct mm_struct *mm, unsigned long addr); 2006#else 2007int in_gate_area_no_mm(unsigned long addr); 2008#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);}) 2009#endif /* __HAVE_ARCH_GATE_AREA */ 2010 2011#ifdef CONFIG_SYSCTL 2012extern int sysctl_drop_caches; 2013int drop_caches_sysctl_handler(struct ctl_table *, int, 2014 void __user *, size_t *, loff_t *); 2015#endif 2016 2017unsigned long shrink_slab(struct shrink_control *shrink, 2018 unsigned long nr_pages_scanned, 2019 unsigned long lru_pages); 2020 2021#ifndef CONFIG_MMU 2022#define randomize_va_space 0 2023#else 2024extern int randomize_va_space; 2025#endif 2026 2027const char * arch_vma_name(struct vm_area_struct *vma); 2028void print_vma_addr(char *prefix, unsigned long rip); 2029 2030void sparse_mem_maps_populate_node(struct page **map_map, 2031 unsigned long pnum_begin, 2032 unsigned long pnum_end, 2033 unsigned long map_count, 2034 int nodeid); 2035 2036struct page *sparse_mem_map_populate(unsigned long pnum, int nid); 2037pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); 2038pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); 2039pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); 2040pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); 2041void *vmemmap_alloc_block(unsigned long size, int node); 2042void *vmemmap_alloc_block_buf(unsigned long size, int node); 2043void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); 2044int vmemmap_populate_basepages(unsigned long start, unsigned long end, 2045 int node); 2046int vmemmap_populate(unsigned long start, unsigned long end, int node); 2047void vmemmap_populate_print_last(void); 2048#ifdef CONFIG_MEMORY_HOTPLUG 2049void vmemmap_free(unsigned long start, unsigned long end); 2050#endif 2051void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, 2052 unsigned long size); 2053 2054enum mf_flags { 2055 MF_COUNT_INCREASED = 1 << 0, 2056 MF_ACTION_REQUIRED = 1 << 1, 2057 MF_MUST_KILL = 1 << 2, 2058 MF_SOFT_OFFLINE = 1 << 3, 2059}; 2060extern int memory_failure(unsigned long pfn, int trapno, int flags); 2061extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); 2062extern int unpoison_memory(unsigned long pfn); 2063extern int sysctl_memory_failure_early_kill; 2064extern int sysctl_memory_failure_recovery; 2065extern void shake_page(struct page *p, int access); 2066extern atomic_long_t num_poisoned_pages; 2067extern int soft_offline_page(struct page *page, int flags); 2068 2069#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 2070extern void clear_huge_page(struct page *page, 2071 unsigned long addr, 2072 unsigned int pages_per_huge_page); 2073extern void copy_user_huge_page(struct page *dst, struct page *src, 2074 unsigned long addr, struct vm_area_struct *vma, 2075 unsigned int pages_per_huge_page); 2076#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 2077 2078#ifdef CONFIG_DEBUG_PAGEALLOC 2079extern unsigned int _debug_guardpage_minorder; 2080 2081static inline unsigned int debug_guardpage_minorder(void) 2082{ 2083 return _debug_guardpage_minorder; 2084} 2085 2086static inline bool page_is_guard(struct page *page) 2087{ 2088 return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); 2089} 2090#else 2091static inline unsigned int debug_guardpage_minorder(void) { return 0; } 2092static inline bool page_is_guard(struct page *page) { return false; } 2093#endif /* CONFIG_DEBUG_PAGEALLOC */ 2094 2095#if MAX_NUMNODES > 1 2096void __init setup_nr_node_ids(void); 2097#else 2098static inline void setup_nr_node_ids(void) {} 2099#endif 2100 2101#endif /* __KERNEL__ */ 2102#endif /* _LINUX_MM_H */