Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/memory.c at v2.6.34-rc2 (3502 lines, 97 kB)
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);


#if defined(SPLIT_RSS_COUNTING)

void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			add_mm_counter(mm, i, task->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		__sync_task_rss_stat(task, task->mm);
}

unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = 0;

	/*
	 * Don't use task->mm here... to avoid having to use task_get_mm()..
	 * The caller must guarantee task->mm is not invalid.
	 */
	val = atomic_long_read(&mm->rss_stat.count[member]);
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative. But a negative value is never what users expect,
	 * so clamp it to zero.
	 */
	if (val < 0)
		return 0;
	return (unsigned long)val;
}

void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
	__sync_task_rss_stat(task, mm);
}
#else

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif

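/*
 * Illustrative sketch, not part of the original file: how a fault path
 * would typically drive the split RSS counters above. The surrounding
 * function is hypothetical; only inc_mm_counter_fast() and
 * check_sync_rss_stat() are real.
 */
#if 0	/* example only, never compiled */
static void example_account_anon_fault(struct mm_struct *mm)
{
	/* cheap per-task counter bump; no atomics when mm == current->mm */
	inc_mm_counter_fast(mm, MM_ANONPAGES);
	/* every TASK_RSS_EVENTS_THRESH (64) events, fold into mm->rss_stat */
	check_sync_rss_stat(current);
}
#endif
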
/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
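
/*
 * Illustrative worked example, not from the original file: assume 4 kB
 * pages and a 2 MB PMD_SIZE (x86-64-like numbers). With addr = 0x3ff000
 * and floor = 0x200000, "addr &= PMD_MASK" rounds addr down to 0x200000,
 * which is not below floor, so the pte table covering [0x200000, 0x400000)
 * may be freed. With floor = 0x300000 instead, the rounded addr 0x200000
 * falls below floor - a neighbouring vma still needs part of that table -
 * so addr is bumped by PMD_SIZE to 0x400000 and the table is left alone.
 * The ceiling tests are the mirror image at the top end.
 */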

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(current, mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
				"BUG: Bad page map: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	printk(KERN_ALERT
		"BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		current->comm,
		(long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page);
	printk(KERN_ALERT
		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	/*
	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
	 */
	if (vma->vm_ops)
		print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
				(unsigned long)vma->vm_ops->fault);
	if (vma->vm_file && vma->vm_file->f_op)
		print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
				(unsigned long)vma->vm_file->f_op->mmap);
	dump_stack();
	add_taint(TAINT_BAD_PAGE);
}

static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

#ifndef is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	return pfn == zero_pfn;
}
#endif

#ifndef my_zero_pfn
static inline unsigned long my_zero_pfn(unsigned long addr)
{
	return zero_pfn;
}
#endif

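/*
 * Illustrative note, not part of the original file: is_cow_mapping()
 * above reads as "may be written, but is not shared". E.g. a MAP_PRIVATE
 * mapping has VM_MAYWRITE set and VM_SHARED clear, so it is a COW mapping
 * even while currently mapped read-only; a MAP_SHARED mapping has
 * VM_SHARED set and is never COWed.
 */
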
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (!is_zero_pfn(pfn))
			print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;
check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

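/*
 * Illustrative worked example, not from the original file: suppose
 * remap_pfn_range() mapped pfn 0x800 at vma->vm_start = 0x40000000
 * (4 kB pages), so vma->vm_pgoff == 0x800. For addr = 0x40003000 the
 * linearity rule gives 0x800 + ((0x40003000 - 0x40000000) >> 12) = 0x803;
 * a pte holding pfn 0x803 there is the original (special) mapping, while
 * any other pfn must be a normal COWed page.
 */
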
/*
 * copy one vm_area from one task to the other. Assumes that the page
 * tables already present in the new task have been cleared in the whole
 * range covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (swap_duplicate(entry) < 0)
				return entry.val;

			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (likely(!non_swap_entry(entry)))
				rss[MM_SWAPENTS]++;
			else if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page);
		if (PageAnon(page))
			rss[MM_ANONPAGES]++;
		else
			rss[MM_FILEPAGES]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};

again:
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
							vma, addr, rss);
		if (entry.val)
			break;
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap_nested(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
			return -ENOMEM;
		progress = 0;
	}
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	if (unlikely(is_pfn_mapping(vma))) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_vma_copy(vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_end(src_mm,
						  vma->vm_start, end);
	return ret;
}

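/*
 * Illustrative sketch, not part of the original file: the main caller of
 * copy_page_range() is dup_mmap() in kernel/fork.c, which walks the
 * parent's vma list at fork time roughly like this (heavily simplified,
 * error handling omitted):
 */
#if 0	/* example only, never compiled */
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		/* ... duplicate the vma and link it into the child mm ... */
		retval = copy_page_range(mm, oldmm, mpnt);
	}
#endif
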
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int rss[NR_MM_COUNTERS];

	init_rss_vec(rss);

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				rss[MM_ANONPAGES]--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent) &&
				    likely(!VM_SequentialReadHint(vma)))
					mark_page_accessed(page);
				rss[MM_FILEPAGES]--;
			}
			page_remove_rmap(page);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (pte_file(ptent)) {
			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
				print_bad_pte(vma, addr, ptent, NULL);
		} else {
			swp_entry_t entry = pte_to_swp_entry(ptent);

			if (!non_swap_entry(entry))
				rss[MM_SWAPENTS]--;
			if (unlikely(!free_swap_and_cache(entry)))
				print_bad_pte(vma, addr, ptent, NULL);
		}
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	mem_cgroup_uncharge_start();
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);
	mem_cgroup_uncharge_end();

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif

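/*
 * Illustrative arithmetic, not from the original file: assuming 4 kB
 * pages, the preemptible batch above is 8 * 4 kB = 32 kB of address
 * space per lock-hold, while the non-preemptible one is 1024 * 4 kB =
 * 4 MB between reschedule points.
 */
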
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		if (unlikely(is_pfn_mapping(vma)))
			untrack_pfn_vma(vma, 0, 0);

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
					pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
			!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);

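/*
 * Illustrative sketch, not part of the original file: a driver that
 * previously populated a VM_PFNMAP vma (e.g. with remap_pfn_range())
 * could revoke the user's access like this. The surrounding function
 * is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_revoke_mapping(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	/* subsequent user access will fault back into the driver */
}
#endif
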
/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int gup_flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;

			/* user gate pages are read-only */
			if (gup_flags & FOLL_WRITE)
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
			continue;
		}

		if (!vma ||
		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;

				ret = handle_mm_fault(mm, vma, start,
					(foll_flags & FOLL_WRITE) ?
					FAULT_FLAG_WRITE : 0);

				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret &
					    (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
						return i ? i : -EFAULT;
					BUG();
				}
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
}

/**
 * get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force write access even if user mapping is
 *		readonly. This will result in the page being COWed even
 *		in MAP_SHARED mappings. You do not want this.
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int nr_pages, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);

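/*
 * Illustrative sketch, not part of the original file: a typical driver
 * caller pinning a user buffer for DMA with the API above. All names
 * here (example_pin_pages, uaddr, nr) are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_pin_pages(unsigned long uaddr, int nr,
			     struct page **pages)
{
	int i, got;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     nr, 1 /* write */, 0 /* !force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got ? got : -EFAULT;

	/* ... hand the pages to the device ... */

	for (i = 0; i < got; i++) {
		set_page_dirty_lock(pages[i]);	/* if the device wrote */
		put_page(pages[i]);
	}
	return 0;
}
#endif
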
/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(mm, MM_FILEPAGES);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

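/*
 * Illustrative sketch, not part of the original file: a driver mmap
 * method inserting one kernel-allocated page with vm_insert_page().
 * The function and example_dev_page() are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = example_dev_page(file);	/* hypothetical */

	return vm_insert_page(vma, vma->vm_start, page);
}
#endif
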
static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	int ret;
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range). However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;

	ret = insert_pfn(vma, addr, pfn, pgprot);

	if (ret)
		untrack_pfn_vma(vma, pfn, PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(vm_insert_pfn);

int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
	 * without pte special, it would there be refcounted as a normal page.
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);

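/*
 * Illustrative sketch, not part of the original file: the intended call
 * site for vm_insert_pfn() is a vm_ops->fault handler on a VM_PFNMAP
 * vma, returning VM_FAULT_NOPAGE since the pte is already installed.
 * example_addr_to_pfn() is hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;

	if (vm_insert_pfn(vma, addr, example_addr_to_pfn(vma, addr)))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
#endif
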
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: physical address of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (addr == vma->vm_start && end == vma->vm_end) {
		vma->vm_pgoff = pfn;
		vma->vm_flags |= VM_PFN_AT_MMAP;
	} else if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err) {
		/*
		 * To indicate that track_pfn related cleanup is not
		 * needed from higher level routine calling unmap_vmas
		 */
		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
		vma->vm_flags &= ~VM_PFN_AT_MMAP;
		return -EINVAL;
	}

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (err)
		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));

	return err;
}
EXPORT_SYMBOL(remap_pfn_range);

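/*
 * Illustrative sketch, not part of the original file: the classic
 * remap_pfn_range() caller is a character device mmap method mapping a
 * physical region into the whole vma. EXAMPLE_PHYS and the function
 * name are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_mmap_pfn(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       EXAMPLE_PHYS >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}
#endif
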
1906static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 1907 unsigned long addr, unsigned long end, 1908 pte_fn_t fn, void *data) 1909{ 1910 pte_t *pte; 1911 int err; 1912 pgtable_t token; 1913 spinlock_t *uninitialized_var(ptl); 1914 1915 pte = (mm == &init_mm) ? 1916 pte_alloc_kernel(pmd, addr) : 1917 pte_alloc_map_lock(mm, pmd, addr, &ptl); 1918 if (!pte) 1919 return -ENOMEM; 1920 1921 BUG_ON(pmd_huge(*pmd)); 1922 1923 arch_enter_lazy_mmu_mode(); 1924 1925 token = pmd_pgtable(*pmd); 1926 1927 do { 1928 err = fn(pte++, token, addr, data); 1929 if (err) 1930 break; 1931 } while (addr += PAGE_SIZE, addr != end); 1932 1933 arch_leave_lazy_mmu_mode(); 1934 1935 if (mm != &init_mm) 1936 pte_unmap_unlock(pte-1, ptl); 1937 return err; 1938} 1939 1940static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 1941 unsigned long addr, unsigned long end, 1942 pte_fn_t fn, void *data) 1943{ 1944 pmd_t *pmd; 1945 unsigned long next; 1946 int err; 1947 1948 BUG_ON(pud_huge(*pud)); 1949 1950 pmd = pmd_alloc(mm, pud, addr); 1951 if (!pmd) 1952 return -ENOMEM; 1953 do { 1954 next = pmd_addr_end(addr, end); 1955 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); 1956 if (err) 1957 break; 1958 } while (pmd++, addr = next, addr != end); 1959 return err; 1960} 1961 1962static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, 1963 unsigned long addr, unsigned long end, 1964 pte_fn_t fn, void *data) 1965{ 1966 pud_t *pud; 1967 unsigned long next; 1968 int err; 1969 1970 pud = pud_alloc(mm, pgd, addr); 1971 if (!pud) 1972 return -ENOMEM; 1973 do { 1974 next = pud_addr_end(addr, end); 1975 err = apply_to_pmd_range(mm, pud, addr, next, fn, data); 1976 if (err) 1977 break; 1978 } while (pud++, addr = next, addr != end); 1979 return err; 1980} 1981 1982/* 1983 * Scan a region of virtual memory, filling in page tables as necessary 1984 * and calling a provided function on each leaf page table. 1985 */ 1986int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 1987 unsigned long size, pte_fn_t fn, void *data) 1988{ 1989 pgd_t *pgd; 1990 unsigned long next; 1991 unsigned long start = addr, end = addr + size; 1992 int err; 1993 1994 BUG_ON(addr >= end); 1995 mmu_notifier_invalidate_range_start(mm, start, end); 1996 pgd = pgd_offset(mm, addr); 1997 do { 1998 next = pgd_addr_end(addr, end); 1999 err = apply_to_pud_range(mm, pgd, addr, next, fn, data); 2000 if (err) 2001 break; 2002 } while (pgd++, addr = next, addr != end); 2003 mmu_notifier_invalidate_range_end(mm, start, end); 2004 return err; 2005} 2006EXPORT_SYMBOL_GPL(apply_to_page_range); 2007 2008/* 2009 * handle_pte_fault chooses page fault handler according to an entry 2010 * which was read non-atomically. Before making any commitment, on 2011 * those architectures or configurations (e.g. i386 with PAE) which 2012 * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault 2013 * must check under lock before unmapping the pte and proceeding 2014 * (but do_wp_page is only called after already making such a check; 2015 * and do_anonymous_page and __do_fault can safely check later on). 2016 */ 2017static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, 2018 pte_t *page_table, pte_t orig_pte) 2019{ 2020 int same = 1; 2021#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 2022 if (sizeof(pte_t) > sizeof(unsigned long)) { 2023 spinlock_t *ptl = pte_lockptr(mm, pmd); 2024 spin_lock(ptl); 2025 same = pte_same(*page_table, orig_pte); 2026 spin_unlock(ptl); 2027 } 2028#endif 2029 pte_unmap(page_table); 2030 return same; 2031} 2032 2033/* 2034 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when 2035 * servicing faults for write access. In the normal case, we always want 2036 * pte_mkwrite.
But get_user_pages can cause write faults for mappings 2037 * that do not have writing enabled, when used by access_process_vm. 2038 */ 2039static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 2040{ 2041 if (likely(vma->vm_flags & VM_WRITE)) 2042 pte = pte_mkwrite(pte); 2043 return pte; 2044} 2045 2046static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) 2047{ 2048 /* 2049 * If the source page was a PFN mapping, we don't have 2050 * a "struct page" for it. We do a best-effort copy by 2051 * just copying from the original user address. If that 2052 * fails, we just zero-fill it. Live with it. 2053 */ 2054 if (unlikely(!src)) { 2055 void *kaddr = kmap_atomic(dst, KM_USER0); 2056 void __user *uaddr = (void __user *)(va & PAGE_MASK); 2057 2058 /* 2059 * This really shouldn't fail, because the page is there 2060 * in the page tables. But it might just be unreadable, 2061 * in which case we just give up and fill the result with 2062 * zeroes. 2063 */ 2064 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 2065 memset(kaddr, 0, PAGE_SIZE); 2066 kunmap_atomic(kaddr, KM_USER0); 2067 flush_dcache_page(dst); 2068 } else 2069 copy_user_highpage(dst, src, va, vma); 2070} 2071 2072/* 2073 * This routine handles present pages, when users try to write 2074 * to a shared page. It is done by copying the page to a new address 2075 * and decrementing the shared-page counter for the old page. 2076 * 2077 * Note that this routine assumes that the protection checks have been 2078 * done by the caller (the low-level page fault routine in most cases). 2079 * Thus we can safely just mark it writable once we've done any necessary 2080 * COW. 2081 * 2082 * We also mark the page dirty at this point even though the page will 2083 * change only once the write actually happens. This avoids a few races, 2084 * and potentially makes it more efficient. 2085 * 2086 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2087 * but allow concurrent faults), with pte both mapped and locked. 2088 * We return with mmap_sem still held, but pte unmapped and unlocked. 2089 */ 2090static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 2091 unsigned long address, pte_t *page_table, pmd_t *pmd, 2092 spinlock_t *ptl, pte_t orig_pte) 2093{ 2094 struct page *old_page, *new_page; 2095 pte_t entry; 2096 int reuse = 0, ret = 0; 2097 int page_mkwrite = 0; 2098 struct page *dirty_page = NULL; 2099 2100 old_page = vm_normal_page(vma, address, orig_pte); 2101 if (!old_page) { 2102 /* 2103 * VM_MIXEDMAP !pfn_valid() case 2104 * 2105 * We should not cow pages in a shared writeable mapping. 2106 * Just mark the pages writable as we can't do any dirty 2107 * accounting on raw pfn maps. 2108 */ 2109 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 2110 (VM_WRITE|VM_SHARED)) 2111 goto reuse; 2112 goto gotten; 2113 } 2114 2115 /* 2116 * Take out anonymous pages first, anonymous shared vmas are 2117 * not dirty accountable. 2118 */ 2119 if (PageAnon(old_page) && !PageKsm(old_page)) { 2120 if (!trylock_page(old_page)) { 2121 page_cache_get(old_page); 2122 pte_unmap_unlock(page_table, ptl); 2123 lock_page(old_page); 2124 page_table = pte_offset_map_lock(mm, pmd, address, 2125 &ptl); 2126 if (!pte_same(*page_table, orig_pte)) { 2127 unlock_page(old_page); 2128 page_cache_release(old_page); 2129 goto unlock; 2130 } 2131 page_cache_release(old_page); 2132 } 2133 reuse = reuse_swap_page(old_page); 2134 if (reuse) 2135 /* 2136 * The page is all ours. 
Move it to our anon_vma so 2137 * the rmap code will not search our parent or siblings. 2138 * Protected against the rmap code by the page lock. 2139 */ 2140 page_move_anon_rmap(old_page, vma, address); 2141 unlock_page(old_page); 2142 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 2143 (VM_WRITE|VM_SHARED))) { 2144 /* 2145 * Only catch write-faults on shared writable pages, 2146 * read-only shared pages can get COWed by 2147 * get_user_pages(.write=1, .force=1). 2148 */ 2149 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 2150 struct vm_fault vmf; 2151 int tmp; 2152 2153 vmf.virtual_address = (void __user *)(address & 2154 PAGE_MASK); 2155 vmf.pgoff = old_page->index; 2156 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2157 vmf.page = old_page; 2158 2159 /* 2160 * Notify the address space that the page is about to 2161 * become writable so that it can prohibit this or wait 2162 * for the page to get into an appropriate state. 2163 * 2164 * We do this without the lock held, so that it can 2165 * sleep if it needs to. 2166 */ 2167 page_cache_get(old_page); 2168 pte_unmap_unlock(page_table, ptl); 2169 2170 tmp = vma->vm_ops->page_mkwrite(vma, &vmf); 2171 if (unlikely(tmp & 2172 (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 2173 ret = tmp; 2174 goto unwritable_page; 2175 } 2176 if (unlikely(!(tmp & VM_FAULT_LOCKED))) { 2177 lock_page(old_page); 2178 if (!old_page->mapping) { 2179 ret = 0; /* retry the fault */ 2180 unlock_page(old_page); 2181 goto unwritable_page; 2182 } 2183 } else 2184 VM_BUG_ON(!PageLocked(old_page)); 2185 2186 /* 2187 * Since we dropped the lock we need to revalidate 2188 * the PTE as someone else may have changed it. If 2189 * they did, we just return, as we can count on the 2190 * MMU to tell us if they didn't also make it writable. 2191 */ 2192 page_table = pte_offset_map_lock(mm, pmd, address, 2193 &ptl); 2194 if (!pte_same(*page_table, orig_pte)) { 2195 unlock_page(old_page); 2196 page_cache_release(old_page); 2197 goto unlock; 2198 } 2199 2200 page_mkwrite = 1; 2201 } 2202 dirty_page = old_page; 2203 get_page(dirty_page); 2204 reuse = 1; 2205 } 2206 2207 if (reuse) { 2208reuse: 2209 flush_cache_page(vma, address, pte_pfn(orig_pte)); 2210 entry = pte_mkyoung(orig_pte); 2211 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2212 if (ptep_set_access_flags(vma, address, page_table, entry,1)) 2213 update_mmu_cache(vma, address, page_table); 2214 ret |= VM_FAULT_WRITE; 2215 goto unlock; 2216 } 2217 2218 /* 2219 * Ok, we need to copy. Oh, well.. 2220 */ 2221 page_cache_get(old_page); 2222gotten: 2223 pte_unmap_unlock(page_table, ptl); 2224 2225 if (unlikely(anon_vma_prepare(vma))) 2226 goto oom; 2227 2228 if (is_zero_pfn(pte_pfn(orig_pte))) { 2229 new_page = alloc_zeroed_user_highpage_movable(vma, address); 2230 if (!new_page) 2231 goto oom; 2232 } else { 2233 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 2234 if (!new_page) 2235 goto oom; 2236 cow_user_page(new_page, old_page, address, vma); 2237 } 2238 __SetPageUptodate(new_page); 2239 2240 /* 2241 * Don't let another task, with possibly unlocked vma, 2242 * keep the mlocked page. 
2243 */ 2244 if ((vma->vm_flags & VM_LOCKED) && old_page) { 2245 lock_page(old_page); /* for LRU manipulation */ 2246 clear_page_mlock(old_page); 2247 unlock_page(old_page); 2248 } 2249 2250 if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)) 2251 goto oom_free_new; 2252 2253 /* 2254 * Re-check the pte - we dropped the lock 2255 */ 2256 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2257 if (likely(pte_same(*page_table, orig_pte))) { 2258 if (old_page) { 2259 if (!PageAnon(old_page)) { 2260 dec_mm_counter_fast(mm, MM_FILEPAGES); 2261 inc_mm_counter_fast(mm, MM_ANONPAGES); 2262 } 2263 } else 2264 inc_mm_counter_fast(mm, MM_ANONPAGES); 2265 flush_cache_page(vma, address, pte_pfn(orig_pte)); 2266 entry = mk_pte(new_page, vma->vm_page_prot); 2267 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2268 /* 2269 * Clear the pte entry and flush it first, before updating the 2270 * pte with the new entry. This will avoid a race condition 2271 * seen in the presence of one thread doing SMC and another 2272 * thread doing COW. 2273 */ 2274 ptep_clear_flush(vma, address, page_table); 2275 page_add_new_anon_rmap(new_page, vma, address); 2276 /* 2277 * We call the notify macro here because, when using secondary 2278 * mmu page tables (such as kvm shadow page tables), we want the 2279 * new page to be mapped directly into the secondary page table. 2280 */ 2281 set_pte_at_notify(mm, address, page_table, entry); 2282 update_mmu_cache(vma, address, page_table); 2283 if (old_page) { 2284 /* 2285 * Only after switching the pte to the new page may 2286 * we remove the mapcount here. Otherwise another 2287 * process may come and find the rmap count decremented 2288 * before the pte is switched to the new page, and 2289 * "reuse" the old page writing into it while our pte 2290 * here still points into it and can be read by other 2291 * threads. 2292 * 2293 * The critical issue is to order this 2294 * page_remove_rmap with the ptep_clear_flush above. 2295 * Those stores are ordered by (if nothing else,) 2296 * the barrier present in the atomic_add_negative 2297 * in page_remove_rmap. 2298 * 2299 * Then the TLB flush in ptep_clear_flush ensures that 2300 * no process can access the old page before the 2301 * decremented mapcount is visible. And the old page 2302 * cannot be reused until after the decremented 2303 * mapcount is visible. So transitively, TLBs to 2304 * old page will be flushed before it can be reused. 2305 */ 2306 page_remove_rmap(old_page); 2307 } 2308 2309 /* Free the old page.. */ 2310 new_page = old_page; 2311 ret |= VM_FAULT_WRITE; 2312 } else 2313 mem_cgroup_uncharge_page(new_page); 2314 2315 if (new_page) 2316 page_cache_release(new_page); 2317 if (old_page) 2318 page_cache_release(old_page); 2319unlock: 2320 pte_unmap_unlock(page_table, ptl); 2321 if (dirty_page) { 2322 /* 2323 * Yes, Virginia, this is actually required to prevent a race 2324 * with clear_page_dirty_for_io() from clearing the page dirty 2325 * bit after it has cleared all dirty ptes, but before a racing 2326 * do_wp_page installs a dirty pte. 2327 * 2328 * __do_fault() is protected similarly.
2329 */ 2330 if (!page_mkwrite) { 2331 wait_on_page_locked(dirty_page); 2332 set_page_dirty_balance(dirty_page, page_mkwrite); 2333 } 2334 put_page(dirty_page); 2335 if (page_mkwrite) { 2336 struct address_space *mapping = dirty_page->mapping; 2337 2338 set_page_dirty(dirty_page); 2339 unlock_page(dirty_page); 2340 page_cache_release(dirty_page); 2341 if (mapping) { 2342 /* 2343 * Some device drivers do not set page.mapping 2344 * but still dirty their pages 2345 */ 2346 balance_dirty_pages_ratelimited(mapping); 2347 } 2348 } 2349 2350 /* file_update_time outside page_lock */ 2351 if (vma->vm_file) 2352 file_update_time(vma->vm_file); 2353 } 2354 return ret; 2355oom_free_new: 2356 page_cache_release(new_page); 2357oom: 2358 if (old_page) { 2359 if (page_mkwrite) { 2360 unlock_page(old_page); 2361 page_cache_release(old_page); 2362 } 2363 page_cache_release(old_page); 2364 } 2365 return VM_FAULT_OOM; 2366 2367unwritable_page: 2368 page_cache_release(old_page); 2369 return ret; 2370} 2371 2372/* 2373 * Helper functions for unmap_mapping_range(). 2374 * 2375 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ 2376 * 2377 * We have to restart searching the prio_tree whenever we drop the lock, 2378 * since the iterator is only valid while the lock is held, and anyway 2379 * a later vma might be split and reinserted earlier while lock dropped. 2380 * 2381 * The list of nonlinear vmas could be handled more efficiently, using 2382 * a placeholder, but handle it in the same way until a need is shown. 2383 * It is important to search the prio_tree before nonlinear list: a vma 2384 * may become nonlinear and be shifted from prio_tree to nonlinear list 2385 * while the lock is dropped; but never shifted from list to prio_tree. 2386 * 2387 * In order to make forward progress despite restarting the search, 2388 * vm_truncate_count is used to mark a vma as now dealt with, so we can 2389 * quickly skip it next time around. Since the prio_tree search only 2390 * shows us those vmas affected by unmapping the range in question, we 2391 * can't efficiently keep all vmas in step with mapping->truncate_count: 2392 * so instead reset them all whenever it wraps back to 0 (then go to 1). 2393 * mapping->truncate_count and vma->vm_truncate_count are protected by 2394 * i_mmap_lock. 2395 * 2396 * In order to make forward progress despite repeatedly restarting some 2397 * large vma, note the restart_addr from unmap_vmas when it breaks out: 2398 * and restart from that address when we reach that vma again. It might 2399 * have been split or merged, shrunk or extended, but never shifted: so 2400 * restart_addr remains valid so long as it remains in the vma's range. 2401 * unmap_mapping_range forces truncate_count to leap over page-aligned 2402 * values so we can save vma's restart_addr in its truncate_count field. 
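 *
 * (Editorial illustration, not in the original: with 4kB pages,
 * ~PAGE_MASK == 0xfff, so is_restart_addr() below is true exactly for
 * page-aligned values such as 0x1000.  Since truncate_count is forced
 * to skip page-aligned values, a page-aligned vm_truncate_count can
 * only be a saved restart_addr, never a "vma already dealt with"
 * marker.)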
2403 */ 2404#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) 2405 2406static void reset_vma_truncate_counts(struct address_space *mapping) 2407{ 2408 struct vm_area_struct *vma; 2409 struct prio_tree_iter iter; 2410 2411 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) 2412 vma->vm_truncate_count = 0; 2413 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 2414 vma->vm_truncate_count = 0; 2415} 2416 2417static int unmap_mapping_range_vma(struct vm_area_struct *vma, 2418 unsigned long start_addr, unsigned long end_addr, 2419 struct zap_details *details) 2420{ 2421 unsigned long restart_addr; 2422 int need_break; 2423 2424 /* 2425 * files that support invalidating or truncating portions of the 2426 * file from under mmaped areas must have their ->fault function 2427 * return a locked page (and set VM_FAULT_LOCKED in the return). 2428 * This provides synchronisation against concurrent unmapping here. 2429 */ 2430 2431again: 2432 restart_addr = vma->vm_truncate_count; 2433 if (is_restart_addr(restart_addr) && start_addr < restart_addr) { 2434 start_addr = restart_addr; 2435 if (start_addr >= end_addr) { 2436 /* Top of vma has been split off since last time */ 2437 vma->vm_truncate_count = details->truncate_count; 2438 return 0; 2439 } 2440 } 2441 2442 restart_addr = zap_page_range(vma, start_addr, 2443 end_addr - start_addr, details); 2444 need_break = need_resched() || spin_needbreak(details->i_mmap_lock); 2445 2446 if (restart_addr >= end_addr) { 2447 /* We have now completed this vma: mark it so */ 2448 vma->vm_truncate_count = details->truncate_count; 2449 if (!need_break) 2450 return 0; 2451 } else { 2452 /* Note restart_addr in vma's truncate_count field */ 2453 vma->vm_truncate_count = restart_addr; 2454 if (!need_break) 2455 goto again; 2456 } 2457 2458 spin_unlock(details->i_mmap_lock); 2459 cond_resched(); 2460 spin_lock(details->i_mmap_lock); 2461 return -EINTR; 2462} 2463 2464static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 2465 struct zap_details *details) 2466{ 2467 struct vm_area_struct *vma; 2468 struct prio_tree_iter iter; 2469 pgoff_t vba, vea, zba, zea; 2470 2471restart: 2472 vma_prio_tree_foreach(vma, &iter, root, 2473 details->first_index, details->last_index) { 2474 /* Skip quickly over those we have already dealt with */ 2475 if (vma->vm_truncate_count == details->truncate_count) 2476 continue; 2477 2478 vba = vma->vm_pgoff; 2479 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 2480 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 2481 zba = details->first_index; 2482 if (zba < vba) 2483 zba = vba; 2484 zea = details->last_index; 2485 if (zea > vea) 2486 zea = vea; 2487 2488 if (unmap_mapping_range_vma(vma, 2489 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 2490 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 2491 details) < 0) 2492 goto restart; 2493 } 2494} 2495 2496static inline void unmap_mapping_range_list(struct list_head *head, 2497 struct zap_details *details) 2498{ 2499 struct vm_area_struct *vma; 2500 2501 /* 2502 * In nonlinear VMAs there is no correspondence between virtual address 2503 * offset and file offset. So we must perform an exhaustive search 2504 * across *all* the pages in each nonlinear VMA, not just the pages 2505 * whose virtual address lies outside the file truncation point. 
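 *
 * (Editorial aside: such nonlinear VMAs are created by userspace with
 * remap_file_pages(2) on a VM_SHARED mapping, e.g. the sketch
 *	remap_file_pages(addr, PAGE_SIZE, 0, pgoff, 0);
 * which rebinds one page of an existing mapping to the file offset
 * "pgoff" pages; addr and pgoff here are hypothetical values.)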
2506 */ 2507restart: 2508 list_for_each_entry(vma, head, shared.vm_set.list) { 2509 /* Skip quickly over those we have already dealt with */ 2510 if (vma->vm_truncate_count == details->truncate_count) 2511 continue; 2512 details->nonlinear_vma = vma; 2513 if (unmap_mapping_range_vma(vma, vma->vm_start, 2514 vma->vm_end, details) < 0) 2515 goto restart; 2516 } 2517} 2518 2519/** 2520 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. 2521 * @mapping: the address space containing mmaps to be unmapped. 2522 * @holebegin: byte in first page to unmap, relative to the start of 2523 * the underlying file. This will be rounded down to a PAGE_SIZE 2524 * boundary. Note that this is different from truncate_pagecache(), which 2525 * must keep the partial page. In contrast, we must get rid of 2526 * partial pages. 2527 * @holelen: size of prospective hole in bytes. This will be rounded 2528 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 2529 * end of the file. 2530 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 2531 * but 0 when invalidating pagecache, don't throw away private data. 2532 */ 2533void unmap_mapping_range(struct address_space *mapping, 2534 loff_t const holebegin, loff_t const holelen, int even_cows) 2535{ 2536 struct zap_details details; 2537 pgoff_t hba = holebegin >> PAGE_SHIFT; 2538 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 2539 2540 /* Check for overflow. */ 2541 if (sizeof(holelen) > sizeof(hlen)) { 2542 long long holeend = 2543 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 2544 if (holeend & ~(long long)ULONG_MAX) 2545 hlen = ULONG_MAX - hba + 1; 2546 } 2547 2548 details.check_mapping = even_cows? NULL: mapping; 2549 details.nonlinear_vma = NULL; 2550 details.first_index = hba; 2551 details.last_index = hba + hlen - 1; 2552 if (details.last_index < details.first_index) 2553 details.last_index = ULONG_MAX; 2554 details.i_mmap_lock = &mapping->i_mmap_lock; 2555 2556 spin_lock(&mapping->i_mmap_lock); 2557 2558 /* Protect against endless unmapping loops */ 2559 mapping->truncate_count++; 2560 if (unlikely(is_restart_addr(mapping->truncate_count))) { 2561 if (mapping->truncate_count == 0) 2562 reset_vma_truncate_counts(mapping); 2563 mapping->truncate_count++; 2564 } 2565 details.truncate_count = mapping->truncate_count; 2566 2567 if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 2568 unmap_mapping_range_tree(&mapping->i_mmap, &details); 2569 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2570 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2571 spin_unlock(&mapping->i_mmap_lock); 2572} 2573EXPORT_SYMBOL(unmap_mapping_range); 2574 2575int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) 2576{ 2577 struct address_space *mapping = inode->i_mapping; 2578 2579 /* 2580 * If the underlying filesystem is not going to provide 2581 * a way to truncate a range of blocks (punch a hole) - 2582 * we should return failure right now. 
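 *
 * (Editorial note: in this tree vmtruncate_range() is reached from the
 * madvise(MADV_REMOVE) path, so a userspace hole-punch on e.g. a tmpfs
 * mapping is, as a sketch,
 *	madvise(addr, length, MADV_REMOVE);
 * and the -ENOSYS below propagates back for filesystems that lack
 * ->truncate_range.)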
2583 */ 2584 if (!inode->i_op->truncate_range) 2585 return -ENOSYS; 2586 2587 mutex_lock(&inode->i_mutex); 2588 down_write(&inode->i_alloc_sem); 2589 unmap_mapping_range(mapping, offset, (end - offset), 1); 2590 truncate_inode_pages_range(mapping, offset, end); 2591 unmap_mapping_range(mapping, offset, (end - offset), 1); 2592 inode->i_op->truncate_range(inode, offset, end); 2593 up_write(&inode->i_alloc_sem); 2594 mutex_unlock(&inode->i_mutex); 2595 2596 return 0; 2597} 2598 2599/* 2600 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2601 * but allow concurrent faults), and pte mapped but not yet locked. 2602 * We return with mmap_sem still held, but pte unmapped and unlocked. 2603 */ 2604static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 2605 unsigned long address, pte_t *page_table, pmd_t *pmd, 2606 unsigned int flags, pte_t orig_pte) 2607{ 2608 spinlock_t *ptl; 2609 struct page *page; 2610 swp_entry_t entry; 2611 pte_t pte; 2612 struct mem_cgroup *ptr = NULL; 2613 int ret = 0; 2614 2615 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2616 goto out; 2617 2618 entry = pte_to_swp_entry(orig_pte); 2619 if (unlikely(non_swap_entry(entry))) { 2620 if (is_migration_entry(entry)) { 2621 migration_entry_wait(mm, pmd, address); 2622 } else if (is_hwpoison_entry(entry)) { 2623 ret = VM_FAULT_HWPOISON; 2624 } else { 2625 print_bad_pte(vma, address, orig_pte, NULL); 2626 ret = VM_FAULT_SIGBUS; 2627 } 2628 goto out; 2629 } 2630 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2631 page = lookup_swap_cache(entry); 2632 if (!page) { 2633 grab_swap_token(mm); /* Contend for token _before_ read-in */ 2634 page = swapin_readahead(entry, 2635 GFP_HIGHUSER_MOVABLE, vma, address); 2636 if (!page) { 2637 /* 2638 * Back out if somebody else faulted in this pte 2639 * while we released the pte lock. 2640 */ 2641 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2642 if (likely(pte_same(*page_table, orig_pte))) 2643 ret = VM_FAULT_OOM; 2644 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2645 goto unlock; 2646 } 2647 2648 /* Had to read the page from swap area: Major fault */ 2649 ret = VM_FAULT_MAJOR; 2650 count_vm_event(PGMAJFAULT); 2651 } else if (PageHWPoison(page)) { 2652 /* 2653 * hwpoisoned dirty swapcache pages are kept for killing 2654 * owner processes (which may be unknown at hwpoison time) 2655 */ 2656 ret = VM_FAULT_HWPOISON; 2657 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2658 goto out_release; 2659 } 2660 2661 lock_page(page); 2662 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2663 2664 page = ksm_might_need_to_copy(page, vma, address); 2665 if (!page) { 2666 ret = VM_FAULT_OOM; 2667 goto out; 2668 } 2669 2670 if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { 2671 ret = VM_FAULT_OOM; 2672 goto out_page; 2673 } 2674 2675 /* 2676 * Back out if somebody else already faulted in this pte. 2677 */ 2678 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2679 if (unlikely(!pte_same(*page_table, orig_pte))) 2680 goto out_nomap; 2681 2682 if (unlikely(!PageUptodate(page))) { 2683 ret = VM_FAULT_SIGBUS; 2684 goto out_nomap; 2685 } 2686 2687 /* 2688 * The page isn't present yet, go ahead with the fault. 2689 * 2690 * Be careful about the sequence of operations here. 2691 * To get its accounting right, reuse_swap_page() must be called 2692 * while the page is counted on swap but not yet in mapcount i.e. 2693 * before page_add_anon_rmap() and swap_free(); try_to_free_swap() 2694 * must be called after the swap_free(), or it will never succeed. 
2695 * Because delete_from_swap_cache() may be called by reuse_swap_page(), 2696 * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry 2697 * in page->private. In this case, a record in swap_cgroup is silently 2698 * discarded at swap_free(). 2699 */ 2700 2701 inc_mm_counter_fast(mm, MM_ANONPAGES); 2702 dec_mm_counter_fast(mm, MM_SWAPENTS); 2703 pte = mk_pte(page, vma->vm_page_prot); 2704 if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) { 2705 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 2706 flags &= ~FAULT_FLAG_WRITE; 2707 } 2708 flush_icache_page(vma, page); 2709 set_pte_at(mm, address, page_table, pte); 2710 page_add_anon_rmap(page, vma, address); 2711 /* It's better to call commit-charge after rmap is established */ 2712 mem_cgroup_commit_charge_swapin(page, ptr); 2713 2714 swap_free(entry); 2715 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) 2716 try_to_free_swap(page); 2717 unlock_page(page); 2718 2719 if (flags & FAULT_FLAG_WRITE) { 2720 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 2721 if (ret & VM_FAULT_ERROR) 2722 ret &= VM_FAULT_ERROR; 2723 goto out; 2724 } 2725 2726 /* No need to invalidate - it was non-present before */ 2727 update_mmu_cache(vma, address, page_table); 2728unlock: 2729 pte_unmap_unlock(page_table, ptl); 2730out: 2731 return ret; 2732out_nomap: 2733 mem_cgroup_cancel_charge_swapin(ptr); 2734 pte_unmap_unlock(page_table, ptl); 2735out_page: 2736 unlock_page(page); 2737out_release: 2738 page_cache_release(page); 2739 return ret; 2740} 2741 2742/* 2743 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2744 * but allow concurrent faults), and pte mapped but not yet locked. 2745 * We return with mmap_sem still held, but pte unmapped and unlocked. 2746 */ 2747static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 2748 unsigned long address, pte_t *page_table, pmd_t *pmd, 2749 unsigned int flags) 2750{ 2751 struct page *page; 2752 spinlock_t *ptl; 2753 pte_t entry; 2754 2755 if (!(flags & FAULT_FLAG_WRITE)) { 2756 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), 2757 vma->vm_page_prot)); 2758 ptl = pte_lockptr(mm, pmd); 2759 spin_lock(ptl); 2760 if (!pte_none(*page_table)) 2761 goto unlock; 2762 goto setpte; 2763 } 2764 2765 /* Allocate our own private page. */ 2766 pte_unmap(page_table); 2767 2768 if (unlikely(anon_vma_prepare(vma))) 2769 goto oom; 2770 page = alloc_zeroed_user_highpage_movable(vma, address); 2771 if (!page) 2772 goto oom; 2773 __SetPageUptodate(page); 2774 2775 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) 2776 goto oom_free_page; 2777 2778 entry = mk_pte(page, vma->vm_page_prot); 2779 if (vma->vm_flags & VM_WRITE) 2780 entry = pte_mkwrite(pte_mkdirty(entry)); 2781 2782 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2783 if (!pte_none(*page_table)) 2784 goto release; 2785 2786 inc_mm_counter_fast(mm, MM_ANONPAGES); 2787 page_add_new_anon_rmap(page, vma, address); 2788setpte: 2789 set_pte_at(mm, address, page_table, entry); 2790 2791 /* No need to invalidate - it was non-present before */ 2792 update_mmu_cache(vma, address, page_table); 2793unlock: 2794 pte_unmap_unlock(page_table, ptl); 2795 return 0; 2796release: 2797 mem_cgroup_uncharge_page(page); 2798 page_cache_release(page); 2799 goto unlock; 2800oom_free_page: 2801 page_cache_release(page); 2802oom: 2803 return VM_FAULT_OOM; 2804} 2805 2806/* 2807 * __do_fault() tries to create a new page mapping.
It aggressively 2808 * tries to share with existing pages, but makes a separate copy if 2809 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid 2810 * the next page fault. 2811 * 2812 * As this is called only for pages that do not currently exist, we 2813 * do not need to flush old virtual caches or the TLB. 2814 * 2815 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2816 * but allow concurrent faults), and pte neither mapped nor locked. 2817 * We return with mmap_sem still held, but pte unmapped and unlocked. 2818 */ 2819static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2820 unsigned long address, pmd_t *pmd, 2821 pgoff_t pgoff, unsigned int flags, pte_t orig_pte) 2822{ 2823 pte_t *page_table; 2824 spinlock_t *ptl; 2825 struct page *page; 2826 pte_t entry; 2827 int anon = 0; 2828 int charged = 0; 2829 struct page *dirty_page = NULL; 2830 struct vm_fault vmf; 2831 int ret; 2832 int page_mkwrite = 0; 2833 2834 vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2835 vmf.pgoff = pgoff; 2836 vmf.flags = flags; 2837 vmf.page = NULL; 2838 2839 ret = vma->vm_ops->fault(vma, &vmf); 2840 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2841 return ret; 2842 2843 if (unlikely(PageHWPoison(vmf.page))) { 2844 if (ret & VM_FAULT_LOCKED) 2845 unlock_page(vmf.page); 2846 return VM_FAULT_HWPOISON; 2847 } 2848 2849 /* 2850 * For consistency in subsequent calls, make the faulted page always 2851 * locked. 2852 */ 2853 if (unlikely(!(ret & VM_FAULT_LOCKED))) 2854 lock_page(vmf.page); 2855 else 2856 VM_BUG_ON(!PageLocked(vmf.page)); 2857 2858 /* 2859 * Should we do an early C-O-W break? 2860 */ 2861 page = vmf.page; 2862 if (flags & FAULT_FLAG_WRITE) { 2863 if (!(vma->vm_flags & VM_SHARED)) { 2864 anon = 1; 2865 if (unlikely(anon_vma_prepare(vma))) { 2866 ret = VM_FAULT_OOM; 2867 goto out; 2868 } 2869 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 2870 vma, address); 2871 if (!page) { 2872 ret = VM_FAULT_OOM; 2873 goto out; 2874 } 2875 if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) { 2876 ret = VM_FAULT_OOM; 2877 page_cache_release(page); 2878 goto out; 2879 } 2880 charged = 1; 2881 /* 2882 * Don't let another task, with possibly unlocked vma, 2883 * keep the mlocked page. 2884 */ 2885 if (vma->vm_flags & VM_LOCKED) 2886 clear_page_mlock(vmf.page); 2887 copy_user_highpage(page, vmf.page, address, vma); 2888 __SetPageUptodate(page); 2889 } else { 2890 /* 2891 * If the page will be shareable, see if the backing 2892 * address space wants to know that the page is about 2893 * to become writable 2894 */ 2895 if (vma->vm_ops->page_mkwrite) { 2896 int tmp; 2897 2898 unlock_page(page); 2899 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2900 tmp = vma->vm_ops->page_mkwrite(vma, &vmf); 2901 if (unlikely(tmp & 2902 (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 2903 ret = tmp; 2904 goto unwritable_page; 2905 } 2906 if (unlikely(!(tmp & VM_FAULT_LOCKED))) { 2907 lock_page(page); 2908 if (!page->mapping) { 2909 ret = 0; /* retry the fault */ 2910 unlock_page(page); 2911 goto unwritable_page; 2912 } 2913 } else 2914 VM_BUG_ON(!PageLocked(page)); 2915 page_mkwrite = 1; 2916 } 2917 } 2918 2919 } 2920 2921 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2922 2923 /* 2924 * This silly early PAGE_DIRTY setting removes a race 2925 * due to the bad i386 page protection. But it's valid 2926 * for other architectures too. 
2927 * 2928 * Note that if FAULT_FLAG_WRITE is set, we either now have 2929 * an exclusive copy of the page, or this is a shared mapping, 2930 * so we can make it writable and dirty to avoid having to 2931 * handle that later. 2932 */ 2933 /* Only go through if we didn't race with anybody else... */ 2934 if (likely(pte_same(*page_table, orig_pte))) { 2935 flush_icache_page(vma, page); 2936 entry = mk_pte(page, vma->vm_page_prot); 2937 if (flags & FAULT_FLAG_WRITE) 2938 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2939 if (anon) { 2940 inc_mm_counter_fast(mm, MM_ANONPAGES); 2941 page_add_new_anon_rmap(page, vma, address); 2942 } else { 2943 inc_mm_counter_fast(mm, MM_FILEPAGES); 2944 page_add_file_rmap(page); 2945 if (flags & FAULT_FLAG_WRITE) { 2946 dirty_page = page; 2947 get_page(dirty_page); 2948 } 2949 } 2950 set_pte_at(mm, address, page_table, entry); 2951 2952 /* no need to invalidate: a not-present page won't be cached */ 2953 update_mmu_cache(vma, address, page_table); 2954 } else { 2955 if (charged) 2956 mem_cgroup_uncharge_page(page); 2957 if (anon) 2958 page_cache_release(page); 2959 else 2960 anon = 1; /* no anon but release faulted_page */ 2961 } 2962 2963 pte_unmap_unlock(page_table, ptl); 2964 2965out: 2966 if (dirty_page) { 2967 struct address_space *mapping = page->mapping; 2968 2969 if (set_page_dirty(dirty_page)) 2970 page_mkwrite = 1; 2971 unlock_page(dirty_page); 2972 put_page(dirty_page); 2973 if (page_mkwrite && mapping) { 2974 /* 2975 * Some device drivers do not set page.mapping but still 2976 * dirty their pages 2977 */ 2978 balance_dirty_pages_ratelimited(mapping); 2979 } 2980 2981 /* file_update_time outside page_lock */ 2982 if (vma->vm_file) 2983 file_update_time(vma->vm_file); 2984 } else { 2985 unlock_page(vmf.page); 2986 if (anon) 2987 page_cache_release(vmf.page); 2988 } 2989 2990 return ret; 2991 2992unwritable_page: 2993 page_cache_release(page); 2994 return ret; 2995} 2996 2997static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2998 unsigned long address, pte_t *page_table, pmd_t *pmd, 2999 unsigned int flags, pte_t orig_pte) 3000{ 3001 pgoff_t pgoff = (((address & PAGE_MASK) 3002 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 3003 3004 pte_unmap(page_table); 3005 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 3006} 3007 3008/* 3009 * Fault of a previously existing named mapping. Repopulate the pte 3010 * from the encoded file_pte if possible. This enables swappable 3011 * nonlinear vmas. 3012 * 3013 * We enter with non-exclusive mmap_sem (to exclude vma changes, 3014 * but allow concurrent faults), and pte mapped but not yet locked. 3015 * We return with mmap_sem still held, but pte unmapped and unlocked. 3016 */ 3017static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 3018 unsigned long address, pte_t *page_table, pmd_t *pmd, 3019 unsigned int flags, pte_t orig_pte) 3020{ 3021 pgoff_t pgoff; 3022 3023 flags |= FAULT_FLAG_NONLINEAR; 3024 3025 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 3026 return 0; 3027 3028 if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { 3029 /* 3030 * Page table corrupted: show pte and kill process. 
3031 */ 3032 print_bad_pte(vma, address, orig_pte, NULL); 3033 return VM_FAULT_SIGBUS; 3034 } 3035 3036 pgoff = pte_to_pgoff(orig_pte); 3037 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 3038} 3039 3040/* 3041 * These routines also need to handle stuff like marking pages dirty 3042 * and/or accessed for architectures that don't do it in hardware (most 3043 * RISC architectures). The early dirtying is also good on the i386. 3044 * 3045 * There is also a hook called "update_mmu_cache()" that architectures 3046 * with external mmu caches can use to update those (ie the Sparc or 3047 * PowerPC hashed page tables that act as extended TLBs). 3048 * 3049 * We enter with non-exclusive mmap_sem (to exclude vma changes, 3050 * but allow concurrent faults), and pte mapped but not yet locked. 3051 * We return with mmap_sem still held, but pte unmapped and unlocked. 3052 */ 3053static inline int handle_pte_fault(struct mm_struct *mm, 3054 struct vm_area_struct *vma, unsigned long address, 3055 pte_t *pte, pmd_t *pmd, unsigned int flags) 3056{ 3057 pte_t entry; 3058 spinlock_t *ptl; 3059 3060 entry = *pte; 3061 if (!pte_present(entry)) { 3062 if (pte_none(entry)) { 3063 if (vma->vm_ops) { 3064 if (likely(vma->vm_ops->fault)) 3065 return do_linear_fault(mm, vma, address, 3066 pte, pmd, flags, entry); 3067 } 3068 return do_anonymous_page(mm, vma, address, 3069 pte, pmd, flags); 3070 } 3071 if (pte_file(entry)) 3072 return do_nonlinear_fault(mm, vma, address, 3073 pte, pmd, flags, entry); 3074 return do_swap_page(mm, vma, address, 3075 pte, pmd, flags, entry); 3076 } 3077 3078 ptl = pte_lockptr(mm, pmd); 3079 spin_lock(ptl); 3080 if (unlikely(!pte_same(*pte, entry))) 3081 goto unlock; 3082 if (flags & FAULT_FLAG_WRITE) { 3083 if (!pte_write(entry)) 3084 return do_wp_page(mm, vma, address, 3085 pte, pmd, ptl, entry); 3086 entry = pte_mkdirty(entry); 3087 } 3088 entry = pte_mkyoung(entry); 3089 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { 3090 update_mmu_cache(vma, address, pte); 3091 } else { 3092 /* 3093 * This is needed only for protection faults but the arch code 3094 * is not yet telling us if this is a protection fault or not. 3095 * This still avoids useless tlb flushes for .text page faults 3096 * with threads. 3097 */ 3098 if (flags & FAULT_FLAG_WRITE) 3099 flush_tlb_page(vma, address); 3100 } 3101unlock: 3102 pte_unmap_unlock(pte, ptl); 3103 return 0; 3104} 3105 3106/* 3107 * By the time we get here, we already hold the mm semaphore 3108 */ 3109int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 3110 unsigned long address, unsigned int flags) 3111{ 3112 pgd_t *pgd; 3113 pud_t *pud; 3114 pmd_t *pmd; 3115 pte_t *pte; 3116 3117 __set_current_state(TASK_RUNNING); 3118 3119 count_vm_event(PGFAULT); 3120 3121 /* do counter updates before entering really critical section. */ 3122 check_sync_rss_stat(current); 3123 3124 if (unlikely(is_vm_hugetlb_page(vma))) 3125 return hugetlb_fault(mm, vma, address, flags); 3126 3127 pgd = pgd_offset(mm, address); 3128 pud = pud_alloc(mm, pgd, address); 3129 if (!pud) 3130 return VM_FAULT_OOM; 3131 pmd = pmd_alloc(mm, pud, address); 3132 if (!pmd) 3133 return VM_FAULT_OOM; 3134 pte = pte_alloc_map(mm, pmd, address); 3135 if (!pte) 3136 return VM_FAULT_OOM; 3137 3138 return handle_pte_fault(mm, vma, address, pte, pmd, flags); 3139} 3140 3141#ifndef __PAGETABLE_PUD_FOLDED 3142/* 3143 * Allocate page upper directory. 3144 * We've already handled the fast-path in-line. 
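 *
 * (Editorial note: the inline fast path referred to lives in
 * <linux/mm.h> and reads roughly
 *	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address)) ?
 *		NULL : pud_offset(pgd, address);
 * so __pud_alloc() below only runs when the pgd entry is still empty.)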
3145 */ 3146int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 3147{ 3148 pud_t *new = pud_alloc_one(mm, address); 3149 if (!new) 3150 return -ENOMEM; 3151 3152 smp_wmb(); /* See comment in __pte_alloc */ 3153 3154 spin_lock(&mm->page_table_lock); 3155 if (pgd_present(*pgd)) /* Another has populated it */ 3156 pud_free(mm, new); 3157 else 3158 pgd_populate(mm, pgd, new); 3159 spin_unlock(&mm->page_table_lock); 3160 return 0; 3161} 3162#endif /* __PAGETABLE_PUD_FOLDED */ 3163 3164#ifndef __PAGETABLE_PMD_FOLDED 3165/* 3166 * Allocate page middle directory. 3167 * We've already handled the fast-path in-line. 3168 */ 3169int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 3170{ 3171 pmd_t *new = pmd_alloc_one(mm, address); 3172 if (!new) 3173 return -ENOMEM; 3174 3175 smp_wmb(); /* See comment in __pte_alloc */ 3176 3177 spin_lock(&mm->page_table_lock); 3178#ifndef __ARCH_HAS_4LEVEL_HACK 3179 if (pud_present(*pud)) /* Another has populated it */ 3180 pmd_free(mm, new); 3181 else 3182 pud_populate(mm, pud, new); 3183#else 3184 if (pgd_present(*pud)) /* Another has populated it */ 3185 pmd_free(mm, new); 3186 else 3187 pgd_populate(mm, pud, new); 3188#endif /* __ARCH_HAS_4LEVEL_HACK */ 3189 spin_unlock(&mm->page_table_lock); 3190 return 0; 3191} 3192#endif /* __PAGETABLE_PMD_FOLDED */ 3193 3194int make_pages_present(unsigned long addr, unsigned long end) 3195{ 3196 int ret, len, write; 3197 struct vm_area_struct * vma; 3198 3199 vma = find_vma(current->mm, addr); 3200 if (!vma) 3201 return -ENOMEM; 3202 write = (vma->vm_flags & VM_WRITE) != 0; 3203 BUG_ON(addr >= end); 3204 BUG_ON(end > vma->vm_end); 3205 len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; 3206 ret = get_user_pages(current, current->mm, addr, 3207 len, write, 0, NULL, NULL); 3208 if (ret < 0) 3209 return ret; 3210 return ret == len ? 0 : -EFAULT; 3211} 3212 3213#if !defined(__HAVE_ARCH_GATE_AREA) 3214 3215#if defined(AT_SYSINFO_EHDR) 3216static struct vm_area_struct gate_vma; 3217 3218static int __init gate_vma_init(void) 3219{ 3220 gate_vma.vm_mm = NULL; 3221 gate_vma.vm_start = FIXADDR_USER_START; 3222 gate_vma.vm_end = FIXADDR_USER_END; 3223 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 3224 gate_vma.vm_page_prot = __P101; 3225 /* 3226 * Make sure the vDSO gets into every core dump. 3227 * Dumping its contents makes post-mortem fully interpretable later 3228 * without matching up the same kernel and hardware config to see 3229 * what PC values meant. 
3230 */ 3231 gate_vma.vm_flags |= VM_ALWAYSDUMP; 3232 return 0; 3233} 3234__initcall(gate_vma_init); 3235#endif 3236 3237struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 3238{ 3239#ifdef AT_SYSINFO_EHDR 3240 return &gate_vma; 3241#else 3242 return NULL; 3243#endif 3244} 3245 3246int in_gate_area_no_task(unsigned long addr) 3247{ 3248#ifdef AT_SYSINFO_EHDR 3249 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 3250 return 1; 3251#endif 3252 return 0; 3253} 3254 3255#endif /* __HAVE_ARCH_GATE_AREA */ 3256 3257static int follow_pte(struct mm_struct *mm, unsigned long address, 3258 pte_t **ptepp, spinlock_t **ptlp) 3259{ 3260 pgd_t *pgd; 3261 pud_t *pud; 3262 pmd_t *pmd; 3263 pte_t *ptep; 3264 3265 pgd = pgd_offset(mm, address); 3266 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 3267 goto out; 3268 3269 pud = pud_offset(pgd, address); 3270 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 3271 goto out; 3272 3273 pmd = pmd_offset(pud, address); 3274 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 3275 goto out; 3276 3277 /* We cannot handle huge page PFN maps. Luckily they don't exist. */ 3278 if (pmd_huge(*pmd)) 3279 goto out; 3280 3281 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 3282 if (!ptep) 3283 goto out; 3284 if (!pte_present(*ptep)) 3285 goto unlock; 3286 *ptepp = ptep; 3287 return 0; 3288unlock: 3289 pte_unmap_unlock(ptep, *ptlp); 3290out: 3291 return -EINVAL; 3292} 3293 3294/** 3295 * follow_pfn - look up PFN at a user virtual address 3296 * @vma: memory mapping 3297 * @address: user virtual address 3298 * @pfn: location to store found PFN 3299 * 3300 * Only IO mappings and raw PFN mappings are allowed. 3301 * 3302 * Returns zero and the pfn at @pfn on success, -ve otherwise. 3303 */ 3304int follow_pfn(struct vm_area_struct *vma, unsigned long address, 3305 unsigned long *pfn) 3306{ 3307 int ret = -EINVAL; 3308 spinlock_t *ptl; 3309 pte_t *ptep; 3310 3311 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 3312 return ret; 3313 3314 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); 3315 if (ret) 3316 return ret; 3317 *pfn = pte_pfn(*ptep); 3318 pte_unmap_unlock(ptep, ptl); 3319 return 0; 3320} 3321EXPORT_SYMBOL(follow_pfn); 3322 3323#ifdef CONFIG_HAVE_IOREMAP_PROT 3324int follow_phys(struct vm_area_struct *vma, 3325 unsigned long address, unsigned int flags, 3326 unsigned long *prot, resource_size_t *phys) 3327{ 3328 int ret = -EINVAL; 3329 pte_t *ptep, pte; 3330 spinlock_t *ptl; 3331 3332 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 3333 goto out; 3334 3335 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 3336 goto out; 3337 pte = *ptep; 3338 3339 if ((flags & FOLL_WRITE) && !pte_write(pte)) 3340 goto unlock; 3341 3342 *prot = pgprot_val(pte_pgprot(pte)); 3343 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 3344 3345 ret = 0; 3346unlock: 3347 pte_unmap_unlock(ptep, ptl); 3348out: 3349 return ret; 3350} 3351 3352int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 3353 void *buf, int len, int write) 3354{ 3355 resource_size_t phys_addr; 3356 unsigned long prot = 0; 3357 void __iomem *maddr; 3358 int offset = addr & (PAGE_SIZE-1); 3359 3360 if (follow_phys(vma, addr, write, &prot, &phys_addr)) 3361 return -EINVAL; 3362 3363 maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); 3364 if (write) 3365 memcpy_toio(maddr + offset, buf, len); 3366 else 3367 memcpy_fromio(buf, maddr + offset, len); 3368 iounmap(maddr); 3369 3370 return len; 3371} 3372#endif 3373 3374/* 3375 * Access another process' address space. 
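 *
 * (Editorial note: this is the workhorse behind ptrace(2) peeks and
 * pokes; e.g. generic_ptrace_peekdata() in kernel/ptrace.c does
 *	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
 * and the poke side passes write=1.)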
3376 * Source/target buffer must be in kernel space. 3377 * Do not walk the page tables directly; use get_user_pages(). 3378 */ 3379int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 3380{ 3381 struct mm_struct *mm; 3382 struct vm_area_struct *vma; 3383 void *old_buf = buf; 3384 3385 mm = get_task_mm(tsk); 3386 if (!mm) 3387 return 0; 3388 3389 down_read(&mm->mmap_sem); 3390 /* ignore errors, just check how much was successfully transferred */ 3391 while (len) { 3392 int bytes, ret, offset; 3393 void *maddr; 3394 struct page *page = NULL; 3395 3396 ret = get_user_pages(tsk, mm, addr, 1, 3397 write, 1, &page, &vma); 3398 if (ret <= 0) { 3399 /* 3400 * Check if this is a VM_IO | VM_PFNMAP VMA, which 3401 * we can access using slightly different code. 3402 */ 3403#ifdef CONFIG_HAVE_IOREMAP_PROT 3404 vma = find_vma(mm, addr); 3405 if (!vma) 3406 break; 3407 if (vma->vm_ops && vma->vm_ops->access) 3408 ret = vma->vm_ops->access(vma, addr, buf, 3409 len, write); 3410 if (ret <= 0) 3411#endif 3412 break; 3413 bytes = ret; 3414 } else { 3415 bytes = len; 3416 offset = addr & (PAGE_SIZE-1); 3417 if (bytes > PAGE_SIZE-offset) 3418 bytes = PAGE_SIZE-offset; 3419 3420 maddr = kmap(page); 3421 if (write) { 3422 copy_to_user_page(vma, page, addr, 3423 maddr + offset, buf, bytes); 3424 set_page_dirty_lock(page); 3425 } else { 3426 copy_from_user_page(vma, page, addr, 3427 buf, maddr + offset, bytes); 3428 } 3429 kunmap(page); 3430 page_cache_release(page); 3431 } 3432 len -= bytes; 3433 buf += bytes; 3434 addr += bytes; 3435 } 3436 up_read(&mm->mmap_sem); 3437 mmput(mm); 3438 3439 return buf - old_buf; 3440} 3441 3442/* 3443 * Print the name of a VMA. 3444 */ 3445void print_vma_addr(char *prefix, unsigned long ip) 3446{ 3447 struct mm_struct *mm = current->mm; 3448 struct vm_area_struct *vma; 3449 3450 /* 3451 * Do not print if we are in atomic 3452 * contexts (in exception stacks, etc.): 3453 */ 3454 if (preempt_count()) 3455 return; 3456 3457 down_read(&mm->mmap_sem); 3458 vma = find_vma(mm, ip); 3459 if (vma && vma->vm_file) { 3460 struct file *f = vma->vm_file; 3461 char *buf = (char *)__get_free_page(GFP_KERNEL); 3462 if (buf) { 3463 char *p, *s; 3464 3465 p = d_path(&f->f_path, buf, PAGE_SIZE); 3466 if (IS_ERR(p)) 3467 p = "?"; 3468 s = strrchr(p, '/'); 3469 if (s) 3470 p = s+1; 3471 printk("%s%s[%lx+%lx]", prefix, p, 3472 vma->vm_start, 3473 vma->vm_end - vma->vm_start); 3474 free_page((unsigned long)buf); 3475 } 3476 } 3477 up_read(&current->mm->mmap_sem); 3478} 3479 3480#ifdef CONFIG_PROVE_LOCKING 3481void might_fault(void) 3482{ 3483 /* 3484 * Some code (nfs/sunrpc) uses socket ops on kernel memory while 3485 * holding the mmap_sem; this is safe because kernel memory doesn't 3486 * get paged out, therefore we'll never actually fault, and the 3487 * below annotations will generate false positives. 3488 */ 3489 if (segment_eq(get_fs(), KERNEL_DS)) 3490 return; 3491 3492 might_sleep(); 3493 /* 3494 * it would be nicer only to annotate paths which are not under 3495 * pagefault_disable, however that requires a larger audit and 3496 * providing helpers like get_user_atomic. 3497 */ 3498 if (!in_atomic() && current->mm) 3499 might_lock_read(&current->mm->mmap_sem); 3500} 3501EXPORT_SYMBOL(might_fault); 3502#endif
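/*
 * Editorial usage sketch (not part of memory.c): might_fault() above is
 * meant to be called from the uaccess primitives before they touch user
 * memory, so lockdep can flag mmap_sem inversions even on runs where no
 * fault actually occurs.  A hypothetical wrapper would look like:
 *
 *	static inline unsigned long
 *	my_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		might_fault();
 *		return __copy_from_user(to, from, n);
 *	}
 */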