/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;
unsigned long vmalloc_earlyreserve;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);

int randomize_va_space __read_mostly = 1;

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);


/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_lock_deinit(page);
	pte_free_tlb(tlb, page);
	dec_zone_page_state(page, NR_PAGETABLE);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);

	if (!(*tlb)->fullmm)
		flush_tlb_pgtables((*tlb)->mm, start, end);
}

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	struct page *new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	pte_lock_init(new);
	spin_lock(&mm->page_table_lock);
	if (pmd_present(*pmd)) {	/* Another has populated it */
		pte_lock_deinit(new);
		pte_free(new);
	} else {
		mm->nr_ptes++;
		inc_zone_page_state(new, NR_PAGETABLE);
		pmd_populate(mm, pmd, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (pmd_present(*pmd))		/* Another has populated it */
		pte_free_kernel(new);
	else
		pmd_populate_kernel(&init_mm, pmd, new);
	spin_unlock(&init_mm.page_table_lock);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * This function gets the "struct page" associated with a pte.
 *
 * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
 * will have each page table entry just pointing to a raw page frame
 * number, and as far as the VM layer is concerned, those do not have
 * pages associated with them - even if the PFN might point to memory
 * that otherwise is perfectly fine and has a "struct page".
 *
 * The way we recognize those mappings is through the rules set up
 * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
 * and the vm_pgoff will point to the first PFN mapped: thus every
 * page that is a raw mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * and if that isn't true, the page has been COW'ed (in which case it
 * _does_ have a "struct page" associated with it even if it is in a
 * VM_PFNMAP range).
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
		if (pfn == vma->vm_pgoff + off)
			return NULL;
		if (!is_cow_mapping(vma->vm_flags))
			return NULL;
	}

	/*
	 * Add some anal sanity checks for now. Eventually,
	 * we should just do "return pfn_to_page(pfn)", but
	 * in the meantime we check that we get a valid pfn,
	 * and that the resulting page looks ok.
	 */
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page
	 * tables.
	 *
	 * The ZERO_PAGE() pages and various VDSO mappings can
	 * cause them to exist.
	 */
	return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			swap_duplicate(entry);
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = *src_pte;
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page);
		rss[!!PageAnon(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    need_lockbreak(src_ptl) ||
			    need_lockbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
	return 0;
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					mark_page_accessed(page);
				file_rss--;
			}
			page_remove_rmap(page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss(mm, file_rss, anon_rss);
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				unmap_hugepage_range(vma, start, end);
				zap_work -= (end - start) /
						(HPAGE_SIZE / PAGE_SIZE);
				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		goto out;

	pte = *ptep;
	if (!pte_present(pte))
		goto unlock;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page))
		goto unlock;

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(address);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags;

	/*
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;
		unsigned int foll_flags;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
				|| !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i);
			continue;
		}

		foll_flags = FOLL_TOUCH;
		if (pages)
			foll_flags |= FOLL_GET;
		if (!write && !(vma->vm_flags & VM_LOCKED) &&
		    (!vma->vm_ops || !vma->vm_ops->nopage))
			foll_flags |= FOLL_ANON;

		do {
			struct page *page;

			if (write)
				foll_flags |= FOLL_WRITE;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				ret = __handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);
				/*
				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
				 * broken COW when necessary, even if maybe_mkwrite
				 * decided not to set pte_write. We can thus safely do
				 * subsequent page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;

				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
			}
			if (pages) {
				pages[i] = page;

				flush_anon_page(page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);

static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = ZERO_PAGE(addr);
		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
		page_cache_get(page);
		page_add_file_rmap(page);
		inc_mm_counter(mm, file_rss);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (zeromap_pte_range(mm, pmd, addr, next, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (zeromap_pmd_range(mm, pud, addr, next, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int zeromap_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	struct mm_struct *mm = vma->vm_mm;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = zeromap_pud_range(mm, pgd, addr, next, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}

pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
{
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter(mm, file_rss);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/*
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages results
 * in null mappings (currently treated as "copy-on-access")
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*  Note: this is only safe if the mm semaphore is held when called. */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 * 	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically. Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case, we do always
 * want pte_mkwrite. But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		return;

	}
	copy_user_highpage(dst, src, va);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int reuse, ret = VM_FAULT_MINOR;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page)
		goto gotten;

	if (unlikely((vma->vm_flags & (VM_SHARED|VM_WRITE)) ==
				(VM_SHARED|VM_WRITE))) {
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
				goto unwritable_page;

			page_cache_release(old_page);

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it. If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			if (!pte_same(*page_table, orig_pte))
				goto unlock;
		}

		reuse = 1;
	} else if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
		reuse = can_share_swap_page(old_page);
		unlock_page(old_page);
	} else {
		reuse = 0;
	}

	if (reuse) {
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		ptep_set_access_flags(vma, address, page_table, entry, 1);
		update_mmu_cache(vma, address, entry);
		lazy_mmu_prot_update(entry);
		ret |= VM_FAULT_WRITE;
		goto unlock;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	if (old_page == ZERO_PAGE(address)) {
		new_page = alloc_zeroed_user_highpage(vma, address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
		if (!new_page)
			goto oom;
		cow_user_page(new_page, old_page, address);
	}

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			page_remove_rmap(old_page);
			if (!PageAnon(old_page)) {
				dec_mm_counter(mm, file_rss);
				inc_mm_counter(mm, anon_rss);
			}
		} else
			inc_mm_counter(mm, anon_rss);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		lazy_mmu_prot_update(entry);
		ptep_establish(vma, address, page_table, entry);
		update_mmu_cache(vma, address, entry);
		lru_cache_add_active(new_page);
		page_add_new_anon_rmap(new_page, vma, address);

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	}
	if (new_page)
		page_cache_release(new_page);
	if (old_page)
		page_cache_release(old_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return ret;
oom:
	if (old_page)
		page_cache_release(old_page);
	return VM_FAULT_OOM;

unwritable_page:
	page_cache_release(old_page);
	return VM_FAULT_SIGBUS;
}

/*
 * Helper functions for unmap_mapping_range().
 *
 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
 *
 * We have to restart searching the prio_tree whenever we drop the lock,
 * since the iterator is only valid while the lock is held, and anyway
 * a later vma might be split and reinserted earlier while lock dropped.
 *
 * The list of nonlinear vmas could be handled more efficiently, using
 * a placeholder, but handle it in the same way until a need is shown.
 * It is important to search the prio_tree before nonlinear list: a vma
 * may become nonlinear and be shifted from prio_tree to nonlinear list
 * while the lock is dropped; but never shifted from list to prio_tree.
 *
 * In order to make forward progress despite restarting the search,
 * vm_truncate_count is used to mark a vma as now dealt with, so we can
 * quickly skip it next time around. Since the prio_tree search only
 * shows us those vmas affected by unmapping the range in question, we
 * can't efficiently keep all vmas in step with mapping->truncate_count:
 * so instead reset them all whenever it wraps back to 0 (then go to 1).
 * mapping->truncate_count and vma->vm_truncate_count are protected by
 * i_mmap_lock.
 *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
 * and restart from that address when we reach that vma again. It might
 * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
 * unmap_mapping_range forces truncate_count to leap over page-aligned
 * values so we can save vma's restart_addr in its truncate_count field.
 */
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))

static void reset_vma_truncate_counts(struct address_space *mapping)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
		vma->vm_truncate_count = 0;
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_truncate_count = 0;
}

static int unmap_mapping_range_vma(struct vm_area_struct *vma,
		unsigned long start_addr, unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long restart_addr;
	int need_break;

again:
	restart_addr = vma->vm_truncate_count;
	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
		start_addr = restart_addr;
		if (start_addr >= end_addr) {
			/* Top of vma has been split off since last time */
			vma->vm_truncate_count = details->truncate_count;
			return 0;
		}
	}

	restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);
	need_break = need_resched() ||
			need_lockbreak(details->i_mmap_lock);

	if (restart_addr >= end_addr) {
		/* We have now completed this vma: mark it so */
		vma->vm_truncate_count = details->truncate_count;
		if (!need_break)
			return 0;
	} else {
		/* Note restart_addr in vma's truncate_count field */
		vma->vm_truncate_count = restart_addr;
		if (!need_break)
			goto again;
	}

	spin_unlock(details->i_mmap_lock);
	cond_resched();
	spin_lock(details->i_mmap_lock);
	return -EINTR;
}

static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	pgoff_t vba, vea, zba, zea;

restart:
	vma_prio_tree_foreach(vma, &iter, root,
			details->first_index, details->last_index) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;

		vba = vma->vm_pgoff;
		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
		zba = details->first_index;
		if (zba < vba)
			zba = vba;
		zea = details->last_index;
		if (zea > vea)
			zea = vea;

		if (unmap_mapping_range_vma(vma,
			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
				details) < 0)
			goto restart;
	}
}

static inline void unmap_mapping_range_list(struct list_head *head,
					    struct zap_details *details)
{
	struct vm_area_struct *vma;

	/*
	 * In nonlinear VMAs there is no correspondence between virtual address
	 * offset and file offset. So we must perform an exhaustive search
	 * across *all* the pages in each nonlinear VMA, not just the pages
	 * whose virtual address lies outside the file truncation point.
	 */
restart:
	list_for_each_entry(vma, head, shared.vm_set.list) {
		/* Skip quickly over those we have already dealt with */
		if (vma->vm_truncate_count == details->truncate_count)
			continue;
		details->nonlinear_vma = vma;
		if (unmap_mapping_range_vma(vma, vma->vm_start,
					vma->vm_end, details) < 0)
			goto restart;
	}
}

/**
 * unmap_mapping_range - unmap the portion of all mmaps
 * in the specified address_space corresponding to the specified
 * page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
 * @holebegin: byte in first page to unmap, relative to the start of
 * the underlying file.  This will be rounded down to a PAGE_SIZE
 * boundary.  Note that this is different from vmtruncate(), which
 * must keep the partial page.  In contrast, we must get rid of
 * partial pages.
 * @holelen: size of prospective hole in bytes.  This will be rounded
 * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
 * end of the file.
 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
 * but 0 when invalidating pagecache, don't throw away private data.
 */
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows)
{
	struct zap_details details;
	pgoff_t hba = holebegin >> PAGE_SHIFT;
	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* Check for overflow. */
	if (sizeof(holelen) > sizeof(hlen)) {
		long long holeend =
			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (holeend & ~(long long)ULONG_MAX)
			hlen = ULONG_MAX - hba + 1;
	}

	details.check_mapping = even_cows? NULL: mapping;
	details.nonlinear_vma = NULL;
	details.first_index = hba;
	details.last_index = hba + hlen - 1;
	if (details.last_index < details.first_index)
		details.last_index = ULONG_MAX;
	details.i_mmap_lock = &mapping->i_mmap_lock;

	spin_lock(&mapping->i_mmap_lock);

	/* serialize i_size write against truncate_count write */
	smp_wmb();
	/* Protect against page faults, and endless unmapping loops */
	mapping->truncate_count++;
	/*
	 * For archs where spin_lock has inclusive semantics like ia64
	 * this smp_mb() will prevent to read pagetable contents
	 * before the truncate_count increment is visible to
	 * other cpus.
	 */
	smp_mb();
	if (unlikely(is_restart_addr(mapping->truncate_count))) {
		if (mapping->truncate_count == 0)
			reset_vma_truncate_counts(mapping);
		mapping->truncate_count++;
	}
	details.truncate_count = mapping->truncate_count;

	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
		unmap_mapping_range_tree(&mapping->i_mmap, &details);
	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
	spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode * inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	/*
	 * truncation of in-use swapfiles is disallowed - it would cause
	 * subsequent swapout to scribble on the now-freed blocks.
	 */
	if (IS_SWAPFILE(inode))
		goto out_busy;
	i_size_write(inode, offset);
	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out_big;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
out_busy:
	return -ETXTBSY;
}
EXPORT_SYMBOL(vmtruncate);

int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 */
	if (!inode->i_op || !inode->i_op->truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	down_write(&inode->i_alloc_sem);
	unmap_mapping_range(mapping, offset, (end - offset), 1);
	truncate_inode_pages_range(mapping, offset, end);
	inode->i_op->truncate_range(inode, offset, end);
	up_write(&inode->i_alloc_sem);
	mutex_unlock(&inode->i_mutex);

	return 0;
}
EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /*  June 2006  */

/* 
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...  
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
{
#ifdef CONFIG_NUMA
	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
#endif
	int i, num;
	struct page *new_page;
	unsigned long offset;

	/*
	 * Get the number of handles we should do readahead io to.
	 */
	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		/* Ok, do the async read-ahead now */
		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
							   offset), vma, addr);
		if (!new_page)
			break;
		page_cache_release(new_page);
#ifdef CONFIG_NUMA
		/*
		 * Find the next applicable VMA for the NUMA policy.
		 */
		addr += PAGE_SIZE;
		if (addr == 0)
			vma = NULL;
		if (vma) {
			if (addr >= vma->vm_end) {
				vma = next_vma;
				next_vma = vma ? vma->vm_next : NULL;
vma->vm_next : NULL; 1901 } 1902 if (vma && addr < vma->vm_start) 1903 vma = NULL; 1904 } else { 1905 if (next_vma && addr >= next_vma->vm_start) { 1906 vma = next_vma; 1907 next_vma = vma->vm_next; 1908 } 1909 } 1910#endif 1911 } 1912 lru_add_drain(); /* Push any new pages onto the LRU now */ 1913} 1914 1915/* 1916 * We enter with non-exclusive mmap_sem (to exclude vma changes, 1917 * but allow concurrent faults), and pte mapped but not yet locked. 1918 * We return with mmap_sem still held, but pte unmapped and unlocked. 1919 */ 1920static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 1921 unsigned long address, pte_t *page_table, pmd_t *pmd, 1922 int write_access, pte_t orig_pte) 1923{ 1924 spinlock_t *ptl; 1925 struct page *page; 1926 swp_entry_t entry; 1927 pte_t pte; 1928 int ret = VM_FAULT_MINOR; 1929 1930 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 1931 goto out; 1932 1933 entry = pte_to_swp_entry(orig_pte); 1934 if (is_migration_entry(entry)) { 1935 migration_entry_wait(mm, pmd, address); 1936 goto out; 1937 } 1938 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 1939 page = lookup_swap_cache(entry); 1940 if (!page) { 1941 swapin_readahead(entry, address, vma); 1942 page = read_swap_cache_async(entry, vma, address); 1943 if (!page) { 1944 /* 1945 * Back out if somebody else faulted in this pte 1946 * while we released the pte lock. 1947 */ 1948 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1949 if (likely(pte_same(*page_table, orig_pte))) 1950 ret = VM_FAULT_OOM; 1951 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 1952 goto unlock; 1953 } 1954 1955 /* Had to read the page from swap area: Major fault */ 1956 ret = VM_FAULT_MAJOR; 1957 count_vm_event(PGMAJFAULT); 1958 grab_swap_token(); 1959 } 1960 1961 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 1962 mark_page_accessed(page); 1963 lock_page(page); 1964 1965 /* 1966 * Back out if somebody else already faulted in this pte. 1967 */ 1968 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1969 if (unlikely(!pte_same(*page_table, orig_pte))) 1970 goto out_nomap; 1971 1972 if (unlikely(!PageUptodate(page))) { 1973 ret = VM_FAULT_SIGBUS; 1974 goto out_nomap; 1975 } 1976 1977 /* The page isn't present yet, go ahead with the fault. */ 1978 1979 inc_mm_counter(mm, anon_rss); 1980 pte = mk_pte(page, vma->vm_page_prot); 1981 if (write_access && can_share_swap_page(page)) { 1982 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 1983 write_access = 0; 1984 } 1985 1986 flush_icache_page(vma, page); 1987 set_pte_at(mm, address, page_table, pte); 1988 page_add_anon_rmap(page, vma, address); 1989 1990 swap_free(entry); 1991 if (vm_swap_full()) 1992 remove_exclusive_swap_page(page); 1993 unlock_page(page); 1994 1995 if (write_access) { 1996 if (do_wp_page(mm, vma, address, 1997 page_table, pmd, ptl, pte) == VM_FAULT_OOM) 1998 ret = VM_FAULT_OOM; 1999 goto out; 2000 } 2001 2002 /* No need to invalidate - it was non-present before */ 2003 update_mmu_cache(vma, address, pte); 2004 lazy_mmu_prot_update(pte); 2005unlock: 2006 pte_unmap_unlock(page_table, ptl); 2007out: 2008 return ret; 2009out_nomap: 2010 pte_unmap_unlock(page_table, ptl); 2011 unlock_page(page); 2012 page_cache_release(page); 2013 return ret; 2014} 2015 2016/* 2017 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2018 * but allow concurrent faults), and pte mapped but not yet locked. 2019 * We return with mmap_sem still held, but pte unmapped and unlocked. 
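 *
 * Two cases are handled below: a write fault gets its own zeroed
 * anonymous page (charged to anon_rss), while a read fault is mapped to
 * the shared ZERO_PAGE read-only (charged to file_rss), deferring the
 * real allocation until the first write to the page.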
2020 */ 2021static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 2022 unsigned long address, pte_t *page_table, pmd_t *pmd, 2023 int write_access) 2024{ 2025 struct page *page; 2026 spinlock_t *ptl; 2027 pte_t entry; 2028 2029 if (write_access) { 2030 /* Allocate our own private page. */ 2031 pte_unmap(page_table); 2032 2033 if (unlikely(anon_vma_prepare(vma))) 2034 goto oom; 2035 page = alloc_zeroed_user_highpage(vma, address); 2036 if (!page) 2037 goto oom; 2038 2039 entry = mk_pte(page, vma->vm_page_prot); 2040 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2041 2042 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2043 if (!pte_none(*page_table)) 2044 goto release; 2045 inc_mm_counter(mm, anon_rss); 2046 lru_cache_add_active(page); 2047 page_add_new_anon_rmap(page, vma, address); 2048 } else { 2049 /* Map the ZERO_PAGE - vm_page_prot is readonly */ 2050 page = ZERO_PAGE(address); 2051 page_cache_get(page); 2052 entry = mk_pte(page, vma->vm_page_prot); 2053 2054 ptl = pte_lockptr(mm, pmd); 2055 spin_lock(ptl); 2056 if (!pte_none(*page_table)) 2057 goto release; 2058 inc_mm_counter(mm, file_rss); 2059 page_add_file_rmap(page); 2060 } 2061 2062 set_pte_at(mm, address, page_table, entry); 2063 2064 /* No need to invalidate - it was non-present before */ 2065 update_mmu_cache(vma, address, entry); 2066 lazy_mmu_prot_update(entry); 2067unlock: 2068 pte_unmap_unlock(page_table, ptl); 2069 return VM_FAULT_MINOR; 2070release: 2071 page_cache_release(page); 2072 goto unlock; 2073oom: 2074 return VM_FAULT_OOM; 2075} 2076 2077/* 2078 * do_no_page() tries to create a new page mapping. It aggressively 2079 * tries to share with existing pages, but makes a separate copy if 2080 * the "write_access" parameter is true in order to avoid the next 2081 * page fault. 2082 * 2083 * As this is called only for pages that do not currently exist, we 2084 * do not need to flush old virtual caches or the TLB. 2085 * 2086 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2087 * but allow concurrent faults), and pte mapped but not yet locked. 2088 * We return with mmap_sem still held, but pte unmapped and unlocked. 2089 */ 2090static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2091 unsigned long address, pte_t *page_table, pmd_t *pmd, 2092 int write_access) 2093{ 2094 spinlock_t *ptl; 2095 struct page *new_page; 2096 struct address_space *mapping = NULL; 2097 pte_t entry; 2098 unsigned int sequence = 0; 2099 int ret = VM_FAULT_MINOR; 2100 int anon = 0; 2101 2102 pte_unmap(page_table); 2103 BUG_ON(vma->vm_flags & VM_PFNMAP); 2104 2105 if (vma->vm_file) { 2106 mapping = vma->vm_file->f_mapping; 2107 sequence = mapping->truncate_count; 2108 smp_rmb(); /* serializes i_size against truncate_count */ 2109 } 2110retry: 2111 new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret); 2112 /* 2113 * No smp_rmb is needed here as long as there's a full 2114 * spin_lock/unlock sequence inside the ->nopage callback 2115 * (for the pagecache lookup) that acts as an implicit 2116 * smp_mb() and prevents the i_size read to happen 2117 * after the next truncate_count read. 2118 */ 2119 2120 /* no page was available -- either SIGBUS or OOM */ 2121 if (new_page == NOPAGE_SIGBUS) 2122 return VM_FAULT_SIGBUS; 2123 if (new_page == NOPAGE_OOM) 2124 return VM_FAULT_OOM; 2125 2126 /* 2127 * Should we do an early C-O-W break? 
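 *
 * (For a private mapping, a write fault would otherwise be followed by
 * an immediate write-protect fault, so copy the page now; for a shared
 * mapping, give the backing address space a chance to refuse the write
 * via ->page_mkwrite() before the pte is made writable.)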
2128 */ 2129 if (write_access) { 2130 if (!(vma->vm_flags & VM_SHARED)) { 2131 struct page *page; 2132 2133 if (unlikely(anon_vma_prepare(vma))) 2134 goto oom; 2135 page = alloc_page_vma(GFP_HIGHUSER, vma, address); 2136 if (!page) 2137 goto oom; 2138 copy_user_highpage(page, new_page, address); 2139 page_cache_release(new_page); 2140 new_page = page; 2141 anon = 1; 2142 2143 } else { 2144 /* if the page will be shareable, see if the backing 2145 * address space wants to know that the page is about 2146 * to become writable */ 2147 if (vma->vm_ops->page_mkwrite && 2148 vma->vm_ops->page_mkwrite(vma, new_page) < 0 2149 ) { 2150 page_cache_release(new_page); 2151 return VM_FAULT_SIGBUS; 2152 } 2153 } 2154 } 2155 2156 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2157 /* 2158 * For a file-backed vma, someone could have truncated or otherwise 2159 * invalidated this page. If unmap_mapping_range got called, 2160 * retry getting the page. 2161 */ 2162 if (mapping && unlikely(sequence != mapping->truncate_count)) { 2163 pte_unmap_unlock(page_table, ptl); 2164 page_cache_release(new_page); 2165 cond_resched(); 2166 sequence = mapping->truncate_count; 2167 smp_rmb(); 2168 goto retry; 2169 } 2170 2171 /* 2172 * This silly early PAGE_DIRTY setting removes a race 2173 * due to the bad i386 page protection. But it's valid 2174 * for other architectures too. 2175 * 2176 * Note that if write_access is true, we either now have 2177 * an exclusive copy of the page, or this is a shared mapping, 2178 * so we can make it writable and dirty to avoid having to 2179 * handle that later. 2180 */ 2181 /* Only go through if we didn't race with anybody else... */ 2182 if (pte_none(*page_table)) { 2183 flush_icache_page(vma, new_page); 2184 entry = mk_pte(new_page, vma->vm_page_prot); 2185 if (write_access) 2186 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2187 set_pte_at(mm, address, page_table, entry); 2188 if (anon) { 2189 inc_mm_counter(mm, anon_rss); 2190 lru_cache_add_active(new_page); 2191 page_add_new_anon_rmap(new_page, vma, address); 2192 } else { 2193 inc_mm_counter(mm, file_rss); 2194 page_add_file_rmap(new_page); 2195 } 2196 } else { 2197 /* One of our sibling threads was faster, back out. */ 2198 page_cache_release(new_page); 2199 goto unlock; 2200 } 2201 2202 /* no need to invalidate: a not-present page shouldn't be cached */ 2203 update_mmu_cache(vma, address, entry); 2204 lazy_mmu_prot_update(entry); 2205unlock: 2206 pte_unmap_unlock(page_table, ptl); 2207 return ret; 2208oom: 2209 page_cache_release(new_page); 2210 return VM_FAULT_OOM; 2211} 2212 2213/* 2214 * Fault of a previously existing named mapping. Repopulate the pte 2215 * from the encoded file_pte if possible. This enables swappable 2216 * nonlinear vmas. 2217 * 2218 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2219 * but allow concurrent faults), and pte mapped but not yet locked. 2220 * We return with mmap_sem still held, but pte unmapped and unlocked. 2221 */ 2222static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma, 2223 unsigned long address, pte_t *page_table, pmd_t *pmd, 2224 int write_access, pte_t orig_pte) 2225{ 2226 pgoff_t pgoff; 2227 int err; 2228 2229 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2230 return VM_FAULT_MINOR; 2231 2232 if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { 2233 /* 2234 * Page table corrupted: show pte and kill process. 
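 *
 * (A file pte in a vma that was never marked VM_NONLINEAR can only mean
 * the page tables have been corrupted, so dump the offending pte and
 * fail the fault rather than try to repopulate the entry.)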
2235	 */
2236		print_bad_pte(vma, orig_pte, address);
2237		return VM_FAULT_OOM;
2238	}
2239	/* We can then assume vma->vm_ops && vma->vm_ops->populate */
2240
2241	pgoff = pte_to_pgoff(orig_pte);
2242	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
2243					vma->vm_page_prot, pgoff, 0);
2244	if (err == -ENOMEM)
2245		return VM_FAULT_OOM;
2246	if (err)
2247		return VM_FAULT_SIGBUS;
2248	return VM_FAULT_MAJOR;
2249}
2250
2251/*
2252 * These routines also need to handle stuff like marking pages dirty
2253 * and/or accessed for architectures that don't do it in hardware (most
2254 * RISC architectures). The early dirtying is also good on the i386.
2255 *
2256 * There is also a hook called "update_mmu_cache()" that architectures
2257 * with external mmu caches can use to update those (ie the Sparc or
2258 * PowerPC hashed page tables that act as extended TLBs).
2259 *
2260 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2261 * but allow concurrent faults), and pte mapped but not yet locked.
2262 * We return with mmap_sem still held, but pte unmapped and unlocked.
2263 */
2264static inline int handle_pte_fault(struct mm_struct *mm,
2265		struct vm_area_struct *vma, unsigned long address,
2266		pte_t *pte, pmd_t *pmd, int write_access)
2267{
2268	pte_t entry;
2269	pte_t old_entry;
2270	spinlock_t *ptl;
2271
2272	old_entry = entry = *pte;
2273	if (!pte_present(entry)) {
2274		if (pte_none(entry)) {
2275			if (!vma->vm_ops || !vma->vm_ops->nopage)
2276				return do_anonymous_page(mm, vma, address,
2277					pte, pmd, write_access);
2278			return do_no_page(mm, vma, address,
2279				pte, pmd, write_access);
2280		}
2281		if (pte_file(entry))
2282			return do_file_page(mm, vma, address,
2283				pte, pmd, write_access, entry);
2284		return do_swap_page(mm, vma, address,
2285				pte, pmd, write_access, entry);
2286	}
2287
2288	ptl = pte_lockptr(mm, pmd);
2289	spin_lock(ptl);
2290	if (unlikely(!pte_same(*pte, entry)))
2291		goto unlock;
2292	if (write_access) {
2293		if (!pte_write(entry))
2294			return do_wp_page(mm, vma, address,
2295					pte, pmd, ptl, entry);
2296		entry = pte_mkdirty(entry);
2297	}
2298	entry = pte_mkyoung(entry);
2299	if (!pte_same(old_entry, entry)) {
2300		ptep_set_access_flags(vma, address, pte, entry, write_access);
2301		update_mmu_cache(vma, address, entry);
2302		lazy_mmu_prot_update(entry);
2303	} else {
2304		/*
2305		 * This is needed only for protection faults but the arch code
2306		 * is not yet telling us if this is a protection fault or not.
2307		 * This still avoids useless tlb flushes for .text page faults
2308		 * with threads.
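		 *
		 * (The flush below evicts a stale TLB entry that may still
		 * hold the old read-only mapping; a read fault that changed
		 * nothing can simply be retried by the hardware without any
		 * flush.)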
2309 */ 2310 if (write_access) 2311 flush_tlb_page(vma, address); 2312 } 2313unlock: 2314 pte_unmap_unlock(pte, ptl); 2315 return VM_FAULT_MINOR; 2316} 2317 2318/* 2319 * By the time we get here, we already hold the mm semaphore 2320 */ 2321int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2322 unsigned long address, int write_access) 2323{ 2324 pgd_t *pgd; 2325 pud_t *pud; 2326 pmd_t *pmd; 2327 pte_t *pte; 2328 2329 __set_current_state(TASK_RUNNING); 2330 2331 count_vm_event(PGFAULT); 2332 2333 if (unlikely(is_vm_hugetlb_page(vma))) 2334 return hugetlb_fault(mm, vma, address, write_access); 2335 2336 pgd = pgd_offset(mm, address); 2337 pud = pud_alloc(mm, pgd, address); 2338 if (!pud) 2339 return VM_FAULT_OOM; 2340 pmd = pmd_alloc(mm, pud, address); 2341 if (!pmd) 2342 return VM_FAULT_OOM; 2343 pte = pte_alloc_map(mm, pmd, address); 2344 if (!pte) 2345 return VM_FAULT_OOM; 2346 2347 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2348} 2349 2350EXPORT_SYMBOL_GPL(__handle_mm_fault); 2351 2352#ifndef __PAGETABLE_PUD_FOLDED 2353/* 2354 * Allocate page upper directory. 2355 * We've already handled the fast-path in-line. 2356 */ 2357int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 2358{ 2359 pud_t *new = pud_alloc_one(mm, address); 2360 if (!new) 2361 return -ENOMEM; 2362 2363 spin_lock(&mm->page_table_lock); 2364 if (pgd_present(*pgd)) /* Another has populated it */ 2365 pud_free(new); 2366 else 2367 pgd_populate(mm, pgd, new); 2368 spin_unlock(&mm->page_table_lock); 2369 return 0; 2370} 2371#else 2372/* Workaround for gcc 2.96 */ 2373int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 2374{ 2375 return 0; 2376} 2377#endif /* __PAGETABLE_PUD_FOLDED */ 2378 2379#ifndef __PAGETABLE_PMD_FOLDED 2380/* 2381 * Allocate page middle directory. 2382 * We've already handled the fast-path in-line. 2383 */ 2384int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2385{ 2386 pmd_t *new = pmd_alloc_one(mm, address); 2387 if (!new) 2388 return -ENOMEM; 2389 2390 spin_lock(&mm->page_table_lock); 2391#ifndef __ARCH_HAS_4LEVEL_HACK 2392 if (pud_present(*pud)) /* Another has populated it */ 2393 pmd_free(new); 2394 else 2395 pud_populate(mm, pud, new); 2396#else 2397 if (pgd_present(*pud)) /* Another has populated it */ 2398 pmd_free(new); 2399 else 2400 pgd_populate(mm, pud, new); 2401#endif /* __ARCH_HAS_4LEVEL_HACK */ 2402 spin_unlock(&mm->page_table_lock); 2403 return 0; 2404} 2405#else 2406/* Workaround for gcc 2.96 */ 2407int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2408{ 2409 return 0; 2410} 2411#endif /* __PAGETABLE_PMD_FOLDED */ 2412 2413int make_pages_present(unsigned long addr, unsigned long end) 2414{ 2415 int ret, len, write; 2416 struct vm_area_struct * vma; 2417 2418 vma = find_vma(current->mm, addr); 2419 if (!vma) 2420 return -1; 2421 write = (vma->vm_flags & VM_WRITE) != 0; 2422 BUG_ON(addr >= end); 2423 BUG_ON(end > vma->vm_end); 2424 len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE; 2425 ret = get_user_pages(current, current->mm, addr, 2426 len, write, 0, NULL, NULL); 2427 if (ret < 0) 2428 return ret; 2429 return ret == len ? 0 : -1; 2430} 2431 2432/* 2433 * Map a vmalloc()-space virtual address to the physical page. 
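 *
 * Illustrative use only (hypothetical caller, not code from this file):
 * a driver that vmalloc()ed a buffer and needs the backing struct page,
 * for example to hand it to an interface that wants pages rather than
 * virtual addresses, could do
 *
 *	struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);
 *
 * Note that the walk below returns NULL if nothing is currently mapped
 * at the given address.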
2434 */ 2435struct page * vmalloc_to_page(void * vmalloc_addr) 2436{ 2437 unsigned long addr = (unsigned long) vmalloc_addr; 2438 struct page *page = NULL; 2439 pgd_t *pgd = pgd_offset_k(addr); 2440 pud_t *pud; 2441 pmd_t *pmd; 2442 pte_t *ptep, pte; 2443 2444 if (!pgd_none(*pgd)) { 2445 pud = pud_offset(pgd, addr); 2446 if (!pud_none(*pud)) { 2447 pmd = pmd_offset(pud, addr); 2448 if (!pmd_none(*pmd)) { 2449 ptep = pte_offset_map(pmd, addr); 2450 pte = *ptep; 2451 if (pte_present(pte)) 2452 page = pte_page(pte); 2453 pte_unmap(ptep); 2454 } 2455 } 2456 } 2457 return page; 2458} 2459 2460EXPORT_SYMBOL(vmalloc_to_page); 2461 2462/* 2463 * Map a vmalloc()-space virtual address to the physical page frame number. 2464 */ 2465unsigned long vmalloc_to_pfn(void * vmalloc_addr) 2466{ 2467 return page_to_pfn(vmalloc_to_page(vmalloc_addr)); 2468} 2469 2470EXPORT_SYMBOL(vmalloc_to_pfn); 2471 2472#if !defined(__HAVE_ARCH_GATE_AREA) 2473 2474#if defined(AT_SYSINFO_EHDR) 2475static struct vm_area_struct gate_vma; 2476 2477static int __init gate_vma_init(void) 2478{ 2479 gate_vma.vm_mm = NULL; 2480 gate_vma.vm_start = FIXADDR_USER_START; 2481 gate_vma.vm_end = FIXADDR_USER_END; 2482 gate_vma.vm_page_prot = PAGE_READONLY; 2483 gate_vma.vm_flags = 0; 2484 return 0; 2485} 2486__initcall(gate_vma_init); 2487#endif 2488 2489struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 2490{ 2491#ifdef AT_SYSINFO_EHDR 2492 return &gate_vma; 2493#else 2494 return NULL; 2495#endif 2496} 2497 2498int in_gate_area_no_task(unsigned long addr) 2499{ 2500#ifdef AT_SYSINFO_EHDR 2501 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 2502 return 1; 2503#endif 2504 return 0; 2505} 2506 2507#endif /* __HAVE_ARCH_GATE_AREA */
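
/*
 * The generic gate "vma" above describes the fixed kernel-provided
 * mapping (such as the vsyscall page) that appears in every user
 * address space on architectures advertising AT_SYSINFO_EHDR, even
 * though no vm_area_struct for it is linked into any mm.  Callers such
 * as get_user_pages() and /proc/<pid>/maps rely on these helpers to
 * find and access it; architectures that define __HAVE_ARCH_GATE_AREA
 * supply their own versions instead.
 */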