Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: re-architect the VM_UNPAGED logic

This replaces the (in my opinion horrible) VM_UNPAGED logic with very
explicit support for a "remapped page range" aka VM_PFNMAP. It allows a
VM area to contain an arbitrary range of page table entries that the VM
never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new
functionality, and doesn't even have to mark the pages reserved or
indeed mark them any other way. It just works. As a side effect, doing
mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>

+127 -146
+2 -4
arch/powerpc/kernel/vdso.c
··· 145 145 struct page *pg = virt_to_page(vdso32_kbase + 146 146 i*PAGE_SIZE); 147 147 struct page *upg = (vma && vma->vm_mm) ? 148 - follow_page(vma->vm_mm, vma->vm_start + 149 - i*PAGE_SIZE, 0) 148 + follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0) 150 149 : NULL; 151 150 dump_one_vdso_page(pg, upg); 152 151 } ··· 156 157 struct page *pg = virt_to_page(vdso64_kbase + 157 158 i*PAGE_SIZE); 158 159 struct page *upg = (vma && vma->vm_mm) ? 159 - follow_page(vma->vm_mm, vma->vm_start + 160 - i*PAGE_SIZE, 0) 160 + follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0) 161 161 : NULL; 162 162 dump_one_vdso_page(pg, upg); 163 163 }
+1 -1
drivers/char/mem.c
··· 591 591 592 592 if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0) 593 593 goto out_up; 594 - if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED)) 594 + if (vma->vm_flags & (VM_SHARED | VM_HUGETLB)) 595 595 break; 596 596 count = vma->vm_end - addr; 597 597 if (count > size)
+3 -4
fs/proc/task_mmu.c
··· 402 402 /* 403 403 * Calculate numa node maps for a vma 404 404 */ 405 - static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma) 405 + static struct numa_maps *get_numa_maps(struct vm_area_struct *vma) 406 406 { 407 + int i; 407 408 struct page *page; 408 409 unsigned long vaddr; 409 - struct mm_struct *mm = vma->vm_mm; 410 - int i; 411 410 struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL); 412 411 413 412 if (!md) ··· 419 420 md->node[i] =0; 420 421 421 422 for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { 422 - page = follow_page(mm, vaddr, 0); 423 + page = follow_page(vma, vaddr, 0); 423 424 if (page) { 424 425 int count = page_mapcount(page); 425 426
+3 -2
include/linux/mm.h
··· 145 145 #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ 146 146 #define VM_GROWSUP 0x00000200 147 147 #define VM_SHM 0x00000000 /* Means nothing: delete it later */ 148 - #define VM_UNPAGED 0x00000400 /* Pages managed without map count */ 148 + #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ 149 149 #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ 150 150 151 151 #define VM_EXECUTABLE 0x00001000 ··· 664 664 unsigned long truncate_count; /* Compare vm_truncate_count */ 665 665 }; 666 666 667 + struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t); 667 668 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, 668 669 unsigned long size, struct zap_details *); 669 670 unsigned long unmap_vmas(struct mmu_gather **tlb, ··· 954 953 int remap_pfn_range(struct vm_area_struct *, unsigned long addr, 955 954 unsigned long pfn, unsigned long size, pgprot_t); 956 955 957 - struct page *follow_page(struct mm_struct *, unsigned long address, 956 + struct page *follow_page(struct vm_area_struct *, unsigned long address, 958 957 unsigned int foll_flags); 959 958 #define FOLL_WRITE 0x01 /* check pte is writable */ 960 959 #define FOLL_TOUCH 0x02 /* mark page accessed */
+7 -15
mm/fremap.c
··· 27 27 struct page *page = NULL; 28 28 29 29 if (pte_present(pte)) { 30 - unsigned long pfn = pte_pfn(pte); 31 - flush_cache_page(vma, addr, pfn); 30 + flush_cache_page(vma, addr, pte_pfn(pte)); 32 31 pte = ptep_clear_flush(vma, addr, ptep); 33 - if (unlikely(!pfn_valid(pfn))) { 34 - print_bad_pte(vma, pte, addr); 35 - goto out; 32 + page = vm_normal_page(vma, addr, pte); 33 + if (page) { 34 + if (pte_dirty(pte)) 35 + set_page_dirty(page); 36 + page_remove_rmap(page); 37 + page_cache_release(page); 36 38 } 37 - page = pfn_to_page(pfn); 38 - if (pte_dirty(pte)) 39 - set_page_dirty(page); 40 - page_remove_rmap(page); 41 - page_cache_release(page); 42 39 } else { 43 40 if (!pte_file(pte)) 44 41 free_swap_and_cache(pte_to_swp_entry(pte)); 45 42 pte_clear(mm, addr, ptep); 46 43 } 47 - out: 48 44 return !!page; 49 45 } 50 46 ··· 60 64 pgd_t *pgd; 61 65 pte_t pte_val; 62 66 spinlock_t *ptl; 63 - 64 - BUG_ON(vma->vm_flags & VM_UNPAGED); 65 67 66 68 pgd = pgd_offset(mm, addr); 67 69 pud = pud_alloc(mm, pgd, addr); ··· 115 121 pgd_t *pgd; 116 122 pte_t pte_val; 117 123 spinlock_t *ptl; 118 - 119 - BUG_ON(vma->vm_flags & VM_UNPAGED); 120 124 121 125 pgd = pgd_offset(mm, addr); 122 126 pud = pud_alloc(mm, pgd, addr);
+1 -1
mm/madvise.c
··· 126 126 unsigned long start, unsigned long end) 127 127 { 128 128 *prev = vma; 129 - if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_UNPAGED)) 129 + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) 130 130 return -EINVAL; 131 131 132 132 if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
+100 -89
mm/memory.c
··· 333 333 } 334 334 335 335 /* 336 - * This function is called to print an error when a pte in a 337 - * !VM_UNPAGED region is found pointing to an invalid pfn (which 338 - * is an error. 336 + * This function is called to print an error when a bad pte 337 + * is found. For example, we might have a PFN-mapped pte in 338 + * a region that doesn't allow it. 339 339 * 340 340 * The calling function must still handle the error. 341 341 */ ··· 350 350 } 351 351 352 352 /* 353 - * page_is_anon applies strict checks for an anonymous page belonging to 354 - * this vma at this address. It is used on VM_UNPAGED vmas, which are 355 - * usually populated with shared originals (which must not be counted), 356 - * but occasionally contain private COWed copies (when !VM_SHARED, or 357 - * perhaps via ptrace when VM_SHARED). An mmap of /dev/mem might window 358 - * free pages, pages from other processes, or from other parts of this: 359 - * it's tricky, but try not to be deceived by foreign anonymous pages. 353 + * This function gets the "struct page" associated with a pte. 354 + * 355 + * NOTE! Some mappings do not have "struct pages". A raw PFN mapping 356 + * will have each page table entry just pointing to a raw page frame 357 + * number, and as far as the VM layer is concerned, those do not have 358 + * pages associated with them - even if the PFN might point to memory 359 + * that otherwise is perfectly fine and has a "struct page". 
360 + * 361 + * The way we recognize those mappings is through the rules set up 362 + * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set, 363 + * and the vm_pgoff will point to the first PFN mapped: thus every 364 + * page that is a raw mapping will always honor the rule 365 + * 366 + * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) 367 + * 368 + * and if that isn't true, the page has been COW'ed (in which case it 369 + * _does_ have a "struct page" associated with it even if it is in a 370 + * VM_PFNMAP range). 360 371 */ 361 - static inline int page_is_anon(struct page *page, 362 - struct vm_area_struct *vma, unsigned long addr) 372 + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) 363 373 { 364 - return page && PageAnon(page) && page_mapped(page) && 365 - page_address_in_vma(page, vma) == addr; 374 + unsigned long pfn = pte_pfn(pte); 375 + 376 + if (vma->vm_flags & VM_PFNMAP) { 377 + unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; 378 + if (pfn == vma->vm_pgoff + off) 379 + return NULL; 380 + } 381 + 382 + /* 383 + * Add some anal sanity checks for now. Eventually, 384 + * we should just do "return pfn_to_page(pfn)", but 385 + * in the meantime we check that we get a valid pfn, 386 + * and that the resulting page looks ok. 387 + * 388 + * Remove this test eventually! 389 + */ 390 + if (unlikely(!pfn_valid(pfn))) { 391 + print_bad_pte(vma, pte, addr); 392 + return NULL; 393 + } 394 + 395 + /* 396 + * NOTE! We still have PageReserved() pages in the page 397 + * tables. 398 + * 399 + * The PAGE_ZERO() pages and various VDSO mappings can 400 + * cause them to exist. 401 + */ 402 + return pfn_to_page(pfn); 366 403 } 367 404 368 405 /* ··· 416 379 unsigned long vm_flags = vma->vm_flags; 417 380 pte_t pte = *src_pte; 418 381 struct page *page; 419 - unsigned long pfn; 420 382 421 383 /* pte contains position in swap or file, so copy. 
*/ 422 384 if (unlikely(!pte_present(pte))) { ··· 431 395 } 432 396 } 433 397 goto out_set_pte; 434 - } 435 - 436 - pfn = pte_pfn(pte); 437 - page = pfn_valid(pfn)? pfn_to_page(pfn): NULL; 438 - 439 - if (unlikely(vm_flags & VM_UNPAGED)) 440 - if (!page_is_anon(page, vma, addr)) 441 - goto out_set_pte; 442 - 443 - /* 444 - * If the pte points outside of valid memory but 445 - * the region is not VM_UNPAGED, we have a problem. 446 - */ 447 - if (unlikely(!page)) { 448 - print_bad_pte(vma, pte, addr); 449 - goto out_set_pte; /* try to do something sane */ 450 398 } 451 399 452 400 /* ··· 449 429 if (vm_flags & VM_SHARED) 450 430 pte = pte_mkclean(pte); 451 431 pte = pte_mkold(pte); 452 - get_page(page); 453 - page_dup_rmap(page); 454 - rss[!!PageAnon(page)]++; 432 + 433 + page = vm_normal_page(vma, addr, pte); 434 + if (page) { 435 + get_page(page); 436 + page_dup_rmap(page); 437 + rss[!!PageAnon(page)]++; 438 + } 455 439 456 440 out_set_pte: 457 441 set_pte_at(dst_mm, addr, dst_pte, pte); ··· 567 543 * readonly mappings. The tradeoff is that copy_page_range is more 568 544 * efficient than faulting. 569 545 */ 570 - if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) { 546 + if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) { 571 547 if (!vma->anon_vma) 572 548 return 0; 573 549 } ··· 608 584 } 609 585 if (pte_present(ptent)) { 610 586 struct page *page; 611 - unsigned long pfn; 612 587 613 588 (*zap_work) -= PAGE_SIZE; 614 589 615 - pfn = pte_pfn(ptent); 616 - page = pfn_valid(pfn)? pfn_to_page(pfn): NULL; 617 - 618 - if (unlikely(vma->vm_flags & VM_UNPAGED)) { 619 - if (!page_is_anon(page, vma, addr)) 620 - page = NULL; 621 - } else if (unlikely(!page)) 622 - print_bad_pte(vma, ptent, addr); 623 - 590 + page = vm_normal_page(vma, addr, ptent); 624 591 if (unlikely(details) && page) { 625 592 /* 626 593 * unmap_shared_mapping_pages() wants to ··· 867 852 /* 868 853 * Do a quick page-table lookup for a single page. 
869 854 */ 870 - struct page *follow_page(struct mm_struct *mm, unsigned long address, 855 + struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 871 856 unsigned int flags) 872 857 { 873 858 pgd_t *pgd; ··· 875 860 pmd_t *pmd; 876 861 pte_t *ptep, pte; 877 862 spinlock_t *ptl; 878 - unsigned long pfn; 879 863 struct page *page; 864 + struct mm_struct *mm = vma->vm_mm; 880 865 881 866 page = follow_huge_addr(mm, address, flags & FOLL_WRITE); 882 867 if (!IS_ERR(page)) { ··· 912 897 goto unlock; 913 898 if ((flags & FOLL_WRITE) && !pte_write(pte)) 914 899 goto unlock; 915 - pfn = pte_pfn(pte); 916 - if (!pfn_valid(pfn)) 900 + page = vm_normal_page(vma, address, pte); 901 + if (unlikely(!page)) 917 902 goto unlock; 918 903 919 - page = pfn_to_page(pfn); 920 904 if (flags & FOLL_GET) 921 905 get_page(page); 922 906 if (flags & FOLL_TOUCH) { ··· 988 974 return i ? : -EFAULT; 989 975 } 990 976 if (pages) { 991 - pages[i] = pte_page(*pte); 992 - get_page(pages[i]); 977 + struct page *page = vm_normal_page(vma, start, *pte); 978 + pages[i] = page; 979 + if (page) 980 + get_page(page); 993 981 } 994 982 pte_unmap(pte); 995 983 if (vmas) ··· 1026 1010 foll_flags |= FOLL_WRITE; 1027 1011 1028 1012 cond_resched(); 1029 - while (!(page = follow_page(mm, start, foll_flags))) { 1013 + while (!(page = follow_page(vma, start, foll_flags))) { 1030 1014 int ret; 1031 1015 ret = __handle_mm_fault(mm, vma, start, 1032 1016 foll_flags & FOLL_WRITE); ··· 1230 1214 * in 2.6 the LRU scan won't even find its pages, so this 1231 1215 * flag means no more than count its pages in reserved_vm, 1232 1216 * and omit it from core dump, even when VM_IO turned off. 1233 - * VM_UNPAGED tells the core MM not to "manage" these pages 1234 - * (e.g. refcount, mapcount, try to swap them out): in 1235 - * particular, zap_pte_range does not try to free them. 
1217 + * VM_PFNMAP tells the core MM that the base pages are just 1218 + * raw PFN mappings, and do not have a "struct page" associated 1219 + * with them. 1236 1220 */ 1237 - vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED; 1221 + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 1222 + vma->vm_pgoff = pfn; 1238 1223 1239 1224 BUG_ON(addr >= end); 1240 1225 pfn -= addr >> PAGE_SHIFT; ··· 1290 1273 return pte; 1291 1274 } 1292 1275 1276 + static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va) 1277 + { 1278 + /* 1279 + * If the source page was a PFN mapping, we don't have 1280 + * a "struct page" for it. We do a best-effort copy by 1281 + * just copying from the original user address. If that 1282 + * fails, we just zero-fill it. Live with it. 1283 + */ 1284 + if (unlikely(!src)) { 1285 + void *kaddr = kmap_atomic(dst, KM_USER0); 1286 + unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE); 1287 + if (left) 1288 + memset(kaddr, 0, PAGE_SIZE); 1289 + kunmap_atomic(kaddr, KM_USER0); 1290 + return; 1291 + 1292 + } 1293 + copy_user_highpage(dst, src, va); 1294 + } 1295 + 1293 1296 /* 1294 1297 * This routine handles present pages, when users try to write 1295 1298 * to a shared page. It is done by copying the page to a new address ··· 1333 1296 spinlock_t *ptl, pte_t orig_pte) 1334 1297 { 1335 1298 struct page *old_page, *src_page, *new_page; 1336 - unsigned long pfn = pte_pfn(orig_pte); 1337 1299 pte_t entry; 1338 1300 int ret = VM_FAULT_MINOR; 1339 1301 1340 - if (unlikely(!pfn_valid(pfn))) { 1341 - /* 1342 - * Page table corrupted: show pte and kill process. 1343 - * Or it's an attempt to COW an out-of-map VM_UNPAGED 1344 - * entry, which copy_user_highpage does not support. 
1345 - */ 1346 - print_bad_pte(vma, orig_pte, address); 1347 - ret = VM_FAULT_OOM; 1348 - goto unlock; 1349 - } 1350 - old_page = pfn_to_page(pfn); 1302 + old_page = vm_normal_page(vma, address, orig_pte); 1351 1303 src_page = old_page; 1352 - 1353 - if (unlikely(vma->vm_flags & VM_UNPAGED)) 1354 - if (!page_is_anon(old_page, vma, address)) { 1355 - old_page = NULL; 1356 - goto gotten; 1357 - } 1304 + if (!old_page) 1305 + goto gotten; 1358 1306 1359 1307 if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { 1360 1308 int reuse = can_share_swap_page(old_page); ··· 1373 1351 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); 1374 1352 if (!new_page) 1375 1353 goto oom; 1376 - copy_user_highpage(new_page, src_page, address); 1354 + cow_user_page(new_page, src_page, address); 1377 1355 } 1378 1356 1379 1357 /* ··· 1834 1812 spinlock_t *ptl; 1835 1813 pte_t entry; 1836 1814 1837 - /* 1838 - * A VM_UNPAGED vma will normally be filled with present ptes 1839 - * by remap_pfn_range, and never arrive here; but it might have 1840 - * holes, or if !VM_DONTEXPAND, mremap might have expanded it. 1841 - * It's weird enough handling anon pages in unpaged vmas, we do 1842 - * not want to worry about ZERO_PAGEs too (it may or may not 1843 - * matter if their counts wrap): just give them anon pages. 1844 - */ 1845 - 1846 - if (write_access || (vma->vm_flags & VM_UNPAGED)) { 1815 + if (write_access) { 1847 1816 /* Allocate our own private page. 
*/ 1848 1817 pte_unmap(page_table); 1849 1818 ··· 1909 1896 int anon = 0; 1910 1897 1911 1898 pte_unmap(page_table); 1912 - BUG_ON(vma->vm_flags & VM_UNPAGED); 1913 - 1914 1899 if (vma->vm_file) { 1915 1900 mapping = vma->vm_file->f_mapping; 1916 1901 sequence = mapping->truncate_count; ··· 1941 1930 page = alloc_page_vma(GFP_HIGHUSER, vma, address); 1942 1931 if (!page) 1943 1932 goto oom; 1944 - copy_user_highpage(page, new_page, address); 1933 + cow_user_page(page, new_page, address); 1945 1934 page_cache_release(new_page); 1946 1935 new_page = page; 1947 1936 anon = 1;
+4 -8
mm/mempolicy.c
··· 189 189 190 190 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 191 191 do { 192 - unsigned long pfn; 192 + struct page *page; 193 193 unsigned int nid; 194 194 195 195 if (!pte_present(*pte)) 196 196 continue; 197 - pfn = pte_pfn(*pte); 198 - if (!pfn_valid(pfn)) { 199 - print_bad_pte(vma, *pte, addr); 197 + page = vm_normal_page(vma, addr, *pte); 198 + if (!page) 200 199 continue; 201 - } 202 - nid = pfn_to_nid(pfn); 200 + nid = page_to_nid(page); 203 201 if (!node_isset(nid, *nodes)) 204 202 break; 205 203 } while (pte++, addr += PAGE_SIZE, addr != end); ··· 267 269 first = find_vma(mm, start); 268 270 if (!first) 269 271 return ERR_PTR(-EFAULT); 270 - if (first->vm_flags & VM_UNPAGED) 271 - return ERR_PTR(-EACCES); 272 272 prev = NULL; 273 273 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { 274 274 if (!vma->vm_next && vma->vm_end < end)
+3 -9
mm/msync.c
··· 27 27 again: 28 28 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 29 29 do { 30 - unsigned long pfn; 31 30 struct page *page; 32 31 33 32 if (progress >= 64) { ··· 39 40 continue; 40 41 if (!pte_maybe_dirty(*pte)) 41 42 continue; 42 - pfn = pte_pfn(*pte); 43 - if (unlikely(!pfn_valid(pfn))) { 44 - print_bad_pte(vma, *pte, addr); 43 + page = vm_normal_page(vma, addr, *pte); 44 + if (!page) 45 45 continue; 46 - } 47 - page = pfn_to_page(pfn); 48 - 49 46 if (ptep_clear_flush_dirty(vma, addr, pte) || 50 47 page_test_and_clear_dirty(page)) 51 48 set_page_dirty(page); ··· 92 97 /* For hugepages we can't go walking the page table normally, 93 98 * but that's ok, hugetlbfs is memory based, so we don't need 94 99 * to do anything more on an msync(). 95 - * Can't do anything with VM_UNPAGED regions either. 96 100 */ 97 - if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED)) 101 + if (vma->vm_flags & VM_HUGETLB) 98 102 return; 99 103 100 104 BUG_ON(addr >= end);
+1 -1
mm/nommu.c
··· 1045 1045 1046 1046 EXPORT_SYMBOL(find_vma); 1047 1047 1048 - struct page *follow_page(struct mm_struct *mm, unsigned long address, 1048 + struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 1049 1049 unsigned int foll_flags) 1050 1050 { 1051 1051 return NULL;
+2 -12
mm/rmap.c
··· 226 226 /* 227 227 * At what user virtual address is page expected in vma? checking that the 228 228 * page matches the vma: currently only used on anon pages, by unuse_vma; 229 - * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking 230 - * care that an mmap of /dev/mem might window free and foreign pages. 231 229 */ 232 230 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 233 231 { ··· 612 614 struct page *page; 613 615 unsigned long address; 614 616 unsigned long end; 615 - unsigned long pfn; 616 617 617 618 address = (vma->vm_start + cursor) & CLUSTER_MASK; 618 619 end = address + CLUSTER_SIZE; ··· 640 643 for (; address < end; pte++, address += PAGE_SIZE) { 641 644 if (!pte_present(*pte)) 642 645 continue; 643 - 644 - pfn = pte_pfn(*pte); 645 - if (unlikely(!pfn_valid(pfn))) { 646 - print_bad_pte(vma, *pte, address); 647 - continue; 648 - } 649 - 650 - page = pfn_to_page(pfn); 651 - BUG_ON(PageAnon(page)); 646 + page = vm_normal_page(vma, address, *pte); 647 + BUG_ON(!page || PageAnon(page)); 652 648 653 649 if (ptep_clear_flush_young(vma, address, pte)) 654 650 continue;