pagemap: fix large pages in pagemap

We were walking right into huge page areas in the pagemap walker,
calling pmd_bad() on the huge-page pmds and clearing them.

That leaked huge pages. Bad.
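
For reference, the generic page-table walker dismisses "bad" pmds
through a helper that looks roughly like the sketch below (paraphrased
from the asm-generic pgtable code of this era, not quoted verbatim).
A huge-page pmd does not point at a pte page, so pmd_bad() can flag
it, and pmd_clear_bad() then wipes the entry, dropping the only
reference to the huge page:

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		/* a huge-page pmd can trip this check: the entry
		 * is cleared and the huge page is never freed */
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}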

This patch works around that for now. It ignores huge pages in the
pagemap walker for the time being, so those pages are no longer leaked.
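
The workaround keys off is_vm_hugetlb_page(), which is essentially
just a flag test on the VMA; a sketch of the usual definition
(assuming CONFIG_HUGETLB_PAGE is set):

static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_HUGETLB;
}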

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Dave Hansen, committed by Linus Torvalds (commit bcf8039e, parent 2165009b)

+30 -9
fs/proc/task_mmu.c
@@ -553,24 +553,45 @@
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+	unsigned long pme = 0;
+	if (is_swap_pte(pte))
+		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+	else if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
+	struct vm_area_struct *vma;
 	struct pagemapread *pm = walk->private;
 	pte_t *pte;
 	int err = 0;
 
+	/* find the first VMA at or above 'addr' */
+	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
 		u64 pfn = PM_NOT_PRESENT;
-		pte = pte_offset_map(pmd, addr);
-		if (is_swap_pte(*pte))
-			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
-		else if (pte_present(*pte))
-			pfn = PM_PFRAME(pte_pfn(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-		/* unmap so we're not in atomic when we copy to userspace */
-		pte_unmap(pte);
+
+		/* check to see if we've left 'vma' behind
+		 * and need a new, higher one */
+		if (vma && (addr >= vma->vm_end))
+			vma = find_vma(walk->mm, addr);
+
+		/* check that 'vma' actually covers this address,
+		 * and that it isn't a huge page vma */
+		if (vma && (vma->vm_start <= addr) &&
+		    !is_vm_hugetlb_page(vma)) {
+			pte = pte_offset_map(pmd, addr);
+			pfn = pte_to_pagemap_entry(*pte);
+			/* unmap before userspace copy */
+			pte_unmap(pte);
+		}
 		err = add_to_pagemap(addr, pfn, pm);
 		if (err)
 			return err;
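
As a usage sketch, here is how userspace might read these entries
back. The constants below are derived from the PM_* macros in this
version of task_mmu.c (three status bits at the top of each 64-bit
entry, a 6-bit page-shift field below them, the page frame number in
the low bits), so PM_PRESENT lands on bit 63 and PM_SWAP on bit 62;
the file path and the one-u64-per-virtual-page indexing are standard
pagemap behavior:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Derived from the PM_* macros above: 3 status bits at the top,
 * then 6 page-shift bits, then the page frame number. */
#define PM_PRESENT	(1ULL << 63)	/* PM_STATUS(4) */
#define PM_SWAP		(1ULL << 62)	/* PM_STATUS(2) */
#define PM_PSHIFT_BITS	6
#define PM_PFRAME_MASK	((1ULL << (64 - 3 - PM_PSHIFT_BITS)) - 1)

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	uint64_t entry;
	uintptr_t vaddr = (uintptr_t)&fd;	/* any mapped address */
	/* one 8-byte entry per virtual page, indexed by vaddr / pagesize */
	off_t off = (off_t)(vaddr / pagesize) * sizeof(entry);

	if (fd < 0 || pread(fd, &entry, sizeof(entry), off) != sizeof(entry))
		return 1;

	if (entry & PM_PRESENT)
		printf("vaddr %#lx -> pfn %#llx\n", (unsigned long)vaddr,
		       (unsigned long long)(entry & PM_PFRAME_MASK));
	else if (entry & PM_SWAP)
		printf("vaddr %#lx is swapped out\n", (unsigned long)vaddr);
	else
		printf("vaddr %#lx not present\n", (unsigned long)vaddr);

	close(fd);
	return 0;
}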