[PATCH] Pass vma argument to copy_user_highpage().

To allow a more effective copy_user_highpage() on certain architectures,
a vma argument is added to copy_user_highpage() and cow_user_page(),
allowing the implementations of these functions to check for the VM_EXEC bit.
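
Not part of this patch, but as an illustrative sketch of what the new argument
enables: an architecture that defines __HAVE_ARCH_COPY_USER_HIGHPAGE could use
the vma to skip an instruction-cache writeback for non-executable mappings,
roughly along these lines.  arch_flush_icache_page() is a hypothetical
placeholder, not an existing kernel interface:

	/* Hypothetical arch override, e.g. in an asm/ header -- sketch only */
	#define __HAVE_ARCH_COPY_USER_HIGHPAGE

	static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		void *vfrom = kmap_atomic(from, KM_USER0);
		void *vto = kmap_atomic(to, KM_USER1);

		copy_page(vto, vfrom);
		if (vma->vm_flags & VM_EXEC)
			arch_flush_icache_page(to);	/* hypothetical cache hook */

		kunmap_atomic(vto, KM_USER1);
		kunmap_atomic(vfrom, KM_USER0);
	}

Passing the whole vma rather than a single flag keeps the mm/ callers to a
mechanical change: they only thread through a pointer they already have.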

The main part of this patch was originally written by Ralf Baechle;
Atsushi Nemoto did the debugging.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Atsushi Nemoto, committed by Linus Torvalds (9de455b2, 77fff4ae)

+10 -9
+2 -1
include/linux/highmem.h
@@ -98,7 +98,8 @@
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
 
-static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
+static inline void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	char *vfrom, *vto;
 
+3 -3
mm/hugetlb.c
@@ -44,14 +44,14 @@
 }
 
 static void copy_huge_page(struct page *dst, struct page *src,
-			   unsigned long addr)
+			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 
 	might_sleep();
 	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
 		cond_resched();
-		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
+		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
 	}
 }
 
@@ -442,7 +442,7 @@
 	}
 
 	spin_unlock(&mm->page_table_lock);
-	copy_huge_page(new_page, old_page, address);
+	copy_huge_page(new_page, old_page, address, vma);
 	spin_lock(&mm->page_table_lock);
 
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
+5 -5
mm/memory.c
@@ -1441,7 +1441,7 @@
 	return pte;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
 	/*
 	 * If the source page was a PFN mapping, we don't have
@@ -1464,9 +1464,9 @@
 		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(dst);
 		return;
-		
+
 	}
-	copy_user_highpage(dst, src, va);
+	copy_user_highpage(dst, src, va, vma);
 }
 
 /*
@@ -1577,7 +1577,7 @@
 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 		if (!new_page)
 			goto oom;
-		cow_user_page(new_page, old_page, address);
+		cow_user_page(new_page, old_page, address, vma);
 	}
 
 	/*
@@ -2200,7 +2200,7 @@
 			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 			if (!page)
 				goto oom;
-			copy_user_highpage(page, new_page, address);
+			copy_user_highpage(page, new_page, address, vma);
 			page_cache_release(new_page);
 			new_page = page;
 			anon = 1;