Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: Pass VMA to copy_user_highpage() implementations

Our copy_user_highpage() implementations may require cache maintenance.
Ensure that implementations have all necessary details to perform this
maintenance.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+13 -12
+4 -3
arch/arm/include/asm/page.h
··· 117 117 #endif 118 118 119 119 struct page; 120 + struct vm_area_struct; 120 121 121 122 struct cpu_user_fns { 122 123 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); 123 124 void (*cpu_copy_user_highpage)(struct page *to, struct page *from, 124 - unsigned long vaddr); 125 + unsigned long vaddr, struct vm_area_struct *vma); 125 126 }; 126 127 127 128 #ifdef MULTI_USER ··· 138 137 139 138 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); 140 139 extern void __cpu_copy_user_highpage(struct page *to, struct page *from, 141 - unsigned long vaddr); 140 + unsigned long vaddr, struct vm_area_struct *vma); 142 141 #endif 143 142 144 143 #define clear_user_highpage(page,vaddr) \ ··· 146 145 147 146 #define __HAVE_ARCH_COPY_USER_HIGHPAGE 148 147 #define copy_user_highpage(to,from,vaddr,vma) \ 149 - __cpu_copy_user_highpage(to, from, vaddr) 148 + __cpu_copy_user_highpage(to, from, vaddr, vma) 150 149 151 150 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) 152 151 extern void copy_page(void *to, const void *from);
+1 -1
arch/arm/mm/copypage-feroceon.c
··· 68 68 } 69 69 70 70 void feroceon_copy_user_highpage(struct page *to, struct page *from, 71 - unsigned long vaddr) 71 + unsigned long vaddr, struct vm_area_struct *vma) 72 72 { 73 73 void *kto, *kfrom; 74 74
+1 -1
arch/arm/mm/copypage-v3.c
··· 38 38 } 39 39 40 40 void v3_copy_user_highpage(struct page *to, struct page *from, 41 - unsigned long vaddr) 41 + unsigned long vaddr, struct vm_area_struct *vma) 42 42 { 43 43 void *kto, *kfrom; 44 44
+1 -1
arch/arm/mm/copypage-v4mc.c
··· 69 69 } 70 70 71 71 void v4_mc_copy_user_highpage(struct page *to, struct page *from, 72 - unsigned long vaddr) 72 + unsigned long vaddr, struct vm_area_struct *vma) 73 73 { 74 74 void *kto = kmap_atomic(to, KM_USER1); 75 75
+1 -1
arch/arm/mm/copypage-v4wb.c
··· 48 48 } 49 49 50 50 void v4wb_copy_user_highpage(struct page *to, struct page *from, 51 - unsigned long vaddr) 51 + unsigned long vaddr, struct vm_area_struct *vma) 52 52 { 53 53 void *kto, *kfrom; 54 54
+1 -1
arch/arm/mm/copypage-v4wt.c
··· 44 44 } 45 45 46 46 void v4wt_copy_user_highpage(struct page *to, struct page *from, 47 - unsigned long vaddr) 47 + unsigned long vaddr, struct vm_area_struct *vma) 48 48 { 49 49 void *kto, *kfrom; 50 50
+2 -2
arch/arm/mm/copypage-v6.c
··· 34 34 * attack the kernel's existing mapping of these pages. 35 35 */ 36 36 static void v6_copy_user_highpage_nonaliasing(struct page *to, 37 - struct page *from, unsigned long vaddr) 37 + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) 38 38 { 39 39 void *kto, *kfrom; 40 40 ··· 73 73 * Copy the page, taking account of the cache colour. 74 74 */ 75 75 static void v6_copy_user_highpage_aliasing(struct page *to, 76 - struct page *from, unsigned long vaddr) 76 + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) 77 77 { 78 78 unsigned int offset = CACHE_COLOUR(vaddr); 79 79 unsigned long kfrom, kto;
+1 -1
arch/arm/mm/copypage-xsc3.c
··· 71 71 } 72 72 73 73 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, 74 - unsigned long vaddr) 74 + unsigned long vaddr, struct vm_area_struct *vma) 75 75 { 76 76 void *kto, *kfrom; 77 77
+1 -1
arch/arm/mm/copypage-xscale.c
··· 91 91 } 92 92 93 93 void xscale_mc_copy_user_highpage(struct page *to, struct page *from, 94 - unsigned long vaddr) 94 + unsigned long vaddr, struct vm_area_struct *vma) 95 95 { 96 96 void *kto = kmap_atomic(to, KM_USER1); 97 97