Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[ARM] clearpage: provide our own clear_user_highpage()

For similar reasons as copy_user_page(), we want to avoid the
additional kmap_atomic if it's unnecessary.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Russell King; committed by Russell King.
303c6443 063b0a42

+100 -100
+6 -5
arch/arm/include/asm/page.h
··· 111 111 struct page; 112 112 113 113 struct cpu_user_fns { 114 - void (*cpu_clear_user_page)(void *p, unsigned long user); 114 + void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); 115 115 void (*cpu_copy_user_highpage)(struct page *to, struct page *from, 116 116 unsigned long vaddr); 117 117 }; ··· 119 119 #ifdef MULTI_USER 120 120 extern struct cpu_user_fns cpu_user; 121 121 122 - #define __cpu_clear_user_page cpu_user.cpu_clear_user_page 122 + #define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage 123 123 #define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage 124 124 125 125 #else 126 126 127 - #define __cpu_clear_user_page __glue(_USER,_clear_user_page) 127 + #define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage) 128 128 #define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) 129 129 130 - extern void __cpu_clear_user_page(void *p, unsigned long user); 130 + extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); 131 131 extern void __cpu_copy_user_highpage(struct page *to, struct page *from, 132 132 unsigned long vaddr); 133 133 #endif 134 134 135 - #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) 135 + #define clear_user_highpage(page,vaddr) \ 136 + __cpu_clear_user_highpage(page, vaddr) 136 137 137 138 #define __HAVE_ARCH_COPY_USER_HIGHPAGE 138 139 #define copy_user_highpage(to,from,vaddr,vma) \
+10 -10
arch/arm/mm/copypage-feroceon.c
··· 79 79 kunmap_atomic(kto, KM_USER0); 80 80 } 81 81 82 - void __attribute__((naked)) 83 - feroceon_clear_user_page(void *kaddr, unsigned long vaddr) 82 + void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) 84 83 { 84 + void *kaddr = kmap_atomic(page, KM_USER0); 85 85 asm("\ 86 - stmfd sp!, {r4-r7, lr} \n\ 87 - mov r1, %0 \n\ 86 + mov r1, %1 \n\ 88 87 mov r2, #0 \n\ 89 88 mov r3, #0 \n\ 90 89 mov r4, #0 \n\ ··· 92 93 mov r7, #0 \n\ 93 94 mov ip, #0 \n\ 94 95 mov lr, #0 \n\ 95 - 1: stmia r0, {r2-r7, ip, lr} \n\ 96 + 1: stmia %0, {r2-r7, ip, lr} \n\ 96 97 subs r1, r1, #1 \n\ 97 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ 98 + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ 98 99 add r0, r0, #32 \n\ 99 100 bne 1b \n\ 100 - mcr p15, 0, r1, c7, c10, 4 @ drain WB\n\ 101 - ldmfd sp!, {r4-r7, pc}" 101 + mcr p15, 0, r1, c7, c10, 4 @ drain WB" 102 102 : 103 - : "I" (PAGE_SIZE / 32)); 103 + : "r" (kaddr), "I" (PAGE_SIZE / 32) 104 + : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); 105 + kunmap_atomic(kaddr, KM_USER0); 104 106 } 105 107 106 108 struct cpu_user_fns feroceon_user_fns __initdata = { 107 - .cpu_clear_user_page = feroceon_clear_user_page, 109 + .cpu_clear_user_highpage = feroceon_clear_user_highpage, 108 110 .cpu_copy_user_highpage = feroceon_copy_user_highpage, 109 111 }; 110 112
+7 -6
arch/arm/mm/copypage-v3.c
··· 54 54 * 55 55 * FIXME: do we need to handle cache stuff... 56 56 */ 57 - void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) 57 + void v3_clear_user_highpage(struct page *page, unsigned long vaddr) 58 58 { 59 + void *kaddr = kmap_atomic(page, KM_USER0); 59 60 asm("\n\ 60 - str lr, [sp, #-4]!\n\ 61 61 mov r1, %1 @ 1\n\ 62 62 mov r2, #0 @ 1\n\ 63 63 mov r3, #0 @ 1\n\ ··· 68 68 stmia %0!, {r2, r3, ip, lr} @ 4\n\ 69 69 stmia %0!, {r2, r3, ip, lr} @ 4\n\ 70 70 subs r1, r1, #1 @ 1\n\ 71 - bne 1b @ 1\n\ 72 - ldr pc, [sp], #4" 71 + bne 1b @ 1" 73 72 : 74 - : "r" (kaddr), "I" (PAGE_SIZE / 64)); 73 + : "r" (kaddr), "I" (PAGE_SIZE / 64) 74 + : "r1", "r2", "r3", "ip", "lr"); 75 + kunmap_atomic(kaddr, KM_USER0); 75 76 } 76 77 77 78 struct cpu_user_fns v3_user_fns __initdata = { 78 - .cpu_clear_user_page = v3_clear_user_page, 79 + .cpu_clear_user_highpage = v3_clear_user_highpage, 79 80 .cpu_copy_user_highpage = v3_copy_user_highpage, 80 81 };
+14 -14
arch/arm/mm/copypage-v4mc.c
··· 91 91 /* 92 92 * ARMv4 optimised clear_user_page 93 93 */ 94 - void __attribute__((naked)) 95 - v4_mc_clear_user_page(void *kaddr, unsigned long vaddr) 94 + void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) 96 95 { 97 - asm volatile( 98 - "str lr, [sp, #-4]!\n\ 96 + void *kaddr = kmap_atomic(page, KM_USER0); 97 + asm volatile("\ 99 98 mov r1, %0 @ 1\n\ 100 99 mov r2, #0 @ 1\n\ 101 100 mov r3, #0 @ 1\n\ 102 101 mov ip, #0 @ 1\n\ 103 102 mov lr, #0 @ 1\n\ 104 - 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 105 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 106 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 107 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 108 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 109 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 103 + 1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ 104 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 105 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 106 + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ 107 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 108 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 110 109 subs r1, r1, #1 @ 1\n\ 111 - bne 1b @ 1\n\ 112 - ldr pc, [sp], #4" 110 + bne 1b @ 1" 113 111 : 114 - : "I" (PAGE_SIZE / 64)); 112 + : "r" (kaddr), "I" (PAGE_SIZE / 64) 113 + : "r1", "r2", "r3", "ip", "lr"); 114 + kunmap_atomic(kaddr, KM_USER0); 115 115 } 116 116 117 117 struct cpu_user_fns v4_mc_user_fns __initdata = { 118 - .cpu_clear_user_page = v4_mc_clear_user_page, 118 + .cpu_clear_user_highpage = v4_mc_clear_user_highpage, 119 119 .cpu_copy_user_highpage = v4_mc_copy_user_highpage, 120 120 };
+14 -14
arch/arm/mm/copypage-v4wb.c
··· 64 64 * 65 65 * Same story as above. 66 66 */ 67 - void __attribute__((naked)) 68 - v4wb_clear_user_page(void *kaddr, unsigned long vaddr) 67 + void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) 69 68 { 69 + void *kaddr = kmap_atomic(page, KM_USER0); 70 70 asm("\ 71 - str lr, [sp, #-4]!\n\ 72 - mov r1, %0 @ 1\n\ 71 + mov r1, %1 @ 1\n\ 73 72 mov r2, #0 @ 1\n\ 74 73 mov r3, #0 @ 1\n\ 75 74 mov ip, #0 @ 1\n\ 76 75 mov lr, #0 @ 1\n\ 77 - 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 78 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 79 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 80 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ 81 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 82 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 76 + 1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ 77 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 78 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 79 + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ 80 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 81 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 83 82 subs r1, r1, #1 @ 1\n\ 84 83 bne 1b @ 1\n\ 85 - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ 86 - ldr pc, [sp], #4" 84 + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB" 87 85 : 88 - : "I" (PAGE_SIZE / 64)); 86 + : "r" (kaddr), "I" (PAGE_SIZE / 64) 87 + : "r1", "r2", "r3", "ip", "lr"); 88 + kunmap_atomic(kaddr, KM_USER0); 89 89 } 90 90 91 91 struct cpu_user_fns v4wb_user_fns __initdata = { 92 - .cpu_clear_user_page = v4wb_clear_user_page, 92 + .cpu_clear_user_highpage = v4wb_clear_user_highpage, 93 93 .cpu_copy_user_highpage = v4wb_copy_user_highpage, 94 94 };
+12 -12
arch/arm/mm/copypage-v4wt.c
··· 60 60 * 61 61 * Same story as above. 62 62 */ 63 - void __attribute__((naked)) 64 - v4wt_clear_user_page(void *kaddr, unsigned long vaddr) 63 + void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) 65 64 { 65 + void *kaddr = kmap_atomic(page, KM_USER0); 66 66 asm("\ 67 - str lr, [sp, #-4]!\n\ 68 - mov r1, %0 @ 1\n\ 67 + mov r1, %1 @ 1\n\ 69 68 mov r2, #0 @ 1\n\ 70 69 mov r3, #0 @ 1\n\ 71 70 mov ip, #0 @ 1\n\ 72 71 mov lr, #0 @ 1\n\ 73 - 1: stmia r0!, {r2, r3, ip, lr} @ 4\n\ 74 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 75 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 76 - stmia r0!, {r2, r3, ip, lr} @ 4\n\ 72 + 1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ 73 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 74 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 75 + stmia %0!, {r2, r3, ip, lr} @ 4\n\ 77 76 subs r1, r1, #1 @ 1\n\ 78 77 bne 1b @ 1\n\ 79 - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ 80 - ldr pc, [sp], #4" 78 + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache" 81 79 : 82 - : "I" (PAGE_SIZE / 64)); 80 + : "r" (kaddr), "I" (PAGE_SIZE / 64) 81 + : "r1", "r2", "r3", "ip", "lr"); 82 + kunmap_atomic(kaddr, KM_USER0); 83 83 } 84 84 85 85 struct cpu_user_fns v4wt_user_fns __initdata = { 86 - .cpu_clear_user_page = v4wt_clear_user_page, 86 + .cpu_clear_user_highpage = v4wt_clear_user_highpage, 87 87 .cpu_copy_user_highpage = v4wt_copy_user_highpage, 88 88 };
+9 -14
arch/arm/mm/copypage-v6.c
··· 49 49 * Clear the user page. No aliasing to deal with so we can just 50 50 * attack the kernel's existing mapping of this page. 51 51 */ 52 - static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) 52 + static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) 53 53 { 54 + void *kaddr = kmap_atomic(page, KM_USER0); 54 55 clear_page(kaddr); 56 + kunmap_atomic(kaddr, KM_USER0); 55 57 } 56 58 57 59 /* ··· 109 107 * so remap the kernel page into the same cache colour as the user 110 108 * page. 111 109 */ 112 - static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) 110 + static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) 113 111 { 114 112 unsigned int offset = CACHE_COLOUR(vaddr); 115 113 unsigned long to = to_address + (offset << PAGE_SHIFT); 116 114 117 - /* 118 - * Discard data in the kernel mapping for the new page 119 - * FIXME: needs this MCRR to be supported. 120 - */ 121 - __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" 122 - : 123 - : "r" (kaddr), 124 - "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES) 125 - : "cc"); 115 + /* FIXME: not highmem safe */ 116 + discard_old_kernel_data(page_address(page)); 126 117 127 118 /* 128 119 * Now clear the page using the same cache colour as ··· 123 128 */ 124 129 spin_lock(&v6_lock); 125 130 126 - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0); 131 + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); 127 132 flush_tlb_kernel_page(to); 128 133 clear_page((void *)to); 129 134 ··· 131 136 } 132 137 133 138 struct cpu_user_fns v6_user_fns __initdata = { 134 - .cpu_clear_user_page = v6_clear_user_page_nonaliasing, 139 + .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing, 135 140 .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing, 136 141 }; 137 142 138 143 static int __init v6_userpage_init(void) 139 144 { 140 145 
if (cache_is_vipt_aliasing()) { 141 - cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; 146 + cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing; 142 147 cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; 143 148 } 144 149
+13 -12
arch/arm/mm/copypage-xsc3.c
··· 87 87 * r0 = destination 88 88 * r1 = virtual user address of ultimate destination page 89 89 */ 90 - void __attribute__((naked)) 91 - xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr) 90 + void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) 92 91 { 92 + void *kaddr = kmap_atomic(page, KM_USER0); 93 93 asm("\ 94 - mov r1, %0 \n\ 94 + mov r1, %1 \n\ 95 95 mov r2, #0 \n\ 96 96 mov r3, #0 \n\ 97 - 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line\n\ 98 - strd r2, [r0], #8 \n\ 99 - strd r2, [r0], #8 \n\ 100 - strd r2, [r0], #8 \n\ 101 - strd r2, [r0], #8 \n\ 97 + 1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ 98 + strd r2, [%0], #8 \n\ 99 + strd r2, [%0], #8 \n\ 100 + strd r2, [%0], #8 \n\ 101 + strd r2, [%0], #8 \n\ 102 102 subs r1, r1, #1 \n\ 103 - bne 1b \n\ 104 - mov pc, lr" 103 + bne 1b" 105 104 : 106 - : "I" (PAGE_SIZE / 32)); 105 + : "r" (kaddr), "I" (PAGE_SIZE / 32) 106 + : "r1", "r2", "r3"); 107 + kunmap_atomic(kaddr, KM_USER0); 107 108 } 108 109 109 110 struct cpu_user_fns xsc3_mc_user_fns __initdata = { 110 - .cpu_clear_user_page = xsc3_mc_clear_user_page, 111 + .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage, 111 112 .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage, 112 113 };
+14 -12
arch/arm/mm/copypage-xscale.c
··· 113 113 /* 114 114 * XScale optimised clear_user_page 115 115 */ 116 - void __attribute__((naked)) 117 - xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) 116 + void 117 + xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) 118 118 { 119 + void *kaddr = kmap_atomic(page, KM_USER0); 119 120 asm volatile( 120 - "mov r1, %0 \n\ 121 + "mov r1, %1 \n\ 121 122 mov r2, #0 \n\ 122 123 mov r3, #0 \n\ 123 - 1: mov ip, r0 \n\ 124 - strd r2, [r0], #8 \n\ 125 - strd r2, [r0], #8 \n\ 126 - strd r2, [r0], #8 \n\ 127 - strd r2, [r0], #8 \n\ 124 + 1: mov ip, %0 \n\ 125 + strd r2, [%0], #8 \n\ 126 + strd r2, [%0], #8 \n\ 127 + strd r2, [%0], #8 \n\ 128 + strd r2, [%0], #8 \n\ 128 129 mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ 129 130 subs r1, r1, #1 \n\ 130 131 mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ 131 - bne 1b \n\ 132 - mov pc, lr" 132 + bne 1b" 133 133 : 134 - : "I" (PAGE_SIZE / 32)); 134 + : "r" (kaddr), "I" (PAGE_SIZE / 32) 135 + : "r1", "r2", "r3", "ip"); 136 + kunmap_atomic(kaddr, KM_USER0); 135 137 } 136 138 137 139 struct cpu_user_fns xscale_mc_user_fns __initdata = { 138 - .cpu_clear_user_page = xscale_mc_clear_user_page, 140 + .cpu_clear_user_highpage = xscale_mc_clear_user_highpage, 139 141 .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, 140 142 };
+1 -1
arch/arm/mm/proc-syms.c
··· 33 33 34 34 #ifdef CONFIG_MMU 35 35 #ifndef MULTI_USER 36 - EXPORT_SYMBOL(__cpu_clear_user_page); 36 + EXPORT_SYMBOL(__cpu_clear_user_highpage); 37 37 EXPORT_SYMBOL(__cpu_copy_user_highpage); 38 38 #else 39 39 EXPORT_SYMBOL(cpu_user);