Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: implement clear_user_highpage and copy_user_highpage

Existing clear_user_page and copy_user_page cannot be used with highmem
because they calculate physical page address from its virtual address
and do it incorrectly in case of high memory page mapped with
kmap_atomic. Also, kmap is not needed, as the userspace mapping color
would most likely be different from the kmapped color.

Provide clear_user_highpage and copy_user_highpage functions that
determine if temporary mapping is needed for the pages. Move most of the
logic of the former clear_user_page and copy_user_page to
xtensa/mm/cache.c only leaving temporary mapping setup, invalidation and
clearing/copying in the xtensa/mm/misc.S. Rename these functions to
clear_page_alias and copy_page_alias.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

+127 -66
+2
arch/xtensa/include/asm/cacheflush.h
··· 37 37 * specials for cache aliasing: 38 38 * 39 39 * __flush_invalidate_dcache_page_alias(vaddr,paddr) 40 + * __invalidate_dcache_page_alias(vaddr,paddr) 40 41 * __invalidate_icache_page_alias(vaddr,paddr) 41 42 */ 42 43 ··· 63 62 64 63 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) 65 64 extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); 65 + extern void __invalidate_dcache_page_alias(unsigned long, unsigned long); 66 66 #else 67 67 static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, 68 68 unsigned long phys) { }
+10 -2
arch/xtensa/include/asm/page.h
··· 134 134 #endif 135 135 136 136 struct page; 137 + struct vm_area_struct; 137 138 extern void clear_page(void *page); 138 139 extern void copy_page(void *to, void *from); 139 140 ··· 144 143 */ 145 144 146 145 #if DCACHE_WAY_SIZE > PAGE_SIZE 147 - extern void clear_user_page(void*, unsigned long, struct page*); 148 - extern void copy_user_page(void*, void*, unsigned long, struct page*); 146 + extern void clear_page_alias(void *vaddr, unsigned long paddr); 147 + extern void copy_page_alias(void *to, void *from, 148 + unsigned long to_paddr, unsigned long from_paddr); 149 + 150 + #define clear_user_highpage clear_user_highpage 151 + void clear_user_highpage(struct page *page, unsigned long vaddr); 152 + #define __HAVE_ARCH_COPY_USER_HIGHPAGE 153 + void copy_user_highpage(struct page *to, struct page *from, 154 + unsigned long vaddr, struct vm_area_struct *vma); 149 155 #else 150 156 # define clear_user_page(page, vaddr, pg) clear_page(page) 151 157 # define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+63
arch/xtensa/mm/cache.c
··· 63 63 #error "HIGHMEM is not supported on cores with aliasing cache." 64 64 #endif 65 65 66 + #if (DCACHE_WAY_SIZE > PAGE_SIZE) 67 + static inline void kmap_invalidate_coherent(struct page *page, 68 + unsigned long vaddr) 69 + { 70 + if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { 71 + unsigned long kvaddr; 72 + 73 + if (!PageHighMem(page)) { 74 + kvaddr = (unsigned long)page_to_virt(page); 75 + 76 + __invalidate_dcache_page(kvaddr); 77 + } else { 78 + kvaddr = TLBTEMP_BASE_1 + 79 + (page_to_phys(page) & DCACHE_ALIAS_MASK); 80 + 81 + __invalidate_dcache_page_alias(kvaddr, 82 + page_to_phys(page)); 83 + } 84 + } 85 + } 86 + 87 + static inline void *coherent_kvaddr(struct page *page, unsigned long base, 88 + unsigned long vaddr, unsigned long *paddr) 89 + { 90 + if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { 91 + *paddr = page_to_phys(page); 92 + return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); 93 + } else { 94 + *paddr = 0; 95 + return page_to_virt(page); 96 + } 97 + } 98 + 99 + void clear_user_highpage(struct page *page, unsigned long vaddr) 100 + { 101 + unsigned long paddr; 102 + void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); 103 + 104 + pagefault_disable(); 105 + kmap_invalidate_coherent(page, vaddr); 106 + set_bit(PG_arch_1, &page->flags); 107 + clear_page_alias(kvaddr, paddr); 108 + pagefault_enable(); 109 + } 110 + 111 + void copy_user_highpage(struct page *dst, struct page *src, 112 + unsigned long vaddr, struct vm_area_struct *vma) 113 + { 114 + unsigned long dst_paddr, src_paddr; 115 + void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, 116 + &dst_paddr); 117 + void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, 118 + &src_paddr); 119 + 120 + pagefault_disable(); 121 + kmap_invalidate_coherent(dst, vaddr); 122 + set_bit(PG_arch_1, &dst->flags); 123 + copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); 124 + pagefault_enable(); 125 + } 126 + 127 + #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
128 + 66 129 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 67 130 68 131 /*
+52 -64
arch/xtensa/mm/misc.S
··· 110 110 #if (DCACHE_WAY_SIZE > PAGE_SIZE) 111 111 112 112 /* 113 - * clear_user_page (void *addr, unsigned long vaddr, struct page *page) 114 - * a2 a3 a4 113 + * clear_page_alias(void *addr, unsigned long paddr) 114 + * a2 a3 115 115 */ 116 116 117 - ENTRY(clear_user_page) 117 + ENTRY(clear_page_alias) 118 118 119 119 entry a1, 32 120 120 121 - /* Mark page dirty and determine alias. */ 121 + /* Skip setting up a temporary DTLB if not aliased low page. */ 122 122 123 - movi a7, (1 << PG_ARCH_1) 124 - l32i a5, a4, PAGE_FLAGS 125 - xor a6, a2, a3 126 - extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER 127 - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 128 - or a5, a5, a7 129 - slli a3, a3, PAGE_SHIFT 130 - s32i a5, a4, PAGE_FLAGS 123 + movi a5, PAGE_OFFSET 124 + movi a6, 0 125 + beqz a3, 1f 131 126 132 - /* Skip setting up a temporary DTLB if not aliased. */ 127 + /* Setup a temporary DTLB for the addr. */ 133 128 134 - beqz a6, 1f 135 - 136 - /* Invalidate kernel page. */ 137 - 138 - mov a10, a2 139 - call8 __invalidate_dcache_page 140 - 141 - /* Setup a temporary DTLB with the color of the VPN */ 142 - 143 - movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff 144 - movi a5, TLBTEMP_BASE_1 # virt 145 - add a6, a2, a4 # ppn 146 - add a2, a5, a3 # add 'color' 147 - 129 + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 130 + mov a4, a2 148 131 wdtlb a6, a2 149 132 dsync 150 133 ··· 148 165 149 166 /* We need to invalidate the temporary idtlb entry, if any. */
150 167 151 - 1: addi a2, a2, -PAGE_SIZE 152 - idtlb a2 168 + 1: idtlb a4 153 169 dsync 154 170 155 171 retw 156 172 157 - ENDPROC(clear_user_page) 173 + ENDPROC(clear_page_alias) 158 174 159 175 /* 160 - * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) 161 - * a2 a3 a4 a5 176 + * copy_page_alias(void *to, void *from, 177 + * a2 a3 178 + * unsigned long to_paddr, unsigned long from_paddr) 179 + * a4 a5 162 180 */ 163 181 164 - ENTRY(copy_user_page) 182 + ENTRY(copy_page_alias) 165 183 166 184 entry a1, 32 167 185 168 - /* Mark page dirty and determine alias for destination. */ 186 + /* Skip setting up a temporary DTLB for destination if not aliased. */ 169 187 170 - movi a8, (1 << PG_ARCH_1) 171 - l32i a9, a5, PAGE_FLAGS 172 - xor a6, a2, a4 173 - xor a7, a3, a4 174 - extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER 175 - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 176 - extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER 177 - or a9, a9, a8 178 - slli a4, a4, PAGE_SHIFT 179 - s32i a9, a5, PAGE_FLAGS 180 - movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff 188 + movi a6, 0 189 + movi a7, 0 190 + beqz a4, 1f 181 191 182 - beqz a6, 1f 192 + /* Setup a temporary DTLB for destination. */ 183 193 184 - /* Invalidate dcache */ 185 - 186 - mov a10, a2 187 - call8 __invalidate_dcache_page 188 - 189 - /* Setup a temporary DTLB with a matching color. */ 190 - 191 - movi a8, TLBTEMP_BASE_1 # base 192 - add a6, a2, a5 # ppn 193 - add a2, a8, a4 # add 'color' 194 - 194 + addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) 195 195 wdtlb a6, a2 196 196 dsync 197 197 198 - /* Skip setting up a temporary DTLB for destination if not aliased. */ 198 + /* Skip setting up a temporary DTLB for source if not aliased. */ 199 199 200 - 1: beqz a7, 1f 200 + 1: beqz a5, 1f 201 201 202 - /* Setup a temporary DTLB with a matching color. */ 202 + /* Setup a temporary DTLB for source. */
203 203 204 - movi a8, TLBTEMP_BASE_2 # base 205 - add a7, a3, a5 # ppn 206 - add a3, a8, a4 204 + addi a7, a5, PAGE_KERNEL 207 205 addi a8, a3, 1 # way1 208 206 209 207 wdtlb a7, a8 ··· 235 271 236 272 retw 237 273 238 - ENDPROC(copy_user_page) 274 + ENDPROC(copy_page_alias) 239 275 240 276 #endif 241 277 ··· 264 300 retw 265 301 266 302 ENDPROC(__flush_invalidate_dcache_page_alias) 303 + 304 + /* 305 + * void __invalidate_dcache_page_alias (addr, phys) 306 + * a2 a3 307 + */ 308 + 309 + ENTRY(__invalidate_dcache_page_alias) 310 + 311 + entry sp, 16 312 + 313 + movi a7, 0 # required for exception handler 314 + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 315 + mov a4, a2 316 + wdtlb a6, a2 317 + dsync 318 + 319 + ___invalidate_dcache_page a2 a3 320 + 321 + idtlb a4 322 + dsync 323 + 324 + retw 325 + 326 + ENDPROC(__invalidate_dcache_page_alias) 267 327 #endif 268 328 269 329 ENTRY(__tlbtemp_mapping_itlb)