
ARM: 9387/2: mm: Rewrite cacheflush vtables in CFI safe C

Instead of defining all cache flush operations with an assembly
macro in proc-macros.S, provide an explicit struct cpu_cache_fns
for each CPU cache type in mm/cache.c.

As a side effect of rewriting the vtables in C, we can avoid the
aliasing for the "louis" cache callback: instead we can simply
assign the NN_flush_kern_cache_all() function to the louis callback
in the C vtable.
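
For example, the v6 entry in the new mm/cache.c points both callbacks
at the same function (abridged; the full file is in the diff below):

    struct cpu_cache_fns v6_cache_fns __initconst = {
            .flush_icache_all = v6_flush_icache_all,
            .flush_kern_all = v6_flush_kern_cache_all,
            /* v6 has no dedicated louis routine, reuse flush_kern_cache_all */
            .flush_kern_louis = v6_flush_kern_cache_all,
            ...
    };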

Because the louis cache callback is called explicitly (not through
the vtable) when only one type of cache support is compiled in, we
need an ifdef quirk for it in the !MULTI_CACHE case, as shown below.
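
Concretely, the !MULTI_CACHE shortcut in <asm/glue-cache.h> now reads:

    /* This function only has a dedicated assembly callback on the v7 cache */
    #ifdef CONFIG_CPU_CACHE_V7
    #define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis)
    #else
    #define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all)
    #endif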

Feroceon and XScale have some DMA mapping quirks; in their case we
can just define two structs and assign all but one callback to the
main implementation. Since each of them invoked define_cache_functions
twice, they require MULTI_CACHE by definition, so the compiled-in
shortcut is not used on these variants.
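
For example, the Feroceon range variant shares every callback with the
main feroceon_cache_fns except the three quirky ones (abridged from
mm/cache.c below):

    struct cpu_cache_fns feroceon_range_cache_fns __initconst = {
            .flush_icache_all = feroceon_flush_icache_all,
            ...
            .flush_kern_dcache_area = feroceon_range_flush_kern_dcache_area,
            .dma_map_area = feroceon_range_dma_map_area,
            .dma_unmap_area = feroceon_dma_unmap_area,
            .dma_flush_range = feroceon_range_dma_flush_range,
    };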

Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

Authored by Linus Walleij, committed by Russell King (Oracle)
b4d20eff 2074beeb

+688 -259
+9 -19
arch/arm/include/asm/glue-cache.h
···
  # define MULTI_CACHE 1
  #endif

+ #ifdef CONFIG_CPU_CACHE_NOP
+ # define MULTI_CACHE 1
+ #endif
+
  #if defined(CONFIG_CPU_V7M)
  # define MULTI_CACHE 1
  #endif
···
  #error Unknown cache maintenance model
  #endif

- #ifndef __ASSEMBLER__
- static inline void nop_flush_icache_all(void) { }
- static inline void nop_flush_kern_cache_all(void) { }
- static inline void nop_flush_kern_cache_louis(void) { }
- static inline void nop_flush_user_cache_all(void) { }
- static inline void nop_flush_user_cache_range(unsigned long a,
-         unsigned long b, unsigned int c) { }
-
- static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
- static inline int nop_coherent_user_range(unsigned long a,
-         unsigned long b) { return 0; }
- static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
-
- static inline void nop_dma_flush_range(const void *a, const void *b) { }
-
- static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
- static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
- #endif
-
  #ifndef MULTI_CACHE
  #define __cpuc_flush_icache_all   __glue(_CACHE,_flush_icache_all)
  #define __cpuc_flush_kern_all     __glue(_CACHE,_flush_kern_cache_all)
+ /* This function only has a dedicated assembly callback on the v7 cache */
+ #ifdef CONFIG_CPU_CACHE_V7
  #define __cpuc_flush_kern_louis   __glue(_CACHE,_flush_kern_cache_louis)
+ #else
+ #define __cpuc_flush_kern_louis   __glue(_CACHE,_flush_kern_cache_all)
+ #endif
  #define __cpuc_flush_user_all     __glue(_CACHE,_flush_user_cache_all)
  #define __cpuc_flush_user_range   __glue(_CACHE,_flush_user_cache_range)
  #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
+1
arch/arm/mm/Makefile
···
  obj-$(CONFIG_CPU_CACHE_FA)    += cache-fa.o
  obj-$(CONFIG_CPU_CACHE_NOP)   += cache-nop.o
  obj-$(CONFIG_CPU_CACHE_V7M)   += cache-v7m.o
+ obj-y                         += cache.o

  obj-$(CONFIG_CPU_COPY_V4WT)   += copypage-v4wt.o
  obj-$(CONFIG_CPU_COPY_V4WB)   += copypage-v4wb.o
+1
arch/arm/mm/cache-b15-rac.c
···
   * Copyright (C) 2015-2016 Broadcom
   */

+ #include <linux/cfi_types.h>
  #include <linux/err.h>
  #include <linux/spinlock.h>
  #include <linux/io.h>
-8
arch/arm/mm/cache-fa.S
···
  SYM_TYPED_FUNC_START(fa_dma_unmap_area)
          ret     lr
  SYM_FUNC_END(fa_dma_unmap_area)
-
-         .globl  fa_flush_kern_cache_louis
-         .equ    fa_flush_kern_cache_louis, fa_flush_kern_cache_all
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions fa
-8
arch/arm/mm/cache-nop.S
···
          ret     lr
  SYM_FUNC_END(nop_flush_kern_cache_all)

-         .globl  nop_flush_kern_cache_louis
-         .equ    nop_flush_kern_cache_louis, nop_flush_icache_all
-
  SYM_TYPED_FUNC_START(nop_flush_user_cache_all)
          ret     lr
  SYM_FUNC_END(nop_flush_user_cache_all)
···
  SYM_TYPED_FUNC_START(nop_dma_map_area)
          ret     lr
  SYM_FUNC_END(nop_dma_map_area)
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions nop

  SYM_TYPED_FUNC_START(nop_dma_unmap_area)
          ret     lr
-8
arch/arm/mm/cache-v4.S
···
  SYM_TYPED_FUNC_START(v4_dma_map_area)
          ret     lr
  SYM_FUNC_END(v4_dma_map_area)
-
-         .globl  v4_flush_kern_cache_louis
-         .equ    v4_flush_kern_cache_louis, v4_flush_kern_cache_all
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions v4
-8
arch/arm/mm/cache-v4wb.S
···
  SYM_TYPED_FUNC_START(v4wb_dma_unmap_area)
          ret     lr
  SYM_FUNC_END(v4wb_dma_unmap_area)
-
-         .globl  v4wb_flush_kern_cache_louis
-         .equ    v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions v4wb
-8
arch/arm/mm/cache-v4wt.S
···
  SYM_TYPED_FUNC_START(v4wt_dma_map_area)
          ret     lr
  SYM_FUNC_END(v4wt_dma_map_area)
-
-         .globl  v4wt_flush_kern_cache_louis
-         .equ    v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions v4wt
-8
arch/arm/mm/cache-v6.S
···
          bne     v6_dma_inv_range
          ret     lr
  SYM_FUNC_END(v6_dma_unmap_area)
-
-         .globl  v6_flush_kern_cache_louis
-         .equ    v6_flush_kern_cache_louis, v6_flush_kern_cache_all
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions v6
-25
arch/arm/mm/cache-v7.S
···
          bne     v7_dma_inv_range
          ret     lr
  SYM_FUNC_END(v7_dma_unmap_area)
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions v7
-
- /* The Broadcom Brahma-B15 read-ahead cache requires some modifications
-  * to the v7_cache_fns, we only override the ones we need
-  */
- #ifndef CONFIG_CACHE_B15_RAC
-         globl_equ       b15_flush_kern_cache_all,       v7_flush_kern_cache_all
- #endif
-         globl_equ       b15_flush_icache_all,           v7_flush_icache_all
-         globl_equ       b15_flush_kern_cache_louis,     v7_flush_kern_cache_louis
-         globl_equ       b15_flush_user_cache_all,       v7_flush_user_cache_all
-         globl_equ       b15_flush_user_cache_range,     v7_flush_user_cache_range
-         globl_equ       b15_coherent_kern_range,        v7_coherent_kern_range
-         globl_equ       b15_coherent_user_range,        v7_coherent_user_range
-         globl_equ       b15_flush_kern_dcache_area,     v7_flush_kern_dcache_area
-
-         globl_equ       b15_dma_map_area,               v7_dma_map_area
-         globl_equ       b15_dma_unmap_area,             v7_dma_unmap_area
-         globl_equ       b15_dma_flush_range,            v7_dma_flush_range
-
-         define_cache_functions b15
-8
arch/arm/mm/cache-v7m.S
···
          bne     v7m_dma_inv_range
          ret     lr
  SYM_FUNC_END(v7m_dma_unmap_area)
-
-         .globl  v7m_flush_kern_cache_louis
-         .equ    v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
-
-         __INITDATA
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions v7m
+663
arch/arm/mm/cache.c
···
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * This file defines C prototypes for the low-level cache assembly functions
+  * and populates a vtable for each selected ARM CPU cache type.
+  */
+
+ #include <linux/types.h>
+ #include <asm/cacheflush.h>
+
+ #ifdef CONFIG_CPU_CACHE_V4
+ void v4_flush_icache_all(void);
+ void v4_flush_kern_cache_all(void);
+ void v4_flush_user_cache_all(void);
+ void v4_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void v4_coherent_kern_range(unsigned long, unsigned long);
+ int v4_coherent_user_range(unsigned long, unsigned long);
+ void v4_flush_kern_dcache_area(void *, size_t);
+ void v4_dma_map_area(const void *, size_t, int);
+ void v4_dma_unmap_area(const void *, size_t, int);
+ void v4_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns v4_cache_fns __initconst = {
+         .flush_icache_all = v4_flush_icache_all,
+         .flush_kern_all = v4_flush_kern_cache_all,
+         .flush_kern_louis = v4_flush_kern_cache_all,
+         .flush_user_all = v4_flush_user_cache_all,
+         .flush_user_range = v4_flush_user_cache_range,
+         .coherent_kern_range = v4_coherent_kern_range,
+         .coherent_user_range = v4_coherent_user_range,
+         .flush_kern_dcache_area = v4_flush_kern_dcache_area,
+         .dma_map_area = v4_dma_map_area,
+         .dma_unmap_area = v4_dma_unmap_area,
+         .dma_flush_range = v4_dma_flush_range,
+ };
+ #endif
+
+ /* V4 write-back cache "V4WB" */
+ #ifdef CONFIG_CPU_CACHE_V4WB
+ void v4wb_flush_icache_all(void);
+ void v4wb_flush_kern_cache_all(void);
+ void v4wb_flush_user_cache_all(void);
+ void v4wb_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void v4wb_coherent_kern_range(unsigned long, unsigned long);
+ int v4wb_coherent_user_range(unsigned long, unsigned long);
+ void v4wb_flush_kern_dcache_area(void *, size_t);
+ void v4wb_dma_map_area(const void *, size_t, int);
+ void v4wb_dma_unmap_area(const void *, size_t, int);
+ void v4wb_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns v4wb_cache_fns __initconst = {
+         .flush_icache_all = v4wb_flush_icache_all,
+         .flush_kern_all = v4wb_flush_kern_cache_all,
+         .flush_kern_louis = v4wb_flush_kern_cache_all,
+         .flush_user_all = v4wb_flush_user_cache_all,
+         .flush_user_range = v4wb_flush_user_cache_range,
+         .coherent_kern_range = v4wb_coherent_kern_range,
+         .coherent_user_range = v4wb_coherent_user_range,
+         .flush_kern_dcache_area = v4wb_flush_kern_dcache_area,
+         .dma_map_area = v4wb_dma_map_area,
+         .dma_unmap_area = v4wb_dma_unmap_area,
+         .dma_flush_range = v4wb_dma_flush_range,
+ };
+ #endif
+
+ /* V4 write-through cache "V4WT" */
+ #ifdef CONFIG_CPU_CACHE_V4WT
+ void v4wt_flush_icache_all(void);
+ void v4wt_flush_kern_cache_all(void);
+ void v4wt_flush_user_cache_all(void);
+ void v4wt_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void v4wt_coherent_kern_range(unsigned long, unsigned long);
+ int v4wt_coherent_user_range(unsigned long, unsigned long);
+ void v4wt_flush_kern_dcache_area(void *, size_t);
+ void v4wt_dma_map_area(const void *, size_t, int);
+ void v4wt_dma_unmap_area(const void *, size_t, int);
+ void v4wt_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns v4wt_cache_fns __initconst = {
+         .flush_icache_all = v4wt_flush_icache_all,
+         .flush_kern_all = v4wt_flush_kern_cache_all,
+         .flush_kern_louis = v4wt_flush_kern_cache_all,
+         .flush_user_all = v4wt_flush_user_cache_all,
+         .flush_user_range = v4wt_flush_user_cache_range,
+         .coherent_kern_range = v4wt_coherent_kern_range,
+         .coherent_user_range = v4wt_coherent_user_range,
+         .flush_kern_dcache_area = v4wt_flush_kern_dcache_area,
+         .dma_map_area = v4wt_dma_map_area,
+         .dma_unmap_area = v4wt_dma_unmap_area,
+         .dma_flush_range = v4wt_dma_flush_range,
+ };
+ #endif
+
+ /* Faraday FA526 cache */
+ #ifdef CONFIG_CPU_CACHE_FA
+ void fa_flush_icache_all(void);
+ void fa_flush_kern_cache_all(void);
+ void fa_flush_user_cache_all(void);
+ void fa_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void fa_coherent_kern_range(unsigned long, unsigned long);
+ int fa_coherent_user_range(unsigned long, unsigned long);
+ void fa_flush_kern_dcache_area(void *, size_t);
+ void fa_dma_map_area(const void *, size_t, int);
+ void fa_dma_unmap_area(const void *, size_t, int);
+ void fa_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns fa_cache_fns __initconst = {
+         .flush_icache_all = fa_flush_icache_all,
+         .flush_kern_all = fa_flush_kern_cache_all,
+         .flush_kern_louis = fa_flush_kern_cache_all,
+         .flush_user_all = fa_flush_user_cache_all,
+         .flush_user_range = fa_flush_user_cache_range,
+         .coherent_kern_range = fa_coherent_kern_range,
+         .coherent_user_range = fa_coherent_user_range,
+         .flush_kern_dcache_area = fa_flush_kern_dcache_area,
+         .dma_map_area = fa_dma_map_area,
+         .dma_unmap_area = fa_dma_unmap_area,
+         .dma_flush_range = fa_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_CACHE_V6
+ void v6_flush_icache_all(void);
+ void v6_flush_kern_cache_all(void);
+ void v6_flush_user_cache_all(void);
+ void v6_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void v6_coherent_kern_range(unsigned long, unsigned long);
+ int v6_coherent_user_range(unsigned long, unsigned long);
+ void v6_flush_kern_dcache_area(void *, size_t);
+ void v6_dma_map_area(const void *, size_t, int);
+ void v6_dma_unmap_area(const void *, size_t, int);
+ void v6_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns v6_cache_fns __initconst = {
+         .flush_icache_all = v6_flush_icache_all,
+         .flush_kern_all = v6_flush_kern_cache_all,
+         .flush_kern_louis = v6_flush_kern_cache_all,
+         .flush_user_all = v6_flush_user_cache_all,
+         .flush_user_range = v6_flush_user_cache_range,
+         .coherent_kern_range = v6_coherent_kern_range,
+         .coherent_user_range = v6_coherent_user_range,
+         .flush_kern_dcache_area = v6_flush_kern_dcache_area,
+         .dma_map_area = v6_dma_map_area,
+         .dma_unmap_area = v6_dma_unmap_area,
+         .dma_flush_range = v6_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_CACHE_V7
+ void v7_flush_icache_all(void);
+ void v7_flush_kern_cache_all(void);
+ void v7_flush_kern_cache_louis(void);
+ void v7_flush_user_cache_all(void);
+ void v7_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void v7_coherent_kern_range(unsigned long, unsigned long);
+ int v7_coherent_user_range(unsigned long, unsigned long);
+ void v7_flush_kern_dcache_area(void *, size_t);
+ void v7_dma_map_area(const void *, size_t, int);
+ void v7_dma_unmap_area(const void *, size_t, int);
+ void v7_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns v7_cache_fns __initconst = {
+         .flush_icache_all = v7_flush_icache_all,
+         .flush_kern_all = v7_flush_kern_cache_all,
+         .flush_kern_louis = v7_flush_kern_cache_louis,
+         .flush_user_all = v7_flush_user_cache_all,
+         .flush_user_range = v7_flush_user_cache_range,
+         .coherent_kern_range = v7_coherent_kern_range,
+         .coherent_user_range = v7_coherent_user_range,
+         .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+         .dma_map_area = v7_dma_map_area,
+         .dma_unmap_area = v7_dma_unmap_area,
+         .dma_flush_range = v7_dma_flush_range,
+ };
+
+ /* Special quirky cache flush function for Broadcom B15 v7 caches */
+ void b15_flush_kern_cache_all(void);
+
+ struct cpu_cache_fns b15_cache_fns __initconst = {
+         .flush_icache_all = v7_flush_icache_all,
+ #ifdef CONFIG_CACHE_B15_RAC
+         .flush_kern_all = b15_flush_kern_cache_all,
+ #else
+         .flush_kern_all = v7_flush_kern_cache_all,
+ #endif
+         .flush_kern_louis = v7_flush_kern_cache_louis,
+         .flush_user_all = v7_flush_user_cache_all,
+         .flush_user_range = v7_flush_user_cache_range,
+         .coherent_kern_range = v7_coherent_kern_range,
+         .coherent_user_range = v7_coherent_user_range,
+         .flush_kern_dcache_area = v7_flush_kern_dcache_area,
+         .dma_map_area = v7_dma_map_area,
+         .dma_unmap_area = v7_dma_unmap_area,
+         .dma_flush_range = v7_dma_flush_range,
+ };
+ #endif
+
+ /* The NOP cache is just a set of dummy stubs that by definition does nothing */
+ #ifdef CONFIG_CPU_CACHE_NOP
+ void nop_flush_icache_all(void);
+ void nop_flush_kern_cache_all(void);
+ void nop_flush_user_cache_all(void);
+ void nop_flush_user_cache_range(unsigned long start, unsigned long end, unsigned int flags);
+ void nop_coherent_kern_range(unsigned long start, unsigned long end);
+ int nop_coherent_user_range(unsigned long, unsigned long);
+ void nop_flush_kern_dcache_area(void *kaddr, size_t size);
+ void nop_dma_map_area(const void *start, size_t size, int flags);
+ void nop_dma_unmap_area(const void *start, size_t size, int flags);
+ void nop_dma_flush_range(const void *start, const void *end);
+
+ struct cpu_cache_fns nop_cache_fns __initconst = {
+         .flush_icache_all = nop_flush_icache_all,
+         .flush_kern_all = nop_flush_kern_cache_all,
+         .flush_kern_louis = nop_flush_kern_cache_all,
+         .flush_user_all = nop_flush_user_cache_all,
+         .flush_user_range = nop_flush_user_cache_range,
+         .coherent_kern_range = nop_coherent_kern_range,
+         .coherent_user_range = nop_coherent_user_range,
+         .flush_kern_dcache_area = nop_flush_kern_dcache_area,
+         .dma_map_area = nop_dma_map_area,
+         .dma_unmap_area = nop_dma_unmap_area,
+         .dma_flush_range = nop_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_CACHE_V7M
+ void v7m_flush_icache_all(void);
+ void v7m_flush_kern_cache_all(void);
+ void v7m_flush_user_cache_all(void);
+ void v7m_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void v7m_coherent_kern_range(unsigned long, unsigned long);
+ int v7m_coherent_user_range(unsigned long, unsigned long);
+ void v7m_flush_kern_dcache_area(void *, size_t);
+ void v7m_dma_map_area(const void *, size_t, int);
+ void v7m_dma_unmap_area(const void *, size_t, int);
+ void v7m_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns v7m_cache_fns __initconst = {
+         .flush_icache_all = v7m_flush_icache_all,
+         .flush_kern_all = v7m_flush_kern_cache_all,
+         .flush_kern_louis = v7m_flush_kern_cache_all,
+         .flush_user_all = v7m_flush_user_cache_all,
+         .flush_user_range = v7m_flush_user_cache_range,
+         .coherent_kern_range = v7m_coherent_kern_range,
+         .coherent_user_range = v7m_coherent_user_range,
+         .flush_kern_dcache_area = v7m_flush_kern_dcache_area,
+         .dma_map_area = v7m_dma_map_area,
+         .dma_unmap_area = v7m_dma_unmap_area,
+         .dma_flush_range = v7m_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM1020
+ void arm1020_flush_icache_all(void);
+ void arm1020_flush_kern_cache_all(void);
+ void arm1020_flush_user_cache_all(void);
+ void arm1020_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm1020_coherent_kern_range(unsigned long, unsigned long);
+ int arm1020_coherent_user_range(unsigned long, unsigned long);
+ void arm1020_flush_kern_dcache_area(void *, size_t);
+ void arm1020_dma_map_area(const void *, size_t, int);
+ void arm1020_dma_unmap_area(const void *, size_t, int);
+ void arm1020_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm1020_cache_fns __initconst = {
+         .flush_icache_all = arm1020_flush_icache_all,
+         .flush_kern_all = arm1020_flush_kern_cache_all,
+         .flush_kern_louis = arm1020_flush_kern_cache_all,
+         .flush_user_all = arm1020_flush_user_cache_all,
+         .flush_user_range = arm1020_flush_user_cache_range,
+         .coherent_kern_range = arm1020_coherent_kern_range,
+         .coherent_user_range = arm1020_coherent_user_range,
+         .flush_kern_dcache_area = arm1020_flush_kern_dcache_area,
+         .dma_map_area = arm1020_dma_map_area,
+         .dma_unmap_area = arm1020_dma_unmap_area,
+         .dma_flush_range = arm1020_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM1020E
+ void arm1020e_flush_icache_all(void);
+ void arm1020e_flush_kern_cache_all(void);
+ void arm1020e_flush_user_cache_all(void);
+ void arm1020e_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm1020e_coherent_kern_range(unsigned long, unsigned long);
+ int arm1020e_coherent_user_range(unsigned long, unsigned long);
+ void arm1020e_flush_kern_dcache_area(void *, size_t);
+ void arm1020e_dma_map_area(const void *, size_t, int);
+ void arm1020e_dma_unmap_area(const void *, size_t, int);
+ void arm1020e_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm1020e_cache_fns __initconst = {
+         .flush_icache_all = arm1020e_flush_icache_all,
+         .flush_kern_all = arm1020e_flush_kern_cache_all,
+         .flush_kern_louis = arm1020e_flush_kern_cache_all,
+         .flush_user_all = arm1020e_flush_user_cache_all,
+         .flush_user_range = arm1020e_flush_user_cache_range,
+         .coherent_kern_range = arm1020e_coherent_kern_range,
+         .coherent_user_range = arm1020e_coherent_user_range,
+         .flush_kern_dcache_area = arm1020e_flush_kern_dcache_area,
+         .dma_map_area = arm1020e_dma_map_area,
+         .dma_unmap_area = arm1020e_dma_unmap_area,
+         .dma_flush_range = arm1020e_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM1022
+ void arm1022_flush_icache_all(void);
+ void arm1022_flush_kern_cache_all(void);
+ void arm1022_flush_user_cache_all(void);
+ void arm1022_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm1022_coherent_kern_range(unsigned long, unsigned long);
+ int arm1022_coherent_user_range(unsigned long, unsigned long);
+ void arm1022_flush_kern_dcache_area(void *, size_t);
+ void arm1022_dma_map_area(const void *, size_t, int);
+ void arm1022_dma_unmap_area(const void *, size_t, int);
+ void arm1022_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm1022_cache_fns __initconst = {
+         .flush_icache_all = arm1022_flush_icache_all,
+         .flush_kern_all = arm1022_flush_kern_cache_all,
+         .flush_kern_louis = arm1022_flush_kern_cache_all,
+         .flush_user_all = arm1022_flush_user_cache_all,
+         .flush_user_range = arm1022_flush_user_cache_range,
+         .coherent_kern_range = arm1022_coherent_kern_range,
+         .coherent_user_range = arm1022_coherent_user_range,
+         .flush_kern_dcache_area = arm1022_flush_kern_dcache_area,
+         .dma_map_area = arm1022_dma_map_area,
+         .dma_unmap_area = arm1022_dma_unmap_area,
+         .dma_flush_range = arm1022_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM1026
+ void arm1026_flush_icache_all(void);
+ void arm1026_flush_kern_cache_all(void);
+ void arm1026_flush_user_cache_all(void);
+ void arm1026_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm1026_coherent_kern_range(unsigned long, unsigned long);
+ int arm1026_coherent_user_range(unsigned long, unsigned long);
+ void arm1026_flush_kern_dcache_area(void *, size_t);
+ void arm1026_dma_map_area(const void *, size_t, int);
+ void arm1026_dma_unmap_area(const void *, size_t, int);
+ void arm1026_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm1026_cache_fns __initconst = {
+         .flush_icache_all = arm1026_flush_icache_all,
+         .flush_kern_all = arm1026_flush_kern_cache_all,
+         .flush_kern_louis = arm1026_flush_kern_cache_all,
+         .flush_user_all = arm1026_flush_user_cache_all,
+         .flush_user_range = arm1026_flush_user_cache_range,
+         .coherent_kern_range = arm1026_coherent_kern_range,
+         .coherent_user_range = arm1026_coherent_user_range,
+         .flush_kern_dcache_area = arm1026_flush_kern_dcache_area,
+         .dma_map_area = arm1026_dma_map_area,
+         .dma_unmap_area = arm1026_dma_unmap_area,
+         .dma_flush_range = arm1026_dma_flush_range,
+ };
+ #endif
+
+ #if defined(CONFIG_CPU_ARM920T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+ void arm920_flush_icache_all(void);
+ void arm920_flush_kern_cache_all(void);
+ void arm920_flush_user_cache_all(void);
+ void arm920_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm920_coherent_kern_range(unsigned long, unsigned long);
+ int arm920_coherent_user_range(unsigned long, unsigned long);
+ void arm920_flush_kern_dcache_area(void *, size_t);
+ void arm920_dma_map_area(const void *, size_t, int);
+ void arm920_dma_unmap_area(const void *, size_t, int);
+ void arm920_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm920_cache_fns __initconst = {
+         .flush_icache_all = arm920_flush_icache_all,
+         .flush_kern_all = arm920_flush_kern_cache_all,
+         .flush_kern_louis = arm920_flush_kern_cache_all,
+         .flush_user_all = arm920_flush_user_cache_all,
+         .flush_user_range = arm920_flush_user_cache_range,
+         .coherent_kern_range = arm920_coherent_kern_range,
+         .coherent_user_range = arm920_coherent_user_range,
+         .flush_kern_dcache_area = arm920_flush_kern_dcache_area,
+         .dma_map_area = arm920_dma_map_area,
+         .dma_unmap_area = arm920_dma_unmap_area,
+         .dma_flush_range = arm920_dma_flush_range,
+ };
+ #endif
+
+ #if defined(CONFIG_CPU_ARM922T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+ void arm922_flush_icache_all(void);
+ void arm922_flush_kern_cache_all(void);
+ void arm922_flush_user_cache_all(void);
+ void arm922_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm922_coherent_kern_range(unsigned long, unsigned long);
+ int arm922_coherent_user_range(unsigned long, unsigned long);
+ void arm922_flush_kern_dcache_area(void *, size_t);
+ void arm922_dma_map_area(const void *, size_t, int);
+ void arm922_dma_unmap_area(const void *, size_t, int);
+ void arm922_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm922_cache_fns __initconst = {
+         .flush_icache_all = arm922_flush_icache_all,
+         .flush_kern_all = arm922_flush_kern_cache_all,
+         .flush_kern_louis = arm922_flush_kern_cache_all,
+         .flush_user_all = arm922_flush_user_cache_all,
+         .flush_user_range = arm922_flush_user_cache_range,
+         .coherent_kern_range = arm922_coherent_kern_range,
+         .coherent_user_range = arm922_coherent_user_range,
+         .flush_kern_dcache_area = arm922_flush_kern_dcache_area,
+         .dma_map_area = arm922_dma_map_area,
+         .dma_unmap_area = arm922_dma_unmap_area,
+         .dma_flush_range = arm922_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM925T
+ void arm925_flush_icache_all(void);
+ void arm925_flush_kern_cache_all(void);
+ void arm925_flush_user_cache_all(void);
+ void arm925_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm925_coherent_kern_range(unsigned long, unsigned long);
+ int arm925_coherent_user_range(unsigned long, unsigned long);
+ void arm925_flush_kern_dcache_area(void *, size_t);
+ void arm925_dma_map_area(const void *, size_t, int);
+ void arm925_dma_unmap_area(const void *, size_t, int);
+ void arm925_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm925_cache_fns __initconst = {
+         .flush_icache_all = arm925_flush_icache_all,
+         .flush_kern_all = arm925_flush_kern_cache_all,
+         .flush_kern_louis = arm925_flush_kern_cache_all,
+         .flush_user_all = arm925_flush_user_cache_all,
+         .flush_user_range = arm925_flush_user_cache_range,
+         .coherent_kern_range = arm925_coherent_kern_range,
+         .coherent_user_range = arm925_coherent_user_range,
+         .flush_kern_dcache_area = arm925_flush_kern_dcache_area,
+         .dma_map_area = arm925_dma_map_area,
+         .dma_unmap_area = arm925_dma_unmap_area,
+         .dma_flush_range = arm925_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM926T
+ void arm926_flush_icache_all(void);
+ void arm926_flush_kern_cache_all(void);
+ void arm926_flush_user_cache_all(void);
+ void arm926_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm926_coherent_kern_range(unsigned long, unsigned long);
+ int arm926_coherent_user_range(unsigned long, unsigned long);
+ void arm926_flush_kern_dcache_area(void *, size_t);
+ void arm926_dma_map_area(const void *, size_t, int);
+ void arm926_dma_unmap_area(const void *, size_t, int);
+ void arm926_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm926_cache_fns __initconst = {
+         .flush_icache_all = arm926_flush_icache_all,
+         .flush_kern_all = arm926_flush_kern_cache_all,
+         .flush_kern_louis = arm926_flush_kern_cache_all,
+         .flush_user_all = arm926_flush_user_cache_all,
+         .flush_user_range = arm926_flush_user_cache_range,
+         .coherent_kern_range = arm926_coherent_kern_range,
+         .coherent_user_range = arm926_coherent_user_range,
+         .flush_kern_dcache_area = arm926_flush_kern_dcache_area,
+         .dma_map_area = arm926_dma_map_area,
+         .dma_unmap_area = arm926_dma_unmap_area,
+         .dma_flush_range = arm926_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM940T
+ void arm940_flush_icache_all(void);
+ void arm940_flush_kern_cache_all(void);
+ void arm940_flush_user_cache_all(void);
+ void arm940_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm940_coherent_kern_range(unsigned long, unsigned long);
+ int arm940_coherent_user_range(unsigned long, unsigned long);
+ void arm940_flush_kern_dcache_area(void *, size_t);
+ void arm940_dma_map_area(const void *, size_t, int);
+ void arm940_dma_unmap_area(const void *, size_t, int);
+ void arm940_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm940_cache_fns __initconst = {
+         .flush_icache_all = arm940_flush_icache_all,
+         .flush_kern_all = arm940_flush_kern_cache_all,
+         .flush_kern_louis = arm940_flush_kern_cache_all,
+         .flush_user_all = arm940_flush_user_cache_all,
+         .flush_user_range = arm940_flush_user_cache_range,
+         .coherent_kern_range = arm940_coherent_kern_range,
+         .coherent_user_range = arm940_coherent_user_range,
+         .flush_kern_dcache_area = arm940_flush_kern_dcache_area,
+         .dma_map_area = arm940_dma_map_area,
+         .dma_unmap_area = arm940_dma_unmap_area,
+         .dma_flush_range = arm940_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_ARM946E
+ void arm946_flush_icache_all(void);
+ void arm946_flush_kern_cache_all(void);
+ void arm946_flush_user_cache_all(void);
+ void arm946_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void arm946_coherent_kern_range(unsigned long, unsigned long);
+ int arm946_coherent_user_range(unsigned long, unsigned long);
+ void arm946_flush_kern_dcache_area(void *, size_t);
+ void arm946_dma_map_area(const void *, size_t, int);
+ void arm946_dma_unmap_area(const void *, size_t, int);
+ void arm946_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns arm946_cache_fns __initconst = {
+         .flush_icache_all = arm946_flush_icache_all,
+         .flush_kern_all = arm946_flush_kern_cache_all,
+         .flush_kern_louis = arm946_flush_kern_cache_all,
+         .flush_user_all = arm946_flush_user_cache_all,
+         .flush_user_range = arm946_flush_user_cache_range,
+         .coherent_kern_range = arm946_coherent_kern_range,
+         .coherent_user_range = arm946_coherent_user_range,
+         .flush_kern_dcache_area = arm946_flush_kern_dcache_area,
+         .dma_map_area = arm946_dma_map_area,
+         .dma_unmap_area = arm946_dma_unmap_area,
+         .dma_flush_range = arm946_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_XSCALE
+ void xscale_flush_icache_all(void);
+ void xscale_flush_kern_cache_all(void);
+ void xscale_flush_user_cache_all(void);
+ void xscale_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void xscale_coherent_kern_range(unsigned long, unsigned long);
+ int xscale_coherent_user_range(unsigned long, unsigned long);
+ void xscale_flush_kern_dcache_area(void *, size_t);
+ void xscale_dma_map_area(const void *, size_t, int);
+ void xscale_dma_unmap_area(const void *, size_t, int);
+ void xscale_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns xscale_cache_fns __initconst = {
+         .flush_icache_all = xscale_flush_icache_all,
+         .flush_kern_all = xscale_flush_kern_cache_all,
+         .flush_kern_louis = xscale_flush_kern_cache_all,
+         .flush_user_all = xscale_flush_user_cache_all,
+         .flush_user_range = xscale_flush_user_cache_range,
+         .coherent_kern_range = xscale_coherent_kern_range,
+         .coherent_user_range = xscale_coherent_user_range,
+         .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+         .dma_map_area = xscale_dma_map_area,
+         .dma_unmap_area = xscale_dma_unmap_area,
+         .dma_flush_range = xscale_dma_flush_range,
+ };
+
+ /* The 80200 A0 and A1 need a special quirk for dma_map_area() */
+ void xscale_80200_A0_A1_dma_map_area(const void *, size_t, int);
+
+ struct cpu_cache_fns xscale_80200_A0_A1_cache_fns __initconst = {
+         .flush_icache_all = xscale_flush_icache_all,
+         .flush_kern_all = xscale_flush_kern_cache_all,
+         .flush_kern_louis = xscale_flush_kern_cache_all,
+         .flush_user_all = xscale_flush_user_cache_all,
+         .flush_user_range = xscale_flush_user_cache_range,
+         .coherent_kern_range = xscale_coherent_kern_range,
+         .coherent_user_range = xscale_coherent_user_range,
+         .flush_kern_dcache_area = xscale_flush_kern_dcache_area,
+         .dma_map_area = xscale_80200_A0_A1_dma_map_area,
+         .dma_unmap_area = xscale_dma_unmap_area,
+         .dma_flush_range = xscale_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_XSC3
+ void xsc3_flush_icache_all(void);
+ void xsc3_flush_kern_cache_all(void);
+ void xsc3_flush_user_cache_all(void);
+ void xsc3_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void xsc3_coherent_kern_range(unsigned long, unsigned long);
+ int xsc3_coherent_user_range(unsigned long, unsigned long);
+ void xsc3_flush_kern_dcache_area(void *, size_t);
+ void xsc3_dma_map_area(const void *, size_t, int);
+ void xsc3_dma_unmap_area(const void *, size_t, int);
+ void xsc3_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns xsc3_cache_fns __initconst = {
+         .flush_icache_all = xsc3_flush_icache_all,
+         .flush_kern_all = xsc3_flush_kern_cache_all,
+         .flush_kern_louis = xsc3_flush_kern_cache_all,
+         .flush_user_all = xsc3_flush_user_cache_all,
+         .flush_user_range = xsc3_flush_user_cache_range,
+         .coherent_kern_range = xsc3_coherent_kern_range,
+         .coherent_user_range = xsc3_coherent_user_range,
+         .flush_kern_dcache_area = xsc3_flush_kern_dcache_area,
+         .dma_map_area = xsc3_dma_map_area,
+         .dma_unmap_area = xsc3_dma_unmap_area,
+         .dma_flush_range = xsc3_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_MOHAWK
+ void mohawk_flush_icache_all(void);
+ void mohawk_flush_kern_cache_all(void);
+ void mohawk_flush_user_cache_all(void);
+ void mohawk_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void mohawk_coherent_kern_range(unsigned long, unsigned long);
+ int mohawk_coherent_user_range(unsigned long, unsigned long);
+ void mohawk_flush_kern_dcache_area(void *, size_t);
+ void mohawk_dma_map_area(const void *, size_t, int);
+ void mohawk_dma_unmap_area(const void *, size_t, int);
+ void mohawk_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns mohawk_cache_fns __initconst = {
+         .flush_icache_all = mohawk_flush_icache_all,
+         .flush_kern_all = mohawk_flush_kern_cache_all,
+         .flush_kern_louis = mohawk_flush_kern_cache_all,
+         .flush_user_all = mohawk_flush_user_cache_all,
+         .flush_user_range = mohawk_flush_user_cache_range,
+         .coherent_kern_range = mohawk_coherent_kern_range,
+         .coherent_user_range = mohawk_coherent_user_range,
+         .flush_kern_dcache_area = mohawk_flush_kern_dcache_area,
+         .dma_map_area = mohawk_dma_map_area,
+         .dma_unmap_area = mohawk_dma_unmap_area,
+         .dma_flush_range = mohawk_dma_flush_range,
+ };
+ #endif
+
+ #ifdef CONFIG_CPU_FEROCEON
+ void feroceon_flush_icache_all(void);
+ void feroceon_flush_kern_cache_all(void);
+ void feroceon_flush_user_cache_all(void);
+ void feroceon_flush_user_cache_range(unsigned long, unsigned long, unsigned int);
+ void feroceon_coherent_kern_range(unsigned long, unsigned long);
+ int feroceon_coherent_user_range(unsigned long, unsigned long);
+ void feroceon_flush_kern_dcache_area(void *, size_t);
+ void feroceon_dma_map_area(const void *, size_t, int);
+ void feroceon_dma_unmap_area(const void *, size_t, int);
+ void feroceon_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns feroceon_cache_fns __initconst = {
+         .flush_icache_all = feroceon_flush_icache_all,
+         .flush_kern_all = feroceon_flush_kern_cache_all,
+         .flush_kern_louis = feroceon_flush_kern_cache_all,
+         .flush_user_all = feroceon_flush_user_cache_all,
+         .flush_user_range = feroceon_flush_user_cache_range,
+         .coherent_kern_range = feroceon_coherent_kern_range,
+         .coherent_user_range = feroceon_coherent_user_range,
+         .flush_kern_dcache_area = feroceon_flush_kern_dcache_area,
+         .dma_map_area = feroceon_dma_map_area,
+         .dma_unmap_area = feroceon_dma_unmap_area,
+         .dma_flush_range = feroceon_dma_flush_range,
+ };
+
+ void feroceon_range_flush_kern_dcache_area(void *, size_t);
+ void feroceon_range_dma_map_area(const void *, size_t, int);
+ void feroceon_range_dma_flush_range(const void *, const void *);
+
+ struct cpu_cache_fns feroceon_range_cache_fns __initconst = {
+         .flush_icache_all = feroceon_flush_icache_all,
+         .flush_kern_all = feroceon_flush_kern_cache_all,
+         .flush_kern_louis = feroceon_flush_kern_cache_all,
+         .flush_user_all = feroceon_flush_user_cache_all,
+         .flush_user_range = feroceon_flush_user_cache_range,
+         .coherent_kern_range = feroceon_coherent_kern_range,
+         .coherent_user_range = feroceon_coherent_user_range,
+         .flush_kern_dcache_area = feroceon_range_flush_kern_dcache_area,
+         .dma_map_area = feroceon_range_dma_map_area,
+         .dma_unmap_area = feroceon_dma_unmap_area,
+         .dma_flush_range = feroceon_range_dma_flush_range,
+ };
+ #endif
-6
arch/arm/mm/proc-arm1020.S
···
          ret     lr
  SYM_FUNC_END(arm1020_dma_unmap_area)

-         .globl  arm1020_flush_kern_cache_louis
-         .equ    arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm1020
-
          .align  5
  ENTRY(cpu_arm1020_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_DISABLE
-6
arch/arm/mm/proc-arm1020e.S
···
          ret     lr
  SYM_FUNC_END(arm1020e_dma_unmap_area)

-         .globl  arm1020e_flush_kern_cache_louis
-         .equ    arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm1020e
-
          .align  5
  ENTRY(cpu_arm1020e_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_DISABLE
-6
arch/arm/mm/proc-arm1022.S
···
          ret     lr
  SYM_FUNC_END(arm1022_dma_unmap_area)

-         .globl  arm1022_flush_kern_cache_louis
-         .equ    arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm1022
-
          .align  5
  ENTRY(cpu_arm1022_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_DISABLE
-6
arch/arm/mm/proc-arm1026.S
···
          ret     lr
  SYM_FUNC_END(arm1026_dma_unmap_area)

-         .globl  arm1026_flush_kern_cache_louis
-         .equ    arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm1026
-
          .align  5
  ENTRY(cpu_arm1026_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_DISABLE
-5
arch/arm/mm/proc-arm920.S
···
          ret     lr
  SYM_FUNC_END(arm920_dma_unmap_area)

-         .globl  arm920_flush_kern_cache_louis
-         .equ    arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm920
  #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */

-6
arch/arm/mm/proc-arm922.S
···
          ret     lr
  SYM_FUNC_END(arm922_dma_unmap_area)

-         .globl  arm922_flush_kern_cache_louis
-         .equ    arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm922
-
  #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */

  ENTRY(cpu_arm922_dcache_clean_area)
-6
arch/arm/mm/proc-arm925.S
···
          ret     lr
  SYM_FUNC_END(arm925_dma_unmap_area)

-         .globl  arm925_flush_kern_cache_louis
-         .equ    arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm925
-
  ENTRY(cpu_arm925_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
-6
arch/arm/mm/proc-arm926.S
···
          ret     lr
  SYM_FUNC_END(arm926_dma_unmap_area)

-         .globl  arm926_flush_kern_cache_louis
-         .equ    arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm926
-
  ENTRY(cpu_arm926_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
-6
arch/arm/mm/proc-arm940.S
···
          ret     lr
  SYM_FUNC_END(arm940_dma_unmap_area)

-         .globl  arm940_flush_kern_cache_louis
-         .equ    arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm940
-
          .type   __arm940_setup, #function
  __arm940_setup:
          mov     r0, #0
-6
arch/arm/mm/proc-arm946.S
···
          ret     lr
  SYM_FUNC_END(arm946_dma_unmap_area)

-         .globl  arm946_flush_kern_cache_louis
-         .equ    arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions arm946
-
  ENTRY(cpu_arm946_dcache_clean_area)
  #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
  1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
-27
arch/arm/mm/proc-feroceon.S
···
          ret     lr
  SYM_FUNC_END(feroceon_dma_unmap_area)

-         .globl  feroceon_flush_kern_cache_louis
-         .equ    feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions feroceon
-
- .macro range_alias basename
-         .globl feroceon_range_\basename
-         .type feroceon_range_\basename , %function
-         .equ feroceon_range_\basename , feroceon_\basename
- .endm
-
- /*
-  * Most of the cache functions are unchanged for this case.
-  * Export suitable alias symbols for the unchanged functions:
-  */
-         range_alias flush_icache_all
-         range_alias flush_user_cache_all
-         range_alias flush_kern_cache_all
-         range_alias flush_kern_cache_louis
-         range_alias flush_user_cache_range
-         range_alias coherent_kern_range
-         range_alias coherent_user_range
-         range_alias dma_unmap_area
-
-         define_cache_functions feroceon_range
-
          .align  5
  ENTRY(cpu_feroceon_dcache_clean_area)
  #if defined(CONFIG_CACHE_FEROCEON_L2) && \
-18
arch/arm/mm/proc-macros.S
···
  #endif
  .endm

- .macro define_cache_functions name:req
-         .align 2
-         .type \name\()_cache_fns, #object
- ENTRY(\name\()_cache_fns)
-         .long \name\()_flush_icache_all
-         .long \name\()_flush_kern_cache_all
-         .long \name\()_flush_kern_cache_louis
-         .long \name\()_flush_user_cache_all
-         .long \name\()_flush_user_cache_range
-         .long \name\()_coherent_kern_range
-         .long \name\()_coherent_user_range
-         .long \name\()_flush_kern_dcache_area
-         .long \name\()_dma_map_area
-         .long \name\()_dma_unmap_area
-         .long \name\()_dma_flush_range
-         .size \name\()_cache_fns, . - \name\()_cache_fns
- .endm
-
  .macro globl_equ x, y
          .globl  \x
          .equ    \x, \y
-6
arch/arm/mm/proc-mohawk.S
···
          ret     lr
  SYM_FUNC_END(mohawk_dma_unmap_area)

-         .globl  mohawk_flush_kern_cache_louis
-         .equ    mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions mohawk
-
  ENTRY(cpu_mohawk_dcache_clean_area)
  1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
          add     r0, r0, #CACHE_DLINESIZE
-6
arch/arm/mm/proc-xsc3.S
···
          ret     lr
  SYM_FUNC_END(xsc3_dma_unmap_area)

-         .globl  xsc3_flush_kern_cache_louis
-         .equ    xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions xsc3
-
  ENTRY(cpu_xsc3_dcache_clean_area)
  1:      mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
          add     r0, r0, #CACHELINESIZE
+14 -43
arch/arm/mm/proc-xscale.S
···
  SYM_FUNC_END(xscale_dma_map_area)

  /*
+  * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
+  * clear the dirty bits, which means that if we invalidate a dirty line,
+  * the dirty data can still be written back to external memory later on.
+  *
+  * The recommended workaround is to always do a clean D-cache line before
+  * doing an invalidate D-cache line, so on the affected processors,
+  * dma_inv_range() is implemented as dma_flush_range().
+  *
+  * See erratum #25 of "Intel 80200 Processor Specification Update",
+  * revision January 22, 2003, available at:
+  * http://www.intel.com/design/iio/specupdt/273415.htm
+  */
+
+ /*
   * dma_map_area(start, size, dir)
   * - start - kernel virtual start address
   * - size - size of region
···
  SYM_TYPED_FUNC_START(xscale_dma_unmap_area)
          ret     lr
  SYM_FUNC_END(xscale_dma_unmap_area)
-
-         .globl  xscale_flush_kern_cache_louis
-         .equ    xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
-
-         @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-         define_cache_functions xscale
-
- /*
-  * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
-  * clear the dirty bits, which means that if we invalidate a dirty line,
-  * the dirty data can still be written back to external memory later on.
-  *
-  * The recommended workaround is to always do a clean D-cache line before
-  * doing an invalidate D-cache line, so on the affected processors,
-  * dma_inv_range() is implemented as dma_flush_range().
-  *
-  * See erratum #25 of "Intel 80200 Processor Specification Update",
-  * revision January 22, 2003, available at:
-  * http://www.intel.com/design/iio/specupdt/273415.htm
-  */
- .macro a0_alias basename
-         .globl xscale_80200_A0_A1_\basename
-         .type xscale_80200_A0_A1_\basename , %function
-         .equ xscale_80200_A0_A1_\basename , xscale_\basename
- .endm
-
- /*
-  * Most of the cache functions are unchanged for these processor revisions.
-  * Export suitable alias symbols for the unchanged functions:
-  */
-         a0_alias flush_icache_all
-         a0_alias flush_user_cache_all
-         a0_alias flush_kern_cache_all
-         a0_alias flush_kern_cache_louis
-         a0_alias flush_user_cache_range
-         a0_alias coherent_kern_range
-         a0_alias coherent_user_range
-         a0_alias flush_kern_dcache_area
-         a0_alias dma_flush_range
-         a0_alias dma_unmap_area
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions xscale_80200_A0_A1

  ENTRY(cpu_xscale_dcache_clean_area)
  1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry