Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 9385/2: mm: Type-annotate all cache assembly routines

Tag all references to assembly functions with SYM_TYPED_FUNC_START()
and SYM_FUNC_END() so they also become CFI-safe.

When we add SYM_TYPED_FUNC_START() to assembly calls, a function
prototype signature will be emitted into the object file at
(pc-4) at the call site, so that the KCFI runtime check can compare
this to the expected call. Example:

8011ae38: a540670c .word 0xa540670c

8011ae3c <v7_flush_icache_all>:
8011ae3c: e3a00000 mov r0, #0
8011ae40: ee070f11 mcr 15, 0, r0, cr7, cr1, {0}
8011ae44: e12fff1e bx lr

This means no "fallthrough" code can enter a SYM_TYPED_FUNC_START()
call from above it: there will be a function prototype signature
there, so those are consistently converted to a branch or ret lr
depending on context.

Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

Authored by Linus Walleij; committed by Russell King (Oracle).
Commit IDs: 1036b895 6b0ef279

+544 -373
+24 -15
arch/arm/mm/cache-fa.S
··· 12 12 */ 13 13 #include <linux/linkage.h> 14 14 #include <linux/init.h> 15 + #include <linux/cfi_types.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/page.h> 17 18 ··· 40 39 * 41 40 * Unconditionally clean and invalidate the entire icache. 42 41 */ 43 - ENTRY(fa_flush_icache_all) 42 + SYM_TYPED_FUNC_START(fa_flush_icache_all) 44 43 mov r0, #0 45 44 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 46 45 ret lr 47 - ENDPROC(fa_flush_icache_all) 46 + SYM_FUNC_END(fa_flush_icache_all) 48 47 49 48 /* 50 49 * flush_user_cache_all() ··· 52 51 * Clean and invalidate all cache entries in a particular address 53 52 * space. 54 53 */ 55 - ENTRY(fa_flush_user_cache_all) 56 - /* FALLTHROUGH */ 54 + SYM_TYPED_FUNC_START(fa_flush_user_cache_all) 55 + b fa_flush_kern_cache_all 56 + SYM_FUNC_END(fa_flush_user_cache_all) 57 + 57 58 /* 58 59 * flush_kern_cache_all() 59 60 * 60 61 * Clean and invalidate the entire cache. 61 62 */ 62 - ENTRY(fa_flush_kern_cache_all) 63 + SYM_TYPED_FUNC_START(fa_flush_kern_cache_all) 63 64 mov ip, #0 64 65 mov r2, #VM_EXEC 65 66 __flush_whole_cache: ··· 72 69 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 73 70 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 74 71 ret lr 72 + SYM_FUNC_END(fa_flush_kern_cache_all) 75 73 76 74 /* 77 75 * flush_user_cache_range(start, end, flags) ··· 84 80 * - end - end address (exclusive, page aligned) 85 81 * - flags - vma_area_struct flags describing address space 86 82 */ 87 - ENTRY(fa_flush_user_cache_range) 83 + SYM_TYPED_FUNC_START(fa_flush_user_cache_range) 88 84 mov ip, #0 89 85 sub r3, r1, r0 @ calculate total size 90 86 cmp r3, #CACHE_DLIMIT @ total size >= limit? 
··· 101 97 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 102 98 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 103 99 ret lr 100 + SYM_FUNC_END(fa_flush_user_cache_range) 104 101 105 102 /* 106 103 * coherent_kern_range(start, end) ··· 113 108 * - start - virtual start address 114 109 * - end - virtual end address 115 110 */ 116 - ENTRY(fa_coherent_kern_range) 117 - /* fall through */ 111 + SYM_TYPED_FUNC_START(fa_coherent_kern_range) 112 + b fa_coherent_user_range 113 + SYM_FUNC_END(fa_coherent_kern_range) 118 114 119 115 /* 120 116 * coherent_user_range(start, end) ··· 127 121 * - start - virtual start address 128 122 * - end - virtual end address 129 123 */ 130 - ENTRY(fa_coherent_user_range) 124 + SYM_TYPED_FUNC_START(fa_coherent_user_range) 131 125 bic r0, r0, #CACHE_DLINESIZE - 1 132 126 1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry 133 127 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 139 133 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 140 134 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 141 135 ret lr 136 + SYM_FUNC_END(fa_coherent_user_range) 142 137 143 138 /* 144 139 * flush_kern_dcache_area(void *addr, size_t size) ··· 150 143 * - addr - kernel address 151 144 * - size - size of region 152 145 */ 153 - ENTRY(fa_flush_kern_dcache_area) 146 + SYM_TYPED_FUNC_START(fa_flush_kern_dcache_area) 154 147 add r1, r0, r1 155 148 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line 156 149 add r0, r0, #CACHE_DLINESIZE ··· 160 153 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 161 154 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 162 155 ret lr 156 + SYM_FUNC_END(fa_flush_kern_dcache_area) 163 157 164 158 /* 165 159 * dma_inv_range(start, end) ··· 211 203 * - start - virtual start address of region 212 204 * - end - virtual end address of region 213 205 */ 214 - ENTRY(fa_dma_flush_range) 206 + SYM_TYPED_FUNC_START(fa_dma_flush_range) 215 207 bic r0, r0, #CACHE_DLINESIZE - 1 216 208 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D 
entry 217 209 add r0, r0, #CACHE_DLINESIZE ··· 220 212 mov r0, #0 221 213 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 222 214 ret lr 215 + SYM_FUNC_END(fa_dma_flush_range) 223 216 224 217 /* 225 218 * dma_map_area(start, size, dir) ··· 228 219 * - size - size of region 229 220 * - dir - DMA direction 230 221 */ 231 - ENTRY(fa_dma_map_area) 222 + SYM_TYPED_FUNC_START(fa_dma_map_area) 232 223 add r1, r1, r0 233 224 cmp r2, #DMA_TO_DEVICE 234 225 beq fa_dma_clean_range 235 226 bcs fa_dma_inv_range 236 227 b fa_dma_flush_range 237 - ENDPROC(fa_dma_map_area) 228 + SYM_FUNC_END(fa_dma_map_area) 238 229 239 230 /* 240 231 * dma_unmap_area(start, size, dir) ··· 242 233 * - size - size of region 243 234 * - dir - DMA direction 244 235 */ 245 - ENTRY(fa_dma_unmap_area) 236 + SYM_TYPED_FUNC_START(fa_dma_unmap_area) 246 237 ret lr 247 - ENDPROC(fa_dma_unmap_area) 238 + SYM_FUNC_END(fa_dma_unmap_area) 248 239 249 240 .globl fa_flush_kern_cache_louis 250 241 .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all
+30 -21
arch/arm/mm/cache-nop.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 #include <linux/linkage.h> 3 3 #include <linux/init.h> 4 + #include <linux/cfi_types.h> 4 5 #include <asm/assembler.h> 5 6 6 7 #include "proc-macros.S" 7 8 8 - ENTRY(nop_flush_icache_all) 9 + SYM_TYPED_FUNC_START(nop_flush_icache_all) 9 10 ret lr 10 - ENDPROC(nop_flush_icache_all) 11 + SYM_FUNC_END(nop_flush_icache_all) 11 12 12 - .globl nop_flush_kern_cache_all 13 - .equ nop_flush_kern_cache_all, nop_flush_icache_all 13 + SYM_TYPED_FUNC_START(nop_flush_kern_cache_all) 14 + ret lr 15 + SYM_FUNC_END(nop_flush_kern_cache_all) 14 16 15 17 .globl nop_flush_kern_cache_louis 16 18 .equ nop_flush_kern_cache_louis, nop_flush_icache_all 17 19 18 - .globl nop_flush_user_cache_all 19 - .equ nop_flush_user_cache_all, nop_flush_icache_all 20 + SYM_TYPED_FUNC_START(nop_flush_user_cache_all) 21 + ret lr 22 + SYM_FUNC_END(nop_flush_user_cache_all) 20 23 21 - .globl nop_flush_user_cache_range 22 - .equ nop_flush_user_cache_range, nop_flush_icache_all 24 + SYM_TYPED_FUNC_START(nop_flush_user_cache_range) 25 + ret lr 26 + SYM_FUNC_END(nop_flush_user_cache_range) 23 27 24 - .globl nop_coherent_kern_range 25 - .equ nop_coherent_kern_range, nop_flush_icache_all 28 + SYM_TYPED_FUNC_START(nop_coherent_kern_range) 29 + ret lr 30 + SYM_FUNC_END(nop_coherent_kern_range) 26 31 27 - ENTRY(nop_coherent_user_range) 32 + SYM_TYPED_FUNC_START(nop_coherent_user_range) 28 33 mov r0, 0 29 34 ret lr 30 - ENDPROC(nop_coherent_user_range) 35 + SYM_FUNC_END(nop_coherent_user_range) 31 36 32 - .globl nop_flush_kern_dcache_area 33 - .equ nop_flush_kern_dcache_area, nop_flush_icache_all 37 + SYM_TYPED_FUNC_START(nop_flush_kern_dcache_area) 38 + ret lr 39 + SYM_FUNC_END(nop_flush_kern_dcache_area) 34 40 35 - .globl nop_dma_flush_range 36 - .equ nop_dma_flush_range, nop_flush_icache_all 41 + SYM_TYPED_FUNC_START(nop_dma_flush_range) 42 + ret lr 43 + SYM_FUNC_END(nop_dma_flush_range) 37 44 38 - .globl nop_dma_map_area 39 - .equ nop_dma_map_area, 
nop_flush_icache_all 40 - 41 - .globl nop_dma_unmap_area 42 - .equ nop_dma_unmap_area, nop_flush_icache_all 45 + SYM_TYPED_FUNC_START(nop_dma_map_area) 46 + ret lr 47 + SYM_FUNC_END(nop_dma_map_area) 43 48 44 49 __INITDATA 45 50 46 51 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 47 52 define_cache_functions nop 53 + 54 + SYM_TYPED_FUNC_START(nop_dma_unmap_area) 55 + ret lr 56 + SYM_FUNC_END(nop_dma_unmap_area)
+28 -19
arch/arm/mm/cache-v4.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <asm/assembler.h> 10 11 #include <asm/page.h> 11 12 #include "proc-macros.S" ··· 16 15 * 17 16 * Unconditionally clean and invalidate the entire icache. 18 17 */ 19 - ENTRY(v4_flush_icache_all) 18 + SYM_TYPED_FUNC_START(v4_flush_icache_all) 20 19 ret lr 21 - ENDPROC(v4_flush_icache_all) 20 + SYM_FUNC_END(v4_flush_icache_all) 22 21 23 22 /* 24 23 * flush_user_cache_all() ··· 28 27 * 29 28 * - mm - mm_struct describing address space 30 29 */ 31 - ENTRY(v4_flush_user_cache_all) 32 - /* FALLTHROUGH */ 30 + SYM_TYPED_FUNC_START(v4_flush_user_cache_all) 31 + b v4_flush_kern_cache_all 32 + SYM_FUNC_END(v4_flush_user_cache_all) 33 + 33 34 /* 34 35 * flush_kern_cache_all() 35 36 * 36 37 * Clean and invalidate the entire cache. 37 38 */ 38 - ENTRY(v4_flush_kern_cache_all) 39 + SYM_TYPED_FUNC_START(v4_flush_kern_cache_all) 39 40 #ifdef CONFIG_CPU_CP15 40 41 mov r0, #0 41 42 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 42 43 ret lr 43 44 #else 44 - /* FALLTHROUGH */ 45 + ret lr 45 46 #endif 47 + SYM_FUNC_END(v4_flush_kern_cache_all) 46 48 47 49 /* 48 50 * flush_user_cache_range(start, end, flags) ··· 57 53 * - end - end address (exclusive, may not be aligned) 58 54 * - flags - vma_area_struct flags describing address space 59 55 */ 60 - ENTRY(v4_flush_user_cache_range) 56 + SYM_TYPED_FUNC_START(v4_flush_user_cache_range) 61 57 #ifdef CONFIG_CPU_CP15 62 58 mov ip, #0 63 59 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 64 60 ret lr 65 61 #else 66 - /* FALLTHROUGH */ 62 + ret lr 67 63 #endif 64 + SYM_FUNC_END(v4_flush_user_cache_range) 68 65 69 66 /* 70 67 * coherent_kern_range(start, end) ··· 77 72 * - start - virtual start address 78 73 * - end - virtual end address 79 74 */ 80 - ENTRY(v4_coherent_kern_range) 81 - /* FALLTHROUGH */ 75 + SYM_TYPED_FUNC_START(v4_coherent_kern_range) 76 + ret lr 77 + SYM_FUNC_END(v4_coherent_kern_range) 82 78 83 79 /* 84 80 * 
coherent_user_range(start, end) ··· 91 85 * - start - virtual start address 92 86 * - end - virtual end address 93 87 */ 94 - ENTRY(v4_coherent_user_range) 88 + SYM_TYPED_FUNC_START(v4_coherent_user_range) 95 89 mov r0, #0 96 90 ret lr 91 + SYM_FUNC_END(v4_coherent_user_range) 97 92 98 93 /* 99 94 * flush_kern_dcache_area(void *addr, size_t size) ··· 105 98 * - addr - kernel address 106 99 * - size - region size 107 100 */ 108 - ENTRY(v4_flush_kern_dcache_area) 109 - /* FALLTHROUGH */ 101 + SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area) 102 + b v4_dma_flush_range 103 + SYM_FUNC_END(v4_flush_kern_dcache_area) 110 104 111 105 /* 112 106 * dma_flush_range(start, end) ··· 117 109 * - start - virtual start address 118 110 * - end - virtual end address 119 111 */ 120 - ENTRY(v4_dma_flush_range) 112 + SYM_TYPED_FUNC_START(v4_dma_flush_range) 121 113 #ifdef CONFIG_CPU_CP15 122 114 mov r0, #0 123 115 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 124 116 #endif 125 117 ret lr 118 + SYM_FUNC_END(v4_dma_flush_range) 126 119 127 120 /* 128 121 * dma_unmap_area(start, size, dir) ··· 131 122 * - size - size of region 132 123 * - dir - DMA direction 133 124 */ 134 - ENTRY(v4_dma_unmap_area) 125 + SYM_TYPED_FUNC_START(v4_dma_unmap_area) 135 126 teq r2, #DMA_TO_DEVICE 136 127 bne v4_dma_flush_range 137 - /* FALLTHROUGH */ 128 + ret lr 129 + SYM_FUNC_END(v4_dma_unmap_area) 138 130 139 131 /* 140 132 * dma_map_area(start, size, dir) ··· 143 133 * - size - size of region 144 134 * - dir - DMA direction 145 135 */ 146 - ENTRY(v4_dma_map_area) 136 + SYM_TYPED_FUNC_START(v4_dma_map_area) 147 137 ret lr 148 - ENDPROC(v4_dma_unmap_area) 149 - ENDPROC(v4_dma_map_area) 138 + SYM_FUNC_END(v4_dma_map_area) 150 139 151 140 .globl v4_flush_kern_cache_louis 152 141 .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all
+23 -16
arch/arm/mm/cache-v4wb.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <asm/assembler.h> 10 11 #include <asm/page.h> 11 12 #include "proc-macros.S" ··· 54 53 * 55 54 * Unconditionally clean and invalidate the entire icache. 56 55 */ 57 - ENTRY(v4wb_flush_icache_all) 56 + SYM_TYPED_FUNC_START(v4wb_flush_icache_all) 58 57 mov r0, #0 59 58 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 60 59 ret lr 61 - ENDPROC(v4wb_flush_icache_all) 60 + SYM_FUNC_END(v4wb_flush_icache_all) 62 61 63 62 /* 64 63 * flush_user_cache_all() ··· 66 65 * Clean and invalidate all cache entries in a particular address 67 66 * space. 68 67 */ 69 - ENTRY(v4wb_flush_user_cache_all) 70 - /* FALLTHROUGH */ 68 + SYM_TYPED_FUNC_START(v4wb_flush_user_cache_all) 69 + b v4wb_flush_kern_cache_all 70 + SYM_FUNC_END(v4wb_flush_user_cache_all) 71 + 71 72 /* 72 73 * flush_kern_cache_all() 73 74 * 74 75 * Clean and invalidate the entire cache. 75 76 */ 76 - ENTRY(v4wb_flush_kern_cache_all) 77 + SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all) 77 78 mov ip, #0 78 79 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 79 80 __flush_whole_cache: ··· 96 93 #endif 97 94 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer 98 95 ret lr 96 + SYM_FUNC_END(v4wb_flush_kern_cache_all) 99 97 100 98 /* 101 99 * flush_user_cache_range(start, end, flags) ··· 108 104 * - end - end address (exclusive, page aligned) 109 105 * - flags - vma_area_struct flags describing address space 110 106 */ 111 - ENTRY(v4wb_flush_user_cache_range) 107 + SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range) 112 108 mov ip, #0 113 109 sub r3, r1, r0 @ calculate total size 114 110 tst r2, #VM_EXEC @ executable region? 
··· 125 121 tst r2, #VM_EXEC 126 122 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 127 123 ret lr 124 + SYM_FUNC_END(v4wb_flush_user_cache_range) 128 125 129 126 /* 130 127 * flush_kern_dcache_area(void *addr, size_t size) ··· 136 131 * - addr - kernel address 137 132 * - size - region size 138 133 */ 139 - ENTRY(v4wb_flush_kern_dcache_area) 134 + SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area) 140 135 add r1, r0, r1 141 - /* fall through */ 136 + b v4wb_coherent_user_range 137 + SYM_FUNC_END(v4wb_flush_kern_dcache_area) 142 138 143 139 /* 144 140 * coherent_kern_range(start, end) ··· 151 145 * - start - virtual start address 152 146 * - end - virtual end address 153 147 */ 154 - ENTRY(v4wb_coherent_kern_range) 155 - /* fall through */ 148 + SYM_TYPED_FUNC_START(v4wb_coherent_kern_range) 149 + b v4wb_coherent_user_range 150 + SYM_FUNC_END(v4wb_coherent_kern_range) 156 151 157 152 /* 158 153 * coherent_user_range(start, end) ··· 165 158 * - start - virtual start address 166 159 * - end - virtual end address 167 160 */ 168 - ENTRY(v4wb_coherent_user_range) 161 + SYM_TYPED_FUNC_START(v4wb_coherent_user_range) 169 162 bic r0, r0, #CACHE_DLINESIZE - 1 170 163 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 171 164 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry ··· 176 169 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 177 170 mcr p15, 0, r0, c7, c10, 4 @ drain WB 178 171 ret lr 179 - 172 + SYM_FUNC_END(v4wb_coherent_user_range) 180 173 181 174 /* 182 175 * dma_inv_range(start, end) ··· 238 231 * - size - size of region 239 232 * - dir - DMA direction 240 233 */ 241 - ENTRY(v4wb_dma_map_area) 234 + SYM_TYPED_FUNC_START(v4wb_dma_map_area) 242 235 add r1, r1, r0 243 236 cmp r2, #DMA_TO_DEVICE 244 237 beq v4wb_dma_clean_range 245 238 bcs v4wb_dma_inv_range 246 239 b v4wb_dma_flush_range 247 - ENDPROC(v4wb_dma_map_area) 240 + SYM_FUNC_END(v4wb_dma_map_area) 248 241 249 242 /* 250 243 * dma_unmap_area(start, size, dir) ··· 252 245 * - size - size of region 253 246 * - dir 
- DMA direction 254 247 */ 255 - ENTRY(v4wb_dma_unmap_area) 248 + SYM_TYPED_FUNC_START(v4wb_dma_unmap_area) 256 249 ret lr 257 - ENDPROC(v4wb_dma_unmap_area) 250 + SYM_FUNC_END(v4wb_dma_unmap_area) 258 251 259 252 .globl v4wb_flush_kern_cache_louis 260 253 .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
+28 -19
arch/arm/mm/cache-v4wt.S
··· 10 10 */ 11 11 #include <linux/linkage.h> 12 12 #include <linux/init.h> 13 + #include <linux/cfi_types.h> 13 14 #include <asm/assembler.h> 14 15 #include <asm/page.h> 15 16 #include "proc-macros.S" ··· 44 43 * 45 44 * Unconditionally clean and invalidate the entire icache. 46 45 */ 47 - ENTRY(v4wt_flush_icache_all) 46 + SYM_TYPED_FUNC_START(v4wt_flush_icache_all) 48 47 mov r0, #0 49 48 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 50 49 ret lr 51 - ENDPROC(v4wt_flush_icache_all) 50 + SYM_FUNC_END(v4wt_flush_icache_all) 52 51 53 52 /* 54 53 * flush_user_cache_all() ··· 56 55 * Invalidate all cache entries in a particular address 57 56 * space. 58 57 */ 59 - ENTRY(v4wt_flush_user_cache_all) 60 - /* FALLTHROUGH */ 58 + SYM_TYPED_FUNC_START(v4wt_flush_user_cache_all) 59 + b v4wt_flush_kern_cache_all 60 + SYM_FUNC_END(v4wt_flush_user_cache_all) 61 + 61 62 /* 62 63 * flush_kern_cache_all() 63 64 * 64 65 * Clean and invalidate the entire cache. 65 66 */ 66 - ENTRY(v4wt_flush_kern_cache_all) 67 + SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all) 67 68 mov r2, #VM_EXEC 68 69 mov ip, #0 69 70 __flush_whole_cache: ··· 73 70 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 74 71 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 75 72 ret lr 73 + SYM_FUNC_END(v4wt_flush_kern_cache_all) 76 74 77 75 /* 78 76 * flush_user_cache_range(start, end, flags) ··· 85 81 * - end - end address (exclusive, page aligned) 86 82 * - flags - vma_area_struct flags describing address space 87 83 */ 88 - ENTRY(v4wt_flush_user_cache_range) 84 + SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range) 89 85 sub r3, r1, r0 @ calculate total size 90 86 cmp r3, #CACHE_DLIMIT 91 87 bhs __flush_whole_cache ··· 97 93 cmp r0, r1 98 94 blo 1b 99 95 ret lr 96 + SYM_FUNC_END(v4wt_flush_user_cache_range) 100 97 101 98 /* 102 99 * coherent_kern_range(start, end) ··· 109 104 * - start - virtual start address 110 105 * - end - virtual end address 111 106 */ 112 - ENTRY(v4wt_coherent_kern_range) 113 - /* FALLTRHOUGH */ 
107 + SYM_TYPED_FUNC_START(v4wt_coherent_kern_range) 108 + b v4wt_coherent_user_range 109 + SYM_FUNC_END(v4wt_coherent_kern_range) 114 110 115 111 /* 116 112 * coherent_user_range(start, end) ··· 123 117 * - start - virtual start address 124 118 * - end - virtual end address 125 119 */ 126 - ENTRY(v4wt_coherent_user_range) 120 + SYM_TYPED_FUNC_START(v4wt_coherent_user_range) 127 121 bic r0, r0, #CACHE_DLINESIZE - 1 128 122 1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry 129 123 add r0, r0, #CACHE_DLINESIZE ··· 131 125 blo 1b 132 126 mov r0, #0 133 127 ret lr 128 + SYM_FUNC_END(v4wt_coherent_user_range) 134 129 135 130 /* 136 131 * flush_kern_dcache_area(void *addr, size_t size) ··· 142 135 * - addr - kernel address 143 136 * - size - region size 144 137 */ 145 - ENTRY(v4wt_flush_kern_dcache_area) 138 + SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area) 146 139 mov r2, #0 147 140 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache 148 141 add r1, r0, r1 149 - /* fallthrough */ 142 + b v4wt_dma_inv_range 143 + SYM_FUNC_END(v4wt_flush_kern_dcache_area) 150 144 151 145 /* 152 146 * dma_inv_range(start, end) ··· 175 167 * 176 168 * - start - virtual start address 177 169 * - end - virtual end address 178 - */ 179 - .globl v4wt_dma_flush_range 180 - .equ v4wt_dma_flush_range, v4wt_dma_inv_range 170 + */ 171 + SYM_TYPED_FUNC_START(v4wt_dma_flush_range) 172 + b v4wt_dma_inv_range 173 + SYM_FUNC_END(v4wt_dma_flush_range) 181 174 182 175 /* 183 176 * dma_unmap_area(start, size, dir) ··· 186 177 * - size - size of region 187 178 * - dir - DMA direction 188 179 */ 189 - ENTRY(v4wt_dma_unmap_area) 180 + SYM_TYPED_FUNC_START(v4wt_dma_unmap_area) 190 181 add r1, r1, r0 191 182 teq r2, #DMA_TO_DEVICE 192 183 bne v4wt_dma_inv_range 193 - /* FALLTHROUGH */ 184 + ret lr 185 + SYM_FUNC_END(v4wt_dma_unmap_area) 194 186 195 187 /* 196 188 * dma_map_area(start, size, dir) ··· 199 189 * - size - size of region 200 190 * - dir - DMA direction 201 191 */ 202 - ENTRY(v4wt_dma_map_area) 192 + 
SYM_TYPED_FUNC_START(v4wt_dma_map_area) 203 193 ret lr 204 - ENDPROC(v4wt_dma_unmap_area) 205 - ENDPROC(v4wt_dma_map_area) 194 + SYM_FUNC_END(v4wt_dma_map_area) 206 195 207 196 .globl v4wt_flush_kern_cache_louis 208 197 .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
+23 -18
arch/arm/mm/cache-v6.S
··· 8 8 */ 9 9 #include <linux/linkage.h> 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <asm/assembler.h> 12 13 #include <asm/errno.h> 13 14 #include <asm/unwind.h> ··· 35 34 * r0 - set to 0 36 35 * r1 - corrupted 37 36 */ 38 - ENTRY(v6_flush_icache_all) 37 + SYM_TYPED_FUNC_START(v6_flush_icache_all) 39 38 mov r0, #0 40 39 #ifdef CONFIG_ARM_ERRATA_411920 41 40 mrs r1, cpsr ··· 52 51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache 53 52 #endif 54 53 ret lr 55 - ENDPROC(v6_flush_icache_all) 54 + SYM_FUNC_END(v6_flush_icache_all) 56 55 57 56 /* 58 57 * v6_flush_cache_all() ··· 61 60 * 62 61 * It is assumed that: 63 62 */ 64 - ENTRY(v6_flush_kern_cache_all) 63 + SYM_TYPED_FUNC_START(v6_flush_kern_cache_all) 65 64 mov r0, #0 66 65 #ifdef HARVARD_CACHE 67 66 mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate ··· 74 73 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate 75 74 #endif 76 75 ret lr 76 + SYM_FUNC_END(v6_flush_kern_cache_all) 77 77 78 78 /* 79 79 * v6_flush_cache_all() ··· 83 81 * 84 82 * - mm - mm_struct describing address space 85 83 */ 86 - ENTRY(v6_flush_user_cache_all) 87 - /*FALLTHROUGH*/ 84 + SYM_TYPED_FUNC_START(v6_flush_user_cache_all) 85 + ret lr 86 + SYM_FUNC_END(v6_flush_user_cache_all) 88 87 89 88 /* 90 89 * v6_flush_cache_range(start, end, flags) ··· 99 96 * It is assumed that: 100 97 * - we have a VIPT cache. 
101 98 */ 102 - ENTRY(v6_flush_user_cache_range) 99 + SYM_TYPED_FUNC_START(v6_flush_user_cache_range) 103 100 ret lr 101 + SYM_FUNC_END(v6_flush_user_cache_range) 104 102 105 103 /* 106 104 * v6_coherent_kern_range(start,end) ··· 116 112 * It is assumed that: 117 113 * - the Icache does not read data from the write buffer 118 114 */ 119 - ENTRY(v6_coherent_kern_range) 120 - /* FALLTHROUGH */ 115 + SYM_TYPED_FUNC_START(v6_coherent_kern_range) 116 + b v6_coherent_user_range 117 + SYM_FUNC_END(v6_coherent_kern_range) 121 118 122 119 /* 123 120 * v6_coherent_user_range(start,end) ··· 133 128 * It is assumed that: 134 129 * - the Icache does not read data from the write buffer 135 130 */ 136 - ENTRY(v6_coherent_user_range) 131 + SYM_TYPED_FUNC_START(v6_coherent_user_range) 137 132 UNWIND(.fnstart ) 138 133 #ifdef HARVARD_CACHE 139 134 bic r0, r0, #CACHE_LINE_SIZE - 1 ··· 164 159 mov r0, #-EFAULT 165 160 ret lr 166 161 UNWIND(.fnend ) 167 - ENDPROC(v6_coherent_user_range) 168 - ENDPROC(v6_coherent_kern_range) 162 + SYM_FUNC_END(v6_coherent_user_range) 169 163 170 164 /* 171 165 * v6_flush_kern_dcache_area(void *addr, size_t size) ··· 175 171 * - addr - kernel address 176 172 * - size - region size 177 173 */ 178 - ENTRY(v6_flush_kern_dcache_area) 174 + SYM_TYPED_FUNC_START(v6_flush_kern_dcache_area) 179 175 add r1, r0, r1 180 176 bic r0, r0, #D_CACHE_LINE_SIZE - 1 181 177 1: ··· 192 188 mcr p15, 0, r0, c7, c10, 4 193 189 #endif 194 190 ret lr 195 - 191 + SYM_FUNC_END(v6_flush_kern_dcache_area) 196 192 197 193 /* 198 194 * v6_dma_inv_range(start,end) ··· 257 253 * - start - virtual start address of region 258 254 * - end - virtual end address of region 259 255 */ 260 - ENTRY(v6_dma_flush_range) 256 + SYM_TYPED_FUNC_START(v6_dma_flush_range) 261 257 bic r0, r0, #D_CACHE_LINE_SIZE - 1 262 258 1: 263 259 #ifdef HARVARD_CACHE ··· 271 267 mov r0, #0 272 268 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 273 269 ret lr 270 + SYM_FUNC_END(v6_dma_flush_range) 274 271 275 272 /* 
276 273 * dma_map_area(start, size, dir) ··· 279 274 * - size - size of region 280 275 * - dir - DMA direction 281 276 */ 282 - ENTRY(v6_dma_map_area) 277 + SYM_TYPED_FUNC_START(v6_dma_map_area) 283 278 add r1, r1, r0 284 279 teq r2, #DMA_FROM_DEVICE 285 280 beq v6_dma_inv_range 286 281 b v6_dma_clean_range 287 - ENDPROC(v6_dma_map_area) 282 + SYM_FUNC_END(v6_dma_map_area) 288 283 289 284 /* 290 285 * dma_unmap_area(start, size, dir) ··· 292 287 * - size - size of region 293 288 * - dir - DMA direction 294 289 */ 295 - ENTRY(v6_dma_unmap_area) 290 + SYM_TYPED_FUNC_START(v6_dma_unmap_area) 296 291 add r1, r1, r0 297 292 teq r2, #DMA_TO_DEVICE 298 293 bne v6_dma_inv_range 299 294 ret lr 300 - ENDPROC(v6_dma_unmap_area) 295 + SYM_FUNC_END(v6_dma_unmap_area) 301 296 302 297 .globl v6_flush_kern_cache_louis 303 298 .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
+25 -24
arch/arm/mm/cache-v7.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <linux/init.h> 12 + #include <linux/cfi_types.h> 12 13 #include <asm/assembler.h> 13 14 #include <asm/errno.h> 14 15 #include <asm/unwind.h> ··· 81 80 * Registers: 82 81 * r0 - set to 0 83 82 */ 84 - ENTRY(v7_flush_icache_all) 83 + SYM_TYPED_FUNC_START(v7_flush_icache_all) 85 84 mov r0, #0 86 85 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable 87 86 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 88 87 ret lr 89 - ENDPROC(v7_flush_icache_all) 88 + SYM_FUNC_END(v7_flush_icache_all) 90 89 91 90 /* 92 91 * v7_flush_dcache_louis() ··· 194 193 * unification in a single instruction. 195 194 * 196 195 */ 197 - ENTRY(v7_flush_kern_cache_all) 196 + SYM_TYPED_FUNC_START(v7_flush_kern_cache_all) 198 197 stmfd sp!, {r4-r6, r9-r10, lr} 199 198 bl v7_flush_dcache_all 200 199 mov r0, #0 ··· 202 201 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 203 202 ldmfd sp!, {r4-r6, r9-r10, lr} 204 203 ret lr 205 - ENDPROC(v7_flush_kern_cache_all) 204 + SYM_FUNC_END(v7_flush_kern_cache_all) 206 205 207 206 /* 208 207 * v7_flush_kern_cache_louis(void) ··· 210 209 * Flush the data cache up to Level of Unification Inner Shareable. 211 210 * Invalidate the I-cache to the point of unification. 
212 211 */ 213 - ENTRY(v7_flush_kern_cache_louis) 212 + SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis) 214 213 stmfd sp!, {r4-r6, r9-r10, lr} 215 214 bl v7_flush_dcache_louis 216 215 mov r0, #0 ··· 218 217 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 219 218 ldmfd sp!, {r4-r6, r9-r10, lr} 220 219 ret lr 221 - ENDPROC(v7_flush_kern_cache_louis) 220 + SYM_FUNC_END(v7_flush_kern_cache_louis) 222 221 223 222 /* 224 223 * v7_flush_cache_all() ··· 227 226 * 228 227 * - mm - mm_struct describing address space 229 228 */ 230 - ENTRY(v7_flush_user_cache_all) 231 - /*FALLTHROUGH*/ 229 + SYM_TYPED_FUNC_START(v7_flush_user_cache_all) 230 + ret lr 231 + SYM_FUNC_END(v7_flush_user_cache_all) 232 232 233 233 /* 234 234 * v7_flush_cache_range(start, end, flags) ··· 243 241 * It is assumed that: 244 242 * - we have a VIPT cache. 245 243 */ 246 - ENTRY(v7_flush_user_cache_range) 244 + SYM_TYPED_FUNC_START(v7_flush_user_cache_range) 247 245 ret lr 248 - ENDPROC(v7_flush_user_cache_all) 249 - ENDPROC(v7_flush_user_cache_range) 246 + SYM_FUNC_END(v7_flush_user_cache_range) 250 247 251 248 /* 252 249 * v7_coherent_kern_range(start,end) ··· 260 259 * It is assumed that: 261 260 * - the Icache does not read data from the write buffer 262 261 */ 263 - ENTRY(v7_coherent_kern_range) 264 - /* FALLTHROUGH */ 262 + SYM_TYPED_FUNC_START(v7_coherent_kern_range) 263 + b v7_coherent_user_range 264 + SYM_FUNC_END(v7_coherent_kern_range) 265 265 266 266 /* 267 267 * v7_coherent_user_range(start,end) ··· 277 275 * It is assumed that: 278 276 * - the Icache does not read data from the write buffer 279 277 */ 280 - ENTRY(v7_coherent_user_range) 278 + SYM_TYPED_FUNC_START(v7_coherent_user_range) 281 279 UNWIND(.fnstart ) 282 280 dcache_line_size r2, r3 283 281 sub r3, r2, #1 ··· 323 321 mov r0, #-EFAULT 324 322 ret lr 325 323 UNWIND(.fnend ) 326 - ENDPROC(v7_coherent_kern_range) 327 - ENDPROC(v7_coherent_user_range) 324 + SYM_FUNC_END(v7_coherent_user_range) 328 325 329 326 /* 330 327 * 
v7_flush_kern_dcache_area(void *addr, size_t size) ··· 334 333 * - addr - kernel address 335 334 * - size - region size 336 335 */ 337 - ENTRY(v7_flush_kern_dcache_area) 336 + SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area) 338 337 dcache_line_size r2, r3 339 338 add r1, r0, r1 340 339 sub r3, r2, #1 ··· 350 349 blo 1b 351 350 dsb st 352 351 ret lr 353 - ENDPROC(v7_flush_kern_dcache_area) 352 + SYM_FUNC_END(v7_flush_kern_dcache_area) 354 353 355 354 /* 356 355 * v7_dma_inv_range(start,end) ··· 414 413 * - start - virtual start address of region 415 414 * - end - virtual end address of region 416 415 */ 417 - ENTRY(v7_dma_flush_range) 416 + SYM_TYPED_FUNC_START(v7_dma_flush_range) 418 417 dcache_line_size r2, r3 419 418 sub r3, r2, #1 420 419 bic r0, r0, r3 ··· 429 428 blo 1b 430 429 dsb st 431 430 ret lr 432 - ENDPROC(v7_dma_flush_range) 431 + SYM_FUNC_END(v7_dma_flush_range) 433 432 434 433 /* 435 434 * dma_map_area(start, size, dir) ··· 437 436 * - size - size of region 438 437 * - dir - DMA direction 439 438 */ 440 - ENTRY(v7_dma_map_area) 439 + SYM_TYPED_FUNC_START(v7_dma_map_area) 441 440 add r1, r1, r0 442 441 teq r2, #DMA_FROM_DEVICE 443 442 beq v7_dma_inv_range 444 443 b v7_dma_clean_range 445 - ENDPROC(v7_dma_map_area) 444 + SYM_FUNC_END(v7_dma_map_area) 446 445 447 446 /* 448 447 * dma_unmap_area(start, size, dir) ··· 450 449 * - size - size of region 451 450 * - dir - DMA direction 452 451 */ 453 - ENTRY(v7_dma_unmap_area) 452 + SYM_TYPED_FUNC_START(v7_dma_unmap_area) 454 453 add r1, r1, r0 455 454 teq r2, #DMA_TO_DEVICE 456 455 bne v7_dma_inv_range 457 456 ret lr 458 - ENDPROC(v7_dma_unmap_area) 457 + SYM_FUNC_END(v7_dma_unmap_area) 459 458 460 459 __INITDATA 461 460
+23 -22
arch/arm/mm/cache-v7m.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <asm/assembler.h> 15 16 #include <asm/errno.h> 16 17 #include <asm/unwind.h> ··· 160 159 * Registers: 161 160 * r0 - set to 0 162 161 */ 163 - ENTRY(v7m_flush_icache_all) 162 + SYM_TYPED_FUNC_START(v7m_flush_icache_all) 164 163 invalidate_icache r0 165 164 ret lr 166 - ENDPROC(v7m_flush_icache_all) 165 + SYM_FUNC_END(v7m_flush_icache_all) 167 166 168 167 /* 169 168 * v7m_flush_dcache_all() ··· 237 236 * unification in a single instruction. 238 237 * 239 238 */ 240 - ENTRY(v7m_flush_kern_cache_all) 239 + SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all) 241 240 stmfd sp!, {r4-r7, r9-r11, lr} 242 241 bl v7m_flush_dcache_all 243 242 invalidate_icache r0 244 243 ldmfd sp!, {r4-r7, r9-r11, lr} 245 244 ret lr 246 - ENDPROC(v7m_flush_kern_cache_all) 245 + SYM_FUNC_END(v7m_flush_kern_cache_all) 247 246 248 247 /* 249 248 * v7m_flush_cache_all() ··· 252 251 * 253 252 * - mm - mm_struct describing address space 254 253 */ 255 - ENTRY(v7m_flush_user_cache_all) 256 - /*FALLTHROUGH*/ 254 + SYM_TYPED_FUNC_START(v7m_flush_user_cache_all) 255 + ret lr 256 + SYM_FUNC_END(v7m_flush_user_cache_all) 257 257 258 258 /* 259 259 * v7m_flush_cache_range(start, end, flags) ··· 268 266 * It is assumed that: 269 267 * - we have a VIPT cache. 
270 268 */ 271 - ENTRY(v7m_flush_user_cache_range) 269 + SYM_TYPED_FUNC_START(v7m_flush_user_cache_range) 272 270 ret lr 273 - ENDPROC(v7m_flush_user_cache_all) 274 - ENDPROC(v7m_flush_user_cache_range) 271 + SYM_FUNC_END(v7m_flush_user_cache_range) 275 272 276 273 /* 277 274 * v7m_coherent_kern_range(start,end) ··· 285 284 * It is assumed that: 286 285 * - the Icache does not read data from the write buffer 287 286 */ 288 - ENTRY(v7m_coherent_kern_range) 289 - /* FALLTHROUGH */ 287 + SYM_TYPED_FUNC_START(v7m_coherent_kern_range) 288 + b v7m_coherent_user_range 289 + SYM_FUNC_END(v7m_coherent_kern_range) 290 290 291 291 /* 292 292 * v7m_coherent_user_range(start,end) ··· 302 300 * It is assumed that: 303 301 * - the Icache does not read data from the write buffer 304 302 */ 305 - ENTRY(v7m_coherent_user_range) 303 + SYM_TYPED_FUNC_START(v7m_coherent_user_range) 306 304 UNWIND(.fnstart ) 307 305 dcache_line_size r2, r3 308 306 sub r3, r2, #1 ··· 330 328 isb 331 329 ret lr 332 330 UNWIND(.fnend ) 333 - ENDPROC(v7m_coherent_kern_range) 334 - ENDPROC(v7m_coherent_user_range) 331 + SYM_FUNC_END(v7m_coherent_user_range) 335 332 336 333 /* 337 334 * v7m_flush_kern_dcache_area(void *addr, size_t size) ··· 341 340 * - addr - kernel address 342 341 * - size - region size 343 342 */ 344 - ENTRY(v7m_flush_kern_dcache_area) 343 + SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area) 345 344 dcache_line_size r2, r3 346 345 add r1, r0, r1 347 346 sub r3, r2, #1 ··· 353 352 blo 1b 354 353 dsb st 355 354 ret lr 356 - ENDPROC(v7m_flush_kern_dcache_area) 355 + SYM_FUNC_END(v7m_flush_kern_dcache_area) 357 356 358 357 /* 359 358 * v7m_dma_inv_range(start,end) ··· 409 408 * - start - virtual start address of region 410 409 * - end - virtual end address of region 411 410 */ 412 - ENTRY(v7m_dma_flush_range) 411 + SYM_TYPED_FUNC_START(v7m_dma_flush_range) 413 412 dcache_line_size r2, r3 414 413 sub r3, r2, #1 415 414 bic r0, r0, r3 ··· 420 419 blo 1b 421 420 dsb st 422 421 ret lr 423 - 
ENDPROC(v7m_dma_flush_range) 422 + SYM_FUNC_END(v7m_dma_flush_range) 424 423 425 424 /* 426 425 * dma_map_area(start, size, dir) ··· 428 427 * - size - size of region 429 428 * - dir - DMA direction 430 429 */ 431 - ENTRY(v7m_dma_map_area) 430 + SYM_TYPED_FUNC_START(v7m_dma_map_area) 432 431 add r1, r1, r0 433 432 teq r2, #DMA_FROM_DEVICE 434 433 beq v7m_dma_inv_range 435 434 b v7m_dma_clean_range 436 - ENDPROC(v7m_dma_map_area) 435 + SYM_FUNC_END(v7m_dma_map_area) 437 436 438 437 /* 439 438 * dma_unmap_area(start, size, dir) ··· 441 440 * - size - size of region 442 441 * - dir - DMA direction 443 442 */ 444 - ENTRY(v7m_dma_unmap_area) 443 + SYM_TYPED_FUNC_START(v7m_dma_unmap_area) 445 444 add r1, r1, r0 446 445 teq r2, #DMA_TO_DEVICE 447 446 bne v7m_dma_inv_range 448 447 ret lr 449 - ENDPROC(v7m_dma_unmap_area) 448 + SYM_FUNC_END(v7m_dma_unmap_area) 450 449 451 450 .globl v7m_flush_kern_cache_louis 452 451 .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all
+24 -15
arch/arm/mm/proc-arm1020.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 113 112 * 114 113 * Unconditionally clean and invalidate the entire icache. 115 114 */ 116 - ENTRY(arm1020_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1020_flush_icache_all) 117 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 118 117 mov r0, #0 119 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 120 119 #endif 121 120 ret lr 122 - ENDPROC(arm1020_flush_icache_all) 121 + SYM_FUNC_END(arm1020_flush_icache_all) 123 122 124 123 /* 125 124 * flush_user_cache_all() ··· 127 126 * Invalidate all cache entries in a particular address 128 127 * space. 129 128 */ 130 - ENTRY(arm1020_flush_user_cache_all) 131 - /* FALLTHROUGH */ 129 + SYM_TYPED_FUNC_START(arm1020_flush_user_cache_all) 130 + b arm1020_flush_kern_cache_all 131 + SYM_FUNC_END(arm1020_flush_user_cache_all) 132 + 132 133 /* 133 134 * flush_kern_cache_all() 134 135 * 135 136 * Clean and invalidate the entire cache. 
136 137 */ 137 - ENTRY(arm1020_flush_kern_cache_all) 138 + SYM_TYPED_FUNC_START(arm1020_flush_kern_cache_all) 138 139 mov r2, #VM_EXEC 139 140 mov ip, #0 140 141 __flush_whole_cache: ··· 157 154 #endif 158 155 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 159 156 ret lr 157 + SYM_FUNC_END(arm1020_flush_kern_cache_all) 160 158 161 159 /* 162 160 * flush_user_cache_range(start, end, flags) ··· 169 165 * - end - end address (exclusive) 170 166 * - flags - vm_flags for this space 171 167 */ 172 - ENTRY(arm1020_flush_user_cache_range) 168 + SYM_TYPED_FUNC_START(arm1020_flush_user_cache_range) 173 169 mov ip, #0 174 170 sub r3, r1, r0 @ calculate total size 175 171 cmp r3, #CACHE_DLIMIT ··· 189 185 #endif 190 186 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 191 187 ret lr 188 + SYM_FUNC_END(arm1020_flush_user_cache_range) 192 189 193 190 /* 194 191 * coherent_kern_range(start, end) ··· 201 196 * - start - virtual start address 202 197 * - end - virtual end address 203 198 */ 204 - ENTRY(arm1020_coherent_kern_range) 205 - /* FALLTRHOUGH */ 199 + SYM_TYPED_FUNC_START(arm1020_coherent_kern_range) 200 + b arm1020_coherent_user_range 201 + SYM_FUNC_END(arm1020_coherent_kern_range) 206 202 207 203 /* 208 204 * coherent_user_range(start, end) ··· 215 209 * - start - virtual start address 216 210 * - end - virtual end address 217 211 */ 218 - ENTRY(arm1020_coherent_user_range) 212 + SYM_TYPED_FUNC_START(arm1020_coherent_user_range) 219 213 mov ip, #0 220 214 bic r0, r0, #CACHE_DLINESIZE - 1 221 215 mcr p15, 0, ip, c7, c10, 4 ··· 233 227 mcr p15, 0, ip, c7, c10, 4 @ drain WB 234 228 mov r0, #0 235 229 ret lr 230 + SYM_FUNC_END(arm1020_coherent_user_range) 236 231 237 232 /* 238 233 * flush_kern_dcache_area(void *addr, size_t size) ··· 244 237 * - addr - kernel address 245 238 * - size - region size 246 239 */ 247 - ENTRY(arm1020_flush_kern_dcache_area) 240 + SYM_TYPED_FUNC_START(arm1020_flush_kern_dcache_area) 248 241 mov ip, #0 249 242 #ifndef CONFIG_CPU_DCACHE_DISABLE 250 243 add r1, r0, 
r1 ··· 256 249 #endif 257 250 mcr p15, 0, ip, c7, c10, 4 @ drain WB 258 251 ret lr 252 + SYM_FUNC_END(arm1020_flush_kern_dcache_area) 259 253 260 254 /* 261 255 * dma_inv_range(start, end) ··· 322 314 * - start - virtual start address 323 315 * - end - virtual end address 324 316 */ 325 - ENTRY(arm1020_dma_flush_range) 317 + SYM_TYPED_FUNC_START(arm1020_dma_flush_range) 326 318 mov ip, #0 327 319 #ifndef CONFIG_CPU_DCACHE_DISABLE 328 320 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 335 327 #endif 336 328 mcr p15, 0, ip, c7, c10, 4 @ drain WB 337 329 ret lr 330 + SYM_FUNC_END(arm1020_dma_flush_range) 338 331 339 332 /* 340 333 * dma_map_area(start, size, dir) ··· 343 334 * - size - size of region 344 335 * - dir - DMA direction 345 336 */ 346 - ENTRY(arm1020_dma_map_area) 337 + SYM_TYPED_FUNC_START(arm1020_dma_map_area) 347 338 add r1, r1, r0 348 339 cmp r2, #DMA_TO_DEVICE 349 340 beq arm1020_dma_clean_range 350 341 bcs arm1020_dma_inv_range 351 342 b arm1020_dma_flush_range 352 - ENDPROC(arm1020_dma_map_area) 343 + SYM_FUNC_END(arm1020_dma_map_area) 353 344 354 345 /* 355 346 * dma_unmap_area(start, size, dir) ··· 357 348 * - size - size of region 358 349 * - dir - DMA direction 359 350 */ 360 - ENTRY(arm1020_dma_unmap_area) 351 + SYM_TYPED_FUNC_START(arm1020_dma_unmap_area) 361 352 ret lr 362 - ENDPROC(arm1020_dma_unmap_area) 353 + SYM_FUNC_END(arm1020_dma_unmap_area) 363 354 364 355 .globl arm1020_flush_kern_cache_louis 365 356 .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
+25 -15
arch/arm/mm/proc-arm1020e.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 113 112 * 114 113 * Unconditionally clean and invalidate the entire icache. 115 114 */ 116 - ENTRY(arm1020e_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1020e_flush_icache_all) 117 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 118 117 mov r0, #0 119 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 120 119 #endif 121 120 ret lr 122 - ENDPROC(arm1020e_flush_icache_all) 121 + SYM_FUNC_END(arm1020e_flush_icache_all) 123 122 124 123 /* 125 124 * flush_user_cache_all() ··· 127 126 * Invalidate all cache entries in a particular address 128 127 * space. 129 128 */ 130 - ENTRY(arm1020e_flush_user_cache_all) 131 - /* FALLTHROUGH */ 129 + SYM_TYPED_FUNC_START(arm1020e_flush_user_cache_all) 130 + b arm1020e_flush_kern_cache_all 131 + SYM_FUNC_END(arm1020e_flush_user_cache_all) 132 + 132 133 /* 133 134 * flush_kern_cache_all() 134 135 * 135 136 * Clean and invalidate the entire cache. 
136 137 */ 137 - ENTRY(arm1020e_flush_kern_cache_all) 138 + SYM_TYPED_FUNC_START(arm1020e_flush_kern_cache_all) 138 139 mov r2, #VM_EXEC 139 140 mov ip, #0 140 141 __flush_whole_cache: ··· 156 153 #endif 157 154 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 158 155 ret lr 156 + SYM_FUNC_END(arm1020e_flush_kern_cache_all) 159 157 160 158 /* 161 159 * flush_user_cache_range(start, end, flags) ··· 168 164 * - end - end address (exclusive) 169 165 * - flags - vm_flags for this space 170 166 */ 171 - ENTRY(arm1020e_flush_user_cache_range) 167 + SYM_TYPED_FUNC_START(arm1020e_flush_user_cache_range) 172 168 mov ip, #0 173 169 sub r3, r1, r0 @ calculate total size 174 170 cmp r3, #CACHE_DLIMIT ··· 186 182 #endif 187 183 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 188 184 ret lr 185 + SYM_FUNC_END(arm1020e_flush_user_cache_range) 189 186 190 187 /* 191 188 * coherent_kern_range(start, end) ··· 198 193 * - start - virtual start address 199 194 * - end - virtual end address 200 195 */ 201 - ENTRY(arm1020e_coherent_kern_range) 202 - /* FALLTHROUGH */ 196 + SYM_TYPED_FUNC_START(arm1020e_coherent_kern_range) 197 + b arm1020e_coherent_user_range 198 + SYM_FUNC_END(arm1020e_coherent_kern_range) 199 + 203 200 /* 204 201 * coherent_user_range(start, end) 205 202 * ··· 212 205 * - start - virtual start address 213 206 * - end - virtual end address 214 207 */ 215 - ENTRY(arm1020e_coherent_user_range) 208 + SYM_TYPED_FUNC_START(arm1020e_coherent_user_range) 216 209 mov ip, #0 217 210 bic r0, r0, #CACHE_DLINESIZE - 1 218 211 1: ··· 228 221 mcr p15, 0, ip, c7, c10, 4 @ drain WB 229 222 mov r0, #0 230 223 ret lr 224 + SYM_FUNC_END(arm1020e_coherent_user_range) 231 225 232 226 /* 233 227 * flush_kern_dcache_area(void *addr, size_t size) ··· 239 231 * - addr - kernel address 240 232 * - size - region size 241 233 */ 242 - ENTRY(arm1020e_flush_kern_dcache_area) 234 + SYM_TYPED_FUNC_START(arm1020e_flush_kern_dcache_area) 243 235 mov ip, #0 244 236 #ifndef CONFIG_CPU_DCACHE_DISABLE 245 237 add r1, r0, 
r1 ··· 250 242 #endif 251 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 252 244 ret lr 245 + SYM_FUNC_END(arm1020e_flush_kern_dcache_area) 253 246 254 247 /* 255 248 * dma_inv_range(start, end) ··· 311 302 * - start - virtual start address 312 303 * - end - virtual end address 313 304 */ 314 - ENTRY(arm1020e_dma_flush_range) 305 + SYM_TYPED_FUNC_START(arm1020e_dma_flush_range) 315 306 mov ip, #0 316 307 #ifndef CONFIG_CPU_DCACHE_DISABLE 317 308 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 322 313 #endif 323 314 mcr p15, 0, ip, c7, c10, 4 @ drain WB 324 315 ret lr 316 + SYM_FUNC_END(arm1020e_dma_flush_range) 325 317 326 318 /* 327 319 * dma_map_area(start, size, dir) ··· 330 320 * - size - size of region 331 321 * - dir - DMA direction 332 322 */ 333 - ENTRY(arm1020e_dma_map_area) 323 + SYM_TYPED_FUNC_START(arm1020e_dma_map_area) 334 324 add r1, r1, r0 335 325 cmp r2, #DMA_TO_DEVICE 336 326 beq arm1020e_dma_clean_range 337 327 bcs arm1020e_dma_inv_range 338 328 b arm1020e_dma_flush_range 339 - ENDPROC(arm1020e_dma_map_area) 329 + SYM_FUNC_END(arm1020e_dma_map_area) 340 330 341 331 /* 342 332 * dma_unmap_area(start, size, dir) ··· 344 334 * - size - size of region 345 335 * - dir - DMA direction 346 336 */ 347 - ENTRY(arm1020e_dma_unmap_area) 337 + SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area) 348 338 ret lr 349 - ENDPROC(arm1020e_dma_unmap_area) 339 + SYM_FUNC_END(arm1020e_dma_unmap_area) 350 340 351 341 .globl arm1020e_flush_kern_cache_louis 352 342 .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
+24 -15
arch/arm/mm/proc-arm1022.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 113 112 * 114 113 * Unconditionally clean and invalidate the entire icache. 115 114 */ 116 - ENTRY(arm1022_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1022_flush_icache_all) 117 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 118 117 mov r0, #0 119 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 120 119 #endif 121 120 ret lr 122 - ENDPROC(arm1022_flush_icache_all) 121 + SYM_FUNC_END(arm1022_flush_icache_all) 123 122 124 123 /* 125 124 * flush_user_cache_all() ··· 127 126 * Invalidate all cache entries in a particular address 128 127 * space. 129 128 */ 130 - ENTRY(arm1022_flush_user_cache_all) 131 - /* FALLTHROUGH */ 129 + SYM_TYPED_FUNC_START(arm1022_flush_user_cache_all) 130 + b arm1022_flush_kern_cache_all 131 + SYM_FUNC_END(arm1022_flush_user_cache_all) 132 + 132 133 /* 133 134 * flush_kern_cache_all() 134 135 * 135 136 * Clean and invalidate the entire cache. 
136 137 */ 137 - ENTRY(arm1022_flush_kern_cache_all) 138 + SYM_TYPED_FUNC_START(arm1022_flush_kern_cache_all) 138 139 mov r2, #VM_EXEC 139 140 mov ip, #0 140 141 __flush_whole_cache: ··· 155 152 #endif 156 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 157 154 ret lr 155 + SYM_FUNC_END(arm1022_flush_kern_cache_all) 158 156 159 157 /* 160 158 * flush_user_cache_range(start, end, flags) ··· 167 163 * - end - end address (exclusive) 168 164 * - flags - vm_flags for this space 169 165 */ 170 - ENTRY(arm1022_flush_user_cache_range) 166 + SYM_TYPED_FUNC_START(arm1022_flush_user_cache_range) 171 167 mov ip, #0 172 168 sub r3, r1, r0 @ calculate total size 173 169 cmp r3, #CACHE_DLIMIT ··· 185 181 #endif 186 182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 187 183 ret lr 184 + SYM_FUNC_END(arm1022_flush_user_cache_range) 188 185 189 186 /* 190 187 * coherent_kern_range(start, end) ··· 197 192 * - start - virtual start address 198 193 * - end - virtual end address 199 194 */ 200 - ENTRY(arm1022_coherent_kern_range) 201 - /* FALLTHROUGH */ 195 + SYM_TYPED_FUNC_START(arm1022_coherent_kern_range) 196 + b arm1022_coherent_user_range 197 + SYM_FUNC_END(arm1022_coherent_kern_range) 202 198 203 199 /* 204 200 * coherent_user_range(start, end) ··· 211 205 * - start - virtual start address 212 206 * - end - virtual end address 213 207 */ 214 - ENTRY(arm1022_coherent_user_range) 208 + SYM_TYPED_FUNC_START(arm1022_coherent_user_range) 215 209 mov ip, #0 216 210 bic r0, r0, #CACHE_DLINESIZE - 1 217 211 1: ··· 227 221 mcr p15, 0, ip, c7, c10, 4 @ drain WB 228 222 mov r0, #0 229 223 ret lr 224 + SYM_FUNC_END(arm1022_coherent_user_range) 230 225 231 226 /* 232 227 * flush_kern_dcache_area(void *addr, size_t size) ··· 238 231 * - addr - kernel address 239 232 * - size - region size 240 233 */ 241 - ENTRY(arm1022_flush_kern_dcache_area) 234 + SYM_TYPED_FUNC_START(arm1022_flush_kern_dcache_area) 242 235 mov ip, #0 243 236 #ifndef CONFIG_CPU_DCACHE_DISABLE 244 237 add r1, r0, r1 ··· 249 242 #endif 
250 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 251 244 ret lr 245 + SYM_FUNC_END(arm1022_flush_kern_dcache_area) 252 246 253 247 /* 254 248 * dma_inv_range(start, end) ··· 310 302 * - start - virtual start address 311 303 * - end - virtual end address 312 304 */ 313 - ENTRY(arm1022_dma_flush_range) 305 + SYM_TYPED_FUNC_START(arm1022_dma_flush_range) 314 306 mov ip, #0 315 307 #ifndef CONFIG_CPU_DCACHE_DISABLE 316 308 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 321 313 #endif 322 314 mcr p15, 0, ip, c7, c10, 4 @ drain WB 323 315 ret lr 316 + SYM_FUNC_END(arm1022_dma_flush_range) 324 317 325 318 /* 326 319 * dma_map_area(start, size, dir) ··· 329 320 * - size - size of region 330 321 * - dir - DMA direction 331 322 */ 332 - ENTRY(arm1022_dma_map_area) 323 + SYM_TYPED_FUNC_START(arm1022_dma_map_area) 333 324 add r1, r1, r0 334 325 cmp r2, #DMA_TO_DEVICE 335 326 beq arm1022_dma_clean_range 336 327 bcs arm1022_dma_inv_range 337 328 b arm1022_dma_flush_range 338 - ENDPROC(arm1022_dma_map_area) 329 + SYM_FUNC_END(arm1022_dma_map_area) 339 330 340 331 /* 341 332 * dma_unmap_area(start, size, dir) ··· 343 334 * - size - size of region 344 335 * - dir - DMA direction 345 336 */ 346 - ENTRY(arm1022_dma_unmap_area) 337 + SYM_TYPED_FUNC_START(arm1022_dma_unmap_area) 347 338 ret lr 348 - ENDPROC(arm1022_dma_unmap_area) 339 + SYM_FUNC_END(arm1022_dma_unmap_area) 349 340 350 341 .globl arm1022_flush_kern_cache_louis 351 342 .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
+25 -15
arch/arm/mm/proc-arm1026.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 113 112 * 114 113 * Unconditionally clean and invalidate the entire icache. 115 114 */ 116 - ENTRY(arm1026_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1026_flush_icache_all) 117 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 118 117 mov r0, #0 119 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 120 119 #endif 121 120 ret lr 122 - ENDPROC(arm1026_flush_icache_all) 121 + SYM_FUNC_END(arm1026_flush_icache_all) 123 122 124 123 /* 125 124 * flush_user_cache_all() ··· 127 126 * Invalidate all cache entries in a particular address 128 127 * space. 129 128 */ 130 - ENTRY(arm1026_flush_user_cache_all) 131 - /* FALLTHROUGH */ 129 + SYM_TYPED_FUNC_START(arm1026_flush_user_cache_all) 130 + b arm1026_flush_kern_cache_all 131 + SYM_FUNC_END(arm1026_flush_user_cache_all) 132 + 132 133 /* 133 134 * flush_kern_cache_all() 134 135 * 135 136 * Clean and invalidate the entire cache. 
136 137 */ 137 - ENTRY(arm1026_flush_kern_cache_all) 138 + SYM_TYPED_FUNC_START(arm1026_flush_kern_cache_all) 138 139 mov r2, #VM_EXEC 139 140 mov ip, #0 140 141 __flush_whole_cache: ··· 150 147 #endif 151 148 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 152 149 ret lr 150 + SYM_FUNC_END(arm1026_flush_kern_cache_all) 153 151 154 152 /* 155 153 * flush_user_cache_range(start, end, flags) ··· 162 158 * - end - end address (exclusive) 163 159 * - flags - vm_flags for this space 164 160 */ 165 - ENTRY(arm1026_flush_user_cache_range) 161 + SYM_TYPED_FUNC_START(arm1026_flush_user_cache_range) 166 162 mov ip, #0 167 163 sub r3, r1, r0 @ calculate total size 168 164 cmp r3, #CACHE_DLIMIT ··· 180 176 #endif 181 177 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 182 178 ret lr 179 + SYM_FUNC_END(arm1026_flush_user_cache_range) 183 180 184 181 /* 185 182 * coherent_kern_range(start, end) ··· 192 187 * - start - virtual start address 193 188 * - end - virtual end address 194 189 */ 195 - ENTRY(arm1026_coherent_kern_range) 196 - /* FALLTHROUGH */ 190 + SYM_TYPED_FUNC_START(arm1026_coherent_kern_range) 191 + b arm1026_coherent_user_range 192 + SYM_FUNC_END(arm1026_coherent_kern_range) 193 + 197 194 /* 198 195 * coherent_user_range(start, end) 199 196 * ··· 206 199 * - start - virtual start address 207 200 * - end - virtual end address 208 201 */ 209 - ENTRY(arm1026_coherent_user_range) 202 + SYM_TYPED_FUNC_START(arm1026_coherent_user_range) 210 203 mov ip, #0 211 204 bic r0, r0, #CACHE_DLINESIZE - 1 212 205 1: ··· 222 215 mcr p15, 0, ip, c7, c10, 4 @ drain WB 223 216 mov r0, #0 224 217 ret lr 218 + SYM_FUNC_END(arm1026_coherent_user_range) 225 219 226 220 /* 227 221 * flush_kern_dcache_area(void *addr, size_t size) ··· 233 225 * - addr - kernel address 234 226 * - size - region size 235 227 */ 236 - ENTRY(arm1026_flush_kern_dcache_area) 228 + SYM_TYPED_FUNC_START(arm1026_flush_kern_dcache_area) 237 229 mov ip, #0 238 230 #ifndef CONFIG_CPU_DCACHE_DISABLE 239 231 add r1, r0, r1 ··· 244 236 
#endif 245 237 mcr p15, 0, ip, c7, c10, 4 @ drain WB 246 238 ret lr 239 + SYM_FUNC_END(arm1026_flush_kern_dcache_area) 247 240 248 241 /* 249 242 * dma_inv_range(start, end) ··· 305 296 * - start - virtual start address 306 297 * - end - virtual end address 307 298 */ 308 - ENTRY(arm1026_dma_flush_range) 299 + SYM_TYPED_FUNC_START(arm1026_dma_flush_range) 309 300 mov ip, #0 310 301 #ifndef CONFIG_CPU_DCACHE_DISABLE 311 302 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 316 307 #endif 317 308 mcr p15, 0, ip, c7, c10, 4 @ drain WB 318 309 ret lr 310 + SYM_FUNC_END(arm1026_dma_flush_range) 319 311 320 312 /* 321 313 * dma_map_area(start, size, dir) ··· 324 314 * - size - size of region 325 315 * - dir - DMA direction 326 316 */ 327 - ENTRY(arm1026_dma_map_area) 317 + SYM_TYPED_FUNC_START(arm1026_dma_map_area) 328 318 add r1, r1, r0 329 319 cmp r2, #DMA_TO_DEVICE 330 320 beq arm1026_dma_clean_range 331 321 bcs arm1026_dma_inv_range 332 322 b arm1026_dma_flush_range 333 - ENDPROC(arm1026_dma_map_area) 323 + SYM_FUNC_END(arm1026_dma_map_area) 334 324 335 325 /* 336 326 * dma_unmap_area(start, size, dir) ··· 338 328 * - size - size of region 339 329 * - dir - DMA direction 340 330 */ 341 - ENTRY(arm1026_dma_unmap_area) 331 + SYM_TYPED_FUNC_START(arm1026_dma_unmap_area) 342 332 ret lr 343 - ENDPROC(arm1026_dma_unmap_area) 333 + SYM_FUNC_END(arm1026_dma_unmap_area) 344 334 345 335 .globl arm1026_flush_kern_cache_louis 346 336 .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
+24 -16
arch/arm/mm/proc-arm920.S
··· 13 13 */ 14 14 #include <linux/linkage.h> 15 15 #include <linux/init.h> 16 + #include <linux/cfi_types.h> 16 17 #include <linux/pgtable.h> 17 18 #include <asm/assembler.h> 18 19 #include <asm/hwcap.h> ··· 104 103 * 105 104 * Unconditionally clean and invalidate the entire icache. 106 105 */ 107 - ENTRY(arm920_flush_icache_all) 106 + SYM_TYPED_FUNC_START(arm920_flush_icache_all) 108 107 mov r0, #0 109 108 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 110 109 ret lr 111 - ENDPROC(arm920_flush_icache_all) 110 + SYM_FUNC_END(arm920_flush_icache_all) 112 111 113 112 /* 114 113 * flush_user_cache_all() ··· 116 115 * Invalidate all cache entries in a particular address 117 116 * space. 118 117 */ 119 - ENTRY(arm920_flush_user_cache_all) 120 - /* FALLTHROUGH */ 118 + SYM_TYPED_FUNC_START(arm920_flush_user_cache_all) 119 + b arm920_flush_kern_cache_all 120 + SYM_FUNC_END(arm920_flush_user_cache_all) 121 121 122 122 /* 123 123 * flush_kern_cache_all() 124 124 * 125 125 * Clean and invalidate the entire cache. 
126 126 */ 127 - ENTRY(arm920_flush_kern_cache_all) 127 + SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all) 128 128 mov r2, #VM_EXEC 129 129 mov ip, #0 130 130 __flush_whole_cache: ··· 140 138 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 141 139 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 142 140 ret lr 141 + SYM_FUNC_END(arm920_flush_kern_cache_all) 143 142 144 143 /* 145 144 * flush_user_cache_range(start, end, flags) ··· 152 149 * - end - end address (exclusive) 153 150 * - flags - vm_flags for address space 154 151 */ 155 - ENTRY(arm920_flush_user_cache_range) 152 + SYM_TYPED_FUNC_START(arm920_flush_user_cache_range) 156 153 mov ip, #0 157 154 sub r3, r1, r0 @ calculate total size 158 155 cmp r3, #CACHE_DLIMIT ··· 167 164 tst r2, #VM_EXEC 168 165 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 169 166 ret lr 167 + SYM_FUNC_END(arm920_flush_user_cache_range) 170 168 171 169 /* 172 170 * coherent_kern_range(start, end) ··· 179 175 * - start - virtual start address 180 176 * - end - virtual end address 181 177 */ 182 - ENTRY(arm920_coherent_kern_range) 183 - /* FALLTHROUGH */ 178 + SYM_TYPED_FUNC_START(arm920_coherent_kern_range) 179 + b arm920_coherent_user_range 180 + SYM_FUNC_END(arm920_coherent_kern_range) 184 181 185 182 /* 186 183 * coherent_user_range(start, end) ··· 193 188 * - start - virtual start address 194 189 * - end - virtual end address 195 190 */ 196 - ENTRY(arm920_coherent_user_range) 191 + SYM_TYPED_FUNC_START(arm920_coherent_user_range) 197 192 bic r0, r0, #CACHE_DLINESIZE - 1 198 193 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 199 194 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 203 198 mcr p15, 0, r0, c7, c10, 4 @ drain WB 204 199 mov r0, #0 205 200 ret lr 201 + SYM_FUNC_END(arm920_coherent_user_range) 206 202 207 203 /* 208 204 * flush_kern_dcache_area(void *addr, size_t size) ··· 214 208 * - addr - kernel address 215 209 * - size - region size 216 210 */ 217 - ENTRY(arm920_flush_kern_dcache_area) 211 + 
SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area) 218 212 add r1, r0, r1 219 213 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 220 214 add r0, r0, #CACHE_DLINESIZE ··· 224 218 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 225 219 mcr p15, 0, r0, c7, c10, 4 @ drain WB 226 220 ret lr 221 + SYM_FUNC_END(arm920_flush_kern_dcache_area) 227 222 228 223 /* 229 224 * dma_inv_range(start, end) ··· 279 272 * - start - virtual start address 280 273 * - end - virtual end address 281 274 */ 282 - ENTRY(arm920_dma_flush_range) 275 + SYM_TYPED_FUNC_START(arm920_dma_flush_range) 283 276 bic r0, r0, #CACHE_DLINESIZE - 1 284 277 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 285 278 add r0, r0, #CACHE_DLINESIZE ··· 287 280 blo 1b 288 281 mcr p15, 0, r0, c7, c10, 4 @ drain WB 289 282 ret lr 283 + SYM_FUNC_END(arm920_dma_flush_range) 290 284 291 285 /* 292 286 * dma_map_area(start, size, dir) ··· 295 287 * - size - size of region 296 288 * - dir - DMA direction 297 289 */ 298 - ENTRY(arm920_dma_map_area) 290 + SYM_TYPED_FUNC_START(arm920_dma_map_area) 299 291 add r1, r1, r0 300 292 cmp r2, #DMA_TO_DEVICE 301 293 beq arm920_dma_clean_range 302 294 bcs arm920_dma_inv_range 303 295 b arm920_dma_flush_range 304 - ENDPROC(arm920_dma_map_area) 296 + SYM_FUNC_END(arm920_dma_map_area) 305 297 306 298 /* 307 299 * dma_unmap_area(start, size, dir) ··· 309 301 * - size - size of region 310 302 * - dir - DMA direction 311 303 */ 312 - ENTRY(arm920_dma_unmap_area) 304 + SYM_TYPED_FUNC_START(arm920_dma_unmap_area) 313 305 ret lr 314 - ENDPROC(arm920_dma_unmap_area) 306 + SYM_FUNC_END(arm920_dma_unmap_area) 315 307 316 308 .globl arm920_flush_kern_cache_louis 317 309 .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all 318 310 319 311 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 320 312 define_cache_functions arm920 321 - #endif 313 + #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ 322 314 323 315 324 316 ENTRY(cpu_arm920_dcache_clean_area)
+24 -16
arch/arm/mm/proc-arm922.S
··· 14 14 */ 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 + #include <linux/cfi_types.h> 17 18 #include <linux/pgtable.h> 18 19 #include <asm/assembler.h> 19 20 #include <asm/hwcap.h> ··· 106 105 * 107 106 * Unconditionally clean and invalidate the entire icache. 108 107 */ 109 - ENTRY(arm922_flush_icache_all) 108 + SYM_TYPED_FUNC_START(arm922_flush_icache_all) 110 109 mov r0, #0 111 110 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 112 111 ret lr 113 - ENDPROC(arm922_flush_icache_all) 112 + SYM_FUNC_END(arm922_flush_icache_all) 114 113 115 114 /* 116 115 * flush_user_cache_all() ··· 118 117 * Clean and invalidate all cache entries in a particular 119 118 * address space. 120 119 */ 121 - ENTRY(arm922_flush_user_cache_all) 122 - /* FALLTHROUGH */ 120 + SYM_TYPED_FUNC_START(arm922_flush_user_cache_all) 121 + b arm922_flush_kern_cache_all 122 + SYM_FUNC_END(arm922_flush_user_cache_all) 123 123 124 124 /* 125 125 * flush_kern_cache_all() 126 126 * 127 127 * Clean and invalidate the entire cache. 
128 128 */ 129 - ENTRY(arm922_flush_kern_cache_all) 129 + SYM_TYPED_FUNC_START(arm922_flush_kern_cache_all) 130 130 mov r2, #VM_EXEC 131 131 mov ip, #0 132 132 __flush_whole_cache: ··· 142 140 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 143 141 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 144 142 ret lr 143 + SYM_FUNC_END(arm922_flush_kern_cache_all) 145 144 146 145 /* 147 146 * flush_user_cache_range(start, end, flags) ··· 154 151 * - end - end address (exclusive) 155 152 * - flags - vm_flags describing address space 156 153 */ 157 - ENTRY(arm922_flush_user_cache_range) 154 + SYM_TYPED_FUNC_START(arm922_flush_user_cache_range) 158 155 mov ip, #0 159 156 sub r3, r1, r0 @ calculate total size 160 157 cmp r3, #CACHE_DLIMIT ··· 169 166 tst r2, #VM_EXEC 170 167 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 168 ret lr 169 + SYM_FUNC_END(arm922_flush_user_cache_range) 172 170 173 171 /* 174 172 * coherent_kern_range(start, end) ··· 181 177 * - start - virtual start address 182 178 * - end - virtual end address 183 179 */ 184 - ENTRY(arm922_coherent_kern_range) 185 - /* FALLTHROUGH */ 180 + SYM_TYPED_FUNC_START(arm922_coherent_kern_range) 181 + b arm922_coherent_user_range 182 + SYM_FUNC_END(arm922_coherent_kern_range) 186 183 187 184 /* 188 185 * coherent_user_range(start, end) ··· 195 190 * - start - virtual start address 196 191 * - end - virtual end address 197 192 */ 198 - ENTRY(arm922_coherent_user_range) 193 + SYM_TYPED_FUNC_START(arm922_coherent_user_range) 199 194 bic r0, r0, #CACHE_DLINESIZE - 1 200 195 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 201 196 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 205 200 mcr p15, 0, r0, c7, c10, 4 @ drain WB 206 201 mov r0, #0 207 202 ret lr 203 + SYM_FUNC_END(arm922_coherent_user_range) 208 204 209 205 /* 210 206 * flush_kern_dcache_area(void *addr, size_t size) ··· 216 210 * - addr - kernel address 217 211 * - size - region size 218 212 */ 219 - ENTRY(arm922_flush_kern_dcache_area) 213 + 
SYM_TYPED_FUNC_START(arm922_flush_kern_dcache_area) 220 214 add r1, r0, r1 221 215 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 222 216 add r0, r0, #CACHE_DLINESIZE ··· 226 220 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 227 221 mcr p15, 0, r0, c7, c10, 4 @ drain WB 228 222 ret lr 223 + SYM_FUNC_END(arm922_flush_kern_dcache_area) 229 224 230 225 /* 231 226 * dma_inv_range(start, end) ··· 281 274 * - start - virtual start address 282 275 * - end - virtual end address 283 276 */ 284 - ENTRY(arm922_dma_flush_range) 277 + SYM_TYPED_FUNC_START(arm922_dma_flush_range) 285 278 bic r0, r0, #CACHE_DLINESIZE - 1 286 279 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 287 280 add r0, r0, #CACHE_DLINESIZE ··· 289 282 blo 1b 290 283 mcr p15, 0, r0, c7, c10, 4 @ drain WB 291 284 ret lr 285 + SYM_FUNC_END(arm922_dma_flush_range) 292 286 293 287 /* 294 288 * dma_map_area(start, size, dir) ··· 297 289 * - size - size of region 298 290 * - dir - DMA direction 299 291 */ 300 - ENTRY(arm922_dma_map_area) 292 + SYM_TYPED_FUNC_START(arm922_dma_map_area) 301 293 add r1, r1, r0 302 294 cmp r2, #DMA_TO_DEVICE 303 295 beq arm922_dma_clean_range 304 296 bcs arm922_dma_inv_range 305 297 b arm922_dma_flush_range 306 - ENDPROC(arm922_dma_map_area) 298 + SYM_FUNC_END(arm922_dma_map_area) 307 299 308 300 /* 309 301 * dma_unmap_area(start, size, dir) ··· 311 303 * - size - size of region 312 304 * - dir - DMA direction 313 305 */ 314 - ENTRY(arm922_dma_unmap_area) 306 + SYM_TYPED_FUNC_START(arm922_dma_unmap_area) 315 307 ret lr 316 - ENDPROC(arm922_dma_unmap_area) 308 + SYM_FUNC_END(arm922_dma_unmap_area) 317 309 318 310 .globl arm922_flush_kern_cache_louis 319 311 .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all 320 312 321 313 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 322 314 define_cache_functions arm922 323 - #endif 324 315 316 + #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ 325 317 326 318 ENTRY(cpu_arm922_dcache_clean_area) 
327 319 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+23 -15
arch/arm/mm/proc-arm925.S
··· 37 37 38 38 #include <linux/linkage.h> 39 39 #include <linux/init.h> 40 + #include <linux/cfi_types.h> 40 41 #include <linux/pgtable.h> 41 42 #include <asm/assembler.h> 42 43 #include <asm/hwcap.h> ··· 139 138 * 140 139 * Unconditionally clean and invalidate the entire icache. 141 140 */ 142 - ENTRY(arm925_flush_icache_all) 141 + SYM_TYPED_FUNC_START(arm925_flush_icache_all) 143 142 mov r0, #0 144 143 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 145 144 ret lr 146 - ENDPROC(arm925_flush_icache_all) 145 + SYM_FUNC_END(arm925_flush_icache_all) 147 146 148 147 /* 149 148 * flush_user_cache_all() ··· 151 150 * Clean and invalidate all cache entries in a particular 152 151 * address space. 153 152 */ 154 - ENTRY(arm925_flush_user_cache_all) 155 - /* FALLTHROUGH */ 153 + SYM_TYPED_FUNC_START(arm925_flush_user_cache_all) 154 + b arm925_flush_kern_cache_all 155 + SYM_FUNC_END(arm925_flush_user_cache_all) 156 156 157 157 /* 158 158 * flush_kern_cache_all() 159 159 * 160 160 * Clean and invalidate the entire cache. 
161 161 */ 162 - ENTRY(arm925_flush_kern_cache_all) 162 + SYM_TYPED_FUNC_START(arm925_flush_kern_cache_all) 163 163 mov r2, #VM_EXEC 164 164 mov ip, #0 165 165 __flush_whole_cache: ··· 177 175 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 178 176 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 179 177 ret lr 178 + SYM_FUNC_END(arm925_flush_kern_cache_all) 180 179 181 180 /* 182 181 * flush_user_cache_range(start, end, flags) ··· 189 186 * - end - end address (exclusive) 190 187 * - flags - vm_flags describing address space 191 188 */ 192 - ENTRY(arm925_flush_user_cache_range) 189 + SYM_TYPED_FUNC_START(arm925_flush_user_cache_range) 193 190 mov ip, #0 194 191 sub r3, r1, r0 @ calculate total size 195 192 cmp r3, #CACHE_DLIMIT ··· 215 212 tst r2, #VM_EXEC 216 213 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 217 214 ret lr 215 + SYM_FUNC_END(arm925_flush_user_cache_range) 218 216 219 217 /* 220 218 * coherent_kern_range(start, end) ··· 227 223 * - start - virtual start address 228 224 * - end - virtual end address 229 225 */ 230 - ENTRY(arm925_coherent_kern_range) 231 - /* FALLTHROUGH */ 226 + SYM_TYPED_FUNC_START(arm925_coherent_kern_range) 227 + b arm925_coherent_user_range 228 + SYM_FUNC_END(arm925_coherent_kern_range) 232 229 233 230 /* 234 231 * coherent_user_range(start, end) ··· 241 236 * - start - virtual start address 242 237 * - end - virtual end address 243 238 */ 244 - ENTRY(arm925_coherent_user_range) 239 + SYM_TYPED_FUNC_START(arm925_coherent_user_range) 245 240 bic r0, r0, #CACHE_DLINESIZE - 1 246 241 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 247 242 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 251 246 mcr p15, 0, r0, c7, c10, 4 @ drain WB 252 247 mov r0, #0 253 248 ret lr 249 + SYM_FUNC_END(arm925_coherent_user_range) 254 250 255 251 /* 256 252 * flush_kern_dcache_area(void *addr, size_t size) ··· 262 256 * - addr - kernel address 263 257 * - size - region size 264 258 */ 265 - ENTRY(arm925_flush_kern_dcache_area) 259 + 
SYM_TYPED_FUNC_START(arm925_flush_kern_dcache_area) 266 260 add r1, r0, r1 267 261 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 268 262 add r0, r0, #CACHE_DLINESIZE ··· 272 266 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 273 267 mcr p15, 0, r0, c7, c10, 4 @ drain WB 274 268 ret lr 269 + SYM_FUNC_END(arm925_flush_kern_dcache_area) 275 270 276 271 /* 277 272 * dma_inv_range(start, end) ··· 331 324 * - start - virtual start address 332 325 * - end - virtual end address 333 326 */ 334 - ENTRY(arm925_dma_flush_range) 327 + SYM_TYPED_FUNC_START(arm925_dma_flush_range) 335 328 bic r0, r0, #CACHE_DLINESIZE - 1 336 329 1: 337 330 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 344 337 blo 1b 345 338 mcr p15, 0, r0, c7, c10, 4 @ drain WB 346 339 ret lr 340 + SYM_FUNC_END(arm925_dma_flush_range) 347 341 348 342 /* 349 343 * dma_map_area(start, size, dir) ··· 352 344 * - size - size of region 353 345 * - dir - DMA direction 354 346 */ 355 - ENTRY(arm925_dma_map_area) 347 + SYM_TYPED_FUNC_START(arm925_dma_map_area) 356 348 add r1, r1, r0 357 349 cmp r2, #DMA_TO_DEVICE 358 350 beq arm925_dma_clean_range 359 351 bcs arm925_dma_inv_range 360 352 b arm925_dma_flush_range 361 - ENDPROC(arm925_dma_map_area) 353 + SYM_FUNC_END(arm925_dma_map_area) 362 354 363 355 /* 364 356 * dma_unmap_area(start, size, dir) ··· 366 358 * - size - size of region 367 359 * - dir - DMA direction 368 360 */ 369 - ENTRY(arm925_dma_unmap_area) 361 + SYM_TYPED_FUNC_START(arm925_dma_unmap_area) 370 362 ret lr 371 - ENDPROC(arm925_dma_unmap_area) 363 + SYM_FUNC_END(arm925_dma_unmap_area) 372 364 373 365 .globl arm925_flush_kern_cache_louis 374 366 .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
+23 -15
arch/arm/mm/proc-arm926.S
··· 13 13 */ 14 14 #include <linux/linkage.h> 15 15 #include <linux/init.h> 16 + #include <linux/cfi_types.h> 16 17 #include <linux/pgtable.h> 17 18 #include <asm/assembler.h> 18 19 #include <asm/hwcap.h> ··· 105 104 * 106 105 * Unconditionally clean and invalidate the entire icache. 107 106 */ 108 - ENTRY(arm926_flush_icache_all) 107 + SYM_TYPED_FUNC_START(arm926_flush_icache_all) 109 108 mov r0, #0 110 109 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 111 110 ret lr 112 - ENDPROC(arm926_flush_icache_all) 111 + SYM_FUNC_END(arm926_flush_icache_all) 113 112 114 113 /* 115 114 * flush_user_cache_all() ··· 117 116 * Clean and invalidate all cache entries in a particular 118 117 * address space. 119 118 */ 120 - ENTRY(arm926_flush_user_cache_all) 121 - /* FALLTHROUGH */ 119 + SYM_TYPED_FUNC_START(arm926_flush_user_cache_all) 120 + b arm926_flush_kern_cache_all 121 + SYM_FUNC_END(arm926_flush_user_cache_all) 122 122 123 123 /* 124 124 * flush_kern_cache_all() 125 125 * 126 126 * Clean and invalidate the entire cache. 
127 127 */ 128 - ENTRY(arm926_flush_kern_cache_all) 128 + SYM_TYPED_FUNC_START(arm926_flush_kern_cache_all) 129 129 mov r2, #VM_EXEC 130 130 mov ip, #0 131 131 __flush_whole_cache: ··· 140 138 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 141 139 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 142 140 ret lr 141 + SYM_FUNC_END(arm926_flush_kern_cache_all) 143 142 144 143 /* 145 144 * flush_user_cache_range(start, end, flags) ··· 152 149 * - end - end address (exclusive) 153 150 * - flags - vm_flags describing address space 154 151 */ 155 - ENTRY(arm926_flush_user_cache_range) 152 + SYM_TYPED_FUNC_START(arm926_flush_user_cache_range) 156 153 mov ip, #0 157 154 sub r3, r1, r0 @ calculate total size 158 155 cmp r3, #CACHE_DLIMIT ··· 178 175 tst r2, #VM_EXEC 179 176 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 180 177 ret lr 178 + SYM_FUNC_END(arm926_flush_user_cache_range) 181 179 182 180 /* 183 181 * coherent_kern_range(start, end) ··· 190 186 * - start - virtual start address 191 187 * - end - virtual end address 192 188 */ 193 - ENTRY(arm926_coherent_kern_range) 194 - /* FALLTHROUGH */ 189 + SYM_TYPED_FUNC_START(arm926_coherent_kern_range) 190 + b arm926_coherent_user_range 191 + SYM_FUNC_END(arm926_coherent_kern_range) 195 192 196 193 /* 197 194 * coherent_user_range(start, end) ··· 204 199 * - start - virtual start address 205 200 * - end - virtual end address 206 201 */ 207 - ENTRY(arm926_coherent_user_range) 202 + SYM_TYPED_FUNC_START(arm926_coherent_user_range) 208 203 bic r0, r0, #CACHE_DLINESIZE - 1 209 204 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 210 205 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 214 209 mcr p15, 0, r0, c7, c10, 4 @ drain WB 215 210 mov r0, #0 216 211 ret lr 212 + SYM_FUNC_END(arm926_coherent_user_range) 217 213 218 214 /* 219 215 * flush_kern_dcache_area(void *addr, size_t size) ··· 225 219 * - addr - kernel address 226 220 * - size - region size 227 221 */ 228 - ENTRY(arm926_flush_kern_dcache_area) 222 + 
SYM_TYPED_FUNC_START(arm926_flush_kern_dcache_area) 229 223 add r1, r0, r1 230 224 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 231 225 add r0, r0, #CACHE_DLINESIZE ··· 235 229 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 236 230 mcr p15, 0, r0, c7, c10, 4 @ drain WB 237 231 ret lr 232 + SYM_FUNC_END(arm926_flush_kern_dcache_area) 238 233 239 234 /* 240 235 * dma_inv_range(start, end) ··· 294 287 * - start - virtual start address 295 288 * - end - virtual end address 296 289 */ 297 - ENTRY(arm926_dma_flush_range) 290 + SYM_TYPED_FUNC_START(arm926_dma_flush_range) 298 291 bic r0, r0, #CACHE_DLINESIZE - 1 299 292 1: 300 293 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 307 300 blo 1b 308 301 mcr p15, 0, r0, c7, c10, 4 @ drain WB 309 302 ret lr 303 + SYM_FUNC_END(arm926_dma_flush_range) 310 304 311 305 /* 312 306 * dma_map_area(start, size, dir) ··· 315 307 * - size - size of region 316 308 * - dir - DMA direction 317 309 */ 318 - ENTRY(arm926_dma_map_area) 310 + SYM_TYPED_FUNC_START(arm926_dma_map_area) 319 311 add r1, r1, r0 320 312 cmp r2, #DMA_TO_DEVICE 321 313 beq arm926_dma_clean_range 322 314 bcs arm926_dma_inv_range 323 315 b arm926_dma_flush_range 324 - ENDPROC(arm926_dma_map_area) 316 + SYM_FUNC_END(arm926_dma_map_area) 325 317 326 318 /* 327 319 * dma_unmap_area(start, size, dir) ··· 329 321 * - size - size of region 330 322 * - dir - DMA direction 331 323 */ 332 - ENTRY(arm926_dma_unmap_area) 324 + SYM_TYPED_FUNC_START(arm926_dma_unmap_area) 333 325 ret lr 334 - ENDPROC(arm926_dma_unmap_area) 326 + SYM_FUNC_END(arm926_dma_unmap_area) 335 327 336 328 .globl arm926_flush_kern_cache_louis 337 329 .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
+25 -17
arch/arm/mm/proc-arm940.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/hwcap.h> ··· 72 71 * 73 72 * Unconditionally clean and invalidate the entire icache. 74 73 */ 75 - ENTRY(arm940_flush_icache_all) 74 + SYM_TYPED_FUNC_START(arm940_flush_icache_all) 76 75 mov r0, #0 77 76 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 78 77 ret lr 79 - ENDPROC(arm940_flush_icache_all) 78 + SYM_FUNC_END(arm940_flush_icache_all) 80 79 81 80 /* 82 81 * flush_user_cache_all() 83 82 */ 84 - ENTRY(arm940_flush_user_cache_all) 85 - /* FALLTHROUGH */ 83 + SYM_TYPED_FUNC_START(arm940_flush_user_cache_all) 84 + b arm940_flush_kern_cache_all 85 + SYM_FUNC_END(arm940_flush_user_cache_all) 86 86 87 87 /* 88 88 * flush_kern_cache_all() 89 89 * 90 90 * Clean and invalidate the entire cache. 91 91 */ 92 - ENTRY(arm940_flush_kern_cache_all) 92 + SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all) 93 93 mov r2, #VM_EXEC 94 - /* FALLTHROUGH */ 94 + b arm940_flush_user_cache_range 95 + SYM_FUNC_END(arm940_flush_kern_cache_all) 95 96 96 97 /* 97 98 * flush_user_cache_range(start, end, flags) ··· 105 102 * - end - end address (exclusive) 106 103 * - flags - vm_flags describing address space 107 104 */ 108 - ENTRY(arm940_flush_user_cache_range) 105 + SYM_TYPED_FUNC_START(arm940_flush_user_cache_range) 109 106 mov ip, #0 110 107 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH 111 108 mcr p15, 0, ip, c7, c6, 0 @ flush D cache ··· 122 119 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 123 120 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 124 121 ret lr 122 + SYM_FUNC_END(arm940_flush_user_cache_range) 125 123 126 124 /* 127 125 * coherent_kern_range(start, end) ··· 134 130 * - start - virtual start address 135 131 * - end - virtual end address 136 132 */ 137 - ENTRY(arm940_coherent_kern_range) 138 - /* FALLTHROUGH */ 133 + SYM_TYPED_FUNC_START(arm940_coherent_kern_range) 134 + b 
arm940_flush_kern_dcache_area 135 + SYM_FUNC_END(arm940_coherent_kern_range) 139 136 140 137 /* 141 138 * coherent_user_range(start, end) ··· 148 143 * - start - virtual start address 149 144 * - end - virtual end address 150 145 */ 151 - ENTRY(arm940_coherent_user_range) 152 - /* FALLTHROUGH */ 146 + SYM_TYPED_FUNC_START(arm940_coherent_user_range) 147 + b arm940_flush_kern_dcache_area 148 + SYM_FUNC_END(arm940_coherent_user_range) 153 149 154 150 /* 155 151 * flush_kern_dcache_area(void *addr, size_t size) ··· 161 155 * - addr - kernel address 162 156 * - size - region size 163 157 */ 164 - ENTRY(arm940_flush_kern_dcache_area) 158 + SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area) 165 159 mov r0, #0 166 160 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 167 161 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries ··· 173 167 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 174 168 mcr p15, 0, r0, c7, c10, 4 @ drain WB 175 169 ret lr 170 + SYM_FUNC_END(arm940_flush_kern_dcache_area) 176 171 177 172 /* 178 173 * dma_inv_range(start, end) ··· 229 222 * - start - virtual start address 230 223 * - end - virtual end address 231 224 */ 232 - ENTRY(arm940_dma_flush_range) 225 + SYM_TYPED_FUNC_START(arm940_dma_flush_range) 233 226 mov ip, #0 234 227 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 235 228 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries ··· 245 238 bcs 1b @ segments 7 to 0 246 239 mcr p15, 0, ip, c7, c10, 4 @ drain WB 247 240 ret lr 241 + SYM_FUNC_END(arm940_dma_flush_range) 248 242 249 243 /* 250 244 * dma_map_area(start, size, dir) ··· 253 245 * - size - size of region 254 246 * - dir - DMA direction 255 247 */ 256 - ENTRY(arm940_dma_map_area) 248 + SYM_TYPED_FUNC_START(arm940_dma_map_area) 257 249 add r1, r1, r0 258 250 cmp r2, #DMA_TO_DEVICE 259 251 beq arm940_dma_clean_range 260 252 bcs arm940_dma_inv_range 261 253 b arm940_dma_flush_range 262 - ENDPROC(arm940_dma_map_area) 254 + SYM_FUNC_END(arm940_dma_map_area) 263 255 264 256 /* 265 
257 * dma_unmap_area(start, size, dir) ··· 267 259 * - size - size of region 268 260 * - dir - DMA direction 269 261 */ 270 - ENTRY(arm940_dma_unmap_area) 262 + SYM_TYPED_FUNC_START(arm940_dma_unmap_area) 271 263 ret lr 272 - ENDPROC(arm940_dma_unmap_area) 264 + SYM_FUNC_END(arm940_dma_unmap_area) 273 265 274 266 .globl arm940_flush_kern_cache_louis 275 267 .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
+23 -15
arch/arm/mm/proc-arm946.S
··· 8 8 */ 9 9 #include <linux/linkage.h> 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <linux/pgtable.h> 12 13 #include <asm/assembler.h> 13 14 #include <asm/hwcap.h> ··· 79 78 * 80 79 * Unconditionally clean and invalidate the entire icache. 81 80 */ 82 - ENTRY(arm946_flush_icache_all) 81 + SYM_TYPED_FUNC_START(arm946_flush_icache_all) 83 82 mov r0, #0 84 83 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 85 84 ret lr 86 - ENDPROC(arm946_flush_icache_all) 85 + SYM_FUNC_END(arm946_flush_icache_all) 87 86 88 87 /* 89 88 * flush_user_cache_all() 90 89 */ 91 - ENTRY(arm946_flush_user_cache_all) 92 - /* FALLTHROUGH */ 90 + SYM_TYPED_FUNC_START(arm946_flush_user_cache_all) 91 + b arm946_flush_kern_cache_all 92 + SYM_FUNC_END(arm946_flush_user_cache_all) 93 93 94 94 /* 95 95 * flush_kern_cache_all() 96 96 * 97 97 * Clean and invalidate the entire cache. 98 98 */ 99 - ENTRY(arm946_flush_kern_cache_all) 99 + SYM_TYPED_FUNC_START(arm946_flush_kern_cache_all) 100 100 mov r2, #VM_EXEC 101 101 mov ip, #0 102 102 __flush_whole_cache: ··· 116 114 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache 117 115 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 118 116 ret lr 117 + SYM_FUNC_END(arm946_flush_kern_cache_all) 119 118 120 119 /* 121 120 * flush_user_cache_range(start, end, flags) ··· 129 126 * - flags - vm_flags describing address space 130 127 * (same as arm926) 131 128 */ 132 - ENTRY(arm946_flush_user_cache_range) 129 + SYM_TYPED_FUNC_START(arm946_flush_user_cache_range) 133 130 mov ip, #0 134 131 sub r3, r1, r0 @ calculate total size 135 132 cmp r3, #CACHE_DLIMIT ··· 156 153 tst r2, #VM_EXEC 157 154 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 158 155 ret lr 156 + SYM_FUNC_END(arm946_flush_user_cache_range) 159 157 160 158 /* 161 159 * coherent_kern_range(start, end) ··· 168 164 * - start - virtual start address 169 165 * - end - virtual end address 170 166 */ 171 - ENTRY(arm946_coherent_kern_range) 172 - /* FALLTHROUGH */ 167 + 
SYM_TYPED_FUNC_START(arm946_coherent_kern_range) 168 + b arm946_coherent_user_range 169 + SYM_FUNC_END(arm946_coherent_kern_range) 173 170 174 171 /* 175 172 * coherent_user_range(start, end) ··· 183 178 * - end - virtual end address 184 179 * (same as arm926) 185 180 */ 186 - ENTRY(arm946_coherent_user_range) 181 + SYM_TYPED_FUNC_START(arm946_coherent_user_range) 187 182 bic r0, r0, #CACHE_DLINESIZE - 1 188 183 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 189 184 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 193 188 mcr p15, 0, r0, c7, c10, 4 @ drain WB 194 189 mov r0, #0 195 190 ret lr 191 + SYM_FUNC_END(arm946_coherent_user_range) 196 192 197 193 /* 198 194 * flush_kern_dcache_area(void *addr, size_t size) ··· 205 199 * - size - region size 206 200 * (same as arm926) 207 201 */ 208 - ENTRY(arm946_flush_kern_dcache_area) 202 + SYM_TYPED_FUNC_START(arm946_flush_kern_dcache_area) 209 203 add r1, r0, r1 210 204 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 211 205 add r0, r0, #CACHE_DLINESIZE ··· 215 209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 216 210 mcr p15, 0, r0, c7, c10, 4 @ drain WB 217 211 ret lr 212 + SYM_FUNC_END(arm946_flush_kern_dcache_area) 218 213 219 214 /* 220 215 * dma_inv_range(start, end) ··· 275 268 * 276 269 * (same as arm926) 277 270 */ 278 - ENTRY(arm946_dma_flush_range) 271 + SYM_TYPED_FUNC_START(arm946_dma_flush_range) 279 272 bic r0, r0, #CACHE_DLINESIZE - 1 280 273 1: 281 274 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 288 281 blo 1b 289 282 mcr p15, 0, r0, c7, c10, 4 @ drain WB 290 283 ret lr 284 + SYM_FUNC_END(arm946_dma_flush_range) 291 285 292 286 /* 293 287 * dma_map_area(start, size, dir) ··· 296 288 * - size - size of region 297 289 * - dir - DMA direction 298 290 */ 299 - ENTRY(arm946_dma_map_area) 291 + SYM_TYPED_FUNC_START(arm946_dma_map_area) 300 292 add r1, r1, r0 301 293 cmp r2, #DMA_TO_DEVICE 302 294 beq arm946_dma_clean_range 303 295 bcs arm946_dma_inv_range 304 296 b arm946_dma_flush_range 305 - 
ENDPROC(arm946_dma_map_area) 297 + SYM_FUNC_END(arm946_dma_map_area) 306 298 307 299 /* 308 300 * dma_unmap_area(start, size, dir) ··· 310 302 * - size - size of region 311 303 * - dir - DMA direction 312 304 */ 313 - ENTRY(arm946_dma_unmap_area) 305 + SYM_TYPED_FUNC_START(arm946_dma_unmap_area) 314 306 ret lr 315 - ENDPROC(arm946_dma_unmap_area) 307 + SYM_FUNC_END(arm946_dma_unmap_area) 316 308 317 309 .globl arm946_flush_kern_cache_louis 318 310 .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
+29 -19
arch/arm/mm/proc-feroceon.S
··· 8 8 9 9 #include <linux/linkage.h> 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <linux/pgtable.h> 12 13 #include <asm/assembler.h> 13 14 #include <asm/hwcap.h> ··· 123 122 * 124 123 * Unconditionally clean and invalidate the entire icache. 125 124 */ 126 - ENTRY(feroceon_flush_icache_all) 125 + SYM_TYPED_FUNC_START(feroceon_flush_icache_all) 127 126 mov r0, #0 128 127 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 129 128 ret lr 130 - ENDPROC(feroceon_flush_icache_all) 129 + SYM_FUNC_END(feroceon_flush_icache_all) 131 130 132 131 /* 133 132 * flush_user_cache_all() ··· 136 135 * address space. 137 136 */ 138 137 .align 5 139 - ENTRY(feroceon_flush_user_cache_all) 140 - /* FALLTHROUGH */ 138 + SYM_TYPED_FUNC_START(feroceon_flush_user_cache_all) 139 + b feroceon_flush_kern_cache_all 140 + SYM_FUNC_END(feroceon_flush_user_cache_all) 141 141 142 142 /* 143 143 * flush_kern_cache_all() 144 144 * 145 145 * Clean and invalidate the entire cache. 146 146 */ 147 - ENTRY(feroceon_flush_kern_cache_all) 147 + SYM_TYPED_FUNC_START(feroceon_flush_kern_cache_all) 148 148 mov r2, #VM_EXEC 149 149 150 150 __flush_whole_cache: ··· 163 161 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 164 162 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 165 163 ret lr 164 + SYM_FUNC_END(feroceon_flush_kern_cache_all) 166 165 167 166 /* 168 167 * flush_user_cache_range(start, end, flags) ··· 176 173 * - flags - vm_flags describing address space 177 174 */ 178 175 .align 5 179 - ENTRY(feroceon_flush_user_cache_range) 176 + SYM_TYPED_FUNC_START(feroceon_flush_user_cache_range) 180 177 sub r3, r1, r0 @ calculate total size 181 178 cmp r3, #CACHE_DLIMIT 182 179 bgt __flush_whole_cache ··· 193 190 mov ip, #0 194 191 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 195 192 ret lr 193 + SYM_FUNC_END(feroceon_flush_user_cache_range) 196 194 197 195 /* 198 196 * coherent_kern_range(start, end) ··· 206 202 * - end - virtual end address 207 203 */ 208 204 .align 5 209 - 
ENTRY(feroceon_coherent_kern_range) 210 - /* FALLTHROUGH */ 205 + SYM_TYPED_FUNC_START(feroceon_coherent_kern_range) 206 + b feroceon_coherent_user_range 207 + SYM_FUNC_END(feroceon_coherent_kern_range) 211 208 212 209 /* 213 210 * coherent_user_range(start, end) ··· 220 215 * - start - virtual start address 221 216 * - end - virtual end address 222 217 */ 223 - ENTRY(feroceon_coherent_user_range) 218 + SYM_TYPED_FUNC_START(feroceon_coherent_user_range) 224 219 bic r0, r0, #CACHE_DLINESIZE - 1 225 220 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 226 221 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 230 225 mcr p15, 0, r0, c7, c10, 4 @ drain WB 231 226 mov r0, #0 232 227 ret lr 228 + SYM_FUNC_END(feroceon_coherent_user_range) 233 229 234 230 /* 235 231 * flush_kern_dcache_area(void *addr, size_t size) ··· 242 236 * - size - region size 243 237 */ 244 238 .align 5 245 - ENTRY(feroceon_flush_kern_dcache_area) 239 + SYM_TYPED_FUNC_START(feroceon_flush_kern_dcache_area) 246 240 add r1, r0, r1 247 241 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 248 242 add r0, r0, #CACHE_DLINESIZE ··· 252 246 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 253 247 mcr p15, 0, r0, c7, c10, 4 @ drain WB 254 248 ret lr 249 + SYM_FUNC_END(feroceon_flush_kern_dcache_area) 255 250 256 251 .align 5 257 - ENTRY(feroceon_range_flush_kern_dcache_area) 252 + SYM_TYPED_FUNC_START(feroceon_range_flush_kern_dcache_area) 258 253 mrs r2, cpsr 259 254 add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive 260 255 orr r3, r2, #PSR_I_BIT ··· 267 260 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 268 261 mcr p15, 0, r0, c7, c10, 4 @ drain WB 269 262 ret lr 263 + SYM_FUNC_END(feroceon_range_flush_kern_dcache_area) 270 264 271 265 /* 272 266 * dma_inv_range(start, end) ··· 354 346 * - end - virtual end address 355 347 */ 356 348 .align 5 357 - ENTRY(feroceon_dma_flush_range) 349 + SYM_TYPED_FUNC_START(feroceon_dma_flush_range) 358 350 bic r0, r0, #CACHE_DLINESIZE - 1 359 351 1: mcr 
p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 360 352 add r0, r0, #CACHE_DLINESIZE ··· 362 354 blo 1b 363 355 mcr p15, 0, r0, c7, c10, 4 @ drain WB 364 356 ret lr 357 + SYM_FUNC_END(feroceon_dma_flush_range) 365 358 366 359 .align 5 367 - ENTRY(feroceon_range_dma_flush_range) 360 + SYM_TYPED_FUNC_START(feroceon_range_dma_flush_range) 368 361 mrs r2, cpsr 369 362 cmp r1, r0 370 363 subne r1, r1, #1 @ top address is inclusive ··· 376 367 msr cpsr_c, r2 @ restore interrupts 377 368 mcr p15, 0, r0, c7, c10, 4 @ drain WB 378 369 ret lr 370 + SYM_FUNC_END(feroceon_range_dma_flush_range) 379 371 380 372 /* 381 373 * dma_map_area(start, size, dir) ··· 384 374 * - size - size of region 385 375 * - dir - DMA direction 386 376 */ 387 - ENTRY(feroceon_dma_map_area) 377 + SYM_TYPED_FUNC_START(feroceon_dma_map_area) 388 378 add r1, r1, r0 389 379 cmp r2, #DMA_TO_DEVICE 390 380 beq feroceon_dma_clean_range 391 381 bcs feroceon_dma_inv_range 392 382 b feroceon_dma_flush_range 393 - ENDPROC(feroceon_dma_map_area) 383 + SYM_FUNC_END(feroceon_dma_map_area) 394 384 395 385 /* 396 386 * dma_map_area(start, size, dir) ··· 398 388 * - size - size of region 399 389 * - dir - DMA direction 400 390 */ 401 - ENTRY(feroceon_range_dma_map_area) 391 + SYM_TYPED_FUNC_START(feroceon_range_dma_map_area) 402 392 add r1, r1, r0 403 393 cmp r2, #DMA_TO_DEVICE 404 394 beq feroceon_range_dma_clean_range 405 395 bcs feroceon_range_dma_inv_range 406 396 b feroceon_range_dma_flush_range 407 - ENDPROC(feroceon_range_dma_map_area) 397 + SYM_FUNC_END(feroceon_range_dma_map_area) 408 398 409 399 /* 410 400 * dma_unmap_area(start, size, dir) ··· 412 402 * - size - size of region 413 403 * - dir - DMA direction 414 404 */ 415 - ENTRY(feroceon_dma_unmap_area) 405 + SYM_TYPED_FUNC_START(feroceon_dma_unmap_area) 416 406 ret lr 417 - ENDPROC(feroceon_dma_unmap_area) 407 + SYM_FUNC_END(feroceon_dma_unmap_area) 418 408 419 409 .globl feroceon_flush_kern_cache_louis 420 410 .equ feroceon_flush_kern_cache_louis, 
feroceon_flush_kern_cache_all
+23 -15
arch/arm/mm/proc-mohawk.S
··· 9 9 10 10 #include <linux/linkage.h> 11 11 #include <linux/init.h> 12 + #include <linux/cfi_types.h> 12 13 #include <linux/pgtable.h> 13 14 #include <asm/assembler.h> 14 15 #include <asm/hwcap.h> ··· 88 87 * 89 88 * Unconditionally clean and invalidate the entire icache. 90 89 */ 91 - ENTRY(mohawk_flush_icache_all) 90 + SYM_TYPED_FUNC_START(mohawk_flush_icache_all) 92 91 mov r0, #0 93 92 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 94 93 ret lr 95 - ENDPROC(mohawk_flush_icache_all) 94 + SYM_FUNC_END(mohawk_flush_icache_all) 96 95 97 96 /* 98 97 * flush_user_cache_all() ··· 100 99 * Clean and invalidate all cache entries in a particular 101 100 * address space. 102 101 */ 103 - ENTRY(mohawk_flush_user_cache_all) 104 - /* FALLTHROUGH */ 102 + SYM_TYPED_FUNC_START(mohawk_flush_user_cache_all) 103 + b mohawk_flush_kern_cache_all 104 + SYM_FUNC_END(mohawk_flush_user_cache_all) 105 105 106 106 /* 107 107 * flush_kern_cache_all() 108 108 * 109 109 * Clean and invalidate the entire cache. 
110 110 */ 111 - ENTRY(mohawk_flush_kern_cache_all) 111 + SYM_TYPED_FUNC_START(mohawk_flush_kern_cache_all) 112 112 mov r2, #VM_EXEC 113 113 mov ip, #0 114 114 __flush_whole_cache: ··· 118 116 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 119 117 mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer 120 118 ret lr 119 + SYM_FUNC_END(mohawk_flush_kern_cache_all) 121 120 122 121 /* 123 122 * flush_user_cache_range(start, end, flags) ··· 132 129 * 133 130 * (same as arm926) 134 131 */ 135 - ENTRY(mohawk_flush_user_cache_range) 132 + SYM_TYPED_FUNC_START(mohawk_flush_user_cache_range) 136 133 mov ip, #0 137 134 sub r3, r1, r0 @ calculate total size 138 135 cmp r3, #CACHE_DLIMIT ··· 149 146 tst r2, #VM_EXEC 150 147 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 151 148 ret lr 149 + SYM_FUNC_END(mohawk_flush_user_cache_range) 152 150 153 151 /* 154 152 * coherent_kern_range(start, end) ··· 161 157 * - start - virtual start address 162 158 * - end - virtual end address 163 159 */ 164 - ENTRY(mohawk_coherent_kern_range) 165 - /* FALLTHROUGH */ 160 + SYM_TYPED_FUNC_START(mohawk_coherent_kern_range) 161 + b mohawk_coherent_user_range 162 + SYM_FUNC_END(mohawk_coherent_kern_range) 166 163 167 164 /* 168 165 * coherent_user_range(start, end) ··· 177 172 * 178 173 * (same as arm926) 179 174 */ 180 - ENTRY(mohawk_coherent_user_range) 175 + SYM_TYPED_FUNC_START(mohawk_coherent_user_range) 181 176 bic r0, r0, #CACHE_DLINESIZE - 1 182 177 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 183 178 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 187 182 mcr p15, 0, r0, c7, c10, 4 @ drain WB 188 183 mov r0, #0 189 184 ret lr 185 + SYM_FUNC_END(mohawk_coherent_user_range) 190 186 191 187 /* 192 188 * flush_kern_dcache_area(void *addr, size_t size) ··· 198 192 * - addr - kernel address 199 193 * - size - region size 200 194 */ 201 - ENTRY(mohawk_flush_kern_dcache_area) 195 + SYM_TYPED_FUNC_START(mohawk_flush_kern_dcache_area) 202 196 add r1, r0, r1 203 197 1: mcr p15, 0, r0, c7, c14, 1 @ 
clean+invalidate D entry 204 198 add r0, r0, #CACHE_DLINESIZE ··· 208 202 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 209 203 mcr p15, 0, r0, c7, c10, 4 @ drain WB 210 204 ret lr 205 + SYM_FUNC_END(mohawk_flush_kern_dcache_area) 211 206 212 207 /* 213 208 * dma_inv_range(start, end) ··· 263 256 * - start - virtual start address 264 257 * - end - virtual end address 265 258 */ 266 - ENTRY(mohawk_dma_flush_range) 259 + SYM_TYPED_FUNC_START(mohawk_dma_flush_range) 267 260 bic r0, r0, #CACHE_DLINESIZE - 1 268 261 1: 269 262 mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry ··· 272 265 blo 1b 273 266 mcr p15, 0, r0, c7, c10, 4 @ drain WB 274 267 ret lr 268 + SYM_FUNC_END(mohawk_dma_flush_range) 275 269 276 270 /* 277 271 * dma_map_area(start, size, dir) ··· 280 272 * - size - size of region 281 273 * - dir - DMA direction 282 274 */ 283 - ENTRY(mohawk_dma_map_area) 275 + SYM_TYPED_FUNC_START(mohawk_dma_map_area) 284 276 add r1, r1, r0 285 277 cmp r2, #DMA_TO_DEVICE 286 278 beq mohawk_dma_clean_range 287 279 bcs mohawk_dma_inv_range 288 280 b mohawk_dma_flush_range 289 - ENDPROC(mohawk_dma_map_area) 281 + SYM_FUNC_END(mohawk_dma_map_area) 290 282 291 283 /* 292 284 * dma_unmap_area(start, size, dir) ··· 294 286 * - size - size of region 295 287 * - dir - DMA direction 296 288 */ 297 - ENTRY(mohawk_dma_unmap_area) 289 + SYM_TYPED_FUNC_START(mohawk_dma_unmap_area) 298 290 ret lr 299 - ENDPROC(mohawk_dma_unmap_area) 291 + SYM_FUNC_END(mohawk_dma_unmap_area) 300 292 301 293 .globl mohawk_flush_kern_cache_louis 302 294 .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
+24 -15
arch/arm/mm/proc-xsc3.S
··· 23 23 24 24 #include <linux/linkage.h> 25 25 #include <linux/init.h> 26 + #include <linux/cfi_types.h> 26 27 #include <linux/pgtable.h> 27 28 #include <asm/assembler.h> 28 29 #include <asm/hwcap.h> ··· 145 144 * 146 145 * Unconditionally clean and invalidate the entire icache. 147 146 */ 148 - ENTRY(xsc3_flush_icache_all) 147 + SYM_TYPED_FUNC_START(xsc3_flush_icache_all) 149 148 mov r0, #0 150 149 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 151 150 ret lr 152 - ENDPROC(xsc3_flush_icache_all) 151 + SYM_FUNC_END(xsc3_flush_icache_all) 153 152 154 153 /* 155 154 * flush_user_cache_all() ··· 157 156 * Invalidate all cache entries in a particular address 158 157 * space. 159 158 */ 160 - ENTRY(xsc3_flush_user_cache_all) 161 - /* FALLTHROUGH */ 159 + SYM_TYPED_FUNC_START(xsc3_flush_user_cache_all) 160 + b xsc3_flush_kern_cache_all 161 + SYM_FUNC_END(xsc3_flush_user_cache_all) 162 162 163 163 /* 164 164 * flush_kern_cache_all() 165 165 * 166 166 * Clean and invalidate the entire cache. 167 167 */ 168 - ENTRY(xsc3_flush_kern_cache_all) 168 + SYM_TYPED_FUNC_START(xsc3_flush_kern_cache_all) 169 169 mov r2, #VM_EXEC 170 170 mov ip, #0 171 171 __flush_whole_cache: ··· 176 174 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 177 175 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 178 176 ret lr 177 + SYM_FUNC_END(xsc3_flush_kern_cache_all) 179 178 180 179 /* 181 180 * flush_user_cache_range(start, end, vm_flags) ··· 189 186 * - vma - vma_area_struct describing address space 190 187 */ 191 188 .align 5 192 - ENTRY(xsc3_flush_user_cache_range) 189 + SYM_TYPED_FUNC_START(xsc3_flush_user_cache_range) 193 190 mov ip, #0 194 191 sub r3, r1, r0 @ calculate total size 195 192 cmp r3, #MAX_AREA_SIZE ··· 206 203 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 207 204 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 208 205 ret lr 206 + SYM_FUNC_END(xsc3_flush_user_cache_range) 209 207 210 208 /* 211 209 * coherent_kern_range(start, end) ··· 221 217 * Note: single I-cache line 
invalidation isn't used here since 222 218 * it also trashes the mini I-cache used by JTAG debuggers. 223 219 */ 224 - ENTRY(xsc3_coherent_kern_range) 225 - /* FALLTHROUGH */ 226 - ENTRY(xsc3_coherent_user_range) 220 + SYM_TYPED_FUNC_START(xsc3_coherent_kern_range) 221 + b xsc3_coherent_user_range 222 + SYM_FUNC_END(xsc3_coherent_kern_range) 223 + 224 + SYM_TYPED_FUNC_START(xsc3_coherent_user_range) 227 225 bic r0, r0, #CACHELINESIZE - 1 228 226 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 229 227 add r0, r0, #CACHELINESIZE ··· 236 230 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 237 231 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 238 232 ret lr 233 + SYM_FUNC_END(xsc3_coherent_user_range) 239 234 240 235 /* 241 236 * flush_kern_dcache_area(void *addr, size_t size) ··· 247 240 * - addr - kernel address 248 241 * - size - region size 249 242 */ 250 - ENTRY(xsc3_flush_kern_dcache_area) 243 + SYM_TYPED_FUNC_START(xsc3_flush_kern_dcache_area) 251 244 add r1, r0, r1 252 245 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line 253 246 add r0, r0, #CACHELINESIZE ··· 258 251 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 259 252 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 260 253 ret lr 254 + SYM_FUNC_END(xsc3_flush_kern_dcache_area) 261 255 262 256 /* 263 257 * dma_inv_range(start, end) ··· 309 301 * - start - virtual start address 310 302 * - end - virtual end address 311 303 */ 312 - ENTRY(xsc3_dma_flush_range) 304 + SYM_TYPED_FUNC_START(xsc3_dma_flush_range) 313 305 bic r0, r0, #CACHELINESIZE - 1 314 306 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line 315 307 add r0, r0, #CACHELINESIZE ··· 317 309 blo 1b 318 310 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 319 311 ret lr 312 + SYM_FUNC_END(xsc3_dma_flush_range) 320 313 321 314 /* 322 315 * dma_map_area(start, size, dir) ··· 325 316 * - size - size of region 326 317 * - dir - DMA direction 327 318 */ 328 - ENTRY(xsc3_dma_map_area) 319 + SYM_TYPED_FUNC_START(xsc3_dma_map_area) 329 320 add r1, r1, 
r0 330 321 cmp r2, #DMA_TO_DEVICE 331 322 beq xsc3_dma_clean_range 332 323 bcs xsc3_dma_inv_range 333 324 b xsc3_dma_flush_range 334 - ENDPROC(xsc3_dma_map_area) 325 + SYM_FUNC_END(xsc3_dma_map_area) 335 326 336 327 /* 337 328 * dma_unmap_area(start, size, dir) ··· 339 330 * - size - size of region 340 331 * - dir - DMA direction 341 332 */ 342 - ENTRY(xsc3_dma_unmap_area) 333 + SYM_TYPED_FUNC_START(xsc3_dma_unmap_area) 343 334 ret lr 344 - ENDPROC(xsc3_dma_unmap_area) 335 + SYM_FUNC_END(xsc3_dma_unmap_area) 345 336 346 337 .globl xsc3_flush_kern_cache_louis 347 338 .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
+24 -16
arch/arm/mm/proc-xscale.S
··· 19 19 20 20 #include <linux/linkage.h> 21 21 #include <linux/init.h> 22 + #include <linux/cfi_types.h> 22 23 #include <linux/pgtable.h> 23 24 #include <asm/assembler.h> 24 25 #include <asm/hwcap.h> ··· 187 186 * 188 187 * Unconditionally clean and invalidate the entire icache. 189 188 */ 190 - ENTRY(xscale_flush_icache_all) 189 + SYM_TYPED_FUNC_START(xscale_flush_icache_all) 191 190 mov r0, #0 192 191 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 193 192 ret lr 194 - ENDPROC(xscale_flush_icache_all) 193 + SYM_FUNC_END(xscale_flush_icache_all) 195 194 196 195 /* 197 196 * flush_user_cache_all() ··· 199 198 * Invalidate all cache entries in a particular address 200 199 * space. 201 200 */ 202 - ENTRY(xscale_flush_user_cache_all) 203 - /* FALLTHROUGH */ 201 + SYM_TYPED_FUNC_START(xscale_flush_user_cache_all) 202 + b xscale_flush_kern_cache_all 203 + SYM_FUNC_END(xscale_flush_user_cache_all) 204 204 205 205 /* 206 206 * flush_kern_cache_all() 207 207 * 208 208 * Clean and invalidate the entire cache. 
209 209 */ 210 - ENTRY(xscale_flush_kern_cache_all) 210 + SYM_TYPED_FUNC_START(xscale_flush_kern_cache_all) 211 211 mov r2, #VM_EXEC 212 212 mov ip, #0 213 213 __flush_whole_cache: ··· 217 215 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 218 216 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 219 217 ret lr 218 + SYM_FUNC_END(xscale_flush_kern_cache_all) 220 219 221 220 /* 222 221 * flush_user_cache_range(start, end, vm_flags) ··· 230 227 * - vma - vma_area_struct describing address space 231 228 */ 232 229 .align 5 233 - ENTRY(xscale_flush_user_cache_range) 230 + SYM_TYPED_FUNC_START(xscale_flush_user_cache_range) 234 231 mov ip, #0 235 232 sub r3, r1, r0 @ calculate total size 236 233 cmp r3, #MAX_AREA_SIZE ··· 247 244 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB 248 245 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 249 246 ret lr 247 + SYM_FUNC_END(xscale_flush_user_cache_range) 250 248 251 249 /* 252 250 * coherent_kern_range(start, end) ··· 262 258 * Note: single I-cache line invalidation isn't used here since 263 259 * it also trashes the mini I-cache used by JTAG debuggers. 
264 260 */ 265 - ENTRY(xscale_coherent_kern_range) 261 + SYM_TYPED_FUNC_START(xscale_coherent_kern_range) 266 262 bic r0, r0, #CACHELINESIZE - 1 267 263 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 268 264 add r0, r0, #CACHELINESIZE ··· 272 268 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 273 269 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 274 270 ret lr 271 + SYM_FUNC_END(xscale_coherent_kern_range) 275 272 276 273 /* 277 274 * coherent_user_range(start, end) ··· 284 279 * - start - virtual start address 285 280 * - end - virtual end address 286 281 */ 287 - ENTRY(xscale_coherent_user_range) 282 + SYM_TYPED_FUNC_START(xscale_coherent_user_range) 288 283 bic r0, r0, #CACHELINESIZE - 1 289 284 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 290 285 mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry ··· 295 290 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB 296 291 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 297 292 ret lr 293 + SYM_FUNC_END(xscale_coherent_user_range) 298 294 299 295 /* 300 296 * flush_kern_dcache_area(void *addr, size_t size) ··· 306 300 * - addr - kernel address 307 301 * - size - region size 308 302 */ 309 - ENTRY(xscale_flush_kern_dcache_area) 303 + SYM_TYPED_FUNC_START(xscale_flush_kern_dcache_area) 310 304 add r1, r0, r1 311 305 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 312 306 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry ··· 317 311 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 318 312 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 319 313 ret lr 314 + SYM_FUNC_END(xscale_flush_kern_dcache_area) 320 315 321 316 /* 322 317 * dma_inv_range(start, end) ··· 368 361 * - start - virtual start address 369 362 * - end - virtual end address 370 363 */ 371 - ENTRY(xscale_dma_flush_range) 364 + SYM_TYPED_FUNC_START(xscale_dma_flush_range) 372 365 bic r0, r0, #CACHELINESIZE - 1 373 366 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 374 367 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry ··· 377 370 blo 
1b 378 371 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 379 372 ret lr 373 + SYM_FUNC_END(xscale_dma_flush_range) 380 374 381 375 /* 382 376 * dma_map_area(start, size, dir) ··· 385 377 * - size - size of region 386 378 * - dir - DMA direction 387 379 */ 388 - ENTRY(xscale_dma_map_area) 380 + SYM_TYPED_FUNC_START(xscale_dma_map_area) 389 381 add r1, r1, r0 390 382 cmp r2, #DMA_TO_DEVICE 391 383 beq xscale_dma_clean_range 392 384 bcs xscale_dma_inv_range 393 385 b xscale_dma_flush_range 394 - ENDPROC(xscale_dma_map_area) 386 + SYM_FUNC_END(xscale_dma_map_area) 395 387 396 388 /* 397 389 * dma_map_area(start, size, dir) ··· 399 391 * - size - size of region 400 392 * - dir - DMA direction 401 393 */ 402 - ENTRY(xscale_80200_A0_A1_dma_map_area) 394 + SYM_TYPED_FUNC_START(xscale_80200_A0_A1_dma_map_area) 403 395 add r1, r1, r0 404 396 teq r2, #DMA_TO_DEVICE 405 397 beq xscale_dma_clean_range 406 398 b xscale_dma_flush_range 407 - ENDPROC(xscale_80200_A0_A1_dma_map_area) 399 + SYM_FUNC_END(xscale_80200_A0_A1_dma_map_area) 408 400 409 401 /* 410 402 * dma_unmap_area(start, size, dir) ··· 412 404 * - size - size of region 413 405 * - dir - DMA direction 414 406 */ 415 - ENTRY(xscale_dma_unmap_area) 407 + SYM_TYPED_FUNC_START(xscale_dma_unmap_area) 416 408 ret lr 417 - ENDPROC(xscale_dma_unmap_area) 409 + SYM_FUNC_END(xscale_dma_unmap_area) 418 410 419 411 .globl xscale_flush_kern_cache_louis 420 412 .equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all