Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux

Pull ARM updates from Russell King:

- Updates to AMBA bus subsystem to drop .owner struct device_driver
initialisations, moving that to code instead.

- Add LPAE privileged-access-never support

- Add support for Clang CFI

- clkdev: report over-sized device or connection strings

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux: (36 commits)
ARM: 9398/1: Fix userspace enter on LPAE with CC_OPTIMIZE_FOR_SIZE=y
clkdev: report over-sized strings when creating clkdev entries
ARM: 9393/1: mm: Use conditionals for CFI branches
ARM: 9392/2: Support CLANG CFI
ARM: 9391/2: hw_breakpoint: Handle CFI breakpoints
ARM: 9390/2: lib: Annotate loop delay instructions for CFI
ARM: 9389/2: mm: Define prototypes for all per-processor calls
ARM: 9388/2: mm: Type-annotate all per-processor assembly routines
ARM: 9387/2: mm: Rewrite cacheflush vtables in CFI safe C
ARM: 9386/2: mm: Use symbol alias for cache functions
ARM: 9385/2: mm: Type-annotate all cache assembly routines
ARM: 9384/2: mm: Make tlbflush routines CFI safe
ARM: 9382/1: ftrace: Define ftrace_stub_graph
ARM: 9358/2: Implement PAN for LPAE by TTBR0 page table walks disablement
ARM: 9357/2: Reduce the number of #ifdef CONFIG_CPU_SW_DOMAIN_PAN
ARM: 9356/2: Move asm statements accessing TTBCR into C functions
ARM: 9355/2: Add TTBCR_* definitions to pgtable-3level-hwdef.h
ARM: 9379/1: coresight: tpda: drop owner assignment
ARM: 9378/1: coresight: etm4x: drop owner assignment
ARM: 9377/1: hwrng: nomadik: drop owner assignment
...

+2608 -1014
+20 -3
arch/arm/Kconfig
··· 35 35 select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX 36 36 select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7 37 37 select ARCH_SUPPORTS_ATOMIC_RMW 38 + select ARCH_SUPPORTS_CFI_CLANG 38 39 select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE 39 40 select ARCH_SUPPORTS_PER_VMA_LOCK 40 41 select ARCH_USE_BUILTIN_BSWAP ··· 1234 1233 consumed by page tables. Setting this option will allow 1235 1234 user-space 2nd level page tables to reside in high memory. 1236 1235 1237 - config CPU_SW_DOMAIN_PAN 1238 - bool "Enable use of CPU domains to implement privileged no-access" 1239 - depends on MMU && !ARM_LPAE 1236 + config ARM_PAN 1237 + bool "Enable privileged no-access" 1238 + depends on MMU 1240 1239 default y 1241 1240 help 1242 1241 Increase kernel security by ensuring that normal kernel accesses ··· 1245 1244 by ensuring that magic values (such as LIST_POISON) will always 1246 1245 fault when dereferenced. 1247 1246 1247 + The implementation uses CPU domains when !CONFIG_ARM_LPAE and 1248 + disabling of TTBR0 page table walks with CONFIG_ARM_LPAE. 1249 + 1250 + config CPU_SW_DOMAIN_PAN 1251 + def_bool y 1252 + depends on ARM_PAN && !ARM_LPAE 1253 + help 1254 + Enable use of CPU domains to implement privileged no-access. 1255 + 1248 1256 CPUs with low-vector mappings use a best-efforts implementation. 1249 1257 Their lower 1MB needs to remain accessible for the vectors, but 1250 1258 the remainder of userspace will become appropriately inaccessible. 1259 + 1260 + config CPU_TTBR0_PAN 1261 + def_bool y 1262 + depends on ARM_PAN && ARM_LPAE 1263 + help 1264 + Enable privileged no-access by disabling TTBR0 page table walks when 1265 + running in kernel mode. 1251 1266 1252 1267 config HW_PERF_EVENTS 1253 1268 def_bool y
+1
arch/arm/include/asm/assembler.h
··· 21 21 #include <asm/opcodes-virt.h> 22 22 #include <asm/asm-offsets.h> 23 23 #include <asm/page.h> 24 + #include <asm/pgtable.h> 24 25 #include <asm/thread_info.h> 25 26 #include <asm/uaccess-asm.h> 26 27
+9 -19
arch/arm/include/asm/glue-cache.h
··· 118 118 # define MULTI_CACHE 1 119 119 #endif 120 120 121 + #ifdef CONFIG_CPU_CACHE_NOP 122 + # define MULTI_CACHE 1 123 + #endif 124 + 121 125 #if defined(CONFIG_CPU_V7M) 122 126 # define MULTI_CACHE 1 123 127 #endif ··· 130 126 #error Unknown cache maintenance model 131 127 #endif 132 128 133 - #ifndef __ASSEMBLER__ 134 - static inline void nop_flush_icache_all(void) { } 135 - static inline void nop_flush_kern_cache_all(void) { } 136 - static inline void nop_flush_kern_cache_louis(void) { } 137 - static inline void nop_flush_user_cache_all(void) { } 138 - static inline void nop_flush_user_cache_range(unsigned long a, 139 - unsigned long b, unsigned int c) { } 140 - 141 - static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { } 142 - static inline int nop_coherent_user_range(unsigned long a, 143 - unsigned long b) { return 0; } 144 - static inline void nop_flush_kern_dcache_area(void *a, size_t s) { } 145 - 146 - static inline void nop_dma_flush_range(const void *a, const void *b) { } 147 - 148 - static inline void nop_dma_map_area(const void *s, size_t l, int f) { } 149 - static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } 150 - #endif 151 - 152 129 #ifndef MULTI_CACHE 153 130 #define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all) 154 131 #define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all) 132 + /* This function only has a dedicated assembly callback on the v7 cache */ 133 + #ifdef CONFIG_CPU_CACHE_V7 155 134 #define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_louis) 135 + #else 136 + #define __cpuc_flush_kern_louis __glue(_CACHE,_flush_kern_cache_all) 137 + #endif 156 138 #define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all) 157 139 #define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range) 158 140 #define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
+1
arch/arm/include/asm/hw_breakpoint.h
··· 84 84 #define ARM_DSCR_MOE(x) ((x >> 2) & 0xf) 85 85 #define ARM_ENTRY_BREAKPOINT 0x1 86 86 #define ARM_ENTRY_ASYNC_WATCHPOINT 0x2 87 + #define ARM_ENTRY_CFI_BREAKPOINT 0x3 87 88 #define ARM_ENTRY_SYNC_WATCHPOINT 0xa 88 89 89 90 /* DSCR monitor/halting bits. */
+26
arch/arm/include/asm/pgtable-3level-hwdef.h
··· 74 74 #define PHYS_MASK_SHIFT (40) 75 75 #define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1) 76 76 77 + #ifndef CONFIG_CPU_TTBR0_PAN 77 78 /* 78 79 * TTBR0/TTBR1 split (PAGE_OFFSET): 79 80 * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) ··· 94 93 #endif 95 94 96 95 #define TTBR1_SIZE (((PAGE_OFFSET >> 30) - 1) << 16) 96 + #else 97 + /* 98 + * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used during uaccess 99 + * disabled regions when TTBR0 is disabled. 100 + */ 101 + #define TTBR1_OFFSET 0 /* pointing to swapper_pg_dir */ 102 + #define TTBR1_SIZE 0 /* TTBR1 size controlled via TTBCR.T0SZ */ 103 + #endif 104 + 105 + /* 106 + * TTBCR register bits. 107 + */ 108 + #define TTBCR_EAE (1 << 31) 109 + #define TTBCR_IMP (1 << 30) 110 + #define TTBCR_SH1_MASK (3 << 28) 111 + #define TTBCR_ORGN1_MASK (3 << 26) 112 + #define TTBCR_IRGN1_MASK (3 << 24) 113 + #define TTBCR_EPD1 (1 << 23) 114 + #define TTBCR_A1 (1 << 22) 115 + #define TTBCR_T1SZ_MASK (7 << 16) 116 + #define TTBCR_SH0_MASK (3 << 12) 117 + #define TTBCR_ORGN0_MASK (3 << 10) 118 + #define TTBCR_IRGN0_MASK (3 << 8) 119 + #define TTBCR_EPD0 (1 << 7) 120 + #define TTBCR_T0SZ_MASK (7 << 0) 97 121 98 122 #endif
+12
arch/arm/include/asm/proc-fns.h
··· 178 178 }) 179 179 #endif 180 180 181 + static inline unsigned int cpu_get_ttbcr(void) 182 + { 183 + unsigned int ttbcr; 184 + asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr)); 185 + return ttbcr; 186 + } 187 + 188 + static inline void cpu_set_ttbcr(unsigned int ttbcr) 189 + { 190 + asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory"); 191 + } 192 + 181 193 #else /*!CONFIG_MMU */ 182 194 183 195 #define cpu_switch_mm(pgd,mm) { }
+1
arch/arm/include/asm/ptrace.h
··· 20 20 struct svc_pt_regs { 21 21 struct pt_regs regs; 22 22 u32 dacr; 23 + u32 ttbcr; 23 24 }; 24 25 25 26 #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+54 -4
arch/arm/include/asm/uaccess-asm.h
··· 39 39 #endif 40 40 .endm 41 41 42 + #if defined(CONFIG_CPU_SW_DOMAIN_PAN) 43 + 42 44 .macro uaccess_disable, tmp, isb=1 43 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 44 45 /* 45 46 * Whenever we re-enter userspace, the domains should always be 46 47 * set appropriately. ··· 51 50 .if \isb 52 51 instr_sync 53 52 .endif 54 - #endif 55 53 .endm 56 54 57 55 .macro uaccess_enable, tmp, isb=1 58 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 59 56 /* 60 57 * Whenever we re-enter userspace, the domains should always be 61 58 * set appropriately. ··· 63 64 .if \isb 64 65 instr_sync 65 66 .endif 66 - #endif 67 67 .endm 68 + 69 + #elif defined(CONFIG_CPU_TTBR0_PAN) 70 + 71 + .macro uaccess_disable, tmp, isb=1 72 + /* 73 + * Disable TTBR0 page table walks (EPD0 = 1), use the reserved ASID 74 + * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel 75 + * addresses by reducing TTBR0 range to 32MB (T0SZ = 7). 76 + */ 77 + mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR 78 + orr \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK 79 + orr \tmp, \tmp, #TTBCR_A1 80 + mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR 81 + .if \isb 82 + instr_sync 83 + .endif 84 + .endm 85 + 86 + .macro uaccess_enable, tmp, isb=1 87 + /* 88 + * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from 89 + * TTBR0 (A1 = 0). 90 + */ 91 + mrc p15, 0, \tmp, c2, c0, 2 @ read TTBCR 92 + bic \tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK 93 + bic \tmp, \tmp, #TTBCR_A1 94 + mcr p15, 0, \tmp, c2, c0, 2 @ write TTBCR 95 + .if \isb 96 + instr_sync 97 + .endif 98 + .endm 99 + 100 + #else 101 + 102 + .macro uaccess_disable, tmp, isb=1 103 + .endm 104 + 105 + .macro uaccess_enable, tmp, isb=1 106 + .endm 107 + 108 + #endif 68 109 69 110 #if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS) 70 111 #define DACR(x...) x 71 112 #else 72 113 #define DACR(x...) 114 + #endif 115 + 116 + #ifdef CONFIG_CPU_TTBR0_PAN 117 + #define PAN(x...) x 118 + #else 119 + #define PAN(x...) 
73 120 #endif 74 121 75 122 /* ··· 131 86 .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable 132 87 DACR( mrc p15, 0, \tmp0, c3, c0, 0) 133 88 DACR( str \tmp0, [sp, #SVC_DACR]) 89 + PAN( mrc p15, 0, \tmp0, c2, c0, 2) 90 + PAN( str \tmp0, [sp, #SVC_TTBCR]) 134 91 .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) 135 92 /* kernel=client, user=no access */ 136 93 mov \tmp2, #DACR_UACCESS_DISABLE ··· 151 104 .macro uaccess_exit, tsk, tmp0, tmp1 152 105 DACR( ldr \tmp0, [sp, #SVC_DACR]) 153 106 DACR( mcr p15, 0, \tmp0, c3, c0, 0) 107 + PAN( ldr \tmp0, [sp, #SVC_TTBCR]) 108 + PAN( mcr p15, 0, \tmp0, c2, c0, 2) 154 109 .endm 155 110 156 111 #undef DACR 112 + #undef PAN 157 113 158 114 #endif /* __ASM_UACCESS_ASM_H__ */
+39 -6
arch/arm/include/asm/uaccess.h
··· 14 14 #include <asm/domain.h> 15 15 #include <asm/unaligned.h> 16 16 #include <asm/unified.h> 17 + #include <asm/pgtable.h> 18 + #include <asm/proc-fns.h> 17 19 #include <asm/compiler.h> 18 20 19 21 #include <asm/extable.h> ··· 26 24 * perform such accesses (eg, via list poison values) which could then 27 25 * be exploited for privilege escalation. 28 26 */ 27 + #if defined(CONFIG_CPU_SW_DOMAIN_PAN) 28 + 29 29 static __always_inline unsigned int uaccess_save_and_enable(void) 30 30 { 31 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 32 31 unsigned int old_domain = get_domain(); 33 32 34 33 /* Set the current domain access to permit user accesses */ ··· 37 34 domain_val(DOMAIN_USER, DOMAIN_CLIENT)); 38 35 39 36 return old_domain; 40 - #else 41 - return 0; 42 - #endif 43 37 } 44 38 45 39 static __always_inline void uaccess_restore(unsigned int flags) 46 40 { 47 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 48 41 /* Restore the user access mask */ 49 42 set_domain(flags); 50 - #endif 51 43 } 44 + 45 + #elif defined(CONFIG_CPU_TTBR0_PAN) 46 + 47 + static __always_inline unsigned int uaccess_save_and_enable(void) 48 + { 49 + unsigned int old_ttbcr = cpu_get_ttbcr(); 50 + 51 + /* 52 + * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from 53 + * TTBR0 (A1 = 0). 54 + */ 55 + cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK)); 56 + isb(); 57 + 58 + return old_ttbcr; 59 + } 60 + 61 + static inline void uaccess_restore(unsigned int flags) 62 + { 63 + cpu_set_ttbcr(flags); 64 + isb(); 65 + } 66 + 67 + #else 68 + 69 + static inline unsigned int uaccess_save_and_enable(void) 70 + { 71 + return 0; 72 + } 73 + 74 + static inline void uaccess_restore(unsigned int flags) 75 + { 76 + } 77 + 78 + #endif 52 79 53 80 /* 54 81 * These two are intentionally not defined anywhere - if the kernel
+1
arch/arm/kernel/asm-offsets.c
··· 85 85 DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); 86 86 DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); 87 87 DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr)); 88 + DEFINE(SVC_TTBCR, offsetof(struct svc_pt_regs, ttbcr)); 88 89 DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); 89 90 BLANK(); 90 91 DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3]));
+4
arch/arm/kernel/entry-ftrace.S
··· 271 271 ret lr 272 272 ENDPROC(ftrace_stub) 273 273 274 + ENTRY(ftrace_stub_graph) 275 + ret lr 276 + ENDPROC(ftrace_stub_graph) 277 + 274 278 #ifdef CONFIG_DYNAMIC_FTRACE 275 279 276 280 __INIT
+35
arch/arm/kernel/hw_breakpoint.c
··· 17 17 #include <linux/perf_event.h> 18 18 #include <linux/hw_breakpoint.h> 19 19 #include <linux/smp.h> 20 + #include <linux/cfi.h> 20 21 #include <linux/cpu_pm.h> 21 22 #include <linux/coresight.h> 22 23 ··· 904 903 watchpoint_single_step_handler(addr); 905 904 } 906 905 906 + #ifdef CONFIG_CFI_CLANG 907 + static void hw_breakpoint_cfi_handler(struct pt_regs *regs) 908 + { 909 + /* 910 + * TODO: implementing target and type to pass to CFI using the more 911 + * elaborate report_cfi_failure() requires compiler work. To be able 912 + * to properly extract target information the compiler needs to 913 + * emit a stable instructions sequence for the CFI checks so we can 914 + * decode the instructions preceding the trap and figure out which 915 + * registers were used. 916 + */ 917 + 918 + switch (report_cfi_failure_noaddr(regs, instruction_pointer(regs))) { 919 + case BUG_TRAP_TYPE_BUG: 920 + die("Oops - CFI", regs, 0); 921 + break; 922 + case BUG_TRAP_TYPE_WARN: 923 + /* Skip the breaking instruction */ 924 + instruction_pointer(regs) += 4; 925 + break; 926 + default: 927 + die("Unknown CFI error", regs, 0); 928 + break; 929 + } 930 + } 931 + #else 932 + static void hw_breakpoint_cfi_handler(struct pt_regs *regs) 933 + { 934 + } 935 + #endif 936 + 907 937 /* 908 938 * Called from either the Data Abort Handler [watchpoint] or the 909 939 * Prefetch Abort Handler [breakpoint] with interrupts disabled. ··· 963 931 fallthrough; 964 932 case ARM_ENTRY_SYNC_WATCHPOINT: 965 933 watchpoint_handler(addr, fsr, regs); 934 + break; 935 + case ARM_ENTRY_CFI_BREAKPOINT: 936 + hw_breakpoint_cfi_handler(regs); 966 937 break; 967 938 default: 968 939 ret = 1; /* Unhandled fault. */
+8
arch/arm/kernel/suspend.c
··· 12 12 #include <asm/smp_plat.h> 13 13 #include <asm/suspend.h> 14 14 #include <asm/tlbflush.h> 15 + #include <asm/uaccess.h> 15 16 16 17 extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid); 17 18 extern void cpu_resume_mmu(void); ··· 26 25 27 26 if (!idmap_pgd) 28 27 return -EINVAL; 28 + 29 + /* 30 + * Needed for the MMU disabling/enabling code to be able to run from 31 + * TTBR0 addresses. 32 + */ 33 + if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN)) 34 + uaccess_save_and_enable(); 29 35 30 36 /* 31 37 * Function graph tracer state gets inconsistent when the kernel
+19 -1
arch/arm/lib/csumpartialcopyuser.S
··· 13 13 14 14 .text 15 15 16 - #ifdef CONFIG_CPU_SW_DOMAIN_PAN 16 + #if defined(CONFIG_CPU_SW_DOMAIN_PAN) 17 + 17 18 .macro save_regs 18 19 mrc p15, 0, ip, c3, c0, 0 19 20 stmfd sp!, {r1, r2, r4 - r8, ip, lr} ··· 26 25 mcr p15, 0, ip, c3, c0, 0 27 26 ret lr 28 27 .endm 28 + 29 + #elif defined(CONFIG_CPU_TTBR0_PAN) 30 + 31 + .macro save_regs 32 + mrc p15, 0, ip, c2, c0, 2 @ read TTBCR 33 + stmfd sp!, {r1, r2, r4 - r8, ip, lr} 34 + uaccess_enable ip 35 + .endm 36 + 37 + .macro load_regs 38 + ldmfd sp!, {r1, r2, r4 - r8, ip, lr} 39 + mcr p15, 0, ip, c2, c0, 2 @ restore TTBCR 40 + ret lr 41 + .endm 42 + 29 43 #else 44 + 30 45 .macro save_regs 31 46 stmfd sp!, {r1, r2, r4 - r8, lr} 32 47 .endm ··· 50 33 .macro load_regs 51 34 ldmfd sp!, {r1, r2, r4 - r8, pc} 52 35 .endm 36 + 53 37 #endif 54 38 55 39 .macro load1b, reg1
+10 -6
arch/arm/lib/delay-loop.S
··· 5 5 * Copyright (C) 1995, 1996 Russell King 6 6 */ 7 7 #include <linux/linkage.h> 8 + #include <linux/cfi_types.h> 8 9 #include <asm/assembler.h> 9 10 #include <asm/delay.h> 10 11 ··· 25 24 * HZ <= 1000 26 25 */ 27 26 28 - ENTRY(__loop_udelay) 27 + SYM_TYPED_FUNC_START(__loop_udelay) 29 28 ldr r2, .LC1 30 29 mul r0, r2, r0 @ r0 = delay_us * UDELAY_MULT 31 - ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0 30 + b __loop_const_udelay 31 + SYM_FUNC_END(__loop_udelay) 32 + 33 + SYM_TYPED_FUNC_START(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0 32 34 ldr r2, .LC0 33 35 ldr r2, [r2] 34 36 umull r1, r0, r2, r0 @ r0-r1 = r0 * loops_per_jiffy 35 37 adds r1, r1, #0xffffffff @ rounding up ... 36 38 adcs r0, r0, r0 @ and right shift by 31 37 39 reteq lr 40 + b __loop_delay 41 + SYM_FUNC_END(__loop_const_udelay) 38 42 39 43 .align 3 40 44 41 45 @ Delay routine 42 - ENTRY(__loop_delay) 46 + SYM_TYPED_FUNC_START(__loop_delay) 43 47 subs r0, r0, #1 44 48 #if 0 45 49 retls lr ··· 64 58 #endif 65 59 bhi __loop_delay 66 60 ret lr 67 - ENDPROC(__loop_udelay) 68 - ENDPROC(__loop_const_udelay) 69 - ENDPROC(__loop_delay) 61 + SYM_FUNC_END(__loop_delay)
+3
arch/arm/mm/Makefile
··· 45 45 obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o 46 46 obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o 47 47 obj-$(CONFIG_CPU_CACHE_V7M) += cache-v7m.o 48 + obj-y += cache.o 48 49 49 50 obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o 50 51 obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o ··· 63 62 obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o 64 63 obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o 65 64 obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o 65 + obj-y += tlb.o 66 66 67 67 obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o 68 68 obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o ··· 90 88 obj-$(CONFIG_CPU_V6K) += proc-v6.o 91 89 obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o 92 90 obj-$(CONFIG_CPU_V7M) += proc-v7m.o 91 + obj-$(CONFIG_CFI_CLANG) += proc.o 93 92 94 93 obj-$(CONFIG_OUTER_CACHE) += l2c-common.o 95 94 obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o
+1
arch/arm/mm/cache-b15-rac.c
··· 5 5 * Copyright (C) 2015-2016 Broadcom 6 6 */ 7 7 8 + #include <linux/cfi_types.h> 8 9 #include <linux/err.h> 9 10 #include <linux/spinlock.h> 10 11 #include <linux/io.h>
+24 -23
arch/arm/mm/cache-fa.S
··· 12 12 */ 13 13 #include <linux/linkage.h> 14 14 #include <linux/init.h> 15 + #include <linux/cfi_types.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/page.h> 17 18 ··· 40 39 * 41 40 * Unconditionally clean and invalidate the entire icache. 42 41 */ 43 - ENTRY(fa_flush_icache_all) 42 + SYM_TYPED_FUNC_START(fa_flush_icache_all) 44 43 mov r0, #0 45 44 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 46 45 ret lr 47 - ENDPROC(fa_flush_icache_all) 46 + SYM_FUNC_END(fa_flush_icache_all) 48 47 49 48 /* 50 49 * flush_user_cache_all() ··· 52 51 * Clean and invalidate all cache entries in a particular address 53 52 * space. 54 53 */ 55 - ENTRY(fa_flush_user_cache_all) 56 - /* FALLTHROUGH */ 54 + SYM_FUNC_ALIAS(fa_flush_user_cache_all, fa_flush_kern_cache_all) 55 + 57 56 /* 58 57 * flush_kern_cache_all() 59 58 * 60 59 * Clean and invalidate the entire cache. 61 60 */ 62 - ENTRY(fa_flush_kern_cache_all) 61 + SYM_TYPED_FUNC_START(fa_flush_kern_cache_all) 63 62 mov ip, #0 64 63 mov r2, #VM_EXEC 65 64 __flush_whole_cache: ··· 70 69 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 71 70 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 72 71 ret lr 72 + SYM_FUNC_END(fa_flush_kern_cache_all) 73 73 74 74 /* 75 75 * flush_user_cache_range(start, end, flags) ··· 82 80 * - end - end address (exclusive, page aligned) 83 81 * - flags - vma_area_struct flags describing address space 84 82 */ 85 - ENTRY(fa_flush_user_cache_range) 83 + SYM_TYPED_FUNC_START(fa_flush_user_cache_range) 86 84 mov ip, #0 87 85 sub r3, r1, r0 @ calculate total size 88 86 cmp r3, #CACHE_DLIMIT @ total size >= limit? 
··· 99 97 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 100 98 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 101 99 ret lr 100 + SYM_FUNC_END(fa_flush_user_cache_range) 102 101 103 102 /* 104 103 * coherent_kern_range(start, end) ··· 111 108 * - start - virtual start address 112 109 * - end - virtual end address 113 110 */ 114 - ENTRY(fa_coherent_kern_range) 115 - /* fall through */ 111 + SYM_TYPED_FUNC_START(fa_coherent_kern_range) 112 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 113 + b fa_coherent_user_range 114 + #endif 115 + SYM_FUNC_END(fa_coherent_kern_range) 116 116 117 117 /* 118 118 * coherent_user_range(start, end) ··· 127 121 * - start - virtual start address 128 122 * - end - virtual end address 129 123 */ 130 - ENTRY(fa_coherent_user_range) 124 + SYM_TYPED_FUNC_START(fa_coherent_user_range) 131 125 bic r0, r0, #CACHE_DLINESIZE - 1 132 126 1: mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry 133 127 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 139 133 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 140 134 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 141 135 ret lr 136 + SYM_FUNC_END(fa_coherent_user_range) 142 137 143 138 /* 144 139 * flush_kern_dcache_area(void *addr, size_t size) ··· 150 143 * - addr - kernel address 151 144 * - size - size of region 152 145 */ 153 - ENTRY(fa_flush_kern_dcache_area) 146 + SYM_TYPED_FUNC_START(fa_flush_kern_dcache_area) 154 147 add r1, r0, r1 155 148 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line 156 149 add r0, r0, #CACHE_DLINESIZE ··· 160 153 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 161 154 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 162 155 ret lr 156 + SYM_FUNC_END(fa_flush_kern_dcache_area) 163 157 164 158 /* 165 159 * dma_inv_range(start, end) ··· 211 203 * - start - virtual start address of region 212 204 * - end - virtual end address of region 213 205 */ 214 - ENTRY(fa_dma_flush_range) 206 + SYM_TYPED_FUNC_START(fa_dma_flush_range) 215 207 bic r0, r0, 
#CACHE_DLINESIZE - 1 216 208 1: mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry 217 209 add r0, r0, #CACHE_DLINESIZE ··· 220 212 mov r0, #0 221 213 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 222 214 ret lr 215 + SYM_FUNC_END(fa_dma_flush_range) 223 216 224 217 /* 225 218 * dma_map_area(start, size, dir) ··· 228 219 * - size - size of region 229 220 * - dir - DMA direction 230 221 */ 231 - ENTRY(fa_dma_map_area) 222 + SYM_TYPED_FUNC_START(fa_dma_map_area) 232 223 add r1, r1, r0 233 224 cmp r2, #DMA_TO_DEVICE 234 225 beq fa_dma_clean_range 235 226 bcs fa_dma_inv_range 236 227 b fa_dma_flush_range 237 - ENDPROC(fa_dma_map_area) 228 + SYM_FUNC_END(fa_dma_map_area) 238 229 239 230 /* 240 231 * dma_unmap_area(start, size, dir) ··· 242 233 * - size - size of region 243 234 * - dir - DMA direction 244 235 */ 245 - ENTRY(fa_dma_unmap_area) 236 + SYM_TYPED_FUNC_START(fa_dma_unmap_area) 246 237 ret lr 247 - ENDPROC(fa_dma_unmap_area) 248 - 249 - .globl fa_flush_kern_cache_louis 250 - .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all 251 - 252 - __INITDATA 253 - 254 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 255 - define_cache_functions fa 238 + SYM_FUNC_END(fa_dma_unmap_area)
+33 -28
arch/arm/mm/cache-nop.S
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 #include <linux/linkage.h> 3 3 #include <linux/init.h> 4 + #include <linux/cfi_types.h> 4 5 #include <asm/assembler.h> 5 6 6 7 #include "proc-macros.S" 7 8 8 - ENTRY(nop_flush_icache_all) 9 + /* 10 + * These are all open-coded instead of aliased, to make clear 11 + * what is going on here: all functions are stubbed out. 12 + */ 13 + SYM_TYPED_FUNC_START(nop_flush_icache_all) 9 14 ret lr 10 - ENDPROC(nop_flush_icache_all) 15 + SYM_FUNC_END(nop_flush_icache_all) 11 16 12 - .globl nop_flush_kern_cache_all 13 - .equ nop_flush_kern_cache_all, nop_flush_icache_all 17 + SYM_TYPED_FUNC_START(nop_flush_kern_cache_all) 18 + ret lr 19 + SYM_FUNC_END(nop_flush_kern_cache_all) 14 20 15 - .globl nop_flush_kern_cache_louis 16 - .equ nop_flush_kern_cache_louis, nop_flush_icache_all 21 + SYM_TYPED_FUNC_START(nop_flush_user_cache_all) 22 + ret lr 23 + SYM_FUNC_END(nop_flush_user_cache_all) 17 24 18 - .globl nop_flush_user_cache_all 19 - .equ nop_flush_user_cache_all, nop_flush_icache_all 25 + SYM_TYPED_FUNC_START(nop_flush_user_cache_range) 26 + ret lr 27 + SYM_FUNC_END(nop_flush_user_cache_range) 20 28 21 - .globl nop_flush_user_cache_range 22 - .equ nop_flush_user_cache_range, nop_flush_icache_all 29 + SYM_TYPED_FUNC_START(nop_coherent_kern_range) 30 + ret lr 31 + SYM_FUNC_END(nop_coherent_kern_range) 23 32 24 - .globl nop_coherent_kern_range 25 - .equ nop_coherent_kern_range, nop_flush_icache_all 26 - 27 - ENTRY(nop_coherent_user_range) 33 + SYM_TYPED_FUNC_START(nop_coherent_user_range) 28 34 mov r0, 0 29 35 ret lr 30 - ENDPROC(nop_coherent_user_range) 36 + SYM_FUNC_END(nop_coherent_user_range) 31 37 32 - .globl nop_flush_kern_dcache_area 33 - .equ nop_flush_kern_dcache_area, nop_flush_icache_all 38 + SYM_TYPED_FUNC_START(nop_flush_kern_dcache_area) 39 + ret lr 40 + SYM_FUNC_END(nop_flush_kern_dcache_area) 34 41 35 - .globl nop_dma_flush_range 36 - .equ nop_dma_flush_range, nop_flush_icache_all 42 + 
SYM_TYPED_FUNC_START(nop_dma_flush_range) 43 + ret lr 44 + SYM_FUNC_END(nop_dma_flush_range) 37 45 38 - .globl nop_dma_map_area 39 - .equ nop_dma_map_area, nop_flush_icache_all 46 + SYM_TYPED_FUNC_START(nop_dma_map_area) 47 + ret lr 48 + SYM_FUNC_END(nop_dma_map_area) 40 49 41 - .globl nop_dma_unmap_area 42 - .equ nop_dma_unmap_area, nop_flush_icache_all 43 - 44 - __INITDATA 45 - 46 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 47 - define_cache_functions nop 50 + SYM_TYPED_FUNC_START(nop_dma_unmap_area) 51 + ret lr 52 + SYM_FUNC_END(nop_dma_unmap_area)
+28 -27
arch/arm/mm/cache-v4.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <asm/assembler.h> 10 11 #include <asm/page.h> 11 12 #include "proc-macros.S" ··· 16 15 * 17 16 * Unconditionally clean and invalidate the entire icache. 18 17 */ 19 - ENTRY(v4_flush_icache_all) 18 + SYM_TYPED_FUNC_START(v4_flush_icache_all) 20 19 ret lr 21 - ENDPROC(v4_flush_icache_all) 20 + SYM_FUNC_END(v4_flush_icache_all) 22 21 23 22 /* 24 23 * flush_user_cache_all() ··· 28 27 * 29 28 * - mm - mm_struct describing address space 30 29 */ 31 - ENTRY(v4_flush_user_cache_all) 32 - /* FALLTHROUGH */ 30 + SYM_FUNC_ALIAS(v4_flush_user_cache_all, v4_flush_kern_cache_all) 31 + 33 32 /* 34 33 * flush_kern_cache_all() 35 34 * 36 35 * Clean and invalidate the entire cache. 37 36 */ 38 - ENTRY(v4_flush_kern_cache_all) 37 + SYM_TYPED_FUNC_START(v4_flush_kern_cache_all) 39 38 #ifdef CONFIG_CPU_CP15 40 39 mov r0, #0 41 40 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 42 41 ret lr 43 42 #else 44 - /* FALLTHROUGH */ 43 + ret lr 45 44 #endif 45 + SYM_FUNC_END(v4_flush_kern_cache_all) 46 46 47 47 /* 48 48 * flush_user_cache_range(start, end, flags) ··· 55 53 * - end - end address (exclusive, may not be aligned) 56 54 * - flags - vma_area_struct flags describing address space 57 55 */ 58 - ENTRY(v4_flush_user_cache_range) 56 + SYM_TYPED_FUNC_START(v4_flush_user_cache_range) 59 57 #ifdef CONFIG_CPU_CP15 60 58 mov ip, #0 61 59 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 62 60 ret lr 63 61 #else 64 - /* FALLTHROUGH */ 62 + ret lr 65 63 #endif 64 + SYM_FUNC_END(v4_flush_user_cache_range) 66 65 67 66 /* 68 67 * coherent_kern_range(start, end) ··· 75 72 * - start - virtual start address 76 73 * - end - virtual end address 77 74 */ 78 - ENTRY(v4_coherent_kern_range) 79 - /* FALLTHROUGH */ 75 + SYM_TYPED_FUNC_START(v4_coherent_kern_range) 76 + ret lr 77 + SYM_FUNC_END(v4_coherent_kern_range) 80 78 81 79 /* 82 80 * coherent_user_range(start, end) ··· 89 85 * - start - virtual 
start address 90 86 * - end - virtual end address 91 87 */ 92 - ENTRY(v4_coherent_user_range) 88 + SYM_TYPED_FUNC_START(v4_coherent_user_range) 93 89 mov r0, #0 94 90 ret lr 91 + SYM_FUNC_END(v4_coherent_user_range) 95 92 96 93 /* 97 94 * flush_kern_dcache_area(void *addr, size_t size) ··· 103 98 * - addr - kernel address 104 99 * - size - region size 105 100 */ 106 - ENTRY(v4_flush_kern_dcache_area) 107 - /* FALLTHROUGH */ 101 + SYM_TYPED_FUNC_START(v4_flush_kern_dcache_area) 102 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 103 + b v4_dma_flush_range 104 + #endif 105 + SYM_FUNC_END(v4_flush_kern_dcache_area) 108 106 109 107 /* 110 108 * dma_flush_range(start, end) ··· 117 109 * - start - virtual start address 118 110 * - end - virtual end address 119 111 */ 120 - ENTRY(v4_dma_flush_range) 112 + SYM_TYPED_FUNC_START(v4_dma_flush_range) 121 113 #ifdef CONFIG_CPU_CP15 122 114 mov r0, #0 123 115 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 124 116 #endif 125 117 ret lr 118 + SYM_FUNC_END(v4_dma_flush_range) 126 119 127 120 /* 128 121 * dma_unmap_area(start, size, dir) ··· 131 122 * - size - size of region 132 123 * - dir - DMA direction 133 124 */ 134 - ENTRY(v4_dma_unmap_area) 125 + SYM_TYPED_FUNC_START(v4_dma_unmap_area) 135 126 teq r2, #DMA_TO_DEVICE 136 127 bne v4_dma_flush_range 137 - /* FALLTHROUGH */ 128 + ret lr 129 + SYM_FUNC_END(v4_dma_unmap_area) 138 130 139 131 /* 140 132 * dma_map_area(start, size, dir) ··· 143 133 * - size - size of region 144 134 * - dir - DMA direction 145 135 */ 146 - ENTRY(v4_dma_map_area) 136 + SYM_TYPED_FUNC_START(v4_dma_map_area) 147 137 ret lr 148 - ENDPROC(v4_dma_unmap_area) 149 - ENDPROC(v4_dma_map_area) 150 - 151 - .globl v4_flush_kern_cache_louis 152 - .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all 153 - 154 - __INITDATA 155 - 156 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 157 - define_cache_functions v4 138 + SYM_FUNC_END(v4_dma_map_area)
+25 -24
arch/arm/mm/cache-v4wb.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <asm/assembler.h> 10 11 #include <asm/page.h> 11 12 #include "proc-macros.S" ··· 54 53 * 55 54 * Unconditionally clean and invalidate the entire icache. 56 55 */ 57 - ENTRY(v4wb_flush_icache_all) 56 + SYM_TYPED_FUNC_START(v4wb_flush_icache_all) 58 57 mov r0, #0 59 58 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 60 59 ret lr 61 - ENDPROC(v4wb_flush_icache_all) 60 + SYM_FUNC_END(v4wb_flush_icache_all) 62 61 63 62 /* 64 63 * flush_user_cache_all() ··· 66 65 * Clean and invalidate all cache entries in a particular address 67 66 * space. 68 67 */ 69 - ENTRY(v4wb_flush_user_cache_all) 70 - /* FALLTHROUGH */ 68 + SYM_FUNC_ALIAS(v4wb_flush_user_cache_all, v4wb_flush_kern_cache_all) 69 + 71 70 /* 72 71 * flush_kern_cache_all() 73 72 * 74 73 * Clean and invalidate the entire cache. 75 74 */ 76 - ENTRY(v4wb_flush_kern_cache_all) 75 + SYM_TYPED_FUNC_START(v4wb_flush_kern_cache_all) 77 76 mov ip, #0 78 77 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 79 78 __flush_whole_cache: ··· 94 93 #endif 95 94 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer 96 95 ret lr 96 + SYM_FUNC_END(v4wb_flush_kern_cache_all) 97 97 98 98 /* 99 99 * flush_user_cache_range(start, end, flags) ··· 106 104 * - end - end address (exclusive, page aligned) 107 105 * - flags - vma_area_struct flags describing address space 108 106 */ 109 - ENTRY(v4wb_flush_user_cache_range) 107 + SYM_TYPED_FUNC_START(v4wb_flush_user_cache_range) 110 108 mov ip, #0 111 109 sub r3, r1, r0 @ calculate total size 112 110 tst r2, #VM_EXEC @ executable region? 
··· 123 121 tst r2, #VM_EXEC 124 122 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 125 123 ret lr 124 + SYM_FUNC_END(v4wb_flush_user_cache_range) 126 125 127 126 /* 128 127 * flush_kern_dcache_area(void *addr, size_t size) ··· 134 131 * - addr - kernel address 135 132 * - size - region size 136 133 */ 137 - ENTRY(v4wb_flush_kern_dcache_area) 134 + SYM_TYPED_FUNC_START(v4wb_flush_kern_dcache_area) 138 135 add r1, r0, r1 139 - /* fall through */ 136 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 137 + b v4wb_coherent_user_range 138 + #endif 139 + SYM_FUNC_END(v4wb_flush_kern_dcache_area) 140 140 141 141 /* 142 142 * coherent_kern_range(start, end) ··· 151 145 * - start - virtual start address 152 146 * - end - virtual end address 153 147 */ 154 - ENTRY(v4wb_coherent_kern_range) 155 - /* fall through */ 148 + SYM_TYPED_FUNC_START(v4wb_coherent_kern_range) 149 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 150 + b v4wb_coherent_user_range 151 + #endif 152 + SYM_FUNC_END(v4wb_coherent_kern_range) 156 153 157 154 /* 158 155 * coherent_user_range(start, end) ··· 167 158 * - start - virtual start address 168 159 * - end - virtual end address 169 160 */ 170 - ENTRY(v4wb_coherent_user_range) 161 + SYM_TYPED_FUNC_START(v4wb_coherent_user_range) 171 162 bic r0, r0, #CACHE_DLINESIZE - 1 172 163 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 173 164 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry ··· 178 169 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 179 170 mcr p15, 0, r0, c7, c10, 4 @ drain WB 180 171 ret lr 181 - 172 + SYM_FUNC_END(v4wb_coherent_user_range) 182 173 183 174 /* 184 175 * dma_inv_range(start, end) ··· 240 231 * - size - size of region 241 232 * - dir - DMA direction 242 233 */ 243 - ENTRY(v4wb_dma_map_area) 234 + SYM_TYPED_FUNC_START(v4wb_dma_map_area) 244 235 add r1, r1, r0 245 236 cmp r2, #DMA_TO_DEVICE 246 237 beq v4wb_dma_clean_range 247 238 bcs v4wb_dma_inv_range 248 239 b v4wb_dma_flush_range 249 - ENDPROC(v4wb_dma_map_area) 240 + 
SYM_FUNC_END(v4wb_dma_map_area) 250 241 251 242 /* 252 243 * dma_unmap_area(start, size, dir) ··· 254 245 * - size - size of region 255 246 * - dir - DMA direction 256 247 */ 257 - ENTRY(v4wb_dma_unmap_area) 248 + SYM_TYPED_FUNC_START(v4wb_dma_unmap_area) 258 249 ret lr 259 - ENDPROC(v4wb_dma_unmap_area) 260 - 261 - .globl v4wb_flush_kern_cache_louis 262 - .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all 263 - 264 - __INITDATA 265 - 266 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 267 - define_cache_functions v4wb 250 + SYM_FUNC_END(v4wb_dma_unmap_area)
+28 -27
arch/arm/mm/cache-v4wt.S
··· 10 10 */ 11 11 #include <linux/linkage.h> 12 12 #include <linux/init.h> 13 + #include <linux/cfi_types.h> 13 14 #include <asm/assembler.h> 14 15 #include <asm/page.h> 15 16 #include "proc-macros.S" ··· 44 43 * 45 44 * Unconditionally clean and invalidate the entire icache. 46 45 */ 47 - ENTRY(v4wt_flush_icache_all) 46 + SYM_TYPED_FUNC_START(v4wt_flush_icache_all) 48 47 mov r0, #0 49 48 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 50 49 ret lr 51 - ENDPROC(v4wt_flush_icache_all) 50 + SYM_FUNC_END(v4wt_flush_icache_all) 52 51 53 52 /* 54 53 * flush_user_cache_all() ··· 56 55 * Invalidate all cache entries in a particular address 57 56 * space. 58 57 */ 59 - ENTRY(v4wt_flush_user_cache_all) 60 - /* FALLTHROUGH */ 58 + SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all) 59 + 61 60 /* 62 61 * flush_kern_cache_all() 63 62 * 64 63 * Clean and invalidate the entire cache. 65 64 */ 66 - ENTRY(v4wt_flush_kern_cache_all) 65 + SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all) 67 66 mov r2, #VM_EXEC 68 67 mov ip, #0 69 68 __flush_whole_cache: ··· 71 70 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 72 71 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 73 72 ret lr 73 + SYM_FUNC_END(v4wt_flush_kern_cache_all) 74 74 75 75 /* 76 76 * flush_user_cache_range(start, end, flags) ··· 83 81 * - end - end address (exclusive, page aligned) 84 82 * - flags - vma_area_struct flags describing address space 85 83 */ 86 - ENTRY(v4wt_flush_user_cache_range) 84 + SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range) 87 85 sub r3, r1, r0 @ calculate total size 88 86 cmp r3, #CACHE_DLIMIT 89 87 bhs __flush_whole_cache ··· 95 93 cmp r0, r1 96 94 blo 1b 97 95 ret lr 96 + SYM_FUNC_END(v4wt_flush_user_cache_range) 98 97 99 98 /* 100 99 * coherent_kern_range(start, end) ··· 107 104 * - start - virtual start address 108 105 * - end - virtual end address 109 106 */ 110 - ENTRY(v4wt_coherent_kern_range) 111 - /* FALLTRHOUGH */ 107 + SYM_TYPED_FUNC_START(v4wt_coherent_kern_range) 108 + 
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 109 + b v4wt_coherent_user_range 110 + #endif 111 + SYM_FUNC_END(v4wt_coherent_kern_range) 112 112 113 113 /* 114 114 * coherent_user_range(start, end) ··· 123 117 * - start - virtual start address 124 118 * - end - virtual end address 125 119 */ 126 - ENTRY(v4wt_coherent_user_range) 120 + SYM_TYPED_FUNC_START(v4wt_coherent_user_range) 127 121 bic r0, r0, #CACHE_DLINESIZE - 1 128 122 1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry 129 123 add r0, r0, #CACHE_DLINESIZE ··· 131 125 blo 1b 132 126 mov r0, #0 133 127 ret lr 128 + SYM_FUNC_END(v4wt_coherent_user_range) 134 129 135 130 /* 136 131 * flush_kern_dcache_area(void *addr, size_t size) ··· 142 135 * - addr - kernel address 143 136 * - size - region size 144 137 */ 145 - ENTRY(v4wt_flush_kern_dcache_area) 138 + SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area) 146 139 mov r2, #0 147 140 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache 148 141 add r1, r0, r1 149 - /* fallthrough */ 142 + b v4wt_dma_inv_range 143 + SYM_FUNC_END(v4wt_flush_kern_dcache_area) 150 144 151 145 /* 152 146 * dma_inv_range(start, end) ··· 175 167 * 176 168 * - start - virtual start address 177 169 * - end - virtual end address 178 - */ 179 - .globl v4wt_dma_flush_range 180 - .equ v4wt_dma_flush_range, v4wt_dma_inv_range 170 + */ 171 + SYM_TYPED_FUNC_START(v4wt_dma_flush_range) 172 + b v4wt_dma_inv_range 173 + SYM_FUNC_END(v4wt_dma_flush_range) 181 174 182 175 /* 183 176 * dma_unmap_area(start, size, dir) ··· 186 177 * - size - size of region 187 178 * - dir - DMA direction 188 179 */ 189 - ENTRY(v4wt_dma_unmap_area) 180 + SYM_TYPED_FUNC_START(v4wt_dma_unmap_area) 190 181 add r1, r1, r0 191 182 teq r2, #DMA_TO_DEVICE 192 183 bne v4wt_dma_inv_range 193 - /* FALLTHROUGH */ 184 + ret lr 185 + SYM_FUNC_END(v4wt_dma_unmap_area) 194 186 195 187 /* 196 188 * dma_map_area(start, size, dir) ··· 199 189 * - size - size of region 200 190 * - dir - DMA direction 201 191 */ 202 - ENTRY(v4wt_dma_map_area) 
192 + SYM_TYPED_FUNC_START(v4wt_dma_map_area) 203 193 ret lr 204 - ENDPROC(v4wt_dma_unmap_area) 205 - ENDPROC(v4wt_dma_map_area) 206 - 207 - .globl v4wt_flush_kern_cache_louis 208 - .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all 209 - 210 - __INITDATA 211 - 212 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 213 - define_cache_functions v4wt 194 + SYM_FUNC_END(v4wt_dma_map_area)
+25 -26
arch/arm/mm/cache-v6.S
··· 8 8 */ 9 9 #include <linux/linkage.h> 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <asm/assembler.h> 12 13 #include <asm/errno.h> 13 14 #include <asm/unwind.h> ··· 35 34 * r0 - set to 0 36 35 * r1 - corrupted 37 36 */ 38 - ENTRY(v6_flush_icache_all) 37 + SYM_TYPED_FUNC_START(v6_flush_icache_all) 39 38 mov r0, #0 40 39 #ifdef CONFIG_ARM_ERRATA_411920 41 40 mrs r1, cpsr ··· 52 51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache 53 52 #endif 54 53 ret lr 55 - ENDPROC(v6_flush_icache_all) 54 + SYM_FUNC_END(v6_flush_icache_all) 56 55 57 56 /* 58 57 * v6_flush_cache_all() ··· 61 60 * 62 61 * It is assumed that: 63 62 */ 64 - ENTRY(v6_flush_kern_cache_all) 63 + SYM_TYPED_FUNC_START(v6_flush_kern_cache_all) 65 64 mov r0, #0 66 65 #ifdef HARVARD_CACHE 67 66 mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate ··· 74 73 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate 75 74 #endif 76 75 ret lr 76 + SYM_FUNC_END(v6_flush_kern_cache_all) 77 77 78 78 /* 79 79 * v6_flush_cache_all() ··· 83 81 * 84 82 * - mm - mm_struct describing address space 85 83 */ 86 - ENTRY(v6_flush_user_cache_all) 87 - /*FALLTHROUGH*/ 84 + SYM_TYPED_FUNC_START(v6_flush_user_cache_all) 85 + ret lr 86 + SYM_FUNC_END(v6_flush_user_cache_all) 88 87 89 88 /* 90 89 * v6_flush_cache_range(start, end, flags) ··· 99 96 * It is assumed that: 100 97 * - we have a VIPT cache. 
101 98 */ 102 - ENTRY(v6_flush_user_cache_range) 99 + SYM_TYPED_FUNC_START(v6_flush_user_cache_range) 103 100 ret lr 101 + SYM_FUNC_END(v6_flush_user_cache_range) 104 102 105 103 /* 106 104 * v6_coherent_kern_range(start,end) ··· 116 112 * It is assumed that: 117 113 * - the Icache does not read data from the write buffer 118 114 */ 119 - ENTRY(v6_coherent_kern_range) 120 - /* FALLTHROUGH */ 115 + SYM_TYPED_FUNC_START(v6_coherent_kern_range) 116 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 117 + b v6_coherent_user_range 118 + #endif 119 + SYM_FUNC_END(v6_coherent_kern_range) 121 120 122 121 /* 123 122 * v6_coherent_user_range(start,end) ··· 135 128 * It is assumed that: 136 129 * - the Icache does not read data from the write buffer 137 130 */ 138 - ENTRY(v6_coherent_user_range) 131 + SYM_TYPED_FUNC_START(v6_coherent_user_range) 139 132 UNWIND(.fnstart ) 140 133 #ifdef HARVARD_CACHE 141 134 bic r0, r0, #CACHE_LINE_SIZE - 1 ··· 166 159 mov r0, #-EFAULT 167 160 ret lr 168 161 UNWIND(.fnend ) 169 - ENDPROC(v6_coherent_user_range) 170 - ENDPROC(v6_coherent_kern_range) 162 + SYM_FUNC_END(v6_coherent_user_range) 171 163 172 164 /* 173 165 * v6_flush_kern_dcache_area(void *addr, size_t size) ··· 177 171 * - addr - kernel address 178 172 * - size - region size 179 173 */ 180 - ENTRY(v6_flush_kern_dcache_area) 174 + SYM_TYPED_FUNC_START(v6_flush_kern_dcache_area) 181 175 add r1, r0, r1 182 176 bic r0, r0, #D_CACHE_LINE_SIZE - 1 183 177 1: ··· 194 188 mcr p15, 0, r0, c7, c10, 4 195 189 #endif 196 190 ret lr 197 - 191 + SYM_FUNC_END(v6_flush_kern_dcache_area) 198 192 199 193 /* 200 194 * v6_dma_inv_range(start,end) ··· 259 253 * - start - virtual start address of region 260 254 * - end - virtual end address of region 261 255 */ 262 - ENTRY(v6_dma_flush_range) 256 + SYM_TYPED_FUNC_START(v6_dma_flush_range) 263 257 bic r0, r0, #D_CACHE_LINE_SIZE - 1 264 258 1: 265 259 #ifdef HARVARD_CACHE ··· 273 267 mov r0, #0 274 268 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 275 
269 ret lr 270 + SYM_FUNC_END(v6_dma_flush_range) 276 271 277 272 /* 278 273 * dma_map_area(start, size, dir) ··· 281 274 * - size - size of region 282 275 * - dir - DMA direction 283 276 */ 284 - ENTRY(v6_dma_map_area) 277 + SYM_TYPED_FUNC_START(v6_dma_map_area) 285 278 add r1, r1, r0 286 279 teq r2, #DMA_FROM_DEVICE 287 280 beq v6_dma_inv_range 288 281 b v6_dma_clean_range 289 - ENDPROC(v6_dma_map_area) 282 + SYM_FUNC_END(v6_dma_map_area) 290 283 291 284 /* 292 285 * dma_unmap_area(start, size, dir) ··· 294 287 * - size - size of region 295 288 * - dir - DMA direction 296 289 */ 297 - ENTRY(v6_dma_unmap_area) 290 + SYM_TYPED_FUNC_START(v6_dma_unmap_area) 298 291 add r1, r1, r0 299 292 teq r2, #DMA_TO_DEVICE 300 293 bne v6_dma_inv_range 301 294 ret lr 302 - ENDPROC(v6_dma_unmap_area) 303 - 304 - .globl v6_flush_kern_cache_louis 305 - .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all 306 - 307 - __INITDATA 308 - 309 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 310 - define_cache_functions v6 295 + SYM_FUNC_END(v6_dma_unmap_area)
+27 -49
arch/arm/mm/cache-v7.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <linux/init.h> 12 + #include <linux/cfi_types.h> 12 13 #include <asm/assembler.h> 13 14 #include <asm/errno.h> 14 15 #include <asm/unwind.h> ··· 81 80 * Registers: 82 81 * r0 - set to 0 83 82 */ 84 - ENTRY(v7_flush_icache_all) 83 + SYM_TYPED_FUNC_START(v7_flush_icache_all) 85 84 mov r0, #0 86 85 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable 87 86 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 88 87 ret lr 89 - ENDPROC(v7_flush_icache_all) 88 + SYM_FUNC_END(v7_flush_icache_all) 90 89 91 90 /* 92 91 * v7_flush_dcache_louis() ··· 194 193 * unification in a single instruction. 195 194 * 196 195 */ 197 - ENTRY(v7_flush_kern_cache_all) 196 + SYM_TYPED_FUNC_START(v7_flush_kern_cache_all) 198 197 stmfd sp!, {r4-r6, r9-r10, lr} 199 198 bl v7_flush_dcache_all 200 199 mov r0, #0 ··· 202 201 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 203 202 ldmfd sp!, {r4-r6, r9-r10, lr} 204 203 ret lr 205 - ENDPROC(v7_flush_kern_cache_all) 204 + SYM_FUNC_END(v7_flush_kern_cache_all) 206 205 207 206 /* 208 207 * v7_flush_kern_cache_louis(void) ··· 210 209 * Flush the data cache up to Level of Unification Inner Shareable. 211 210 * Invalidate the I-cache to the point of unification. 
212 211 */ 213 - ENTRY(v7_flush_kern_cache_louis) 212 + SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis) 214 213 stmfd sp!, {r4-r6, r9-r10, lr} 215 214 bl v7_flush_dcache_louis 216 215 mov r0, #0 ··· 218 217 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 219 218 ldmfd sp!, {r4-r6, r9-r10, lr} 220 219 ret lr 221 - ENDPROC(v7_flush_kern_cache_louis) 220 + SYM_FUNC_END(v7_flush_kern_cache_louis) 222 221 223 222 /* 224 223 * v7_flush_cache_all() ··· 227 226 * 228 227 * - mm - mm_struct describing address space 229 228 */ 230 - ENTRY(v7_flush_user_cache_all) 231 - /*FALLTHROUGH*/ 229 + SYM_TYPED_FUNC_START(v7_flush_user_cache_all) 230 + ret lr 231 + SYM_FUNC_END(v7_flush_user_cache_all) 232 232 233 233 /* 234 234 * v7_flush_cache_range(start, end, flags) ··· 243 241 * It is assumed that: 244 242 * - we have a VIPT cache. 245 243 */ 246 - ENTRY(v7_flush_user_cache_range) 244 + SYM_TYPED_FUNC_START(v7_flush_user_cache_range) 247 245 ret lr 248 - ENDPROC(v7_flush_user_cache_all) 249 - ENDPROC(v7_flush_user_cache_range) 246 + SYM_FUNC_END(v7_flush_user_cache_range) 250 247 251 248 /* 252 249 * v7_coherent_kern_range(start,end) ··· 260 259 * It is assumed that: 261 260 * - the Icache does not read data from the write buffer 262 261 */ 263 - ENTRY(v7_coherent_kern_range) 264 - /* FALLTHROUGH */ 262 + SYM_TYPED_FUNC_START(v7_coherent_kern_range) 263 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 264 + b v7_coherent_user_range 265 + #endif 266 + SYM_FUNC_END(v7_coherent_kern_range) 265 267 266 268 /* 267 269 * v7_coherent_user_range(start,end) ··· 279 275 * It is assumed that: 280 276 * - the Icache does not read data from the write buffer 281 277 */ 282 - ENTRY(v7_coherent_user_range) 278 + SYM_TYPED_FUNC_START(v7_coherent_user_range) 283 279 UNWIND(.fnstart ) 284 280 dcache_line_size r2, r3 285 281 sub r3, r2, #1 ··· 325 321 mov r0, #-EFAULT 326 322 ret lr 327 323 UNWIND(.fnend ) 328 - ENDPROC(v7_coherent_kern_range) 329 - ENDPROC(v7_coherent_user_range) 324 + 
SYM_FUNC_END(v7_coherent_user_range) 330 325 331 326 /* 332 327 * v7_flush_kern_dcache_area(void *addr, size_t size) ··· 336 333 * - addr - kernel address 337 334 * - size - region size 338 335 */ 339 - ENTRY(v7_flush_kern_dcache_area) 336 + SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area) 340 337 dcache_line_size r2, r3 341 338 add r1, r0, r1 342 339 sub r3, r2, #1 ··· 352 349 blo 1b 353 350 dsb st 354 351 ret lr 355 - ENDPROC(v7_flush_kern_dcache_area) 352 + SYM_FUNC_END(v7_flush_kern_dcache_area) 356 353 357 354 /* 358 355 * v7_dma_inv_range(start,end) ··· 416 413 * - start - virtual start address of region 417 414 * - end - virtual end address of region 418 415 */ 419 - ENTRY(v7_dma_flush_range) 416 + SYM_TYPED_FUNC_START(v7_dma_flush_range) 420 417 dcache_line_size r2, r3 421 418 sub r3, r2, #1 422 419 bic r0, r0, r3 ··· 431 428 blo 1b 432 429 dsb st 433 430 ret lr 434 - ENDPROC(v7_dma_flush_range) 431 + SYM_FUNC_END(v7_dma_flush_range) 435 432 436 433 /* 437 434 * dma_map_area(start, size, dir) ··· 439 436 * - size - size of region 440 437 * - dir - DMA direction 441 438 */ 442 - ENTRY(v7_dma_map_area) 439 + SYM_TYPED_FUNC_START(v7_dma_map_area) 443 440 add r1, r1, r0 444 441 teq r2, #DMA_FROM_DEVICE 445 442 beq v7_dma_inv_range 446 443 b v7_dma_clean_range 447 - ENDPROC(v7_dma_map_area) 444 + SYM_FUNC_END(v7_dma_map_area) 448 445 449 446 /* 450 447 * dma_unmap_area(start, size, dir) ··· 452 449 * - size - size of region 453 450 * - dir - DMA direction 454 451 */ 455 - ENTRY(v7_dma_unmap_area) 452 + SYM_TYPED_FUNC_START(v7_dma_unmap_area) 456 453 add r1, r1, r0 457 454 teq r2, #DMA_TO_DEVICE 458 455 bne v7_dma_inv_range 459 456 ret lr 460 - ENDPROC(v7_dma_unmap_area) 461 - 462 - __INITDATA 463 - 464 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 465 - define_cache_functions v7 466 - 467 - /* The Broadcom Brahma-B15 read-ahead cache requires some modifications 468 - * to the v7_cache_fns, we only override the ones we need 469 - */ 
470 - #ifndef CONFIG_CACHE_B15_RAC 471 - globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all 472 - #endif 473 - globl_equ b15_flush_icache_all, v7_flush_icache_all 474 - globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis 475 - globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all 476 - globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range 477 - globl_equ b15_coherent_kern_range, v7_coherent_kern_range 478 - globl_equ b15_coherent_user_range, v7_coherent_user_range 479 - globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area 480 - 481 - globl_equ b15_dma_map_area, v7_dma_map_area 482 - globl_equ b15_dma_unmap_area, v7_dma_unmap_area 483 - globl_equ b15_dma_flush_range, v7_dma_flush_range 484 - 485 - define_cache_functions b15 457 + SYM_FUNC_END(v7_dma_unmap_area)
+25 -30
arch/arm/mm/cache-v7m.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <asm/assembler.h> 15 16 #include <asm/errno.h> 16 17 #include <asm/unwind.h> ··· 160 159 * Registers: 161 160 * r0 - set to 0 162 161 */ 163 - ENTRY(v7m_flush_icache_all) 162 + SYM_TYPED_FUNC_START(v7m_flush_icache_all) 164 163 invalidate_icache r0 165 164 ret lr 166 - ENDPROC(v7m_flush_icache_all) 165 + SYM_FUNC_END(v7m_flush_icache_all) 167 166 168 167 /* 169 168 * v7m_flush_dcache_all() ··· 237 236 * unification in a single instruction. 238 237 * 239 238 */ 240 - ENTRY(v7m_flush_kern_cache_all) 239 + SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all) 241 240 stmfd sp!, {r4-r7, r9-r11, lr} 242 241 bl v7m_flush_dcache_all 243 242 invalidate_icache r0 244 243 ldmfd sp!, {r4-r7, r9-r11, lr} 245 244 ret lr 246 - ENDPROC(v7m_flush_kern_cache_all) 245 + SYM_FUNC_END(v7m_flush_kern_cache_all) 247 246 248 247 /* 249 248 * v7m_flush_cache_all() ··· 252 251 * 253 252 * - mm - mm_struct describing address space 254 253 */ 255 - ENTRY(v7m_flush_user_cache_all) 256 - /*FALLTHROUGH*/ 254 + SYM_TYPED_FUNC_START(v7m_flush_user_cache_all) 255 + ret lr 256 + SYM_FUNC_END(v7m_flush_user_cache_all) 257 257 258 258 /* 259 259 * v7m_flush_cache_range(start, end, flags) ··· 268 266 * It is assumed that: 269 267 * - we have a VIPT cache. 
270 268 */ 271 - ENTRY(v7m_flush_user_cache_range) 269 + SYM_TYPED_FUNC_START(v7m_flush_user_cache_range) 272 270 ret lr 273 - ENDPROC(v7m_flush_user_cache_all) 274 - ENDPROC(v7m_flush_user_cache_range) 271 + SYM_FUNC_END(v7m_flush_user_cache_range) 275 272 276 273 /* 277 274 * v7m_coherent_kern_range(start,end) ··· 285 284 * It is assumed that: 286 285 * - the Icache does not read data from the write buffer 287 286 */ 288 - ENTRY(v7m_coherent_kern_range) 289 - /* FALLTHROUGH */ 287 + SYM_TYPED_FUNC_START(v7m_coherent_kern_range) 288 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 289 + b v7m_coherent_user_range 290 + #endif 291 + SYM_FUNC_END(v7m_coherent_kern_range) 290 292 291 293 /* 292 294 * v7m_coherent_user_range(start,end) ··· 304 300 * It is assumed that: 305 301 * - the Icache does not read data from the write buffer 306 302 */ 307 - ENTRY(v7m_coherent_user_range) 303 + SYM_TYPED_FUNC_START(v7m_coherent_user_range) 308 304 UNWIND(.fnstart ) 309 305 dcache_line_size r2, r3 310 306 sub r3, r2, #1 ··· 332 328 isb 333 329 ret lr 334 330 UNWIND(.fnend ) 335 - ENDPROC(v7m_coherent_kern_range) 336 - ENDPROC(v7m_coherent_user_range) 331 + SYM_FUNC_END(v7m_coherent_user_range) 337 332 338 333 /* 339 334 * v7m_flush_kern_dcache_area(void *addr, size_t size) ··· 343 340 * - addr - kernel address 344 341 * - size - region size 345 342 */ 346 - ENTRY(v7m_flush_kern_dcache_area) 343 + SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area) 347 344 dcache_line_size r2, r3 348 345 add r1, r0, r1 349 346 sub r3, r2, #1 ··· 355 352 blo 1b 356 353 dsb st 357 354 ret lr 358 - ENDPROC(v7m_flush_kern_dcache_area) 355 + SYM_FUNC_END(v7m_flush_kern_dcache_area) 359 356 360 357 /* 361 358 * v7m_dma_inv_range(start,end) ··· 411 408 * - start - virtual start address of region 412 409 * - end - virtual end address of region 413 410 */ 414 - ENTRY(v7m_dma_flush_range) 411 + SYM_TYPED_FUNC_START(v7m_dma_flush_range) 415 412 dcache_line_size r2, r3 416 413 sub r3, r2, #1 417 414 bic r0, 
r0, r3 ··· 422 419 blo 1b 423 420 dsb st 424 421 ret lr 425 - ENDPROC(v7m_dma_flush_range) 422 + SYM_FUNC_END(v7m_dma_flush_range) 426 423 427 424 /* 428 425 * dma_map_area(start, size, dir) ··· 430 427 * - size - size of region 431 428 * - dir - DMA direction 432 429 */ 433 - ENTRY(v7m_dma_map_area) 430 + SYM_TYPED_FUNC_START(v7m_dma_map_area) 434 431 add r1, r1, r0 435 432 teq r2, #DMA_FROM_DEVICE 436 433 beq v7m_dma_inv_range 437 434 b v7m_dma_clean_range 438 - ENDPROC(v7m_dma_map_area) 435 + SYM_FUNC_END(v7m_dma_map_area) 439 436 440 437 /* 441 438 * dma_unmap_area(start, size, dir) ··· 443 440 * - size - size of region 444 441 * - dir - DMA direction 445 442 */ 446 - ENTRY(v7m_dma_unmap_area) 443 + SYM_TYPED_FUNC_START(v7m_dma_unmap_area) 447 444 add r1, r1, r0 448 445 teq r2, #DMA_TO_DEVICE 449 446 bne v7m_dma_inv_range 450 447 ret lr 451 - ENDPROC(v7m_dma_unmap_area) 452 - 453 - .globl v7m_flush_kern_cache_louis 454 - .equ v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all 455 - 456 - __INITDATA 457 - 458 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 459 - define_cache_functions v7m 448 + SYM_FUNC_END(v7m_dma_unmap_area)
+663
arch/arm/mm/cache.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * This file defines C prototypes for the low-level cache assembly functions 4 + * and populates a vtable for each selected ARM CPU cache type. 5 + */ 6 + 7 + #include <linux/types.h> 8 + #include <asm/cacheflush.h> 9 + 10 + #ifdef CONFIG_CPU_CACHE_V4 11 + void v4_flush_icache_all(void); 12 + void v4_flush_kern_cache_all(void); 13 + void v4_flush_user_cache_all(void); 14 + void v4_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 15 + void v4_coherent_kern_range(unsigned long, unsigned long); 16 + int v4_coherent_user_range(unsigned long, unsigned long); 17 + void v4_flush_kern_dcache_area(void *, size_t); 18 + void v4_dma_map_area(const void *, size_t, int); 19 + void v4_dma_unmap_area(const void *, size_t, int); 20 + void v4_dma_flush_range(const void *, const void *); 21 + 22 + struct cpu_cache_fns v4_cache_fns __initconst = { 23 + .flush_icache_all = v4_flush_icache_all, 24 + .flush_kern_all = v4_flush_kern_cache_all, 25 + .flush_kern_louis = v4_flush_kern_cache_all, 26 + .flush_user_all = v4_flush_user_cache_all, 27 + .flush_user_range = v4_flush_user_cache_range, 28 + .coherent_kern_range = v4_coherent_kern_range, 29 + .coherent_user_range = v4_coherent_user_range, 30 + .flush_kern_dcache_area = v4_flush_kern_dcache_area, 31 + .dma_map_area = v4_dma_map_area, 32 + .dma_unmap_area = v4_dma_unmap_area, 33 + .dma_flush_range = v4_dma_flush_range, 34 + }; 35 + #endif 36 + 37 + /* V4 write-back cache "V4WB" */ 38 + #ifdef CONFIG_CPU_CACHE_V4WB 39 + void v4wb_flush_icache_all(void); 40 + void v4wb_flush_kern_cache_all(void); 41 + void v4wb_flush_user_cache_all(void); 42 + void v4wb_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 43 + void v4wb_coherent_kern_range(unsigned long, unsigned long); 44 + int v4wb_coherent_user_range(unsigned long, unsigned long); 45 + void v4wb_flush_kern_dcache_area(void *, size_t); 46 + void v4wb_dma_map_area(const void *, size_t, int); 47 
+ void v4wb_dma_unmap_area(const void *, size_t, int); 48 + void v4wb_dma_flush_range(const void *, const void *); 49 + 50 + struct cpu_cache_fns v4wb_cache_fns __initconst = { 51 + .flush_icache_all = v4wb_flush_icache_all, 52 + .flush_kern_all = v4wb_flush_kern_cache_all, 53 + .flush_kern_louis = v4wb_flush_kern_cache_all, 54 + .flush_user_all = v4wb_flush_user_cache_all, 55 + .flush_user_range = v4wb_flush_user_cache_range, 56 + .coherent_kern_range = v4wb_coherent_kern_range, 57 + .coherent_user_range = v4wb_coherent_user_range, 58 + .flush_kern_dcache_area = v4wb_flush_kern_dcache_area, 59 + .dma_map_area = v4wb_dma_map_area, 60 + .dma_unmap_area = v4wb_dma_unmap_area, 61 + .dma_flush_range = v4wb_dma_flush_range, 62 + }; 63 + #endif 64 + 65 + /* V4 write-through cache "V4WT" */ 66 + #ifdef CONFIG_CPU_CACHE_V4WT 67 + void v4wt_flush_icache_all(void); 68 + void v4wt_flush_kern_cache_all(void); 69 + void v4wt_flush_user_cache_all(void); 70 + void v4wt_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 71 + void v4wt_coherent_kern_range(unsigned long, unsigned long); 72 + int v4wt_coherent_user_range(unsigned long, unsigned long); 73 + void v4wt_flush_kern_dcache_area(void *, size_t); 74 + void v4wt_dma_map_area(const void *, size_t, int); 75 + void v4wt_dma_unmap_area(const void *, size_t, int); 76 + void v4wt_dma_flush_range(const void *, const void *); 77 + 78 + struct cpu_cache_fns v4wt_cache_fns __initconst = { 79 + .flush_icache_all = v4wt_flush_icache_all, 80 + .flush_kern_all = v4wt_flush_kern_cache_all, 81 + .flush_kern_louis = v4wt_flush_kern_cache_all, 82 + .flush_user_all = v4wt_flush_user_cache_all, 83 + .flush_user_range = v4wt_flush_user_cache_range, 84 + .coherent_kern_range = v4wt_coherent_kern_range, 85 + .coherent_user_range = v4wt_coherent_user_range, 86 + .flush_kern_dcache_area = v4wt_flush_kern_dcache_area, 87 + .dma_map_area = v4wt_dma_map_area, 88 + .dma_unmap_area = v4wt_dma_unmap_area, 89 + .dma_flush_range = 
v4wt_dma_flush_range, 90 + }; 91 + #endif 92 + 93 + /* Faraday FA526 cache */ 94 + #ifdef CONFIG_CPU_CACHE_FA 95 + void fa_flush_icache_all(void); 96 + void fa_flush_kern_cache_all(void); 97 + void fa_flush_user_cache_all(void); 98 + void fa_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 99 + void fa_coherent_kern_range(unsigned long, unsigned long); 100 + int fa_coherent_user_range(unsigned long, unsigned long); 101 + void fa_flush_kern_dcache_area(void *, size_t); 102 + void fa_dma_map_area(const void *, size_t, int); 103 + void fa_dma_unmap_area(const void *, size_t, int); 104 + void fa_dma_flush_range(const void *, const void *); 105 + 106 + struct cpu_cache_fns fa_cache_fns __initconst = { 107 + .flush_icache_all = fa_flush_icache_all, 108 + .flush_kern_all = fa_flush_kern_cache_all, 109 + .flush_kern_louis = fa_flush_kern_cache_all, 110 + .flush_user_all = fa_flush_user_cache_all, 111 + .flush_user_range = fa_flush_user_cache_range, 112 + .coherent_kern_range = fa_coherent_kern_range, 113 + .coherent_user_range = fa_coherent_user_range, 114 + .flush_kern_dcache_area = fa_flush_kern_dcache_area, 115 + .dma_map_area = fa_dma_map_area, 116 + .dma_unmap_area = fa_dma_unmap_area, 117 + .dma_flush_range = fa_dma_flush_range, 118 + }; 119 + #endif 120 + 121 + #ifdef CONFIG_CPU_CACHE_V6 122 + void v6_flush_icache_all(void); 123 + void v6_flush_kern_cache_all(void); 124 + void v6_flush_user_cache_all(void); 125 + void v6_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 126 + void v6_coherent_kern_range(unsigned long, unsigned long); 127 + int v6_coherent_user_range(unsigned long, unsigned long); 128 + void v6_flush_kern_dcache_area(void *, size_t); 129 + void v6_dma_map_area(const void *, size_t, int); 130 + void v6_dma_unmap_area(const void *, size_t, int); 131 + void v6_dma_flush_range(const void *, const void *); 132 + 133 + struct cpu_cache_fns v6_cache_fns __initconst = { 134 + .flush_icache_all = v6_flush_icache_all, 135 + 
.flush_kern_all = v6_flush_kern_cache_all, 136 + .flush_kern_louis = v6_flush_kern_cache_all, 137 + .flush_user_all = v6_flush_user_cache_all, 138 + .flush_user_range = v6_flush_user_cache_range, 139 + .coherent_kern_range = v6_coherent_kern_range, 140 + .coherent_user_range = v6_coherent_user_range, 141 + .flush_kern_dcache_area = v6_flush_kern_dcache_area, 142 + .dma_map_area = v6_dma_map_area, 143 + .dma_unmap_area = v6_dma_unmap_area, 144 + .dma_flush_range = v6_dma_flush_range, 145 + }; 146 + #endif 147 + 148 + #ifdef CONFIG_CPU_CACHE_V7 149 + void v7_flush_icache_all(void); 150 + void v7_flush_kern_cache_all(void); 151 + void v7_flush_kern_cache_louis(void); 152 + void v7_flush_user_cache_all(void); 153 + void v7_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 154 + void v7_coherent_kern_range(unsigned long, unsigned long); 155 + int v7_coherent_user_range(unsigned long, unsigned long); 156 + void v7_flush_kern_dcache_area(void *, size_t); 157 + void v7_dma_map_area(const void *, size_t, int); 158 + void v7_dma_unmap_area(const void *, size_t, int); 159 + void v7_dma_flush_range(const void *, const void *); 160 + 161 + struct cpu_cache_fns v7_cache_fns __initconst = { 162 + .flush_icache_all = v7_flush_icache_all, 163 + .flush_kern_all = v7_flush_kern_cache_all, 164 + .flush_kern_louis = v7_flush_kern_cache_louis, 165 + .flush_user_all = v7_flush_user_cache_all, 166 + .flush_user_range = v7_flush_user_cache_range, 167 + .coherent_kern_range = v7_coherent_kern_range, 168 + .coherent_user_range = v7_coherent_user_range, 169 + .flush_kern_dcache_area = v7_flush_kern_dcache_area, 170 + .dma_map_area = v7_dma_map_area, 171 + .dma_unmap_area = v7_dma_unmap_area, 172 + .dma_flush_range = v7_dma_flush_range, 173 + }; 174 + 175 + /* Special quirky cache flush function for Broadcom B15 v7 caches */ 176 + void b15_flush_kern_cache_all(void); 177 + 178 + struct cpu_cache_fns b15_cache_fns __initconst = { 179 + .flush_icache_all = v7_flush_icache_all, 
180 + #ifdef CONFIG_CACHE_B15_RAC 181 + .flush_kern_all = b15_flush_kern_cache_all, 182 + #else 183 + .flush_kern_all = v7_flush_kern_cache_all, 184 + #endif 185 + .flush_kern_louis = v7_flush_kern_cache_louis, 186 + .flush_user_all = v7_flush_user_cache_all, 187 + .flush_user_range = v7_flush_user_cache_range, 188 + .coherent_kern_range = v7_coherent_kern_range, 189 + .coherent_user_range = v7_coherent_user_range, 190 + .flush_kern_dcache_area = v7_flush_kern_dcache_area, 191 + .dma_map_area = v7_dma_map_area, 192 + .dma_unmap_area = v7_dma_unmap_area, 193 + .dma_flush_range = v7_dma_flush_range, 194 + }; 195 + #endif 196 + 197 + /* The NOP cache is just a set of dummy stubs that by definition does nothing */ 198 + #ifdef CONFIG_CPU_CACHE_NOP 199 + void nop_flush_icache_all(void); 200 + void nop_flush_kern_cache_all(void); 201 + void nop_flush_user_cache_all(void); 202 + void nop_flush_user_cache_range(unsigned long start, unsigned long end, unsigned int flags); 203 + void nop_coherent_kern_range(unsigned long start, unsigned long end); 204 + int nop_coherent_user_range(unsigned long, unsigned long); 205 + void nop_flush_kern_dcache_area(void *kaddr, size_t size); 206 + void nop_dma_map_area(const void *start, size_t size, int flags); 207 + void nop_dma_unmap_area(const void *start, size_t size, int flags); 208 + void nop_dma_flush_range(const void *start, const void *end); 209 + 210 + struct cpu_cache_fns nop_cache_fns __initconst = { 211 + .flush_icache_all = nop_flush_icache_all, 212 + .flush_kern_all = nop_flush_kern_cache_all, 213 + .flush_kern_louis = nop_flush_kern_cache_all, 214 + .flush_user_all = nop_flush_user_cache_all, 215 + .flush_user_range = nop_flush_user_cache_range, 216 + .coherent_kern_range = nop_coherent_kern_range, 217 + .coherent_user_range = nop_coherent_user_range, 218 + .flush_kern_dcache_area = nop_flush_kern_dcache_area, 219 + .dma_map_area = nop_dma_map_area, 220 + .dma_unmap_area = nop_dma_unmap_area, 221 + .dma_flush_range = 
nop_dma_flush_range, 222 + }; 223 + #endif 224 + 225 + #ifdef CONFIG_CPU_CACHE_V7M 226 + void v7m_flush_icache_all(void); 227 + void v7m_flush_kern_cache_all(void); 228 + void v7m_flush_user_cache_all(void); 229 + void v7m_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 230 + void v7m_coherent_kern_range(unsigned long, unsigned long); 231 + int v7m_coherent_user_range(unsigned long, unsigned long); 232 + void v7m_flush_kern_dcache_area(void *, size_t); 233 + void v7m_dma_map_area(const void *, size_t, int); 234 + void v7m_dma_unmap_area(const void *, size_t, int); 235 + void v7m_dma_flush_range(const void *, const void *); 236 + 237 + struct cpu_cache_fns v7m_cache_fns __initconst = { 238 + .flush_icache_all = v7m_flush_icache_all, 239 + .flush_kern_all = v7m_flush_kern_cache_all, 240 + .flush_kern_louis = v7m_flush_kern_cache_all, 241 + .flush_user_all = v7m_flush_user_cache_all, 242 + .flush_user_range = v7m_flush_user_cache_range, 243 + .coherent_kern_range = v7m_coherent_kern_range, 244 + .coherent_user_range = v7m_coherent_user_range, 245 + .flush_kern_dcache_area = v7m_flush_kern_dcache_area, 246 + .dma_map_area = v7m_dma_map_area, 247 + .dma_unmap_area = v7m_dma_unmap_area, 248 + .dma_flush_range = v7m_dma_flush_range, 249 + }; 250 + #endif 251 + 252 + #ifdef CONFIG_CPU_ARM1020 253 + void arm1020_flush_icache_all(void); 254 + void arm1020_flush_kern_cache_all(void); 255 + void arm1020_flush_user_cache_all(void); 256 + void arm1020_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 257 + void arm1020_coherent_kern_range(unsigned long, unsigned long); 258 + int arm1020_coherent_user_range(unsigned long, unsigned long); 259 + void arm1020_flush_kern_dcache_area(void *, size_t); 260 + void arm1020_dma_map_area(const void *, size_t, int); 261 + void arm1020_dma_unmap_area(const void *, size_t, int); 262 + void arm1020_dma_flush_range(const void *, const void *); 263 + 264 + struct cpu_cache_fns arm1020_cache_fns __initconst = 
{ 265 + .flush_icache_all = arm1020_flush_icache_all, 266 + .flush_kern_all = arm1020_flush_kern_cache_all, 267 + .flush_kern_louis = arm1020_flush_kern_cache_all, 268 + .flush_user_all = arm1020_flush_user_cache_all, 269 + .flush_user_range = arm1020_flush_user_cache_range, 270 + .coherent_kern_range = arm1020_coherent_kern_range, 271 + .coherent_user_range = arm1020_coherent_user_range, 272 + .flush_kern_dcache_area = arm1020_flush_kern_dcache_area, 273 + .dma_map_area = arm1020_dma_map_area, 274 + .dma_unmap_area = arm1020_dma_unmap_area, 275 + .dma_flush_range = arm1020_dma_flush_range, 276 + }; 277 + #endif 278 + 279 + #ifdef CONFIG_CPU_ARM1020E 280 + void arm1020e_flush_icache_all(void); 281 + void arm1020e_flush_kern_cache_all(void); 282 + void arm1020e_flush_user_cache_all(void); 283 + void arm1020e_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 284 + void arm1020e_coherent_kern_range(unsigned long, unsigned long); 285 + int arm1020e_coherent_user_range(unsigned long, unsigned long); 286 + void arm1020e_flush_kern_dcache_area(void *, size_t); 287 + void arm1020e_dma_map_area(const void *, size_t, int); 288 + void arm1020e_dma_unmap_area(const void *, size_t, int); 289 + void arm1020e_dma_flush_range(const void *, const void *); 290 + 291 + struct cpu_cache_fns arm1020e_cache_fns __initconst = { 292 + .flush_icache_all = arm1020e_flush_icache_all, 293 + .flush_kern_all = arm1020e_flush_kern_cache_all, 294 + .flush_kern_louis = arm1020e_flush_kern_cache_all, 295 + .flush_user_all = arm1020e_flush_user_cache_all, 296 + .flush_user_range = arm1020e_flush_user_cache_range, 297 + .coherent_kern_range = arm1020e_coherent_kern_range, 298 + .coherent_user_range = arm1020e_coherent_user_range, 299 + .flush_kern_dcache_area = arm1020e_flush_kern_dcache_area, 300 + .dma_map_area = arm1020e_dma_map_area, 301 + .dma_unmap_area = arm1020e_dma_unmap_area, 302 + .dma_flush_range = arm1020e_dma_flush_range, 303 + }; 304 + #endif 305 + 306 + #ifdef 
CONFIG_CPU_ARM1022 307 + void arm1022_flush_icache_all(void); 308 + void arm1022_flush_kern_cache_all(void); 309 + void arm1022_flush_user_cache_all(void); 310 + void arm1022_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 311 + void arm1022_coherent_kern_range(unsigned long, unsigned long); 312 + int arm1022_coherent_user_range(unsigned long, unsigned long); 313 + void arm1022_flush_kern_dcache_area(void *, size_t); 314 + void arm1022_dma_map_area(const void *, size_t, int); 315 + void arm1022_dma_unmap_area(const void *, size_t, int); 316 + void arm1022_dma_flush_range(const void *, const void *); 317 + 318 + struct cpu_cache_fns arm1022_cache_fns __initconst = { 319 + .flush_icache_all = arm1022_flush_icache_all, 320 + .flush_kern_all = arm1022_flush_kern_cache_all, 321 + .flush_kern_louis = arm1022_flush_kern_cache_all, 322 + .flush_user_all = arm1022_flush_user_cache_all, 323 + .flush_user_range = arm1022_flush_user_cache_range, 324 + .coherent_kern_range = arm1022_coherent_kern_range, 325 + .coherent_user_range = arm1022_coherent_user_range, 326 + .flush_kern_dcache_area = arm1022_flush_kern_dcache_area, 327 + .dma_map_area = arm1022_dma_map_area, 328 + .dma_unmap_area = arm1022_dma_unmap_area, 329 + .dma_flush_range = arm1022_dma_flush_range, 330 + }; 331 + #endif 332 + 333 + #ifdef CONFIG_CPU_ARM1026 334 + void arm1026_flush_icache_all(void); 335 + void arm1026_flush_kern_cache_all(void); 336 + void arm1026_flush_user_cache_all(void); 337 + void arm1026_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 338 + void arm1026_coherent_kern_range(unsigned long, unsigned long); 339 + int arm1026_coherent_user_range(unsigned long, unsigned long); 340 + void arm1026_flush_kern_dcache_area(void *, size_t); 341 + void arm1026_dma_map_area(const void *, size_t, int); 342 + void arm1026_dma_unmap_area(const void *, size_t, int); 343 + void arm1026_dma_flush_range(const void *, const void *); 344 + 345 + struct cpu_cache_fns 
arm1026_cache_fns __initconst = { 346 + .flush_icache_all = arm1026_flush_icache_all, 347 + .flush_kern_all = arm1026_flush_kern_cache_all, 348 + .flush_kern_louis = arm1026_flush_kern_cache_all, 349 + .flush_user_all = arm1026_flush_user_cache_all, 350 + .flush_user_range = arm1026_flush_user_cache_range, 351 + .coherent_kern_range = arm1026_coherent_kern_range, 352 + .coherent_user_range = arm1026_coherent_user_range, 353 + .flush_kern_dcache_area = arm1026_flush_kern_dcache_area, 354 + .dma_map_area = arm1026_dma_map_area, 355 + .dma_unmap_area = arm1026_dma_unmap_area, 356 + .dma_flush_range = arm1026_dma_flush_range, 357 + }; 358 + #endif 359 + 360 + #if defined(CONFIG_CPU_ARM920T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) 361 + void arm920_flush_icache_all(void); 362 + void arm920_flush_kern_cache_all(void); 363 + void arm920_flush_user_cache_all(void); 364 + void arm920_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 365 + void arm920_coherent_kern_range(unsigned long, unsigned long); 366 + int arm920_coherent_user_range(unsigned long, unsigned long); 367 + void arm920_flush_kern_dcache_area(void *, size_t); 368 + void arm920_dma_map_area(const void *, size_t, int); 369 + void arm920_dma_unmap_area(const void *, size_t, int); 370 + void arm920_dma_flush_range(const void *, const void *); 371 + 372 + struct cpu_cache_fns arm920_cache_fns __initconst = { 373 + .flush_icache_all = arm920_flush_icache_all, 374 + .flush_kern_all = arm920_flush_kern_cache_all, 375 + .flush_kern_louis = arm920_flush_kern_cache_all, 376 + .flush_user_all = arm920_flush_user_cache_all, 377 + .flush_user_range = arm920_flush_user_cache_range, 378 + .coherent_kern_range = arm920_coherent_kern_range, 379 + .coherent_user_range = arm920_coherent_user_range, 380 + .flush_kern_dcache_area = arm920_flush_kern_dcache_area, 381 + .dma_map_area = arm920_dma_map_area, 382 + .dma_unmap_area = arm920_dma_unmap_area, 383 + .dma_flush_range = arm920_dma_flush_range, 384 + }; 
385 + #endif 386 + 387 + #if defined(CONFIG_CPU_ARM922T) && !defined(CONFIG_CPU_DCACHE_WRITETHROUGH) 388 + void arm922_flush_icache_all(void); 389 + void arm922_flush_kern_cache_all(void); 390 + void arm922_flush_user_cache_all(void); 391 + void arm922_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 392 + void arm922_coherent_kern_range(unsigned long, unsigned long); 393 + int arm922_coherent_user_range(unsigned long, unsigned long); 394 + void arm922_flush_kern_dcache_area(void *, size_t); 395 + void arm922_dma_map_area(const void *, size_t, int); 396 + void arm922_dma_unmap_area(const void *, size_t, int); 397 + void arm922_dma_flush_range(const void *, const void *); 398 + 399 + struct cpu_cache_fns arm922_cache_fns __initconst = { 400 + .flush_icache_all = arm922_flush_icache_all, 401 + .flush_kern_all = arm922_flush_kern_cache_all, 402 + .flush_kern_louis = arm922_flush_kern_cache_all, 403 + .flush_user_all = arm922_flush_user_cache_all, 404 + .flush_user_range = arm922_flush_user_cache_range, 405 + .coherent_kern_range = arm922_coherent_kern_range, 406 + .coherent_user_range = arm922_coherent_user_range, 407 + .flush_kern_dcache_area = arm922_flush_kern_dcache_area, 408 + .dma_map_area = arm922_dma_map_area, 409 + .dma_unmap_area = arm922_dma_unmap_area, 410 + .dma_flush_range = arm922_dma_flush_range, 411 + }; 412 + #endif 413 + 414 + #ifdef CONFIG_CPU_ARM925T 415 + void arm925_flush_icache_all(void); 416 + void arm925_flush_kern_cache_all(void); 417 + void arm925_flush_user_cache_all(void); 418 + void arm925_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 419 + void arm925_coherent_kern_range(unsigned long, unsigned long); 420 + int arm925_coherent_user_range(unsigned long, unsigned long); 421 + void arm925_flush_kern_dcache_area(void *, size_t); 422 + void arm925_dma_map_area(const void *, size_t, int); 423 + void arm925_dma_unmap_area(const void *, size_t, int); 424 + void arm925_dma_flush_range(const void *, const 
void *); 425 + 426 + struct cpu_cache_fns arm925_cache_fns __initconst = { 427 + .flush_icache_all = arm925_flush_icache_all, 428 + .flush_kern_all = arm925_flush_kern_cache_all, 429 + .flush_kern_louis = arm925_flush_kern_cache_all, 430 + .flush_user_all = arm925_flush_user_cache_all, 431 + .flush_user_range = arm925_flush_user_cache_range, 432 + .coherent_kern_range = arm925_coherent_kern_range, 433 + .coherent_user_range = arm925_coherent_user_range, 434 + .flush_kern_dcache_area = arm925_flush_kern_dcache_area, 435 + .dma_map_area = arm925_dma_map_area, 436 + .dma_unmap_area = arm925_dma_unmap_area, 437 + .dma_flush_range = arm925_dma_flush_range, 438 + }; 439 + #endif 440 + 441 + #ifdef CONFIG_CPU_ARM926T 442 + void arm926_flush_icache_all(void); 443 + void arm926_flush_kern_cache_all(void); 444 + void arm926_flush_user_cache_all(void); 445 + void arm926_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 446 + void arm926_coherent_kern_range(unsigned long, unsigned long); 447 + int arm926_coherent_user_range(unsigned long, unsigned long); 448 + void arm926_flush_kern_dcache_area(void *, size_t); 449 + void arm926_dma_map_area(const void *, size_t, int); 450 + void arm926_dma_unmap_area(const void *, size_t, int); 451 + void arm926_dma_flush_range(const void *, const void *); 452 + 453 + struct cpu_cache_fns arm926_cache_fns __initconst = { 454 + .flush_icache_all = arm926_flush_icache_all, 455 + .flush_kern_all = arm926_flush_kern_cache_all, 456 + .flush_kern_louis = arm926_flush_kern_cache_all, 457 + .flush_user_all = arm926_flush_user_cache_all, 458 + .flush_user_range = arm926_flush_user_cache_range, 459 + .coherent_kern_range = arm926_coherent_kern_range, 460 + .coherent_user_range = arm926_coherent_user_range, 461 + .flush_kern_dcache_area = arm926_flush_kern_dcache_area, 462 + .dma_map_area = arm926_dma_map_area, 463 + .dma_unmap_area = arm926_dma_unmap_area, 464 + .dma_flush_range = arm926_dma_flush_range, 465 + }; 466 + #endif 467 + 
468 + #ifdef CONFIG_CPU_ARM940T 469 + void arm940_flush_icache_all(void); 470 + void arm940_flush_kern_cache_all(void); 471 + void arm940_flush_user_cache_all(void); 472 + void arm940_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 473 + void arm940_coherent_kern_range(unsigned long, unsigned long); 474 + int arm940_coherent_user_range(unsigned long, unsigned long); 475 + void arm940_flush_kern_dcache_area(void *, size_t); 476 + void arm940_dma_map_area(const void *, size_t, int); 477 + void arm940_dma_unmap_area(const void *, size_t, int); 478 + void arm940_dma_flush_range(const void *, const void *); 479 + 480 + struct cpu_cache_fns arm940_cache_fns __initconst = { 481 + .flush_icache_all = arm940_flush_icache_all, 482 + .flush_kern_all = arm940_flush_kern_cache_all, 483 + .flush_kern_louis = arm940_flush_kern_cache_all, 484 + .flush_user_all = arm940_flush_user_cache_all, 485 + .flush_user_range = arm940_flush_user_cache_range, 486 + .coherent_kern_range = arm940_coherent_kern_range, 487 + .coherent_user_range = arm940_coherent_user_range, 488 + .flush_kern_dcache_area = arm940_flush_kern_dcache_area, 489 + .dma_map_area = arm940_dma_map_area, 490 + .dma_unmap_area = arm940_dma_unmap_area, 491 + .dma_flush_range = arm940_dma_flush_range, 492 + }; 493 + #endif 494 + 495 + #ifdef CONFIG_CPU_ARM946E 496 + void arm946_flush_icache_all(void); 497 + void arm946_flush_kern_cache_all(void); 498 + void arm946_flush_user_cache_all(void); 499 + void arm946_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 500 + void arm946_coherent_kern_range(unsigned long, unsigned long); 501 + int arm946_coherent_user_range(unsigned long, unsigned long); 502 + void arm946_flush_kern_dcache_area(void *, size_t); 503 + void arm946_dma_map_area(const void *, size_t, int); 504 + void arm946_dma_unmap_area(const void *, size_t, int); 505 + void arm946_dma_flush_range(const void *, const void *); 506 + 507 + struct cpu_cache_fns arm946_cache_fns 
__initconst = { 508 + .flush_icache_all = arm946_flush_icache_all, 509 + .flush_kern_all = arm946_flush_kern_cache_all, 510 + .flush_kern_louis = arm946_flush_kern_cache_all, 511 + .flush_user_all = arm946_flush_user_cache_all, 512 + .flush_user_range = arm946_flush_user_cache_range, 513 + .coherent_kern_range = arm946_coherent_kern_range, 514 + .coherent_user_range = arm946_coherent_user_range, 515 + .flush_kern_dcache_area = arm946_flush_kern_dcache_area, 516 + .dma_map_area = arm946_dma_map_area, 517 + .dma_unmap_area = arm946_dma_unmap_area, 518 + .dma_flush_range = arm946_dma_flush_range, 519 + }; 520 + #endif 521 + 522 + #ifdef CONFIG_CPU_XSCALE 523 + void xscale_flush_icache_all(void); 524 + void xscale_flush_kern_cache_all(void); 525 + void xscale_flush_user_cache_all(void); 526 + void xscale_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 527 + void xscale_coherent_kern_range(unsigned long, unsigned long); 528 + int xscale_coherent_user_range(unsigned long, unsigned long); 529 + void xscale_flush_kern_dcache_area(void *, size_t); 530 + void xscale_dma_map_area(const void *, size_t, int); 531 + void xscale_dma_unmap_area(const void *, size_t, int); 532 + void xscale_dma_flush_range(const void *, const void *); 533 + 534 + struct cpu_cache_fns xscale_cache_fns __initconst = { 535 + .flush_icache_all = xscale_flush_icache_all, 536 + .flush_kern_all = xscale_flush_kern_cache_all, 537 + .flush_kern_louis = xscale_flush_kern_cache_all, 538 + .flush_user_all = xscale_flush_user_cache_all, 539 + .flush_user_range = xscale_flush_user_cache_range, 540 + .coherent_kern_range = xscale_coherent_kern_range, 541 + .coherent_user_range = xscale_coherent_user_range, 542 + .flush_kern_dcache_area = xscale_flush_kern_dcache_area, 543 + .dma_map_area = xscale_dma_map_area, 544 + .dma_unmap_area = xscale_dma_unmap_area, 545 + .dma_flush_range = xscale_dma_flush_range, 546 + }; 547 + 548 + /* The 80200 A0 and A1 need a special quirk for dma_map_area() */ 549 
+ void xscale_80200_A0_A1_dma_map_area(const void *, size_t, int); 550 + 551 + struct cpu_cache_fns xscale_80200_A0_A1_cache_fns __initconst = { 552 + .flush_icache_all = xscale_flush_icache_all, 553 + .flush_kern_all = xscale_flush_kern_cache_all, 554 + .flush_kern_louis = xscale_flush_kern_cache_all, 555 + .flush_user_all = xscale_flush_user_cache_all, 556 + .flush_user_range = xscale_flush_user_cache_range, 557 + .coherent_kern_range = xscale_coherent_kern_range, 558 + .coherent_user_range = xscale_coherent_user_range, 559 + .flush_kern_dcache_area = xscale_flush_kern_dcache_area, 560 + .dma_map_area = xscale_80200_A0_A1_dma_map_area, 561 + .dma_unmap_area = xscale_dma_unmap_area, 562 + .dma_flush_range = xscale_dma_flush_range, 563 + }; 564 + #endif 565 + 566 + #ifdef CONFIG_CPU_XSC3 567 + void xsc3_flush_icache_all(void); 568 + void xsc3_flush_kern_cache_all(void); 569 + void xsc3_flush_user_cache_all(void); 570 + void xsc3_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 571 + void xsc3_coherent_kern_range(unsigned long, unsigned long); 572 + int xsc3_coherent_user_range(unsigned long, unsigned long); 573 + void xsc3_flush_kern_dcache_area(void *, size_t); 574 + void xsc3_dma_map_area(const void *, size_t, int); 575 + void xsc3_dma_unmap_area(const void *, size_t, int); 576 + void xsc3_dma_flush_range(const void *, const void *); 577 + 578 + struct cpu_cache_fns xsc3_cache_fns __initconst = { 579 + .flush_icache_all = xsc3_flush_icache_all, 580 + .flush_kern_all = xsc3_flush_kern_cache_all, 581 + .flush_kern_louis = xsc3_flush_kern_cache_all, 582 + .flush_user_all = xsc3_flush_user_cache_all, 583 + .flush_user_range = xsc3_flush_user_cache_range, 584 + .coherent_kern_range = xsc3_coherent_kern_range, 585 + .coherent_user_range = xsc3_coherent_user_range, 586 + .flush_kern_dcache_area = xsc3_flush_kern_dcache_area, 587 + .dma_map_area = xsc3_dma_map_area, 588 + .dma_unmap_area = xsc3_dma_unmap_area, 589 + .dma_flush_range = 
xsc3_dma_flush_range, 590 + }; 591 + #endif 592 + 593 + #ifdef CONFIG_CPU_MOHAWK 594 + void mohawk_flush_icache_all(void); 595 + void mohawk_flush_kern_cache_all(void); 596 + void mohawk_flush_user_cache_all(void); 597 + void mohawk_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 598 + void mohawk_coherent_kern_range(unsigned long, unsigned long); 599 + int mohawk_coherent_user_range(unsigned long, unsigned long); 600 + void mohawk_flush_kern_dcache_area(void *, size_t); 601 + void mohawk_dma_map_area(const void *, size_t, int); 602 + void mohawk_dma_unmap_area(const void *, size_t, int); 603 + void mohawk_dma_flush_range(const void *, const void *); 604 + 605 + struct cpu_cache_fns mohawk_cache_fns __initconst = { 606 + .flush_icache_all = mohawk_flush_icache_all, 607 + .flush_kern_all = mohawk_flush_kern_cache_all, 608 + .flush_kern_louis = mohawk_flush_kern_cache_all, 609 + .flush_user_all = mohawk_flush_user_cache_all, 610 + .flush_user_range = mohawk_flush_user_cache_range, 611 + .coherent_kern_range = mohawk_coherent_kern_range, 612 + .coherent_user_range = mohawk_coherent_user_range, 613 + .flush_kern_dcache_area = mohawk_flush_kern_dcache_area, 614 + .dma_map_area = mohawk_dma_map_area, 615 + .dma_unmap_area = mohawk_dma_unmap_area, 616 + .dma_flush_range = mohawk_dma_flush_range, 617 + }; 618 + #endif 619 + 620 + #ifdef CONFIG_CPU_FEROCEON 621 + void feroceon_flush_icache_all(void); 622 + void feroceon_flush_kern_cache_all(void); 623 + void feroceon_flush_user_cache_all(void); 624 + void feroceon_flush_user_cache_range(unsigned long, unsigned long, unsigned int); 625 + void feroceon_coherent_kern_range(unsigned long, unsigned long); 626 + int feroceon_coherent_user_range(unsigned long, unsigned long); 627 + void feroceon_flush_kern_dcache_area(void *, size_t); 628 + void feroceon_dma_map_area(const void *, size_t, int); 629 + void feroceon_dma_unmap_area(const void *, size_t, int); 630 + void feroceon_dma_flush_range(const void *, const 
void *); 631 + 632 + struct cpu_cache_fns feroceon_cache_fns __initconst = { 633 + .flush_icache_all = feroceon_flush_icache_all, 634 + .flush_kern_all = feroceon_flush_kern_cache_all, 635 + .flush_kern_louis = feroceon_flush_kern_cache_all, 636 + .flush_user_all = feroceon_flush_user_cache_all, 637 + .flush_user_range = feroceon_flush_user_cache_range, 638 + .coherent_kern_range = feroceon_coherent_kern_range, 639 + .coherent_user_range = feroceon_coherent_user_range, 640 + .flush_kern_dcache_area = feroceon_flush_kern_dcache_area, 641 + .dma_map_area = feroceon_dma_map_area, 642 + .dma_unmap_area = feroceon_dma_unmap_area, 643 + .dma_flush_range = feroceon_dma_flush_range, 644 + }; 645 + 646 + void feroceon_range_flush_kern_dcache_area(void *, size_t); 647 + void feroceon_range_dma_map_area(const void *, size_t, int); 648 + void feroceon_range_dma_flush_range(const void *, const void *); 649 + 650 + struct cpu_cache_fns feroceon_range_cache_fns __initconst = { 651 + .flush_icache_all = feroceon_flush_icache_all, 652 + .flush_kern_all = feroceon_flush_kern_cache_all, 653 + .flush_kern_louis = feroceon_flush_kern_cache_all, 654 + .flush_user_all = feroceon_flush_user_cache_all, 655 + .flush_user_range = feroceon_flush_user_cache_range, 656 + .coherent_kern_range = feroceon_coherent_kern_range, 657 + .coherent_user_range = feroceon_coherent_user_range, 658 + .flush_kern_dcache_area = feroceon_range_flush_kern_dcache_area, 659 + .dma_map_area = feroceon_range_dma_map_area, 660 + .dma_unmap_area = feroceon_dma_unmap_area, 661 + .dma_flush_range = feroceon_range_dma_flush_range, 662 + }; 663 + #endif
+29
arch/arm/mm/fault.c
··· 242 242 return false; 243 243 } 244 244 245 + #ifdef CONFIG_CPU_TTBR0_PAN 246 + static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs) 247 + { 248 + struct svc_pt_regs *svcregs; 249 + 250 + /* If we are in user mode: permission granted */ 251 + if (user_mode(regs)) 252 + return true; 253 + 254 + /* uaccess state saved above pt_regs on SVC exception entry */ 255 + svcregs = to_svc_pt_regs(regs); 256 + 257 + return !(svcregs->ttbcr & TTBCR_EPD0); 258 + } 259 + #else 260 + static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs) 261 + { 262 + return true; 263 + } 264 + #endif 265 + 245 266 static int __kprobes 246 267 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) 247 268 { ··· 305 284 } 306 285 307 286 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); 287 + 288 + /* 289 + * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are 290 + * routed via the translation fault mechanism. Check whether uaccess 291 + * is disabled while in kernel mode. 292 + */ 293 + if (!ttbr0_usermode_access_allowed(regs)) 294 + goto no_context; 308 295 309 296 if (!(flags & FAULT_FLAG_USER)) 310 297 goto lock_mmap;
+3 -4
arch/arm/mm/mmu.c
··· 1687 1687 */ 1688 1688 cr = get_cr(); 1689 1689 set_cr(cr & ~(CR_I | CR_C)); 1690 - asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr)); 1691 - asm volatile("mcr p15, 0, %0, c2, c0, 2" 1692 - : : "r" (ttbcr & ~(3 << 8 | 3 << 10))); 1690 + ttbcr = cpu_get_ttbcr(); 1691 + cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10)); 1693 1692 flush_cache_all(); 1694 1693 1695 1694 /* ··· 1700 1701 lpae_pgtables_remap(offset, pa_pgd); 1701 1702 1702 1703 /* Re-enable the caches and cacheable TLB walks */ 1703 - asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr)); 1704 + cpu_set_ttbcr(ttbcr); 1704 1705 set_cr(cr); 1705 1706 } 1706 1707
+39 -30
arch/arm/mm/proc-arm1020.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 57 56 /* 58 57 * cpu_arm1020_proc_init() 59 58 */ 60 - ENTRY(cpu_arm1020_proc_init) 59 + SYM_TYPED_FUNC_START(cpu_arm1020_proc_init) 61 60 ret lr 61 + SYM_FUNC_END(cpu_arm1020_proc_init) 62 62 63 63 /* 64 64 * cpu_arm1020_proc_fin() 65 65 */ 66 - ENTRY(cpu_arm1020_proc_fin) 66 + SYM_TYPED_FUNC_START(cpu_arm1020_proc_fin) 67 67 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 68 bic r0, r0, #0x1000 @ ...i............ 69 69 bic r0, r0, #0x000e @ ............wca. 70 70 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 71 ret lr 72 + SYM_FUNC_END(cpu_arm1020_proc_fin) 72 73 73 74 /* 74 75 * cpu_arm1020_reset(loc) ··· 83 80 */ 84 81 .align 5 85 82 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1020_reset) 83 + SYM_TYPED_FUNC_START(cpu_arm1020_reset) 87 84 mov ip, #0 88 85 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 86 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 92 bic ip, ip, #0x1100 @ ...i...s........ 96 93 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 94 ret r0 98 - ENDPROC(cpu_arm1020_reset) 95 + SYM_FUNC_END(cpu_arm1020_reset) 99 96 .popsection 100 97 101 98 /* 102 99 * cpu_arm1020_do_idle() 103 100 */ 104 101 .align 5 105 - ENTRY(cpu_arm1020_do_idle) 102 + SYM_TYPED_FUNC_START(cpu_arm1020_do_idle) 106 103 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 104 ret lr 105 + SYM_FUNC_END(cpu_arm1020_do_idle) 108 106 109 107 /* ================================= CACHE ================================ */ 110 108 ··· 116 112 * 117 113 * Unconditionally clean and invalidate the entire icache. 
118 114 */ 119 - ENTRY(arm1020_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1020_flush_icache_all) 120 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 121 117 mov r0, #0 122 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 119 #endif 124 120 ret lr 125 - ENDPROC(arm1020_flush_icache_all) 121 + SYM_FUNC_END(arm1020_flush_icache_all) 126 122 127 123 /* 128 124 * flush_user_cache_all() ··· 130 126 * Invalidate all cache entries in a particular address 131 127 * space. 132 128 */ 133 - ENTRY(arm1020_flush_user_cache_all) 134 - /* FALLTHROUGH */ 129 + SYM_FUNC_ALIAS(arm1020_flush_user_cache_all, arm1020_flush_kern_cache_all) 130 + 135 131 /* 136 132 * flush_kern_cache_all() 137 133 * 138 134 * Clean and invalidate the entire cache. 139 135 */ 140 - ENTRY(arm1020_flush_kern_cache_all) 136 + SYM_TYPED_FUNC_START(arm1020_flush_kern_cache_all) 141 137 mov r2, #VM_EXEC 142 138 mov ip, #0 143 139 __flush_whole_cache: ··· 158 154 #endif 159 155 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 160 156 ret lr 157 + SYM_FUNC_END(arm1020_flush_kern_cache_all) 161 158 162 159 /* 163 160 * flush_user_cache_range(start, end, flags) ··· 170 165 * - end - end address (exclusive) 171 166 * - flags - vm_flags for this space 172 167 */ 173 - ENTRY(arm1020_flush_user_cache_range) 168 + SYM_TYPED_FUNC_START(arm1020_flush_user_cache_range) 174 169 mov ip, #0 175 170 sub r3, r1, r0 @ calculate total size 176 171 cmp r3, #CACHE_DLIMIT ··· 190 185 #endif 191 186 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 192 187 ret lr 188 + SYM_FUNC_END(arm1020_flush_user_cache_range) 193 189 194 190 /* 195 191 * coherent_kern_range(start, end) ··· 202 196 * - start - virtual start address 203 197 * - end - virtual end address 204 198 */ 205 - ENTRY(arm1020_coherent_kern_range) 206 - /* FALLTRHOUGH */ 199 + SYM_TYPED_FUNC_START(arm1020_coherent_kern_range) 200 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 201 + b arm1020_coherent_user_range 202 + #endif 203 + SYM_FUNC_END(arm1020_coherent_kern_range) 207 204 208 205 
/* 209 206 * coherent_user_range(start, end) ··· 218 209 * - start - virtual start address 219 210 * - end - virtual end address 220 211 */ 221 - ENTRY(arm1020_coherent_user_range) 212 + SYM_TYPED_FUNC_START(arm1020_coherent_user_range) 222 213 mov ip, #0 223 214 bic r0, r0, #CACHE_DLINESIZE - 1 224 215 mcr p15, 0, ip, c7, c10, 4 ··· 236 227 mcr p15, 0, ip, c7, c10, 4 @ drain WB 237 228 mov r0, #0 238 229 ret lr 230 + SYM_FUNC_END(arm1020_coherent_user_range) 239 231 240 232 /* 241 233 * flush_kern_dcache_area(void *addr, size_t size) ··· 247 237 * - addr - kernel address 248 238 * - size - region size 249 239 */ 250 - ENTRY(arm1020_flush_kern_dcache_area) 240 + SYM_TYPED_FUNC_START(arm1020_flush_kern_dcache_area) 251 241 mov ip, #0 252 242 #ifndef CONFIG_CPU_DCACHE_DISABLE 253 243 add r1, r0, r1 ··· 259 249 #endif 260 250 mcr p15, 0, ip, c7, c10, 4 @ drain WB 261 251 ret lr 252 + SYM_FUNC_END(arm1020_flush_kern_dcache_area) 262 253 263 254 /* 264 255 * dma_inv_range(start, end) ··· 325 314 * - start - virtual start address 326 315 * - end - virtual end address 327 316 */ 328 - ENTRY(arm1020_dma_flush_range) 317 + SYM_TYPED_FUNC_START(arm1020_dma_flush_range) 329 318 mov ip, #0 330 319 #ifndef CONFIG_CPU_DCACHE_DISABLE 331 320 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 338 327 #endif 339 328 mcr p15, 0, ip, c7, c10, 4 @ drain WB 340 329 ret lr 330 + SYM_FUNC_END(arm1020_dma_flush_range) 341 331 342 332 /* 343 333 * dma_map_area(start, size, dir) ··· 346 334 * - size - size of region 347 335 * - dir - DMA direction 348 336 */ 349 - ENTRY(arm1020_dma_map_area) 337 + SYM_TYPED_FUNC_START(arm1020_dma_map_area) 350 338 add r1, r1, r0 351 339 cmp r2, #DMA_TO_DEVICE 352 340 beq arm1020_dma_clean_range 353 341 bcs arm1020_dma_inv_range 354 342 b arm1020_dma_flush_range 355 - ENDPROC(arm1020_dma_map_area) 343 + SYM_FUNC_END(arm1020_dma_map_area) 356 344 357 345 /* 358 346 * dma_unmap_area(start, size, dir) ··· 360 348 * - size - size of region 361 349 * - dir - DMA direction 362 
350 */ 363 - ENTRY(arm1020_dma_unmap_area) 351 + SYM_TYPED_FUNC_START(arm1020_dma_unmap_area) 364 352 ret lr 365 - ENDPROC(arm1020_dma_unmap_area) 366 - 367 - .globl arm1020_flush_kern_cache_louis 368 - .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all 369 - 370 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 371 - define_cache_functions arm1020 353 + SYM_FUNC_END(arm1020_dma_unmap_area) 372 354 373 355 .align 5 374 - ENTRY(cpu_arm1020_dcache_clean_area) 356 + SYM_TYPED_FUNC_START(cpu_arm1020_dcache_clean_area) 375 357 #ifndef CONFIG_CPU_DCACHE_DISABLE 376 358 mov ip, #0 377 359 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 375 369 bhi 1b 376 370 #endif 377 371 ret lr 372 + SYM_FUNC_END(cpu_arm1020_dcache_clean_area) 378 373 379 374 /* =============================== PageTable ============================== */ 380 375 ··· 387 380 * pgd: new page tables 388 381 */ 389 382 .align 5 390 - ENTRY(cpu_arm1020_switch_mm) 383 + SYM_TYPED_FUNC_START(cpu_arm1020_switch_mm) 391 384 #ifdef CONFIG_MMU 392 385 #ifndef CONFIG_CPU_DCACHE_DISABLE 393 386 mcr p15, 0, r3, c7, c10, 4 ··· 415 408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 416 409 #endif /* CONFIG_MMU */ 417 410 ret lr 418 - 411 + SYM_FUNC_END(cpu_arm1020_switch_mm) 412 + 419 413 /* 420 414 * cpu_arm1020_set_pte(ptep, pte) 421 415 * 422 416 * Set a PTE and flush it out 423 417 */ 424 418 .align 5 425 - ENTRY(cpu_arm1020_set_pte_ext) 419 + SYM_TYPED_FUNC_START(cpu_arm1020_set_pte_ext) 426 420 #ifdef CONFIG_MMU 427 421 armv3_set_pte_ext 428 422 mov r0, r0 ··· 434 426 mcr p15, 0, r0, c7, c10, 4 @ drain WB 435 427 #endif /* CONFIG_MMU */ 436 428 ret lr 429 + SYM_FUNC_END(cpu_arm1020_set_pte_ext) 437 430 438 431 .type __arm1020_setup, #function 439 432 __arm1020_setup:
+40 -30
arch/arm/mm/proc-arm1020e.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 57 56 /* 58 57 * cpu_arm1020e_proc_init() 59 58 */ 60 - ENTRY(cpu_arm1020e_proc_init) 59 + SYM_TYPED_FUNC_START(cpu_arm1020e_proc_init) 61 60 ret lr 61 + SYM_FUNC_END(cpu_arm1020e_proc_init) 62 62 63 63 /* 64 64 * cpu_arm1020e_proc_fin() 65 65 */ 66 - ENTRY(cpu_arm1020e_proc_fin) 66 + SYM_TYPED_FUNC_START(cpu_arm1020e_proc_fin) 67 67 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 68 bic r0, r0, #0x1000 @ ...i............ 69 69 bic r0, r0, #0x000e @ ............wca. 70 70 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 71 ret lr 72 + SYM_FUNC_END(cpu_arm1020e_proc_fin) 72 73 73 74 /* 74 75 * cpu_arm1020e_reset(loc) ··· 83 80 */ 84 81 .align 5 85 82 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1020e_reset) 83 + SYM_TYPED_FUNC_START(cpu_arm1020e_reset) 87 84 mov ip, #0 88 85 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 86 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 92 bic ip, ip, #0x1100 @ ...i...s........ 96 93 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 94 ret r0 98 - ENDPROC(cpu_arm1020e_reset) 95 + SYM_FUNC_END(cpu_arm1020e_reset) 99 96 .popsection 100 97 101 98 /* 102 99 * cpu_arm1020e_do_idle() 103 100 */ 104 101 .align 5 105 - ENTRY(cpu_arm1020e_do_idle) 102 + SYM_TYPED_FUNC_START(cpu_arm1020e_do_idle) 106 103 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 104 ret lr 105 + SYM_FUNC_END(cpu_arm1020e_do_idle) 108 106 109 107 /* ================================= CACHE ================================ */ 110 108 ··· 116 112 * 117 113 * Unconditionally clean and invalidate the entire icache. 
118 114 */ 119 - ENTRY(arm1020e_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1020e_flush_icache_all) 120 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 121 117 mov r0, #0 122 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 119 #endif 124 120 ret lr 125 - ENDPROC(arm1020e_flush_icache_all) 121 + SYM_FUNC_END(arm1020e_flush_icache_all) 126 122 127 123 /* 128 124 * flush_user_cache_all() ··· 130 126 * Invalidate all cache entries in a particular address 131 127 * space. 132 128 */ 133 - ENTRY(arm1020e_flush_user_cache_all) 134 - /* FALLTHROUGH */ 129 + SYM_FUNC_ALIAS(arm1020e_flush_user_cache_all, arm1020e_flush_kern_cache_all) 130 + 135 131 /* 136 132 * flush_kern_cache_all() 137 133 * 138 134 * Clean and invalidate the entire cache. 139 135 */ 140 - ENTRY(arm1020e_flush_kern_cache_all) 136 + SYM_TYPED_FUNC_START(arm1020e_flush_kern_cache_all) 141 137 mov r2, #VM_EXEC 142 138 mov ip, #0 143 139 __flush_whole_cache: ··· 157 153 #endif 158 154 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 159 155 ret lr 156 + SYM_FUNC_END(arm1020e_flush_kern_cache_all) 160 157 161 158 /* 162 159 * flush_user_cache_range(start, end, flags) ··· 169 164 * - end - end address (exclusive) 170 165 * - flags - vm_flags for this space 171 166 */ 172 - ENTRY(arm1020e_flush_user_cache_range) 167 + SYM_TYPED_FUNC_START(arm1020e_flush_user_cache_range) 173 168 mov ip, #0 174 169 sub r3, r1, r0 @ calculate total size 175 170 cmp r3, #CACHE_DLIMIT ··· 187 182 #endif 188 183 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 189 184 ret lr 185 + SYM_FUNC_END(arm1020e_flush_user_cache_range) 190 186 191 187 /* 192 188 * coherent_kern_range(start, end) ··· 199 193 * - start - virtual start address 200 194 * - end - virtual end address 201 195 */ 202 - ENTRY(arm1020e_coherent_kern_range) 203 - /* FALLTHROUGH */ 196 + SYM_TYPED_FUNC_START(arm1020e_coherent_kern_range) 197 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 198 + b arm1020e_coherent_user_range 199 + #endif 200 + SYM_FUNC_END(arm1020e_coherent_kern_range) 
201 + 204 202 /* 205 203 * coherent_user_range(start, end) 206 204 * ··· 215 205 * - start - virtual start address 216 206 * - end - virtual end address 217 207 */ 218 - ENTRY(arm1020e_coherent_user_range) 208 + SYM_TYPED_FUNC_START(arm1020e_coherent_user_range) 219 209 mov ip, #0 220 210 bic r0, r0, #CACHE_DLINESIZE - 1 221 211 1: ··· 231 221 mcr p15, 0, ip, c7, c10, 4 @ drain WB 232 222 mov r0, #0 233 223 ret lr 224 + SYM_FUNC_END(arm1020e_coherent_user_range) 234 225 235 226 /* 236 227 * flush_kern_dcache_area(void *addr, size_t size) ··· 242 231 * - addr - kernel address 243 232 * - size - region size 244 233 */ 245 - ENTRY(arm1020e_flush_kern_dcache_area) 234 + SYM_TYPED_FUNC_START(arm1020e_flush_kern_dcache_area) 246 235 mov ip, #0 247 236 #ifndef CONFIG_CPU_DCACHE_DISABLE 248 237 add r1, r0, r1 ··· 253 242 #endif 254 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 255 244 ret lr 245 + SYM_FUNC_END(arm1020e_flush_kern_dcache_area) 256 246 257 247 /* 258 248 * dma_inv_range(start, end) ··· 314 302 * - start - virtual start address 315 303 * - end - virtual end address 316 304 */ 317 - ENTRY(arm1020e_dma_flush_range) 305 + SYM_TYPED_FUNC_START(arm1020e_dma_flush_range) 318 306 mov ip, #0 319 307 #ifndef CONFIG_CPU_DCACHE_DISABLE 320 308 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 325 313 #endif 326 314 mcr p15, 0, ip, c7, c10, 4 @ drain WB 327 315 ret lr 316 + SYM_FUNC_END(arm1020e_dma_flush_range) 328 317 329 318 /* 330 319 * dma_map_area(start, size, dir) ··· 333 320 * - size - size of region 334 321 * - dir - DMA direction 335 322 */ 336 - ENTRY(arm1020e_dma_map_area) 323 + SYM_TYPED_FUNC_START(arm1020e_dma_map_area) 337 324 add r1, r1, r0 338 325 cmp r2, #DMA_TO_DEVICE 339 326 beq arm1020e_dma_clean_range 340 327 bcs arm1020e_dma_inv_range 341 328 b arm1020e_dma_flush_range 342 - ENDPROC(arm1020e_dma_map_area) 329 + SYM_FUNC_END(arm1020e_dma_map_area) 343 330 344 331 /* 345 332 * dma_unmap_area(start, size, dir) ··· 347 334 * - size - size of region 348 335 * - dir - 
DMA direction 349 336 */ 350 - ENTRY(arm1020e_dma_unmap_area) 337 + SYM_TYPED_FUNC_START(arm1020e_dma_unmap_area) 351 338 ret lr 352 - ENDPROC(arm1020e_dma_unmap_area) 353 - 354 - .globl arm1020e_flush_kern_cache_louis 355 - .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all 356 - 357 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 358 - define_cache_functions arm1020e 339 + SYM_FUNC_END(arm1020e_dma_unmap_area) 359 340 360 341 .align 5 361 - ENTRY(cpu_arm1020e_dcache_clean_area) 342 + SYM_TYPED_FUNC_START(cpu_arm1020e_dcache_clean_area) 362 343 #ifndef CONFIG_CPU_DCACHE_DISABLE 363 344 mov ip, #0 364 345 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 361 354 bhi 1b 362 355 #endif 363 356 ret lr 357 + SYM_FUNC_END(cpu_arm1020e_dcache_clean_area) 364 358 365 359 /* =============================== PageTable ============================== */ 366 360 ··· 373 365 * pgd: new page tables 374 366 */ 375 367 .align 5 376 - ENTRY(cpu_arm1020e_switch_mm) 368 + SYM_TYPED_FUNC_START(cpu_arm1020e_switch_mm) 377 369 #ifdef CONFIG_MMU 378 370 #ifndef CONFIG_CPU_DCACHE_DISABLE 379 371 mcr p15, 0, r3, c7, c10, 4 ··· 400 392 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 401 393 #endif 402 394 ret lr 403 - 395 + SYM_FUNC_END(cpu_arm1020e_switch_mm) 396 + 404 397 /* 405 398 * cpu_arm1020e_set_pte(ptep, pte) 406 399 * 407 400 * Set a PTE and flush it out 408 401 */ 409 402 .align 5 410 - ENTRY(cpu_arm1020e_set_pte_ext) 403 + SYM_TYPED_FUNC_START(cpu_arm1020e_set_pte_ext) 411 404 #ifdef CONFIG_MMU 412 405 armv3_set_pte_ext 413 406 mov r0, r0 ··· 417 408 #endif 418 409 #endif /* CONFIG_MMU */ 419 410 ret lr 411 + SYM_FUNC_END(cpu_arm1020e_set_pte_ext) 420 412 421 413 .type __arm1020e_setup, #function 422 414 __arm1020e_setup:
+39 -30
arch/arm/mm/proc-arm1022.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 57 56 /* 58 57 * cpu_arm1022_proc_init() 59 58 */ 60 - ENTRY(cpu_arm1022_proc_init) 59 + SYM_TYPED_FUNC_START(cpu_arm1022_proc_init) 61 60 ret lr 61 + SYM_FUNC_END(cpu_arm1022_proc_init) 62 62 63 63 /* 64 64 * cpu_arm1022_proc_fin() 65 65 */ 66 - ENTRY(cpu_arm1022_proc_fin) 66 + SYM_TYPED_FUNC_START(cpu_arm1022_proc_fin) 67 67 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 68 bic r0, r0, #0x1000 @ ...i............ 69 69 bic r0, r0, #0x000e @ ............wca. 70 70 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 71 ret lr 72 + SYM_FUNC_END(cpu_arm1022_proc_fin) 72 73 73 74 /* 74 75 * cpu_arm1022_reset(loc) ··· 83 80 */ 84 81 .align 5 85 82 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1022_reset) 83 + SYM_TYPED_FUNC_START(cpu_arm1022_reset) 87 84 mov ip, #0 88 85 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 86 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 92 bic ip, ip, #0x1100 @ ...i...s........ 96 93 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 94 ret r0 98 - ENDPROC(cpu_arm1022_reset) 95 + SYM_FUNC_END(cpu_arm1022_reset) 99 96 .popsection 100 97 101 98 /* 102 99 * cpu_arm1022_do_idle() 103 100 */ 104 101 .align 5 105 - ENTRY(cpu_arm1022_do_idle) 102 + SYM_TYPED_FUNC_START(cpu_arm1022_do_idle) 106 103 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 104 ret lr 105 + SYM_FUNC_END(cpu_arm1022_do_idle) 108 106 109 107 /* ================================= CACHE ================================ */ 110 108 ··· 116 112 * 117 113 * Unconditionally clean and invalidate the entire icache. 
118 114 */ 119 - ENTRY(arm1022_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1022_flush_icache_all) 120 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 121 117 mov r0, #0 122 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 119 #endif 124 120 ret lr 125 - ENDPROC(arm1022_flush_icache_all) 121 + SYM_FUNC_END(arm1022_flush_icache_all) 126 122 127 123 /* 128 124 * flush_user_cache_all() ··· 130 126 * Invalidate all cache entries in a particular address 131 127 * space. 132 128 */ 133 - ENTRY(arm1022_flush_user_cache_all) 134 - /* FALLTHROUGH */ 129 + SYM_FUNC_ALIAS(arm1022_flush_user_cache_all, arm1022_flush_kern_cache_all) 130 + 135 131 /* 136 132 * flush_kern_cache_all() 137 133 * 138 134 * Clean and invalidate the entire cache. 139 135 */ 140 - ENTRY(arm1022_flush_kern_cache_all) 136 + SYM_TYPED_FUNC_START(arm1022_flush_kern_cache_all) 141 137 mov r2, #VM_EXEC 142 138 mov ip, #0 143 139 __flush_whole_cache: ··· 156 152 #endif 157 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 158 154 ret lr 155 + SYM_FUNC_END(arm1022_flush_kern_cache_all) 159 156 160 157 /* 161 158 * flush_user_cache_range(start, end, flags) ··· 168 163 * - end - end address (exclusive) 169 164 * - flags - vm_flags for this space 170 165 */ 171 - ENTRY(arm1022_flush_user_cache_range) 166 + SYM_TYPED_FUNC_START(arm1022_flush_user_cache_range) 172 167 mov ip, #0 173 168 sub r3, r1, r0 @ calculate total size 174 169 cmp r3, #CACHE_DLIMIT ··· 186 181 #endif 187 182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 188 183 ret lr 184 + SYM_FUNC_END(arm1022_flush_user_cache_range) 189 185 190 186 /* 191 187 * coherent_kern_range(start, end) ··· 198 192 * - start - virtual start address 199 193 * - end - virtual end address 200 194 */ 201 - ENTRY(arm1022_coherent_kern_range) 202 - /* FALLTHROUGH */ 195 + SYM_TYPED_FUNC_START(arm1022_coherent_kern_range) 196 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 197 + b arm1022_coherent_user_range 198 + #endif 199 + SYM_FUNC_END(arm1022_coherent_kern_range) 203 200 204 201 
/* 205 202 * coherent_user_range(start, end) ··· 214 205 * - start - virtual start address 215 206 * - end - virtual end address 216 207 */ 217 - ENTRY(arm1022_coherent_user_range) 208 + SYM_TYPED_FUNC_START(arm1022_coherent_user_range) 218 209 mov ip, #0 219 210 bic r0, r0, #CACHE_DLINESIZE - 1 220 211 1: ··· 230 221 mcr p15, 0, ip, c7, c10, 4 @ drain WB 231 222 mov r0, #0 232 223 ret lr 224 + SYM_FUNC_END(arm1022_coherent_user_range) 233 225 234 226 /* 235 227 * flush_kern_dcache_area(void *addr, size_t size) ··· 241 231 * - addr - kernel address 242 232 * - size - region size 243 233 */ 244 - ENTRY(arm1022_flush_kern_dcache_area) 234 + SYM_TYPED_FUNC_START(arm1022_flush_kern_dcache_area) 245 235 mov ip, #0 246 236 #ifndef CONFIG_CPU_DCACHE_DISABLE 247 237 add r1, r0, r1 ··· 252 242 #endif 253 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 254 244 ret lr 245 + SYM_FUNC_END(arm1022_flush_kern_dcache_area) 255 246 256 247 /* 257 248 * dma_inv_range(start, end) ··· 313 302 * - start - virtual start address 314 303 * - end - virtual end address 315 304 */ 316 - ENTRY(arm1022_dma_flush_range) 305 + SYM_TYPED_FUNC_START(arm1022_dma_flush_range) 317 306 mov ip, #0 318 307 #ifndef CONFIG_CPU_DCACHE_DISABLE 319 308 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 324 313 #endif 325 314 mcr p15, 0, ip, c7, c10, 4 @ drain WB 326 315 ret lr 316 + SYM_FUNC_END(arm1022_dma_flush_range) 327 317 328 318 /* 329 319 * dma_map_area(start, size, dir) ··· 332 320 * - size - size of region 333 321 * - dir - DMA direction 334 322 */ 335 - ENTRY(arm1022_dma_map_area) 323 + SYM_TYPED_FUNC_START(arm1022_dma_map_area) 336 324 add r1, r1, r0 337 325 cmp r2, #DMA_TO_DEVICE 338 326 beq arm1022_dma_clean_range 339 327 bcs arm1022_dma_inv_range 340 328 b arm1022_dma_flush_range 341 - ENDPROC(arm1022_dma_map_area) 329 + SYM_FUNC_END(arm1022_dma_map_area) 342 330 343 331 /* 344 332 * dma_unmap_area(start, size, dir) ··· 346 334 * - size - size of region 347 335 * - dir - DMA direction 348 336 */ 349 - 
ENTRY(arm1022_dma_unmap_area) 337 + SYM_TYPED_FUNC_START(arm1022_dma_unmap_area) 350 338 ret lr 351 - ENDPROC(arm1022_dma_unmap_area) 352 - 353 - .globl arm1022_flush_kern_cache_louis 354 - .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all 355 - 356 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 357 - define_cache_functions arm1022 339 + SYM_FUNC_END(arm1022_dma_unmap_area) 358 340 359 341 .align 5 360 - ENTRY(cpu_arm1022_dcache_clean_area) 342 + SYM_TYPED_FUNC_START(cpu_arm1022_dcache_clean_area) 361 343 #ifndef CONFIG_CPU_DCACHE_DISABLE 362 344 mov ip, #0 363 345 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 360 354 bhi 1b 361 355 #endif 362 356 ret lr 357 + SYM_FUNC_END(cpu_arm1022_dcache_clean_area) 363 358 364 359 /* =============================== PageTable ============================== */ 365 360 ··· 372 365 * pgd: new page tables 373 366 */ 374 367 .align 5 375 - ENTRY(cpu_arm1022_switch_mm) 368 + SYM_TYPED_FUNC_START(cpu_arm1022_switch_mm) 376 369 #ifdef CONFIG_MMU 377 370 #ifndef CONFIG_CPU_DCACHE_DISABLE 378 371 mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments ··· 392 385 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 393 386 #endif 394 387 ret lr 395 - 388 + SYM_FUNC_END(cpu_arm1022_switch_mm) 389 + 396 390 /* 397 391 * cpu_arm1022_set_pte_ext(ptep, pte, ext) 398 392 * 399 393 * Set a PTE and flush it out 400 394 */ 401 395 .align 5 402 - ENTRY(cpu_arm1022_set_pte_ext) 396 + SYM_TYPED_FUNC_START(cpu_arm1022_set_pte_ext) 403 397 #ifdef CONFIG_MMU 404 398 armv3_set_pte_ext 405 399 mov r0, r0 ··· 409 401 #endif 410 402 #endif /* CONFIG_MMU */ 411 403 ret lr 404 + SYM_FUNC_END(cpu_arm1022_set_pte_ext) 412 405 413 406 .type __arm1022_setup, #function 414 407 __arm1022_setup:
+40 -30
arch/arm/mm/proc-arm1026.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/asm-offsets.h> ··· 57 56 /* 58 57 * cpu_arm1026_proc_init() 59 58 */ 60 - ENTRY(cpu_arm1026_proc_init) 59 + SYM_TYPED_FUNC_START(cpu_arm1026_proc_init) 61 60 ret lr 61 + SYM_FUNC_END(cpu_arm1026_proc_init) 62 62 63 63 /* 64 64 * cpu_arm1026_proc_fin() 65 65 */ 66 - ENTRY(cpu_arm1026_proc_fin) 66 + SYM_TYPED_FUNC_START(cpu_arm1026_proc_fin) 67 67 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 68 68 bic r0, r0, #0x1000 @ ...i............ 69 69 bic r0, r0, #0x000e @ ............wca. 70 70 mcr p15, 0, r0, c1, c0, 0 @ disable caches 71 71 ret lr 72 + SYM_FUNC_END(cpu_arm1026_proc_fin) 72 73 73 74 /* 74 75 * cpu_arm1026_reset(loc) ··· 83 80 */ 84 81 .align 5 85 82 .pushsection .idmap.text, "ax" 86 - ENTRY(cpu_arm1026_reset) 83 + SYM_TYPED_FUNC_START(cpu_arm1026_reset) 87 84 mov ip, #0 88 85 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 89 86 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 95 92 bic ip, ip, #0x1100 @ ...i...s........ 96 93 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 97 94 ret r0 98 - ENDPROC(cpu_arm1026_reset) 95 + SYM_FUNC_END(cpu_arm1026_reset) 99 96 .popsection 100 97 101 98 /* 102 99 * cpu_arm1026_do_idle() 103 100 */ 104 101 .align 5 105 - ENTRY(cpu_arm1026_do_idle) 102 + SYM_TYPED_FUNC_START(cpu_arm1026_do_idle) 106 103 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 107 104 ret lr 105 + SYM_FUNC_END(cpu_arm1026_do_idle) 108 106 109 107 /* ================================= CACHE ================================ */ 110 108 ··· 116 112 * 117 113 * Unconditionally clean and invalidate the entire icache. 
118 114 */ 119 - ENTRY(arm1026_flush_icache_all) 115 + SYM_TYPED_FUNC_START(arm1026_flush_icache_all) 120 116 #ifndef CONFIG_CPU_ICACHE_DISABLE 121 117 mov r0, #0 122 118 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 119 #endif 124 120 ret lr 125 - ENDPROC(arm1026_flush_icache_all) 121 + SYM_FUNC_END(arm1026_flush_icache_all) 126 122 127 123 /* 128 124 * flush_user_cache_all() ··· 130 126 * Invalidate all cache entries in a particular address 131 127 * space. 132 128 */ 133 - ENTRY(arm1026_flush_user_cache_all) 134 - /* FALLTHROUGH */ 129 + SYM_FUNC_ALIAS(arm1026_flush_user_cache_all, arm1026_flush_kern_cache_all) 130 + 135 131 /* 136 132 * flush_kern_cache_all() 137 133 * 138 134 * Clean and invalidate the entire cache. 139 135 */ 140 - ENTRY(arm1026_flush_kern_cache_all) 136 + SYM_TYPED_FUNC_START(arm1026_flush_kern_cache_all) 141 137 mov r2, #VM_EXEC 142 138 mov ip, #0 143 139 __flush_whole_cache: ··· 151 147 #endif 152 148 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 153 149 ret lr 150 + SYM_FUNC_END(arm1026_flush_kern_cache_all) 154 151 155 152 /* 156 153 * flush_user_cache_range(start, end, flags) ··· 163 158 * - end - end address (exclusive) 164 159 * - flags - vm_flags for this space 165 160 */ 166 - ENTRY(arm1026_flush_user_cache_range) 161 + SYM_TYPED_FUNC_START(arm1026_flush_user_cache_range) 167 162 mov ip, #0 168 163 sub r3, r1, r0 @ calculate total size 169 164 cmp r3, #CACHE_DLIMIT ··· 181 176 #endif 182 177 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 183 178 ret lr 179 + SYM_FUNC_END(arm1026_flush_user_cache_range) 184 180 185 181 /* 186 182 * coherent_kern_range(start, end) ··· 193 187 * - start - virtual start address 194 188 * - end - virtual end address 195 189 */ 196 - ENTRY(arm1026_coherent_kern_range) 197 - /* FALLTHROUGH */ 190 + SYM_TYPED_FUNC_START(arm1026_coherent_kern_range) 191 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 192 + b arm1026_coherent_user_range 193 + #endif 194 + SYM_FUNC_END(arm1026_coherent_kern_range) 195 + 198 196 /* 
199 197 * coherent_user_range(start, end) 200 198 * ··· 209 199 * - start - virtual start address 210 200 * - end - virtual end address 211 201 */ 212 - ENTRY(arm1026_coherent_user_range) 202 + SYM_TYPED_FUNC_START(arm1026_coherent_user_range) 213 203 mov ip, #0 214 204 bic r0, r0, #CACHE_DLINESIZE - 1 215 205 1: ··· 225 215 mcr p15, 0, ip, c7, c10, 4 @ drain WB 226 216 mov r0, #0 227 217 ret lr 218 + SYM_FUNC_END(arm1026_coherent_user_range) 228 219 229 220 /* 230 221 * flush_kern_dcache_area(void *addr, size_t size) ··· 236 225 * - addr - kernel address 237 226 * - size - region size 238 227 */ 239 - ENTRY(arm1026_flush_kern_dcache_area) 228 + SYM_TYPED_FUNC_START(arm1026_flush_kern_dcache_area) 240 229 mov ip, #0 241 230 #ifndef CONFIG_CPU_DCACHE_DISABLE 242 231 add r1, r0, r1 ··· 247 236 #endif 248 237 mcr p15, 0, ip, c7, c10, 4 @ drain WB 249 238 ret lr 239 + SYM_FUNC_END(arm1026_flush_kern_dcache_area) 250 240 251 241 /* 252 242 * dma_inv_range(start, end) ··· 308 296 * - start - virtual start address 309 297 * - end - virtual end address 310 298 */ 311 - ENTRY(arm1026_dma_flush_range) 299 + SYM_TYPED_FUNC_START(arm1026_dma_flush_range) 312 300 mov ip, #0 313 301 #ifndef CONFIG_CPU_DCACHE_DISABLE 314 302 bic r0, r0, #CACHE_DLINESIZE - 1 ··· 319 307 #endif 320 308 mcr p15, 0, ip, c7, c10, 4 @ drain WB 321 309 ret lr 310 + SYM_FUNC_END(arm1026_dma_flush_range) 322 311 323 312 /* 324 313 * dma_map_area(start, size, dir) ··· 327 314 * - size - size of region 328 315 * - dir - DMA direction 329 316 */ 330 - ENTRY(arm1026_dma_map_area) 317 + SYM_TYPED_FUNC_START(arm1026_dma_map_area) 331 318 add r1, r1, r0 332 319 cmp r2, #DMA_TO_DEVICE 333 320 beq arm1026_dma_clean_range 334 321 bcs arm1026_dma_inv_range 335 322 b arm1026_dma_flush_range 336 - ENDPROC(arm1026_dma_map_area) 323 + SYM_FUNC_END(arm1026_dma_map_area) 337 324 338 325 /* 339 326 * dma_unmap_area(start, size, dir) ··· 341 328 * - size - size of region 342 329 * - dir - DMA direction 343 330 */ 344 - 
ENTRY(arm1026_dma_unmap_area) 331 + SYM_TYPED_FUNC_START(arm1026_dma_unmap_area) 345 332 ret lr 346 - ENDPROC(arm1026_dma_unmap_area) 347 - 348 - .globl arm1026_flush_kern_cache_louis 349 - .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all 350 - 351 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 352 - define_cache_functions arm1026 333 + SYM_FUNC_END(arm1026_dma_unmap_area) 353 334 354 335 .align 5 355 - ENTRY(cpu_arm1026_dcache_clean_area) 336 + SYM_TYPED_FUNC_START(cpu_arm1026_dcache_clean_area) 356 337 #ifndef CONFIG_CPU_DCACHE_DISABLE 357 338 mov ip, #0 358 339 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry ··· 355 348 bhi 1b 356 349 #endif 357 350 ret lr 351 + SYM_FUNC_END(cpu_arm1026_dcache_clean_area) 358 352 359 353 /* =============================== PageTable ============================== */ 360 354 ··· 367 359 * pgd: new page tables 368 360 */ 369 361 .align 5 370 - ENTRY(cpu_arm1026_switch_mm) 362 + SYM_TYPED_FUNC_START(cpu_arm1026_switch_mm) 371 363 #ifdef CONFIG_MMU 372 364 mov r1, #0 373 365 #ifndef CONFIG_CPU_DCACHE_DISABLE ··· 382 374 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 383 375 #endif 384 376 ret lr 385 - 377 + SYM_FUNC_END(cpu_arm1026_switch_mm) 378 + 386 379 /* 387 380 * cpu_arm1026_set_pte_ext(ptep, pte, ext) 388 381 * 389 382 * Set a PTE and flush it out 390 383 */ 391 384 .align 5 392 - ENTRY(cpu_arm1026_set_pte_ext) 385 + SYM_TYPED_FUNC_START(cpu_arm1026_set_pte_ext) 393 386 #ifdef CONFIG_MMU 394 387 armv3_set_pte_ext 395 388 mov r0, r0 ··· 399 390 #endif 400 391 #endif /* CONFIG_MMU */ 401 392 ret lr 393 + SYM_FUNC_END(cpu_arm1026_set_pte_ext) 402 394 403 395 .type __arm1026_setup, #function 404 396 __arm1026_setup:
+17 -8
arch/arm/mm/proc-arm720.S
··· 20 20 */ 21 21 #include <linux/linkage.h> 22 22 #include <linux/init.h> 23 + #include <linux/cfi_types.h> 23 24 #include <linux/pgtable.h> 24 25 #include <asm/assembler.h> 25 26 #include <asm/asm-offsets.h> ··· 36 35 * 37 36 * Notes : This processor does not require these 38 37 */ 39 - ENTRY(cpu_arm720_dcache_clean_area) 40 - ENTRY(cpu_arm720_proc_init) 38 + SYM_TYPED_FUNC_START(cpu_arm720_dcache_clean_area) 41 39 ret lr 40 + SYM_FUNC_END(cpu_arm720_dcache_clean_area) 42 41 43 - ENTRY(cpu_arm720_proc_fin) 42 + SYM_TYPED_FUNC_START(cpu_arm720_proc_init) 43 + ret lr 44 + SYM_FUNC_END(cpu_arm720_proc_init) 45 + 46 + SYM_TYPED_FUNC_START(cpu_arm720_proc_fin) 44 47 mrc p15, 0, r0, c1, c0, 0 45 48 bic r0, r0, #0x1000 @ ...i............ 46 49 bic r0, r0, #0x000e @ ............wca. 47 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 48 51 ret lr 52 + SYM_FUNC_END(cpu_arm720_proc_fin) 49 53 50 54 /* 51 55 * Function: arm720_proc_do_idle(void) 52 56 * Params : r0 = unused 53 57 * Purpose : put the processor in proper idle mode 54 58 */ 55 - ENTRY(cpu_arm720_do_idle) 59 + SYM_TYPED_FUNC_START(cpu_arm720_do_idle) 56 60 ret lr 61 + SYM_FUNC_END(cpu_arm720_do_idle) 57 62 58 63 /* 59 64 * Function: arm720_switch_mm(unsigned long pgd_phys) ··· 67 60 * Purpose : Perform a task switch, saving the old process' state and restoring 68 61 * the new. 
69 62 */ 70 - ENTRY(cpu_arm720_switch_mm) 63 + SYM_TYPED_FUNC_START(cpu_arm720_switch_mm) 71 64 #ifdef CONFIG_MMU 72 65 mov r1, #0 73 66 mcr p15, 0, r1, c7, c7, 0 @ invalidate cache ··· 75 68 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) 76 69 #endif 77 70 ret lr 71 + SYM_FUNC_END(cpu_arm720_switch_mm) 78 72 79 73 /* 80 74 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) ··· 84 76 * Purpose : Set a PTE and flush it out of any WB cache 85 77 */ 86 78 .align 5 87 - ENTRY(cpu_arm720_set_pte_ext) 79 + SYM_TYPED_FUNC_START(cpu_arm720_set_pte_ext) 88 80 #ifdef CONFIG_MMU 89 81 armv3_set_pte_ext wc_disable=0 90 82 #endif 91 83 ret lr 84 + SYM_FUNC_END(cpu_arm720_set_pte_ext) 92 85 93 86 /* 94 87 * Function: arm720_reset ··· 97 88 * Notes : This sets up everything for a reset 98 89 */ 99 90 .pushsection .idmap.text, "ax" 100 - ENTRY(cpu_arm720_reset) 91 + SYM_TYPED_FUNC_START(cpu_arm720_reset) 101 92 mov ip, #0 102 93 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache 103 94 #ifdef CONFIG_MMU ··· 108 99 bic ip, ip, #0x2100 @ ..v....s........ 109 100 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 110 101 ret r0 111 - ENDPROC(cpu_arm720_reset) 102 + SYM_FUNC_END(cpu_arm720_reset) 112 103 .popsection 113 104 114 105 .type __arm710_setup, #function
+19 -7
arch/arm/mm/proc-arm740.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/asm-offsets.h> ··· 25 24 * 26 25 * These are not required. 27 26 */ 28 - ENTRY(cpu_arm740_proc_init) 29 - ENTRY(cpu_arm740_do_idle) 30 - ENTRY(cpu_arm740_dcache_clean_area) 31 - ENTRY(cpu_arm740_switch_mm) 27 + SYM_TYPED_FUNC_START(cpu_arm740_proc_init) 32 28 ret lr 29 + SYM_FUNC_END(cpu_arm740_proc_init) 30 + 31 + SYM_TYPED_FUNC_START(cpu_arm740_do_idle) 32 + ret lr 33 + SYM_FUNC_END(cpu_arm740_do_idle) 34 + 35 + SYM_TYPED_FUNC_START(cpu_arm740_dcache_clean_area) 36 + ret lr 37 + SYM_FUNC_END(cpu_arm740_dcache_clean_area) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm740_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm740_switch_mm) 33 42 34 43 /* 35 44 * cpu_arm740_proc_fin() 36 45 */ 37 - ENTRY(cpu_arm740_proc_fin) 46 + SYM_TYPED_FUNC_START(cpu_arm740_proc_fin) 38 47 mrc p15, 0, r0, c1, c0, 0 39 48 bic r0, r0, #0x3f000000 @ bank/f/lock/s 40 49 bic r0, r0, #0x0000000c @ w-buffer/cache 41 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 42 51 ret lr 52 + SYM_FUNC_END(cpu_arm740_proc_fin) 43 53 44 54 /* 45 55 * cpu_arm740_reset(loc) ··· 58 46 * Notes : This sets up everything for a reset 59 47 */ 60 48 .pushsection .idmap.text, "ax" 61 - ENTRY(cpu_arm740_reset) 49 + SYM_TYPED_FUNC_START(cpu_arm740_reset) 62 50 mov ip, #0 63 51 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache 64 52 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register 65 53 bic ip, ip, #0x0000000c @ ............wc.. 66 54 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 67 55 ret r0 68 - ENDPROC(cpu_arm740_reset) 56 + SYM_FUNC_END(cpu_arm740_reset) 69 57 .popsection 70 58 71 59 .type __arm740_setup, #function
+23 -11
arch/arm/mm/proc-arm7tdmi.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/asm-offsets.h> ··· 24 23 * cpu_arm7tdmi_switch_mm() 25 24 * 26 25 * These are not required. 27 - */ 28 - ENTRY(cpu_arm7tdmi_proc_init) 29 - ENTRY(cpu_arm7tdmi_do_idle) 30 - ENTRY(cpu_arm7tdmi_dcache_clean_area) 31 - ENTRY(cpu_arm7tdmi_switch_mm) 32 - ret lr 26 + */ 27 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_init) 28 + ret lr 29 + SYM_FUNC_END(cpu_arm7tdmi_proc_init) 30 + 31 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_do_idle) 32 + ret lr 33 + SYM_FUNC_END(cpu_arm7tdmi_do_idle) 34 + 35 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_dcache_clean_area) 36 + ret lr 37 + SYM_FUNC_END(cpu_arm7tdmi_dcache_clean_area) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm7tdmi_switch_mm) 33 42 34 43 /* 35 44 * cpu_arm7tdmi_proc_fin() 36 - */ 37 - ENTRY(cpu_arm7tdmi_proc_fin) 38 - ret lr 45 + */ 46 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_proc_fin) 47 + ret lr 48 + SYM_FUNC_END(cpu_arm7tdmi_proc_fin) 39 49 40 50 /* 41 51 * Function: cpu_arm7tdmi_reset(loc) ··· 54 42 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 55 43 */ 56 44 .pushsection .idmap.text, "ax" 57 - ENTRY(cpu_arm7tdmi_reset) 45 + SYM_TYPED_FUNC_START(cpu_arm7tdmi_reset) 58 46 ret r0 59 - ENDPROC(cpu_arm7tdmi_reset) 47 + SYM_FUNC_END(cpu_arm7tdmi_reset) 60 48 .popsection 61 49 62 50 .type __arm7tdmi_setup, #function
+42 -34
arch/arm/mm/proc-arm920.S
··· 13 13 */ 14 14 #include <linux/linkage.h> 15 15 #include <linux/init.h> 16 + #include <linux/cfi_types.h> 16 17 #include <linux/pgtable.h> 17 18 #include <asm/assembler.h> 18 19 #include <asm/hwcap.h> ··· 49 48 /* 50 49 * cpu_arm920_proc_init() 51 50 */ 52 - ENTRY(cpu_arm920_proc_init) 51 + SYM_TYPED_FUNC_START(cpu_arm920_proc_init) 53 52 ret lr 53 + SYM_FUNC_END(cpu_arm920_proc_init) 54 54 55 55 /* 56 56 * cpu_arm920_proc_fin() 57 57 */ 58 - ENTRY(cpu_arm920_proc_fin) 58 + SYM_TYPED_FUNC_START(cpu_arm920_proc_fin) 59 59 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 60 60 bic r0, r0, #0x1000 @ ...i............ 61 61 bic r0, r0, #0x000e @ ............wca. 62 62 mcr p15, 0, r0, c1, c0, 0 @ disable caches 63 63 ret lr 64 + SYM_FUNC_END(cpu_arm920_proc_fin) 64 65 65 66 /* 66 67 * cpu_arm920_reset(loc) ··· 75 72 */ 76 73 .align 5 77 74 .pushsection .idmap.text, "ax" 78 - ENTRY(cpu_arm920_reset) 75 + SYM_TYPED_FUNC_START(cpu_arm920_reset) 79 76 mov ip, #0 80 77 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 81 78 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 87 84 bic ip, ip, #0x1100 @ ...i...s........ 88 85 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 89 86 ret r0 90 - ENDPROC(cpu_arm920_reset) 87 + SYM_FUNC_END(cpu_arm920_reset) 91 88 .popsection 92 89 93 90 /* 94 91 * cpu_arm920_do_idle() 95 92 */ 96 93 .align 5 97 - ENTRY(cpu_arm920_do_idle) 94 + SYM_TYPED_FUNC_START(cpu_arm920_do_idle) 98 95 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 99 96 ret lr 100 - 97 + SYM_FUNC_END(cpu_arm920_do_idle) 101 98 102 99 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 103 100 ··· 106 103 * 107 104 * Unconditionally clean and invalidate the entire icache. 
108 105 */ 109 - ENTRY(arm920_flush_icache_all) 106 + SYM_TYPED_FUNC_START(arm920_flush_icache_all) 110 107 mov r0, #0 111 108 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 112 109 ret lr 113 - ENDPROC(arm920_flush_icache_all) 110 + SYM_FUNC_END(arm920_flush_icache_all) 114 111 115 112 /* 116 113 * flush_user_cache_all() ··· 118 115 * Invalidate all cache entries in a particular address 119 116 * space. 120 117 */ 121 - ENTRY(arm920_flush_user_cache_all) 122 - /* FALLTHROUGH */ 118 + SYM_FUNC_ALIAS(arm920_flush_user_cache_all, arm920_flush_kern_cache_all) 123 119 124 120 /* 125 121 * flush_kern_cache_all() 126 122 * 127 123 * Clean and invalidate the entire cache. 128 124 */ 129 - ENTRY(arm920_flush_kern_cache_all) 125 + SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all) 130 126 mov r2, #VM_EXEC 131 127 mov ip, #0 132 128 __flush_whole_cache: ··· 140 138 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 141 139 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 142 140 ret lr 141 + SYM_FUNC_END(arm920_flush_kern_cache_all) 143 142 144 143 /* 145 144 * flush_user_cache_range(start, end, flags) ··· 152 149 * - end - end address (exclusive) 153 150 * - flags - vm_flags for address space 154 151 */ 155 - ENTRY(arm920_flush_user_cache_range) 152 + SYM_TYPED_FUNC_START(arm920_flush_user_cache_range) 156 153 mov ip, #0 157 154 sub r3, r1, r0 @ calculate total size 158 155 cmp r3, #CACHE_DLIMIT ··· 167 164 tst r2, #VM_EXEC 168 165 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 169 166 ret lr 167 + SYM_FUNC_END(arm920_flush_user_cache_range) 170 168 171 169 /* 172 170 * coherent_kern_range(start, end) ··· 179 175 * - start - virtual start address 180 176 * - end - virtual end address 181 177 */ 182 - ENTRY(arm920_coherent_kern_range) 183 - /* FALLTHROUGH */ 178 + SYM_TYPED_FUNC_START(arm920_coherent_kern_range) 179 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 180 + b arm920_coherent_user_range 181 + #endif 182 + SYM_FUNC_END(arm920_coherent_kern_range) 184 183 185 184 /* 186 185 * 
coherent_user_range(start, end) ··· 195 188 * - start - virtual start address 196 189 * - end - virtual end address 197 190 */ 198 - ENTRY(arm920_coherent_user_range) 191 + SYM_TYPED_FUNC_START(arm920_coherent_user_range) 199 192 bic r0, r0, #CACHE_DLINESIZE - 1 200 193 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 201 194 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 205 198 mcr p15, 0, r0, c7, c10, 4 @ drain WB 206 199 mov r0, #0 207 200 ret lr 201 + SYM_FUNC_END(arm920_coherent_user_range) 208 202 209 203 /* 210 204 * flush_kern_dcache_area(void *addr, size_t size) ··· 216 208 * - addr - kernel address 217 209 * - size - region size 218 210 */ 219 - ENTRY(arm920_flush_kern_dcache_area) 211 + SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area) 220 212 add r1, r0, r1 221 213 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 222 214 add r0, r0, #CACHE_DLINESIZE ··· 226 218 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 227 219 mcr p15, 0, r0, c7, c10, 4 @ drain WB 228 220 ret lr 221 + SYM_FUNC_END(arm920_flush_kern_dcache_area) 229 222 230 223 /* 231 224 * dma_inv_range(start, end) ··· 281 272 * - start - virtual start address 282 273 * - end - virtual end address 283 274 */ 284 - ENTRY(arm920_dma_flush_range) 275 + SYM_TYPED_FUNC_START(arm920_dma_flush_range) 285 276 bic r0, r0, #CACHE_DLINESIZE - 1 286 277 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 287 278 add r0, r0, #CACHE_DLINESIZE ··· 289 280 blo 1b 290 281 mcr p15, 0, r0, c7, c10, 4 @ drain WB 291 282 ret lr 283 + SYM_FUNC_END(arm920_dma_flush_range) 292 284 293 285 /* 294 286 * dma_map_area(start, size, dir) ··· 297 287 * - size - size of region 298 288 * - dir - DMA direction 299 289 */ 300 - ENTRY(arm920_dma_map_area) 290 + SYM_TYPED_FUNC_START(arm920_dma_map_area) 301 291 add r1, r1, r0 302 292 cmp r2, #DMA_TO_DEVICE 303 293 beq arm920_dma_clean_range 304 294 bcs arm920_dma_inv_range 305 295 b arm920_dma_flush_range 306 - ENDPROC(arm920_dma_map_area) 296 + 
SYM_FUNC_END(arm920_dma_map_area) 307 297 308 298 /* 309 299 * dma_unmap_area(start, size, dir) ··· 311 301 * - size - size of region 312 302 * - dir - DMA direction 313 303 */ 314 - ENTRY(arm920_dma_unmap_area) 304 + SYM_TYPED_FUNC_START(arm920_dma_unmap_area) 315 305 ret lr 316 - ENDPROC(arm920_dma_unmap_area) 306 + SYM_FUNC_END(arm920_dma_unmap_area) 317 307 318 - .globl arm920_flush_kern_cache_louis 319 - .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all 320 - 321 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 322 - define_cache_functions arm920 323 - #endif 308 + #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ 324 309 325 310 326 - ENTRY(cpu_arm920_dcache_clean_area) 311 + SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area) 327 312 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 328 313 add r0, r0, #CACHE_DLINESIZE 329 314 subs r1, r1, #CACHE_DLINESIZE 330 315 bhi 1b 331 316 ret lr 317 + SYM_FUNC_END(cpu_arm920_dcache_clean_area) 332 318 333 319 /* =============================== PageTable ============================== */ 334 320 ··· 336 330 * pgd: new page tables 337 331 */ 338 332 .align 5 339 - ENTRY(cpu_arm920_switch_mm) 333 + SYM_TYPED_FUNC_START(cpu_arm920_switch_mm) 340 334 #ifdef CONFIG_MMU 341 335 mov ip, #0 342 336 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 360 354 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 361 355 #endif 362 356 ret lr 357 + SYM_FUNC_END(cpu_arm920_switch_mm) 363 358 364 359 /* 365 360 * cpu_arm920_set_pte(ptep, pte, ext) ··· 368 361 * Set a PTE and flush it out 369 362 */ 370 363 .align 5 371 - ENTRY(cpu_arm920_set_pte_ext) 364 + SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext) 372 365 #ifdef CONFIG_MMU 373 366 armv3_set_pte_ext 374 367 mov r0, r0 ··· 376 369 mcr p15, 0, r0, c7, c10, 4 @ drain WB 377 370 #endif 378 371 ret lr 372 + SYM_FUNC_END(cpu_arm920_set_pte_ext) 379 373 380 374 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 381 375 .globl cpu_arm920_suspend_size 
382 376 .equ cpu_arm920_suspend_size, 4 * 3 383 377 #ifdef CONFIG_ARM_CPU_SUSPEND 384 - ENTRY(cpu_arm920_do_suspend) 378 + SYM_TYPED_FUNC_START(cpu_arm920_do_suspend) 385 379 stmfd sp!, {r4 - r6, lr} 386 380 mrc p15, 0, r4, c13, c0, 0 @ PID 387 381 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 388 382 mrc p15, 0, r6, c1, c0, 0 @ Control register 389 383 stmia r0, {r4 - r6} 390 384 ldmfd sp!, {r4 - r6, pc} 391 - ENDPROC(cpu_arm920_do_suspend) 385 + SYM_FUNC_END(cpu_arm920_do_suspend) 392 386 393 - ENTRY(cpu_arm920_do_resume) 387 + SYM_TYPED_FUNC_START(cpu_arm920_do_resume) 394 388 mov ip, #0 395 389 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs 396 390 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ··· 401 393 mcr p15, 0, r1, c2, c0, 0 @ TTB address 402 394 mov r0, r6 @ control register 403 395 b cpu_resume_mmu 404 - ENDPROC(cpu_arm920_do_resume) 396 + SYM_FUNC_END(cpu_arm920_do_resume) 405 397 #endif 406 398 407 399 .type __arm920_setup, #function
+38 -31
arch/arm/mm/proc-arm922.S
··· 14 14 */ 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 + #include <linux/cfi_types.h> 17 18 #include <linux/pgtable.h> 18 19 #include <asm/assembler.h> 19 20 #include <asm/hwcap.h> ··· 51 50 /* 52 51 * cpu_arm922_proc_init() 53 52 */ 54 - ENTRY(cpu_arm922_proc_init) 53 + SYM_TYPED_FUNC_START(cpu_arm922_proc_init) 55 54 ret lr 55 + SYM_FUNC_END(cpu_arm922_proc_init) 56 56 57 57 /* 58 58 * cpu_arm922_proc_fin() 59 59 */ 60 - ENTRY(cpu_arm922_proc_fin) 60 + SYM_TYPED_FUNC_START(cpu_arm922_proc_fin) 61 61 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 62 62 bic r0, r0, #0x1000 @ ...i............ 63 63 bic r0, r0, #0x000e @ ............wca. 64 64 mcr p15, 0, r0, c1, c0, 0 @ disable caches 65 65 ret lr 66 + SYM_FUNC_END(cpu_arm922_proc_fin) 66 67 67 68 /* 68 69 * cpu_arm922_reset(loc) ··· 77 74 */ 78 75 .align 5 79 76 .pushsection .idmap.text, "ax" 80 - ENTRY(cpu_arm922_reset) 77 + SYM_TYPED_FUNC_START(cpu_arm922_reset) 81 78 mov ip, #0 82 79 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 83 80 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 89 86 bic ip, ip, #0x1100 @ ...i...s........ 90 87 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 91 88 ret r0 92 - ENDPROC(cpu_arm922_reset) 89 + SYM_FUNC_END(cpu_arm922_reset) 93 90 .popsection 94 91 95 92 /* 96 93 * cpu_arm922_do_idle() 97 94 */ 98 95 .align 5 99 - ENTRY(cpu_arm922_do_idle) 96 + SYM_TYPED_FUNC_START(cpu_arm922_do_idle) 100 97 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 101 98 ret lr 102 - 99 + SYM_FUNC_END(cpu_arm922_do_idle) 103 100 104 101 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 105 102 ··· 108 105 * 109 106 * Unconditionally clean and invalidate the entire icache. 
110 107 */ 111 - ENTRY(arm922_flush_icache_all) 108 + SYM_TYPED_FUNC_START(arm922_flush_icache_all) 112 109 mov r0, #0 113 110 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 114 111 ret lr 115 - ENDPROC(arm922_flush_icache_all) 112 + SYM_FUNC_END(arm922_flush_icache_all) 116 113 117 114 /* 118 115 * flush_user_cache_all() ··· 120 117 * Clean and invalidate all cache entries in a particular 121 118 * address space. 122 119 */ 123 - ENTRY(arm922_flush_user_cache_all) 124 - /* FALLTHROUGH */ 120 + SYM_FUNC_ALIAS(arm922_flush_user_cache_all, arm922_flush_kern_cache_all) 125 121 126 122 /* 127 123 * flush_kern_cache_all() 128 124 * 129 125 * Clean and invalidate the entire cache. 130 126 */ 131 - ENTRY(arm922_flush_kern_cache_all) 127 + SYM_TYPED_FUNC_START(arm922_flush_kern_cache_all) 132 128 mov r2, #VM_EXEC 133 129 mov ip, #0 134 130 __flush_whole_cache: ··· 142 140 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 143 141 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 144 142 ret lr 143 + SYM_FUNC_END(arm922_flush_kern_cache_all) 145 144 146 145 /* 147 146 * flush_user_cache_range(start, end, flags) ··· 154 151 * - end - end address (exclusive) 155 152 * - flags - vm_flags describing address space 156 153 */ 157 - ENTRY(arm922_flush_user_cache_range) 154 + SYM_TYPED_FUNC_START(arm922_flush_user_cache_range) 158 155 mov ip, #0 159 156 sub r3, r1, r0 @ calculate total size 160 157 cmp r3, #CACHE_DLIMIT ··· 169 166 tst r2, #VM_EXEC 170 167 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 168 ret lr 169 + SYM_FUNC_END(arm922_flush_user_cache_range) 172 170 173 171 /* 174 172 * coherent_kern_range(start, end) ··· 181 177 * - start - virtual start address 182 178 * - end - virtual end address 183 179 */ 184 - ENTRY(arm922_coherent_kern_range) 185 - /* FALLTHROUGH */ 180 + SYM_TYPED_FUNC_START(arm922_coherent_kern_range) 181 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 182 + b arm922_coherent_user_range 183 + #endif 184 + SYM_FUNC_END(arm922_coherent_kern_range) 186 185 187 186 
/* 188 187 * coherent_user_range(start, end) ··· 197 190 * - start - virtual start address 198 191 * - end - virtual end address 199 192 */ 200 - ENTRY(arm922_coherent_user_range) 193 + SYM_TYPED_FUNC_START(arm922_coherent_user_range) 201 194 bic r0, r0, #CACHE_DLINESIZE - 1 202 195 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 203 196 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 207 200 mcr p15, 0, r0, c7, c10, 4 @ drain WB 208 201 mov r0, #0 209 202 ret lr 203 + SYM_FUNC_END(arm922_coherent_user_range) 210 204 211 205 /* 212 206 * flush_kern_dcache_area(void *addr, size_t size) ··· 218 210 * - addr - kernel address 219 211 * - size - region size 220 212 */ 221 - ENTRY(arm922_flush_kern_dcache_area) 213 + SYM_TYPED_FUNC_START(arm922_flush_kern_dcache_area) 222 214 add r1, r0, r1 223 215 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 224 216 add r0, r0, #CACHE_DLINESIZE ··· 228 220 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 229 221 mcr p15, 0, r0, c7, c10, 4 @ drain WB 230 222 ret lr 223 + SYM_FUNC_END(arm922_flush_kern_dcache_area) 231 224 232 225 /* 233 226 * dma_inv_range(start, end) ··· 283 274 * - start - virtual start address 284 275 * - end - virtual end address 285 276 */ 286 - ENTRY(arm922_dma_flush_range) 277 + SYM_TYPED_FUNC_START(arm922_dma_flush_range) 287 278 bic r0, r0, #CACHE_DLINESIZE - 1 288 279 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 289 280 add r0, r0, #CACHE_DLINESIZE ··· 291 282 blo 1b 292 283 mcr p15, 0, r0, c7, c10, 4 @ drain WB 293 284 ret lr 285 + SYM_FUNC_END(arm922_dma_flush_range) 294 286 295 287 /* 296 288 * dma_map_area(start, size, dir) ··· 299 289 * - size - size of region 300 290 * - dir - DMA direction 301 291 */ 302 - ENTRY(arm922_dma_map_area) 292 + SYM_TYPED_FUNC_START(arm922_dma_map_area) 303 293 add r1, r1, r0 304 294 cmp r2, #DMA_TO_DEVICE 305 295 beq arm922_dma_clean_range 306 296 bcs arm922_dma_inv_range 307 297 b arm922_dma_flush_range 308 - ENDPROC(arm922_dma_map_area) 298 + 
SYM_FUNC_END(arm922_dma_map_area) 309 299 310 300 /* 311 301 * dma_unmap_area(start, size, dir) ··· 313 303 * - size - size of region 314 304 * - dir - DMA direction 315 305 */ 316 - ENTRY(arm922_dma_unmap_area) 306 + SYM_TYPED_FUNC_START(arm922_dma_unmap_area) 317 307 ret lr 318 - ENDPROC(arm922_dma_unmap_area) 308 + SYM_FUNC_END(arm922_dma_unmap_area) 319 309 320 - .globl arm922_flush_kern_cache_louis 321 - .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all 310 + #endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */ 322 311 323 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 324 - define_cache_functions arm922 325 - #endif 326 - 327 - 328 - ENTRY(cpu_arm922_dcache_clean_area) 312 + SYM_TYPED_FUNC_START(cpu_arm922_dcache_clean_area) 329 313 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 330 314 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 331 315 add r0, r0, #CACHE_DLINESIZE ··· 327 323 bhi 1b 328 324 #endif 329 325 ret lr 326 + SYM_FUNC_END(cpu_arm922_dcache_clean_area) 330 327 331 328 /* =============================== PageTable ============================== */ 332 329 ··· 339 334 * pgd: new page tables 340 335 */ 341 336 .align 5 342 - ENTRY(cpu_arm922_switch_mm) 337 + SYM_TYPED_FUNC_START(cpu_arm922_switch_mm) 343 338 #ifdef CONFIG_MMU 344 339 mov ip, #0 345 340 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 363 358 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 364 359 #endif 365 360 ret lr 361 + SYM_FUNC_END(cpu_arm922_switch_mm) 366 362 367 363 /* 368 364 * cpu_arm922_set_pte_ext(ptep, pte, ext) ··· 371 365 * Set a PTE and flush it out 372 366 */ 373 367 .align 5 374 - ENTRY(cpu_arm922_set_pte_ext) 368 + SYM_TYPED_FUNC_START(cpu_arm922_set_pte_ext) 375 369 #ifdef CONFIG_MMU 376 370 armv3_set_pte_ext 377 371 mov r0, r0 ··· 379 373 mcr p15, 0, r0, c7, c10, 4 @ drain WB 380 374 #endif /* CONFIG_MMU */ 381 375 ret lr 376 + SYM_FUNC_END(cpu_arm922_set_pte_ext) 382 377 383 378 .type __arm922_setup, #function 384 379 __arm922_setup:
+37 -29
arch/arm/mm/proc-arm925.S
··· 37 37 38 38 #include <linux/linkage.h> 39 39 #include <linux/init.h> 40 + #include <linux/cfi_types.h> 40 41 #include <linux/pgtable.h> 41 42 #include <asm/assembler.h> 42 43 #include <asm/hwcap.h> ··· 72 71 /* 73 72 * cpu_arm925_proc_init() 74 73 */ 75 - ENTRY(cpu_arm925_proc_init) 74 + SYM_TYPED_FUNC_START(cpu_arm925_proc_init) 76 75 ret lr 76 + SYM_FUNC_END(cpu_arm925_proc_init) 77 77 78 78 /* 79 79 * cpu_arm925_proc_fin() 80 80 */ 81 - ENTRY(cpu_arm925_proc_fin) 81 + SYM_TYPED_FUNC_START(cpu_arm925_proc_fin) 82 82 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 83 83 bic r0, r0, #0x1000 @ ...i............ 84 84 bic r0, r0, #0x000e @ ............wca. 85 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 86 86 ret lr 87 + SYM_FUNC_END(cpu_arm925_proc_fin) 87 88 88 89 /* 89 90 * cpu_arm925_reset(loc) ··· 98 95 */ 99 96 .align 5 100 97 .pushsection .idmap.text, "ax" 101 - ENTRY(cpu_arm925_reset) 98 + SYM_TYPED_FUNC_START(cpu_arm925_reset) 102 99 /* Send software reset to MPU and DSP */ 103 100 mov ip, #0xff000000 104 101 orr ip, ip, #0x00fe0000 105 102 orr ip, ip, #0x0000ce00 106 103 mov r4, #1 107 104 strh r4, [ip, #0x10] 108 - ENDPROC(cpu_arm925_reset) 105 + SYM_FUNC_END(cpu_arm925_reset) 109 106 .popsection 110 107 111 108 mov ip, #0 ··· 126 123 * Called with IRQs disabled 127 124 */ 128 125 .align 10 129 - ENTRY(cpu_arm925_do_idle) 126 + SYM_TYPED_FUNC_START(cpu_arm925_do_idle) 130 127 mov r0, #0 131 128 mrc p15, 0, r1, c1, c0, 0 @ Read control register 132 129 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer ··· 135 132 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 136 133 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 137 134 ret lr 135 + SYM_FUNC_END(cpu_arm925_do_idle) 138 136 139 137 /* 140 138 * flush_icache_all() 141 139 * 142 140 * Unconditionally clean and invalidate the entire icache. 
143 141 */ 144 - ENTRY(arm925_flush_icache_all) 142 + SYM_TYPED_FUNC_START(arm925_flush_icache_all) 145 143 mov r0, #0 146 144 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 147 145 ret lr 148 - ENDPROC(arm925_flush_icache_all) 146 + SYM_FUNC_END(arm925_flush_icache_all) 149 147 150 148 /* 151 149 * flush_user_cache_all() ··· 154 150 * Clean and invalidate all cache entries in a particular 155 151 * address space. 156 152 */ 157 - ENTRY(arm925_flush_user_cache_all) 158 - /* FALLTHROUGH */ 153 + SYM_FUNC_ALIAS(arm925_flush_user_cache_all, arm925_flush_kern_cache_all) 159 154 160 155 /* 161 156 * flush_kern_cache_all() 162 157 * 163 158 * Clean and invalidate the entire cache. 164 159 */ 165 - ENTRY(arm925_flush_kern_cache_all) 160 + SYM_TYPED_FUNC_START(arm925_flush_kern_cache_all) 166 161 mov r2, #VM_EXEC 167 162 mov ip, #0 168 163 __flush_whole_cache: ··· 178 175 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 179 176 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 180 177 ret lr 178 + SYM_FUNC_END(arm925_flush_kern_cache_all) 181 179 182 180 /* 183 181 * flush_user_cache_range(start, end, flags) ··· 190 186 * - end - end address (exclusive) 191 187 * - flags - vm_flags describing address space 192 188 */ 193 - ENTRY(arm925_flush_user_cache_range) 189 + SYM_TYPED_FUNC_START(arm925_flush_user_cache_range) 194 190 mov ip, #0 195 191 sub r3, r1, r0 @ calculate total size 196 192 cmp r3, #CACHE_DLIMIT ··· 216 212 tst r2, #VM_EXEC 217 213 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 218 214 ret lr 215 + SYM_FUNC_END(arm925_flush_user_cache_range) 219 216 220 217 /* 221 218 * coherent_kern_range(start, end) ··· 228 223 * - start - virtual start address 229 224 * - end - virtual end address 230 225 */ 231 - ENTRY(arm925_coherent_kern_range) 232 - /* FALLTHROUGH */ 226 + SYM_TYPED_FUNC_START(arm925_coherent_kern_range) 227 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 228 + b arm925_coherent_user_range 229 + #endif 230 + SYM_FUNC_END(arm925_coherent_kern_range) 233 231 234 232 
/* 235 233 * coherent_user_range(start, end) ··· 244 236 * - start - virtual start address 245 237 * - end - virtual end address 246 238 */ 247 - ENTRY(arm925_coherent_user_range) 239 + SYM_TYPED_FUNC_START(arm925_coherent_user_range) 248 240 bic r0, r0, #CACHE_DLINESIZE - 1 249 241 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 250 242 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 254 246 mcr p15, 0, r0, c7, c10, 4 @ drain WB 255 247 mov r0, #0 256 248 ret lr 249 + SYM_FUNC_END(arm925_coherent_user_range) 257 250 258 251 /* 259 252 * flush_kern_dcache_area(void *addr, size_t size) ··· 265 256 * - addr - kernel address 266 257 * - size - region size 267 258 */ 268 - ENTRY(arm925_flush_kern_dcache_area) 259 + SYM_TYPED_FUNC_START(arm925_flush_kern_dcache_area) 269 260 add r1, r0, r1 270 261 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 271 262 add r0, r0, #CACHE_DLINESIZE ··· 275 266 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 276 267 mcr p15, 0, r0, c7, c10, 4 @ drain WB 277 268 ret lr 269 + SYM_FUNC_END(arm925_flush_kern_dcache_area) 278 270 279 271 /* 280 272 * dma_inv_range(start, end) ··· 334 324 * - start - virtual start address 335 325 * - end - virtual end address 336 326 */ 337 - ENTRY(arm925_dma_flush_range) 327 + SYM_TYPED_FUNC_START(arm925_dma_flush_range) 338 328 bic r0, r0, #CACHE_DLINESIZE - 1 339 329 1: 340 330 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 347 337 blo 1b 348 338 mcr p15, 0, r0, c7, c10, 4 @ drain WB 349 339 ret lr 340 + SYM_FUNC_END(arm925_dma_flush_range) 350 341 351 342 /* 352 343 * dma_map_area(start, size, dir) ··· 355 344 * - size - size of region 356 345 * - dir - DMA direction 357 346 */ 358 - ENTRY(arm925_dma_map_area) 347 + SYM_TYPED_FUNC_START(arm925_dma_map_area) 359 348 add r1, r1, r0 360 349 cmp r2, #DMA_TO_DEVICE 361 350 beq arm925_dma_clean_range 362 351 bcs arm925_dma_inv_range 363 352 b arm925_dma_flush_range 364 - ENDPROC(arm925_dma_map_area) 353 + SYM_FUNC_END(arm925_dma_map_area) 365 354 366 355 /* 
367 356 * dma_unmap_area(start, size, dir) ··· 369 358 * - size - size of region 370 359 * - dir - DMA direction 371 360 */ 372 - ENTRY(arm925_dma_unmap_area) 361 + SYM_TYPED_FUNC_START(arm925_dma_unmap_area) 373 362 ret lr 374 - ENDPROC(arm925_dma_unmap_area) 363 + SYM_FUNC_END(arm925_dma_unmap_area) 375 364 376 - .globl arm925_flush_kern_cache_louis 377 - .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all 378 - 379 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 380 - define_cache_functions arm925 381 - 382 - ENTRY(cpu_arm925_dcache_clean_area) 365 + SYM_TYPED_FUNC_START(cpu_arm925_dcache_clean_area) 383 366 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 384 367 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 385 368 add r0, r0, #CACHE_DLINESIZE ··· 382 377 #endif 383 378 mcr p15, 0, r0, c7, c10, 4 @ drain WB 384 379 ret lr 380 + SYM_FUNC_END(cpu_arm925_dcache_clean_area) 385 381 386 382 /* =============================== PageTable ============================== */ 387 383 ··· 394 388 * pgd: new page tables 395 389 */ 396 390 .align 5 397 - ENTRY(cpu_arm925_switch_mm) 391 + SYM_TYPED_FUNC_START(cpu_arm925_switch_mm) 398 392 #ifdef CONFIG_MMU 399 393 mov ip, #0 400 394 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 412 406 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 413 407 #endif 414 408 ret lr 409 + SYM_FUNC_END(cpu_arm925_switch_mm) 415 410 416 411 /* 417 412 * cpu_arm925_set_pte_ext(ptep, pte, ext) ··· 420 413 * Set a PTE and flush it out 421 414 */ 422 415 .align 5 423 - ENTRY(cpu_arm925_set_pte_ext) 416 + SYM_TYPED_FUNC_START(cpu_arm925_set_pte_ext) 424 417 #ifdef CONFIG_MMU 425 418 armv3_set_pte_ext 426 419 mov r0, r0 ··· 430 423 mcr p15, 0, r0, c7, c10, 4 @ drain WB 431 424 #endif /* CONFIG_MMU */ 432 425 ret lr 426 + SYM_FUNC_END(cpu_arm925_set_pte_ext) 433 427 434 428 .type __arm925_setup, #function 435 429 __arm925_setup:
+42 -33
arch/arm/mm/proc-arm926.S
··· 13 13 */ 14 14 #include <linux/linkage.h> 15 15 #include <linux/init.h> 16 + #include <linux/cfi_types.h> 16 17 #include <linux/pgtable.h> 17 18 #include <asm/assembler.h> 18 19 #include <asm/hwcap.h> ··· 41 40 /* 42 41 * cpu_arm926_proc_init() 43 42 */ 44 - ENTRY(cpu_arm926_proc_init) 43 + SYM_TYPED_FUNC_START(cpu_arm926_proc_init) 45 44 ret lr 45 + SYM_FUNC_END(cpu_arm926_proc_init) 46 46 47 47 /* 48 48 * cpu_arm926_proc_fin() 49 49 */ 50 - ENTRY(cpu_arm926_proc_fin) 50 + SYM_TYPED_FUNC_START(cpu_arm926_proc_fin) 51 51 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 52 52 bic r0, r0, #0x1000 @ ...i............ 53 53 bic r0, r0, #0x000e @ ............wca. 54 54 mcr p15, 0, r0, c1, c0, 0 @ disable caches 55 55 ret lr 56 + SYM_FUNC_END(cpu_arm926_proc_fin) 56 57 57 58 /* 58 59 * cpu_arm926_reset(loc) ··· 67 64 */ 68 65 .align 5 69 66 .pushsection .idmap.text, "ax" 70 - ENTRY(cpu_arm926_reset) 67 + SYM_TYPED_FUNC_START(cpu_arm926_reset) 71 68 mov ip, #0 72 69 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 73 70 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 79 76 bic ip, ip, #0x1100 @ ...i...s........ 80 77 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 81 78 ret r0 82 - ENDPROC(cpu_arm926_reset) 79 + SYM_FUNC_END(cpu_arm926_reset) 83 80 .popsection 84 81 85 82 /* ··· 88 85 * Called with IRQs disabled 89 86 */ 90 87 .align 10 91 - ENTRY(cpu_arm926_do_idle) 88 + SYM_TYPED_FUNC_START(cpu_arm926_do_idle) 92 89 mov r0, #0 93 90 mrc p15, 0, r1, c1, c0, 0 @ Read control register 94 91 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer ··· 101 98 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 102 99 msr cpsr_c, r3 @ Restore FIQ state 103 100 ret lr 101 + SYM_FUNC_END(cpu_arm926_do_idle) 104 102 105 103 /* 106 104 * flush_icache_all() 107 105 * 108 106 * Unconditionally clean and invalidate the entire icache. 
109 107 */ 110 - ENTRY(arm926_flush_icache_all) 108 + SYM_TYPED_FUNC_START(arm926_flush_icache_all) 111 109 mov r0, #0 112 110 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 113 111 ret lr 114 - ENDPROC(arm926_flush_icache_all) 112 + SYM_FUNC_END(arm926_flush_icache_all) 115 113 116 114 /* 117 115 * flush_user_cache_all() ··· 120 116 * Clean and invalidate all cache entries in a particular 121 117 * address space. 122 118 */ 123 - ENTRY(arm926_flush_user_cache_all) 124 - /* FALLTHROUGH */ 119 + SYM_FUNC_ALIAS(arm926_flush_user_cache_all, arm926_flush_kern_cache_all) 125 120 126 121 /* 127 122 * flush_kern_cache_all() 128 123 * 129 124 * Clean and invalidate the entire cache. 130 125 */ 131 - ENTRY(arm926_flush_kern_cache_all) 126 + SYM_TYPED_FUNC_START(arm926_flush_kern_cache_all) 132 127 mov r2, #VM_EXEC 133 128 mov ip, #0 134 129 __flush_whole_cache: ··· 141 138 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 142 139 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 143 140 ret lr 141 + SYM_FUNC_END(arm926_flush_kern_cache_all) 144 142 145 143 /* 146 144 * flush_user_cache_range(start, end, flags) ··· 153 149 * - end - end address (exclusive) 154 150 * - flags - vm_flags describing address space 155 151 */ 156 - ENTRY(arm926_flush_user_cache_range) 152 + SYM_TYPED_FUNC_START(arm926_flush_user_cache_range) 157 153 mov ip, #0 158 154 sub r3, r1, r0 @ calculate total size 159 155 cmp r3, #CACHE_DLIMIT ··· 179 175 tst r2, #VM_EXEC 180 176 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 181 177 ret lr 178 + SYM_FUNC_END(arm926_flush_user_cache_range) 182 179 183 180 /* 184 181 * coherent_kern_range(start, end) ··· 191 186 * - start - virtual start address 192 187 * - end - virtual end address 193 188 */ 194 - ENTRY(arm926_coherent_kern_range) 195 - /* FALLTHROUGH */ 189 + SYM_TYPED_FUNC_START(arm926_coherent_kern_range) 190 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 191 + b arm926_coherent_user_range 192 + #endif 193 + SYM_FUNC_END(arm926_coherent_kern_range) 196 194 197 195 
/* 198 196 * coherent_user_range(start, end) ··· 207 199 * - start - virtual start address 208 200 * - end - virtual end address 209 201 */ 210 - ENTRY(arm926_coherent_user_range) 202 + SYM_TYPED_FUNC_START(arm926_coherent_user_range) 211 203 bic r0, r0, #CACHE_DLINESIZE - 1 212 204 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 213 205 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 217 209 mcr p15, 0, r0, c7, c10, 4 @ drain WB 218 210 mov r0, #0 219 211 ret lr 212 + SYM_FUNC_END(arm926_coherent_user_range) 220 213 221 214 /* 222 215 * flush_kern_dcache_area(void *addr, size_t size) ··· 228 219 * - addr - kernel address 229 220 * - size - region size 230 221 */ 231 - ENTRY(arm926_flush_kern_dcache_area) 222 + SYM_TYPED_FUNC_START(arm926_flush_kern_dcache_area) 232 223 add r1, r0, r1 233 224 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 234 225 add r0, r0, #CACHE_DLINESIZE ··· 238 229 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 239 230 mcr p15, 0, r0, c7, c10, 4 @ drain WB 240 231 ret lr 232 + SYM_FUNC_END(arm926_flush_kern_dcache_area) 241 233 242 234 /* 243 235 * dma_inv_range(start, end) ··· 297 287 * - start - virtual start address 298 288 * - end - virtual end address 299 289 */ 300 - ENTRY(arm926_dma_flush_range) 290 + SYM_TYPED_FUNC_START(arm926_dma_flush_range) 301 291 bic r0, r0, #CACHE_DLINESIZE - 1 302 292 1: 303 293 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 310 300 blo 1b 311 301 mcr p15, 0, r0, c7, c10, 4 @ drain WB 312 302 ret lr 303 + SYM_FUNC_END(arm926_dma_flush_range) 313 304 314 305 /* 315 306 * dma_map_area(start, size, dir) ··· 318 307 * - size - size of region 319 308 * - dir - DMA direction 320 309 */ 321 - ENTRY(arm926_dma_map_area) 310 + SYM_TYPED_FUNC_START(arm926_dma_map_area) 322 311 add r1, r1, r0 323 312 cmp r2, #DMA_TO_DEVICE 324 313 beq arm926_dma_clean_range 325 314 bcs arm926_dma_inv_range 326 315 b arm926_dma_flush_range 327 - ENDPROC(arm926_dma_map_area) 316 + SYM_FUNC_END(arm926_dma_map_area) 328 317 329 318 /* 
330 319 * dma_unmap_area(start, size, dir) ··· 332 321 * - size - size of region 333 322 * - dir - DMA direction 334 323 */ 335 - ENTRY(arm926_dma_unmap_area) 324 + SYM_TYPED_FUNC_START(arm926_dma_unmap_area) 336 325 ret lr 337 - ENDPROC(arm926_dma_unmap_area) 326 + SYM_FUNC_END(arm926_dma_unmap_area) 338 327 339 - .globl arm926_flush_kern_cache_louis 340 - .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all 341 - 342 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 343 - define_cache_functions arm926 344 - 345 - ENTRY(cpu_arm926_dcache_clean_area) 328 + SYM_TYPED_FUNC_START(cpu_arm926_dcache_clean_area) 346 329 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 347 330 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 348 331 add r0, r0, #CACHE_DLINESIZE ··· 345 340 #endif 346 341 mcr p15, 0, r0, c7, c10, 4 @ drain WB 347 342 ret lr 343 + SYM_FUNC_END(cpu_arm926_dcache_clean_area) 348 344 349 345 /* =============================== PageTable ============================== */ 350 346 ··· 357 351 * pgd: new page tables 358 352 */ 359 353 .align 5 360 - ENTRY(cpu_arm926_switch_mm) 354 + 355 + SYM_TYPED_FUNC_START(cpu_arm926_switch_mm) 361 356 #ifdef CONFIG_MMU 362 357 mov ip, #0 363 358 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 374 367 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 375 368 #endif 376 369 ret lr 370 + SYM_FUNC_END(cpu_arm926_switch_mm) 377 371 378 372 /* 379 373 * cpu_arm926_set_pte_ext(ptep, pte, ext) ··· 382 374 * Set a PTE and flush it out 383 375 */ 384 376 .align 5 385 - ENTRY(cpu_arm926_set_pte_ext) 377 + SYM_TYPED_FUNC_START(cpu_arm926_set_pte_ext) 386 378 #ifdef CONFIG_MMU 387 379 armv3_set_pte_ext 388 380 mov r0, r0 ··· 392 384 mcr p15, 0, r0, c7, c10, 4 @ drain WB 393 385 #endif 394 386 ret lr 387 + SYM_FUNC_END(cpu_arm926_set_pte_ext) 395 388 396 389 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 397 390 .globl cpu_arm926_suspend_size 398 391 .equ cpu_arm926_suspend_size, 4 * 3 399 392 #ifdef 
CONFIG_ARM_CPU_SUSPEND 400 - ENTRY(cpu_arm926_do_suspend) 393 + SYM_TYPED_FUNC_START(cpu_arm926_do_suspend) 401 394 stmfd sp!, {r4 - r6, lr} 402 395 mrc p15, 0, r4, c13, c0, 0 @ PID 403 396 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 404 397 mrc p15, 0, r6, c1, c0, 0 @ Control register 405 398 stmia r0, {r4 - r6} 406 399 ldmfd sp!, {r4 - r6, pc} 407 - ENDPROC(cpu_arm926_do_suspend) 400 + SYM_FUNC_END(cpu_arm926_do_suspend) 408 401 409 - ENTRY(cpu_arm926_do_resume) 402 + SYM_TYPED_FUNC_START(cpu_arm926_do_resume) 410 403 mov ip, #0 411 404 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs 412 405 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ··· 417 408 mcr p15, 0, r1, c2, c0, 0 @ TTB address 418 409 mov r0, r6 @ control register 419 410 b cpu_resume_mmu 420 - ENDPROC(cpu_arm926_do_resume) 411 + SYM_FUNC_END(cpu_arm926_do_resume) 421 412 #endif 422 413 423 414 .type __arm926_setup, #function
+39 -30
arch/arm/mm/proc-arm940.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/hwcap.h> ··· 26 25 * 27 26 * These are not required. 28 27 */ 29 - ENTRY(cpu_arm940_proc_init) 30 - ENTRY(cpu_arm940_switch_mm) 28 + SYM_TYPED_FUNC_START(cpu_arm940_proc_init) 31 29 ret lr 30 + SYM_FUNC_END(cpu_arm940_proc_init) 31 + 32 + SYM_TYPED_FUNC_START(cpu_arm940_switch_mm) 33 + ret lr 34 + SYM_FUNC_END(cpu_arm940_switch_mm) 32 35 33 36 /* 34 37 * cpu_arm940_proc_fin() 35 38 */ 36 - ENTRY(cpu_arm940_proc_fin) 39 + SYM_TYPED_FUNC_START(cpu_arm940_proc_fin) 37 40 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 38 41 bic r0, r0, #0x00001000 @ i-cache 39 42 bic r0, r0, #0x00000004 @ d-cache 40 43 mcr p15, 0, r0, c1, c0, 0 @ disable caches 41 44 ret lr 45 + SYM_FUNC_END(cpu_arm940_proc_fin) 42 46 43 47 /* 44 48 * cpu_arm940_reset(loc) ··· 51 45 * Notes : This sets up everything for a reset 52 46 */ 53 47 .pushsection .idmap.text, "ax" 54 - ENTRY(cpu_arm940_reset) 48 + SYM_TYPED_FUNC_START(cpu_arm940_reset) 55 49 mov ip, #0 56 50 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 57 51 mcr p15, 0, ip, c7, c6, 0 @ flush D cache ··· 61 55 bic ip, ip, #0x00001000 @ i-cache 62 56 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 63 57 ret r0 64 - ENDPROC(cpu_arm940_reset) 58 + SYM_FUNC_END(cpu_arm940_reset) 65 59 .popsection 66 60 67 61 /* 68 62 * cpu_arm940_do_idle() 69 63 */ 70 64 .align 5 71 - ENTRY(cpu_arm940_do_idle) 65 + SYM_TYPED_FUNC_START(cpu_arm940_do_idle) 72 66 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 73 67 ret lr 68 + SYM_FUNC_END(cpu_arm940_do_idle) 74 69 75 70 /* 76 71 * flush_icache_all() 77 72 * 78 73 * Unconditionally clean and invalidate the entire icache. 
79 74 */ 80 - ENTRY(arm940_flush_icache_all) 75 + SYM_TYPED_FUNC_START(arm940_flush_icache_all) 81 76 mov r0, #0 82 77 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 83 78 ret lr 84 - ENDPROC(arm940_flush_icache_all) 79 + SYM_FUNC_END(arm940_flush_icache_all) 85 80 86 81 /* 87 82 * flush_user_cache_all() 88 83 */ 89 - ENTRY(arm940_flush_user_cache_all) 90 - /* FALLTHROUGH */ 84 + SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all) 91 85 92 86 /* 93 87 * flush_kern_cache_all() 94 88 * 95 89 * Clean and invalidate the entire cache. 96 90 */ 97 - ENTRY(arm940_flush_kern_cache_all) 91 + SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all) 98 92 mov r2, #VM_EXEC 99 - /* FALLTHROUGH */ 93 + b arm940_flush_user_cache_range 94 + SYM_FUNC_END(arm940_flush_kern_cache_all) 100 95 101 96 /* 102 97 * flush_user_cache_range(start, end, flags) ··· 109 102 * - end - end address (exclusive) 110 103 * - flags - vm_flags describing address space 111 104 */ 112 - ENTRY(arm940_flush_user_cache_range) 105 + SYM_TYPED_FUNC_START(arm940_flush_user_cache_range) 113 106 mov ip, #0 114 107 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH 115 108 mcr p15, 0, ip, c7, c6, 0 @ flush D cache ··· 126 119 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 127 120 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 128 121 ret lr 122 + SYM_FUNC_END(arm940_flush_user_cache_range) 129 123 130 124 /* 131 125 * coherent_kern_range(start, end) ··· 138 130 * - start - virtual start address 139 131 * - end - virtual end address 140 132 */ 141 - ENTRY(arm940_coherent_kern_range) 142 - /* FALLTHROUGH */ 133 + SYM_TYPED_FUNC_START(arm940_coherent_kern_range) 134 + b arm940_flush_kern_dcache_area 135 + SYM_FUNC_END(arm940_coherent_kern_range) 143 136 144 137 /* 145 138 * coherent_user_range(start, end) ··· 152 143 * - start - virtual start address 153 144 * - end - virtual end address 154 145 */ 155 - ENTRY(arm940_coherent_user_range) 156 - /* FALLTHROUGH */ 146 + SYM_TYPED_FUNC_START(arm940_coherent_user_range) 147 
+ #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 148 + b arm940_flush_kern_dcache_area 149 + #endif 150 + SYM_FUNC_END(arm940_coherent_user_range) 157 151 158 152 /* 159 153 * flush_kern_dcache_area(void *addr, size_t size) ··· 167 155 * - addr - kernel address 168 156 * - size - region size 169 157 */ 170 - ENTRY(arm940_flush_kern_dcache_area) 158 + SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area) 171 159 mov r0, #0 172 160 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 173 161 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries ··· 179 167 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 180 168 mcr p15, 0, r0, c7, c10, 4 @ drain WB 181 169 ret lr 170 + SYM_FUNC_END(arm940_flush_kern_dcache_area) 182 171 183 172 /* 184 173 * dma_inv_range(start, end) ··· 212 199 * - end - virtual end address 213 200 */ 214 201 arm940_dma_clean_range: 215 - ENTRY(cpu_arm940_dcache_clean_area) 202 + SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area) 216 203 mov ip, #0 217 204 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 218 205 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments ··· 225 212 #endif 226 213 mcr p15, 0, ip, c7, c10, 4 @ drain WB 227 214 ret lr 215 + SYM_FUNC_END(cpu_arm940_dcache_clean_area) 228 216 229 217 /* 230 218 * dma_flush_range(start, end) ··· 236 222 * - start - virtual start address 237 223 * - end - virtual end address 238 224 */ 239 - ENTRY(arm940_dma_flush_range) 225 + SYM_TYPED_FUNC_START(arm940_dma_flush_range) 240 226 mov ip, #0 241 227 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 242 228 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries ··· 252 238 bcs 1b @ segments 7 to 0 253 239 mcr p15, 0, ip, c7, c10, 4 @ drain WB 254 240 ret lr 241 + SYM_FUNC_END(arm940_dma_flush_range) 255 242 256 243 /* 257 244 * dma_map_area(start, size, dir) ··· 260 245 * - size - size of region 261 246 * - dir - DMA direction 262 247 */ 263 - ENTRY(arm940_dma_map_area) 248 + SYM_TYPED_FUNC_START(arm940_dma_map_area) 264 249 add r1, r1, r0 265 250 cmp r2, 
#DMA_TO_DEVICE 266 251 beq arm940_dma_clean_range 267 252 bcs arm940_dma_inv_range 268 253 b arm940_dma_flush_range 269 - ENDPROC(arm940_dma_map_area) 254 + SYM_FUNC_END(arm940_dma_map_area) 270 255 271 256 /* 272 257 * dma_unmap_area(start, size, dir) ··· 274 259 * - size - size of region 275 260 * - dir - DMA direction 276 261 */ 277 - ENTRY(arm940_dma_unmap_area) 262 + SYM_TYPED_FUNC_START(arm940_dma_unmap_area) 278 263 ret lr 279 - ENDPROC(arm940_dma_unmap_area) 280 - 281 - .globl arm940_flush_kern_cache_louis 282 - .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all 283 - 284 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 285 - define_cache_functions arm940 264 + SYM_FUNC_END(arm940_dma_unmap_area) 286 265 287 266 .type __arm940_setup, #function 288 267 __arm940_setup:
+37 -28
arch/arm/mm/proc-arm946.S
··· 8 8 */ 9 9 #include <linux/linkage.h> 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <linux/pgtable.h> 12 13 #include <asm/assembler.h> 13 14 #include <asm/hwcap.h> ··· 33 32 * 34 33 * These are not required. 35 34 */ 36 - ENTRY(cpu_arm946_proc_init) 37 - ENTRY(cpu_arm946_switch_mm) 35 + SYM_TYPED_FUNC_START(cpu_arm946_proc_init) 38 36 ret lr 37 + SYM_FUNC_END(cpu_arm946_proc_init) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm946_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm946_switch_mm) 39 42 40 43 /* 41 44 * cpu_arm946_proc_fin() 42 45 */ 43 - ENTRY(cpu_arm946_proc_fin) 46 + SYM_TYPED_FUNC_START(cpu_arm946_proc_fin) 44 47 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 45 48 bic r0, r0, #0x00001000 @ i-cache 46 49 bic r0, r0, #0x00000004 @ d-cache 47 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 48 51 ret lr 52 + SYM_FUNC_END(cpu_arm946_proc_fin) 49 53 50 54 /* 51 55 * cpu_arm946_reset(loc) ··· 58 52 * Notes : This sets up everything for a reset 59 53 */ 60 54 .pushsection .idmap.text, "ax" 61 - ENTRY(cpu_arm946_reset) 55 + SYM_TYPED_FUNC_START(cpu_arm946_reset) 62 56 mov ip, #0 63 57 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 64 58 mcr p15, 0, ip, c7, c6, 0 @ flush D cache ··· 68 62 bic ip, ip, #0x00001000 @ i-cache 69 63 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 70 64 ret r0 71 - ENDPROC(cpu_arm946_reset) 65 + SYM_FUNC_END(cpu_arm946_reset) 72 66 .popsection 73 67 74 68 /* 75 69 * cpu_arm946_do_idle() 76 70 */ 77 71 .align 5 78 - ENTRY(cpu_arm946_do_idle) 72 + SYM_TYPED_FUNC_START(cpu_arm946_do_idle) 79 73 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 80 74 ret lr 75 + SYM_FUNC_END(cpu_arm946_do_idle) 81 76 82 77 /* 83 78 * flush_icache_all() 84 79 * 85 80 * Unconditionally clean and invalidate the entire icache. 
86 81 */ 87 - ENTRY(arm946_flush_icache_all) 82 + SYM_TYPED_FUNC_START(arm946_flush_icache_all) 88 83 mov r0, #0 89 84 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 90 85 ret lr 91 - ENDPROC(arm946_flush_icache_all) 86 + SYM_FUNC_END(arm946_flush_icache_all) 92 87 93 88 /* 94 89 * flush_user_cache_all() 95 90 */ 96 - ENTRY(arm946_flush_user_cache_all) 97 - /* FALLTHROUGH */ 91 + SYM_FUNC_ALIAS(arm946_flush_user_cache_all, arm946_flush_kern_cache_all) 98 92 99 93 /* 100 94 * flush_kern_cache_all() 101 95 * 102 96 * Clean and invalidate the entire cache. 103 97 */ 104 - ENTRY(arm946_flush_kern_cache_all) 98 + SYM_TYPED_FUNC_START(arm946_flush_kern_cache_all) 105 99 mov r2, #VM_EXEC 106 100 mov ip, #0 107 101 __flush_whole_cache: ··· 120 114 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache 121 115 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 122 116 ret lr 117 + SYM_FUNC_END(arm946_flush_kern_cache_all) 123 118 124 119 /* 125 120 * flush_user_cache_range(start, end, flags) ··· 133 126 * - flags - vm_flags describing address space 134 127 * (same as arm926) 135 128 */ 136 - ENTRY(arm946_flush_user_cache_range) 129 + SYM_TYPED_FUNC_START(arm946_flush_user_cache_range) 137 130 mov ip, #0 138 131 sub r3, r1, r0 @ calculate total size 139 132 cmp r3, #CACHE_DLIMIT ··· 160 153 tst r2, #VM_EXEC 161 154 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 162 155 ret lr 156 + SYM_FUNC_END(arm946_flush_user_cache_range) 163 157 164 158 /* 165 159 * coherent_kern_range(start, end) ··· 172 164 * - start - virtual start address 173 165 * - end - virtual end address 174 166 */ 175 - ENTRY(arm946_coherent_kern_range) 176 - /* FALLTHROUGH */ 167 + SYM_TYPED_FUNC_START(arm946_coherent_kern_range) 168 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 169 + b arm946_coherent_user_range 170 + #endif 171 + SYM_FUNC_END(arm946_coherent_kern_range) 177 172 178 173 /* 179 174 * coherent_user_range(start, end) ··· 189 178 * - end - virtual end address 190 179 * (same as arm926) 191 180 */ 192 - 
ENTRY(arm946_coherent_user_range) 181 + SYM_TYPED_FUNC_START(arm946_coherent_user_range) 193 182 bic r0, r0, #CACHE_DLINESIZE - 1 194 183 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 195 184 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 199 188 mcr p15, 0, r0, c7, c10, 4 @ drain WB 200 189 mov r0, #0 201 190 ret lr 191 + SYM_FUNC_END(arm946_coherent_user_range) 202 192 203 193 /* 204 194 * flush_kern_dcache_area(void *addr, size_t size) ··· 211 199 * - size - region size 212 200 * (same as arm926) 213 201 */ 214 - ENTRY(arm946_flush_kern_dcache_area) 202 + SYM_TYPED_FUNC_START(arm946_flush_kern_dcache_area) 215 203 add r1, r0, r1 216 204 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 217 205 add r0, r0, #CACHE_DLINESIZE ··· 221 209 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 222 210 mcr p15, 0, r0, c7, c10, 4 @ drain WB 223 211 ret lr 212 + SYM_FUNC_END(arm946_flush_kern_dcache_area) 224 213 225 214 /* 226 215 * dma_inv_range(start, end) ··· 281 268 * 282 269 * (same as arm926) 283 270 */ 284 - ENTRY(arm946_dma_flush_range) 271 + SYM_TYPED_FUNC_START(arm946_dma_flush_range) 285 272 bic r0, r0, #CACHE_DLINESIZE - 1 286 273 1: 287 274 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 294 281 blo 1b 295 282 mcr p15, 0, r0, c7, c10, 4 @ drain WB 296 283 ret lr 284 + SYM_FUNC_END(arm946_dma_flush_range) 297 285 298 286 /* 299 287 * dma_map_area(start, size, dir) ··· 302 288 * - size - size of region 303 289 * - dir - DMA direction 304 290 */ 305 - ENTRY(arm946_dma_map_area) 291 + SYM_TYPED_FUNC_START(arm946_dma_map_area) 306 292 add r1, r1, r0 307 293 cmp r2, #DMA_TO_DEVICE 308 294 beq arm946_dma_clean_range 309 295 bcs arm946_dma_inv_range 310 296 b arm946_dma_flush_range 311 - ENDPROC(arm946_dma_map_area) 297 + SYM_FUNC_END(arm946_dma_map_area) 312 298 313 299 /* 314 300 * dma_unmap_area(start, size, dir) ··· 316 302 * - size - size of region 317 303 * - dir - DMA direction 318 304 */ 319 - ENTRY(arm946_dma_unmap_area) 305 + 
SYM_TYPED_FUNC_START(arm946_dma_unmap_area) 320 306 ret lr 321 - ENDPROC(arm946_dma_unmap_area) 307 + SYM_FUNC_END(arm946_dma_unmap_area) 322 308 323 - .globl arm946_flush_kern_cache_louis 324 - .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all 325 - 326 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 327 - define_cache_functions arm946 328 - 329 - ENTRY(cpu_arm946_dcache_clean_area) 309 + SYM_TYPED_FUNC_START(cpu_arm946_dcache_clean_area) 330 310 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 331 311 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 332 312 add r0, r0, #CACHE_DLINESIZE ··· 329 321 #endif 330 322 mcr p15, 0, r0, c7, c10, 4 @ drain WB 331 323 ret lr 324 + SYM_FUNC_END(cpu_arm946_dcache_clean_area) 332 325 333 326 .type __arm946_setup, #function 334 327 __arm946_setup:
+19 -7
arch/arm/mm/proc-arm9tdmi.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <linux/init.h> 9 + #include <linux/cfi_types.h> 9 10 #include <linux/pgtable.h> 10 11 #include <asm/assembler.h> 11 12 #include <asm/asm-offsets.h> ··· 25 24 * 26 25 * These are not required. 27 26 */ 28 - ENTRY(cpu_arm9tdmi_proc_init) 29 - ENTRY(cpu_arm9tdmi_do_idle) 30 - ENTRY(cpu_arm9tdmi_dcache_clean_area) 31 - ENTRY(cpu_arm9tdmi_switch_mm) 27 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_init) 32 28 ret lr 29 + SYM_FUNC_END(cpu_arm9tdmi_proc_init) 30 + 31 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_do_idle) 32 + ret lr 33 + SYM_FUNC_END(cpu_arm9tdmi_do_idle) 34 + 35 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_dcache_clean_area) 36 + ret lr 37 + SYM_FUNC_END(cpu_arm9tdmi_dcache_clean_area) 38 + 39 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_switch_mm) 40 + ret lr 41 + SYM_FUNC_END(cpu_arm9tdmi_switch_mm) 33 42 34 43 /* 35 44 * cpu_arm9tdmi_proc_fin() 36 45 */ 37 - ENTRY(cpu_arm9tdmi_proc_fin) 46 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_proc_fin) 38 47 ret lr 48 + SYM_FUNC_END(cpu_arm9tdmi_proc_fin) 39 49 40 50 /* 41 51 * Function: cpu_arm9tdmi_reset(loc) ··· 54 42 * Purpose : Sets up everything for a reset and jump to the location for soft reset. 55 43 */ 56 44 .pushsection .idmap.text, "ax" 57 - ENTRY(cpu_arm9tdmi_reset) 45 + SYM_TYPED_FUNC_START(cpu_arm9tdmi_reset) 58 46 ret r0 59 - ENDPROC(cpu_arm9tdmi_reset) 47 + SYM_FUNC_END(cpu_arm9tdmi_reset) 60 48 .popsection 61 49 62 50 .type __arm9tdmi_setup, #function
+15 -9
arch/arm/mm/proc-fa526.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <linux/pgtable.h> 15 16 #include <asm/assembler.h> 16 17 #include <asm/hwcap.h> ··· 27 26 /* 28 27 * cpu_fa526_proc_init() 29 28 */ 30 - ENTRY(cpu_fa526_proc_init) 29 + SYM_TYPED_FUNC_START(cpu_fa526_proc_init) 31 30 ret lr 31 + SYM_FUNC_END(cpu_fa526_proc_init) 32 32 33 33 /* 34 34 * cpu_fa526_proc_fin() 35 35 */ 36 - ENTRY(cpu_fa526_proc_fin) 36 + SYM_TYPED_FUNC_START(cpu_fa526_proc_fin) 37 37 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 38 38 bic r0, r0, #0x1000 @ ...i............ 39 39 bic r0, r0, #0x000e @ ............wca. ··· 42 40 nop 43 41 nop 44 42 ret lr 43 + SYM_FUNC_END(cpu_fa526_proc_fin) 45 44 46 45 /* 47 46 * cpu_fa526_reset(loc) ··· 55 52 */ 56 53 .align 4 57 54 .pushsection .idmap.text, "ax" 58 - ENTRY(cpu_fa526_reset) 55 + SYM_TYPED_FUNC_START(cpu_fa526_reset) 59 56 /* TODO: Use CP8 if possible... */ 60 57 mov ip, #0 61 58 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches ··· 71 68 nop 72 69 nop 73 70 ret r0 74 - ENDPROC(cpu_fa526_reset) 71 + SYM_FUNC_END(cpu_fa526_reset) 75 72 .popsection 76 73 77 74 /* 78 75 * cpu_fa526_do_idle() 79 76 */ 80 77 .align 4 81 - ENTRY(cpu_fa526_do_idle) 78 + SYM_TYPED_FUNC_START(cpu_fa526_do_idle) 82 79 ret lr 80 + SYM_FUNC_END(cpu_fa526_do_idle) 83 81 84 - 85 - ENTRY(cpu_fa526_dcache_clean_area) 82 + SYM_TYPED_FUNC_START(cpu_fa526_dcache_clean_area) 86 83 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 87 84 add r0, r0, #CACHE_DLINESIZE 88 85 subs r1, r1, #CACHE_DLINESIZE 89 86 bhi 1b 90 87 mcr p15, 0, r0, c7, c10, 4 @ drain WB 91 88 ret lr 89 + SYM_FUNC_END(cpu_fa526_dcache_clean_area) 92 90 93 91 /* =============================== PageTable ============================== */ 94 92 ··· 101 97 * pgd: new page tables 102 98 */ 103 99 .align 4 104 - ENTRY(cpu_fa526_switch_mm) 100 + SYM_TYPED_FUNC_START(cpu_fa526_switch_mm) 105 101 #ifdef CONFIG_MMU 106 102 mov ip, #0 107 103 #ifdef 
CONFIG_CPU_DCACHE_WRITETHROUGH ··· 117 113 mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB 118 114 #endif 119 115 ret lr 116 + SYM_FUNC_END(cpu_fa526_switch_mm) 120 117 121 118 /* 122 119 * cpu_fa526_set_pte_ext(ptep, pte, ext) ··· 125 120 * Set a PTE and flush it out 126 121 */ 127 122 .align 4 128 - ENTRY(cpu_fa526_set_pte_ext) 123 + SYM_TYPED_FUNC_START(cpu_fa526_set_pte_ext) 129 124 #ifdef CONFIG_MMU 130 125 armv3_set_pte_ext 131 126 mov r0, r0 ··· 134 129 mcr p15, 0, r0, c7, c10, 4 @ drain WB 135 130 #endif 136 131 ret lr 132 + SYM_FUNC_END(cpu_fa526_set_pte_ext) 137 133 138 134 .type __fa526_setup, #function 139 135 __fa526_setup:
+47 -58
arch/arm/mm/proc-feroceon.S
··· 8 8 9 9 #include <linux/linkage.h> 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <linux/pgtable.h> 12 13 #include <asm/assembler.h> 13 14 #include <asm/hwcap.h> ··· 44 43 /* 45 44 * cpu_feroceon_proc_init() 46 45 */ 47 - ENTRY(cpu_feroceon_proc_init) 46 + SYM_TYPED_FUNC_START(cpu_feroceon_proc_init) 48 47 mrc p15, 0, r0, c0, c0, 1 @ read cache type register 49 48 ldr r1, __cache_params 50 49 mov r2, #(16 << 5) ··· 62 61 str_l r1, VFP_arch_feroceon, r2 63 62 #endif 64 63 ret lr 64 + SYM_FUNC_END(cpu_feroceon_proc_init) 65 65 66 66 /* 67 67 * cpu_feroceon_proc_fin() 68 68 */ 69 - ENTRY(cpu_feroceon_proc_fin) 69 + SYM_TYPED_FUNC_START(cpu_feroceon_proc_fin) 70 70 #if defined(CONFIG_CACHE_FEROCEON_L2) && \ 71 71 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 72 72 mov r0, #0 ··· 80 78 bic r0, r0, #0x000e @ ............wca. 81 79 mcr p15, 0, r0, c1, c0, 0 @ disable caches 82 80 ret lr 81 + SYM_FUNC_END(cpu_feroceon_proc_fin) 83 82 84 83 /* 85 84 * cpu_feroceon_reset(loc) ··· 93 90 */ 94 91 .align 5 95 92 .pushsection .idmap.text, "ax" 96 - ENTRY(cpu_feroceon_reset) 93 + SYM_TYPED_FUNC_START(cpu_feroceon_reset) 97 94 mov ip, #0 98 95 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 99 96 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 105 102 bic ip, ip, #0x1100 @ ...i...s........ 106 103 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 107 104 ret r0 108 - ENDPROC(cpu_feroceon_reset) 105 + SYM_FUNC_END(cpu_feroceon_reset) 109 106 .popsection 110 107 111 108 /* ··· 114 111 * Called with IRQs disabled 115 112 */ 116 113 .align 5 117 - ENTRY(cpu_feroceon_do_idle) 114 + SYM_TYPED_FUNC_START(cpu_feroceon_do_idle) 118 115 mov r0, #0 119 116 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer 120 117 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 121 118 ret lr 119 + SYM_FUNC_END(cpu_feroceon_do_idle) 122 120 123 121 /* 124 122 * flush_icache_all() 125 123 * 126 124 * Unconditionally clean and invalidate the entire icache. 
127 125 */ 128 - ENTRY(feroceon_flush_icache_all) 126 + SYM_TYPED_FUNC_START(feroceon_flush_icache_all) 129 127 mov r0, #0 130 128 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 131 129 ret lr 132 - ENDPROC(feroceon_flush_icache_all) 130 + SYM_FUNC_END(feroceon_flush_icache_all) 133 131 134 132 /* 135 133 * flush_user_cache_all() ··· 139 135 * address space. 140 136 */ 141 137 .align 5 142 - ENTRY(feroceon_flush_user_cache_all) 143 - /* FALLTHROUGH */ 138 + SYM_FUNC_ALIAS(feroceon_flush_user_cache_all, feroceon_flush_kern_cache_all) 144 139 145 140 /* 146 141 * flush_kern_cache_all() 147 142 * 148 143 * Clean and invalidate the entire cache. 149 144 */ 150 - ENTRY(feroceon_flush_kern_cache_all) 145 + SYM_TYPED_FUNC_START(feroceon_flush_kern_cache_all) 151 146 mov r2, #VM_EXEC 152 147 153 148 __flush_whole_cache: ··· 164 161 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 165 162 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 166 163 ret lr 164 + SYM_FUNC_END(feroceon_flush_kern_cache_all) 167 165 168 166 /* 169 167 * flush_user_cache_range(start, end, flags) ··· 177 173 * - flags - vm_flags describing address space 178 174 */ 179 175 .align 5 180 - ENTRY(feroceon_flush_user_cache_range) 176 + SYM_TYPED_FUNC_START(feroceon_flush_user_cache_range) 181 177 sub r3, r1, r0 @ calculate total size 182 178 cmp r3, #CACHE_DLIMIT 183 179 bgt __flush_whole_cache ··· 194 190 mov ip, #0 195 191 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 196 192 ret lr 193 + SYM_FUNC_END(feroceon_flush_user_cache_range) 197 194 198 195 /* 199 196 * coherent_kern_range(start, end) ··· 207 202 * - end - virtual end address 208 203 */ 209 204 .align 5 210 - ENTRY(feroceon_coherent_kern_range) 211 - /* FALLTHROUGH */ 205 + SYM_TYPED_FUNC_START(feroceon_coherent_kern_range) 206 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 207 + b feroceon_coherent_user_range 208 + #endif 209 + SYM_FUNC_END(feroceon_coherent_kern_range) 212 210 213 211 /* 214 212 * coherent_user_range(start, end) ··· 223 215 * - start - 
virtual start address 224 216 * - end - virtual end address 225 217 */ 226 - ENTRY(feroceon_coherent_user_range) 218 + SYM_TYPED_FUNC_START(feroceon_coherent_user_range) 227 219 bic r0, r0, #CACHE_DLINESIZE - 1 228 220 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 229 221 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 233 225 mcr p15, 0, r0, c7, c10, 4 @ drain WB 234 226 mov r0, #0 235 227 ret lr 228 + SYM_FUNC_END(feroceon_coherent_user_range) 236 229 237 230 /* 238 231 * flush_kern_dcache_area(void *addr, size_t size) ··· 245 236 * - size - region size 246 237 */ 247 238 .align 5 248 - ENTRY(feroceon_flush_kern_dcache_area) 239 + SYM_TYPED_FUNC_START(feroceon_flush_kern_dcache_area) 249 240 add r1, r0, r1 250 241 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 251 242 add r0, r0, #CACHE_DLINESIZE ··· 255 246 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 256 247 mcr p15, 0, r0, c7, c10, 4 @ drain WB 257 248 ret lr 249 + SYM_FUNC_END(feroceon_flush_kern_dcache_area) 258 250 259 251 .align 5 260 - ENTRY(feroceon_range_flush_kern_dcache_area) 252 + SYM_TYPED_FUNC_START(feroceon_range_flush_kern_dcache_area) 261 253 mrs r2, cpsr 262 254 add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive 263 255 orr r3, r2, #PSR_I_BIT ··· 270 260 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 271 261 mcr p15, 0, r0, c7, c10, 4 @ drain WB 272 262 ret lr 263 + SYM_FUNC_END(feroceon_range_flush_kern_dcache_area) 273 264 274 265 /* 275 266 * dma_inv_range(start, end) ··· 357 346 * - end - virtual end address 358 347 */ 359 348 .align 5 360 - ENTRY(feroceon_dma_flush_range) 349 + SYM_TYPED_FUNC_START(feroceon_dma_flush_range) 361 350 bic r0, r0, #CACHE_DLINESIZE - 1 362 351 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 363 352 add r0, r0, #CACHE_DLINESIZE ··· 365 354 blo 1b 366 355 mcr p15, 0, r0, c7, c10, 4 @ drain WB 367 356 ret lr 357 + SYM_FUNC_END(feroceon_dma_flush_range) 368 358 369 359 .align 5 370 - ENTRY(feroceon_range_dma_flush_range) 360 + 
SYM_TYPED_FUNC_START(feroceon_range_dma_flush_range) 371 361 mrs r2, cpsr 372 362 cmp r1, r0 373 363 subne r1, r1, #1 @ top address is inclusive ··· 379 367 msr cpsr_c, r2 @ restore interrupts 380 368 mcr p15, 0, r0, c7, c10, 4 @ drain WB 381 369 ret lr 370 + SYM_FUNC_END(feroceon_range_dma_flush_range) 382 371 383 372 /* 384 373 * dma_map_area(start, size, dir) ··· 387 374 * - size - size of region 388 375 * - dir - DMA direction 389 376 */ 390 - ENTRY(feroceon_dma_map_area) 377 + SYM_TYPED_FUNC_START(feroceon_dma_map_area) 391 378 add r1, r1, r0 392 379 cmp r2, #DMA_TO_DEVICE 393 380 beq feroceon_dma_clean_range 394 381 bcs feroceon_dma_inv_range 395 382 b feroceon_dma_flush_range 396 - ENDPROC(feroceon_dma_map_area) 383 + SYM_FUNC_END(feroceon_dma_map_area) 397 384 398 385 /* 399 386 * dma_map_area(start, size, dir) ··· 401 388 * - size - size of region 402 389 * - dir - DMA direction 403 390 */ 404 - ENTRY(feroceon_range_dma_map_area) 391 + SYM_TYPED_FUNC_START(feroceon_range_dma_map_area) 405 392 add r1, r1, r0 406 393 cmp r2, #DMA_TO_DEVICE 407 394 beq feroceon_range_dma_clean_range 408 395 bcs feroceon_range_dma_inv_range 409 396 b feroceon_range_dma_flush_range 410 - ENDPROC(feroceon_range_dma_map_area) 397 + SYM_FUNC_END(feroceon_range_dma_map_area) 411 398 412 399 /* 413 400 * dma_unmap_area(start, size, dir) ··· 415 402 * - size - size of region 416 403 * - dir - DMA direction 417 404 */ 418 - ENTRY(feroceon_dma_unmap_area) 405 + SYM_TYPED_FUNC_START(feroceon_dma_unmap_area) 419 406 ret lr 420 - ENDPROC(feroceon_dma_unmap_area) 421 - 422 - .globl feroceon_flush_kern_cache_louis 423 - .equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all 424 - 425 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 426 - define_cache_functions feroceon 427 - 428 - .macro range_alias basename 429 - .globl feroceon_range_\basename 430 - .type feroceon_range_\basename , %function 431 - .equ feroceon_range_\basename , feroceon_\basename 
432 - .endm 433 - 434 - /* 435 - * Most of the cache functions are unchanged for this case. 436 - * Export suitable alias symbols for the unchanged functions: 437 - */ 438 - range_alias flush_icache_all 439 - range_alias flush_user_cache_all 440 - range_alias flush_kern_cache_all 441 - range_alias flush_kern_cache_louis 442 - range_alias flush_user_cache_range 443 - range_alias coherent_kern_range 444 - range_alias coherent_user_range 445 - range_alias dma_unmap_area 446 - 447 - define_cache_functions feroceon_range 407 + SYM_FUNC_END(feroceon_dma_unmap_area) 448 408 449 409 .align 5 450 - ENTRY(cpu_feroceon_dcache_clean_area) 410 + SYM_TYPED_FUNC_START(cpu_feroceon_dcache_clean_area) 451 411 #if defined(CONFIG_CACHE_FEROCEON_L2) && \ 452 412 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) 453 413 mov r2, r0 ··· 439 453 #endif 440 454 mcr p15, 0, r0, c7, c10, 4 @ drain WB 441 455 ret lr 456 + SYM_FUNC_END(cpu_feroceon_dcache_clean_area) 442 457 443 458 /* =============================== PageTable ============================== */ 444 459 ··· 451 464 * pgd: new page tables 452 465 */ 453 466 .align 5 454 - ENTRY(cpu_feroceon_switch_mm) 467 + SYM_TYPED_FUNC_START(cpu_feroceon_switch_mm) 455 468 #ifdef CONFIG_MMU 456 469 /* 457 470 * Note: we wish to call __flush_whole_cache but we need to preserve ··· 472 485 #else 473 486 ret lr 474 487 #endif 488 + SYM_FUNC_END(cpu_feroceon_switch_mm) 475 489 476 490 /* 477 491 * cpu_feroceon_set_pte_ext(ptep, pte, ext) ··· 480 492 * Set a PTE and flush it out 481 493 */ 482 494 .align 5 483 - ENTRY(cpu_feroceon_set_pte_ext) 495 + SYM_TYPED_FUNC_START(cpu_feroceon_set_pte_ext) 484 496 #ifdef CONFIG_MMU 485 497 armv3_set_pte_ext wc_disable=0 486 498 mov r0, r0 ··· 492 504 mcr p15, 0, r0, c7, c10, 4 @ drain WB 493 505 #endif 494 506 ret lr 507 + SYM_FUNC_END(cpu_feroceon_set_pte_ext) 495 508 496 509 /* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */ 497 510 .globl cpu_feroceon_suspend_size 498 511 .equ 
cpu_feroceon_suspend_size, 4 * 3 499 512 #ifdef CONFIG_ARM_CPU_SUSPEND 500 - ENTRY(cpu_feroceon_do_suspend) 513 + SYM_TYPED_FUNC_START(cpu_feroceon_do_suspend) 501 514 stmfd sp!, {r4 - r6, lr} 502 515 mrc p15, 0, r4, c13, c0, 0 @ PID 503 516 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 504 517 mrc p15, 0, r6, c1, c0, 0 @ Control register 505 518 stmia r0, {r4 - r6} 506 519 ldmfd sp!, {r4 - r6, pc} 507 - ENDPROC(cpu_feroceon_do_suspend) 520 + SYM_FUNC_END(cpu_feroceon_do_suspend) 508 521 509 - ENTRY(cpu_feroceon_do_resume) 522 + SYM_TYPED_FUNC_START(cpu_feroceon_do_resume) 510 523 mov ip, #0 511 524 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs 512 525 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches ··· 517 528 mcr p15, 0, r1, c2, c0, 0 @ TTB address 518 529 mov r0, r6 @ control register 519 530 b cpu_resume_mmu 520 - ENDPROC(cpu_feroceon_do_resume) 531 + SYM_FUNC_END(cpu_feroceon_do_resume) 521 532 #endif 522 533 523 534 .type __feroceon_setup, #function
-33
arch/arm/mm/proc-macros.S
··· 320 320 #endif 321 321 .endm 322 322 323 - .macro define_cache_functions name:req 324 - .align 2 325 - .type \name\()_cache_fns, #object 326 - ENTRY(\name\()_cache_fns) 327 - .long \name\()_flush_icache_all 328 - .long \name\()_flush_kern_cache_all 329 - .long \name\()_flush_kern_cache_louis 330 - .long \name\()_flush_user_cache_all 331 - .long \name\()_flush_user_cache_range 332 - .long \name\()_coherent_kern_range 333 - .long \name\()_coherent_user_range 334 - .long \name\()_flush_kern_dcache_area 335 - .long \name\()_dma_map_area 336 - .long \name\()_dma_unmap_area 337 - .long \name\()_dma_flush_range 338 - .size \name\()_cache_fns, . - \name\()_cache_fns 339 - .endm 340 - 341 - .macro define_tlb_functions name:req, flags_up:req, flags_smp 342 - .type \name\()_tlb_fns, #object 343 - .align 2 344 - ENTRY(\name\()_tlb_fns) 345 - .long \name\()_flush_user_tlb_range 346 - .long \name\()_flush_kern_tlb_range 347 - .ifnb \flags_smp 348 - ALT_SMP(.long \flags_smp ) 349 - ALT_UP(.long \flags_up ) 350 - .else 351 - .long \flags_up 352 - .endif 353 - .size \name\()_tlb_fns, . - \name\()_tlb_fns 354 - .endm 355 - 356 323 .macro globl_equ x, y 357 324 .globl \x 358 325 .equ \x, \y
+41 -33
arch/arm/mm/proc-mohawk.S
··· 9 9 10 10 #include <linux/linkage.h> 11 11 #include <linux/init.h> 12 + #include <linux/cfi_types.h> 12 13 #include <linux/pgtable.h> 13 14 #include <asm/assembler.h> 14 15 #include <asm/hwcap.h> ··· 32 31 /* 33 32 * cpu_mohawk_proc_init() 34 33 */ 35 - ENTRY(cpu_mohawk_proc_init) 34 + SYM_TYPED_FUNC_START(cpu_mohawk_proc_init) 36 35 ret lr 36 + SYM_FUNC_END(cpu_mohawk_proc_init) 37 37 38 38 /* 39 39 * cpu_mohawk_proc_fin() 40 40 */ 41 - ENTRY(cpu_mohawk_proc_fin) 41 + SYM_TYPED_FUNC_START(cpu_mohawk_proc_fin) 42 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 43 43 bic r0, r0, #0x1800 @ ...iz........... 44 44 bic r0, r0, #0x0006 @ .............ca. 45 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 46 46 ret lr 47 + SYM_FUNC_END(cpu_mohawk_proc_fin) 47 48 48 49 /* 49 50 * cpu_mohawk_reset(loc) ··· 60 57 */ 61 58 .align 5 62 59 .pushsection .idmap.text, "ax" 63 - ENTRY(cpu_mohawk_reset) 60 + SYM_TYPED_FUNC_START(cpu_mohawk_reset) 64 61 mov ip, #0 65 62 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 66 63 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 70 67 bic ip, ip, #0x1100 @ ...i...s........ 71 68 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 72 69 ret r0 73 - ENDPROC(cpu_mohawk_reset) 70 + SYM_FUNC_END(cpu_mohawk_reset) 74 71 .popsection 75 72 76 73 /* ··· 79 76 * Called with IRQs disabled 80 77 */ 81 78 .align 5 82 - ENTRY(cpu_mohawk_do_idle) 79 + SYM_TYPED_FUNC_START(cpu_mohawk_do_idle) 83 80 mov r0, #0 84 81 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 85 82 mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt 86 83 ret lr 84 + SYM_FUNC_END(cpu_mohawk_do_idle) 87 85 88 86 /* 89 87 * flush_icache_all() 90 88 * 91 89 * Unconditionally clean and invalidate the entire icache. 
92 90 */ 93 - ENTRY(mohawk_flush_icache_all) 91 + SYM_TYPED_FUNC_START(mohawk_flush_icache_all) 94 92 mov r0, #0 95 93 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 96 94 ret lr 97 - ENDPROC(mohawk_flush_icache_all) 95 + SYM_FUNC_END(mohawk_flush_icache_all) 98 96 99 97 /* 100 98 * flush_user_cache_all() ··· 103 99 * Clean and invalidate all cache entries in a particular 104 100 * address space. 105 101 */ 106 - ENTRY(mohawk_flush_user_cache_all) 107 - /* FALLTHROUGH */ 102 + SYM_FUNC_ALIAS(mohawk_flush_user_cache_all, mohawk_flush_kern_cache_all) 108 103 109 104 /* 110 105 * flush_kern_cache_all() 111 106 * 112 107 * Clean and invalidate the entire cache. 113 108 */ 114 - ENTRY(mohawk_flush_kern_cache_all) 109 + SYM_TYPED_FUNC_START(mohawk_flush_kern_cache_all) 115 110 mov r2, #VM_EXEC 116 111 mov ip, #0 117 112 __flush_whole_cache: ··· 119 116 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 120 117 mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer 121 118 ret lr 119 + SYM_FUNC_END(mohawk_flush_kern_cache_all) 122 120 123 121 /* 124 122 * flush_user_cache_range(start, end, flags) ··· 133 129 * 134 130 * (same as arm926) 135 131 */ 136 - ENTRY(mohawk_flush_user_cache_range) 132 + SYM_TYPED_FUNC_START(mohawk_flush_user_cache_range) 137 133 mov ip, #0 138 134 sub r3, r1, r0 @ calculate total size 139 135 cmp r3, #CACHE_DLIMIT ··· 150 146 tst r2, #VM_EXEC 151 147 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 152 148 ret lr 149 + SYM_FUNC_END(mohawk_flush_user_cache_range) 153 150 154 151 /* 155 152 * coherent_kern_range(start, end) ··· 162 157 * - start - virtual start address 163 158 * - end - virtual end address 164 159 */ 165 - ENTRY(mohawk_coherent_kern_range) 166 - /* FALLTHROUGH */ 160 + SYM_TYPED_FUNC_START(mohawk_coherent_kern_range) 161 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 162 + b mohawk_coherent_user_range 163 + #endif 164 + SYM_FUNC_END(mohawk_coherent_kern_range) 167 165 168 166 /* 169 167 * coherent_user_range(start, end) ··· 180 172 * 181 173 
* (same as arm926) 182 174 */ 183 - ENTRY(mohawk_coherent_user_range) 175 + SYM_TYPED_FUNC_START(mohawk_coherent_user_range) 184 176 bic r0, r0, #CACHE_DLINESIZE - 1 185 177 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 186 178 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry ··· 190 182 mcr p15, 0, r0, c7, c10, 4 @ drain WB 191 183 mov r0, #0 192 184 ret lr 185 + SYM_FUNC_END(mohawk_coherent_user_range) 193 186 194 187 /* 195 188 * flush_kern_dcache_area(void *addr, size_t size) ··· 201 192 * - addr - kernel address 202 193 * - size - region size 203 194 */ 204 - ENTRY(mohawk_flush_kern_dcache_area) 195 + SYM_TYPED_FUNC_START(mohawk_flush_kern_dcache_area) 205 196 add r1, r0, r1 206 197 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 207 198 add r0, r0, #CACHE_DLINESIZE ··· 211 202 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 212 203 mcr p15, 0, r0, c7, c10, 4 @ drain WB 213 204 ret lr 205 + SYM_FUNC_END(mohawk_flush_kern_dcache_area) 214 206 215 207 /* 216 208 * dma_inv_range(start, end) ··· 266 256 * - start - virtual start address 267 257 * - end - virtual end address 268 258 */ 269 - ENTRY(mohawk_dma_flush_range) 259 + SYM_TYPED_FUNC_START(mohawk_dma_flush_range) 270 260 bic r0, r0, #CACHE_DLINESIZE - 1 271 261 1: 272 262 mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry ··· 275 265 blo 1b 276 266 mcr p15, 0, r0, c7, c10, 4 @ drain WB 277 267 ret lr 268 + SYM_FUNC_END(mohawk_dma_flush_range) 278 269 279 270 /* 280 271 * dma_map_area(start, size, dir) ··· 283 272 * - size - size of region 284 273 * - dir - DMA direction 285 274 */ 286 - ENTRY(mohawk_dma_map_area) 275 + SYM_TYPED_FUNC_START(mohawk_dma_map_area) 287 276 add r1, r1, r0 288 277 cmp r2, #DMA_TO_DEVICE 289 278 beq mohawk_dma_clean_range 290 279 bcs mohawk_dma_inv_range 291 280 b mohawk_dma_flush_range 292 - ENDPROC(mohawk_dma_map_area) 281 + SYM_FUNC_END(mohawk_dma_map_area) 293 282 294 283 /* 295 284 * dma_unmap_area(start, size, dir) ··· 297 286 * - size - size of region 298 287 * - 
dir - DMA direction 299 288 */ 300 - ENTRY(mohawk_dma_unmap_area) 289 + SYM_TYPED_FUNC_START(mohawk_dma_unmap_area) 301 290 ret lr 302 - ENDPROC(mohawk_dma_unmap_area) 291 + SYM_FUNC_END(mohawk_dma_unmap_area) 303 292 304 - .globl mohawk_flush_kern_cache_louis 305 - .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all 306 - 307 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 308 - define_cache_functions mohawk 309 - 310 - ENTRY(cpu_mohawk_dcache_clean_area) 293 + SYM_TYPED_FUNC_START(cpu_mohawk_dcache_clean_area) 311 294 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 312 295 add r0, r0, #CACHE_DLINESIZE 313 296 subs r1, r1, #CACHE_DLINESIZE 314 297 bhi 1b 315 298 mcr p15, 0, r0, c7, c10, 4 @ drain WB 316 299 ret lr 300 + SYM_FUNC_END(cpu_mohawk_dcache_clean_area) 317 301 318 302 /* 319 303 * cpu_mohawk_switch_mm(pgd) ··· 318 312 * pgd: new page tables 319 313 */ 320 314 .align 5 321 - ENTRY(cpu_mohawk_switch_mm) 315 + SYM_TYPED_FUNC_START(cpu_mohawk_switch_mm) 322 316 mov ip, #0 323 317 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache 324 318 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache ··· 327 321 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 328 322 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 329 323 ret lr 324 + SYM_FUNC_END(cpu_mohawk_switch_mm) 330 325 331 326 /* 332 327 * cpu_mohawk_set_pte_ext(ptep, pte, ext) ··· 335 328 * Set a PTE and flush it out 336 329 */ 337 330 .align 5 338 - ENTRY(cpu_mohawk_set_pte_ext) 331 + SYM_TYPED_FUNC_START(cpu_mohawk_set_pte_ext) 339 332 #ifdef CONFIG_MMU 340 333 armv3_set_pte_ext 341 334 mov r0, r0 ··· 343 336 mcr p15, 0, r0, c7, c10, 4 @ drain WB 344 337 ret lr 345 338 #endif 339 + SYM_FUNC_END(cpu_mohawk_set_pte_ext) 346 340 347 341 .globl cpu_mohawk_suspend_size 348 342 .equ cpu_mohawk_suspend_size, 4 * 6 349 343 #ifdef CONFIG_ARM_CPU_SUSPEND 350 - ENTRY(cpu_mohawk_do_suspend) 344 + SYM_TYPED_FUNC_START(cpu_mohawk_do_suspend) 351 345 stmfd sp!, {r4 - r9, lr} 
352 346 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 353 347 mrc p15, 0, r5, c15, c1, 0 @ CP access reg ··· 359 351 bic r4, r4, #2 @ clear frequency change bit 360 352 stmia r0, {r4 - r9} @ store cp regs 361 353 ldmia sp!, {r4 - r9, pc} 362 - ENDPROC(cpu_mohawk_do_suspend) 354 + SYM_FUNC_END(cpu_mohawk_do_suspend) 363 355 364 - ENTRY(cpu_mohawk_do_resume) 356 + SYM_TYPED_FUNC_START(cpu_mohawk_do_resume) 365 357 ldmia r0, {r4 - r9} @ load cp regs 366 358 mov ip, #0 367 359 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB ··· 377 369 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg 378 370 mov r0, r9 @ control register 379 371 b cpu_resume_mmu 380 - ENDPROC(cpu_mohawk_do_resume) 372 + SYM_FUNC_END(cpu_mohawk_do_resume) 381 373 #endif 382 374 383 375 .type __mohawk_setup, #function
+15 -8
arch/arm/mm/proc-sa110.S
··· 12 12 */ 13 13 #include <linux/linkage.h> 14 14 #include <linux/init.h> 15 + #include <linux/cfi_types.h> 15 16 #include <linux/pgtable.h> 16 17 #include <asm/assembler.h> 17 18 #include <asm/asm-offsets.h> ··· 33 32 /* 34 33 * cpu_sa110_proc_init() 35 34 */ 36 - ENTRY(cpu_sa110_proc_init) 35 + SYM_TYPED_FUNC_START(cpu_sa110_proc_init) 37 36 mov r0, #0 38 37 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 39 38 ret lr 39 + SYM_FUNC_END(cpu_sa110_proc_init) 40 40 41 41 /* 42 42 * cpu_sa110_proc_fin() 43 43 */ 44 - ENTRY(cpu_sa110_proc_fin) 44 + SYM_TYPED_FUNC_START(cpu_sa110_proc_fin) 45 45 mov r0, #0 46 46 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching 47 47 mrc p15, 0, r0, c1, c0, 0 @ ctrl register ··· 50 48 bic r0, r0, #0x000e @ ............wca. 51 49 mcr p15, 0, r0, c1, c0, 0 @ disable caches 52 50 ret lr 51 + SYM_FUNC_END(cpu_sa110_proc_fin) 53 52 54 53 /* 55 54 * cpu_sa110_reset(loc) ··· 63 60 */ 64 61 .align 5 65 62 .pushsection .idmap.text, "ax" 66 - ENTRY(cpu_sa110_reset) 63 + SYM_TYPED_FUNC_START(cpu_sa110_reset) 67 64 mov ip, #0 68 65 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 69 66 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 75 72 bic ip, ip, #0x1100 @ ...i...s........ 
76 73 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 77 74 ret r0 78 - ENDPROC(cpu_sa110_reset) 75 + SYM_FUNC_END(cpu_sa110_reset) 79 76 .popsection 80 77 81 78 /* ··· 91 88 */ 92 89 .align 5 93 90 94 - ENTRY(cpu_sa110_do_idle) 91 + SYM_TYPED_FUNC_START(cpu_sa110_do_idle) 95 92 mcr p15, 0, ip, c15, c2, 2 @ disable clock switching 96 93 ldr r1, =UNCACHEABLE_ADDR @ load from uncacheable loc 97 94 ldr r1, [r1, #0] @ force switch to MCLK ··· 104 101 mov r0, r0 @ safety 105 102 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 106 103 ret lr 104 + SYM_FUNC_END(cpu_sa110_do_idle) 107 105 108 106 /* ================================= CACHE ================================ */ 109 107 ··· 117 113 * addr: cache-unaligned virtual address 118 114 */ 119 115 .align 5 120 - ENTRY(cpu_sa110_dcache_clean_area) 116 + SYM_TYPED_FUNC_START(cpu_sa110_dcache_clean_area) 121 117 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 122 118 add r0, r0, #DCACHELINESIZE 123 119 subs r1, r1, #DCACHELINESIZE 124 120 bhi 1b 125 121 ret lr 122 + SYM_FUNC_END(cpu_sa110_dcache_clean_area) 126 123 127 124 /* =============================== PageTable ============================== */ 128 125 ··· 135 130 * pgd: new page tables 136 131 */ 137 132 .align 5 138 - ENTRY(cpu_sa110_switch_mm) 133 + SYM_TYPED_FUNC_START(cpu_sa110_switch_mm) 139 134 #ifdef CONFIG_MMU 140 135 str lr, [sp, #-4]! 141 136 bl v4wb_flush_kern_cache_all @ clears IP ··· 145 140 #else 146 141 ret lr 147 142 #endif 143 + SYM_FUNC_END(cpu_sa110_switch_mm) 148 144 149 145 /* 150 146 * cpu_sa110_set_pte_ext(ptep, pte, ext) ··· 153 147 * Set a PTE and flush it out 154 148 */ 155 149 .align 5 156 - ENTRY(cpu_sa110_set_pte_ext) 150 + SYM_TYPED_FUNC_START(cpu_sa110_set_pte_ext) 157 151 #ifdef CONFIG_MMU 158 152 armv3_set_pte_ext wc_disable=0 159 153 mov r0, r0 ··· 161 155 mcr p15, 0, r0, c7, c10, 4 @ drain WB 162 156 #endif 163 157 ret lr 158 + SYM_FUNC_END(cpu_sa110_set_pte_ext) 164 159 165 160 .type __sa110_setup, #function 166 161 __sa110_setup:
+19 -12
arch/arm/mm/proc-sa1100.S
··· 17 17 */ 18 18 #include <linux/linkage.h> 19 19 #include <linux/init.h> 20 + #include <linux/cfi_types.h> 20 21 #include <linux/pgtable.h> 21 22 #include <asm/assembler.h> 22 23 #include <asm/asm-offsets.h> ··· 37 36 /* 38 37 * cpu_sa1100_proc_init() 39 38 */ 40 - ENTRY(cpu_sa1100_proc_init) 39 + SYM_TYPED_FUNC_START(cpu_sa1100_proc_init) 41 40 mov r0, #0 42 41 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 43 42 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 44 43 ret lr 44 + SYM_FUNC_END(cpu_sa1100_proc_init) 45 45 46 46 /* 47 47 * cpu_sa1100_proc_fin() ··· 51 49 * - Disable interrupts 52 50 * - Clean and turn off caches. 53 51 */ 54 - ENTRY(cpu_sa1100_proc_fin) 52 + SYM_TYPED_FUNC_START(cpu_sa1100_proc_fin) 55 53 mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching 56 54 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 57 55 bic r0, r0, #0x1000 @ ...i............ 58 56 bic r0, r0, #0x000e @ ............wca. 59 57 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 58 ret lr 59 + SYM_FUNC_END(cpu_sa1100_proc_fin) 61 60 62 61 /* 63 62 * cpu_sa1100_reset(loc) ··· 71 68 */ 72 69 .align 5 73 70 .pushsection .idmap.text, "ax" 74 - ENTRY(cpu_sa1100_reset) 71 + SYM_TYPED_FUNC_START(cpu_sa1100_reset) 75 72 mov ip, #0 76 73 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches 77 74 mcr p15, 0, ip, c7, c10, 4 @ drain WB ··· 83 80 bic ip, ip, #0x1100 @ ...i...s........ 
84 81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 85 82 ret r0 86 - ENDPROC(cpu_sa1100_reset) 83 + SYM_FUNC_END(cpu_sa1100_reset) 87 84 .popsection 88 85 89 86 /* ··· 98 95 * 3 = switch to fast processor clock 99 96 */ 100 97 .align 5 101 - ENTRY(cpu_sa1100_do_idle) 98 + SYM_TYPED_FUNC_START(cpu_sa1100_do_idle) 102 99 mov r0, r0 @ 4 nop padding 103 100 mov r0, r0 104 101 mov r0, r0 ··· 114 111 mov r0, r0 @ safety 115 112 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 116 113 ret lr 114 + SYM_FUNC_END(cpu_sa1100_do_idle) 117 115 118 116 /* ================================= CACHE ================================ */ 119 117 ··· 127 123 * addr: cache-unaligned virtual address 128 124 */ 129 125 .align 5 130 - ENTRY(cpu_sa1100_dcache_clean_area) 126 + SYM_TYPED_FUNC_START(cpu_sa1100_dcache_clean_area) 131 127 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 132 128 add r0, r0, #DCACHELINESIZE 133 129 subs r1, r1, #DCACHELINESIZE 134 130 bhi 1b 135 131 ret lr 132 + SYM_FUNC_END(cpu_sa1100_dcache_clean_area) 136 133 137 134 /* =============================== PageTable ============================== */ 138 135 ··· 145 140 * pgd: new page tables 146 141 */ 147 142 .align 5 148 - ENTRY(cpu_sa1100_switch_mm) 143 + SYM_TYPED_FUNC_START(cpu_sa1100_switch_mm) 149 144 #ifdef CONFIG_MMU 150 145 str lr, [sp, #-4]! 
151 146 bl v4wb_flush_kern_cache_all @ clears IP ··· 156 151 #else 157 152 ret lr 158 153 #endif 154 + SYM_FUNC_END(cpu_sa1100_switch_mm) 159 155 160 156 /* 161 157 * cpu_sa1100_set_pte_ext(ptep, pte, ext) ··· 164 158 * Set a PTE and flush it out 165 159 */ 166 160 .align 5 167 - ENTRY(cpu_sa1100_set_pte_ext) 161 + SYM_TYPED_FUNC_START(cpu_sa1100_set_pte_ext) 168 162 #ifdef CONFIG_MMU 169 163 armv3_set_pte_ext wc_disable=0 170 164 mov r0, r0 ··· 172 166 mcr p15, 0, r0, c7, c10, 4 @ drain WB 173 167 #endif 174 168 ret lr 169 + SYM_FUNC_END(cpu_sa1100_set_pte_ext) 175 170 176 171 .globl cpu_sa1100_suspend_size 177 172 .equ cpu_sa1100_suspend_size, 4 * 3 178 173 #ifdef CONFIG_ARM_CPU_SUSPEND 179 - ENTRY(cpu_sa1100_do_suspend) 174 + SYM_TYPED_FUNC_START(cpu_sa1100_do_suspend) 180 175 stmfd sp!, {r4 - r6, lr} 181 176 mrc p15, 0, r4, c3, c0, 0 @ domain ID 182 177 mrc p15, 0, r5, c13, c0, 0 @ PID 183 178 mrc p15, 0, r6, c1, c0, 0 @ control reg 184 179 stmia r0, {r4 - r6} @ store cp regs 185 180 ldmfd sp!, {r4 - r6, pc} 186 - ENDPROC(cpu_sa1100_do_suspend) 181 + SYM_FUNC_END(cpu_sa1100_do_suspend) 187 182 188 - ENTRY(cpu_sa1100_do_resume) 183 + SYM_TYPED_FUNC_START(cpu_sa1100_do_resume) 189 184 ldmia r0, {r4 - r6} @ load cp regs 190 185 mov ip, #0 191 186 mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs ··· 199 192 mcr p15, 0, r5, c13, c0, 0 @ PID 200 193 mov r0, r6 @ control register 201 194 b cpu_resume_mmu 202 - ENDPROC(cpu_sa1100_do_resume) 195 + SYM_FUNC_END(cpu_sa1100_do_resume) 203 196 #endif 204 197 205 198 .type __sa1100_setup, #function
+19 -12
arch/arm/mm/proc-v6.S
··· 8 8 * This is the "shell" of the ARMv6 processor support. 9 9 */ 10 10 #include <linux/init.h> 11 + #include <linux/cfi_types.h> 11 12 #include <linux/linkage.h> 12 13 #include <linux/pgtable.h> 13 14 #include <asm/assembler.h> ··· 35 34 36 35 .arch armv6 37 36 38 - ENTRY(cpu_v6_proc_init) 37 + SYM_TYPED_FUNC_START(cpu_v6_proc_init) 39 38 ret lr 39 + SYM_FUNC_END(cpu_v6_proc_init) 40 40 41 - ENTRY(cpu_v6_proc_fin) 41 + SYM_TYPED_FUNC_START(cpu_v6_proc_fin) 42 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 43 43 bic r0, r0, #0x1000 @ ...i............ 44 44 bic r0, r0, #0x0006 @ .............ca. 45 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 46 46 ret lr 47 + SYM_FUNC_END(cpu_v6_proc_fin) 47 48 48 49 /* 49 50 * cpu_v6_reset(loc) ··· 58 55 */ 59 56 .align 5 60 57 .pushsection .idmap.text, "ax" 61 - ENTRY(cpu_v6_reset) 58 + SYM_TYPED_FUNC_START(cpu_v6_reset) 62 59 mrc p15, 0, r1, c1, c0, 0 @ ctrl register 63 60 bic r1, r1, #0x1 @ ...............m 64 61 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 65 62 mov r1, #0 66 63 mcr p15, 0, r1, c7, c5, 4 @ ISB 67 64 ret r0 68 - ENDPROC(cpu_v6_reset) 65 + SYM_FUNC_END(cpu_v6_reset) 69 66 .popsection 70 67 71 68 /* ··· 75 72 * 76 73 * IRQs are already disabled. 
77 74 */ 78 - ENTRY(cpu_v6_do_idle) 75 + SYM_TYPED_FUNC_START(cpu_v6_do_idle) 79 76 mov r1, #0 80 77 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode 81 78 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt 82 79 ret lr 80 + SYM_FUNC_END(cpu_v6_do_idle) 83 81 84 - ENTRY(cpu_v6_dcache_clean_area) 82 + SYM_TYPED_FUNC_START(cpu_v6_dcache_clean_area) 85 83 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 86 84 add r0, r0, #D_CACHE_LINE_SIZE 87 85 subs r1, r1, #D_CACHE_LINE_SIZE 88 86 bhi 1b 89 87 ret lr 88 + SYM_FUNC_END(cpu_v6_dcache_clean_area) 90 89 91 90 /* 92 91 * cpu_v6_switch_mm(pgd_phys, tsk) ··· 100 95 * It is assumed that: 101 96 * - we are not using split page tables 102 97 */ 103 - ENTRY(cpu_v6_switch_mm) 98 + SYM_TYPED_FUNC_START(cpu_v6_switch_mm) 104 99 #ifdef CONFIG_MMU 105 100 mov r2, #0 106 101 mmid r1, r1 @ get mm->context.id ··· 118 113 mcr p15, 0, r1, c13, c0, 1 @ set context ID 119 114 #endif 120 115 ret lr 116 + SYM_FUNC_END(cpu_v6_switch_mm) 121 117 122 118 /* 123 119 * cpu_v6_set_pte_ext(ptep, pte, ext) ··· 132 126 */ 133 127 armv6_mt_table cpu_v6 134 128 135 - ENTRY(cpu_v6_set_pte_ext) 129 + SYM_TYPED_FUNC_START(cpu_v6_set_pte_ext) 136 130 #ifdef CONFIG_MMU 137 131 armv6_set_pte_ext cpu_v6 138 132 #endif 139 133 ret lr 134 + SYM_FUNC_END(cpu_v6_set_pte_ext) 140 135 141 136 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 142 137 .globl cpu_v6_suspend_size 143 138 .equ cpu_v6_suspend_size, 4 * 6 144 139 #ifdef CONFIG_ARM_CPU_SUSPEND 145 - ENTRY(cpu_v6_do_suspend) 140 + SYM_TYPED_FUNC_START(cpu_v6_do_suspend) 146 141 stmfd sp!, {r4 - r9, lr} 147 142 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 148 143 #ifdef CONFIG_MMU ··· 155 148 mrc p15, 0, r9, c1, c0, 0 @ control register 156 149 stmia r0, {r4 - r9} 157 150 ldmfd sp!, {r4- r9, pc} 158 - ENDPROC(cpu_v6_do_suspend) 151 + SYM_FUNC_END(cpu_v6_do_suspend) 159 152 160 - ENTRY(cpu_v6_do_resume) 153 + SYM_TYPED_FUNC_START(cpu_v6_do_resume) 161 154 mov ip, #0 162 155 mcr p15, 
0, ip, c7, c14, 0 @ clean+invalidate D cache 163 156 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache ··· 179 172 mcr p15, 0, ip, c7, c5, 4 @ ISB 180 173 mov r0, r9 @ control register 181 174 b cpu_resume_mmu 182 - ENDPROC(cpu_v6_do_resume) 175 + SYM_FUNC_END(cpu_v6_do_resume) 183 176 #endif 184 177 185 178 string cpu_v6_name, "ARMv6-compatible processor"
+4 -4
arch/arm/mm/proc-v7-2level.S
··· 40 40 * even on Cortex-A8 revisions not affected by 430973. 41 41 * If IBE is not set, the flush BTAC/BTB won't do anything. 42 42 */ 43 - ENTRY(cpu_v7_switch_mm) 43 + SYM_TYPED_FUNC_START(cpu_v7_switch_mm) 44 44 #ifdef CONFIG_MMU 45 45 mmid r1, r1 @ get mm->context.id 46 46 ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) ··· 59 59 isb 60 60 #endif 61 61 bx lr 62 - ENDPROC(cpu_v7_switch_mm) 62 + SYM_FUNC_END(cpu_v7_switch_mm) 63 63 64 64 /* 65 65 * cpu_v7_set_pte_ext(ptep, pte) ··· 71 71 * - pte - PTE value to store 72 72 * - ext - value for extended PTE bits 73 73 */ 74 - ENTRY(cpu_v7_set_pte_ext) 74 + SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext) 75 75 #ifdef CONFIG_MMU 76 76 str r1, [r0] @ linux version 77 77 ··· 106 106 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 107 107 #endif 108 108 bx lr 109 - ENDPROC(cpu_v7_set_pte_ext) 109 + SYM_FUNC_END(cpu_v7_set_pte_ext) 110 110 111 111 /* 112 112 * Memory region attributes with SCTLR.TRE=1
+4 -4
arch/arm/mm/proc-v7-3level.S
··· 42 42 * Set the translation table base pointer to be pgd_phys (physical address of 43 43 * the new TTB). 44 44 */ 45 - ENTRY(cpu_v7_switch_mm) 45 + SYM_TYPED_FUNC_START(cpu_v7_switch_mm) 46 46 #ifdef CONFIG_MMU 47 47 mmid r2, r2 48 48 asid r2, r2 ··· 51 51 isb 52 52 #endif 53 53 ret lr 54 - ENDPROC(cpu_v7_switch_mm) 54 + SYM_FUNC_END(cpu_v7_switch_mm) 55 55 56 56 #ifdef __ARMEB__ 57 57 #define rl r3 ··· 68 68 * - ptep - pointer to level 3 translation table entry 69 69 * - pte - PTE value to store (64-bit in r2 and r3) 70 70 */ 71 - ENTRY(cpu_v7_set_pte_ext) 71 + SYM_TYPED_FUNC_START(cpu_v7_set_pte_ext) 72 72 #ifdef CONFIG_MMU 73 73 tst rl, #L_PTE_VALID 74 74 beq 1f ··· 87 87 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 88 88 #endif 89 89 ret lr 90 - ENDPROC(cpu_v7_set_pte_ext) 90 + SYM_FUNC_END(cpu_v7_set_pte_ext) 91 91 92 92 /* 93 93 * Memory region attributes for LPAE (defined in pgtable-3level.h):
+34 -32
arch/arm/mm/proc-v7.S
··· 7 7 * This is the "shell" of the ARMv7 processor support. 8 8 */ 9 9 #include <linux/arm-smccc.h> 10 + #include <linux/cfi_types.h> 10 11 #include <linux/init.h> 11 12 #include <linux/linkage.h> 12 13 #include <linux/pgtable.h> ··· 27 26 28 27 .arch armv7-a 29 28 30 - ENTRY(cpu_v7_proc_init) 29 + SYM_TYPED_FUNC_START(cpu_v7_proc_init) 31 30 ret lr 32 - ENDPROC(cpu_v7_proc_init) 31 + SYM_FUNC_END(cpu_v7_proc_init) 33 32 34 - ENTRY(cpu_v7_proc_fin) 33 + SYM_TYPED_FUNC_START(cpu_v7_proc_fin) 35 34 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 36 35 bic r0, r0, #0x1000 @ ...i............ 37 36 bic r0, r0, #0x0006 @ .............ca. 38 37 mcr p15, 0, r0, c1, c0, 0 @ disable caches 39 38 ret lr 40 - ENDPROC(cpu_v7_proc_fin) 39 + SYM_FUNC_END(cpu_v7_proc_fin) 41 40 42 41 /* 43 42 * cpu_v7_reset(loc, hyp) ··· 54 53 */ 55 54 .align 5 56 55 .pushsection .idmap.text, "ax" 57 - ENTRY(cpu_v7_reset) 56 + SYM_TYPED_FUNC_START(cpu_v7_reset) 58 57 mrc p15, 0, r2, c1, c0, 0 @ ctrl register 59 58 bic r2, r2, #0x1 @ ...............m 60 59 THUMB( bic r2, r2, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) ··· 65 64 bne __hyp_soft_restart 66 65 #endif 67 66 bx r0 68 - ENDPROC(cpu_v7_reset) 67 + SYM_FUNC_END(cpu_v7_reset) 69 68 .popsection 70 69 71 70 /* ··· 75 74 * 76 75 * IRQs are already disabled. 
77 76 */ 78 - ENTRY(cpu_v7_do_idle) 77 + SYM_TYPED_FUNC_START(cpu_v7_do_idle) 79 78 dsb @ WFI may enter a low-power mode 80 79 wfi 81 80 ret lr 82 - ENDPROC(cpu_v7_do_idle) 81 + SYM_FUNC_END(cpu_v7_do_idle) 83 82 84 - ENTRY(cpu_v7_dcache_clean_area) 83 + SYM_TYPED_FUNC_START(cpu_v7_dcache_clean_area) 85 84 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW 86 85 ALT_UP_B(1f) 87 86 ret lr ··· 92 91 bhi 2b 93 92 dsb ishst 94 93 ret lr 95 - ENDPROC(cpu_v7_dcache_clean_area) 94 + SYM_FUNC_END(cpu_v7_dcache_clean_area) 96 95 97 96 #ifdef CONFIG_ARM_PSCI 98 97 .arch_extension sec 99 - ENTRY(cpu_v7_smc_switch_mm) 98 + SYM_TYPED_FUNC_START(cpu_v7_smc_switch_mm) 100 99 stmfd sp!, {r0 - r3} 101 100 movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 102 101 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 103 102 smc #0 104 103 ldmfd sp!, {r0 - r3} 105 104 b cpu_v7_switch_mm 106 - ENDPROC(cpu_v7_smc_switch_mm) 105 + SYM_FUNC_END(cpu_v7_smc_switch_mm) 107 106 .arch_extension virt 108 - ENTRY(cpu_v7_hvc_switch_mm) 107 + SYM_TYPED_FUNC_START(cpu_v7_hvc_switch_mm) 109 108 stmfd sp!, {r0 - r3} 110 109 movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1 111 110 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1 112 111 hvc #0 113 112 ldmfd sp!, {r0 - r3} 114 113 b cpu_v7_switch_mm 115 - ENDPROC(cpu_v7_hvc_switch_mm) 114 + SYM_FUNC_END(cpu_v7_hvc_switch_mm) 116 115 #endif 117 - ENTRY(cpu_v7_iciallu_switch_mm) 116 + 117 + SYM_TYPED_FUNC_START(cpu_v7_iciallu_switch_mm) 118 118 mov r3, #0 119 119 mcr p15, 0, r3, c7, c5, 0 @ ICIALLU 120 120 b cpu_v7_switch_mm 121 - ENDPROC(cpu_v7_iciallu_switch_mm) 122 - ENTRY(cpu_v7_bpiall_switch_mm) 121 + SYM_FUNC_END(cpu_v7_iciallu_switch_mm) 122 + SYM_TYPED_FUNC_START(cpu_v7_bpiall_switch_mm) 123 123 mov r3, #0 124 124 mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB 125 125 b cpu_v7_switch_mm 126 - ENDPROC(cpu_v7_bpiall_switch_mm) 126 + SYM_FUNC_END(cpu_v7_bpiall_switch_mm) 127 127 128 128 string cpu_v7_name, "ARMv7 Processor" 129 129 .align ··· 133 131 .globl 
cpu_v7_suspend_size 134 132 .equ cpu_v7_suspend_size, 4 * 9 135 133 #ifdef CONFIG_ARM_CPU_SUSPEND 136 - ENTRY(cpu_v7_do_suspend) 134 + SYM_TYPED_FUNC_START(cpu_v7_do_suspend) 137 135 stmfd sp!, {r4 - r11, lr} 138 136 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 139 137 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID ··· 152 150 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 153 151 stmia r0, {r5 - r11} 154 152 ldmfd sp!, {r4 - r11, pc} 155 - ENDPROC(cpu_v7_do_suspend) 153 + SYM_FUNC_END(cpu_v7_do_suspend) 156 154 157 - ENTRY(cpu_v7_do_resume) 155 + SYM_TYPED_FUNC_START(cpu_v7_do_resume) 158 156 mov ip, #0 159 157 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 160 158 mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID ··· 188 186 dsb 189 187 mov r0, r8 @ control register 190 188 b cpu_resume_mmu 191 - ENDPROC(cpu_v7_do_resume) 189 + SYM_FUNC_END(cpu_v7_do_resume) 192 190 #endif 193 191 194 192 .globl cpu_ca9mp_suspend_size 195 193 .equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2 196 194 #ifdef CONFIG_ARM_CPU_SUSPEND 197 - ENTRY(cpu_ca9mp_do_suspend) 195 + SYM_TYPED_FUNC_START(cpu_ca9mp_do_suspend) 198 196 stmfd sp!, {r4 - r5} 199 197 mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register 200 198 mrc p15, 0, r5, c15, c0, 0 @ Power register 201 199 stmia r0!, {r4 - r5} 202 200 ldmfd sp!, {r4 - r5} 203 201 b cpu_v7_do_suspend 204 - ENDPROC(cpu_ca9mp_do_suspend) 202 + SYM_FUNC_END(cpu_ca9mp_do_suspend) 205 203 206 - ENTRY(cpu_ca9mp_do_resume) 204 + SYM_TYPED_FUNC_START(cpu_ca9mp_do_resume) 207 205 ldmia r0!, {r4 - r5} 208 206 mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register 209 207 teq r4, r10 @ Already restored? ··· 212 210 teq r5, r10 @ Already restored? 
213 211 mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it 214 212 b cpu_v7_do_resume 215 - ENDPROC(cpu_ca9mp_do_resume) 213 + SYM_FUNC_END(cpu_ca9mp_do_resume) 216 214 #endif 217 215 218 216 #ifdef CONFIG_CPU_PJ4B ··· 222 220 globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin 223 221 globl_equ cpu_pj4b_reset, cpu_v7_reset 224 222 #ifdef CONFIG_PJ4B_ERRATA_4742 225 - ENTRY(cpu_pj4b_do_idle) 223 + SYM_TYPED_FUNC_START(cpu_pj4b_do_idle) 226 224 dsb @ WFI may enter a low-power mode 227 225 wfi 228 226 dsb @barrier 229 227 ret lr 230 - ENDPROC(cpu_pj4b_do_idle) 228 + SYM_FUNC_END(cpu_pj4b_do_idle) 231 229 #else 232 230 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle 233 231 #endif 234 232 globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area 235 233 #ifdef CONFIG_ARM_CPU_SUSPEND 236 - ENTRY(cpu_pj4b_do_suspend) 234 + SYM_TYPED_FUNC_START(cpu_pj4b_do_suspend) 237 235 stmfd sp!, {r6 - r10} 238 236 mrc p15, 1, r6, c15, c1, 0 @ save CP15 - extra features 239 237 mrc p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 ··· 243 241 stmia r0!, {r6 - r10} 244 242 ldmfd sp!, {r6 - r10} 245 243 b cpu_v7_do_suspend 246 - ENDPROC(cpu_pj4b_do_suspend) 244 + SYM_FUNC_END(cpu_pj4b_do_suspend) 247 245 248 - ENTRY(cpu_pj4b_do_resume) 246 + SYM_TYPED_FUNC_START(cpu_pj4b_do_resume) 249 247 ldmia r0!, {r6 - r10} 250 248 mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features 251 249 mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0 ··· 253 251 mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1 254 252 mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC 255 253 b cpu_v7_do_resume 256 - ENDPROC(cpu_pj4b_do_resume) 254 + SYM_FUNC_END(cpu_pj4b_do_resume) 257 255 #endif 258 256 .globl cpu_pj4b_suspend_size 259 257 .equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
+21 -20
arch/arm/mm/proc-v7m.S
··· 8 8 * This is the "shell" of the ARMv7-M processor support. 9 9 */ 10 10 #include <linux/linkage.h> 11 + #include <linux/cfi_types.h> 11 12 #include <asm/assembler.h> 12 13 #include <asm/page.h> 13 14 #include <asm/v7m.h> 14 15 #include "proc-macros.S" 15 16 16 - ENTRY(cpu_v7m_proc_init) 17 + SYM_TYPED_FUNC_START(cpu_v7m_proc_init) 17 18 ret lr 18 - ENDPROC(cpu_v7m_proc_init) 19 + SYM_FUNC_END(cpu_v7m_proc_init) 19 20 20 - ENTRY(cpu_v7m_proc_fin) 21 + SYM_TYPED_FUNC_START(cpu_v7m_proc_fin) 21 22 ret lr 22 - ENDPROC(cpu_v7m_proc_fin) 23 + SYM_FUNC_END(cpu_v7m_proc_fin) 23 24 24 25 /* 25 26 * cpu_v7m_reset(loc) ··· 32 31 * - loc - location to jump to for soft reset 33 32 */ 34 33 .align 5 35 - ENTRY(cpu_v7m_reset) 34 + SYM_TYPED_FUNC_START(cpu_v7m_reset) 36 35 ret r0 37 - ENDPROC(cpu_v7m_reset) 36 + SYM_FUNC_END(cpu_v7m_reset) 38 37 39 38 /* 40 39 * cpu_v7m_do_idle() ··· 43 42 * 44 43 * IRQs are already disabled. 45 44 */ 46 - ENTRY(cpu_v7m_do_idle) 45 + SYM_TYPED_FUNC_START(cpu_v7m_do_idle) 47 46 wfi 48 47 ret lr 49 - ENDPROC(cpu_v7m_do_idle) 48 + SYM_FUNC_END(cpu_v7m_do_idle) 50 49 51 - ENTRY(cpu_v7m_dcache_clean_area) 50 + SYM_TYPED_FUNC_START(cpu_v7m_dcache_clean_area) 52 51 ret lr 53 - ENDPROC(cpu_v7m_dcache_clean_area) 52 + SYM_FUNC_END(cpu_v7m_dcache_clean_area) 54 53 55 54 /* 56 55 * There is no MMU, so here is nothing to do. 
57 56 */ 58 - ENTRY(cpu_v7m_switch_mm) 57 + SYM_TYPED_FUNC_START(cpu_v7m_switch_mm) 59 58 ret lr 60 - ENDPROC(cpu_v7m_switch_mm) 59 + SYM_FUNC_END(cpu_v7m_switch_mm) 61 60 62 61 .globl cpu_v7m_suspend_size 63 62 .equ cpu_v7m_suspend_size, 0 64 63 65 64 #ifdef CONFIG_ARM_CPU_SUSPEND 66 - ENTRY(cpu_v7m_do_suspend) 65 + SYM_TYPED_FUNC_START(cpu_v7m_do_suspend) 67 66 ret lr 68 - ENDPROC(cpu_v7m_do_suspend) 67 + SYM_FUNC_END(cpu_v7m_do_suspend) 69 68 70 - ENTRY(cpu_v7m_do_resume) 69 + SYM_TYPED_FUNC_START(cpu_v7m_do_resume) 71 70 ret lr 72 - ENDPROC(cpu_v7m_do_resume) 71 + SYM_FUNC_END(cpu_v7m_do_resume) 73 72 #endif 74 73 75 - ENTRY(cpu_cm7_dcache_clean_area) 74 + SYM_TYPED_FUNC_START(cpu_cm7_dcache_clean_area) 76 75 dcache_line_size r2, r3 77 76 movw r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC 78 77 movt r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC ··· 83 82 bhi 1b 84 83 dsb 85 84 ret lr 86 - ENDPROC(cpu_cm7_dcache_clean_area) 85 + SYM_FUNC_END(cpu_cm7_dcache_clean_area) 87 86 88 - ENTRY(cpu_cm7_proc_fin) 87 + SYM_TYPED_FUNC_START(cpu_cm7_proc_fin) 89 88 movw r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) 90 89 movt r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR) 91 90 ldr r0, [r2] 92 91 bic r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC) 93 92 str r0, [r2] 94 93 ret lr 95 - ENDPROC(cpu_cm7_proc_fin) 94 + SYM_FUNC_END(cpu_cm7_proc_fin) 96 95 97 96 .section ".init.text", "ax" 98 97
+42 -33
arch/arm/mm/proc-xsc3.S
··· 23 23 24 24 #include <linux/linkage.h> 25 25 #include <linux/init.h> 26 + #include <linux/cfi_types.h> 26 27 #include <linux/pgtable.h> 27 28 #include <asm/assembler.h> 28 29 #include <asm/hwcap.h> ··· 80 79 * 81 80 * Nothing too exciting at the moment 82 81 */ 83 - ENTRY(cpu_xsc3_proc_init) 82 + SYM_TYPED_FUNC_START(cpu_xsc3_proc_init) 84 83 ret lr 84 + SYM_FUNC_END(cpu_xsc3_proc_init) 85 85 86 86 /* 87 87 * cpu_xsc3_proc_fin() 88 88 */ 89 - ENTRY(cpu_xsc3_proc_fin) 89 + SYM_TYPED_FUNC_START(cpu_xsc3_proc_fin) 90 90 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 91 91 bic r0, r0, #0x1800 @ ...IZ........... 92 92 bic r0, r0, #0x0006 @ .............CA. 93 93 mcr p15, 0, r0, c1, c0, 0 @ disable caches 94 94 ret lr 95 + SYM_FUNC_END(cpu_xsc3_proc_fin) 95 96 96 97 /* 97 98 * cpu_xsc3_reset(loc) ··· 106 103 */ 107 104 .align 5 108 105 .pushsection .idmap.text, "ax" 109 - ENTRY(cpu_xsc3_reset) 106 + SYM_TYPED_FUNC_START(cpu_xsc3_reset) 110 107 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 111 108 msr cpsr_c, r1 @ reset CPSR 112 109 mrc p15, 0, r1, c1, c0, 0 @ ctrl register ··· 120 117 @ already containing those two last instructions to survive. 121 118 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 122 119 ret r0 123 - ENDPROC(cpu_xsc3_reset) 120 + SYM_FUNC_END(cpu_xsc3_reset) 124 121 .popsection 125 122 126 123 /* ··· 135 132 */ 136 133 .align 5 137 134 138 - ENTRY(cpu_xsc3_do_idle) 135 + SYM_TYPED_FUNC_START(cpu_xsc3_do_idle) 139 136 mov r0, #1 140 137 mcr p14, 0, r0, c7, c0, 0 @ go to idle 141 138 ret lr 139 + SYM_FUNC_END(cpu_xsc3_do_idle) 142 140 143 141 /* ================================= CACHE ================================ */ 144 142 ··· 148 144 * 149 145 * Unconditionally clean and invalidate the entire icache. 
150 146 */ 151 - ENTRY(xsc3_flush_icache_all) 147 + SYM_TYPED_FUNC_START(xsc3_flush_icache_all) 152 148 mov r0, #0 153 149 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 154 150 ret lr 155 - ENDPROC(xsc3_flush_icache_all) 151 + SYM_FUNC_END(xsc3_flush_icache_all) 156 152 157 153 /* 158 154 * flush_user_cache_all() ··· 160 156 * Invalidate all cache entries in a particular address 161 157 * space. 162 158 */ 163 - ENTRY(xsc3_flush_user_cache_all) 164 - /* FALLTHROUGH */ 159 + SYM_FUNC_ALIAS(xsc3_flush_user_cache_all, xsc3_flush_kern_cache_all) 165 160 166 161 /* 167 162 * flush_kern_cache_all() 168 163 * 169 164 * Clean and invalidate the entire cache. 170 165 */ 171 - ENTRY(xsc3_flush_kern_cache_all) 166 + SYM_TYPED_FUNC_START(xsc3_flush_kern_cache_all) 172 167 mov r2, #VM_EXEC 173 168 mov ip, #0 174 169 __flush_whole_cache: ··· 177 174 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 178 175 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 179 176 ret lr 177 + SYM_FUNC_END(xsc3_flush_kern_cache_all) 180 178 181 179 /* 182 180 * flush_user_cache_range(start, end, vm_flags) ··· 190 186 * - vma - vma_area_struct describing address space 191 187 */ 192 188 .align 5 193 - ENTRY(xsc3_flush_user_cache_range) 189 + SYM_TYPED_FUNC_START(xsc3_flush_user_cache_range) 194 190 mov ip, #0 195 191 sub r3, r1, r0 @ calculate total size 196 192 cmp r3, #MAX_AREA_SIZE ··· 207 203 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 208 204 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 209 205 ret lr 206 + SYM_FUNC_END(xsc3_flush_user_cache_range) 210 207 211 208 /* 212 209 * coherent_kern_range(start, end) ··· 222 217 * Note: single I-cache line invalidation isn't used here since 223 218 * it also trashes the mini I-cache used by JTAG debuggers. 
224 219 */ 225 - ENTRY(xsc3_coherent_kern_range) 226 - /* FALLTHROUGH */ 227 - ENTRY(xsc3_coherent_user_range) 220 + SYM_TYPED_FUNC_START(xsc3_coherent_kern_range) 221 + #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */ 222 + b xsc3_coherent_user_range 223 + #endif 224 + SYM_FUNC_END(xsc3_coherent_kern_range) 225 + 226 + SYM_TYPED_FUNC_START(xsc3_coherent_user_range) 228 227 bic r0, r0, #CACHELINESIZE - 1 229 228 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 230 229 add r0, r0, #CACHELINESIZE ··· 239 230 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 240 231 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 241 232 ret lr 233 + SYM_FUNC_END(xsc3_coherent_user_range) 242 234 243 235 /* 244 236 * flush_kern_dcache_area(void *addr, size_t size) ··· 250 240 * - addr - kernel address 251 241 * - size - region size 252 242 */ 253 - ENTRY(xsc3_flush_kern_dcache_area) 243 + SYM_TYPED_FUNC_START(xsc3_flush_kern_dcache_area) 254 244 add r1, r0, r1 255 245 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line 256 246 add r0, r0, #CACHELINESIZE ··· 261 251 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 262 252 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 263 253 ret lr 254 + SYM_FUNC_END(xsc3_flush_kern_dcache_area) 264 255 265 256 /* 266 257 * dma_inv_range(start, end) ··· 312 301 * - start - virtual start address 313 302 * - end - virtual end address 314 303 */ 315 - ENTRY(xsc3_dma_flush_range) 304 + SYM_TYPED_FUNC_START(xsc3_dma_flush_range) 316 305 bic r0, r0, #CACHELINESIZE - 1 317 306 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line 318 307 add r0, r0, #CACHELINESIZE ··· 320 309 blo 1b 321 310 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 322 311 ret lr 312 + SYM_FUNC_END(xsc3_dma_flush_range) 323 313 324 314 /* 325 315 * dma_map_area(start, size, dir) ··· 328 316 * - size - size of region 329 317 * - dir - DMA direction 330 318 */ 331 - ENTRY(xsc3_dma_map_area) 319 + SYM_TYPED_FUNC_START(xsc3_dma_map_area) 332 320 add r1, r1, r0 333 321 cmp r2, 
#DMA_TO_DEVICE 334 322 beq xsc3_dma_clean_range 335 323 bcs xsc3_dma_inv_range 336 324 b xsc3_dma_flush_range 337 - ENDPROC(xsc3_dma_map_area) 325 + SYM_FUNC_END(xsc3_dma_map_area) 338 326 339 327 /* 340 328 * dma_unmap_area(start, size, dir) ··· 342 330 * - size - size of region 343 331 * - dir - DMA direction 344 332 */ 345 - ENTRY(xsc3_dma_unmap_area) 333 + SYM_TYPED_FUNC_START(xsc3_dma_unmap_area) 346 334 ret lr 347 - ENDPROC(xsc3_dma_unmap_area) 335 + SYM_FUNC_END(xsc3_dma_unmap_area) 348 336 349 - .globl xsc3_flush_kern_cache_louis 350 - .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all 351 - 352 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 353 - define_cache_functions xsc3 354 - 355 - ENTRY(cpu_xsc3_dcache_clean_area) 337 + SYM_TYPED_FUNC_START(cpu_xsc3_dcache_clean_area) 356 338 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 357 339 add r0, r0, #CACHELINESIZE 358 340 subs r1, r1, #CACHELINESIZE 359 341 bhi 1b 360 342 ret lr 343 + SYM_FUNC_END(cpu_xsc3_dcache_clean_area) 361 344 362 345 /* =============================== PageTable ============================== */ 363 346 ··· 364 357 * pgd: new page tables 365 358 */ 366 359 .align 5 367 - ENTRY(cpu_xsc3_switch_mm) 360 + SYM_TYPED_FUNC_START(cpu_xsc3_switch_mm) 368 361 clean_d_cache r1, r2 369 362 mcr p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB 370 363 mcr p15, 0, ip, c7, c10, 4 @ data write barrier ··· 373 366 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 374 367 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 375 368 cpwait_ret lr, ip 369 + SYM_FUNC_END(cpu_xsc3_switch_mm) 376 370 377 371 /* 378 372 * cpu_xsc3_set_pte_ext(ptep, pte, ext) ··· 399 391 .long 0x00 @ unused 400 392 401 393 .align 5 402 - ENTRY(cpu_xsc3_set_pte_ext) 394 + SYM_TYPED_FUNC_START(cpu_xsc3_set_pte_ext) 403 395 xscale_set_pte_ext_prologue 404 396 405 397 tst r1, #L_PTE_SHARED @ shared? 
··· 412 404 413 405 xscale_set_pte_ext_epilogue 414 406 ret lr 407 + SYM_FUNC_END(cpu_xsc3_set_pte_ext) 415 408 416 409 .ltorg 417 410 .align ··· 420 411 .globl cpu_xsc3_suspend_size 421 412 .equ cpu_xsc3_suspend_size, 4 * 6 422 413 #ifdef CONFIG_ARM_CPU_SUSPEND 423 - ENTRY(cpu_xsc3_do_suspend) 414 + SYM_TYPED_FUNC_START(cpu_xsc3_do_suspend) 424 415 stmfd sp!, {r4 - r9, lr} 425 416 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 426 417 mrc p15, 0, r5, c15, c1, 0 @ CP access reg ··· 431 422 bic r4, r4, #2 @ clear frequency change bit 432 423 stmia r0, {r4 - r9} @ store cp regs 433 424 ldmia sp!, {r4 - r9, pc} 434 - ENDPROC(cpu_xsc3_do_suspend) 425 + SYM_FUNC_END(cpu_xsc3_do_suspend) 435 426 436 - ENTRY(cpu_xsc3_do_resume) 427 + SYM_TYPED_FUNC_START(cpu_xsc3_do_resume) 437 428 ldmia r0, {r4 - r9} @ load cp regs 438 429 mov ip, #0 439 430 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB ··· 449 440 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg 450 441 mov r0, r9 @ control register 451 442 b cpu_resume_mmu 452 - ENDPROC(cpu_xsc3_do_resume) 443 + SYM_FUNC_END(cpu_xsc3_do_resume) 453 444 #endif 454 445 455 446 .type __xsc3_setup, #function
+55 -72
arch/arm/mm/proc-xscale.S
··· 19 19 20 20 #include <linux/linkage.h> 21 21 #include <linux/init.h> 22 + #include <linux/cfi_types.h> 22 23 #include <linux/pgtable.h> 23 24 #include <asm/assembler.h> 24 25 #include <asm/hwcap.h> ··· 112 111 * 113 112 * Nothing too exciting at the moment 114 113 */ 115 - ENTRY(cpu_xscale_proc_init) 114 + SYM_TYPED_FUNC_START(cpu_xscale_proc_init) 116 115 @ enable write buffer coalescing. Some bootloader disable it 117 116 mrc p15, 0, r1, c1, c0, 1 118 117 bic r1, r1, #1 119 118 mcr p15, 0, r1, c1, c0, 1 120 119 ret lr 120 + SYM_FUNC_END(cpu_xscale_proc_init) 121 121 122 122 /* 123 123 * cpu_xscale_proc_fin() 124 124 */ 125 - ENTRY(cpu_xscale_proc_fin) 125 + SYM_TYPED_FUNC_START(cpu_xscale_proc_fin) 126 126 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 127 127 bic r0, r0, #0x1800 @ ...IZ........... 128 128 bic r0, r0, #0x0006 @ .............CA. 129 129 mcr p15, 0, r0, c1, c0, 0 @ disable caches 130 130 ret lr 131 + SYM_FUNC_END(cpu_xscale_proc_fin) 131 132 132 133 /* 133 134 * cpu_xscale_reset(loc) ··· 144 141 */ 145 142 .align 5 146 143 .pushsection .idmap.text, "ax" 147 - ENTRY(cpu_xscale_reset) 144 + SYM_TYPED_FUNC_START(cpu_xscale_reset) 148 145 mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 149 146 msr cpsr_c, r1 @ reset CPSR 150 147 mcr p15, 0, r1, c10, c4, 1 @ unlock I-TLB ··· 162 159 @ already containing those two last instructions to survive. 163 160 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 164 161 ret r0 165 - ENDPROC(cpu_xscale_reset) 162 + SYM_FUNC_END(cpu_xscale_reset) 166 163 .popsection 167 164 168 165 /* ··· 177 174 */ 178 175 .align 5 179 176 180 - ENTRY(cpu_xscale_do_idle) 177 + SYM_TYPED_FUNC_START(cpu_xscale_do_idle) 181 178 mov r0, #1 182 179 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE 183 180 ret lr 181 + SYM_FUNC_END(cpu_xscale_do_idle) 184 182 185 183 /* ================================= CACHE ================================ */ 186 184 ··· 190 186 * 191 187 * Unconditionally clean and invalidate the entire icache. 
192 188 */ 193 - ENTRY(xscale_flush_icache_all) 189 + SYM_TYPED_FUNC_START(xscale_flush_icache_all) 194 190 mov r0, #0 195 191 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 196 192 ret lr 197 - ENDPROC(xscale_flush_icache_all) 193 + SYM_FUNC_END(xscale_flush_icache_all) 198 194 199 195 /* 200 196 * flush_user_cache_all() ··· 202 198 * Invalidate all cache entries in a particular address 203 199 * space. 204 200 */ 205 - ENTRY(xscale_flush_user_cache_all) 206 - /* FALLTHROUGH */ 201 + SYM_FUNC_ALIAS(xscale_flush_user_cache_all, xscale_flush_kern_cache_all) 207 202 208 203 /* 209 204 * flush_kern_cache_all() 210 205 * 211 206 * Clean and invalidate the entire cache. 212 207 */ 213 - ENTRY(xscale_flush_kern_cache_all) 208 + SYM_TYPED_FUNC_START(xscale_flush_kern_cache_all) 214 209 mov r2, #VM_EXEC 215 210 mov ip, #0 216 211 __flush_whole_cache: ··· 218 215 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 219 216 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 220 217 ret lr 218 + SYM_FUNC_END(xscale_flush_kern_cache_all) 221 219 222 220 /* 223 221 * flush_user_cache_range(start, end, vm_flags) ··· 231 227 * - vma - vma_area_struct describing address space 232 228 */ 233 229 .align 5 234 - ENTRY(xscale_flush_user_cache_range) 230 + SYM_TYPED_FUNC_START(xscale_flush_user_cache_range) 235 231 mov ip, #0 236 232 sub r3, r1, r0 @ calculate total size 237 233 cmp r3, #MAX_AREA_SIZE ··· 248 244 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB 249 245 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 250 246 ret lr 247 + SYM_FUNC_END(xscale_flush_user_cache_range) 251 248 252 249 /* 253 250 * coherent_kern_range(start, end) ··· 263 258 * Note: single I-cache line invalidation isn't used here since 264 259 * it also trashes the mini I-cache used by JTAG debuggers. 
265 260 */ 266 - ENTRY(xscale_coherent_kern_range) 261 + SYM_TYPED_FUNC_START(xscale_coherent_kern_range) 267 262 bic r0, r0, #CACHELINESIZE - 1 268 263 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 269 264 add r0, r0, #CACHELINESIZE ··· 273 268 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 274 269 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 275 270 ret lr 271 + SYM_FUNC_END(xscale_coherent_kern_range) 276 272 277 273 /* 278 274 * coherent_user_range(start, end) ··· 285 279 * - start - virtual start address 286 280 * - end - virtual end address 287 281 */ 288 - ENTRY(xscale_coherent_user_range) 282 + SYM_TYPED_FUNC_START(xscale_coherent_user_range) 289 283 bic r0, r0, #CACHELINESIZE - 1 290 284 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 291 285 mcr p15, 0, r0, c7, c5, 1 @ Invalidate I cache entry ··· 296 290 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB 297 291 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 298 292 ret lr 293 + SYM_FUNC_END(xscale_coherent_user_range) 299 294 300 295 /* 301 296 * flush_kern_dcache_area(void *addr, size_t size) ··· 307 300 * - addr - kernel address 308 301 * - size - region size 309 302 */ 310 - ENTRY(xscale_flush_kern_dcache_area) 303 + SYM_TYPED_FUNC_START(xscale_flush_kern_dcache_area) 311 304 add r1, r0, r1 312 305 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 313 306 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry ··· 318 311 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 319 312 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 320 313 ret lr 314 + SYM_FUNC_END(xscale_flush_kern_dcache_area) 321 315 322 316 /* 323 317 * dma_inv_range(start, end) ··· 369 361 * - start - virtual start address 370 362 * - end - virtual end address 371 363 */ 372 - ENTRY(xscale_dma_flush_range) 364 + SYM_TYPED_FUNC_START(xscale_dma_flush_range) 373 365 bic r0, r0, #CACHELINESIZE - 1 374 366 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 375 367 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry ··· 378 370 blo 
1b 379 371 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 380 372 ret lr 373 + SYM_FUNC_END(xscale_dma_flush_range) 381 374 382 375 /* 383 376 * dma_map_area(start, size, dir) ··· 386 377 * - size - size of region 387 378 * - dir - DMA direction 388 379 */ 389 - ENTRY(xscale_dma_map_area) 380 + SYM_TYPED_FUNC_START(xscale_dma_map_area) 390 381 add r1, r1, r0 391 382 cmp r2, #DMA_TO_DEVICE 392 383 beq xscale_dma_clean_range 393 384 bcs xscale_dma_inv_range 394 385 b xscale_dma_flush_range 395 - ENDPROC(xscale_dma_map_area) 396 - 397 - /* 398 - * dma_map_area(start, size, dir) 399 - * - start - kernel virtual start address 400 - * - size - size of region 401 - * - dir - DMA direction 402 - */ 403 - ENTRY(xscale_80200_A0_A1_dma_map_area) 404 - add r1, r1, r0 405 - teq r2, #DMA_TO_DEVICE 406 - beq xscale_dma_clean_range 407 - b xscale_dma_flush_range 408 - ENDPROC(xscale_80200_A0_A1_dma_map_area) 409 - 410 - /* 411 - * dma_unmap_area(start, size, dir) 412 - * - start - kernel virtual start address 413 - * - size - size of region 414 - * - dir - DMA direction 415 - */ 416 - ENTRY(xscale_dma_unmap_area) 417 - ret lr 418 - ENDPROC(xscale_dma_unmap_area) 419 - 420 - .globl xscale_flush_kern_cache_louis 421 - .equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all 422 - 423 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 424 - define_cache_functions xscale 386 + SYM_FUNC_END(xscale_dma_map_area) 425 387 426 388 /* 427 389 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't ··· 407 427 * revision January 22, 2003, available at: 408 428 * http://www.intel.com/design/iio/specupdt/273415.htm 409 429 */ 410 - .macro a0_alias basename 411 - .globl xscale_80200_A0_A1_\basename 412 - .type xscale_80200_A0_A1_\basename , %function 413 - .equ xscale_80200_A0_A1_\basename , xscale_\basename 414 - .endm 415 430 416 431 /* 417 - * Most of the cache functions are unchanged for these processor revisions. 
418 - * Export suitable alias symbols for the unchanged functions: 432 + * dma_map_area(start, size, dir) 433 + * - start - kernel virtual start address 434 + * - size - size of region 435 + * - dir - DMA direction 419 436 */ 420 - a0_alias flush_icache_all 421 - a0_alias flush_user_cache_all 422 - a0_alias flush_kern_cache_all 423 - a0_alias flush_kern_cache_louis 424 - a0_alias flush_user_cache_range 425 - a0_alias coherent_kern_range 426 - a0_alias coherent_user_range 427 - a0_alias flush_kern_dcache_area 428 - a0_alias dma_flush_range 429 - a0_alias dma_unmap_area 437 + SYM_TYPED_FUNC_START(xscale_80200_A0_A1_dma_map_area) 438 + add r1, r1, r0 439 + teq r2, #DMA_TO_DEVICE 440 + beq xscale_dma_clean_range 441 + b xscale_dma_flush_range 442 + SYM_FUNC_END(xscale_80200_A0_A1_dma_map_area) 430 443 431 - @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 432 - define_cache_functions xscale_80200_A0_A1 444 + /* 445 + * dma_unmap_area(start, size, dir) 446 + * - start - kernel virtual start address 447 + * - size - size of region 448 + * - dir - DMA direction 449 + */ 450 + SYM_TYPED_FUNC_START(xscale_dma_unmap_area) 451 + ret lr 452 + SYM_FUNC_END(xscale_dma_unmap_area) 433 453 434 - ENTRY(cpu_xscale_dcache_clean_area) 454 + SYM_TYPED_FUNC_START(cpu_xscale_dcache_clean_area) 435 455 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 436 456 add r0, r0, #CACHELINESIZE 437 457 subs r1, r1, #CACHELINESIZE 438 458 bhi 1b 439 459 ret lr 460 + SYM_FUNC_END(cpu_xscale_dcache_clean_area) 440 461 441 462 /* =============================== PageTable ============================== */ 442 463 ··· 449 468 * pgd: new page tables 450 469 */ 451 470 .align 5 452 - ENTRY(cpu_xscale_switch_mm) 471 + SYM_TYPED_FUNC_START(cpu_xscale_switch_mm) 453 472 clean_d_cache r1, r2 454 473 mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 455 474 mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 456 475 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 457 476 mcr 
p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 458 477 cpwait_ret lr, ip 478 + SYM_FUNC_END(cpu_xscale_switch_mm) 459 479 460 480 /* 461 481 * cpu_xscale_set_pte_ext(ptep, pte, ext) ··· 484 502 .long 0x00 @ unused 485 503 486 504 .align 5 487 - ENTRY(cpu_xscale_set_pte_ext) 505 + SYM_TYPED_FUNC_START(cpu_xscale_set_pte_ext) 488 506 xscale_set_pte_ext_prologue 489 507 490 508 @ ··· 502 520 503 521 xscale_set_pte_ext_epilogue 504 522 ret lr 523 + SYM_FUNC_END(cpu_xscale_set_pte_ext) 505 524 506 525 .ltorg 507 526 .align ··· 510 527 .globl cpu_xscale_suspend_size 511 528 .equ cpu_xscale_suspend_size, 4 * 6 512 529 #ifdef CONFIG_ARM_CPU_SUSPEND 513 - ENTRY(cpu_xscale_do_suspend) 530 + SYM_TYPED_FUNC_START(cpu_xscale_do_suspend) 514 531 stmfd sp!, {r4 - r9, lr} 515 532 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 516 533 mrc p15, 0, r5, c15, c1, 0 @ CP access reg ··· 521 538 bic r4, r4, #2 @ clear frequency change bit 522 539 stmia r0, {r4 - r9} @ store cp regs 523 540 ldmfd sp!, {r4 - r9, pc} 524 - ENDPROC(cpu_xscale_do_suspend) 541 + SYM_FUNC_END(cpu_xscale_do_suspend) 525 542 526 - ENTRY(cpu_xscale_do_resume) 543 + SYM_TYPED_FUNC_START(cpu_xscale_do_resume) 527 544 ldmia r0, {r4 - r9} @ load cp regs 528 545 mov ip, #0 529 546 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs ··· 536 553 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg 537 554 mov r0, r9 @ control register 538 555 b cpu_resume_mmu 539 - ENDPROC(cpu_xscale_do_resume) 556 + SYM_FUNC_END(cpu_xscale_do_resume) 540 557 #endif 541 558 542 559 .type __xscale_setup, #function
+500
arch/arm/mm/proc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * This file defines C prototypes for the low-level processor assembly functions 4 + * and creates a reference for CFI. This needs to be done for every assembly 5 + * processor ("proc") function that is called from C but does not have a 6 + * corresponding C implementation. 7 + * 8 + * Processors are listed in the order they appear in the Makefile. 9 + * 10 + * Functions are listed if and only if they see use on the target CPU, and in 11 + * the order they are defined in struct processor. 12 + */ 13 + #include <asm/proc-fns.h> 14 + 15 + #ifdef CONFIG_CPU_ARM7TDMI 16 + void cpu_arm7tdmi_proc_init(void); 17 + __ADDRESSABLE(cpu_arm7tdmi_proc_init); 18 + void cpu_arm7tdmi_proc_fin(void); 19 + __ADDRESSABLE(cpu_arm7tdmi_proc_fin); 20 + void cpu_arm7tdmi_reset(void); 21 + __ADDRESSABLE(cpu_arm7tdmi_reset); 22 + int cpu_arm7tdmi_do_idle(void); 23 + __ADDRESSABLE(cpu_arm7tdmi_do_idle); 24 + void cpu_arm7tdmi_dcache_clean_area(void *addr, int size); 25 + __ADDRESSABLE(cpu_arm7tdmi_dcache_clean_area); 26 + void cpu_arm7tdmi_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 27 + __ADDRESSABLE(cpu_arm7tdmi_switch_mm); 28 + #endif 29 + 30 + #ifdef CONFIG_CPU_ARM720T 31 + void cpu_arm720_proc_init(void); 32 + __ADDRESSABLE(cpu_arm720_proc_init); 33 + void cpu_arm720_proc_fin(void); 34 + __ADDRESSABLE(cpu_arm720_proc_fin); 35 + void cpu_arm720_reset(void); 36 + __ADDRESSABLE(cpu_arm720_reset); 37 + int cpu_arm720_do_idle(void); 38 + __ADDRESSABLE(cpu_arm720_do_idle); 39 + void cpu_arm720_dcache_clean_area(void *addr, int size); 40 + __ADDRESSABLE(cpu_arm720_dcache_clean_area); 41 + void cpu_arm720_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 42 + __ADDRESSABLE(cpu_arm720_switch_mm); 43 + void cpu_arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 44 + __ADDRESSABLE(cpu_arm720_set_pte_ext); 45 + #endif 46 + 47 + #ifdef CONFIG_CPU_ARM740T 48 + void cpu_arm740_proc_init(void); 49 + 
__ADDRESSABLE(cpu_arm740_proc_init); 50 + void cpu_arm740_proc_fin(void); 51 + __ADDRESSABLE(cpu_arm740_proc_fin); 52 + void cpu_arm740_reset(void); 53 + __ADDRESSABLE(cpu_arm740_reset); 54 + int cpu_arm740_do_idle(void); 55 + __ADDRESSABLE(cpu_arm740_do_idle); 56 + void cpu_arm740_dcache_clean_area(void *addr, int size); 57 + __ADDRESSABLE(cpu_arm740_dcache_clean_area); 58 + void cpu_arm740_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 59 + __ADDRESSABLE(cpu_arm740_switch_mm); 60 + #endif 61 + 62 + #ifdef CONFIG_CPU_ARM9TDMI 63 + void cpu_arm9tdmi_proc_init(void); 64 + __ADDRESSABLE(cpu_arm9tdmi_proc_init); 65 + void cpu_arm9tdmi_proc_fin(void); 66 + __ADDRESSABLE(cpu_arm9tdmi_proc_fin); 67 + void cpu_arm9tdmi_reset(void); 68 + __ADDRESSABLE(cpu_arm9tdmi_reset); 69 + int cpu_arm9tdmi_do_idle(void); 70 + __ADDRESSABLE(cpu_arm9tdmi_do_idle); 71 + void cpu_arm9tdmi_dcache_clean_area(void *addr, int size); 72 + __ADDRESSABLE(cpu_arm9tdmi_dcache_clean_area); 73 + void cpu_arm9tdmi_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 74 + __ADDRESSABLE(cpu_arm9tdmi_switch_mm); 75 + #endif 76 + 77 + #ifdef CONFIG_CPU_ARM920T 78 + void cpu_arm920_proc_init(void); 79 + __ADDRESSABLE(cpu_arm920_proc_init); 80 + void cpu_arm920_proc_fin(void); 81 + __ADDRESSABLE(cpu_arm920_proc_fin); 82 + void cpu_arm920_reset(void); 83 + __ADDRESSABLE(cpu_arm920_reset); 84 + int cpu_arm920_do_idle(void); 85 + __ADDRESSABLE(cpu_arm920_do_idle); 86 + void cpu_arm920_dcache_clean_area(void *addr, int size); 87 + __ADDRESSABLE(cpu_arm920_dcache_clean_area); 88 + void cpu_arm920_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 89 + __ADDRESSABLE(cpu_arm920_switch_mm); 90 + void cpu_arm920_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 91 + __ADDRESSABLE(cpu_arm920_set_pte_ext); 92 + #ifdef CONFIG_ARM_CPU_SUSPEND 93 + void cpu_arm920_do_suspend(void *); 94 + __ADDRESSABLE(cpu_arm920_do_suspend); 95 + void cpu_arm920_do_resume(void *); 96 + 
__ADDRESSABLE(cpu_arm920_do_resume); 97 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 98 + #endif /* CONFIG_CPU_ARM920T */ 99 + 100 + #ifdef CONFIG_CPU_ARM922T 101 + void cpu_arm922_proc_init(void); 102 + __ADDRESSABLE(cpu_arm922_proc_init); 103 + void cpu_arm922_proc_fin(void); 104 + __ADDRESSABLE(cpu_arm922_proc_fin); 105 + void cpu_arm922_reset(void); 106 + __ADDRESSABLE(cpu_arm922_reset); 107 + int cpu_arm922_do_idle(void); 108 + __ADDRESSABLE(cpu_arm922_do_idle); 109 + void cpu_arm922_dcache_clean_area(void *addr, int size); 110 + __ADDRESSABLE(cpu_arm922_dcache_clean_area); 111 + void cpu_arm922_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 112 + __ADDRESSABLE(cpu_arm922_switch_mm); 113 + void cpu_arm922_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 114 + __ADDRESSABLE(cpu_arm922_set_pte_ext); 115 + #endif 116 + 117 + #ifdef CONFIG_CPU_ARM925T 118 + void cpu_arm925_proc_init(void); 119 + __ADDRESSABLE(cpu_arm925_proc_init); 120 + void cpu_arm925_proc_fin(void); 121 + __ADDRESSABLE(cpu_arm925_proc_fin); 122 + void cpu_arm925_reset(void); 123 + __ADDRESSABLE(cpu_arm925_reset); 124 + int cpu_arm925_do_idle(void); 125 + __ADDRESSABLE(cpu_arm925_do_idle); 126 + void cpu_arm925_dcache_clean_area(void *addr, int size); 127 + __ADDRESSABLE(cpu_arm925_dcache_clean_area); 128 + void cpu_arm925_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 129 + __ADDRESSABLE(cpu_arm925_switch_mm); 130 + void cpu_arm925_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 131 + __ADDRESSABLE(cpu_arm925_set_pte_ext); 132 + #endif 133 + 134 + #ifdef CONFIG_CPU_ARM926T 135 + void cpu_arm926_proc_init(void); 136 + __ADDRESSABLE(cpu_arm926_proc_init); 137 + void cpu_arm926_proc_fin(void); 138 + __ADDRESSABLE(cpu_arm926_proc_fin); 139 + void cpu_arm926_reset(unsigned long addr, bool hvc); 140 + __ADDRESSABLE(cpu_arm926_reset); 141 + int cpu_arm926_do_idle(void); 142 + __ADDRESSABLE(cpu_arm926_do_idle); 143 + void cpu_arm926_dcache_clean_area(void *addr, int size); 144 + 
__ADDRESSABLE(cpu_arm926_dcache_clean_area); 145 + void cpu_arm926_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 146 + __ADDRESSABLE(cpu_arm926_switch_mm); 147 + void cpu_arm926_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 148 + __ADDRESSABLE(cpu_arm926_set_pte_ext); 149 + #ifdef CONFIG_ARM_CPU_SUSPEND 150 + void cpu_arm926_do_suspend(void *); 151 + __ADDRESSABLE(cpu_arm926_do_suspend); 152 + void cpu_arm926_do_resume(void *); 153 + __ADDRESSABLE(cpu_arm926_do_resume); 154 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 155 + #endif /* CONFIG_CPU_ARM926T */ 156 + 157 + #ifdef CONFIG_CPU_ARM940T 158 + void cpu_arm940_proc_init(void); 159 + __ADDRESSABLE(cpu_arm940_proc_init); 160 + void cpu_arm940_proc_fin(void); 161 + __ADDRESSABLE(cpu_arm940_proc_fin); 162 + void cpu_arm940_reset(void); 163 + __ADDRESSABLE(cpu_arm940_reset); 164 + int cpu_arm940_do_idle(void); 165 + __ADDRESSABLE(cpu_arm940_do_idle); 166 + void cpu_arm940_dcache_clean_area(void *addr, int size); 167 + __ADDRESSABLE(cpu_arm940_dcache_clean_area); 168 + void cpu_arm940_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 169 + __ADDRESSABLE(cpu_arm940_switch_mm); 170 + #endif 171 + 172 + #ifdef CONFIG_CPU_ARM946E 173 + void cpu_arm946_proc_init(void); 174 + __ADDRESSABLE(cpu_arm946_proc_init); 175 + void cpu_arm946_proc_fin(void); 176 + __ADDRESSABLE(cpu_arm946_proc_fin); 177 + void cpu_arm946_reset(void); 178 + __ADDRESSABLE(cpu_arm946_reset); 179 + int cpu_arm946_do_idle(void); 180 + __ADDRESSABLE(cpu_arm946_do_idle); 181 + void cpu_arm946_dcache_clean_area(void *addr, int size); 182 + __ADDRESSABLE(cpu_arm946_dcache_clean_area); 183 + void cpu_arm946_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 184 + __ADDRESSABLE(cpu_arm946_switch_mm); 185 + #endif 186 + 187 + #ifdef CONFIG_CPU_FA526 188 + void cpu_fa526_proc_init(void); 189 + __ADDRESSABLE(cpu_fa526_proc_init); 190 + void cpu_fa526_proc_fin(void); 191 + __ADDRESSABLE(cpu_fa526_proc_fin); 192 + void cpu_fa526_reset(unsigned 
long addr, bool hvc); 193 + __ADDRESSABLE(cpu_fa526_reset); 194 + int cpu_fa526_do_idle(void); 195 + __ADDRESSABLE(cpu_fa526_do_idle); 196 + void cpu_fa526_dcache_clean_area(void *addr, int size); 197 + __ADDRESSABLE(cpu_fa526_dcache_clean_area); 198 + void cpu_fa526_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 199 + __ADDRESSABLE(cpu_fa526_switch_mm); 200 + void cpu_fa526_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 201 + __ADDRESSABLE(cpu_fa526_set_pte_ext); 202 + #endif 203 + 204 + #ifdef CONFIG_CPU_ARM1020 205 + void cpu_arm1020_proc_init(void); 206 + __ADDRESSABLE(cpu_arm1020_proc_init); 207 + void cpu_arm1020_proc_fin(void); 208 + __ADDRESSABLE(cpu_arm1020_proc_fin); 209 + void cpu_arm1020_reset(unsigned long addr, bool hvc); 210 + __ADDRESSABLE(cpu_arm1020_reset); 211 + int cpu_arm1020_do_idle(void); 212 + __ADDRESSABLE(cpu_arm1020_do_idle); 213 + void cpu_arm1020_dcache_clean_area(void *addr, int size); 214 + __ADDRESSABLE(cpu_arm1020_dcache_clean_area); 215 + void cpu_arm1020_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 216 + __ADDRESSABLE(cpu_arm1020_switch_mm); 217 + void cpu_arm1020_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 218 + __ADDRESSABLE(cpu_arm1020_set_pte_ext); 219 + #endif 220 + 221 + #ifdef CONFIG_CPU_ARM1020E 222 + void cpu_arm1020e_proc_init(void); 223 + __ADDRESSABLE(cpu_arm1020e_proc_init); 224 + void cpu_arm1020e_proc_fin(void); 225 + __ADDRESSABLE(cpu_arm1020e_proc_fin); 226 + void cpu_arm1020e_reset(unsigned long addr, bool hvc); 227 + __ADDRESSABLE(cpu_arm1020e_reset); 228 + int cpu_arm1020e_do_idle(void); 229 + __ADDRESSABLE(cpu_arm1020e_do_idle); 230 + void cpu_arm1020e_dcache_clean_area(void *addr, int size); 231 + __ADDRESSABLE(cpu_arm1020e_dcache_clean_area); 232 + void cpu_arm1020e_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 233 + __ADDRESSABLE(cpu_arm1020e_switch_mm); 234 + void cpu_arm1020e_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 235 + 
__ADDRESSABLE(cpu_arm1020e_set_pte_ext); 236 + #endif 237 + 238 + #ifdef CONFIG_CPU_ARM1022 239 + void cpu_arm1022_proc_init(void); 240 + __ADDRESSABLE(cpu_arm1022_proc_init); 241 + void cpu_arm1022_proc_fin(void); 242 + __ADDRESSABLE(cpu_arm1022_proc_fin); 243 + void cpu_arm1022_reset(unsigned long addr, bool hvc); 244 + __ADDRESSABLE(cpu_arm1022_reset); 245 + int cpu_arm1022_do_idle(void); 246 + __ADDRESSABLE(cpu_arm1022_do_idle); 247 + void cpu_arm1022_dcache_clean_area(void *addr, int size); 248 + __ADDRESSABLE(cpu_arm1022_dcache_clean_area); 249 + void cpu_arm1022_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 250 + __ADDRESSABLE(cpu_arm1022_switch_mm); 251 + void cpu_arm1022_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 252 + __ADDRESSABLE(cpu_arm1022_set_pte_ext); 253 + #endif 254 + 255 + #ifdef CONFIG_CPU_ARM1026 256 + void cpu_arm1026_proc_init(void); 257 + __ADDRESSABLE(cpu_arm1026_proc_init); 258 + void cpu_arm1026_proc_fin(void); 259 + __ADDRESSABLE(cpu_arm1026_proc_fin); 260 + void cpu_arm1026_reset(unsigned long addr, bool hvc); 261 + __ADDRESSABLE(cpu_arm1026_reset); 262 + int cpu_arm1026_do_idle(void); 263 + __ADDRESSABLE(cpu_arm1026_do_idle); 264 + void cpu_arm1026_dcache_clean_area(void *addr, int size); 265 + __ADDRESSABLE(cpu_arm1026_dcache_clean_area); 266 + void cpu_arm1026_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 267 + __ADDRESSABLE(cpu_arm1026_switch_mm); 268 + void cpu_arm1026_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 269 + __ADDRESSABLE(cpu_arm1026_set_pte_ext); 270 + #endif 271 + 272 + #ifdef CONFIG_CPU_SA110 273 + void cpu_sa110_proc_init(void); 274 + __ADDRESSABLE(cpu_sa110_proc_init); 275 + void cpu_sa110_proc_fin(void); 276 + __ADDRESSABLE(cpu_sa110_proc_fin); 277 + void cpu_sa110_reset(unsigned long addr, bool hvc); 278 + __ADDRESSABLE(cpu_sa110_reset); 279 + int cpu_sa110_do_idle(void); 280 + __ADDRESSABLE(cpu_sa110_do_idle); 281 + void cpu_sa110_dcache_clean_area(void *addr, int size); 
282 + __ADDRESSABLE(cpu_sa110_dcache_clean_area); 283 + void cpu_sa110_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 284 + __ADDRESSABLE(cpu_sa110_switch_mm); 285 + void cpu_sa110_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 286 + __ADDRESSABLE(cpu_sa110_set_pte_ext); 287 + #endif 288 + 289 + #ifdef CONFIG_CPU_SA1100 290 + void cpu_sa1100_proc_init(void); 291 + __ADDRESSABLE(cpu_sa1100_proc_init); 292 + void cpu_sa1100_proc_fin(void); 293 + __ADDRESSABLE(cpu_sa1100_proc_fin); 294 + void cpu_sa1100_reset(unsigned long addr, bool hvc); 295 + __ADDRESSABLE(cpu_sa1100_reset); 296 + int cpu_sa1100_do_idle(void); 297 + __ADDRESSABLE(cpu_sa1100_do_idle); 298 + void cpu_sa1100_dcache_clean_area(void *addr, int size); 299 + __ADDRESSABLE(cpu_sa1100_dcache_clean_area); 300 + void cpu_sa1100_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 301 + __ADDRESSABLE(cpu_sa1100_switch_mm); 302 + void cpu_sa1100_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 303 + __ADDRESSABLE(cpu_sa1100_set_pte_ext); 304 + #ifdef CONFIG_ARM_CPU_SUSPEND 305 + void cpu_sa1100_do_suspend(void *); 306 + __ADDRESSABLE(cpu_sa1100_do_suspend); 307 + void cpu_sa1100_do_resume(void *); 308 + __ADDRESSABLE(cpu_sa1100_do_resume); 309 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 310 + #endif /* CONFIG_CPU_SA1100 */ 311 + 312 + #ifdef CONFIG_CPU_XSCALE 313 + void cpu_xscale_proc_init(void); 314 + __ADDRESSABLE(cpu_xscale_proc_init); 315 + void cpu_xscale_proc_fin(void); 316 + __ADDRESSABLE(cpu_xscale_proc_fin); 317 + void cpu_xscale_reset(unsigned long addr, bool hvc); 318 + __ADDRESSABLE(cpu_xscale_reset); 319 + int cpu_xscale_do_idle(void); 320 + __ADDRESSABLE(cpu_xscale_do_idle); 321 + void cpu_xscale_dcache_clean_area(void *addr, int size); 322 + __ADDRESSABLE(cpu_xscale_dcache_clean_area); 323 + void cpu_xscale_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 324 + __ADDRESSABLE(cpu_xscale_switch_mm); 325 + void cpu_xscale_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int 
ext); 326 + __ADDRESSABLE(cpu_xscale_set_pte_ext); 327 + #ifdef CONFIG_ARM_CPU_SUSPEND 328 + void cpu_xscale_do_suspend(void *); 329 + __ADDRESSABLE(cpu_xscale_do_suspend); 330 + void cpu_xscale_do_resume(void *); 331 + __ADDRESSABLE(cpu_xscale_do_resume); 332 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 333 + #endif /* CONFIG_CPU_XSCALE */ 334 + 335 + #ifdef CONFIG_CPU_XSC3 336 + void cpu_xsc3_proc_init(void); 337 + __ADDRESSABLE(cpu_xsc3_proc_init); 338 + void cpu_xsc3_proc_fin(void); 339 + __ADDRESSABLE(cpu_xsc3_proc_fin); 340 + void cpu_xsc3_reset(unsigned long addr, bool hvc); 341 + __ADDRESSABLE(cpu_xsc3_reset); 342 + int cpu_xsc3_do_idle(void); 343 + __ADDRESSABLE(cpu_xsc3_do_idle); 344 + void cpu_xsc3_dcache_clean_area(void *addr, int size); 345 + __ADDRESSABLE(cpu_xsc3_dcache_clean_area); 346 + void cpu_xsc3_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 347 + __ADDRESSABLE(cpu_xsc3_switch_mm); 348 + void cpu_xsc3_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 349 + __ADDRESSABLE(cpu_xsc3_set_pte_ext); 350 + #ifdef CONFIG_ARM_CPU_SUSPEND 351 + void cpu_xsc3_do_suspend(void *); 352 + __ADDRESSABLE(cpu_xsc3_do_suspend); 353 + void cpu_xsc3_do_resume(void *); 354 + __ADDRESSABLE(cpu_xsc3_do_resume); 355 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 356 + #endif /* CONFIG_CPU_XSC3 */ 357 + 358 + #ifdef CONFIG_CPU_MOHAWK 359 + void cpu_mohawk_proc_init(void); 360 + __ADDRESSABLE(cpu_mohawk_proc_init); 361 + void cpu_mohawk_proc_fin(void); 362 + __ADDRESSABLE(cpu_mohawk_proc_fin); 363 + void cpu_mohawk_reset(unsigned long addr, bool hvc); 364 + __ADDRESSABLE(cpu_mohawk_reset); 365 + int cpu_mohawk_do_idle(void); 366 + __ADDRESSABLE(cpu_mohawk_do_idle); 367 + void cpu_mohawk_dcache_clean_area(void *addr, int size); 368 + __ADDRESSABLE(cpu_mohawk_dcache_clean_area); 369 + void cpu_mohawk_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 370 + __ADDRESSABLE(cpu_mohawk_switch_mm); 371 + void cpu_mohawk_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 372 
+ __ADDRESSABLE(cpu_mohawk_set_pte_ext); 373 + #ifdef CONFIG_ARM_CPU_SUSPEND 374 + void cpu_mohawk_do_suspend(void *); 375 + __ADDRESSABLE(cpu_mohawk_do_suspend); 376 + void cpu_mohawk_do_resume(void *); 377 + __ADDRESSABLE(cpu_mohawk_do_resume); 378 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 379 + #endif /* CONFIG_CPU_MOHAWK */ 380 + 381 + #ifdef CONFIG_CPU_FEROCEON 382 + void cpu_feroceon_proc_init(void); 383 + __ADDRESSABLE(cpu_feroceon_proc_init); 384 + void cpu_feroceon_proc_fin(void); 385 + __ADDRESSABLE(cpu_feroceon_proc_fin); 386 + void cpu_feroceon_reset(unsigned long addr, bool hvc); 387 + __ADDRESSABLE(cpu_feroceon_reset); 388 + int cpu_feroceon_do_idle(void); 389 + __ADDRESSABLE(cpu_feroceon_do_idle); 390 + void cpu_feroceon_dcache_clean_area(void *addr, int size); 391 + __ADDRESSABLE(cpu_feroceon_dcache_clean_area); 392 + void cpu_feroceon_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 393 + __ADDRESSABLE(cpu_feroceon_switch_mm); 394 + void cpu_feroceon_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 395 + __ADDRESSABLE(cpu_feroceon_set_pte_ext); 396 + #ifdef CONFIG_ARM_CPU_SUSPEND 397 + void cpu_feroceon_do_suspend(void *); 398 + __ADDRESSABLE(cpu_feroceon_do_suspend); 399 + void cpu_feroceon_do_resume(void *); 400 + __ADDRESSABLE(cpu_feroceon_do_resume); 401 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 402 + #endif /* CONFIG_CPU_FEROCEON */ 403 + 404 + #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) 405 + void cpu_v6_proc_init(void); 406 + __ADDRESSABLE(cpu_v6_proc_init); 407 + void cpu_v6_proc_fin(void); 408 + __ADDRESSABLE(cpu_v6_proc_fin); 409 + void cpu_v6_reset(unsigned long addr, bool hvc); 410 + __ADDRESSABLE(cpu_v6_reset); 411 + int cpu_v6_do_idle(void); 412 + __ADDRESSABLE(cpu_v6_do_idle); 413 + void cpu_v6_dcache_clean_area(void *addr, int size); 414 + __ADDRESSABLE(cpu_v6_dcache_clean_area); 415 + void cpu_v6_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 416 + __ADDRESSABLE(cpu_v6_switch_mm); 417 + void 
cpu_v6_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 418 + __ADDRESSABLE(cpu_v6_set_pte_ext); 419 + #ifdef CONFIG_ARM_CPU_SUSPEND 420 + void cpu_v6_do_suspend(void *); 421 + __ADDRESSABLE(cpu_v6_do_suspend); 422 + void cpu_v6_do_resume(void *); 423 + __ADDRESSABLE(cpu_v6_do_resume); 424 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 425 + #endif /* CPU_V6 */ 426 + 427 + #ifdef CONFIG_CPU_V7 428 + void cpu_v7_proc_init(void); 429 + __ADDRESSABLE(cpu_v7_proc_init); 430 + void cpu_v7_proc_fin(void); 431 + __ADDRESSABLE(cpu_v7_proc_fin); 432 + void cpu_v7_reset(void); 433 + __ADDRESSABLE(cpu_v7_reset); 434 + int cpu_v7_do_idle(void); 435 + __ADDRESSABLE(cpu_v7_do_idle); 436 + #ifdef CONFIG_PJ4B_ERRATA_4742 437 + int cpu_pj4b_do_idle(void); 438 + __ADDRESSABLE(cpu_pj4b_do_idle); 439 + #endif 440 + void cpu_v7_dcache_clean_area(void *addr, int size); 441 + __ADDRESSABLE(cpu_v7_dcache_clean_area); 442 + void cpu_v7_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 443 + /* Special switch_mm() callbacks to work around bugs in v7 */ 444 + __ADDRESSABLE(cpu_v7_switch_mm); 445 + void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 446 + __ADDRESSABLE(cpu_v7_iciallu_switch_mm); 447 + void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 448 + __ADDRESSABLE(cpu_v7_bpiall_switch_mm); 449 + #ifdef CONFIG_ARM_LPAE 450 + void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte); 451 + #else 452 + void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 453 + #endif 454 + __ADDRESSABLE(cpu_v7_set_pte_ext); 455 + #ifdef CONFIG_ARM_CPU_SUSPEND 456 + void cpu_v7_do_suspend(void *); 457 + __ADDRESSABLE(cpu_v7_do_suspend); 458 + void cpu_v7_do_resume(void *); 459 + __ADDRESSABLE(cpu_v7_do_resume); 460 + /* Special versions of suspend and resume for the CA9MP cores */ 461 + void cpu_ca9mp_do_suspend(void *); 462 + __ADDRESSABLE(cpu_ca9mp_do_suspend); 463 + void cpu_ca9mp_do_resume(void *); 464 + __ADDRESSABLE(cpu_ca9mp_do_resume); 465 + /* 
Special versions of suspend and resume for the Marvell PJ4B cores */ 466 + #ifdef CONFIG_CPU_PJ4B 467 + void cpu_pj4b_do_suspend(void *); 468 + __ADDRESSABLE(cpu_pj4b_do_suspend); 469 + void cpu_pj4b_do_resume(void *); 470 + __ADDRESSABLE(cpu_pj4b_do_resume); 471 + #endif /* CONFIG_CPU_PJ4B */ 472 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 473 + #endif /* CONFIG_CPU_V7 */ 474 + 475 + #ifdef CONFIG_CPU_V7M 476 + void cpu_v7m_proc_init(void); 477 + __ADDRESSABLE(cpu_v7m_proc_init); 478 + void cpu_v7m_proc_fin(void); 479 + __ADDRESSABLE(cpu_v7m_proc_fin); 480 + void cpu_v7m_reset(unsigned long addr, bool hvc); 481 + __ADDRESSABLE(cpu_v7m_reset); 482 + int cpu_v7m_do_idle(void); 483 + __ADDRESSABLE(cpu_v7m_do_idle); 484 + void cpu_v7m_dcache_clean_area(void *addr, int size); 485 + __ADDRESSABLE(cpu_v7m_dcache_clean_area); 486 + void cpu_v7m_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm); 487 + __ADDRESSABLE(cpu_v7m_switch_mm); 488 + void cpu_v7m_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); 489 + __ADDRESSABLE(cpu_v7m_set_pte_ext); 490 + #ifdef CONFIG_ARM_CPU_SUSPEND 491 + void cpu_v7m_do_suspend(void *); 492 + __ADDRESSABLE(cpu_v7m_do_suspend); 493 + void cpu_v7m_do_resume(void *); 494 + __ADDRESSABLE(cpu_v7m_do_resume); 495 + #endif /* CONFIG_ARM_CPU_SUSPEND */ 496 + void cpu_cm7_proc_fin(void); 497 + __ADDRESSABLE(cpu_cm7_proc_fin); 498 + void cpu_cm7_dcache_clean_area(void *addr, int size); 499 + __ADDRESSABLE(cpu_cm7_dcache_clean_area); 500 + #endif /* CONFIG_CPU_V7M */
+5 -7
arch/arm/mm/tlb-fa.S
··· 15 15 */ 16 16 #include <linux/linkage.h> 17 17 #include <linux/init.h> 18 + #include <linux/cfi_types.h> 18 19 #include <asm/assembler.h> 19 20 #include <asm/asm-offsets.h> 20 21 #include <asm/tlbflush.h> ··· 32 31 * - mm - mm_struct describing address space 33 32 */ 34 33 .align 4 35 - ENTRY(fa_flush_user_tlb_range) 34 + SYM_TYPED_FUNC_START(fa_flush_user_tlb_range) 36 35 vma_vm_mm ip, r2 37 36 act_mm r3 @ get current->active_mm 38 37 eors r3, ip, r3 @ == mm ? ··· 47 46 blo 1b 48 47 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 49 48 ret lr 49 + SYM_FUNC_END(fa_flush_user_tlb_range) 50 50 51 51 52 - ENTRY(fa_flush_kern_tlb_range) 52 + SYM_TYPED_FUNC_START(fa_flush_kern_tlb_range) 53 53 mov r3, #0 54 54 mcr p15, 0, r3, c7, c10, 4 @ drain WB 55 55 bic r0, r0, #0x0ff ··· 62 60 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 63 61 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) 64 62 ret lr 65 - 66 - __INITDATA 67 - 68 - /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ 69 - define_tlb_functions fa, fa_tlb_flags 63 + SYM_FUNC_END(fa_flush_kern_tlb_range)
+9 -6
arch/arm/mm/tlb-v4.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <asm/assembler.h> 15 16 #include <asm/asm-offsets.h> 16 17 #include <asm/tlbflush.h> ··· 28 27 * - mm - mm_struct describing address space 29 28 */ 30 29 .align 5 31 - ENTRY(v4_flush_user_tlb_range) 30 + SYM_TYPED_FUNC_START(v4_flush_user_tlb_range) 32 31 vma_vm_mm ip, r2 33 32 act_mm r3 @ get current->active_mm 34 33 eors r3, ip, r3 @ == mm ? ··· 41 40 cmp r0, r1 42 41 blo 1b 43 42 ret lr 43 + SYM_FUNC_END(v4_flush_user_tlb_range) 44 44 45 45 /* 46 46 * v4_flush_kern_tlb_range(start, end) ··· 52 50 * - start - virtual address (may not be aligned) 53 51 * - end - virtual address (may not be aligned) 54 52 */ 53 + #ifdef CONFIG_CFI_CLANG 54 + SYM_TYPED_FUNC_START(v4_flush_kern_tlb_range) 55 + b .v4_flush_kern_tlb_range 56 + SYM_FUNC_END(v4_flush_kern_tlb_range) 57 + #else 55 58 .globl v4_flush_kern_tlb_range 56 59 .equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range 57 - 58 - __INITDATA 59 - 60 - /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ 61 - define_tlb_functions v4, v4_tlb_flags 60 + #endif
+5 -7
arch/arm/mm/tlb-v4wb.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <asm/assembler.h> 15 16 #include <asm/asm-offsets.h> 16 17 #include <asm/tlbflush.h> ··· 28 27 * - mm - mm_struct describing address space 29 28 */ 30 29 .align 5 31 - ENTRY(v4wb_flush_user_tlb_range) 30 + SYM_TYPED_FUNC_START(v4wb_flush_user_tlb_range) 32 31 vma_vm_mm ip, r2 33 32 act_mm r3 @ get current->active_mm 34 33 eors r3, ip, r3 @ == mm ? ··· 44 43 cmp r0, r1 45 44 blo 1b 46 45 ret lr 46 + SYM_FUNC_END(v4wb_flush_user_tlb_range) 47 47 48 48 /* 49 49 * v4_flush_kern_tlb_range(start, end) ··· 55 53 * - start - virtual address (may not be aligned) 56 54 * - end - virtual address (may not be aligned) 57 55 */ 58 - ENTRY(v4wb_flush_kern_tlb_range) 56 + SYM_TYPED_FUNC_START(v4wb_flush_kern_tlb_range) 59 57 mov r3, #0 60 58 mcr p15, 0, r3, c7, c10, 4 @ drain WB 61 59 bic r0, r0, #0x0ff ··· 66 64 cmp r0, r1 67 65 blo 1b 68 66 ret lr 69 - 70 - __INITDATA 71 - 72 - /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ 73 - define_tlb_functions v4wb, v4wb_tlb_flags 67 + SYM_FUNC_END(v4wb_flush_kern_tlb_range)
+5 -7
arch/arm/mm/tlb-v4wbi.S
··· 11 11 */ 12 12 #include <linux/linkage.h> 13 13 #include <linux/init.h> 14 + #include <linux/cfi_types.h> 14 15 #include <asm/assembler.h> 15 16 #include <asm/asm-offsets.h> 16 17 #include <asm/tlbflush.h> ··· 27 26 * - mm - mm_struct describing address space 28 27 */ 29 28 .align 5 30 - ENTRY(v4wbi_flush_user_tlb_range) 29 + SYM_TYPED_FUNC_START(v4wbi_flush_user_tlb_range) 31 30 vma_vm_mm ip, r2 32 31 act_mm r3 @ get current->active_mm 33 32 eors r3, ip, r3 @ == mm ? ··· 44 43 cmp r0, r1 45 44 blo 1b 46 45 ret lr 46 + SYM_FUNC_END(v4wbi_flush_user_tlb_range) 47 47 48 - ENTRY(v4wbi_flush_kern_tlb_range) 48 + SYM_TYPED_FUNC_START(v4wbi_flush_kern_tlb_range) 49 49 mov r3, #0 50 50 mcr p15, 0, r3, c7, c10, 4 @ drain WB 51 51 bic r0, r0, #0x0ff ··· 57 55 cmp r0, r1 58 56 blo 1b 59 57 ret lr 60 - 61 - __INITDATA 62 - 63 - /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ 64 - define_tlb_functions v4wbi, v4wbi_tlb_flags 58 + SYM_FUNC_END(v4wbi_flush_kern_tlb_range)
+5 -7
arch/arm/mm/tlb-v6.S
··· 9 9 */ 10 10 #include <linux/init.h> 11 11 #include <linux/linkage.h> 12 + #include <linux/cfi_types.h> 12 13 #include <asm/asm-offsets.h> 13 14 #include <asm/assembler.h> 14 15 #include <asm/page.h> ··· 33 32 * - the "Invalidate single entry" instruction will invalidate 34 33 * both the I and the D TLBs on Harvard-style TLBs 35 34 */ 36 - ENTRY(v6wbi_flush_user_tlb_range) 35 + SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range) 37 36 vma_vm_mm r3, r2 @ get vma->vm_mm 38 37 mov ip, #0 39 38 mmid r3, r3 @ get vm_mm->context.id ··· 57 56 blo 1b 58 57 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier 59 58 ret lr 59 + SYM_FUNC_END(v6wbi_flush_user_tlb_range) 60 60 61 61 /* 62 62 * v6wbi_flush_kern_tlb_range(start,end) ··· 67 65 * - start - start address (may not be aligned) 68 66 * - end - end address (exclusive, may not be aligned) 69 67 */ 70 - ENTRY(v6wbi_flush_kern_tlb_range) 68 + SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range) 71 69 mov r2, #0 72 70 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer 73 71 mov r0, r0, lsr #PAGE_SHIFT @ align address ··· 87 85 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier 88 86 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) 89 87 ret lr 90 - 91 - __INIT 92 - 93 - /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ 94 - define_tlb_functions v6wbi, v6wbi_tlb_flags 88 + SYM_FUNC_END(v6wbi_flush_kern_tlb_range)
+5 -9
arch/arm/mm/tlb-v7.S
··· 10 10 */ 11 11 #include <linux/init.h> 12 12 #include <linux/linkage.h> 13 + #include <linux/cfi_types.h> 13 14 #include <asm/assembler.h> 14 15 #include <asm/asm-offsets.h> 15 16 #include <asm/page.h> ··· 32 31 * - the "Invalidate single entry" instruction will invalidate 33 32 * both the I and the D TLBs on Harvard-style TLBs 34 33 */ 35 - ENTRY(v7wbi_flush_user_tlb_range) 34 + SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range) 36 35 vma_vm_mm r3, r2 @ get vma->vm_mm 37 36 mmid r3, r3 @ get vm_mm->context.id 38 37 dsb ish ··· 58 57 blo 1b 59 58 dsb ish 60 59 ret lr 61 - ENDPROC(v7wbi_flush_user_tlb_range) 60 + SYM_FUNC_END(v7wbi_flush_user_tlb_range) 62 61 63 62 /* 64 63 * v7wbi_flush_kern_tlb_range(start,end) ··· 68 67 * - start - start address (may not be aligned) 69 68 * - end - end address (exclusive, may not be aligned) 70 69 */ 71 - ENTRY(v7wbi_flush_kern_tlb_range) 70 + SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range) 72 71 dsb ish 73 72 mov r0, r0, lsr #PAGE_SHIFT @ align address 74 73 mov r1, r1, lsr #PAGE_SHIFT ··· 87 86 dsb ish 88 87 isb 89 88 ret lr 90 - ENDPROC(v7wbi_flush_kern_tlb_range) 91 - 92 - __INIT 93 - 94 - /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ 95 - define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp 89 + SYM_FUNC_END(v7wbi_flush_kern_tlb_range)
+84
arch/arm/mm/tlb.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + // Copyright 2024 Google LLC 3 + // Author: Ard Biesheuvel <ardb@google.com> 4 + 5 + #include <linux/types.h> 6 + #include <asm/tlbflush.h> 7 + 8 + #ifdef CONFIG_CPU_TLB_V4WT 9 + void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); 10 + void v4_flush_kern_tlb_range(unsigned long, unsigned long); 11 + 12 + struct cpu_tlb_fns v4_tlb_fns __initconst = { 13 + .flush_user_range = v4_flush_user_tlb_range, 14 + .flush_kern_range = v4_flush_kern_tlb_range, 15 + .tlb_flags = v4_tlb_flags, 16 + }; 17 + #endif 18 + 19 + #ifdef CONFIG_CPU_TLB_V4WB 20 + void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); 21 + void v4wb_flush_kern_tlb_range(unsigned long, unsigned long); 22 + 23 + struct cpu_tlb_fns v4wb_tlb_fns __initconst = { 24 + .flush_user_range = v4wb_flush_user_tlb_range, 25 + .flush_kern_range = v4wb_flush_kern_tlb_range, 26 + .tlb_flags = v4wb_tlb_flags, 27 + }; 28 + #endif 29 + 30 + #if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON) 31 + void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); 32 + void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long); 33 + 34 + struct cpu_tlb_fns v4wbi_tlb_fns __initconst = { 35 + .flush_user_range = v4wbi_flush_user_tlb_range, 36 + .flush_kern_range = v4wbi_flush_kern_tlb_range, 37 + .tlb_flags = v4wbi_tlb_flags, 38 + }; 39 + #endif 40 + 41 + #ifdef CONFIG_CPU_TLB_V6 42 + void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); 43 + void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long); 44 + 45 + struct cpu_tlb_fns v6wbi_tlb_fns __initconst = { 46 + .flush_user_range = v6wbi_flush_user_tlb_range, 47 + .flush_kern_range = v6wbi_flush_kern_tlb_range, 48 + .tlb_flags = v6wbi_tlb_flags, 49 + }; 50 + #endif 51 + 52 + #ifdef CONFIG_CPU_TLB_V7 53 + void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct 
vm_area_struct *); 54 + void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long); 55 + 56 + struct cpu_tlb_fns v7wbi_tlb_fns __initconst = { 57 + .flush_user_range = v7wbi_flush_user_tlb_range, 58 + .flush_kern_range = v7wbi_flush_kern_tlb_range, 59 + .tlb_flags = IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp 60 + : v7wbi_tlb_flags_up, 61 + }; 62 + 63 + #ifdef CONFIG_SMP_ON_UP 64 + /* This will be run-time patched so the offset better be right */ 65 + static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8); 66 + 67 + asm(" .pushsection \".alt.smp.init\", \"a\" \n" \ 68 + " .align 2 \n" \ 69 + " .long v7wbi_tlb_fns + 8 - . \n" \ 70 + " .long " __stringify(v7wbi_tlb_flags_up) " \n" \ 71 + " .popsection \n"); 72 + #endif 73 + #endif 74 + 75 + #ifdef CONFIG_CPU_TLB_FA 76 + void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); 77 + void fa_flush_kern_tlb_range(unsigned long, unsigned long); 78 + 79 + struct cpu_tlb_fns fa_tlb_fns __initconst = { 80 + .flush_user_range = fa_flush_user_tlb_range, 81 + .flush_kern_range = fa_flush_kern_tlb_range, 82 + .tlb_flags = fa_tlb_flags, 83 + }; 84 + #endif
+7 -4
drivers/amba/bus.c
··· 488 488 * waiting on amba_match(). So, register a stub driver to make sure 489 489 * amba_match() is called even if no amba driver has been registered. 490 490 */ 491 - return amba_driver_register(&amba_proxy_drv); 491 + return __amba_driver_register(&amba_proxy_drv, NULL); 492 492 } 493 493 late_initcall_sync(amba_stub_drv_init); 494 494 495 495 /** 496 - * amba_driver_register - register an AMBA device driver 496 + * __amba_driver_register - register an AMBA device driver 497 497 * @drv: amba device driver structure 498 + * @owner: owning module/driver 498 499 * 499 500 * Register an AMBA device driver with the Linux device model 500 501 * core. If devices pre-exist, the drivers probe function will 501 502 * be called. 502 503 */ 503 - int amba_driver_register(struct amba_driver *drv) 504 + int __amba_driver_register(struct amba_driver *drv, 505 + struct module *owner) 504 506 { 505 507 if (!drv->probe) 506 508 return -EINVAL; 507 509 510 + drv->drv.owner = owner; 508 511 drv->drv.bus = &amba_bustype; 509 512 510 513 return driver_register(&drv->drv); 511 514 } 512 - EXPORT_SYMBOL(amba_driver_register); 515 + EXPORT_SYMBOL(__amba_driver_register); 513 516 514 517 /** 515 518 * amba_driver_unregister - remove an AMBA device driver
-1
drivers/char/hw_random/nomadik-rng.c
··· 78 78 79 79 static struct amba_driver nmk_rng_driver = { 80 80 .drv = { 81 - .owner = THIS_MODULE, 82 81 .name = "rng", 83 82 }, 84 83 .probe = nmk_rng_probe,
+33 -2
drivers/clk/clkdev.c
··· 158 158 va_list ap) 159 159 { 160 160 struct clk_lookup_alloc *cla; 161 + struct va_format vaf; 162 + const char *failure; 163 + va_list ap_copy; 164 + size_t max_size; 165 + ssize_t res; 161 166 162 167 cla = kzalloc(sizeof(*cla), GFP_KERNEL); 163 168 if (!cla) 164 169 return NULL; 165 170 171 + va_copy(ap_copy, ap); 172 + 166 173 cla->cl.clk_hw = hw; 167 174 if (con_id) { 168 - strscpy(cla->con_id, con_id, sizeof(cla->con_id)); 175 + res = strscpy(cla->con_id, con_id, sizeof(cla->con_id)); 176 + if (res < 0) { 177 + max_size = sizeof(cla->con_id); 178 + failure = "connection"; 179 + goto fail; 180 + } 169 181 cla->cl.con_id = cla->con_id; 170 182 } 171 183 172 184 if (dev_fmt) { 173 - vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); 185 + res = vsnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap); 186 + if (res >= sizeof(cla->dev_id)) { 187 + max_size = sizeof(cla->dev_id); 188 + failure = "device"; 189 + goto fail; 190 + } 174 191 cla->cl.dev_id = cla->dev_id; 175 192 } 176 193 194 + va_end(ap_copy); 195 + 177 196 return &cla->cl; 197 + 198 + fail: 199 + if (dev_fmt) 200 + vaf.fmt = dev_fmt; 201 + else 202 + vaf.fmt = "null-device"; 203 + vaf.va = &ap_copy; 204 + pr_err("%pV:%s: %s ID is greater than %zu\n", 205 + &vaf, con_id, failure, max_size); 206 + va_end(ap_copy); 207 + kfree(cla); 208 + return NULL; 178 209 } 179 210 180 211 static struct clk_lookup *
-1
drivers/dma/pl330.c
··· 3262 3262 3263 3263 static struct amba_driver pl330_driver = { 3264 3264 .drv = { 3265 - .owner = THIS_MODULE, 3266 3265 .name = "dma-pl330", 3267 3266 .pm = &pl330_pm, 3268 3267 },
-1
drivers/hwtracing/coresight/coresight-catu.c
··· 590 590 static struct amba_driver catu_driver = { 591 591 .drv = { 592 592 .name = "coresight-catu", 593 - .owner = THIS_MODULE, 594 593 .suppress_bind_attrs = true, 595 594 }, 596 595 .probe = catu_probe,
-1
drivers/hwtracing/coresight/coresight-cti-core.c
··· 982 982 static struct amba_driver cti_driver = { 983 983 .drv = { 984 984 .name = "coresight-cti", 985 - .owner = THIS_MODULE, 986 985 .suppress_bind_attrs = true, 987 986 }, 988 987 .probe = cti_probe,
-1
drivers/hwtracing/coresight/coresight-etb10.c
··· 844 844 static struct amba_driver etb_driver = { 845 845 .drv = { 846 846 .name = "coresight-etb10", 847 - .owner = THIS_MODULE, 848 847 .pm = &etb_dev_pm_ops, 849 848 .suppress_bind_attrs = true, 850 849
-1
drivers/hwtracing/coresight/coresight-etm3x-core.c
··· 1008 1008 static struct amba_driver etm_driver = { 1009 1009 .drv = { 1010 1010 .name = "coresight-etm3x", 1011 - .owner = THIS_MODULE, 1012 1011 .pm = &etm_dev_pm_ops, 1013 1012 .suppress_bind_attrs = true, 1014 1013 },
-1
drivers/hwtracing/coresight/coresight-etm4x-core.c
··· 2344 2344 static struct amba_driver etm4x_amba_driver = { 2345 2345 .drv = { 2346 2346 .name = "coresight-etm4x", 2347 - .owner = THIS_MODULE, 2348 2347 .suppress_bind_attrs = true, 2349 2348 }, 2350 2349 .probe = etm4_probe_amba,
-1
drivers/hwtracing/coresight/coresight-funnel.c
··· 399 399 static struct amba_driver dynamic_funnel_driver = { 400 400 .drv = { 401 401 .name = "coresight-dynamic-funnel", 402 - .owner = THIS_MODULE, 403 402 .pm = &funnel_dev_pm_ops, 404 403 .suppress_bind_attrs = true, 405 404 },
-1
drivers/hwtracing/coresight/coresight-replicator.c
··· 406 406 .drv = { 407 407 .name = "coresight-dynamic-replicator", 408 408 .pm = &replicator_dev_pm_ops, 409 - .owner = THIS_MODULE, 410 409 .suppress_bind_attrs = true, 411 410 }, 412 411 .probe = dynamic_replicator_probe,
-1
drivers/hwtracing/coresight/coresight-stm.c
··· 954 954 static struct amba_driver stm_driver = { 955 955 .drv = { 956 956 .name = "coresight-stm", 957 - .owner = THIS_MODULE, 958 957 .pm = &stm_dev_pm_ops, 959 958 .suppress_bind_attrs = true, 960 959 },
-1
drivers/hwtracing/coresight/coresight-tmc-core.c
··· 602 602 static struct amba_driver tmc_driver = { 603 603 .drv = { 604 604 .name = "coresight-tmc", 605 - .owner = THIS_MODULE, 606 605 .suppress_bind_attrs = true, 607 606 }, 608 607 .probe = tmc_probe,
-1
drivers/hwtracing/coresight/coresight-tpda.c
··· 333 333 static struct amba_driver tpda_driver = { 334 334 .drv = { 335 335 .name = "coresight-tpda", 336 - .owner = THIS_MODULE, 337 336 .suppress_bind_attrs = true, 338 337 }, 339 338 .probe = tpda_probe,
-1
drivers/hwtracing/coresight/coresight-tpdm.c
··· 1310 1310 static struct amba_driver tpdm_driver = { 1311 1311 .drv = { 1312 1312 .name = "coresight-tpdm", 1313 - .owner = THIS_MODULE, 1314 1313 .suppress_bind_attrs = true, 1315 1314 }, 1316 1315 .probe = tpdm_probe,
-1
drivers/hwtracing/coresight/coresight-tpiu.c
··· 236 236 static struct amba_driver tpiu_driver = { 237 237 .drv = { 238 238 .name = "coresight-tpiu", 239 - .owner = THIS_MODULE, 240 239 .pm = &tpiu_dev_pm_ops, 241 240 .suppress_bind_attrs = true, 242 241 },
-1
drivers/i2c/busses/i2c-nomadik.c
··· 1194 1194 1195 1195 static struct amba_driver nmk_i2c_driver = { 1196 1196 .drv = { 1197 - .owner = THIS_MODULE, 1198 1197 .name = DRIVER_NAME, 1199 1198 .pm = pm_ptr(&nmk_i2c_pm), 1200 1199 },
-1
drivers/input/serio/ambakmi.c
··· 195 195 static struct amba_driver ambakmi_driver = { 196 196 .drv = { 197 197 .name = "kmi-pl050", 198 - .owner = THIS_MODULE, 199 198 .pm = pm_sleep_ptr(&amba_kmi_dev_pm_ops), 200 199 }, 201 200 .id_table = amba_kmi_idtable,
-1
drivers/memory/pl353-smc.c
··· 154 154 155 155 static struct amba_driver pl353_smc_driver = { 156 156 .drv = { 157 - .owner = THIS_MODULE, 158 157 .name = "pl353-smc", 159 158 .pm = &pl353_smc_dev_pm_ops, 160 159 },
-1
drivers/vfio/platform/vfio_amba.c
··· 134 134 .id_table = vfio_amba_ids, 135 135 .drv = { 136 136 .name = "vfio-amba", 137 - .owner = THIS_MODULE, 138 137 }, 139 138 .driver_managed_dma = true, 140 139 };
+9 -2
include/linux/amba/bus.h
··· 112 112 #define amba_get_drvdata(d) dev_get_drvdata(&d->dev) 113 113 #define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) 114 114 115 + /* 116 + * use a macro to avoid include chaining to get THIS_MODULE 117 + */ 118 + #define amba_driver_register(drv) \ 119 + __amba_driver_register(drv, THIS_MODULE) 120 + 115 121 #ifdef CONFIG_ARM_AMBA 116 - int amba_driver_register(struct amba_driver *); 122 + int __amba_driver_register(struct amba_driver *, struct module *); 117 123 void amba_driver_unregister(struct amba_driver *); 118 124 #else 119 - static inline int amba_driver_register(struct amba_driver *drv) 125 + static inline int __amba_driver_register(struct amba_driver *drv, 126 + struct module *owner) 120 127 { 121 128 return -EINVAL; 122 129 }