Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: use ENDPIPROC() to annotate position independent assembler routines

For more control over which functions are called with the MMU off or
with the UEFI 1:1 mapping active, annotate some assembler routines as
position independent. This is done by introducing ENDPIPROC(), which
replaces the ENDPROC() declaration of those routines.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Ard Biesheuvel; committed by Catalin Marinas.
20791846 d4dddfdb

+24 -13
+11
arch/arm64/include/asm/assembler.h
··· 193 193 str \src, [\tmp, :lo12:\sym] 194 194 .endm 195 195 196 + /* 197 + * Annotate a function as position independent, i.e., safe to be called before 198 + * the kernel virtual mapping is activated. 199 + */ 200 + #define ENDPIPROC(x) \ 201 + .globl __pi_##x; \ 202 + .type __pi_##x, %function; \ 203 + .set __pi_##x, x; \ 204 + .size __pi_##x, . - x; \ 205 + ENDPROC(x) 206 + 196 207 #endif /* __ASM_ASSEMBLER_H */
+1 -1
arch/arm64/lib/memchr.S
··· 41 41 ret 42 42 2: mov x0, #0 43 43 ret 44 - ENDPROC(memchr) 44 + ENDPIPROC(memchr)
+1 -1
arch/arm64/lib/memcmp.S
··· 255 255 .Lret0: 256 256 mov result, #0 257 257 ret 258 - ENDPROC(memcmp) 258 + ENDPIPROC(memcmp)
+1 -1
arch/arm64/lib/memcpy.S
··· 71 71 ENTRY(memcpy) 72 72 #include "copy_template.S" 73 73 ret 74 - ENDPROC(memcpy) 74 + ENDPIPROC(memcpy)
+1 -1
arch/arm64/lib/memmove.S
··· 194 194 tst count, #0x3f 195 195 b.ne .Ltail63 196 196 ret 197 - ENDPROC(memmove) 197 + ENDPIPROC(memmove)
+1 -1
arch/arm64/lib/memset.S
··· 213 213 ands count, count, zva_bits_x 214 214 b.ne .Ltail_maybe_long 215 215 ret 216 - ENDPROC(memset) 216 + ENDPIPROC(memset)
+1 -1
arch/arm64/lib/strcmp.S
··· 231 231 lsr data1, data1, #56 232 232 sub result, data1, data2, lsr #56 233 233 ret 234 - ENDPROC(strcmp) 234 + ENDPIPROC(strcmp)
+1 -1
arch/arm64/lib/strlen.S
··· 123 123 csinv data1, data1, xzr, le 124 124 csel data2, data2, data2a, le 125 125 b .Lrealigned 126 - ENDPROC(strlen) 126 + ENDPIPROC(strlen)
+1 -1
arch/arm64/lib/strncmp.S
··· 307 307 .Lret0: 308 308 mov result, #0 309 309 ret 310 - ENDPROC(strncmp) 310 + ENDPIPROC(strncmp)
+5 -5
arch/arm64/mm/cache.S
··· 98 98 b.lo 1b 99 99 dsb sy 100 100 ret 101 - ENDPROC(__flush_dcache_area) 101 + ENDPIPROC(__flush_dcache_area) 102 102 103 103 /* 104 104 * __inval_cache_range(start, end) ··· 131 131 b.lo 2b 132 132 dsb sy 133 133 ret 134 - ENDPROC(__inval_cache_range) 134 + ENDPIPROC(__inval_cache_range) 135 135 ENDPROC(__dma_inv_range) 136 136 137 137 /* ··· 171 171 b.lo 1b 172 172 dsb sy 173 173 ret 174 - ENDPROC(__dma_flush_range) 174 + ENDPIPROC(__dma_flush_range) 175 175 176 176 /* 177 177 * __dma_map_area(start, size, dir) ··· 184 184 cmp w2, #DMA_FROM_DEVICE 185 185 b.eq __dma_inv_range 186 186 b __dma_clean_range 187 - ENDPROC(__dma_map_area) 187 + ENDPIPROC(__dma_map_area) 188 188 189 189 /* 190 190 * __dma_unmap_area(start, size, dir) ··· 197 197 cmp w2, #DMA_TO_DEVICE 198 198 b.ne __dma_inv_range 199 199 ret 200 - ENDPROC(__dma_unmap_area) 200 + ENDPIPROC(__dma_unmap_area)