Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: arm/aes-scalar - switch to common rev_l/mov_l macros

The scalar AES implementation has some locally defined macros which
reimplement things that are now available in macros defined in
assembler.h. So let's switch to those.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Nicolas Pitre <nico@fluxnic.net>
Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ard Biesheuvel and committed by Herbert Xu.
Commit hashes (as shown on the page): d5adb9d1, d2f2516a

+10 -32
arch/arm/crypto/aes-cipher-core.S
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -99,28 +99,6 @@
 	__hround	\out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
 	.endm
 
-	.macro		__rev, out, in
-	.if		__LINUX_ARM_ARCH__ < 6
-	lsl		t0, \in, #24
-	and		t1, \in, #0xff00
-	and		t2, \in, #0xff0000
-	orr		\out, t0, \in, lsr #24
-	orr		\out, \out, t1, lsl #8
-	orr		\out, \out, t2, lsr #8
-	.else
-	rev		\out, \in
-	.endif
-	.endm
-
-	.macro		__adrl, out, sym, c
-	.if		__LINUX_ARM_ARCH__ < 7
-	ldr\c		\out, =\sym
-	.else
-	movw\c		\out, #:lower16:\sym
-	movt\c		\out, #:upper16:\sym
-	.endif
-	.endm
-
 	.macro		do_crypt, round, ttab, ltab, bsz
 	push		{r3-r11, lr}
 
@@ -133,10 +111,10 @@
 	ldr		r7, [in, #12]
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	__rev		r4, r4
-	__rev		r5, r5
-	__rev		r6, r6
-	__rev		r7, r7
+	rev_l		r4, t0
+	rev_l		r5, t0
+	rev_l		r6, t0
+	rev_l		r7, t0
 #endif
 
 	eor		r4, r4, r8
@@ -144,7 +122,7 @@
 	eor		r6, r6, r10
 	eor		r7, r7, r11
 
-	__adrl		ttab, \ttab
+	mov_l		ttab, \ttab
 	/*
 	 * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
 	 * L1 cache, assuming cacheline size >= 32. This is a hardening measure
@@ -180,7 +158,7 @@
 2:	.ifb		\ltab
 	add		ttab, ttab, #1
 	.else
-	__adrl		ttab, \ltab
+	mov_l		ttab, \ltab
 	// Prefetch inverse S-box for final round; see explanation above
 	.set		i, 0
 	.rept		256 / 64
@@ -194,10 +172,10 @@
 	\round		r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	__rev		r4, r4
-	__rev		r5, r5
-	__rev		r6, r6
-	__rev		r7, r7
+	rev_l		r4, t0
+	rev_l		r5, t0
+	rev_l		r6, t0
+	rev_l		r7, t0
 #endif
 
 	ldr		out, [sp]