Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: arm64 - Use modern annotations for assembly functions

In an effort to clarify and simplify the annotation of assembly functions
in the kernel new macros have been introduced. These replace ENTRY and
ENDPROC and also add a new annotation for static functions which previously
had no ENTRY equivalent. Update the annotations in the crypto code to the
new macros.

There are a small number of files imported from OpenSSL where the assembly
is generated using perl programs, these are not currently annotated at all
and have not been modified.

Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Mark Brown and committed by
Herbert Xu
0e89640b 3907ccfa

+84 -84
+8 -8
arch/arm64/crypto/aes-ce-ccm-core.S
··· 15 15 * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes, 16 16 * u32 *macp, u8 const rk[], u32 rounds); 17 17 */ 18 - ENTRY(ce_aes_ccm_auth_data) 18 + SYM_FUNC_START(ce_aes_ccm_auth_data) 19 19 ldr w8, [x3] /* leftover from prev round? */ 20 20 ld1 {v0.16b}, [x0] /* load mac */ 21 21 cbz w8, 1f ··· 81 81 st1 {v0.16b}, [x0] 82 82 10: str w8, [x3] 83 83 ret 84 - ENDPROC(ce_aes_ccm_auth_data) 84 + SYM_FUNC_END(ce_aes_ccm_auth_data) 85 85 86 86 /* 87 87 * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[], 88 88 * u32 rounds); 89 89 */ 90 - ENTRY(ce_aes_ccm_final) 90 + SYM_FUNC_START(ce_aes_ccm_final) 91 91 ld1 {v3.4s}, [x2], #16 /* load first round key */ 92 92 ld1 {v0.16b}, [x0] /* load mac */ 93 93 cmp w3, #12 /* which key size? */ ··· 121 121 eor v0.16b, v0.16b, v1.16b /* en-/decrypt the mac */ 122 122 st1 {v0.16b}, [x0] /* store result */ 123 123 ret 124 - ENDPROC(ce_aes_ccm_final) 124 + SYM_FUNC_END(ce_aes_ccm_final) 125 125 126 126 .macro aes_ccm_do_crypt,enc 127 127 ldr x8, [x6, #8] /* load lower ctr */ ··· 212 212 * u8 const rk[], u32 rounds, u8 mac[], 213 213 * u8 ctr[]); 214 214 */ 215 - ENTRY(ce_aes_ccm_encrypt) 215 + SYM_FUNC_START(ce_aes_ccm_encrypt) 216 216 aes_ccm_do_crypt 1 217 - ENDPROC(ce_aes_ccm_encrypt) 217 + SYM_FUNC_END(ce_aes_ccm_encrypt) 218 218 219 - ENTRY(ce_aes_ccm_decrypt) 219 + SYM_FUNC_START(ce_aes_ccm_decrypt) 220 220 aes_ccm_do_crypt 0 221 - ENDPROC(ce_aes_ccm_decrypt) 221 + SYM_FUNC_END(ce_aes_ccm_decrypt)
+8 -8
arch/arm64/crypto/aes-ce-core.S
··· 8 8 9 9 .arch armv8-a+crypto 10 10 11 - ENTRY(__aes_ce_encrypt) 11 + SYM_FUNC_START(__aes_ce_encrypt) 12 12 sub w3, w3, #2 13 13 ld1 {v0.16b}, [x2] 14 14 ld1 {v1.4s}, [x0], #16 ··· 34 34 eor v0.16b, v0.16b, v3.16b 35 35 st1 {v0.16b}, [x1] 36 36 ret 37 - ENDPROC(__aes_ce_encrypt) 37 + SYM_FUNC_END(__aes_ce_encrypt) 38 38 39 - ENTRY(__aes_ce_decrypt) 39 + SYM_FUNC_START(__aes_ce_decrypt) 40 40 sub w3, w3, #2 41 41 ld1 {v0.16b}, [x2] 42 42 ld1 {v1.4s}, [x0], #16 ··· 62 62 eor v0.16b, v0.16b, v3.16b 63 63 st1 {v0.16b}, [x1] 64 64 ret 65 - ENDPROC(__aes_ce_decrypt) 65 + SYM_FUNC_END(__aes_ce_decrypt) 66 66 67 67 /* 68 68 * __aes_ce_sub() - use the aese instruction to perform the AES sbox 69 69 * substitution on each byte in 'input' 70 70 */ 71 - ENTRY(__aes_ce_sub) 71 + SYM_FUNC_START(__aes_ce_sub) 72 72 dup v1.4s, w0 73 73 movi v0.16b, #0 74 74 aese v0.16b, v1.16b 75 75 umov w0, v0.s[0] 76 76 ret 77 - ENDPROC(__aes_ce_sub) 77 + SYM_FUNC_END(__aes_ce_sub) 78 78 79 - ENTRY(__aes_ce_invert) 79 + SYM_FUNC_START(__aes_ce_invert) 80 80 ld1 {v0.4s}, [x1] 81 81 aesimc v1.16b, v0.16b 82 82 st1 {v1.4s}, [x0] 83 83 ret 84 - ENDPROC(__aes_ce_invert) 84 + SYM_FUNC_END(__aes_ce_invert)
+2 -2
arch/arm64/crypto/aes-ce.S
··· 9 9 #include <linux/linkage.h> 10 10 #include <asm/assembler.h> 11 11 12 - #define AES_ENTRY(func) ENTRY(ce_ ## func) 13 - #define AES_ENDPROC(func) ENDPROC(ce_ ## func) 12 + #define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func) 13 + #define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func) 14 14 15 15 .arch armv8-a+crypto 16 16
+4 -4
arch/arm64/crypto/aes-cipher-core.S
··· 122 122 ret 123 123 .endm 124 124 125 - ENTRY(__aes_arm64_encrypt) 125 + SYM_FUNC_START(__aes_arm64_encrypt) 126 126 do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 127 - ENDPROC(__aes_arm64_encrypt) 127 + SYM_FUNC_END(__aes_arm64_encrypt) 128 128 129 129 .align 5 130 - ENTRY(__aes_arm64_decrypt) 130 + SYM_FUNC_START(__aes_arm64_decrypt) 131 131 do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0 132 - ENDPROC(__aes_arm64_decrypt) 132 + SYM_FUNC_END(__aes_arm64_decrypt)
+8 -8
arch/arm64/crypto/aes-modes.S
··· 22 22 #define ST5(x...) x 23 23 #endif 24 24 25 - aes_encrypt_block4x: 25 + SYM_FUNC_START_LOCAL(aes_encrypt_block4x) 26 26 encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 27 27 ret 28 - ENDPROC(aes_encrypt_block4x) 28 + SYM_FUNC_END(aes_encrypt_block4x) 29 29 30 - aes_decrypt_block4x: 30 + SYM_FUNC_START_LOCAL(aes_decrypt_block4x) 31 31 decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7 32 32 ret 33 - ENDPROC(aes_decrypt_block4x) 33 + SYM_FUNC_END(aes_decrypt_block4x) 34 34 35 35 #if MAX_STRIDE == 5 36 - aes_encrypt_block5x: 36 + SYM_FUNC_START_LOCAL(aes_encrypt_block5x) 37 37 encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 38 38 ret 39 - ENDPROC(aes_encrypt_block5x) 39 + SYM_FUNC_END(aes_encrypt_block5x) 40 40 41 - aes_decrypt_block5x: 41 + SYM_FUNC_START_LOCAL(aes_decrypt_block5x) 42 42 decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7 43 43 ret 44 - ENDPROC(aes_decrypt_block5x) 44 + SYM_FUNC_END(aes_decrypt_block5x) 45 45 #endif 46 46 47 47 /*
+2 -2
arch/arm64/crypto/aes-neon.S
··· 8 8 #include <linux/linkage.h> 9 9 #include <asm/assembler.h> 10 10 11 - #define AES_ENTRY(func) ENTRY(neon_ ## func) 12 - #define AES_ENDPROC(func) ENDPROC(neon_ ## func) 11 + #define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func) 12 + #define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func) 13 13 14 14 xtsmask .req v7 15 15 cbciv .req v7
+20 -20
arch/arm64/crypto/aes-neonbs-core.S
··· 380 380 /* 381 381 * void aesbs_convert_key(u8 out[], u32 const rk[], int rounds) 382 382 */ 383 - ENTRY(aesbs_convert_key) 383 + SYM_FUNC_START(aesbs_convert_key) 384 384 ld1 {v7.4s}, [x1], #16 // load round 0 key 385 385 ld1 {v17.4s}, [x1], #16 // load round 1 key 386 386 ··· 425 425 eor v17.16b, v17.16b, v7.16b 426 426 str q17, [x0] 427 427 ret 428 - ENDPROC(aesbs_convert_key) 428 + SYM_FUNC_END(aesbs_convert_key) 429 429 430 430 .align 4 431 - aesbs_encrypt8: 431 + SYM_FUNC_START_LOCAL(aesbs_encrypt8) 432 432 ldr q9, [bskey], #16 // round 0 key 433 433 ldr q8, M0SR 434 434 ldr q24, SR ··· 488 488 eor v2.16b, v2.16b, v12.16b 489 489 eor v5.16b, v5.16b, v12.16b 490 490 ret 491 - ENDPROC(aesbs_encrypt8) 491 + SYM_FUNC_END(aesbs_encrypt8) 492 492 493 493 .align 4 494 - aesbs_decrypt8: 494 + SYM_FUNC_START_LOCAL(aesbs_decrypt8) 495 495 lsl x9, rounds, #7 496 496 add bskey, bskey, x9 497 497 ··· 553 553 eor v3.16b, v3.16b, v12.16b 554 554 eor v5.16b, v5.16b, v12.16b 555 555 ret 556 - ENDPROC(aesbs_decrypt8) 556 + SYM_FUNC_END(aesbs_decrypt8) 557 557 558 558 /* 559 559 * aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, ··· 621 621 .endm 622 622 623 623 .align 4 624 - ENTRY(aesbs_ecb_encrypt) 624 + SYM_FUNC_START(aesbs_ecb_encrypt) 625 625 __ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5 626 - ENDPROC(aesbs_ecb_encrypt) 626 + SYM_FUNC_END(aesbs_ecb_encrypt) 627 627 628 628 .align 4 629 - ENTRY(aesbs_ecb_decrypt) 629 + SYM_FUNC_START(aesbs_ecb_decrypt) 630 630 __ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5 631 - ENDPROC(aesbs_ecb_decrypt) 631 + SYM_FUNC_END(aesbs_ecb_decrypt) 632 632 633 633 /* 634 634 * aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 635 635 * int blocks, u8 iv[]) 636 636 */ 637 637 .align 4 638 - ENTRY(aesbs_cbc_decrypt) 638 + SYM_FUNC_START(aesbs_cbc_decrypt) 639 639 frame_push 6 640 640 641 641 mov x19, x0 ··· 720 720 721 721 2: frame_pop 722 722 ret 723 - ENDPROC(aesbs_cbc_decrypt) 723 + SYM_FUNC_END(aesbs_cbc_decrypt) 724 724 725 725 .macro next_tweak, out, in, const, tmp 726 726 sshr \tmp\().2d, \in\().2d, #63 ··· 736 736 * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds, 737 737 * int blocks, u8 iv[]) 738 738 */ 739 - __xts_crypt8: 739 + SYM_FUNC_START_LOCAL(__xts_crypt8) 740 740 mov x6, #1 741 741 lsl x6, x6, x23 742 742 subs w23, w23, #8 ··· 789 789 0: mov bskey, x21 790 790 mov rounds, x22 791 791 br x7 792 - ENDPROC(__xts_crypt8) 792 + SYM_FUNC_END(__xts_crypt8) 793 793 794 794 .macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7 795 795 frame_push 6, 64 ··· 854 854 ret 855 855 .endm 856 856 857 - ENTRY(aesbs_xts_encrypt) 857 + SYM_FUNC_START(aesbs_xts_encrypt) 858 858 __xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5 859 - ENDPROC(aesbs_xts_encrypt) 859 + SYM_FUNC_END(aesbs_xts_encrypt) 860 860 861 - ENTRY(aesbs_xts_decrypt) 861 + SYM_FUNC_START(aesbs_xts_decrypt) 862 862 __xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5 863 - ENDPROC(aesbs_xts_decrypt) 863 + SYM_FUNC_END(aesbs_xts_decrypt) 864 864 865 865 .macro next_ctr, v 866 866 mov \v\().d[1], x8 ··· 874 874 * aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], 875 875 * int rounds, int blocks, u8 iv[], u8 final[]) 876 876 */ 877 - ENTRY(aesbs_ctr_encrypt) 877 + SYM_FUNC_START(aesbs_ctr_encrypt) 878 878 frame_push 8 879 879 880 880 mov x19, x0 ··· 1002 1002 7: cbz x25, 8b 1003 1003 st1 {v5.16b}, [x25] 1004 1004 b 8b 1005 - ENDPROC(aesbs_ctr_encrypt) 1005 + SYM_FUNC_END(aesbs_ctr_encrypt)
+8 -8
arch/arm64/crypto/chacha-neon-core.S
··· 36 36 * 37 37 * Clobbers: w3, x10, v4, v12 38 38 */ 39 - chacha_permute: 39 + SYM_FUNC_START_LOCAL(chacha_permute) 40 40 41 41 adr_l x10, ROT8 42 42 ld1 {v12.4s}, [x10] ··· 104 104 b.ne .Ldoubleround 105 105 106 106 ret 107 - ENDPROC(chacha_permute) 107 + SYM_FUNC_END(chacha_permute) 108 108 109 - ENTRY(chacha_block_xor_neon) 109 + SYM_FUNC_START(chacha_block_xor_neon) 110 110 // x0: Input state matrix, s 111 111 // x1: 1 data block output, o 112 112 // x2: 1 data block input, i ··· 143 143 144 144 ldp x29, x30, [sp], #16 145 145 ret 146 - ENDPROC(chacha_block_xor_neon) 146 + SYM_FUNC_END(chacha_block_xor_neon) 147 147 148 - ENTRY(hchacha_block_neon) 148 + SYM_FUNC_START(hchacha_block_neon) 149 149 // x0: Input state matrix, s 150 150 // x1: output (8 32-bit words) 151 151 // w2: nrounds ··· 163 163 164 164 ldp x29, x30, [sp], #16 165 165 ret 166 - ENDPROC(hchacha_block_neon) 166 + SYM_FUNC_END(hchacha_block_neon) 167 167 168 168 a0 .req w12 169 169 a1 .req w13 ··· 183 183 a15 .req w28 184 184 185 185 .align 6 186 - ENTRY(chacha_4block_xor_neon) 186 + SYM_FUNC_START(chacha_4block_xor_neon) 187 187 frame_push 10 188 188 189 189 // x0: Input state matrix, s ··· 845 845 eor v31.16b, v31.16b, v3.16b 846 846 st1 {v28.16b-v31.16b}, [x1] 847 847 b .Lout 848 - ENDPROC(chacha_4block_xor_neon) 848 + SYM_FUNC_END(chacha_4block_xor_neon) 849 849 850 850 .section ".rodata", "a", %progbits 851 851 .align L1_CACHE_SHIFT
+6 -6
arch/arm64/crypto/crct10dif-ce-core.S
··· 131 131 tbl bd4.16b, {\bd\().16b}, perm4.16b 132 132 .endm 133 133 134 - __pmull_p8_core: 134 + SYM_FUNC_START_LOCAL(__pmull_p8_core) 135 135 .L__pmull_p8_core: 136 136 ext t4.8b, ad.8b, ad.8b, #1 // A1 137 137 ext t5.8b, ad.8b, ad.8b, #2 // A2 ··· 194 194 eor t4.16b, t4.16b, t5.16b 195 195 eor t6.16b, t6.16b, t3.16b 196 196 ret 197 - ENDPROC(__pmull_p8_core) 197 + SYM_FUNC_END(__pmull_p8_core) 198 198 199 199 .macro __pmull_p8, rq, ad, bd, i 200 200 .ifnc \bd, fold_consts ··· 488 488 // 489 489 // Assumes len >= 16. 490 490 // 491 - ENTRY(crc_t10dif_pmull_p8) 491 + SYM_FUNC_START(crc_t10dif_pmull_p8) 492 492 crc_t10dif_pmull p8 493 - ENDPROC(crc_t10dif_pmull_p8) 493 + SYM_FUNC_END(crc_t10dif_pmull_p8) 494 494 495 495 .align 5 496 496 // ··· 498 498 // 499 499 // Assumes len >= 16. 500 500 // 501 - ENTRY(crc_t10dif_pmull_p64) 501 + SYM_FUNC_START(crc_t10dif_pmull_p64) 502 502 crc_t10dif_pmull p64 503 - ENDPROC(crc_t10dif_pmull_p64) 503 + SYM_FUNC_END(crc_t10dif_pmull_p64) 504 504 505 505 .section ".rodata", "a" 506 506 .align 4
+4 -4
arch/arm64/crypto/ghash-ce-core.S
··· 350 350 * void pmull_ghash_update(int blocks, u64 dg[], const char *src, 351 351 * struct ghash_key const *k, const char *head) 352 352 */ 353 - ENTRY(pmull_ghash_update_p64) 353 + SYM_FUNC_START(pmull_ghash_update_p64) 354 354 __pmull_ghash p64 355 - ENDPROC(pmull_ghash_update_p64) 355 + SYM_FUNC_END(pmull_ghash_update_p64) 356 356 357 - ENTRY(pmull_ghash_update_p8) 357 + SYM_FUNC_START(pmull_ghash_update_p8) 358 358 __pmull_ghash p8 359 - ENDPROC(pmull_ghash_update_p8) 359 + SYM_FUNC_END(pmull_ghash_update_p8) 360 360 361 361 KS0 .req v8 362 362 KS1 .req v9
+2 -2
arch/arm64/crypto/nh-neon-core.S
··· 62 62 * 63 63 * It's guaranteed that message_len % 16 == 0. 64 64 */ 65 - ENTRY(nh_neon) 65 + SYM_FUNC_START(nh_neon) 66 66 67 67 ld1 {K0.4s,K1.4s}, [KEY], #32 68 68 movi PASS0_SUMS.2d, #0 ··· 100 100 addp T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d 101 101 st1 {T0.16b,T1.16b}, [HASH] 102 102 ret 103 - ENDPROC(nh_neon) 103 + SYM_FUNC_END(nh_neon)
+2 -2
arch/arm64/crypto/sha1-ce-core.S
··· 65 65 * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, 66 66 * int blocks) 67 67 */ 68 - ENTRY(sha1_ce_transform) 68 + SYM_FUNC_START(sha1_ce_transform) 69 69 frame_push 3 70 70 71 71 mov x19, x0 ··· 160 160 str dgb, [x19, #16] 161 161 frame_pop 162 162 ret 163 - ENDPROC(sha1_ce_transform) 163 + SYM_FUNC_END(sha1_ce_transform)
+2 -2
arch/arm64/crypto/sha2-ce-core.S
··· 75 75 * int blocks) 76 76 */ 77 77 .text 78 - ENTRY(sha2_ce_transform) 78 + SYM_FUNC_START(sha2_ce_transform) 79 79 frame_push 3 80 80 81 81 mov x19, x0 ··· 166 166 4: st1 {dgav.4s, dgbv.4s}, [x19] 167 167 frame_pop 168 168 ret 169 - ENDPROC(sha2_ce_transform) 169 + SYM_FUNC_END(sha2_ce_transform)
+2 -2
arch/arm64/crypto/sha3-ce-core.S
··· 40 40 * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size) 41 41 */ 42 42 .text 43 - ENTRY(sha3_ce_transform) 43 + SYM_FUNC_START(sha3_ce_transform) 44 44 frame_push 4 45 45 46 46 mov x19, x0 ··· 218 218 st1 {v24.1d}, [x19] 219 219 frame_pop 220 220 ret 221 - ENDPROC(sha3_ce_transform) 221 + SYM_FUNC_END(sha3_ce_transform) 222 222 223 223 .section ".rodata", "a" 224 224 .align 8
+2 -2
arch/arm64/crypto/sha512-ce-core.S
··· 106 106 * int blocks) 107 107 */ 108 108 .text 109 - ENTRY(sha512_ce_transform) 109 + SYM_FUNC_START(sha512_ce_transform) 110 110 frame_push 3 111 111 112 112 mov x19, x0 ··· 216 216 3: st1 {v8.2d-v11.2d}, [x19] 217 217 frame_pop 218 218 ret 219 - ENDPROC(sha512_ce_transform) 219 + SYM_FUNC_END(sha512_ce_transform)
+2 -2
arch/arm64/crypto/sm3-ce-core.S
··· 73 73 * int blocks) 74 74 */ 75 75 .text 76 - ENTRY(sm3_ce_transform) 76 + SYM_FUNC_START(sm3_ce_transform) 77 77 /* load state */ 78 78 ld1 {v8.4s-v9.4s}, [x0] 79 79 rev64 v8.4s, v8.4s ··· 131 131 ext v9.16b, v9.16b, v9.16b, #8 132 132 st1 {v8.4s-v9.4s}, [x0] 133 133 ret 134 - ENDPROC(sm3_ce_transform) 134 + SYM_FUNC_END(sm3_ce_transform) 135 135 136 136 .section ".rodata", "a" 137 137 .align 3
+2 -2
arch/arm64/crypto/sm4-ce-core.S
··· 15 15 * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in); 16 16 */ 17 17 .text 18 - ENTRY(sm4_ce_do_crypt) 18 + SYM_FUNC_START(sm4_ce_do_crypt) 19 19 ld1 {v8.4s}, [x2] 20 20 ld1 {v0.4s-v3.4s}, [x0], #64 21 21 CPU_LE( rev32 v8.16b, v8.16b ) ··· 33 33 CPU_LE( rev32 v8.16b, v8.16b ) 34 34 st1 {v8.4s}, [x1] 35 35 ret 36 - ENDPROC(sm4_ce_do_crypt) 36 + SYM_FUNC_END(sm4_ce_do_crypt)