Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/crypto: arm64/aes: Migrate optimized code into library

Move the ARM64 optimized AES key expansion and single-block AES
encryption/decryption code into lib/crypto/, wire it up to the AES
library API, and remove the superseded crypto_cipher algorithms.

The result is that both the AES library and crypto_cipher APIs are now
optimized for ARM64. Previously only crypto_cipher was, and those
optimizations were not enabled by default, which this also fixes.

Note: to see the diff from arch/arm64/crypto/aes-ce-glue.c to
lib/crypto/arm64/aes.h, view this commit with 'git show -M10'.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260112192035.10427-12-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

+181 -290
+1 -25
arch/arm64/crypto/Kconfig
··· 37 37 Architecture: arm64 using: 38 38 - ARMv8.2 Crypto Extensions 39 39 40 - config CRYPTO_AES_ARM64 41 - tristate "Ciphers: AES, modes: ECB, CBC, CTR, CTS, XCTR, XTS" 42 - select CRYPTO_AES 43 - help 44 - Block ciphers: AES cipher algorithms (FIPS-197) 45 - Length-preserving ciphers: AES with ECB, CBC, CTR, CTS, 46 - XCTR, and XTS modes 47 - AEAD cipher: AES with CBC, ESSIV, and SHA-256 48 - for fscrypt and dm-crypt 49 - 50 - Architecture: arm64 51 - 52 - config CRYPTO_AES_ARM64_CE 53 - tristate "Ciphers: AES (ARMv8 Crypto Extensions)" 54 - depends on KERNEL_MODE_NEON 55 - select CRYPTO_ALGAPI 56 - select CRYPTO_LIB_AES 57 - help 58 - Block ciphers: AES cipher algorithms (FIPS-197) 59 - 60 - Architecture: arm64 using: 61 - - ARMv8 Crypto Extensions 62 - 63 40 config CRYPTO_AES_ARM64_CE_BLK 64 41 tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (ARMv8 Crypto Extensions)" 65 42 depends on KERNEL_MODE_NEON 66 43 select CRYPTO_SKCIPHER 67 - select CRYPTO_AES_ARM64_CE 44 + select CRYPTO_LIB_AES 68 45 select CRYPTO_LIB_SHA256 69 46 help 70 47 Length-preserving ciphers: AES cipher algorithms (FIPS-197) ··· 142 165 tristate "AEAD cipher: AES in CCM mode (ARMv8 Crypto Extensions)" 143 166 depends on KERNEL_MODE_NEON 144 167 select CRYPTO_ALGAPI 145 - select CRYPTO_AES_ARM64_CE 146 168 select CRYPTO_AES_ARM64_CE_BLK 147 169 select CRYPTO_AEAD 148 170 select CRYPTO_LIB_AES
-6
arch/arm64/crypto/Makefile
··· 29 29 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o 30 30 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o 31 31 32 - obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o 33 - aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o 34 - 35 32 obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o 36 33 aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o 37 34 ··· 37 40 38 41 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o 39 42 aes-neon-blk-y := aes-glue-neon.o aes-neon.o 40 - 41 - obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o 42 - aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o 43 43 44 44 obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o 45 45 aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
-2
arch/arm64/crypto/aes-ce-ccm-glue.c
··· 17 17 18 18 #include <asm/simd.h> 19 19 20 - #include "aes-ce-setkey.h" 21 - 22 20 MODULE_IMPORT_NS("CRYPTO_INTERNAL"); 23 21 24 22 static int num_rounds(struct crypto_aes_ctx *ctx)
arch/arm64/crypto/aes-ce-core.S lib/crypto/arm64/aes-ce-core.S
-178
arch/arm64/crypto/aes-ce-glue.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * aes-ce-cipher.c - core AES cipher using ARMv8 Crypto Extensions 4 - * 5 - * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> 6 - */ 7 - 8 - #include <asm/neon.h> 9 - #include <asm/simd.h> 10 - #include <linux/unaligned.h> 11 - #include <crypto/aes.h> 12 - #include <crypto/algapi.h> 13 - #include <crypto/internal/simd.h> 14 - #include <linux/cpufeature.h> 15 - #include <linux/module.h> 16 - 17 - #include "aes-ce-setkey.h" 18 - 19 - MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions"); 20 - MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 21 - MODULE_LICENSE("GPL v2"); 22 - 23 - struct aes_block { 24 - u8 b[AES_BLOCK_SIZE]; 25 - }; 26 - 27 - asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 28 - asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 29 - 30 - asmlinkage u32 __aes_ce_sub(u32 l); 31 - asmlinkage void __aes_ce_invert(struct aes_block *out, 32 - const struct aes_block *in); 33 - 34 - static int num_rounds(struct crypto_aes_ctx *ctx) 35 - { 36 - /* 37 - * # of rounds specified by AES: 38 - * 128 bit key 10 rounds 39 - * 192 bit key 12 rounds 40 - * 256 bit key 14 rounds 41 - * => n byte key => 6 + (n/4) rounds 42 - */ 43 - return 6 + ctx->key_length / 4; 44 - } 45 - 46 - static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) 47 - { 48 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 49 - 50 - if (!crypto_simd_usable()) { 51 - aes_encrypt(ctx, dst, src); 52 - return; 53 - } 54 - 55 - scoped_ksimd() 56 - __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); 57 - } 58 - 59 - static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) 60 - { 61 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 62 - 63 - if (!crypto_simd_usable()) { 64 - aes_decrypt(ctx, dst, src); 65 - return; 66 - } 67 - 68 - scoped_ksimd() 69 - __aes_ce_decrypt(ctx->key_dec, 
dst, src, num_rounds(ctx)); 70 - } 71 - 72 - int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, 73 - unsigned int key_len) 74 - { 75 - /* 76 - * The AES key schedule round constants 77 - */ 78 - static u8 const rcon[] = { 79 - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 80 - }; 81 - 82 - u32 kwords = key_len / sizeof(u32); 83 - struct aes_block *key_enc, *key_dec; 84 - int i, j; 85 - 86 - if (key_len != AES_KEYSIZE_128 && 87 - key_len != AES_KEYSIZE_192 && 88 - key_len != AES_KEYSIZE_256) 89 - return -EINVAL; 90 - 91 - ctx->key_length = key_len; 92 - for (i = 0; i < kwords; i++) 93 - ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32)); 94 - 95 - scoped_ksimd() { 96 - for (i = 0; i < sizeof(rcon); i++) { 97 - u32 *rki = ctx->key_enc + (i * kwords); 98 - u32 *rko = rki + kwords; 99 - 100 - rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ 101 - rcon[i] ^ rki[0]; 102 - rko[1] = rko[0] ^ rki[1]; 103 - rko[2] = rko[1] ^ rki[2]; 104 - rko[3] = rko[2] ^ rki[3]; 105 - 106 - if (key_len == AES_KEYSIZE_192) { 107 - if (i >= 7) 108 - break; 109 - rko[4] = rko[3] ^ rki[4]; 110 - rko[5] = rko[4] ^ rki[5]; 111 - } else if (key_len == AES_KEYSIZE_256) { 112 - if (i >= 6) 113 - break; 114 - rko[4] = __aes_ce_sub(rko[3]) ^ rki[4]; 115 - rko[5] = rko[4] ^ rki[5]; 116 - rko[6] = rko[5] ^ rki[6]; 117 - rko[7] = rko[6] ^ rki[7]; 118 - } 119 - } 120 - 121 - /* 122 - * Generate the decryption keys for the Equivalent Inverse 123 - * Cipher. This involves reversing the order of the round 124 - * keys, and applying the Inverse Mix Columns transformation on 125 - * all but the first and the last one. 
126 - */ 127 - key_enc = (struct aes_block *)ctx->key_enc; 128 - key_dec = (struct aes_block *)ctx->key_dec; 129 - j = num_rounds(ctx); 130 - 131 - key_dec[0] = key_enc[j]; 132 - for (i = 1, j--; j > 0; i++, j--) 133 - __aes_ce_invert(key_dec + i, key_enc + j); 134 - key_dec[i] = key_enc[0]; 135 - } 136 - 137 - return 0; 138 - } 139 - EXPORT_SYMBOL(ce_aes_expandkey); 140 - 141 - int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key, 142 - unsigned int key_len) 143 - { 144 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 145 - 146 - return ce_aes_expandkey(ctx, in_key, key_len); 147 - } 148 - EXPORT_SYMBOL(ce_aes_setkey); 149 - 150 - static struct crypto_alg aes_alg = { 151 - .cra_name = "aes", 152 - .cra_driver_name = "aes-ce", 153 - .cra_priority = 250, 154 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 155 - .cra_blocksize = AES_BLOCK_SIZE, 156 - .cra_ctxsize = sizeof(struct crypto_aes_ctx), 157 - .cra_module = THIS_MODULE, 158 - .cra_cipher = { 159 - .cia_min_keysize = AES_MIN_KEY_SIZE, 160 - .cia_max_keysize = AES_MAX_KEY_SIZE, 161 - .cia_setkey = ce_aes_setkey, 162 - .cia_encrypt = aes_cipher_encrypt, 163 - .cia_decrypt = aes_cipher_decrypt 164 - } 165 - }; 166 - 167 - static int __init aes_mod_init(void) 168 - { 169 - return crypto_register_alg(&aes_alg); 170 - } 171 - 172 - static void __exit aes_mod_exit(void) 173 - { 174 - crypto_unregister_alg(&aes_alg); 175 - } 176 - 177 - module_cpu_feature_match(AES, aes_mod_init); 178 - module_exit(aes_mod_exit);
-6
arch/arm64/crypto/aes-ce-setkey.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - 3 - int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key, 4 - unsigned int key_len); 5 - int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, 6 - unsigned int key_len);
arch/arm64/crypto/aes-cipher-core.S lib/crypto/arm64/aes-cipher-core.S
-71
arch/arm64/crypto/aes-cipher-glue.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Scalar AES core transform 4 - * 5 - * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org> 6 - */ 7 - 8 - #include <crypto/aes.h> 9 - #include <crypto/algapi.h> 10 - #include <linux/module.h> 11 - 12 - asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 13 - asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 14 - 15 - static int aes_arm64_setkey(struct crypto_tfm *tfm, const u8 *in_key, 16 - unsigned int key_len) 17 - { 18 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 19 - 20 - return aes_expandkey(ctx, in_key, key_len); 21 - } 22 - 23 - static void aes_arm64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 24 - { 25 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 26 - int rounds = 6 + ctx->key_length / 4; 27 - 28 - __aes_arm64_encrypt(ctx->key_enc, out, in, rounds); 29 - } 30 - 31 - static void aes_arm64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 32 - { 33 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 34 - int rounds = 6 + ctx->key_length / 4; 35 - 36 - __aes_arm64_decrypt(ctx->key_dec, out, in, rounds); 37 - } 38 - 39 - static struct crypto_alg aes_alg = { 40 - .cra_name = "aes", 41 - .cra_driver_name = "aes-arm64", 42 - .cra_priority = 200, 43 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 44 - .cra_blocksize = AES_BLOCK_SIZE, 45 - .cra_ctxsize = sizeof(struct crypto_aes_ctx), 46 - .cra_module = THIS_MODULE, 47 - 48 - .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, 49 - .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, 50 - .cra_cipher.cia_setkey = aes_arm64_setkey, 51 - .cra_cipher.cia_encrypt = aes_arm64_encrypt, 52 - .cra_cipher.cia_decrypt = aes_arm64_decrypt 53 - }; 54 - 55 - static int __init aes_init(void) 56 - { 57 - return crypto_register_alg(&aes_alg); 58 - } 59 - 60 - static void __exit aes_fini(void) 61 - { 62 - crypto_unregister_alg(&aes_alg); 63 - } 64 - 65 - module_init(aes_init); 66 - 
module_exit(aes_fini); 67 - 68 - MODULE_DESCRIPTION("Scalar AES cipher for arm64"); 69 - MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 70 - MODULE_LICENSE("GPL v2"); 71 - MODULE_ALIAS_CRYPTO("aes");
-2
arch/arm64/crypto/aes-glue.c
··· 21 21 #include <asm/hwcap.h> 22 22 #include <asm/simd.h> 23 23 24 - #include "aes-ce-setkey.h" 25 - 26 24 #ifdef USE_V8_CRYPTO_EXTENSIONS 27 25 #define MODE "ce" 28 26 #define PRIO 300
+10
include/crypto/aes.h
··· 116 116 int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, 117 117 unsigned int key_len); 118 118 119 + /* 120 + * The following functions are temporarily exported for use by the AES mode 121 + * implementations in arch/$(SRCARCH)/crypto/. These exports will go away when 122 + * that code is migrated into lib/crypto/. 123 + */ 124 + #ifdef CONFIG_ARM64 125 + int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, 126 + unsigned int key_len); 127 + #endif 128 + 119 129 /** 120 130 * aes_preparekey() - Prepare an AES key for encryption and decryption 121 131 * @key: (output) The key structure to initialize
+1
lib/crypto/Kconfig
··· 15 15 bool 16 16 depends on CRYPTO_LIB_AES && !UML && !KMSAN 17 17 default y if ARM 18 + default y if ARM64 18 19 19 20 config CRYPTO_LIB_AESCFB 20 21 tristate
+5
lib/crypto/Makefile
··· 24 24 25 25 libaes-$(CONFIG_ARM) += arm/aes-cipher-core.o 26 26 27 + ifeq ($(CONFIG_ARM64),y) 28 + libaes-y += arm64/aes-cipher-core.o 29 + libaes-$(CONFIG_KERNEL_MODE_NEON) += arm64/aes-ce-core.o 30 + endif 31 + 27 32 endif # CONFIG_CRYPTO_LIB_AES_ARCH 28 33 29 34 ################################################################################
+164
lib/crypto/arm64/aes.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * AES block cipher, optimized for ARM64 4 + * 5 + * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> 6 + * Copyright 2026 Google LLC 7 + */ 8 + 9 + #include <asm/neon.h> 10 + #include <asm/simd.h> 11 + #include <linux/unaligned.h> 12 + #include <linux/cpufeature.h> 13 + 14 + static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_aes); 15 + 16 + struct aes_block { 17 + u8 b[AES_BLOCK_SIZE]; 18 + }; 19 + 20 + asmlinkage void __aes_arm64_encrypt(const u32 rk[], u8 out[AES_BLOCK_SIZE], 21 + const u8 in[AES_BLOCK_SIZE], int rounds); 22 + asmlinkage void __aes_arm64_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE], 23 + const u8 in[AES_BLOCK_SIZE], int rounds); 24 + asmlinkage void __aes_ce_encrypt(const u32 rk[], u8 out[AES_BLOCK_SIZE], 25 + const u8 in[AES_BLOCK_SIZE], int rounds); 26 + asmlinkage void __aes_ce_decrypt(const u32 inv_rk[], u8 out[AES_BLOCK_SIZE], 27 + const u8 in[AES_BLOCK_SIZE], int rounds); 28 + asmlinkage u32 __aes_ce_sub(u32 l); 29 + asmlinkage void __aes_ce_invert(struct aes_block *out, 30 + const struct aes_block *in); 31 + 32 + /* 33 + * Expand an AES key using the crypto extensions if supported and usable or 34 + * generic code otherwise. The expanded key format is compatible between the 35 + * two cases. The outputs are @rndkeys (required) and @inv_rndkeys (optional). 
36 + */ 37 + static void aes_expandkey_arm64(u32 rndkeys[], u32 *inv_rndkeys, 38 + const u8 *in_key, int key_len, int nrounds) 39 + { 40 + /* 41 + * The AES key schedule round constants 42 + */ 43 + static u8 const rcon[] = { 44 + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 45 + }; 46 + 47 + u32 kwords = key_len / sizeof(u32); 48 + struct aes_block *key_enc, *key_dec; 49 + int i, j; 50 + 51 + if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || 52 + !static_branch_likely(&have_aes) || unlikely(!may_use_simd())) { 53 + aes_expandkey_generic(rndkeys, inv_rndkeys, in_key, key_len); 54 + return; 55 + } 56 + 57 + for (i = 0; i < kwords; i++) 58 + rndkeys[i] = get_unaligned_le32(&in_key[i * sizeof(u32)]); 59 + 60 + scoped_ksimd() { 61 + for (i = 0; i < sizeof(rcon); i++) { 62 + u32 *rki = &rndkeys[i * kwords]; 63 + u32 *rko = rki + kwords; 64 + 65 + rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ 66 + rcon[i] ^ rki[0]; 67 + rko[1] = rko[0] ^ rki[1]; 68 + rko[2] = rko[1] ^ rki[2]; 69 + rko[3] = rko[2] ^ rki[3]; 70 + 71 + if (key_len == AES_KEYSIZE_192) { 72 + if (i >= 7) 73 + break; 74 + rko[4] = rko[3] ^ rki[4]; 75 + rko[5] = rko[4] ^ rki[5]; 76 + } else if (key_len == AES_KEYSIZE_256) { 77 + if (i >= 6) 78 + break; 79 + rko[4] = __aes_ce_sub(rko[3]) ^ rki[4]; 80 + rko[5] = rko[4] ^ rki[5]; 81 + rko[6] = rko[5] ^ rki[6]; 82 + rko[7] = rko[6] ^ rki[7]; 83 + } 84 + } 85 + 86 + /* 87 + * Generate the decryption keys for the Equivalent Inverse 88 + * Cipher. This involves reversing the order of the round 89 + * keys, and applying the Inverse Mix Columns transformation on 90 + * all but the first and the last one. 
91 + */ 92 + if (inv_rndkeys) { 93 + key_enc = (struct aes_block *)rndkeys; 94 + key_dec = (struct aes_block *)inv_rndkeys; 95 + j = nrounds; 96 + 97 + key_dec[0] = key_enc[j]; 98 + for (i = 1, j--; j > 0; i++, j--) 99 + __aes_ce_invert(key_dec + i, key_enc + j); 100 + key_dec[i] = key_enc[0]; 101 + } 102 + } 103 + } 104 + 105 + static void aes_preparekey_arch(union aes_enckey_arch *k, 106 + union aes_invkey_arch *inv_k, 107 + const u8 *in_key, int key_len, int nrounds) 108 + { 109 + aes_expandkey_arm64(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL, 110 + in_key, key_len, nrounds); 111 + } 112 + 113 + /* 114 + * This is here temporarily until the remaining AES mode implementations are 115 + * migrated from arch/arm64/crypto/ to lib/crypto/arm64/. 116 + */ 117 + int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, 118 + unsigned int key_len) 119 + { 120 + if (aes_check_keylen(key_len) != 0) 121 + return -EINVAL; 122 + ctx->key_length = key_len; 123 + aes_expandkey_arm64(ctx->key_enc, ctx->key_dec, in_key, key_len, 124 + 6 + key_len / 4); 125 + return 0; 126 + } 127 + EXPORT_SYMBOL(ce_aes_expandkey); 128 + 129 + static void aes_encrypt_arch(const struct aes_enckey *key, 130 + u8 out[AES_BLOCK_SIZE], 131 + const u8 in[AES_BLOCK_SIZE]) 132 + { 133 + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && 134 + static_branch_likely(&have_aes) && likely(may_use_simd())) { 135 + scoped_ksimd() 136 + __aes_ce_encrypt(key->k.rndkeys, out, in, key->nrounds); 137 + } else { 138 + __aes_arm64_encrypt(key->k.rndkeys, out, in, key->nrounds); 139 + } 140 + } 141 + 142 + static void aes_decrypt_arch(const struct aes_key *key, 143 + u8 out[AES_BLOCK_SIZE], 144 + const u8 in[AES_BLOCK_SIZE]) 145 + { 146 + if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && 147 + static_branch_likely(&have_aes) && likely(may_use_simd())) { 148 + scoped_ksimd() 149 + __aes_ce_decrypt(key->inv_k.inv_rndkeys, out, in, 150 + key->nrounds); 151 + } else { 152 + __aes_arm64_decrypt(key->inv_k.inv_rndkeys, out, in, 
153 + key->nrounds); 154 + } 155 + } 156 + 157 + #ifdef CONFIG_KERNEL_MODE_NEON 158 + #define aes_mod_init_arch aes_mod_init_arch 159 + static void aes_mod_init_arch(void) 160 + { 161 + if (cpu_have_named_feature(AES)) 162 + static_branch_enable(&have_aes); 163 + } 164 + #endif /* CONFIG_KERNEL_MODE_NEON */