Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/crypto: arm/aes: Migrate optimized code into library

Move the ARM-optimized single-block AES en/decryption code into
lib/crypto/, wire it up to the AES library API, and remove the
superseded "aes-arm" crypto_cipher algorithm.

The result is that both the AES library and crypto_cipher APIs are now
optimized for ARM, whereas previously only crypto_cipher was (and the
optimizations weren't enabled by default, which this fixes as well).

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260112192035.10427-11-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>

+63 -114
-1
arch/arm/configs/milbeaut_m10v_defconfig
··· 98 98 CONFIG_CRYPTO_AES=y 99 99 CONFIG_CRYPTO_SEQIV=m 100 100 CONFIG_CRYPTO_GHASH_ARM_CE=m 101 - CONFIG_CRYPTO_AES_ARM=m 102 101 CONFIG_CRYPTO_AES_ARM_BS=m 103 102 CONFIG_CRYPTO_AES_ARM_CE=m 104 103 # CONFIG_CRYPTO_HW is not set
+1 -1
arch/arm/configs/multi_v7_defconfig
··· 1286 1286 CONFIG_CRYPTO_USER_API_RNG=m 1287 1287 CONFIG_CRYPTO_USER_API_AEAD=m 1288 1288 CONFIG_CRYPTO_GHASH_ARM_CE=m 1289 - CONFIG_CRYPTO_AES_ARM=m 1289 + CONFIG_CRYPTO_AES=m 1290 1290 CONFIG_CRYPTO_AES_ARM_BS=m 1291 1291 CONFIG_CRYPTO_AES_ARM_CE=m 1292 1292 CONFIG_CRYPTO_DEV_SUN4I_SS=m
+1 -1
arch/arm/configs/omap2plus_defconfig
··· 706 706 CONFIG_SECURITY=y 707 707 CONFIG_CRYPTO_MICHAEL_MIC=y 708 708 CONFIG_CRYPTO_GHASH_ARM_CE=m 709 - CONFIG_CRYPTO_AES_ARM=m 709 + CONFIG_CRYPTO_AES=m 710 710 CONFIG_CRYPTO_AES_ARM_BS=m 711 711 CONFIG_CRYPTO_DEV_OMAP=m 712 712 CONFIG_CRYPTO_DEV_OMAP_SHAM=m
+1 -1
arch/arm/configs/pxa_defconfig
··· 657 657 CONFIG_CRYPTO_XCBC=m 658 658 CONFIG_CRYPTO_DEFLATE=y 659 659 CONFIG_CRYPTO_LZO=y 660 - CONFIG_CRYPTO_AES_ARM=m 660 + CONFIG_CRYPTO_AES=m 661 661 CONFIG_FONTS=y 662 662 CONFIG_FONT_8x8=y 663 663 CONFIG_FONT_8x16=y
-18
arch/arm/crypto/Kconfig
··· 23 23 that is part of the ARMv8 Crypto Extensions, or a slower variant that 24 24 uses the vmull.p8 instruction that is part of the basic NEON ISA. 25 25 26 - config CRYPTO_AES_ARM 27 - tristate "Ciphers: AES" 28 - select CRYPTO_ALGAPI 29 - select CRYPTO_AES 30 - help 31 - Block ciphers: AES cipher algorithms (FIPS-197) 32 - 33 - Architecture: arm 34 - 35 - On ARM processors without the Crypto Extensions, this is the 36 - fastest AES implementation for single blocks. For multiple 37 - blocks, the NEON bit-sliced implementation is usually faster. 38 - 39 - This implementation may be vulnerable to cache timing attacks, 40 - since it uses lookup tables. However, as countermeasures it 41 - disables IRQs and preloads the tables; it is hoped this makes 42 - such attacks very difficult. 43 - 44 26 config CRYPTO_AES_ARM_BS 45 27 tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)" 46 28 depends on KERNEL_MODE_NEON
-2
arch/arm/crypto/Makefile
··· 3 3 # Arch-specific CryptoAPI modules. 4 4 # 5 5 6 - obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o 7 6 obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o 8 7 9 8 obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o 10 9 obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o 11 10 12 - aes-arm-y := aes-cipher-core.o aes-cipher-glue.o 13 11 aes-arm-bs-y := aes-neonbs-core.o aes-neonbs-glue.o 14 12 aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o 15 13 ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
arch/arm/crypto/aes-cipher-core.S lib/crypto/arm/aes-cipher-core.S
-77
arch/arm/crypto/aes-cipher-glue.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Scalar AES core transform 4 - * 5 - * Copyright (C) 2017 Linaro Ltd. 6 - * Author: Ard Biesheuvel <ard.biesheuvel@linaro.org> 7 - */ 8 - 9 - #include <crypto/aes.h> 10 - #include <crypto/algapi.h> 11 - #include <linux/module.h> 12 - #include "aes-cipher.h" 13 - 14 - EXPORT_SYMBOL_GPL(__aes_arm_encrypt); 15 - EXPORT_SYMBOL_GPL(__aes_arm_decrypt); 16 - 17 - static int aes_arm_setkey(struct crypto_tfm *tfm, const u8 *in_key, 18 - unsigned int key_len) 19 - { 20 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 21 - 22 - return aes_expandkey(ctx, in_key, key_len); 23 - } 24 - 25 - static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 26 - { 27 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 28 - int rounds = 6 + ctx->key_length / 4; 29 - 30 - __aes_arm_encrypt(ctx->key_enc, rounds, in, out); 31 - } 32 - 33 - static void aes_arm_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 34 - { 35 - struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 36 - int rounds = 6 + ctx->key_length / 4; 37 - 38 - __aes_arm_decrypt(ctx->key_dec, rounds, in, out); 39 - } 40 - 41 - static struct crypto_alg aes_alg = { 42 - .cra_name = "aes", 43 - .cra_driver_name = "aes-arm", 44 - .cra_priority = 200, 45 - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 46 - .cra_blocksize = AES_BLOCK_SIZE, 47 - .cra_ctxsize = sizeof(struct crypto_aes_ctx), 48 - .cra_module = THIS_MODULE, 49 - 50 - .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, 51 - .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, 52 - .cra_cipher.cia_setkey = aes_arm_setkey, 53 - .cra_cipher.cia_encrypt = aes_arm_encrypt, 54 - .cra_cipher.cia_decrypt = aes_arm_decrypt, 55 - 56 - #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 57 - .cra_alignmask = 3, 58 - #endif 59 - }; 60 - 61 - static int __init aes_init(void) 62 - { 63 - return crypto_register_alg(&aes_alg); 64 - } 65 - 66 - static void __exit aes_fini(void) 67 - { 68 - crypto_unregister_alg(&aes_alg); 69 - } 
70 - 71 - module_init(aes_init); 72 - module_exit(aes_fini); 73 - 74 - MODULE_DESCRIPTION("Scalar AES cipher for ARM"); 75 - MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 76 - MODULE_LICENSE("GPL v2"); 77 - MODULE_ALIAS_CRYPTO("aes");
-13
arch/arm/crypto/aes-cipher.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - #ifndef ARM_CRYPTO_AES_CIPHER_H 3 - #define ARM_CRYPTO_AES_CIPHER_H 4 - 5 - #include <linux/linkage.h> 6 - #include <linux/types.h> 7 - 8 - asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, 9 - const u8 *in, u8 *out); 10 - asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds, 11 - const u8 *in, u8 *out); 12 - 13 - #endif /* ARM_CRYPTO_AES_CIPHER_H */
+1
lib/crypto/Kconfig
··· 14 14 config CRYPTO_LIB_AES_ARCH 15 15 bool 16 16 depends on CRYPTO_LIB_AES && !UML && !KMSAN 17 + default y if ARM 17 18 18 19 config CRYPTO_LIB_AESCFB 19 20 tristate
+3
lib/crypto/Makefile
··· 21 21 libaes-y := aes.o 22 22 ifeq ($(CONFIG_CRYPTO_LIB_AES_ARCH),y) 23 23 CFLAGS_aes.o += -I$(src)/$(SRCARCH) 24 + 25 + libaes-$(CONFIG_ARM) += arm/aes-cipher-core.o 26 + 24 27 endif # CONFIG_CRYPTO_LIB_AES_ARCH 25 28 26 29 ################################################################################
+56
lib/crypto/arm/aes.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * AES block cipher, optimized for ARM 4 + * 5 + * Copyright (C) 2017 Linaro Ltd. 6 + * Copyright 2026 Google LLC 7 + */ 8 + 9 + asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, 10 + const u8 in[AES_BLOCK_SIZE], 11 + u8 out[AES_BLOCK_SIZE]); 12 + asmlinkage void __aes_arm_decrypt(const u32 inv_rk[], int rounds, 13 + const u8 in[AES_BLOCK_SIZE], 14 + u8 out[AES_BLOCK_SIZE]); 15 + 16 + static void aes_preparekey_arch(union aes_enckey_arch *k, 17 + union aes_invkey_arch *inv_k, 18 + const u8 *in_key, int key_len, int nrounds) 19 + { 20 + aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL, 21 + in_key, key_len); 22 + } 23 + 24 + static void aes_encrypt_arch(const struct aes_enckey *key, 25 + u8 out[AES_BLOCK_SIZE], 26 + const u8 in[AES_BLOCK_SIZE]) 27 + { 28 + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 29 + !IS_ALIGNED((uintptr_t)out | (uintptr_t)in, 4)) { 30 + u8 bounce_buf[AES_BLOCK_SIZE] __aligned(4); 31 + 32 + memcpy(bounce_buf, in, AES_BLOCK_SIZE); 33 + __aes_arm_encrypt(key->k.rndkeys, key->nrounds, bounce_buf, 34 + bounce_buf); 35 + memcpy(out, bounce_buf, AES_BLOCK_SIZE); 36 + return; 37 + } 38 + __aes_arm_encrypt(key->k.rndkeys, key->nrounds, in, out); 39 + } 40 + 41 + static void aes_decrypt_arch(const struct aes_key *key, 42 + u8 out[AES_BLOCK_SIZE], 43 + const u8 in[AES_BLOCK_SIZE]) 44 + { 45 + if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && 46 + !IS_ALIGNED((uintptr_t)out | (uintptr_t)in, 4)) { 47 + u8 bounce_buf[AES_BLOCK_SIZE] __aligned(4); 48 + 49 + memcpy(bounce_buf, in, AES_BLOCK_SIZE); 50 + __aes_arm_decrypt(key->inv_k.inv_rndkeys, key->nrounds, 51 + bounce_buf, bounce_buf); 52 + memcpy(out, bounce_buf, AES_BLOCK_SIZE); 53 + return; 54 + } 55 + __aes_arm_decrypt(key->inv_k.inv_rndkeys, key->nrounds, in, out); 56 + }