Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: arm/aes-neonbs - resolve fallback cipher at runtime

Currently, the bit-sliced NEON AES code for ARM has a link-time
dependency on the scalar ARM asm implementation, which it uses as a
fallback to perform CBC encryption and the encryption of the initial
XTS tweak.

The bit-sliced NEON code is both fast and time-invariant, which makes
it a reasonable default on hardware that supports it. However, the
ARM asm code it pulls in is not time-invariant, and due to the way it
is linked in, cannot be overridden by the new generic time-invariant
driver. In fact, it will not be used at all, given that the ARM asm
code registers itself as a cipher with a priority that exceeds the
priority of the fixed-time cipher.

So remove the link-time dependency, and allocate the fallback cipher
via the crypto API. Note that this requires this driver's module_init
call to be replaced with late_initcall, so that the (possibly generic)
fallback cipher is guaranteed to be available when the built-in test
is performed at registration time.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Ard Biesheuvel and committed by
Herbert Xu
b56f5cbc 3ea996dd

+46 -16
+1 -1
arch/arm/crypto/Kconfig
··· 73 73 depends on KERNEL_MODE_NEON 74 74 select CRYPTO_BLKCIPHER 75 75 select CRYPTO_SIMD 76 - select CRYPTO_AES_ARM 76 + select CRYPTO_AES 77 77 help 78 78 Use a faster and more secure NEON based implementation of AES in CBC, 79 79 CTR and XTS modes
+45 -15
arch/arm/crypto/aes-neonbs-glue.c
··· 42 42 asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], 43 43 int rounds, int blocks, u8 iv[]); 44 44 45 - asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds, const u8 in[], 46 - u8 out[]); 47 - 48 45 struct aesbs_ctx { 49 46 int rounds; 50 47 u8 rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE); ··· 49 52 50 53 struct aesbs_cbc_ctx { 51 54 struct aesbs_ctx key; 52 - u32 enc[AES_MAX_KEYLENGTH_U32]; 55 + struct crypto_cipher *enc_tfm; 53 56 }; 54 57 55 58 struct aesbs_xts_ctx { 56 59 struct aesbs_ctx key; 57 - u32 twkey[AES_MAX_KEYLENGTH_U32]; 60 + struct crypto_cipher *tweak_tfm; 58 61 }; 59 62 60 63 static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key, ··· 129 132 130 133 ctx->key.rounds = 6 + key_len / 4; 131 134 132 - memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc)); 133 - 134 135 kernel_neon_begin(); 135 136 aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds); 136 137 kernel_neon_end(); 137 138 138 - return 0; 139 + return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len); 139 140 } 140 141 141 142 static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) 142 143 { 143 144 struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); 144 145 145 - __aes_arm_encrypt(ctx->enc, ctx->key.rounds, src, dst); 146 + crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src); 146 147 } 147 148 148 149 static int cbc_encrypt(struct skcipher_request *req) ··· 174 179 kernel_neon_end(); 175 180 176 181 return err; 182 + } 183 + 184 + static int cbc_init(struct crypto_tfm *tfm) 185 + { 186 + struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 187 + 188 + ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0); 189 + if (IS_ERR(ctx->enc_tfm)) 190 + return PTR_ERR(ctx->enc_tfm); 191 + return 0; 192 + } 193 + 194 + static void cbc_exit(struct crypto_tfm *tfm) 195 + { 196 + struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 197 + 198 + crypto_free_cipher(ctx->enc_tfm); 177 199 } 178 200 179 201 static int 
ctr_encrypt(struct skcipher_request *req) ··· 240 228 unsigned int key_len) 241 229 { 242 230 struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 243 - struct crypto_aes_ctx rk; 244 231 int err; 245 232 246 233 err = xts_verify_key(tfm, in_key, key_len); ··· 247 236 return err; 248 237 249 238 key_len /= 2; 250 - err = crypto_aes_expand_key(&rk, in_key + key_len, key_len); 239 + err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len); 251 240 if (err) 252 241 return err; 253 242 254 - memcpy(ctx->twkey, rk.key_enc, sizeof(ctx->twkey)); 255 - 256 243 return aesbs_setkey(tfm, in_key, key_len); 244 + } 245 + 246 + static int xts_init(struct crypto_tfm *tfm) 247 + { 248 + struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); 249 + 250 + ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0); 251 + if (IS_ERR(ctx->tweak_tfm)) 252 + return PTR_ERR(ctx->tweak_tfm); 253 + return 0; 254 + } 255 + 256 + static void xts_exit(struct crypto_tfm *tfm) 257 + { 258 + struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); 259 + 260 + crypto_free_cipher(ctx->tweak_tfm); 257 261 } 258 262 259 263 static int __xts_crypt(struct skcipher_request *req, ··· 282 256 283 257 err = skcipher_walk_virt(&walk, req, true); 284 258 285 - __aes_arm_encrypt(ctx->twkey, ctx->key.rounds, walk.iv, walk.iv); 259 + crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv); 286 260 287 261 kernel_neon_begin(); 288 262 while (walk.nbytes >= AES_BLOCK_SIZE) { ··· 335 309 .base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx), 336 310 .base.cra_module = THIS_MODULE, 337 311 .base.cra_flags = CRYPTO_ALG_INTERNAL, 312 + .base.cra_init = cbc_init, 313 + .base.cra_exit = cbc_exit, 338 314 339 315 .min_keysize = AES_MIN_KEY_SIZE, 340 316 .max_keysize = AES_MAX_KEY_SIZE, ··· 370 342 .base.cra_ctxsize = sizeof(struct aesbs_xts_ctx), 371 343 .base.cra_module = THIS_MODULE, 372 344 .base.cra_flags = CRYPTO_ALG_INTERNAL, 345 + .base.cra_init = xts_init, 346 + .base.cra_exit = xts_exit, 373 347 374 348 .min_keysize = 2 * 
AES_MIN_KEY_SIZE, 375 349 .max_keysize = 2 * AES_MAX_KEY_SIZE, ··· 432 402 return err; 433 403 } 434 404 435 - module_init(aes_init); 405 + late_initcall(aes_init); 436 406 module_exit(aes_exit);