Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: mxs-dcp - permit asynchronous skcipher as fallback

Even though the mxs-dcp driver implements asynchronous versions of
ecb(aes) and cbc(aes), the fallbacks it allocates are required to be
synchronous. Given that SIMD based software implementations are usually
asynchronous as well, even though they rarely complete asynchronously
(this typically only happens in cases where the request was made from
softirq context, while SIMD was already in use in the task context that
it interrupted), these implementations are disregarded, and either the
generic C version or another table based version implemented in assembler
is selected instead.

Since falling back to synchronous AES is not only a performance issue, but
potentially a security issue as well (due to the fact that table based AES
is not time invariant), let's fix this, by allocating an ordinary skcipher
as the fallback, and invoke it with the completion routine that was given
to the outer request.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ard Biesheuvel and committed by Herbert Xu
(commit d8c6d188; parent c9598d4e — TODO confirm which hash is the parent).

+17 -16
drivers/crypto/mxs-dcp.c
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -97,7 +97,7 @@
 	unsigned int			hot:1;
 
 	/* Crypto-specific context */
-	struct crypto_sync_skcipher	*fallback;
+	struct crypto_skcipher		*fallback;
 	unsigned int			key_len;
 	uint8_t				key[AES_KEYSIZE_128];
 };
@@ -105,6 +105,7 @@
 struct dcp_aes_req_ctx {
 	unsigned int	enc:1;
 	unsigned int	ecb:1;
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 struct dcp_sha_req_ctx {
@@ -427,21 +426,20 @@ static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 	int ret;
 
-	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
-	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
+	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
 				   req->cryptlen, req->iv);
 
 	if (enc)
-		ret = crypto_skcipher_encrypt(subreq);
+		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 	else
-		ret = crypto_skcipher_decrypt(subreq);
-
-	skcipher_request_zero(subreq);
+		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 
 	return ret;
 }
@@ -510,24 +510,25 @@
 	 * but is supported by in-kernel software implementation, we use
 	 * software fallback.
 	 */
-	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(actx->fallback,
+	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(actx->fallback,
 				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-	return crypto_sync_skcipher_setkey(actx->fallback, key, len);
+	return crypto_skcipher_setkey(actx->fallback, key, len);
 }
 
 static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 {
 	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
 	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
-	struct crypto_sync_skcipher *blk;
+	struct crypto_skcipher *blk;
 
-	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);
 
 	actx->fallback = blk;
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
+					 crypto_skcipher_reqsize(blk));
 	return 0;
 }
 
@@ -536,7 +535,7 @@
 {
 	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_sync_skcipher(actx->fallback);
+	crypto_free_skcipher(actx->fallback);
 }
 
 /*