Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: chelsio - permit asynchronous skcipher as fallback

Even though the chelsio driver implements asynchronous versions of
cbc(aes) and xts(aes), the fallbacks it allocates are required to be
synchronous. Given that SIMD based software implementations are usually
asynchronous as well, even though they rarely complete asynchronously
(this typically only happens in cases where the request was made from
softirq context, while SIMD was already in use in the task context that
it interrupted), these implementations are disregarded, and either the
generic C version or another table based version implemented in assembler
is selected instead.

Since falling back to synchronous AES is not only a performance issue, but
potentially a security issue as well (due to the fact that table based AES
is not time invariant), let's fix this, by allocating an ordinary skcipher
as the fallback, and invoke it with the completion routine that was given
to the outer request.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Authored by Ard Biesheuvel; committed by Herbert Xu.
Commit: d8c6d188 (parent: 413b61ce)

Total diffstat: +25 −35 (chcr_algo.c alone: +23 −34)
drivers/crypto/chelsio/chcr_algo.c
··· (around lines 690–715) ···
 	return min(srclen, dstlen);
 }
 
-static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
-			       u32 flags,
-			       struct scatterlist *src,
-			       struct scatterlist *dst,
-			       unsigned int nbytes,
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+			       struct skcipher_request *req,
 			       u8 *iv,
 			       unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	int err;
 
-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+				   req->cryptlen, iv);
 
-	skcipher_request_set_sync_tfm(subreq, cipher);
-	skcipher_request_set_callback(subreq, flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, src, dst,
-				   nbytes, iv);
-
-	err = op_type ? crypto_skcipher_decrypt(subreq) :
-		crypto_skcipher_encrypt(subreq);
-	skcipher_request_zero(subreq);
+	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+		       crypto_skcipher_encrypt(&reqctx->fallback_req);
 
 	return err;
··· (around lines 920–934) ···
 {
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 
-	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
 				    CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+	crypto_skcipher_set_flags(ablkctx->sw_cipher,
 				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 }
 
 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
··· (around lines 1202–1213) ···
 				      req);
 		memcpy(req->iv, reqctx->init_iv, IV);
 		atomic_inc(&adap->chcr_stats.fallback);
-		err = chcr_cipher_fallback(ablkctx->sw_cipher,
-					   req->base.flags,
-					   req->src,
-					   req->dst,
-					   req->cryptlen,
-					   req->iv,
-					   reqctx->op);
+		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
+					   reqctx->op);
 		goto complete;
 	}
··· (around lines 1332–1347) ···
 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 			      req);
fallback:	atomic_inc(&adap->chcr_stats.fallback);
-	err = chcr_cipher_fallback(ablkctx->sw_cipher,
-				   req->base.flags,
-				   req->src,
-				   req->dst,
-				   req->cryptlen,
+	err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
 				   subtype ==
 				   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
 				   reqctx->iv : req->iv,
··· (around lines 1473–1500) ···
 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
 				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
 	init_completion(&ctx->cbc_aes_aio_done);
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
 
 	return chcr_device_init(ctx);
 }
··· (around lines 1495–1519) ···
 	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
 	 * cannot be used as fallback in chcr_handle_cipher_response
 	 */
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
 					CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
 	return chcr_device_init(ctx);
 }
··· (around lines 1512–1529) ···
 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	crypto_free_sync_skcipher(ablkctx->sw_cipher);
+	crypto_free_skcipher(ablkctx->sw_cipher);
 }
 
 static int get_alg_config(struct algo_param *params,
+2 -1
drivers/crypto/chelsio/chcr_crypto.h
··· (around lines 171–177) ···
 }
 
 struct ablk_ctx {
-	struct crypto_sync_skcipher *sw_cipher;
+	struct crypto_skcipher *sw_cipher;
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
 	unsigned char ciph_mode;
··· (around lines 305–311) ···
 	u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
 	u16 txqidx;
 	u16 rxqidx;
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 struct chcr_alg_template {