Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: mxs-dcp - Use skcipher for fallback

This patch replaces the use of the obsolete ablkcipher interface with skcipher.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

+21 -26
+21 -26
drivers/crypto/mxs-dcp.c
··· 11 11 * http://www.gnu.org/copyleft/gpl.html 12 12 */ 13 13 14 - #include <linux/crypto.h> 15 14 #include <linux/dma-mapping.h> 16 15 #include <linux/interrupt.h> 17 16 #include <linux/io.h> ··· 24 25 #include <crypto/aes.h> 25 26 #include <crypto/sha.h> 26 27 #include <crypto/internal/hash.h> 28 + #include <crypto/internal/skcipher.h> 27 29 28 30 #define DCP_MAX_CHANS 4 29 31 #define DCP_BUF_SZ PAGE_SIZE ··· 84 84 unsigned int hot:1; 85 85 86 86 /* Crypto-specific context */ 87 - struct crypto_ablkcipher *fallback; 87 + struct crypto_skcipher *fallback; 88 88 unsigned int key_len; 89 89 uint8_t key[AES_KEYSIZE_128]; 90 90 }; ··· 374 374 375 375 static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc) 376 376 { 377 - struct crypto_tfm *tfm = 378 - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); 379 - struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx( 380 - crypto_ablkcipher_reqtfm(req)); 377 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 378 + struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm); 379 + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 381 380 int ret; 382 381 383 - ablkcipher_request_set_tfm(req, ctx->fallback); 382 + skcipher_request_set_tfm(subreq, ctx->fallback); 383 + skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); 384 + skcipher_request_set_crypt(subreq, req->src, req->dst, 385 + req->nbytes, req->info); 384 386 385 387 if (enc) 386 - ret = crypto_ablkcipher_encrypt(req); 388 + ret = crypto_skcipher_encrypt(subreq); 387 389 else 388 - ret = crypto_ablkcipher_decrypt(req); 390 + ret = crypto_skcipher_decrypt(subreq); 389 391 390 - ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); 392 + skcipher_request_zero(subreq); 391 393 392 394 return ret; 393 395 } ··· 455 453 return 0; 456 454 } 457 455 458 - /* Check if the key size is supported by kernel at all. 
*/ 459 - if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { 460 - tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; 461 - return -EINVAL; 462 - } 463 - 464 456 /* 465 457 * If the requested AES key size is not supported by the hardware, 466 458 * but is supported by in-kernel software implementation, we use 467 459 * software fallback. 468 460 */ 469 - actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 470 - actx->fallback->base.crt_flags |= 471 - tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK; 461 + crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK); 462 + crypto_skcipher_set_flags(actx->fallback, 463 + tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); 472 464 473 - ret = crypto_ablkcipher_setkey(actx->fallback, key, len); 465 + ret = crypto_skcipher_setkey(actx->fallback, key, len); 474 466 if (!ret) 475 467 return 0; 476 468 477 469 tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK; 478 - tfm->base.crt_flags |= 479 - actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK; 470 + tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) & 471 + CRYPTO_TFM_RES_MASK; 480 472 481 473 return ret; 482 474 } ··· 480 484 const char *name = crypto_tfm_alg_name(tfm); 481 485 const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK; 482 486 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); 483 - struct crypto_ablkcipher *blk; 487 + struct crypto_skcipher *blk; 484 488 485 - blk = crypto_alloc_ablkcipher(name, 0, flags); 489 + blk = crypto_alloc_skcipher(name, 0, flags); 486 490 if (IS_ERR(blk)) 487 491 return PTR_ERR(blk); 488 492 ··· 495 499 { 496 500 struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm); 497 501 498 - crypto_free_ablkcipher(actx->fallback); 499 - actx->fallback = NULL; 502 + crypto_free_skcipher(actx->fallback); 500 503 } 501 504 502 505 /*