Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: virtio - switch to skcipher API

Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API which is supposed to
replace both blkcipher and ablkcipher. While all consumers of the API have
been converted long ago, some producers of the ablkcipher remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the
ablkcipher code in the near future.

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Gonglei <arei.gonglei@huawei.com>
Cc: virtualization@lists.linux-foundation.org
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Ard Biesheuvel and committed by
Herbert Xu
eee1d6fc 19c5da7d

+92 -97
+91 -96
drivers/crypto/virtio/virtio_crypto_algs.c
··· 8 8 9 9 #include <linux/scatterlist.h> 10 10 #include <crypto/algapi.h> 11 + #include <crypto/internal/skcipher.h> 11 12 #include <linux/err.h> 12 13 #include <crypto/scatterwalk.h> 13 14 #include <linux/atomic.h> ··· 17 16 #include "virtio_crypto_common.h" 18 17 19 18 20 - struct virtio_crypto_ablkcipher_ctx { 19 + struct virtio_crypto_skcipher_ctx { 21 20 struct crypto_engine_ctx enginectx; 22 21 struct virtio_crypto *vcrypto; 23 - struct crypto_tfm *tfm; 22 + struct crypto_skcipher *tfm; 24 23 25 24 struct virtio_crypto_sym_session_info enc_sess_info; 26 25 struct virtio_crypto_sym_session_info dec_sess_info; ··· 31 30 32 31 /* Cipher or aead */ 33 32 uint32_t type; 34 - struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx; 35 - struct ablkcipher_request *ablkcipher_req; 33 + struct virtio_crypto_skcipher_ctx *skcipher_ctx; 34 + struct skcipher_request *skcipher_req; 36 35 uint8_t *iv; 37 36 /* Encryption? */ 38 37 bool encrypt; ··· 42 41 uint32_t algonum; 43 42 uint32_t service; 44 43 unsigned int active_devs; 45 - struct crypto_alg algo; 44 + struct skcipher_alg algo; 46 45 }; 47 46 48 47 /* ··· 50 49 * and crypto algorithms registion. 
51 50 */ 52 51 static DEFINE_MUTEX(algs_lock); 53 - static void virtio_crypto_ablkcipher_finalize_req( 52 + static void virtio_crypto_skcipher_finalize_req( 54 53 struct virtio_crypto_sym_request *vc_sym_req, 55 - struct ablkcipher_request *req, 54 + struct skcipher_request *req, 56 55 int err); 57 56 58 57 static void virtio_crypto_dataq_sym_callback ··· 60 59 { 61 60 struct virtio_crypto_sym_request *vc_sym_req = 62 61 container_of(vc_req, struct virtio_crypto_sym_request, base); 63 - struct ablkcipher_request *ablk_req; 62 + struct skcipher_request *ablk_req; 64 63 int error; 65 64 66 65 /* Finish the encrypt or decrypt process */ ··· 80 79 error = -EIO; 81 80 break; 82 81 } 83 - ablk_req = vc_sym_req->ablkcipher_req; 84 - virtio_crypto_ablkcipher_finalize_req(vc_sym_req, 82 + ablk_req = vc_sym_req->skcipher_req; 83 + virtio_crypto_skcipher_finalize_req(vc_sym_req, 85 84 ablk_req, error); 86 85 } 87 86 } ··· 111 110 return 0; 112 111 } 113 112 114 - static int virtio_crypto_alg_ablkcipher_init_session( 115 - struct virtio_crypto_ablkcipher_ctx *ctx, 113 + static int virtio_crypto_alg_skcipher_init_session( 114 + struct virtio_crypto_skcipher_ctx *ctx, 116 115 uint32_t alg, const uint8_t *key, 117 116 unsigned int keylen, 118 117 int encrypt) ··· 201 200 return 0; 202 201 } 203 202 204 - static int virtio_crypto_alg_ablkcipher_close_session( 205 - struct virtio_crypto_ablkcipher_ctx *ctx, 203 + static int virtio_crypto_alg_skcipher_close_session( 204 + struct virtio_crypto_skcipher_ctx *ctx, 206 205 int encrypt) 207 206 { 208 207 struct scatterlist outhdr, status_sg, *sgs[2]; ··· 262 261 return 0; 263 262 } 264 263 265 - static int virtio_crypto_alg_ablkcipher_init_sessions( 266 - struct virtio_crypto_ablkcipher_ctx *ctx, 264 + static int virtio_crypto_alg_skcipher_init_sessions( 265 + struct virtio_crypto_skcipher_ctx *ctx, 267 266 const uint8_t *key, unsigned int keylen) 268 267 { 269 268 uint32_t alg; ··· 279 278 goto bad_key; 280 279 281 280 /* Create 
encryption session */ 282 - ret = virtio_crypto_alg_ablkcipher_init_session(ctx, 281 + ret = virtio_crypto_alg_skcipher_init_session(ctx, 283 282 alg, key, keylen, 1); 284 283 if (ret) 285 284 return ret; 286 285 /* Create decryption session */ 287 - ret = virtio_crypto_alg_ablkcipher_init_session(ctx, 286 + ret = virtio_crypto_alg_skcipher_init_session(ctx, 288 287 alg, key, keylen, 0); 289 288 if (ret) { 290 - virtio_crypto_alg_ablkcipher_close_session(ctx, 1); 289 + virtio_crypto_alg_skcipher_close_session(ctx, 1); 291 290 return ret; 292 291 } 293 292 return 0; 294 293 295 294 bad_key: 296 - crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 295 + crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 297 296 return -EINVAL; 298 297 } 299 298 300 299 /* Note: kernel crypto API realization */ 301 - static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 300 + static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm, 302 301 const uint8_t *key, 303 302 unsigned int keylen) 304 303 { 305 - struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); 304 + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 306 305 uint32_t alg; 307 306 int ret; 308 307 ··· 324 323 ctx->vcrypto = vcrypto; 325 324 } else { 326 325 /* Rekeying, we should close the created sessions previously */ 327 - virtio_crypto_alg_ablkcipher_close_session(ctx, 1); 328 - virtio_crypto_alg_ablkcipher_close_session(ctx, 0); 326 + virtio_crypto_alg_skcipher_close_session(ctx, 1); 327 + virtio_crypto_alg_skcipher_close_session(ctx, 0); 329 328 } 330 329 331 - ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen); 330 + ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen); 332 331 if (ret) { 333 332 virtcrypto_dev_put(ctx->vcrypto); 334 333 ctx->vcrypto = NULL; ··· 340 339 } 341 340 342 341 static int 343 - __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, 344 - struct 
ablkcipher_request *req, 342 + __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, 343 + struct skcipher_request *req, 345 344 struct data_queue *data_vq) 346 345 { 347 - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 348 - struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx; 346 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 347 + struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx; 349 348 struct virtio_crypto_request *vc_req = &vc_sym_req->base; 350 - unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); 349 + unsigned int ivsize = crypto_skcipher_ivsize(tfm); 351 350 struct virtio_crypto *vcrypto = ctx->vcrypto; 352 351 struct virtio_crypto_op_data_req *req_data; 353 352 int src_nents, dst_nents; ··· 360 359 int sg_total; 361 360 uint8_t *iv; 362 361 363 - src_nents = sg_nents_for_len(req->src, req->nbytes); 362 + src_nents = sg_nents_for_len(req->src, req->cryptlen); 364 363 dst_nents = sg_nents(req->dst); 365 364 366 365 pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", ··· 397 396 req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER); 398 397 req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize); 399 398 req_data->u.sym_req.u.cipher.para.src_data_len = 400 - cpu_to_le32(req->nbytes); 399 + cpu_to_le32(req->cryptlen); 401 400 402 401 dst_len = virtio_crypto_alg_sg_nents_length(req->dst); 403 402 if (unlikely(dst_len > U32_MAX)) { ··· 407 406 } 408 407 409 408 pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", 410 - req->nbytes, dst_len); 409 + req->cryptlen, dst_len); 411 410 412 - if (unlikely(req->nbytes + dst_len + ivsize + 411 + if (unlikely(req->cryptlen + dst_len + ivsize + 413 412 sizeof(vc_req->status) > vcrypto->max_size)) { 414 413 pr_err("virtio_crypto: The length is too big\n"); 415 414 err = -EINVAL; ··· 435 434 err = -ENOMEM; 436 435 goto free; 437 436 } 438 - memcpy(iv, req->info, ivsize); 437 + 
memcpy(iv, req->iv, ivsize); 439 438 if (!vc_sym_req->encrypt) 440 - scatterwalk_map_and_copy(req->info, req->src, 441 - req->nbytes - AES_BLOCK_SIZE, 439 + scatterwalk_map_and_copy(req->iv, req->src, 440 + req->cryptlen - AES_BLOCK_SIZE, 442 441 AES_BLOCK_SIZE, 0); 443 442 444 443 sg_init_one(&iv_sg, iv, ivsize); ··· 477 476 return err; 478 477 } 479 478 480 - static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req) 479 + static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req) 481 480 { 482 - struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); 483 - struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); 481 + struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req); 482 + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm); 484 483 struct virtio_crypto_sym_request *vc_sym_req = 485 - ablkcipher_request_ctx(req); 484 + skcipher_request_ctx(req); 486 485 struct virtio_crypto_request *vc_req = &vc_sym_req->base; 487 486 struct virtio_crypto *vcrypto = ctx->vcrypto; 488 487 /* Use the first data virtqueue as default */ 489 488 struct data_queue *data_vq = &vcrypto->data_vq[0]; 490 489 491 - if (!req->nbytes) 490 + if (!req->cryptlen) 492 491 return 0; 493 - if (req->nbytes % AES_BLOCK_SIZE) 492 + if (req->cryptlen % AES_BLOCK_SIZE) 494 493 return -EINVAL; 495 494 496 495 vc_req->dataq = data_vq; 497 496 vc_req->alg_cb = virtio_crypto_dataq_sym_callback; 498 - vc_sym_req->ablkcipher_ctx = ctx; 499 - vc_sym_req->ablkcipher_req = req; 497 + vc_sym_req->skcipher_ctx = ctx; 498 + vc_sym_req->skcipher_req = req; 500 499 vc_sym_req->encrypt = true; 501 500 502 - return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req); 501 + return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req); 503 502 } 504 503 505 - static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 504 + static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req) 506 505 
{ 507 - struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req); 508 - struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm); 506 + struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req); 507 + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm); 509 508 struct virtio_crypto_sym_request *vc_sym_req = 510 - ablkcipher_request_ctx(req); 509 + skcipher_request_ctx(req); 511 510 struct virtio_crypto_request *vc_req = &vc_sym_req->base; 512 511 struct virtio_crypto *vcrypto = ctx->vcrypto; 513 512 /* Use the first data virtqueue as default */ 514 513 struct data_queue *data_vq = &vcrypto->data_vq[0]; 515 514 516 - if (!req->nbytes) 515 + if (!req->cryptlen) 517 516 return 0; 518 - if (req->nbytes % AES_BLOCK_SIZE) 517 + if (req->cryptlen % AES_BLOCK_SIZE) 519 518 return -EINVAL; 520 519 521 520 vc_req->dataq = data_vq; 522 521 vc_req->alg_cb = virtio_crypto_dataq_sym_callback; 523 - vc_sym_req->ablkcipher_ctx = ctx; 524 - vc_sym_req->ablkcipher_req = req; 522 + vc_sym_req->skcipher_ctx = ctx; 523 + vc_sym_req->skcipher_req = req; 525 524 vc_sym_req->encrypt = false; 526 525 527 - return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req); 526 + return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req); 528 527 } 529 528 530 - static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm) 529 + static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm) 531 530 { 532 - struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 531 + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 533 532 534 - tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request); 533 + crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request)); 535 534 ctx->tfm = tfm; 536 535 537 - ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req; 536 + ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req; 538 537 ctx->enginectx.op.prepare_request 
= NULL; 539 538 ctx->enginectx.op.unprepare_request = NULL; 540 539 return 0; 541 540 } 542 541 543 - static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm) 542 + static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm) 544 543 { 545 - struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); 544 + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); 546 545 547 546 if (!ctx->vcrypto) 548 547 return; 549 548 550 - virtio_crypto_alg_ablkcipher_close_session(ctx, 1); 551 - virtio_crypto_alg_ablkcipher_close_session(ctx, 0); 549 + virtio_crypto_alg_skcipher_close_session(ctx, 1); 550 + virtio_crypto_alg_skcipher_close_session(ctx, 0); 552 551 virtcrypto_dev_put(ctx->vcrypto); 553 552 ctx->vcrypto = NULL; 554 553 } 555 554 556 - int virtio_crypto_ablkcipher_crypt_req( 555 + int virtio_crypto_skcipher_crypt_req( 557 556 struct crypto_engine *engine, void *vreq) 558 557 { 559 - struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base); 558 + struct skcipher_request *req = container_of(vreq, struct skcipher_request, base); 560 559 struct virtio_crypto_sym_request *vc_sym_req = 561 - ablkcipher_request_ctx(req); 560 + skcipher_request_ctx(req); 562 561 struct virtio_crypto_request *vc_req = &vc_sym_req->base; 563 562 struct data_queue *data_vq = vc_req->dataq; 564 563 int ret; 565 564 566 - ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq); 565 + ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq); 567 566 if (ret < 0) 568 567 return ret; 569 568 ··· 572 571 return 0; 573 572 } 574 573 575 - static void virtio_crypto_ablkcipher_finalize_req( 574 + static void virtio_crypto_skcipher_finalize_req( 576 575 struct virtio_crypto_sym_request *vc_sym_req, 577 - struct ablkcipher_request *req, 576 + struct skcipher_request *req, 578 577 int err) 579 578 { 580 579 if (vc_sym_req->encrypt) 581 - scatterwalk_map_and_copy(req->info, req->dst, 582 - req->nbytes - AES_BLOCK_SIZE, 580 + 
scatterwalk_map_and_copy(req->iv, req->dst, 581 + req->cryptlen - AES_BLOCK_SIZE, 583 582 AES_BLOCK_SIZE, 0); 584 - crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, 583 + crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine, 585 584 req, err); 586 585 kzfree(vc_sym_req->iv); 587 586 virtcrypto_clear_request(&vc_sym_req->base); ··· 591 590 .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC, 592 591 .service = VIRTIO_CRYPTO_SERVICE_CIPHER, 593 592 .algo = { 594 - .cra_name = "cbc(aes)", 595 - .cra_driver_name = "virtio_crypto_aes_cbc", 596 - .cra_priority = 150, 597 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 598 - .cra_blocksize = AES_BLOCK_SIZE, 599 - .cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx), 600 - .cra_alignmask = 0, 601 - .cra_module = THIS_MODULE, 602 - .cra_type = &crypto_ablkcipher_type, 603 - .cra_init = virtio_crypto_ablkcipher_init, 604 - .cra_exit = virtio_crypto_ablkcipher_exit, 605 - .cra_u = { 606 - .ablkcipher = { 607 - .setkey = virtio_crypto_ablkcipher_setkey, 608 - .decrypt = virtio_crypto_ablkcipher_decrypt, 609 - .encrypt = virtio_crypto_ablkcipher_encrypt, 610 - .min_keysize = AES_MIN_KEY_SIZE, 611 - .max_keysize = AES_MAX_KEY_SIZE, 612 - .ivsize = AES_BLOCK_SIZE, 613 - }, 614 - }, 593 + .base.cra_name = "cbc(aes)", 594 + .base.cra_driver_name = "virtio_crypto_aes_cbc", 595 + .base.cra_priority = 150, 596 + .base.cra_flags = CRYPTO_ALG_ASYNC, 597 + .base.cra_blocksize = AES_BLOCK_SIZE, 598 + .base.cra_ctxsize = sizeof(struct virtio_crypto_skcipher_ctx), 599 + .base.cra_module = THIS_MODULE, 600 + .init = virtio_crypto_skcipher_init, 601 + .exit = virtio_crypto_skcipher_exit, 602 + .setkey = virtio_crypto_skcipher_setkey, 603 + .decrypt = virtio_crypto_skcipher_decrypt, 604 + .encrypt = virtio_crypto_skcipher_encrypt, 605 + .min_keysize = AES_MIN_KEY_SIZE, 606 + .max_keysize = AES_MAX_KEY_SIZE, 607 + .ivsize = AES_BLOCK_SIZE, 615 608 }, 616 609 } }; 617 610 ··· 625 630 continue; 626 631 627 632 
if (virtio_crypto_algs[i].active_devs == 0) { 628 - ret = crypto_register_alg(&virtio_crypto_algs[i].algo); 633 + ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo); 629 634 if (ret) 630 635 goto unlock; 631 636 } 632 637 633 638 virtio_crypto_algs[i].active_devs++; 634 639 dev_info(&vcrypto->vdev->dev, "Registered algo %s\n", 635 - virtio_crypto_algs[i].algo.cra_name); 640 + virtio_crypto_algs[i].algo.base.cra_name); 636 641 } 637 642 638 643 unlock: ··· 656 661 continue; 657 662 658 663 if (virtio_crypto_algs[i].active_devs == 1) 659 - crypto_unregister_alg(&virtio_crypto_algs[i].algo); 664 + crypto_unregister_skcipher(&virtio_crypto_algs[i].algo); 660 665 661 666 virtio_crypto_algs[i].active_devs--; 662 667 }
+1 -1
drivers/crypto/virtio/virtio_crypto_common.h
··· 112 112 uint32_t algo); 113 113 int virtcrypto_dev_start(struct virtio_crypto *vcrypto); 114 114 void virtcrypto_dev_stop(struct virtio_crypto *vcrypto); 115 - int virtio_crypto_ablkcipher_crypt_req( 115 + int virtio_crypto_skcipher_crypt_req( 116 116 struct crypto_engine *engine, void *vreq); 117 117 118 118 void