Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: sha1-mb - async implementation for sha1-mb

Herbert wants the sha1-mb algorithm to have an async implementation:
https://lkml.org/lkml/2016/4/5/286.
Currently, sha1-mb uses an async interface for the outer algorithm
and a sync interface for the inner algorithm. This patch introduces
an async interface for even the inner algorithm.

Signed-off-by: Megha Dey <megha.dey@linux.intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Megha Dey and committed by
Herbert Xu
331bf739 820573eb

+165 -169
+101 -81
arch/x86/crypto/sha-mb/sha1_mb.c
··· 80 80 static inline struct mcryptd_hash_request_ctx 81 81 *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx) 82 82 { 83 - struct shash_desc *desc; 83 + struct ahash_request *areq; 84 84 85 - desc = container_of((void *) hash_ctx, struct shash_desc, __ctx); 86 - return container_of(desc, struct mcryptd_hash_request_ctx, desc); 85 + areq = container_of((void *) hash_ctx, struct ahash_request, __ctx); 86 + return container_of(areq, struct mcryptd_hash_request_ctx, areq); 87 87 } 88 88 89 89 static inline struct ahash_request ··· 93 93 } 94 94 95 95 static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx, 96 - struct shash_desc *desc) 96 + struct ahash_request *areq) 97 97 { 98 98 rctx->flag = HASH_UPDATE; 99 99 } ··· 375 375 } 376 376 } 377 377 378 - static int sha1_mb_init(struct shash_desc *desc) 378 + static int sha1_mb_init(struct ahash_request *areq) 379 379 { 380 - struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); 380 + struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); 381 381 382 382 hash_ctx_init(sctx); 383 383 sctx->job.result_digest[0] = SHA1_H0; ··· 395 395 static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx) 396 396 { 397 397 int i; 398 - struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc); 398 + struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq); 399 399 __be32 *dst = (__be32 *) rctx->out; 400 400 401 401 for (i = 0; i < 5; ++i) ··· 427 427 428 428 } 429 429 sha_ctx = (struct sha1_hash_ctx *) 430 - shash_desc_ctx(&rctx->desc); 430 + ahash_request_ctx(&rctx->areq); 431 431 kernel_fpu_begin(); 432 432 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, 433 433 rctx->walk.data, nbytes, flag); ··· 519 519 mcryptd_arm_flusher(cstate, delay); 520 520 } 521 521 522 - static int sha1_mb_update(struct shash_desc *desc, const u8 *data, 523 - unsigned int len) 522 + static int sha1_mb_update(struct ahash_request *areq) 524 523 { 525 524 struct mcryptd_hash_request_ctx *rctx = 526 - container_of(desc, struct 
mcryptd_hash_request_ctx, desc); 525 + container_of(areq, struct mcryptd_hash_request_ctx, areq); 527 526 struct mcryptd_alg_cstate *cstate = 528 527 this_cpu_ptr(sha1_mb_alg_state.alg_cstate); 529 528 ··· 538 539 } 539 540 540 541 /* need to init context */ 541 - req_ctx_init(rctx, desc); 542 + req_ctx_init(rctx, areq); 542 543 543 544 nbytes = crypto_ahash_walk_first(req, &rctx->walk); 544 545 ··· 551 552 rctx->flag |= HASH_DONE; 552 553 553 554 /* submit */ 554 - sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); 555 + sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); 555 556 sha1_mb_add_list(rctx, cstate); 556 557 kernel_fpu_begin(); 557 558 sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data, ··· 578 579 return ret; 579 580 } 580 581 581 - static int sha1_mb_finup(struct shash_desc *desc, const u8 *data, 582 - unsigned int len, u8 *out) 582 + static int sha1_mb_finup(struct ahash_request *areq) 583 583 { 584 584 struct mcryptd_hash_request_ctx *rctx = 585 - container_of(desc, struct mcryptd_hash_request_ctx, desc); 585 + container_of(areq, struct mcryptd_hash_request_ctx, areq); 586 586 struct mcryptd_alg_cstate *cstate = 587 587 this_cpu_ptr(sha1_mb_alg_state.alg_cstate); 588 588 ··· 596 598 } 597 599 598 600 /* need to init context */ 599 - req_ctx_init(rctx, desc); 601 + req_ctx_init(rctx, areq); 600 602 601 603 nbytes = crypto_ahash_walk_first(req, &rctx->walk); 602 604 ··· 609 611 rctx->flag |= HASH_DONE; 610 612 flag = HASH_LAST; 611 613 } 612 - rctx->out = out; 613 614 614 615 /* submit */ 615 616 rctx->flag |= HASH_FINAL; 616 - sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); 617 + sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); 617 618 sha1_mb_add_list(rctx, cstate); 618 619 619 620 kernel_fpu_begin(); ··· 638 641 return ret; 639 642 } 640 643 641 - static int sha1_mb_final(struct shash_desc *desc, u8 *out) 644 + static int sha1_mb_final(struct ahash_request *areq) 642 645 { 643 646 struct 
mcryptd_hash_request_ctx *rctx = 644 - container_of(desc, struct mcryptd_hash_request_ctx, desc); 647 + container_of(areq, struct mcryptd_hash_request_ctx, areq); 645 648 struct mcryptd_alg_cstate *cstate = 646 649 this_cpu_ptr(sha1_mb_alg_state.alg_cstate); 647 650 ··· 656 659 } 657 660 658 661 /* need to init context */ 659 - req_ctx_init(rctx, desc); 662 + req_ctx_init(rctx, areq); 660 663 661 - rctx->out = out; 662 664 rctx->flag |= HASH_DONE | HASH_FINAL; 663 665 664 - sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc); 666 + sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq); 665 667 /* flag HASH_FINAL and 0 data size */ 666 668 sha1_mb_add_list(rctx, cstate); 667 669 kernel_fpu_begin(); ··· 687 691 return ret; 688 692 } 689 693 690 - static int sha1_mb_export(struct shash_desc *desc, void *out) 694 + static int sha1_mb_export(struct ahash_request *areq, void *out) 691 695 { 692 - struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); 696 + struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); 693 697 694 698 memcpy(out, sctx, sizeof(*sctx)); 695 699 696 700 return 0; 697 701 } 698 702 699 - static int sha1_mb_import(struct shash_desc *desc, const void *in) 703 + static int sha1_mb_import(struct ahash_request *areq, const void *in) 700 704 { 701 - struct sha1_hash_ctx *sctx = shash_desc_ctx(desc); 705 + struct sha1_hash_ctx *sctx = ahash_request_ctx(areq); 702 706 703 707 memcpy(sctx, in, sizeof(*sctx)); 704 708 705 709 return 0; 706 710 } 707 711 712 + static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) 713 + { 714 + struct mcryptd_ahash *mcryptd_tfm; 715 + struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); 716 + struct mcryptd_hash_ctx *mctx; 708 717 709 - static struct shash_alg sha1_mb_shash_alg = { 710 - .digestsize = SHA1_DIGEST_SIZE, 718 + mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 719 + CRYPTO_ALG_INTERNAL, 720 + CRYPTO_ALG_INTERNAL); 721 + if (IS_ERR(mcryptd_tfm)) 722 + return PTR_ERR(mcryptd_tfm); 723 + mctx = 
crypto_ahash_ctx(&mcryptd_tfm->base); 724 + mctx->alg_state = &sha1_mb_alg_state; 725 + ctx->mcryptd_tfm = mcryptd_tfm; 726 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 727 + sizeof(struct ahash_request) + 728 + crypto_ahash_reqsize(&mcryptd_tfm->base)); 729 + 730 + return 0; 731 + } 732 + 733 + static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) 734 + { 735 + struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); 736 + 737 + mcryptd_free_ahash(ctx->mcryptd_tfm); 738 + } 739 + 740 + static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm) 741 + { 742 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 743 + sizeof(struct ahash_request) + 744 + sizeof(struct sha1_hash_ctx)); 745 + 746 + return 0; 747 + } 748 + 749 + static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm) 750 + { 751 + struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); 752 + 753 + mcryptd_free_ahash(ctx->mcryptd_tfm); 754 + } 755 + 756 + static struct ahash_alg sha1_mb_areq_alg = { 711 757 .init = sha1_mb_init, 712 758 .update = sha1_mb_update, 713 759 .final = sha1_mb_final, 714 760 .finup = sha1_mb_finup, 715 761 .export = sha1_mb_export, 716 762 .import = sha1_mb_import, 717 - .descsize = sizeof(struct sha1_hash_ctx), 718 - .statesize = sizeof(struct sha1_hash_ctx), 719 - .base = { 720 - .cra_name = "__sha1-mb", 721 - .cra_driver_name = "__intel_sha1-mb", 722 - .cra_priority = 100, 723 - /* 724 - * use ASYNC flag as some buffers in multi-buffer 725 - * algo may not have completed before hashing thread sleep 726 - */ 727 - .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC | 728 - CRYPTO_ALG_INTERNAL, 729 - .cra_blocksize = SHA1_BLOCK_SIZE, 730 - .cra_module = THIS_MODULE, 731 - .cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list), 763 + .halg = { 764 + .digestsize = SHA1_DIGEST_SIZE, 765 + .statesize = sizeof(struct sha1_hash_ctx), 766 + .base = { 767 + .cra_name = "__sha1-mb", 768 + .cra_driver_name = "__intel_sha1-mb", 769 + .cra_priority = 100, 770 + /* 771 + * use ASYNC flag as 
some buffers in multi-buffer 772 + * algo may not have completed before hashing thread 773 + * sleep 774 + */ 775 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | 776 + CRYPTO_ALG_ASYNC | 777 + CRYPTO_ALG_INTERNAL, 778 + .cra_blocksize = SHA1_BLOCK_SIZE, 779 + .cra_module = THIS_MODULE, 780 + .cra_list = LIST_HEAD_INIT 781 + (sha1_mb_areq_alg.halg.base.cra_list), 782 + .cra_init = sha1_mb_areq_init_tfm, 783 + .cra_exit = sha1_mb_areq_exit_tfm, 784 + .cra_ctxsize = sizeof(struct sha1_hash_ctx), 785 + } 732 786 } 733 787 }; 734 788 ··· 863 817 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 864 818 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); 865 819 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; 866 - struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm); 820 + struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm); 867 821 struct mcryptd_hash_request_ctx *rctx; 868 - struct shash_desc *desc; 822 + struct ahash_request *areq; 869 823 870 824 memcpy(mcryptd_req, req, sizeof(*req)); 871 825 ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base); 872 826 rctx = ahash_request_ctx(mcryptd_req); 873 - desc = &rctx->desc; 874 - desc->tfm = child; 875 - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 827 + areq = &rctx->areq; 828 + 829 + ahash_request_set_tfm(areq, child); 830 + ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, 831 + rctx->complete, req); 876 832 877 833 return crypto_ahash_import(mcryptd_req, in); 878 - } 879 - 880 - static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm) 881 - { 882 - struct mcryptd_ahash *mcryptd_tfm; 883 - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); 884 - struct mcryptd_hash_ctx *mctx; 885 - 886 - mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb", 887 - CRYPTO_ALG_INTERNAL, 888 - CRYPTO_ALG_INTERNAL); 889 - if (IS_ERR(mcryptd_tfm)) 890 - return PTR_ERR(mcryptd_tfm); 891 - mctx = crypto_ahash_ctx(&mcryptd_tfm->base); 892 - mctx->alg_state = &sha1_mb_alg_state; 893 - ctx->mcryptd_tfm = mcryptd_tfm; 894 - 
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 895 - sizeof(struct ahash_request) + 896 - crypto_ahash_reqsize(&mcryptd_tfm->base)); 897 - 898 - return 0; 899 - } 900 - 901 - static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm) 902 - { 903 - struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); 904 - 905 - mcryptd_free_ahash(ctx->mcryptd_tfm); 906 834 } 907 835 908 836 static struct ahash_alg sha1_mb_async_alg = { ··· 985 965 } 986 966 sha1_mb_alg_state.flusher = &sha1_mb_flusher; 987 967 988 - err = crypto_register_shash(&sha1_mb_shash_alg); 968 + err = crypto_register_ahash(&sha1_mb_areq_alg); 989 969 if (err) 990 970 goto err2; 991 971 err = crypto_register_ahash(&sha1_mb_async_alg); ··· 995 975 996 976 return 0; 997 977 err1: 998 - crypto_unregister_shash(&sha1_mb_shash_alg); 978 + crypto_unregister_ahash(&sha1_mb_areq_alg); 999 979 err2: 1000 980 for_each_possible_cpu(cpu) { 1001 981 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); ··· 1011 991 struct mcryptd_alg_cstate *cpu_state; 1012 992 1013 993 crypto_unregister_ahash(&sha1_mb_async_alg); 1014 - crypto_unregister_shash(&sha1_mb_shash_alg); 994 + crypto_unregister_ahash(&sha1_mb_areq_alg); 1015 995 for_each_possible_cpu(cpu) { 1016 996 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); 1017 997 kfree(cpu_state->mgr);
+56 -76
crypto/mcryptd.c
··· 41 41 static struct mcryptd_flush_list __percpu *mcryptd_flist; 42 42 43 43 struct hashd_instance_ctx { 44 - struct crypto_shash_spawn spawn; 44 + struct crypto_ahash_spawn spawn; 45 45 struct mcryptd_queue *queue; 46 46 }; 47 47 ··· 272 272 { 273 273 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 274 274 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); 275 - struct crypto_shash_spawn *spawn = &ictx->spawn; 275 + struct crypto_ahash_spawn *spawn = &ictx->spawn; 276 276 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 277 - struct crypto_shash *hash; 277 + struct crypto_ahash *hash; 278 278 279 - hash = crypto_spawn_shash(spawn); 279 + hash = crypto_spawn_ahash(spawn); 280 280 if (IS_ERR(hash)) 281 281 return PTR_ERR(hash); 282 282 283 283 ctx->child = hash; 284 284 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 285 285 sizeof(struct mcryptd_hash_request_ctx) + 286 - crypto_shash_descsize(hash)); 286 + crypto_ahash_reqsize(hash)); 287 287 return 0; 288 288 } 289 289 ··· 291 291 { 292 292 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 293 293 294 - crypto_free_shash(ctx->child); 294 + crypto_free_ahash(ctx->child); 295 295 } 296 296 297 297 static int mcryptd_hash_setkey(struct crypto_ahash *parent, 298 298 const u8 *key, unsigned int keylen) 299 299 { 300 300 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); 301 - struct crypto_shash *child = ctx->child; 301 + struct crypto_ahash *child = ctx->child; 302 302 int err; 303 303 304 - crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); 305 - crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & 304 + crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); 305 + crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) & 306 306 CRYPTO_TFM_REQ_MASK); 307 - err = crypto_shash_setkey(child, key, keylen); 308 - crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & 307 + err = crypto_ahash_setkey(child, key, keylen); 308 + crypto_ahash_set_flags(parent, 
crypto_ahash_get_flags(child) & 309 309 CRYPTO_TFM_RES_MASK); 310 310 return err; 311 311 } ··· 331 331 static void mcryptd_hash_init(struct crypto_async_request *req_async, int err) 332 332 { 333 333 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 334 - struct crypto_shash *child = ctx->child; 334 + struct crypto_ahash *child = ctx->child; 335 335 struct ahash_request *req = ahash_request_cast(req_async); 336 336 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 337 - struct shash_desc *desc = &rctx->desc; 337 + struct ahash_request *desc = &rctx->areq; 338 338 339 339 if (unlikely(err == -EINPROGRESS)) 340 340 goto out; 341 341 342 - desc->tfm = child; 343 - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 342 + ahash_request_set_tfm(desc, child); 343 + ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP, 344 + rctx->complete, req_async); 344 345 345 - err = crypto_shash_init(desc); 346 - 347 - req->base.complete = rctx->complete; 346 + rctx->out = req->result; 347 + err = crypto_ahash_init(desc); 348 348 349 349 out: 350 350 local_bh_disable(); ··· 365 365 if (unlikely(err == -EINPROGRESS)) 366 366 goto out; 367 367 368 - err = shash_ahash_mcryptd_update(req, &rctx->desc); 368 + rctx->out = req->result; 369 + err = ahash_mcryptd_update(&rctx->areq); 369 370 if (err) { 370 371 req->base.complete = rctx->complete; 371 372 goto out; ··· 392 391 if (unlikely(err == -EINPROGRESS)) 393 392 goto out; 394 393 395 - err = shash_ahash_mcryptd_final(req, &rctx->desc); 394 + rctx->out = req->result; 395 + err = ahash_mcryptd_final(&rctx->areq); 396 396 if (err) { 397 397 req->base.complete = rctx->complete; 398 398 goto out; ··· 418 416 419 417 if (unlikely(err == -EINPROGRESS)) 420 418 goto out; 421 - 422 - err = shash_ahash_mcryptd_finup(req, &rctx->desc); 419 + rctx->out = req->result; 420 + err = ahash_mcryptd_finup(&rctx->areq); 423 421 424 422 if (err) { 425 423 req->base.complete = rctx->complete; ··· 441 439 static void 
mcryptd_hash_digest(struct crypto_async_request *req_async, int err) 442 440 { 443 441 struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 444 - struct crypto_shash *child = ctx->child; 442 + struct crypto_ahash *child = ctx->child; 445 443 struct ahash_request *req = ahash_request_cast(req_async); 446 444 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 447 - struct shash_desc *desc = &rctx->desc; 445 + struct ahash_request *desc = &rctx->areq; 448 446 449 447 if (unlikely(err == -EINPROGRESS)) 450 448 goto out; 451 449 452 - desc->tfm = child; 453 - desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; /* check this again */ 450 + ahash_request_set_tfm(desc, child); 451 + ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP, 452 + rctx->complete, req_async); 454 453 455 - err = shash_ahash_mcryptd_digest(req, desc); 454 + rctx->out = req->result; 455 + err = ahash_mcryptd_digest(desc); 456 456 457 - if (err) { 458 - req->base.complete = rctx->complete; 459 - goto out; 460 - } 461 - 462 - return; 463 457 out: 464 458 local_bh_disable(); 465 459 rctx->complete(&req->base, err); ··· 471 473 { 472 474 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 473 475 474 - return crypto_shash_export(&rctx->desc, out); 476 + return crypto_ahash_export(&rctx->areq, out); 475 477 } 476 478 477 479 static int mcryptd_hash_import(struct ahash_request *req, const void *in) 478 480 { 479 481 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 480 482 481 - return crypto_shash_import(&rctx->desc, in); 483 + return crypto_ahash_import(&rctx->areq, in); 482 484 } 483 485 484 486 static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, ··· 486 488 { 487 489 struct hashd_instance_ctx *ctx; 488 490 struct ahash_instance *inst; 489 - struct shash_alg *salg; 491 + struct hash_alg_common *halg; 490 492 struct crypto_alg *alg; 491 493 u32 type = 0; 492 494 u32 mask = 0; ··· 494 496 495 497 mcryptd_check_internal(tb, &type, 
&mask); 496 498 497 - salg = shash_attr_alg(tb[1], type, mask); 498 - if (IS_ERR(salg)) 499 - return PTR_ERR(salg); 499 + halg = ahash_attr_alg(tb[1], type, mask); 500 + if (IS_ERR(halg)) 501 + return PTR_ERR(halg); 500 502 501 - alg = &salg->base; 503 + alg = &halg->base; 502 504 pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name); 503 505 inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(), 504 506 sizeof(*ctx)); ··· 509 511 ctx = ahash_instance_ctx(inst); 510 512 ctx->queue = queue; 511 513 512 - err = crypto_init_shash_spawn(&ctx->spawn, salg, 514 + err = crypto_init_ahash_spawn(&ctx->spawn, halg, 513 515 ahash_crypto_instance(inst)); 514 516 if (err) 515 517 goto out_free_inst; ··· 519 521 type |= CRYPTO_ALG_INTERNAL; 520 522 inst->alg.halg.base.cra_flags = type; 521 523 522 - inst->alg.halg.digestsize = salg->digestsize; 523 - inst->alg.halg.statesize = salg->statesize; 524 + inst->alg.halg.digestsize = halg->digestsize; 525 + inst->alg.halg.statesize = halg->statesize; 524 526 inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx); 525 527 526 528 inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm; ··· 537 539 538 540 err = ahash_register_instance(tmpl, inst); 539 541 if (err) { 540 - crypto_drop_shash(&ctx->spawn); 542 + crypto_drop_ahash(&ctx->spawn); 541 543 out_free_inst: 542 544 kfree(inst); 543 545 } ··· 573 575 574 576 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { 575 577 case CRYPTO_ALG_TYPE_AHASH: 576 - crypto_drop_shash(&hctx->spawn); 578 + crypto_drop_ahash(&hctx->spawn); 577 579 kfree(ahash_instance(inst)); 578 580 return; 579 581 default: ··· 610 612 } 611 613 EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash); 612 614 613 - int shash_ahash_mcryptd_digest(struct ahash_request *req, 614 - struct shash_desc *desc) 615 + int ahash_mcryptd_digest(struct ahash_request *desc) 615 616 { 616 617 int err; 617 618 618 - err = crypto_shash_init(desc) ?: 619 - shash_ahash_mcryptd_finup(req, desc); 619 + err = crypto_ahash_init(desc) 
?: 620 + ahash_mcryptd_finup(desc); 620 621 621 622 return err; 622 623 } 623 - EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest); 624 624 625 - int shash_ahash_mcryptd_update(struct ahash_request *req, 626 - struct shash_desc *desc) 625 + int ahash_mcryptd_update(struct ahash_request *desc) 627 626 { 628 - struct crypto_shash *tfm = desc->tfm; 629 - struct shash_alg *shash = crypto_shash_alg(tfm); 630 - 631 627 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 632 628 633 - return shash->update(desc, NULL, 0); 629 + return crypto_ahash_update(desc); 634 630 } 635 - EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update); 636 631 637 - int shash_ahash_mcryptd_finup(struct ahash_request *req, 638 - struct shash_desc *desc) 632 + int ahash_mcryptd_finup(struct ahash_request *desc) 639 633 { 640 - struct crypto_shash *tfm = desc->tfm; 641 - struct shash_alg *shash = crypto_shash_alg(tfm); 642 - 643 634 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 644 635 645 - return shash->finup(desc, NULL, 0, req->result); 636 + return crypto_ahash_finup(desc); 646 637 } 647 - EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup); 648 638 649 - int shash_ahash_mcryptd_final(struct ahash_request *req, 650 - struct shash_desc *desc) 639 + int ahash_mcryptd_final(struct ahash_request *desc) 651 640 { 652 - struct crypto_shash *tfm = desc->tfm; 653 - struct shash_alg *shash = crypto_shash_alg(tfm); 654 - 655 641 /* alignment is to be done by multi-buffer crypto algorithm if needed */ 656 642 657 - return shash->final(desc, req->result); 643 + return crypto_ahash_final(desc); 658 644 } 659 - EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final); 660 645 661 - struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) 646 + struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm) 662 647 { 663 648 struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); 664 649 ··· 649 668 } 650 669 EXPORT_SYMBOL_GPL(mcryptd_ahash_child); 651 670 652 - struct 
shash_desc *mcryptd_shash_desc(struct ahash_request *req) 671 + struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req) 653 672 { 654 673 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); 655 - return &rctx->desc; 674 + return &rctx->areq; 656 675 } 657 - EXPORT_SYMBOL_GPL(mcryptd_shash_desc); 676 + EXPORT_SYMBOL_GPL(mcryptd_ahash_desc); 658 677 659 678 void mcryptd_free_ahash(struct mcryptd_ahash *tfm) 660 679 { 661 680 crypto_free_ahash(&tfm->base); 662 681 } 663 682 EXPORT_SYMBOL_GPL(mcryptd_free_ahash); 664 - 665 683 666 684 static int __init mcryptd_init(void) 667 685 {
+4 -8
include/crypto/internal/hash.h
··· 114 114 int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc); 115 115 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc); 116 116 117 - int shash_ahash_mcryptd_update(struct ahash_request *req, 118 - struct shash_desc *desc); 119 - int shash_ahash_mcryptd_final(struct ahash_request *req, 120 - struct shash_desc *desc); 121 - int shash_ahash_mcryptd_finup(struct ahash_request *req, 122 - struct shash_desc *desc); 123 - int shash_ahash_mcryptd_digest(struct ahash_request *req, 124 - struct shash_desc *desc); 117 + int ahash_mcryptd_update(struct ahash_request *desc); 118 + int ahash_mcryptd_final(struct ahash_request *desc); 119 + int ahash_mcryptd_finup(struct ahash_request *desc); 120 + int ahash_mcryptd_digest(struct ahash_request *desc); 125 121 126 122 int crypto_init_shash_ops_async(struct crypto_tfm *tfm); 127 123
+4 -4
include/crypto/mcryptd.h
··· 39 39 }; 40 40 41 41 struct mcryptd_hash_ctx { 42 - struct crypto_shash *child; 42 + struct crypto_ahash *child; 43 43 struct mcryptd_alg_state *alg_state; 44 44 }; 45 45 ··· 59 59 struct crypto_hash_walk walk; 60 60 u8 *out; 61 61 int flag; 62 - struct shash_desc desc; 62 + struct ahash_request areq; 63 63 }; 64 64 65 65 struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name, 66 66 u32 type, u32 mask); 67 - struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); 68 - struct shash_desc *mcryptd_shash_desc(struct ahash_request *req); 67 + struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm); 68 + struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req); 69 69 void mcryptd_free_ahash(struct mcryptd_ahash *tfm); 70 70 void mcryptd_flusher(struct work_struct *work); 71 71