Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: arm64/sm4 - Use API partial block handling

Use the Crypto API partial block handling.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Herbert Xu ef170084 4dc3c40c

+31 -67
arch/arm64/crypto/sm4-ce-glue.c
··· 8 8 * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> 9 9 */ 10 10 11 - #include <linux/module.h> 12 - #include <linux/crypto.h> 13 - #include <linux/kernel.h> 14 - #include <linux/cpufeature.h> 15 11 #include <asm/neon.h> 16 - #include <asm/simd.h> 17 12 #include <crypto/b128ops.h> 18 - #include <crypto/internal/simd.h> 19 - #include <crypto/internal/skcipher.h> 20 13 #include <crypto/internal/hash.h> 14 + #include <crypto/internal/skcipher.h> 21 15 #include <crypto/scatterwalk.h> 22 - #include <crypto/xts.h> 23 16 #include <crypto/sm4.h> 17 + #include <crypto/utils.h> 18 + #include <crypto/xts.h> 19 + #include <linux/cpufeature.h> 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/string.h> 24 23 25 24 #define BYTES2BLKS(nbytes) ((nbytes) >> 4) 26 25 ··· 63 64 }; 64 65 65 66 struct sm4_mac_desc_ctx { 66 - unsigned int len; 67 67 u8 digest[SM4_BLOCK_SIZE]; 68 68 }; 69 69 ··· 589 591 struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); 590 592 591 593 memset(ctx->digest, 0, SM4_BLOCK_SIZE); 592 - ctx->len = 0; 593 - 594 594 return 0; 595 595 } 596 596 ··· 597 601 { 598 602 struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 599 603 struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); 600 - unsigned int l, nblocks; 604 + unsigned int nblocks = len / SM4_BLOCK_SIZE; 601 605 602 - if (len == 0) 603 - return 0; 604 - 605 - if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) { 606 - l = min(len, SM4_BLOCK_SIZE - ctx->len); 607 - 608 - crypto_xor(ctx->digest + ctx->len, p, l); 609 - ctx->len += l; 610 - len -= l; 611 - p += l; 612 - } 613 - 614 - if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) { 615 - kernel_neon_begin(); 616 - 617 - if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) { 618 - sm4_ce_crypt_block(tctx->key.rkey_enc, 619 - ctx->digest, ctx->digest); 620 - ctx->len = 0; 621 - } else { 622 - nblocks = len / SM4_BLOCK_SIZE; 623 - len %= SM4_BLOCK_SIZE; 624 - 625 - 
sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p, 626 - nblocks, (ctx->len == SM4_BLOCK_SIZE), 627 - (len != 0)); 628 - 629 - p += nblocks * SM4_BLOCK_SIZE; 630 - 631 - if (len == 0) 632 - ctx->len = SM4_BLOCK_SIZE; 633 - } 634 - 635 - kernel_neon_end(); 636 - 637 - if (len) { 638 - crypto_xor(ctx->digest, p, len); 639 - ctx->len = len; 640 - } 641 - } 642 - 643 - return 0; 606 + len %= SM4_BLOCK_SIZE; 607 + kernel_neon_begin(); 608 + sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p, 609 + nblocks, false, true); 610 + kernel_neon_end(); 611 + return len; 644 612 } 645 613 646 - static int sm4_cmac_final(struct shash_desc *desc, u8 *out) 614 + static int sm4_cmac_finup(struct shash_desc *desc, const u8 *src, 615 + unsigned int len, u8 *out) 647 616 { 648 617 struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 649 618 struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); 650 619 const u8 *consts = tctx->consts; 651 620 652 - if (ctx->len != SM4_BLOCK_SIZE) { 653 - ctx->digest[ctx->len] ^= 0x80; 621 + crypto_xor(ctx->digest, src, len); 622 + if (len != SM4_BLOCK_SIZE) { 623 + ctx->digest[len] ^= 0x80; 654 624 consts += SM4_BLOCK_SIZE; 655 625 } 656 - 657 626 kernel_neon_begin(); 658 627 sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1, 659 628 false, true); 660 629 kernel_neon_end(); 661 - 662 630 memcpy(out, ctx->digest, SM4_BLOCK_SIZE); 663 - 664 631 return 0; 665 632 } 666 633 667 - static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out) 634 + static int sm4_cbcmac_finup(struct shash_desc *desc, const u8 *src, 635 + unsigned int len, u8 *out) 668 636 { 669 637 struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); 670 638 struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc); 671 639 672 - if (ctx->len) { 640 + if (len) { 641 + crypto_xor(ctx->digest, src, len); 673 642 kernel_neon_begin(); 674 643 sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest, 675 644 ctx->digest); 676 645 kernel_neon_end(); 677 646 } 678 - 679 647 
memcpy(out, ctx->digest, SM4_BLOCK_SIZE); 680 - 681 648 return 0; 682 649 } 683 650 ··· 650 691 .cra_name = "cmac(sm4)", 651 692 .cra_driver_name = "cmac-sm4-ce", 652 693 .cra_priority = 400, 694 + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | 695 + CRYPTO_AHASH_ALG_FINAL_NONZERO, 653 696 .cra_blocksize = SM4_BLOCK_SIZE, 654 697 .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx) 655 698 + SM4_BLOCK_SIZE * 2, ··· 660 699 .digestsize = SM4_BLOCK_SIZE, 661 700 .init = sm4_mac_init, 662 701 .update = sm4_mac_update, 663 - .final = sm4_cmac_final, 702 + .finup = sm4_cmac_finup, 664 703 .setkey = sm4_cmac_setkey, 665 704 .descsize = sizeof(struct sm4_mac_desc_ctx), 666 705 }, { ··· 668 707 .cra_name = "xcbc(sm4)", 669 708 .cra_driver_name = "xcbc-sm4-ce", 670 709 .cra_priority = 400, 710 + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | 711 + CRYPTO_AHASH_ALG_FINAL_NONZERO, 671 712 .cra_blocksize = SM4_BLOCK_SIZE, 672 713 .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx) 673 714 + SM4_BLOCK_SIZE * 2, ··· 678 715 .digestsize = SM4_BLOCK_SIZE, 679 716 .init = sm4_mac_init, 680 717 .update = sm4_mac_update, 681 - .final = sm4_cmac_final, 718 + .finup = sm4_cmac_finup, 682 719 .setkey = sm4_xcbc_setkey, 683 720 .descsize = sizeof(struct sm4_mac_desc_ctx), 684 721 }, { ··· 686 723 .cra_name = "cbcmac(sm4)", 687 724 .cra_driver_name = "cbcmac-sm4-ce", 688 725 .cra_priority = 400, 726 + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, 689 727 .cra_blocksize = SM4_BLOCK_SIZE, 690 728 .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx), 691 729 .cra_module = THIS_MODULE, ··· 694 730 .digestsize = SM4_BLOCK_SIZE, 695 731 .init = sm4_mac_init, 696 732 .update = sm4_mac_update, 697 - .final = sm4_cbcmac_final, 733 + .finup = sm4_cbcmac_finup, 698 734 .setkey = sm4_cbcmac_setkey, 699 735 .descsize = sizeof(struct sm4_mac_desc_ctx), 700 736 }