Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: mediatek - make hardware operation flow more efficient

This patch refines the data structures used to control the engine's
data path, making them more efficient. Hence the current changes are:

- gathers the scattered pieces of the structures 'mtk_aes_ct' and 'mtk_aes_tfm'
into struct mtk_aes_info, hence avoiding an additional DMA mapping.

- adds 'keymode' in struct mtk_aes_base_ctx. When the .setkey() callback is
called, we store the key-size setting in keymode. Doing so, there is no need
to check keylen a second time in mtk_aes_info_init() / mtk_aes_gcm_info_init().

Besides, this patch also removes unused macro definitions and adds a helper
inline function to write security information (key, IV, ...) to info->state.

Signed-off-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Ryder Lee and committed by
Herbert Xu
9aa2fcb8 98b10235

+165 -188
+126 -135
drivers/crypto/mediatek/mtk-aes.c
··· 19 19 #define AES_BUF_ORDER 2 20 20 #define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \ 21 21 & ~(AES_BLOCK_SIZE - 1)) 22 + #define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \ 23 + AES_BLOCK_SIZE * 2) 24 + #define AES_MAX_CT_SIZE 6 22 25 23 - /* AES command token size */ 24 - #define AES_CT_SIZE_ECB 2 25 - #define AES_CT_SIZE_CBC 3 26 - #define AES_CT_SIZE_CTR 3 27 - #define AES_CT_SIZE_GCM_OUT 5 28 - #define AES_CT_SIZE_GCM_IN 6 29 26 #define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) 30 27 31 28 /* AES-CBC/ECB/CTR command token */ ··· 47 50 #define AES_TFM_128BITS cpu_to_le32(0xb << 16) 48 51 #define AES_TFM_192BITS cpu_to_le32(0xd << 16) 49 52 #define AES_TFM_256BITS cpu_to_le32(0xf << 16) 53 + #define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21) 54 + #define AES_TFM_GHASH cpu_to_le32(0x4 << 23) 50 55 /* AES transform information word 1 fields */ 51 56 #define AES_TFM_ECB cpu_to_le32(0x0 << 0) 52 57 #define AES_TFM_CBC cpu_to_le32(0x1 << 0) ··· 58 59 #define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */ 59 60 #define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10) 60 61 #define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) 61 - #define AES_TFM_GHASH_DIG cpu_to_le32(0x2 << 21) 62 - #define AES_TFM_GHASH cpu_to_le32(0x4 << 23) 63 62 64 63 /* AES flags */ 64 + #define AES_FLAGS_CIPHER_MSK GENMASK(2, 0) 65 65 #define AES_FLAGS_ECB BIT(0) 66 66 #define AES_FLAGS_CBC BIT(1) 67 67 #define AES_FLAGS_CTR BIT(2) ··· 71 73 #define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) 72 74 73 75 /** 74 - * Command token(CT) is a set of hardware instructions that 75 - * are used to control engine's processing flow of AES. 76 + * mtk_aes_info - hardware information of AES 77 + * @cmd: command token, hardware instruction 78 + * @tfm: transform state of cipher algorithm. 79 + * @state: contains keys and initial vectors. 76 80 * 77 - * Transform information(TFM) is used to define AES state and 78 - * contains all keys and initial vectors. 
79 - * 80 - * The engine requires CT and TFM to do: 81 - * - Commands decoding and control of the engine's data path. 82 - * - Coordinating hardware data fetch and store operations. 83 - * - Result token construction and output. 84 - * 85 - * Memory map of GCM's TFM: 81 + * Memory layout of GCM buffer: 86 82 * /-----------\ 87 83 * | AES KEY | 128/196/256 bits 88 84 * |-----------| ··· 84 92 * |-----------| 85 93 * | IVs | 4 * 4 bytes 86 94 * \-----------/ 95 + * 96 + * The engine requires all these info to do: 97 + * - Commands decoding and control of the engine's data path. 98 + * - Coordinating hardware data fetch and store operations. 99 + * - Result token construction and output. 87 100 */ 88 - struct mtk_aes_ct { 89 - __le32 cmd[AES_CT_SIZE_GCM_IN]; 90 - }; 91 - 92 - struct mtk_aes_tfm { 93 - __le32 ctrl[2]; 94 - __le32 state[SIZE_IN_WORDS(AES_KEYSIZE_256 + AES_BLOCK_SIZE * 2)]; 101 + struct mtk_aes_info { 102 + __le32 cmd[AES_MAX_CT_SIZE]; 103 + __le32 tfm[2]; 104 + __le32 state[AES_MAX_STATE_BUF_SIZE]; 95 105 }; 96 106 97 107 struct mtk_aes_reqctx { ··· 103 109 struct mtk_aes_base_ctx { 104 110 struct mtk_cryp *cryp; 105 111 u32 keylen; 112 + __le32 keymode; 113 + 106 114 mtk_aes_fn start; 107 115 108 - struct mtk_aes_ct ct; 116 + struct mtk_aes_info info; 109 117 dma_addr_t ct_dma; 110 - struct mtk_aes_tfm tfm; 111 118 dma_addr_t tfm_dma; 112 119 113 120 __le32 ct_hdr; ··· 245 250 sg->length += dma->remainder; 246 251 } 247 252 253 + static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size) 254 + { 255 + int i; 256 + 257 + for (i = 0; i < SIZE_IN_WORDS(size); i++) 258 + dst[i] = cpu_to_le32(src[i]); 259 + } 260 + 261 + static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size) 262 + { 263 + int i; 264 + 265 + for (i = 0; i < SIZE_IN_WORDS(size); i++) 266 + dst[i] = cpu_to_be32(src[i]); 267 + } 268 + 248 269 static inline int mtk_aes_complete(struct mtk_cryp *cryp, 249 270 struct mtk_aes_rec *aes, 250 271 int 
err) ··· 342 331 { 343 332 struct mtk_aes_base_ctx *ctx = aes->ctx; 344 333 345 - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct), 346 - DMA_TO_DEVICE); 347 - dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm), 334 + dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), 348 335 DMA_TO_DEVICE); 349 336 350 337 if (aes->src.sg == aes->dst.sg) { ··· 373 364 static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 374 365 { 375 366 struct mtk_aes_base_ctx *ctx = aes->ctx; 367 + struct mtk_aes_info *info = &ctx->info; 376 368 377 - ctx->ct_dma = dma_map_single(cryp->dev, &ctx->ct, sizeof(ctx->ct), 369 + ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), 378 370 DMA_TO_DEVICE); 379 371 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) 380 372 goto exit; 381 373 382 - ctx->tfm_dma = dma_map_single(cryp->dev, &ctx->tfm, sizeof(ctx->tfm), 383 - DMA_TO_DEVICE); 384 - if (unlikely(dma_mapping_error(cryp->dev, ctx->tfm_dma))) 385 - goto tfm_map_err; 374 + ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd); 386 375 387 376 if (aes->src.sg == aes->dst.sg) { 388 377 aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, ··· 407 400 return mtk_aes_xmit(cryp, aes); 408 401 409 402 sg_map_err: 410 - dma_unmap_single(cryp->dev, ctx->tfm_dma, sizeof(ctx->tfm), 411 - DMA_TO_DEVICE); 412 - tfm_map_err: 413 - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->ct), 414 - DMA_TO_DEVICE); 403 + dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE); 415 404 exit: 416 405 return mtk_aes_complete(cryp, aes, -EINVAL); 417 406 } ··· 418 415 { 419 416 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); 420 417 struct mtk_aes_base_ctx *ctx = aes->ctx; 418 + struct mtk_aes_info *info = &ctx->info; 419 + u32 cnt = 0; 421 420 422 421 ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); 423 - ctx->ct.cmd[0] = AES_CMD0 | cpu_to_le32(len); 424 - ctx->ct.cmd[1] = AES_CMD1; 422 + info->cmd[cnt++] = AES_CMD0 | 
cpu_to_le32(len); 423 + info->cmd[cnt++] = AES_CMD1; 425 424 425 + info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode; 426 426 if (aes->flags & AES_FLAGS_ENCRYPT) 427 - ctx->tfm.ctrl[0] = AES_TFM_BASIC_OUT; 427 + info->tfm[0] |= AES_TFM_BASIC_OUT; 428 428 else 429 - ctx->tfm.ctrl[0] = AES_TFM_BASIC_IN; 429 + info->tfm[0] |= AES_TFM_BASIC_IN; 430 430 431 - if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128)) 432 - ctx->tfm.ctrl[0] |= AES_TFM_128BITS; 433 - else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256)) 434 - ctx->tfm.ctrl[0] |= AES_TFM_256BITS; 435 - else 436 - ctx->tfm.ctrl[0] |= AES_TFM_192BITS; 431 + switch (aes->flags & AES_FLAGS_CIPHER_MSK) { 432 + case AES_FLAGS_CBC: 433 + info->tfm[1] = AES_TFM_CBC; 434 + break; 435 + case AES_FLAGS_ECB: 436 + info->tfm[1] = AES_TFM_ECB; 437 + goto ecb; 438 + case AES_FLAGS_CTR: 439 + info->tfm[1] = AES_TFM_CTR_LOAD; 440 + goto ctr; 437 441 438 - if (aes->flags & AES_FLAGS_CBC) { 439 - const u32 *iv = (const u32 *)req->info; 440 - u32 *iv_state = ctx->tfm.state + ctx->keylen; 441 - int i; 442 - 443 - ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen + 444 - SIZE_IN_WORDS(AES_BLOCK_SIZE)); 445 - ctx->tfm.ctrl[1] = AES_TFM_CBC | AES_TFM_FULL_IV; 446 - 447 - for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++) 448 - iv_state[i] = cpu_to_le32(iv[i]); 449 - 450 - ctx->ct.cmd[2] = AES_CMD2; 451 - ctx->ct_size = AES_CT_SIZE_CBC; 452 - } else if (aes->flags & AES_FLAGS_ECB) { 453 - ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen); 454 - ctx->tfm.ctrl[1] = AES_TFM_ECB; 455 - 456 - ctx->ct_size = AES_CT_SIZE_ECB; 457 - } else if (aes->flags & AES_FLAGS_CTR) { 458 - ctx->tfm.ctrl[0] |= AES_TFM_SIZE(ctx->keylen + 459 - SIZE_IN_WORDS(AES_BLOCK_SIZE)); 460 - ctx->tfm.ctrl[1] = AES_TFM_CTR_LOAD | AES_TFM_FULL_IV; 461 - 462 - ctx->ct.cmd[2] = AES_CMD2; 463 - ctx->ct_size = AES_CT_SIZE_CTR; 442 + default: 443 + /* Should not happen... 
*/ 444 + return; 464 445 } 446 + 447 + mtk_aes_write_state_le(info->state + ctx->keylen, req->info, 448 + AES_BLOCK_SIZE); 449 + ctr: 450 + info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE)); 451 + info->tfm[1] |= AES_TFM_FULL_IV; 452 + info->cmd[cnt++] = AES_CMD2; 453 + ecb: 454 + ctx->ct_size = cnt; 465 455 } 466 456 467 457 static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, ··· 568 572 struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx); 569 573 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); 570 574 struct scatterlist *src, *dst; 571 - int i; 572 - u32 start, end, ctr, blocks, *iv_state; 575 + u32 start, end, ctr, blocks; 573 576 size_t datalen; 574 577 bool fragmented = false; 575 578 ··· 597 602 scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset)); 598 603 599 604 /* Write IVs into transform state buffer. */ 600 - iv_state = ctx->tfm.state + ctx->keylen; 601 - for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++) 602 - iv_state[i] = cpu_to_le32(cctx->iv[i]); 605 + mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv, 606 + AES_BLOCK_SIZE); 603 607 604 608 if (unlikely(fragmented)) { 605 609 /* ··· 633 639 const u8 *key, u32 keylen) 634 640 { 635 641 struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm); 636 - const u32 *aes_key = (const u32 *)key; 637 - u32 *key_state = ctx->tfm.state; 638 - int i; 639 642 640 - if (keylen != AES_KEYSIZE_128 && 641 - keylen != AES_KEYSIZE_192 && 642 - keylen != AES_KEYSIZE_256) { 643 + switch (keylen) { 644 + case AES_KEYSIZE_128: 645 + ctx->keymode = AES_TFM_128BITS; 646 + break; 647 + case AES_KEYSIZE_192: 648 + ctx->keymode = AES_TFM_192BITS; 649 + break; 650 + case AES_KEYSIZE_256: 651 + ctx->keymode = AES_TFM_256BITS; 652 + break; 653 + 654 + default: 643 655 crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 644 656 return -EINVAL; 645 657 } 646 658 647 659 ctx->keylen = SIZE_IN_WORDS(keylen); 648 - 649 - for (i = 0; i < ctx->keylen; i++) 650 
- key_state[i] = cpu_to_le32(aes_key[i]); 660 + mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); 651 661 652 662 return 0; 653 663 } ··· 823 825 struct aead_request *req = aead_request_cast(aes->areq); 824 826 struct mtk_aes_base_ctx *ctx = aes->ctx; 825 827 struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 826 - const u32 *iv = (const u32 *)req->iv; 827 - u32 *iv_state = ctx->tfm.state + ctx->keylen + 828 - SIZE_IN_WORDS(AES_BLOCK_SIZE); 828 + struct mtk_aes_info *info = &ctx->info; 829 829 u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); 830 - int i; 830 + u32 cnt = 0; 831 831 832 832 ctx->ct_hdr = AES_CT_CTRL_HDR | len; 833 833 834 - ctx->ct.cmd[0] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen); 835 - ctx->ct.cmd[1] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen); 836 - ctx->ct.cmd[2] = AES_GCM_CMD2; 837 - ctx->ct.cmd[3] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen); 834 + info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen); 835 + info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen); 836 + info->cmd[cnt++] = AES_GCM_CMD2; 837 + info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen); 838 838 839 839 if (aes->flags & AES_FLAGS_ENCRYPT) { 840 - ctx->ct.cmd[4] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize); 841 - ctx->ct_size = AES_CT_SIZE_GCM_OUT; 842 - ctx->tfm.ctrl[0] = AES_TFM_GCM_OUT; 840 + info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize); 841 + info->tfm[0] = AES_TFM_GCM_OUT; 843 842 } else { 844 - ctx->ct.cmd[4] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize); 845 - ctx->ct.cmd[5] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize); 846 - ctx->ct_size = AES_CT_SIZE_GCM_IN; 847 - ctx->tfm.ctrl[0] = AES_TFM_GCM_IN; 843 + info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize); 844 + info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize); 845 + info->tfm[0] = AES_TFM_GCM_IN; 848 846 } 847 + ctx->ct_size = cnt; 849 848 850 - if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_128)) 851 - ctx->tfm.ctrl[0] |= 
AES_TFM_128BITS; 852 - else if (ctx->keylen == SIZE_IN_WORDS(AES_KEYSIZE_256)) 853 - ctx->tfm.ctrl[0] |= AES_TFM_256BITS; 854 - else 855 - ctx->tfm.ctrl[0] |= AES_TFM_192BITS; 849 + info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE( 850 + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) | 851 + ctx->keymode; 852 + info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV | 853 + AES_TFM_ENC_HASH; 856 854 857 - ctx->tfm.ctrl[0] |= AES_TFM_GHASH_DIG | AES_TFM_GHASH | 858 - AES_TFM_SIZE(ctx->keylen + SIZE_IN_WORDS( 859 - AES_BLOCK_SIZE + ivsize)); 860 - ctx->tfm.ctrl[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | 861 - AES_TFM_3IV | AES_TFM_ENC_HASH; 862 - 863 - for (i = 0; i < SIZE_IN_WORDS(ivsize); i++) 864 - iv_state[i] = cpu_to_le32(iv[i]); 855 + mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS( 856 + AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize); 865 857 } 866 858 867 859 static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, ··· 967 979 struct scatterlist sg[1]; 968 980 struct skcipher_request req; 969 981 } *data; 970 - const u32 *aes_key; 971 - u32 *key_state, *hash_state; 972 - int err, i; 982 + int err; 973 983 974 - if (keylen != AES_KEYSIZE_256 && 975 - keylen != AES_KEYSIZE_192 && 976 - keylen != AES_KEYSIZE_128) { 984 + switch (keylen) { 985 + case AES_KEYSIZE_128: 986 + ctx->keymode = AES_TFM_128BITS; 987 + break; 988 + case AES_KEYSIZE_192: 989 + ctx->keymode = AES_TFM_192BITS; 990 + break; 991 + case AES_KEYSIZE_256: 992 + ctx->keymode = AES_TFM_256BITS; 993 + break; 994 + 995 + default: 977 996 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); 978 997 return -EINVAL; 979 998 } 980 999 981 - key_state = ctx->tfm.state; 982 - aes_key = (u32 *)key; 983 1000 ctx->keylen = SIZE_IN_WORDS(keylen); 984 - 985 - for (i = 0; i < ctx->keylen; i++) 986 - ctx->tfm.state[i] = cpu_to_le32(aes_key[i]); 987 1001 988 1002 /* Same as crypto_gcm_setkey() from crypto/gcm.c */ 989 1003 
crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); ··· 1021 1031 if (err) 1022 1032 goto out; 1023 1033 1024 - hash_state = key_state + ctx->keylen; 1025 - 1026 - for (i = 0; i < 4; i++) 1027 - hash_state[i] = cpu_to_be32(data->hash[i]); 1034 + /* Write key into state buffer */ 1035 + mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); 1036 + /* Write key(H) into state buffer */ 1037 + mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash, 1038 + AES_BLOCK_SIZE); 1028 1039 out: 1029 1040 kzfree(data); 1030 1041 return err;
+39 -53
drivers/crypto/mediatek/mtk-sha.c
··· 23 23 #define SHA_OP_FINAL 2 24 24 25 25 #define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0)) 26 + #define SHA_MAX_DIGEST_BUF_SIZE 32 26 27 27 28 /* SHA command token */ 28 29 #define SHA_CT_SIZE 5 ··· 34 33 35 34 /* SHA transform information */ 36 35 #define SHA_TFM_HASH cpu_to_le32(0x2 << 0) 37 - #define SHA_TFM_INNER_DIG cpu_to_le32(0x1 << 21) 38 36 #define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8) 39 37 #define SHA_TFM_START cpu_to_le32(0x1 << 4) 40 38 #define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5) ··· 60 60 #define SHA_FLAGS_PAD BIT(10) 61 61 62 62 /** 63 - * mtk_sha_ct is a set of hardware instructions(command token) 64 - * that are used to control engine's processing flow of SHA, 65 - * and it contains the first two words of transform state. 66 - */ 67 - struct mtk_sha_ct { 68 - __le32 ctrl[2]; 69 - __le32 cmd[3]; 70 - }; 71 - 72 - /** 73 - * mtk_sha_tfm is used to define SHA transform state 74 - * and store result digest that produced by engine. 75 - */ 76 - struct mtk_sha_tfm { 77 - __le32 ctrl[2]; 78 - __le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)]; 79 - }; 80 - 81 - /** 82 - * mtk_sha_info consists of command token and transform state 83 - * of SHA, its role is similar to mtk_aes_info. 63 + * mtk_sha_info - hardware information of AES 64 + * @cmd: command token, hardware instruction 65 + * @tfm: transform state of cipher algorithm. 66 + * @state: contains keys and initial vectors. 
67 + * 84 68 */ 85 69 struct mtk_sha_info { 86 - struct mtk_sha_ct ct; 87 - struct mtk_sha_tfm tfm; 70 + __le32 ctrl[2]; 71 + __le32 cmd[3]; 72 + __le32 tfm[2]; 73 + __le32 digest[SHA_MAX_DIGEST_BUF_SIZE]; 88 74 }; 89 75 90 76 struct mtk_sha_reqctx { ··· 79 93 unsigned long op; 80 94 81 95 u64 digcnt; 82 - bool start; 83 96 size_t bufcnt; 84 97 dma_addr_t dma_addr; 85 98 ··· 250 265 bits[1] = cpu_to_be64(size << 3); 251 266 bits[0] = cpu_to_be64(size >> 61); 252 267 253 - if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) { 268 + switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { 269 + case SHA_FLAGS_SHA384: 270 + case SHA_FLAGS_SHA512: 254 271 index = ctx->bufcnt & 0x7f; 255 272 padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); 256 273 *(ctx->buffer + ctx->bufcnt) = 0x80; ··· 260 273 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); 261 274 ctx->bufcnt += padlen + 16; 262 275 ctx->flags |= SHA_FLAGS_PAD; 263 - } else { 276 + break; 277 + 278 + default: 264 279 index = ctx->bufcnt & 0x3f; 265 280 padlen = (index < 56) ? 
(56 - index) : ((64 + 56) - index); 266 281 *(ctx->buffer + ctx->bufcnt) = 0x80; ··· 270 281 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); 271 282 ctx->bufcnt += padlen + 8; 272 283 ctx->flags |= SHA_FLAGS_PAD; 284 + break; 273 285 } 274 286 } 275 287 276 288 /* Initialize basic transform information of SHA */ 277 289 static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx) 278 290 { 279 - struct mtk_sha_ct *ct = &ctx->info.ct; 280 - struct mtk_sha_tfm *tfm = &ctx->info.tfm; 291 + struct mtk_sha_info *info = &ctx->info; 281 292 282 293 ctx->ct_hdr = SHA_CT_CTRL_HDR; 283 294 ctx->ct_size = SHA_CT_SIZE; 284 295 285 - tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG | 286 - SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds)); 296 + info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds)); 287 297 288 298 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { 289 299 case SHA_FLAGS_SHA1: 290 - tfm->ctrl[0] |= SHA_TFM_SHA1; 300 + info->tfm[0] |= SHA_TFM_SHA1; 291 301 break; 292 302 case SHA_FLAGS_SHA224: 293 - tfm->ctrl[0] |= SHA_TFM_SHA224; 303 + info->tfm[0] |= SHA_TFM_SHA224; 294 304 break; 295 305 case SHA_FLAGS_SHA256: 296 - tfm->ctrl[0] |= SHA_TFM_SHA256; 306 + info->tfm[0] |= SHA_TFM_SHA256; 297 307 break; 298 308 case SHA_FLAGS_SHA384: 299 - tfm->ctrl[0] |= SHA_TFM_SHA384; 309 + info->tfm[0] |= SHA_TFM_SHA384; 300 310 break; 301 311 case SHA_FLAGS_SHA512: 302 - tfm->ctrl[0] |= SHA_TFM_SHA512; 312 + info->tfm[0] |= SHA_TFM_SHA512; 303 313 break; 304 314 305 315 default: ··· 306 318 return; 307 319 } 308 320 309 - tfm->ctrl[1] = SHA_TFM_HASH_STORE; 310 - ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START; 311 - ct->ctrl[1] = tfm->ctrl[1]; 321 + info->tfm[1] = SHA_TFM_HASH_STORE; 322 + info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START; 323 + info->ctrl[1] = info->tfm[1]; 312 324 313 - ct->cmd[0] = SHA_CMD0; 314 - ct->cmd[1] = SHA_CMD1; 315 - ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); 325 + info->cmd[0] = SHA_CMD0; 326 + 
info->cmd[1] = SHA_CMD1; 327 + info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); 316 328 } 317 329 318 330 /* ··· 325 337 { 326 338 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 327 339 struct mtk_sha_info *info = &ctx->info; 328 - struct mtk_sha_ct *ct = &info->ct; 329 - 330 - if (ctx->start) 331 - ctx->start = false; 332 - else 333 - ct->ctrl[0] &= ~SHA_TFM_START; 334 340 335 341 ctx->ct_hdr &= ~SHA_DATA_LEN_MSK; 336 342 ctx->ct_hdr |= cpu_to_le32(len1 + len2); 337 - ct->cmd[0] &= ~SHA_DATA_LEN_MSK; 338 - ct->cmd[0] |= cpu_to_le32(len1 + len2); 343 + info->cmd[0] &= ~SHA_DATA_LEN_MSK; 344 + info->cmd[0] |= cpu_to_le32(len1 + len2); 345 + 346 + /* Setting SHA_TFM_START only for the first iteration */ 347 + if (ctx->digcnt) 348 + info->ctrl[0] &= ~SHA_TFM_START; 339 349 340 350 ctx->digcnt += len1; 341 351 ··· 343 357 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); 344 358 return -EINVAL; 345 359 } 346 - ctx->tfm_dma = ctx->ct_dma + sizeof(*ct); 360 + 361 + ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd); 347 362 348 363 return 0; 349 364 } ··· 409 422 ctx->bufcnt = 0; 410 423 ctx->digcnt = 0; 411 424 ctx->buffer = tctx->buf; 412 - ctx->start = true; 413 425 414 426 if (tctx->flags & SHA_FLAGS_HMAC) { 415 427 struct mtk_sha_hmac_ctx *bctx = tctx->base; ··· 621 635 static int mtk_sha_finish(struct ahash_request *req) 622 636 { 623 637 struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 624 - u32 *digest = ctx->info.tfm.digest; 638 + __le32 *digest = ctx->info.digest; 625 639 u32 *result = (u32 *)req->result; 626 640 int i; 627 641