Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: chcr - Add ctr mode and process large sg entries for cipher

It sends multiple WRs to H/W to handle large sg lists. Adds ctr(aes)
and rfc3686(ctr(aes)) modes.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Harsh Jain and committed by
Herbert Xu
b8fd1f41 d600fc8a

+690 -145
+646 -140
drivers/crypto/chelsio/chcr_algo.c
··· 55 55 #include <crypto/hash.h> 56 56 #include <crypto/sha.h> 57 57 #include <crypto/authenc.h> 58 + #include <crypto/ctr.h> 59 + #include <crypto/gf128mul.h> 58 60 #include <crypto/internal/aead.h> 59 61 #include <crypto/null.h> 60 62 #include <crypto/internal/skcipher.h> ··· 153 151 struct chcr_context *ctx = crypto_tfm_ctx(tfm); 154 152 struct uld_ctx *u_ctx = ULD_CTX(ctx); 155 153 struct chcr_req_ctx ctx_req; 156 - struct cpl_fw6_pld *fw6_pld; 157 154 unsigned int digestsize, updated_digestsize; 158 155 159 156 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { 160 157 case CRYPTO_ALG_TYPE_AEAD: 161 - ctx_req.req.aead_req = (struct aead_request *)req; 158 + ctx_req.req.aead_req = aead_request_cast(req); 162 159 ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); 163 160 dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst, 164 161 ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); ··· 170 169 &err); 171 170 ctx_req.ctx.reqctx->verify = VERIFY_HW; 172 171 } 172 + ctx_req.req.aead_req->base.complete(req, err); 173 173 break; 174 174 175 175 case CRYPTO_ALG_TYPE_ABLKCIPHER: 176 - ctx_req.req.ablk_req = (struct ablkcipher_request *)req; 177 - ctx_req.ctx.ablk_ctx = 178 - ablkcipher_request_ctx(ctx_req.req.ablk_req); 179 - if (!err) { 180 - fw6_pld = (struct cpl_fw6_pld *)input; 181 - memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], 182 - AES_BLOCK_SIZE); 183 - } 184 - dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, 185 - ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE); 186 - if (ctx_req.ctx.ablk_ctx->skb) { 187 - kfree_skb(ctx_req.ctx.ablk_ctx->skb); 188 - ctx_req.ctx.ablk_ctx->skb = NULL; 189 - } 176 + err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), 177 + input, err); 190 178 break; 191 179 192 180 case CRYPTO_ALG_TYPE_AHASH: 193 - ctx_req.req.ahash_req = (struct ahash_request *)req; 181 + ctx_req.req.ahash_req = ahash_request_cast(req); 194 182 ctx_req.ctx.ahash_ctx = 195 183 
ahash_request_ctx(ctx_req.req.ahash_req); 196 184 digestsize = ··· 204 214 sizeof(struct cpl_fw6_pld), 205 215 updated_digestsize); 206 216 } 217 + ctx_req.req.ahash_req->base.complete(req, err); 207 218 break; 208 219 } 209 220 return err; ··· 383 392 struct phys_sge_parm *sg_param) 384 393 { 385 394 struct phys_sge_pairs *to; 386 - int out_buf_size = sg_param->obsize; 395 + unsigned int len = 0, left_size = sg_param->obsize; 387 396 unsigned int nents = sg_param->nents, i, j = 0; 388 397 389 398 phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) ··· 400 409 phys_cpl->rss_hdr_int.hash_val = 0; 401 410 to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + 402 411 sizeof(struct cpl_rx_phys_dsgl)); 403 - 404 - for (i = 0; nents; to++) { 405 - for (j = 0; j < 8 && nents; j++, nents--) { 406 - out_buf_size -= sg_dma_len(sg); 407 - to->len[j] = htons(sg_dma_len(sg)); 412 + for (i = 0; nents && left_size; to++) { 413 + for (j = 0; j < 8 && nents && left_size; j++, nents--) { 414 + len = min(left_size, sg_dma_len(sg)); 415 + to->len[j] = htons(len); 408 416 to->addr[j] = cpu_to_be64(sg_dma_address(sg)); 417 + left_size -= len; 409 418 sg = sg_next(sg); 410 419 } 411 - } 412 - if (out_buf_size) { 413 - j--; 414 - to--; 415 - to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size)); 416 420 } 417 421 } 418 422 ··· 417 431 struct phys_sge_parm *sg_param) 418 432 { 419 433 if (!sg || !sg_param->nents) 420 - return 0; 434 + return -EINVAL; 421 435 422 436 sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); 423 437 if (sg_param->nents == 0) { ··· 484 498 } 485 499 } 486 500 501 + static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) 502 + { 503 + struct adapter *adap = netdev2adap(dev); 504 + struct sge_uld_txq_info *txq_info = 505 + adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 506 + struct sge_uld_txq *txq; 507 + int ret = 0; 508 + 509 + local_bh_disable(); 510 + txq = &txq_info->uldtxq[idx]; 511 + 
spin_lock(&txq->sendq.lock); 512 + if (txq->full) 513 + ret = -1; 514 + spin_unlock(&txq->sendq.lock); 515 + local_bh_enable(); 516 + return ret; 517 + } 518 + 487 519 static int generate_copy_rrkey(struct ablk_ctx *ablkctx, 488 520 struct _key_ctx *key_ctx) 489 521 { ··· 516 512 } 517 513 return 0; 518 514 } 515 + static int chcr_sg_ent_in_wr(struct scatterlist *src, 516 + struct scatterlist *dst, 517 + unsigned int minsg, 518 + unsigned int space, 519 + short int *sent, 520 + short int *dent) 521 + { 522 + int srclen = 0, dstlen = 0; 523 + int srcsg = minsg, dstsg = 0; 519 524 525 + *sent = 0; 526 + *dent = 0; 527 + while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) && 528 + space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { 529 + srclen += src->length; 530 + srcsg++; 531 + while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && 532 + space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { 533 + if (srclen <= dstlen) 534 + break; 535 + dstlen += dst->length; 536 + dst = sg_next(dst); 537 + dstsg++; 538 + } 539 + src = sg_next(src); 540 + } 541 + *sent = srcsg - minsg; 542 + *dent = dstsg; 543 + return min(srclen, dstlen); 544 + } 545 + 546 + static int chcr_cipher_fallback(struct crypto_skcipher *cipher, 547 + u32 flags, 548 + struct scatterlist *src, 549 + struct scatterlist *dst, 550 + unsigned int nbytes, 551 + u8 *iv, 552 + unsigned short op_type) 553 + { 554 + int err; 555 + 556 + SKCIPHER_REQUEST_ON_STACK(subreq, cipher); 557 + skcipher_request_set_tfm(subreq, cipher); 558 + skcipher_request_set_callback(subreq, flags, NULL, NULL); 559 + skcipher_request_set_crypt(subreq, src, dst, 560 + nbytes, iv); 561 + 562 + err = op_type ? 
crypto_skcipher_decrypt(subreq) : 563 + crypto_skcipher_encrypt(subreq); 564 + skcipher_request_zero(subreq); 565 + 566 + return err; 567 + 568 + } 520 569 static inline void create_wreq(struct chcr_context *ctx, 521 570 struct chcr_wr *chcr_req, 522 571 void *req, struct sk_buff *skb, ··· 622 565 * @qid: ingress qid where response of this WR should be received. 623 566 * @op_type: encryption or decryption 624 567 */ 625 - static struct sk_buff 626 - *create_cipher_wr(struct ablkcipher_request *req, 627 - unsigned short qid, 628 - unsigned short op_type) 568 + static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) 629 569 { 630 - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 570 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); 631 571 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 632 572 struct uld_ctx *u_ctx = ULD_CTX(ctx); 633 573 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 634 574 struct sk_buff *skb = NULL; 635 575 struct chcr_wr *chcr_req; 636 576 struct cpl_rx_phys_dsgl *phys_cpl; 637 - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 577 + struct chcr_blkcipher_req_ctx *reqctx = 578 + ablkcipher_request_ctx(wrparam->req); 638 579 struct phys_sge_parm sg_param; 639 580 unsigned int frags = 0, transhdr_len, phys_dsgl; 640 - unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; 641 - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : 642 - GFP_ATOMIC; 643 - 644 - if (!req->info) 645 - return ERR_PTR(-EINVAL); 646 - reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); 647 - if (reqctx->dst_nents <= 0) { 648 - pr_err("AES:Invalid Destination sg lists\n"); 649 - return ERR_PTR(-EINVAL); 650 - } 651 - if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || 652 - (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) { 653 - pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", 654 - ablkctx->enckey_len, req->nbytes, ivsize); 655 - return ERR_PTR(-EINVAL); 656 - } 581 + int error; 582 + unsigned int ivsize = AES_BLOCK_SIZE, kctx_len; 583 + gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 584 + GFP_KERNEL : GFP_ATOMIC; 657 585 658 586 phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents); 659 587 660 588 kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); 661 589 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); 662 590 skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags); 663 - if (!skb) 664 - return ERR_PTR(-ENOMEM); 591 + if (!skb) { 592 + error = -ENOMEM; 593 + goto err; 594 + } 665 595 skb_reserve(skb, sizeof(struct sge_opaque_hdr)); 666 596 chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len); 667 597 memset(chcr_req, 0, transhdr_len); 668 598 chcr_req->sec_cpl.op_ivinsrtofst = 669 599 FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1); 670 600 671 - chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes); 601 + chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes); 672 602 chcr_req->sec_cpl.aadstart_cipherstop_hi = 673 603 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0); 674 604 675 605 chcr_req->sec_cpl.cipherstop_lo_authinsert = 676 606 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); 677 - chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, 607 + chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, 678 608 ablkctx->ciph_mode, 679 609 0, 0, ivsize >> 1); 680 610 
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, 681 611 0, 1, phys_dsgl); 682 612 683 613 chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; 684 - if (op_type == CHCR_DECRYPT_OP) { 614 + if ((reqctx->op == CHCR_DECRYPT_OP) && 615 + (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 616 + CRYPTO_ALG_SUB_TYPE_CTR)) && 617 + (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 618 + CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { 685 619 generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); 686 620 } else { 687 - if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { 621 + if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || 622 + (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { 688 623 memcpy(chcr_req->key_ctx.key, ablkctx->key, 689 624 ablkctx->enckey_len); 690 625 } else { ··· 691 642 } 692 643 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 693 644 sg_param.nents = reqctx->dst_nents; 694 - sg_param.obsize = req->nbytes; 695 - sg_param.qid = qid; 696 - sg_param.align = 1; 697 - if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, 698 - &sg_param)) 645 + sg_param.obsize = wrparam->bytes; 646 + sg_param.qid = wrparam->qid; 647 + error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, 648 + reqctx->dst, &sg_param); 649 + if (error) 699 650 goto map_fail1; 700 651 701 652 skb_set_transport_header(skb, transhdr_len); 702 - memcpy(reqctx->iv, req->info, ivsize); 703 653 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); 704 - write_sg_to_skb(skb, &frags, req->src, req->nbytes); 705 - create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, 654 + write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes); 655 + create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1, 706 656 sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl, 707 657 ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); 708 658 reqctx->skb = skb; ··· 709 661 return skb; 710 662 map_fail1: 711 663 kfree_skb(skb); 712 - 
return ERR_PTR(-ENOMEM); 664 + err: 665 + return ERR_PTR(error); 713 666 } 714 667 715 - static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 668 + static inline int chcr_keyctx_ck_size(unsigned int keylen) 669 + { 670 + int ck_size = 0; 671 + 672 + if (keylen == AES_KEYSIZE_128) 673 + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 674 + else if (keylen == AES_KEYSIZE_192) 675 + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 676 + else if (keylen == AES_KEYSIZE_256) 677 + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 678 + else 679 + ck_size = 0; 680 + 681 + return ck_size; 682 + } 683 + static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher, 684 + const u8 *key, 685 + unsigned int keylen) 686 + { 687 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 688 + struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 689 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 690 + int err = 0; 691 + 692 + crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); 693 + crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & 694 + CRYPTO_TFM_REQ_MASK); 695 + err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); 696 + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 697 + tfm->crt_flags |= 698 + crypto_skcipher_get_flags(ablkctx->sw_cipher) & 699 + CRYPTO_TFM_RES_MASK; 700 + return err; 701 + } 702 + 703 + static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher, 704 + const u8 *key, 716 705 unsigned int keylen) 717 706 { 718 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 707 + struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 719 708 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 720 709 unsigned int ck_size, context_size; 721 710 u16 alignment = 0; 711 + int err; 722 712 723 - if (keylen == AES_KEYSIZE_128) { 724 - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 725 - } else if (keylen == AES_KEYSIZE_192) { 726 - alignment = 8; 727 - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 728 - } else if (keylen == 
AES_KEYSIZE_256) { 729 - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 730 - } else { 713 + err = chcr_cipher_fallback_setkey(cipher, key, keylen); 714 + if (err) 731 715 goto badkey_err; 732 - } 716 + 717 + ck_size = chcr_keyctx_ck_size(keylen); 718 + alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; 733 719 memcpy(ablkctx->key, key, keylen); 734 720 ablkctx->enckey_len = keylen; 735 721 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); ··· 775 693 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; 776 694 return 0; 777 695 badkey_err: 778 - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 696 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 779 697 ablkctx->enckey_len = 0; 780 - return -EINVAL; 698 + 699 + return err; 781 700 } 782 701 783 - static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) 702 + static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher, 703 + const u8 *key, 704 + unsigned int keylen) 784 705 { 785 - struct adapter *adap = netdev2adap(dev); 786 - struct sge_uld_txq_info *txq_info = 787 - adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; 788 - struct sge_uld_txq *txq; 706 + struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 707 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 708 + unsigned int ck_size, context_size; 709 + u16 alignment = 0; 710 + int err; 711 + 712 + err = chcr_cipher_fallback_setkey(cipher, key, keylen); 713 + if (err) 714 + goto badkey_err; 715 + ck_size = chcr_keyctx_ck_size(keylen); 716 + alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 
8 : 0; 717 + memcpy(ablkctx->key, key, keylen); 718 + ablkctx->enckey_len = keylen; 719 + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + 720 + keylen + alignment) >> 4; 721 + 722 + ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 723 + 0, 0, context_size); 724 + ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; 725 + 726 + return 0; 727 + badkey_err: 728 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 729 + ablkctx->enckey_len = 0; 730 + 731 + return err; 732 + } 733 + 734 + static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher, 735 + const u8 *key, 736 + unsigned int keylen) 737 + { 738 + struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 739 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 740 + unsigned int ck_size, context_size; 741 + u16 alignment = 0; 742 + int err; 743 + 744 + if (keylen < CTR_RFC3686_NONCE_SIZE) 745 + return -EINVAL; 746 + memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), 747 + CTR_RFC3686_NONCE_SIZE); 748 + 749 + keylen -= CTR_RFC3686_NONCE_SIZE; 750 + err = chcr_cipher_fallback_setkey(cipher, key, keylen); 751 + if (err) 752 + goto badkey_err; 753 + 754 + ck_size = chcr_keyctx_ck_size(keylen); 755 + alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 
8 : 0; 756 + memcpy(ablkctx->key, key, keylen); 757 + ablkctx->enckey_len = keylen; 758 + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + 759 + keylen + alignment) >> 4; 760 + 761 + ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 762 + 0, 0, context_size); 763 + ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; 764 + 765 + return 0; 766 + badkey_err: 767 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 768 + ablkctx->enckey_len = 0; 769 + 770 + return err; 771 + } 772 + static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) 773 + { 774 + unsigned int size = AES_BLOCK_SIZE; 775 + __be32 *b = (__be32 *)(dstiv + size); 776 + u32 c, prev; 777 + 778 + memcpy(dstiv, srciv, AES_BLOCK_SIZE); 779 + for (; size >= 4; size -= 4) { 780 + prev = be32_to_cpu(*--b); 781 + c = prev + add; 782 + *b = cpu_to_be32(c); 783 + if (prev < c) 784 + break; 785 + add = 1; 786 + } 787 + 788 + } 789 + 790 + static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) 791 + { 792 + __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); 793 + u64 c; 794 + u32 temp = be32_to_cpu(*--b); 795 + 796 + temp = ~temp; 797 + c = (u64)temp + 1; // No of block can processed withou overflow 798 + if ((bytes / AES_BLOCK_SIZE) > c) 799 + bytes = c * AES_BLOCK_SIZE; 800 + return bytes; 801 + } 802 + 803 + static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) 804 + { 805 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 806 + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 807 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 808 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 809 + struct crypto_cipher *cipher; 810 + int ret, i; 811 + u8 *key; 812 + unsigned int keylen; 813 + 814 + cipher = crypto_alloc_cipher("aes-generic", 0, 0); 815 + memcpy(iv, req->info, AES_BLOCK_SIZE); 816 + 817 + if (IS_ERR(cipher)) { 818 + ret = -ENOMEM; 819 + goto out; 820 + } 821 + keylen = ablkctx->enckey_len / 2; 822 + key = ablkctx->key + keylen; 
823 + ret = crypto_cipher_setkey(cipher, key, keylen); 824 + if (ret) 825 + goto out1; 826 + 827 + crypto_cipher_encrypt_one(cipher, iv, iv); 828 + for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++) 829 + gf128mul_x_ble((le128 *)iv, (le128 *)iv); 830 + 831 + crypto_cipher_decrypt_one(cipher, iv, iv); 832 + out1: 833 + crypto_free_cipher(cipher); 834 + out: 835 + return ret; 836 + } 837 + 838 + static int chcr_update_cipher_iv(struct ablkcipher_request *req, 839 + struct cpl_fw6_pld *fw6_pld, u8 *iv) 840 + { 841 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 842 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 843 + int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); 789 844 int ret = 0; 790 845 791 - local_bh_disable(); 792 - txq = &txq_info->uldtxq[idx]; 793 - spin_lock(&txq->sendq.lock); 794 - if (txq->full) 795 - ret = -1; 796 - spin_unlock(&txq->sendq.lock); 797 - local_bh_enable(); 846 + if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) 847 + ctr_add_iv(iv, req->info, (reqctx->processed / 848 + AES_BLOCK_SIZE)); 849 + else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) 850 + *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + 851 + CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / 852 + AES_BLOCK_SIZE) + 1); 853 + else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) 854 + ret = chcr_update_tweak(req, iv); 855 + else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { 856 + if (reqctx->op) 857 + sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, 858 + 16, 859 + reqctx->processed - AES_BLOCK_SIZE); 860 + else 861 + memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); 862 + } 863 + 798 864 return ret; 865 + 866 + } 867 + 868 + /* We need separate function for final iv because in rfc3686 Initial counter 869 + * starts from 1 and buffer size of iv is 8 byte only which remains constant 870 + * for subsequent update requests 871 + */ 872 + 873 + static int chcr_final_cipher_iv(struct ablkcipher_request *req, 874 + struct cpl_fw6_pld 
*fw6_pld, u8 *iv) 875 + { 876 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 877 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 878 + int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); 879 + int ret = 0; 880 + 881 + if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) 882 + ctr_add_iv(iv, req->info, (reqctx->processed / 883 + AES_BLOCK_SIZE)); 884 + else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) 885 + ret = chcr_update_tweak(req, iv); 886 + else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { 887 + if (reqctx->op) 888 + sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, 889 + 16, 890 + reqctx->processed - AES_BLOCK_SIZE); 891 + else 892 + memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); 893 + 894 + } 895 + return ret; 896 + 897 + } 898 + 899 + 900 + static int chcr_handle_cipher_resp(struct ablkcipher_request *req, 901 + unsigned char *input, int err) 902 + { 903 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 904 + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 905 + struct uld_ctx *u_ctx = ULD_CTX(ctx); 906 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 907 + struct sk_buff *skb; 908 + struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; 909 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 910 + struct cipher_wr_param wrparam; 911 + int bytes; 912 + 913 + dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents, 914 + DMA_FROM_DEVICE); 915 + 916 + if (reqctx->skb) { 917 + kfree_skb(reqctx->skb); 918 + reqctx->skb = NULL; 919 + } 920 + if (err) 921 + goto complete; 922 + 923 + if (req->nbytes == reqctx->processed) { 924 + err = chcr_final_cipher_iv(req, fw6_pld, req->info); 925 + goto complete; 926 + } 927 + 928 + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 929 + ctx->tx_qidx))) { 930 + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { 931 + err = -EBUSY; 932 + goto complete; 933 + } 934 + 935 + } 936 + wrparam.srcsg = 
scatterwalk_ffwd(reqctx->srcffwd, req->src, 937 + reqctx->processed); 938 + reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg, 939 + reqctx->processed); 940 + if (!wrparam.srcsg || !reqctx->dst) { 941 + pr_err("Input sg list length less that nbytes\n"); 942 + err = -EINVAL; 943 + goto complete; 944 + } 945 + bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1, 946 + SPACE_LEFT(ablkctx->enckey_len), 947 + &wrparam.snent, &reqctx->dst_nents); 948 + if ((bytes + reqctx->processed) >= req->nbytes) 949 + bytes = req->nbytes - reqctx->processed; 950 + else 951 + bytes = ROUND_16(bytes); 952 + err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); 953 + if (err) 954 + goto complete; 955 + 956 + if (unlikely(bytes == 0)) { 957 + err = chcr_cipher_fallback(ablkctx->sw_cipher, 958 + req->base.flags, 959 + wrparam.srcsg, 960 + reqctx->dst, 961 + req->nbytes - reqctx->processed, 962 + reqctx->iv, 963 + reqctx->op); 964 + goto complete; 965 + } 966 + 967 + if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 968 + CRYPTO_ALG_SUB_TYPE_CTR) 969 + bytes = adjust_ctr_overflow(reqctx->iv, bytes); 970 + reqctx->processed += bytes; 971 + wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; 972 + wrparam.req = req; 973 + wrparam.bytes = bytes; 974 + skb = create_cipher_wr(&wrparam); 975 + if (IS_ERR(skb)) { 976 + pr_err("chcr : %s : Failed to form WR. 
No memory\n", __func__); 977 + err = PTR_ERR(skb); 978 + goto complete; 979 + } 980 + skb->dev = u_ctx->lldi.ports[0]; 981 + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 982 + chcr_send_wr(skb); 983 + return 0; 984 + complete: 985 + req->base.complete(&req->base, err); 986 + return err; 987 + } 988 + 989 + static int process_cipher(struct ablkcipher_request *req, 990 + unsigned short qid, 991 + struct sk_buff **skb, 992 + unsigned short op_type) 993 + { 994 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 995 + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); 996 + struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 997 + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 998 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 999 + struct cipher_wr_param wrparam; 1000 + int bytes, err = -EINVAL; 1001 + 1002 + reqctx->newdstsg = NULL; 1003 + reqctx->processed = 0; 1004 + if (!req->info) 1005 + goto error; 1006 + if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || 1007 + (req->nbytes == 0) || 1008 + (req->nbytes % crypto_ablkcipher_blocksize(tfm))) { 1009 + pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", 1010 + ablkctx->enckey_len, req->nbytes, ivsize); 1011 + goto error; 1012 + } 1013 + wrparam.srcsg = req->src; 1014 + reqctx->dstsg = req->dst; 1015 + bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG, 1016 + SPACE_LEFT(ablkctx->enckey_len), 1017 + &wrparam.snent, 1018 + &reqctx->dst_nents); 1019 + if ((bytes + reqctx->processed) >= req->nbytes) 1020 + bytes = req->nbytes - reqctx->processed; 1021 + else 1022 + bytes = ROUND_16(bytes); 1023 + if (unlikely(bytes > req->nbytes)) 1024 + bytes = req->nbytes; 1025 + if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 1026 + CRYPTO_ALG_SUB_TYPE_CTR) { 1027 + bytes = adjust_ctr_overflow(req->info, bytes); 1028 + } 1029 + if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 1030 + CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { 1031 + 
memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); 1032 + memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info, 1033 + CTR_RFC3686_IV_SIZE); 1034 + 1035 + /* initialize counter portion of counter block */ 1036 + *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + 1037 + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); 1038 + 1039 + } else { 1040 + 1041 + memcpy(reqctx->iv, req->info, ivsize); 1042 + } 1043 + if (unlikely(bytes == 0)) { 1044 + err = chcr_cipher_fallback(ablkctx->sw_cipher, 1045 + req->base.flags, 1046 + req->src, 1047 + req->dst, 1048 + req->nbytes, 1049 + req->info, 1050 + op_type); 1051 + goto error; 1052 + } 1053 + reqctx->processed = bytes; 1054 + reqctx->dst = reqctx->dstsg; 1055 + reqctx->op = op_type; 1056 + wrparam.qid = qid; 1057 + wrparam.req = req; 1058 + wrparam.bytes = bytes; 1059 + *skb = create_cipher_wr(&wrparam); 1060 + if (IS_ERR(*skb)) { 1061 + err = PTR_ERR(*skb); 1062 + goto error; 1063 + } 1064 + 1065 + return 0; 1066 + error: 1067 + return err; 799 1068 } 800 1069 801 1070 static int chcr_aes_encrypt(struct ablkcipher_request *req) 802 1071 { 803 1072 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 804 1073 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1074 + struct sk_buff *skb = NULL; 1075 + int err; 805 1076 struct uld_ctx *u_ctx = ULD_CTX(ctx); 806 - struct sk_buff *skb; 807 1077 808 1078 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 809 1079 ctx->tx_qidx))) { ··· 1163 729 return -EBUSY; 1164 730 } 1165 731 1166 - skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], 732 + err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, 1167 733 CHCR_ENCRYPT_OP); 1168 - if (IS_ERR(skb)) { 1169 - pr_err("chcr : %s : Failed to form WR. 
No memory\n", __func__); 1170 - return PTR_ERR(skb); 1171 - } 734 + if (err || !skb) 735 + return err; 1172 736 skb->dev = u_ctx->lldi.ports[0]; 1173 737 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1174 738 chcr_send_wr(skb); ··· 1178 746 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1179 747 struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1180 748 struct uld_ctx *u_ctx = ULD_CTX(ctx); 1181 - struct sk_buff *skb; 749 + struct sk_buff *skb = NULL; 750 + int err; 1182 751 1183 752 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], 1184 753 ctx->tx_qidx))) { ··· 1187 754 return -EBUSY; 1188 755 } 1189 756 1190 - skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], 757 + err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb, 1191 758 CHCR_DECRYPT_OP); 1192 - if (IS_ERR(skb)) { 1193 - pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); 1194 - return PTR_ERR(skb); 1195 - } 759 + if (err || !skb) 760 + return err; 1196 761 skb->dev = u_ctx->lldi.ports[0]; 1197 762 set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx); 1198 763 chcr_send_wr(skb); ··· 1235 804 1236 805 static int chcr_cra_init(struct crypto_tfm *tfm) 1237 806 { 807 + struct crypto_alg *alg = tfm->__crt_alg; 808 + struct chcr_context *ctx = crypto_tfm_ctx(tfm); 809 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 810 + 811 + ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0, 812 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 813 + if (IS_ERR(ablkctx->sw_cipher)) { 814 + pr_err("failed to allocate fallback for %s\n", alg->cra_name); 815 + return PTR_ERR(ablkctx->sw_cipher); 816 + } 1238 817 tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); 1239 818 return chcr_device_init(crypto_tfm_ctx(tfm)); 819 + } 820 + 821 + static int chcr_rfc3686_init(struct crypto_tfm *tfm) 822 + { 823 + struct crypto_alg *alg = tfm->__crt_alg; 824 + struct chcr_context *ctx = crypto_tfm_ctx(tfm); 825 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 
826 + 827 + /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) 828 + * cannot be used as fallback in chcr_handle_cipher_response 829 + */ 830 + ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, 831 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); 832 + if (IS_ERR(ablkctx->sw_cipher)) { 833 + pr_err("failed to allocate fallback for %s\n", alg->cra_name); 834 + return PTR_ERR(ablkctx->sw_cipher); 835 + } 836 + tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); 837 + return chcr_device_init(crypto_tfm_ctx(tfm)); 838 + } 839 + 840 + 841 + static void chcr_cra_exit(struct crypto_tfm *tfm) 842 + { 843 + struct chcr_context *ctx = crypto_tfm_ctx(tfm); 844 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 845 + 846 + crypto_free_skcipher(ablkctx->sw_cipher); 1240 847 } 1241 848 1242 849 static int get_alg_config(struct algo_param *params, ··· 1394 925 if (param->sg_len != 0) 1395 926 write_sg_to_skb(skb, &frags, req->src, param->sg_len); 1396 927 1397 - create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0, 1398 - DUMMY_BYTES, 0); 928 + create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 929 + hash_size_in_response, 0, DUMMY_BYTES, 0); 1399 930 req_ctx->skb = skb; 1400 931 skb_get(skb); 1401 932 return skb; ··· 1698 1229 return err; 1699 1230 } 1700 1231 1701 - static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 1232 + static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1702 1233 unsigned int key_len) 1703 1234 { 1704 - struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); 1235 + struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher); 1705 1236 struct ablk_ctx *ablkctx = ABLK_CTX(ctx); 1706 1237 unsigned short context_size = 0; 1238 + int err; 1707 1239 1708 - if ((key_len != (AES_KEYSIZE_128 << 1)) && 1709 - (key_len != (AES_KEYSIZE_256 << 1))) { 1710 - crypto_tfm_set_flags((struct crypto_tfm *)tfm, 1711 - CRYPTO_TFM_RES_BAD_KEY_LEN); 1712 - ablkctx->enckey_len = 0; 
1713 - return -EINVAL; 1714 - 1715 - } 1240 + err = chcr_cipher_fallback_setkey(cipher, key, key_len); 1241 + if (err) 1242 + goto badkey_err; 1716 1243 1717 1244 memcpy(ablkctx->key, key, key_len); 1718 1245 ablkctx->enckey_len = key_len; ··· 1722 1257 0, context_size); 1723 1258 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; 1724 1259 return 0; 1260 + badkey_err: 1261 + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1262 + ablkctx->enckey_len = 0; 1263 + 1264 + return err; 1725 1265 } 1726 1266 1727 1267 static int chcr_sha_init(struct ahash_request *areq) ··· 1983 1513 } 1984 1514 write_buffer_to_skb(skb, &frags, req->iv, ivsize); 1985 1515 write_sg_to_skb(skb, &frags, src, req->cryptlen); 1986 - create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, 1516 + create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, 1987 1517 sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); 1988 1518 reqctx->skb = skb; 1989 1519 skb_get(skb); ··· 2282 1812 2283 1813 skb_set_transport_header(skb, transhdr_len); 2284 1814 frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx); 2285 - create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1, 1815 + create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1, 2286 1816 sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0); 2287 1817 reqctx->skb = skb; 2288 1818 skb_get(skb); ··· 2421 1951 write_sg_to_skb(skb, &frags, req->src, assoclen); 2422 1952 write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize); 2423 1953 write_sg_to_skb(skb, &frags, src, req->cryptlen); 2424 - create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, 1954 + create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1, 2425 1955 sizeof(struct cpl_rx_phys_dsgl) + dst_size, 2426 1956 reqctx->verify); 2427 1957 reqctx->skb = skb; ··· 3035 2565 static struct chcr_alg_template driver_algs[] = { 3036 2566 /* AES-CBC */ 3037 2567 { 3038 - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2568 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, 3039 
2569 .is_registered = 0, 3040 2570 .alg.crypto = { 3041 2571 .cra_name = "cbc(aes)", 3042 2572 .cra_driver_name = "cbc-aes-chcr", 3043 - .cra_priority = CHCR_CRA_PRIORITY, 3044 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 3045 - CRYPTO_ALG_ASYNC, 3046 2573 .cra_blocksize = AES_BLOCK_SIZE, 3047 - .cra_ctxsize = sizeof(struct chcr_context) 3048 - + sizeof(struct ablk_ctx), 3049 - .cra_alignmask = 0, 3050 - .cra_type = &crypto_ablkcipher_type, 3051 - .cra_module = THIS_MODULE, 3052 2574 .cra_init = chcr_cra_init, 3053 - .cra_exit = NULL, 2575 + .cra_exit = chcr_cra_exit, 3054 2576 .cra_u.ablkcipher = { 3055 2577 .min_keysize = AES_MIN_KEY_SIZE, 3056 2578 .max_keysize = AES_MAX_KEY_SIZE, ··· 3054 2592 } 3055 2593 }, 3056 2594 { 3057 - .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 2595 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, 3058 2596 .is_registered = 0, 3059 2597 .alg.crypto = { 3060 2598 .cra_name = "xts(aes)", 3061 2599 .cra_driver_name = "xts-aes-chcr", 3062 - .cra_priority = CHCR_CRA_PRIORITY, 3063 - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 3064 - CRYPTO_ALG_ASYNC, 3065 2600 .cra_blocksize = AES_BLOCK_SIZE, 3066 - .cra_ctxsize = sizeof(struct chcr_context) + 3067 - sizeof(struct ablk_ctx), 3068 - .cra_alignmask = 0, 3069 - .cra_type = &crypto_ablkcipher_type, 3070 - .cra_module = THIS_MODULE, 3071 2601 .cra_init = chcr_cra_init, 3072 2602 .cra_exit = NULL, 3073 - .cra_u = { 3074 - .ablkcipher = { 2603 + .cra_u .ablkcipher = { 3075 2604 .min_keysize = 2 * AES_MIN_KEY_SIZE, 3076 2605 .max_keysize = 2 * AES_MAX_KEY_SIZE, 3077 2606 .ivsize = AES_BLOCK_SIZE, ··· 3070 2617 .encrypt = chcr_aes_encrypt, 3071 2618 .decrypt = chcr_aes_decrypt, 3072 2619 } 2620 + } 2621 + }, 2622 + { 2623 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, 2624 + .is_registered = 0, 2625 + .alg.crypto = { 2626 + .cra_name = "ctr(aes)", 2627 + .cra_driver_name = "ctr-aes-chcr", 2628 + .cra_blocksize = 1, 2629 + .cra_init = chcr_cra_init, 2630 + .cra_exit = 
chcr_cra_exit, 2631 + .cra_u.ablkcipher = { 2632 + .min_keysize = AES_MIN_KEY_SIZE, 2633 + .max_keysize = AES_MAX_KEY_SIZE, 2634 + .ivsize = AES_BLOCK_SIZE, 2635 + .setkey = chcr_aes_ctr_setkey, 2636 + .encrypt = chcr_aes_encrypt, 2637 + .decrypt = chcr_aes_decrypt, 2638 + } 2639 + } 2640 + }, 2641 + { 2642 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER | 2643 + CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, 2644 + .is_registered = 0, 2645 + .alg.crypto = { 2646 + .cra_name = "rfc3686(ctr(aes))", 2647 + .cra_driver_name = "rfc3686-ctr-aes-chcr", 2648 + .cra_blocksize = 1, 2649 + .cra_init = chcr_rfc3686_init, 2650 + .cra_exit = chcr_cra_exit, 2651 + .cra_u.ablkcipher = { 2652 + .min_keysize = AES_MIN_KEY_SIZE + 2653 + CTR_RFC3686_NONCE_SIZE, 2654 + .max_keysize = AES_MAX_KEY_SIZE + 2655 + CTR_RFC3686_NONCE_SIZE, 2656 + .ivsize = CTR_RFC3686_IV_SIZE, 2657 + .setkey = chcr_aes_rfc3686_setkey, 2658 + .encrypt = chcr_aes_encrypt, 2659 + .decrypt = chcr_aes_decrypt, 2660 + .geniv = "seqiv", 3073 2661 } 3074 2662 } 3075 2663 }, ··· 3493 2999 continue; 3494 3000 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { 3495 3001 case CRYPTO_ALG_TYPE_ABLKCIPHER: 3002 + driver_algs[i].alg.crypto.cra_priority = 3003 + CHCR_CRA_PRIORITY; 3004 + driver_algs[i].alg.crypto.cra_module = THIS_MODULE; 3005 + driver_algs[i].alg.crypto.cra_flags = 3006 + CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | 3007 + CRYPTO_ALG_NEED_FALLBACK; 3008 + driver_algs[i].alg.crypto.cra_ctxsize = 3009 + sizeof(struct chcr_context) + 3010 + sizeof(struct ablk_ctx); 3011 + driver_algs[i].alg.crypto.cra_alignmask = 0; 3012 + driver_algs[i].alg.crypto.cra_type = 3013 + &crypto_ablkcipher_type; 3496 3014 err = crypto_register_alg(&driver_algs[i].alg.crypto); 3497 3015 name = driver_algs[i].alg.crypto.cra_driver_name; 3498 3016 break;
+25 -1
drivers/crypto/chelsio/chcr_algo.h
··· 219 219 #define MAX_NK 8 220 220 #define CRYPTO_MAX_IMM_TX_PKT_LEN 256 221 221 #define MAX_WR_SIZE 512 222 + #define ROUND_16(bytes) ((bytes) & 0xFFFFFFF0) 223 + #define MAX_DSGL_ENT 32 224 + #define MAX_DIGEST_SKB_SGE (MAX_SKB_FRAGS - 2) 225 + #define MIN_CIPHER_SG 1 /* IV */ 222 226 #define MIN_AUTH_SG 2 /*IV + AAD*/ 223 227 #define MIN_GCM_SG 2 /* IV + AAD*/ 228 + #define MIN_DIGEST_SG 1 /*Partial Buffer*/ 224 229 #define MIN_CCM_SG 3 /*IV+AAD+B0*/ 230 + #define SPACE_LEFT(len) \ 231 + ((MAX_WR_SIZE - WR_MIN_LEN - (len))) 232 + 233 + unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 234 + 48, 64, 72, 88, 235 + 96, 112, 120, 136, 236 + 144, 160, 168, 184, 237 + 192}; 238 + unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, 239 + 112, 112, 128, 128, 144, 144, 160, 160, 240 + 192, 192, 208, 208, 224, 224, 240, 240, 241 + 272, 272, 288, 288, 304, 304, 320, 320}; 225 242 226 243 struct algo_param { 227 244 unsigned int auth_mode; ··· 256 239 u64 scmd1; 257 240 }; 258 241 242 + struct cipher_wr_param { 243 + struct ablkcipher_request *req; 244 + struct scatterlist *srcsg; 245 + char *iv; 246 + int bytes; 247 + short int snent; 248 + unsigned short qid; 249 + }; 259 250 enum { 260 251 AES_KEYLENGTH_128BIT = 128, 261 252 AES_KEYLENGTH_192BIT = 192, ··· 318 293 unsigned int nents; 319 294 unsigned int obsize; 320 295 unsigned short qid; 321 - unsigned char align; 322 296 }; 323 297 324 298 struct crypto_result {
-1
drivers/crypto/chelsio/chcr_core.c
··· 115 115 /* call completion callback with failure status */ 116 116 if (req) { 117 117 error_status = chcr_handle_resp(req, input, error_status); 118 - req->complete(req, error_status); 119 118 } else { 120 119 pr_err("Incorrect request address from the firmware\n"); 121 120 return -EFAULT;
+3
drivers/crypto/chelsio/chcr_core.h
··· 53 53 #define MAC_ERROR_BIT 0 54 54 #define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1) 55 55 #define MAX_SALT 4 56 + #define WR_MIN_LEN (sizeof(struct chcr_wr) + \ 57 + sizeof(struct cpl_rx_phys_dsgl) + \ 58 + sizeof(struct ulptx_sgl)) 56 59 57 60 #define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev) 58 61
+16 -3
drivers/crypto/chelsio/chcr_crypto.h
··· 139 139 #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 140 140 #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000 141 141 #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 142 + #define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000 143 + #define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000 144 + #define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000 142 145 #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ 143 146 CRYPTO_ALG_SUB_TYPE_HASH_HMAC) 144 147 ··· 153 150 /* Aligned to 128 bit boundary */ 154 151 155 152 struct ablk_ctx { 153 + struct crypto_skcipher *sw_cipher; 156 154 __be32 key_ctx_hdr; 157 155 unsigned int enckey_len; 158 - u8 key[CHCR_AES_MAX_KEY_LEN]; 159 156 unsigned char ciph_mode; 157 + u8 key[CHCR_AES_MAX_KEY_LEN]; 158 + u8 nonce[4]; 160 159 u8 rrkey[AES_MAX_KEY_SIZE]; 161 160 }; 162 161 struct chcr_aead_reqctx { ··· 238 233 239 234 struct chcr_blkcipher_req_ctx { 240 235 struct sk_buff *skb; 241 - unsigned int dst_nents; 236 + struct scatterlist srcffwd[2]; 237 + struct scatterlist dstffwd[2]; 238 + struct scatterlist *dstsg; 239 + struct scatterlist *dst; 240 + struct scatterlist *newdstsg; 241 + unsigned int processed; 242 + unsigned int op; 243 + short int dst_nents; 242 244 u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; 243 245 }; 244 246 ··· 287 275 int size, 288 276 create_wr_t create_wr_fn); 289 277 static inline int get_aead_subtype(struct crypto_aead *aead); 290 - 278 + static int chcr_handle_cipher_resp(struct ablkcipher_request *req, 279 + unsigned char *input, int err); 291 280 #endif /* __CHCR_CRYPTO_H__ */