/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

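/*
 * Count the number of SG entries needed to cover @reqlen bytes of @sg,
 * skipping the first @skip bytes and splitting each DMA segment into
 * chunks of at most @entlen bytes.
 */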
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}

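/*
 * Per-device inflight WR accounting: refuse new work once the device has
 * started detaching, otherwise bump the inflight count used to track
 * outstanding work requests.
 */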
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);

	return err;
}

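/*
 * Expand the AES key schedule and copy out the last @nk round-key words in
 * reverse order; the hardware uses this reversed round key for decryption.
 */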
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

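/*
 * Hash exactly one block (the HMAC ipad/opad buffer) and export the raw
 * intermediate state, which is later loaded into the hardware key context.
 */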
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

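/*
 * Append an SG list to the destination DSGL, skipping @skip leading bytes
 * and splitting each DMA segment into entries of at most CHCR_DST_SG_SIZE;
 * eight (len, addr) pairs are packed per phys_sge_pairs block.
 */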
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

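/*
 * Append an SG list to the source ULPTX SGL. The first entry lands in the
 * inline (len0, addr0) slot; the rest fill (addr, len) pairs two at a time,
 * each entry capped at CHCR_SRC_SG_SIZE bytes.
 */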
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}

static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}

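/*
 * Compute how many source bytes fit in one hash work request given the
 * remaining @space, charging one SGL entry per CHCR_SRC_SG_SIZE chunk
 * via the sgl_ent_len[] size table.
 */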
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

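/*
 * Like chcr_hash_ent_in_wr(), but walks src and dst together and budgets
 * @space against both the ULPTX (sgl_ent_len) and DSGL (dsgl_ent_len)
 * tables; returns the byte count both lists can carry in one WR.
 */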
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}

static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_sync_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
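
/*
 * Fill the common crypto WR header fields: FW opcode/length words, the
 * request pointer as completion cookie, reply-queue routing and the
 * immediate-data length.
 */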
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: cipher work request parameters: the skcipher request, the
 *	ingress qid where the response of this WR should be received, and
 *	the number of bytes to process.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
				       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}

static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

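/*
 * Add @add to the 128-bit big-endian counter in @srciv, propagating the
 * carry across 32-bit words, and store the result in @dstiv.
 */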
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

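/*
 * Clamp @bytes so that the low 32-bit word of the CTR counter does not
 * wrap within a single work request; the remainder is sent separately.
 */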
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* Number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}

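/*
 * Recompute the XTS tweak after a partial request: re-encrypt the original
 * IV with the second key half and advance it by the number of blocks already
 * processed using GF(2^128) doubling, decrypting back once when the chunk is
 * not final.
 */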
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}

static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
					 AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
								AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/* Updated before sending last WR */
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}

/* A separate function is needed for the final IV because in RFC3686 the
 * initial counter starts at 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/* Already updated for Decrypt */
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}

static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->cryptlen,
					   req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	chcr_dec_wrcount(dev);
	req->base.complete(&req->base, err);
	return err;
}

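/*
 * Build the first cipher WR for a request: validate key/IV/length, DMA-map
 * the buffers, decide between immediate data and SGLs, prepare the per-mode
 * IV (RFC3686: nonce || IV || counter = 1), and fall back to the software
 * cipher when no bytes fit in a single WR.
 */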
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->iv)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /* Min dsgl size */
					       32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	} else {
		memcpy(reqctx->iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->cryptlen,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}

static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -ENOSPC;
			goto error;
		}
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}

static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment failed\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel ID used by the SGE to forward packets to the host.
		 * The same value should be used by FW in the cpl_fw6_pld
		 * RSS_CH field. The driver programs the PCI channel ID to be
		 * used in FW at queue-allocation time with the value
		 * "pi->tx_chan".
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;
}

static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
							CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));

	return chcr_device_init(ctx);
}

static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp()
	 */
	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
							CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
	return chcr_device_init(ctx);
}

static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_sync_skcipher(ablkctx->sw_cipher);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: hash work request parameters (lengths, key context, flags)
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((param->kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* The detach state for CHCR means lldi or padap is freed. Increasing
	 * the inflight count for the dev guarantees that lldi and padap are
	 * valid.
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}

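/*
 * Build the final SHA padding block by hand: a 0x80 terminator followed by
 * the total message bit length in the last 8 bytes of the block.
 */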
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error = -EINVAL;

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			error = -ENOSPC;
			goto err;
		}
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
1838
1839static int chcr_ahash_digest(struct ahash_request *req)
1840{
1841 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1842 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1843 struct chcr_dev *dev = h_ctx(rtfm)->dev;
1844 struct uld_ctx *u_ctx = NULL;
1845 struct sk_buff *skb;
1846 struct hash_wr_param params;
1847 u8 bs;
1848 int error, isfull = 0;
1849
1850 rtfm->init(req);
1851 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1852 error = chcr_inc_wrcount(dev);
1853 if (error)
1854 return -ENXIO;
1855
1856 u_ctx = ULD_CTX(h_ctx(rtfm));
1857 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1858 h_ctx(rtfm)->tx_qidx))) {
1859 isfull = 1;
1860 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1861 error = -ENOSPC;
1862 goto err;
1863 }
1864 }
1865
1866 chcr_init_hctx_per_wr(req_ctx);
1867 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1868 if (error) {
1869 error = -ENOMEM;
1870 goto err;
1871 }
1872
1873	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1874 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1875 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1876 params.kctx_len *= 2;
1877 params.opad_needed = 1;
1878 } else {
1879 params.opad_needed = 0;
1880 }
1881 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1882 HASH_SPACE_LEFT(params.kctx_len), 0);
1883 if (params.sg_len < req->nbytes) {
1884 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1885 params.kctx_len /= 2;
1886 params.opad_needed = 0;
1887 }
1888 params.last = 0;
1889 params.more = 1;
1890 params.scmd1 = 0;
1891 params.sg_len = rounddown(params.sg_len, bs);
1892 params.hash_size = params.alg_prm.result_size;
1893 } else {
1894 params.sg_len = req->nbytes;
1895 params.hash_size = crypto_ahash_digestsize(rtfm);
1896 params.last = 1;
1897 params.more = 0;
1898 params.scmd1 = req->nbytes + req_ctx->data_len;
1900 }
1901 params.bfr_len = 0;
1902 req_ctx->hctx_wr.result = 1;
1903 req_ctx->hctx_wr.srcsg = req->src;
1904 req_ctx->data_len += params.bfr_len + params.sg_len;
1905
1906 if (req->nbytes == 0) {
1907 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1908 params.more = 1;
1909 params.bfr_len = bs;
1910 }
1911
1912	skb = create_hash_wr(req, &params);
1913 if (IS_ERR(skb)) {
1914 error = PTR_ERR(skb);
1915 goto unmap;
1916 }
1917 req_ctx->hctx_wr.processed += params.sg_len;
1918 skb->dev = u_ctx->lldi.ports[0];
1919 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1920 chcr_send_wr(skb);
1921 return isfull ? -EBUSY : -EINPROGRESS;
1922unmap:
1923 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1924err:
1925 chcr_dec_wrcount(dev);
1926 return error;
1927}
1928
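/*
 * chcr_ahash_continue - issue the next work request for a hash whose
 * data did not fit in a single WR. Runs from the completion path, with
 * reqctx->partial_hash holding the intermediate digest and
 * hctx_wr->srcsg/src_ofst marking where the previous WR stopped.
 */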
1929static int chcr_ahash_continue(struct ahash_request *req)
1930{
1931 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1932 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1933 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1934 struct uld_ctx *u_ctx = NULL;
1935 struct sk_buff *skb;
1936 struct hash_wr_param params;
1937 u8 bs;
1938 int error;
1939
1940 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1941 u_ctx = ULD_CTX(h_ctx(rtfm));
1942	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1943 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1944 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1945 params.kctx_len *= 2;
1946 params.opad_needed = 1;
1947 } else {
1948 params.opad_needed = 0;
1949 }
1950 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1951 HASH_SPACE_LEFT(params.kctx_len),
1952 hctx_wr->src_ofst);
1953 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1954 params.sg_len = req->nbytes - hctx_wr->processed;
1955 if (!hctx_wr->result ||
1956 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1957 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1958 params.kctx_len /= 2;
1959 params.opad_needed = 0;
1960 }
1961 params.last = 0;
1962 params.more = 1;
1963 params.sg_len = rounddown(params.sg_len, bs);
1964 params.hash_size = params.alg_prm.result_size;
1965 params.scmd1 = 0;
1966 } else {
1967 params.last = 1;
1968 params.more = 0;
1969 params.hash_size = crypto_ahash_digestsize(rtfm);
1970 params.scmd1 = reqctx->data_len + params.sg_len;
1971 }
1972 params.bfr_len = 0;
1973 reqctx->data_len += params.sg_len;
1974	skb = create_hash_wr(req, &params);
1975 if (IS_ERR(skb)) {
1976 error = PTR_ERR(skb);
1977 goto err;
1978 }
1979 hctx_wr->processed += params.sg_len;
1980 skb->dev = u_ctx->lldi.ports[0];
1981 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1982 chcr_send_wr(skb);
1983 return 0;
1984err:
1985 return error;
1986}
1987
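/*
 * chcr_handle_ahash_resp - hash completion handler: copies the final
 * digest (or the running partial hash) out of the CPL message, then
 * either submits the next chunk via chcr_ahash_continue() or unmaps
 * the request and completes it.
 */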
1988static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1989 unsigned char *input,
1990 int err)
1991{
1992 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1993 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1994 int digestsize, updated_digestsize;
1995 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1996 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1997 struct chcr_dev *dev = h_ctx(tfm)->dev;
1998
1999 if (input == NULL)
2000 goto out;
2001 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2002 updated_digestsize = digestsize;
2003 if (digestsize == SHA224_DIGEST_SIZE)
2004 updated_digestsize = SHA256_DIGEST_SIZE;
2005 else if (digestsize == SHA384_DIGEST_SIZE)
2006 updated_digestsize = SHA512_DIGEST_SIZE;
2007
2008 if (hctx_wr->dma_addr) {
2009 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2010 hctx_wr->dma_len, DMA_TO_DEVICE);
2011 hctx_wr->dma_addr = 0;
2012 }
2013 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2014 req->nbytes)) {
2015 if (hctx_wr->result == 1) {
2016 hctx_wr->result = 0;
2017 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2018 digestsize);
2019 } else {
2020 memcpy(reqctx->partial_hash,
2021 input + sizeof(struct cpl_fw6_pld),
2022 updated_digestsize);
2023
2024 }
2025 goto unmap;
2026 }
2027 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2028 updated_digestsize);
2029
2030 err = chcr_ahash_continue(req);
2031 if (err)
2032 goto unmap;
2033 return;
2034unmap:
2035 if (hctx_wr->is_sg_map)
2036 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2037
2039out:
2040 chcr_dec_wrcount(dev);
2041 req->base.complete(&req->base, err);
2042}
2043
2044/*
2045 * chcr_handle_resp - Unmap the DMA buffers associated with the request
2046 * @req: crypto request
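 *	@input: response message from the hardware
 *	@err: completion status reported by the hardware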
2047 */
2048int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2049 int err)
2050{
2051 struct crypto_tfm *tfm = req->tfm;
2052 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2053 struct adapter *adap = padap(ctx->dev);
2054
2055 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2056 case CRYPTO_ALG_TYPE_AEAD:
2057 err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2058 break;
2059
2060 case CRYPTO_ALG_TYPE_SKCIPHER:
2061 chcr_handle_cipher_resp(skcipher_request_cast(req),
2062 input, err);
2063 break;
2064 case CRYPTO_ALG_TYPE_AHASH:
2065 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2066 }
2067 atomic_inc(&adap->chcr_stats.complete);
2068 return err;
2069}
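
/*
 * chcr_ahash_export/import - save and restore the software-visible
 * request state (buffered bytes, running length and partial digest) so
 * a hash can be suspended and resumed on another request.
 */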
2070static int chcr_ahash_export(struct ahash_request *areq, void *out)
2071{
2072 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2073 struct chcr_ahash_req_ctx *state = out;
2074
2075 state->reqlen = req_ctx->reqlen;
2076 state->data_len = req_ctx->data_len;
2077 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2078 memcpy(state->partial_hash, req_ctx->partial_hash,
2079 CHCR_HASH_MAX_DIGEST_SIZE);
2080 chcr_init_hctx_per_wr(state);
2081 return 0;
2082}
2083
2084static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2085{
2086 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2087 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2088
2089 req_ctx->reqlen = state->reqlen;
2090 req_ctx->data_len = state->data_len;
2091 req_ctx->reqbfr = req_ctx->bfr1;
2092 req_ctx->skbfr = req_ctx->bfr2;
2093 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2094 memcpy(req_ctx->partial_hash, state->partial_hash,
2095 CHCR_HASH_MAX_DIGEST_SIZE);
2096 chcr_init_hctx_per_wr(req_ctx);
2097 return 0;
2098}
2099
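/*
 * chcr_ahash_setkey - precompute the HMAC ipad/opad state. Keys longer
 * than the block size are first hashed down to digestsize bytes; the
 * padded key is XORed with the ipad/opad constants and one compression
 * round of the base hash is run over each result.
 */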
2100static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2101 unsigned int keylen)
2102{
2103 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2104 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2105 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2106 unsigned int i, err = 0, updated_digestsize;
2107
2108 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2109
2110	/* Use the key to calculate the ipad and opad. The ipad will be sent
2111	 * with the first request's data; the opad will be sent with the final
2112	 * hash result. They live in hmacctx->ipad and hmacctx->opad.
2113	 */
2114 shash->tfm = hmacctx->base_hash;
2115 if (keylen > bs) {
2116 err = crypto_shash_digest(shash, key, keylen,
2117 hmacctx->ipad);
2118 if (err)
2119 goto out;
2120 keylen = digestsize;
2121 } else {
2122 memcpy(hmacctx->ipad, key, keylen);
2123 }
2124 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2125 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2126
2127 for (i = 0; i < bs / sizeof(int); i++) {
2128 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2129 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2130 }
2131
2132 updated_digestsize = digestsize;
2133 if (digestsize == SHA224_DIGEST_SIZE)
2134 updated_digestsize = SHA256_DIGEST_SIZE;
2135 else if (digestsize == SHA384_DIGEST_SIZE)
2136 updated_digestsize = SHA512_DIGEST_SIZE;
2137 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2138 hmacctx->ipad, digestsize);
2139 if (err)
2140 goto out;
2141 chcr_change_order(hmacctx->ipad, updated_digestsize);
2142
2143 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2144 hmacctx->opad, digestsize);
2145 if (err)
2146 goto out;
2147 chcr_change_order(hmacctx->opad, updated_digestsize);
2148out:
2149 return err;
2150}
2151
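/*
 * chcr_aes_xts_setkey - program an AES-XTS key. key_len covers both
 * XTS halves, so the reverse-round decrypt key is derived from one
 * half (key_len << 2 bits) and the key-context header advertises the
 * half-key cipher size.
 */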
2152static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2153 unsigned int key_len)
2154{
2155 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2156 unsigned short context_size = 0;
2157 int err;
2158
2159 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2160 if (err)
2161 goto badkey_err;
2162
2163 memcpy(ablkctx->key, key, key_len);
2164 ablkctx->enckey_len = key_len;
2165 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2166 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2167 ablkctx->key_ctx_hdr =
2168 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2169 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2170 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2171 CHCR_KEYCTX_NO_KEY, 1,
2172 0, context_size);
2173 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2174 return 0;
2175badkey_err:
2176 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2177 ablkctx->enckey_len = 0;
2178
2179 return err;
2180}
2181
2182static int chcr_sha_init(struct ahash_request *areq)
2183{
2184 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2185 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2186 int digestsize = crypto_ahash_digestsize(tfm);
2187
2188 req_ctx->data_len = 0;
2189 req_ctx->reqlen = 0;
2190 req_ctx->reqbfr = req_ctx->bfr1;
2191 req_ctx->skbfr = req_ctx->bfr2;
2192 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2193
2194 return 0;
2195}
2196
2197static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2198{
2199 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2200 sizeof(struct chcr_ahash_req_ctx));
2201 return chcr_device_init(crypto_tfm_ctx(tfm));
2202}
2203
2204static int chcr_hmac_init(struct ahash_request *areq)
2205{
2206 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2207 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2208 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2209 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2210 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2211
2212 chcr_sha_init(areq);
2213 req_ctx->data_len = bs;
2214 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2215 if (digestsize == SHA224_DIGEST_SIZE)
2216 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2217 SHA256_DIGEST_SIZE);
2218 else if (digestsize == SHA384_DIGEST_SIZE)
2219 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2220 SHA512_DIGEST_SIZE);
2221 else
2222 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2223 digestsize);
2224 }
2225 return 0;
2226}
2227
2228static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2229{
2230 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2231 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2232 unsigned int digestsize =
2233 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2234
2235 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2236 sizeof(struct chcr_ahash_req_ctx));
2237 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2238 if (IS_ERR(hmacctx->base_hash))
2239 return PTR_ERR(hmacctx->base_hash);
2240 return chcr_device_init(crypto_tfm_ctx(tfm));
2241}
2242
2243static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2244{
2245 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2246 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2247
2248 if (hmacctx->base_hash) {
2249 chcr_free_shash(hmacctx->base_hash);
2250 hmacctx->base_hash = NULL;
2251 }
2252}
2253
2254inline void chcr_aead_common_exit(struct aead_request *req)
2255{
2256 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2257 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2258 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2259
2260 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2261}
2262
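/*
 * chcr_aead_common_init - per-request AEAD setup: rejects requests
 * with no key programmed, or decrypt requests shorter than the tag;
 * points scratch_pad past the IV when a CCM B0 block is needed and
 * DMA-maps the request buffers.
 */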
2263static int chcr_aead_common_init(struct aead_request *req)
2264{
2265 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2266 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2267 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2268 unsigned int authsize = crypto_aead_authsize(tfm);
2269 int error = -EINVAL;
2270
2271 /* validate key size */
2272 if (aeadctx->enckey_len == 0)
2273 goto err;
2274 if (reqctx->op && req->cryptlen < authsize)
2275 goto err;
2276 if (reqctx->b0_len)
2277 reqctx->scratch_pad = reqctx->iv + IV;
2278 else
2279 reqctx->scratch_pad = NULL;
2280
2281 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2282 reqctx->op);
2283 if (error) {
2284 error = -ENOMEM;
2285 goto err;
2286 }
2287
2288 return 0;
2289err:
2290 return error;
2291}
2292
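/*
 * chcr_aead_need_fallback - true when the request cannot be handled in
 * one hardware WR: empty payload, too many destination SGL entries,
 * AAD above the hardware limit, or a WR exceeding SGE_MAX_WR_LEN.
 */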
2293static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2294 int aadmax, int wrlen,
2295 unsigned short op_type)
2296{
2297 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2298
2299 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2300 dst_nents > MAX_DSGL_ENT ||
2301 (req->assoclen > aadmax) ||
2302 (wrlen > SGE_MAX_WR_LEN))
2303 return 1;
2304 return 0;
2305}
2306
2307static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2308{
2309 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2310 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2311 struct aead_request *subreq = aead_request_ctx(req);
2312
2313 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2314 aead_request_set_callback(subreq, req->base.flags,
2315 req->base.complete, req->base.data);
2316 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2317 req->iv);
2318 aead_request_set_ad(subreq, req->assoclen);
2319 return op_type ? crypto_aead_decrypt(subreq) :
2320 crypto_aead_encrypt(subreq);
2321}
2322
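/*
 * create_authenc_wr - build the work request for the authenc (cipher
 * plus hash) AEAD modes. The skb is laid out as: security CPL, key
 * context (cipher key followed by h(ipad)/h(opad)), destination DSGL,
 * 16-byte IV, then the source data either inline or as a ULPTX SGL.
 */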
2323static struct sk_buff *create_authenc_wr(struct aead_request *req,
2324 unsigned short qid,
2325 int size)
2326{
2327 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2328 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2329 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2330 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2331 struct sk_buff *skb = NULL;
2332 struct chcr_wr *chcr_req;
2333 struct cpl_rx_phys_dsgl *phys_cpl;
2334 struct ulptx_sgl *ulptx;
2335 unsigned int transhdr_len;
2336 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2337 unsigned int kctx_len = 0, dnents, snents;
2338 unsigned int authsize = crypto_aead_authsize(tfm);
2339 int error = -EINVAL;
2340 u8 *ivptr;
2341 int null = 0;
2342 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2343 GFP_ATOMIC;
2344 struct adapter *adap = padap(a_ctx(tfm)->dev);
2345
2346 if (req->cryptlen == 0)
2347 return NULL;
2348
2349 reqctx->b0_len = 0;
2350 error = chcr_aead_common_init(req);
2351 if (error)
2352 return ERR_PTR(error);
2353
2354 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2355 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2356 null = 1;
2357 }
2358 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2359 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2360 dnents += MIN_AUTH_SG; // For IV
2361 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2362 CHCR_SRC_SG_SIZE, 0);
2363 dst_size = get_space_for_phys_dsgl(dnents);
2364 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2365 - sizeof(chcr_req->key_ctx);
2366 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2367 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2368 SGE_MAX_WR_LEN;
2369 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2370 : (sgl_len(snents) * 8);
2371 transhdr_len += temp;
2372 transhdr_len = roundup(transhdr_len, 16);
2373
2374 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2375 transhdr_len, reqctx->op)) {
2376 atomic_inc(&adap->chcr_stats.fallback);
2377 chcr_aead_common_exit(req);
2378 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2379 }
2380 skb = alloc_skb(transhdr_len, flags);
2381 if (!skb) {
2382 error = -ENOMEM;
2383 goto err;
2384 }
2385
2386 chcr_req = __skb_put_zero(skb, transhdr_len);
2387
2388 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2389
2390	/*
2391	 * Input order is AAD, IV and payload, where the IV is included as
2392	 * part of the authenticated data. All other fields are filled
2393	 * according to the hardware spec.
2394	 */
2395 chcr_req->sec_cpl.op_ivinsrtofst =
2396 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2397 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2398 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2399 null ? 0 : 1 + IV,
2400 null ? 0 : IV + req->assoclen,
2401 req->assoclen + IV + 1,
2402 (temp & 0x1F0) >> 4);
2403 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2404 temp & 0xF,
2405 null ? 0 : req->assoclen + IV + 1,
2406 temp, temp);
2407 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2408 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2409 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2410 else
2411 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2412 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2413 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2414 temp,
2415 actx->auth_mode, aeadctx->hmac_ctrl,
2416 IV >> 1);
2417 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2418 0, 0, dst_size);
2419
2420 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2421 if (reqctx->op == CHCR_ENCRYPT_OP ||
2422 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2423 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2424 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2425 aeadctx->enckey_len);
2426 else
2427 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2428 aeadctx->enckey_len);
2429
2430 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2431 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2432 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2433 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2434 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2435 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2436 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2437 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2438 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2439 CTR_RFC3686_IV_SIZE);
2440 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2441 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2442 } else {
2443 memcpy(ivptr, req->iv, IV);
2444 }
2445 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2446 chcr_add_aead_src_ent(req, ulptx);
2447 atomic_inc(&adap->chcr_stats.cipher_rqst);
2448 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2449 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2450 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2451 transhdr_len, temp, 0);
2452 reqctx->skb = skb;
2453
2454 return skb;
2455err:
2456 chcr_aead_common_exit(req);
2457
2458 return ERR_PTR(error);
2459}
2460
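/*
 * chcr_aead_dma_map - map the IV/B0 scratch buffer and the src/dst
 * scatterlists. src == dst is mapped once bidirectionally; otherwise
 * src is mapped to-device and dst from-device. Requests with no
 * payload are not mapped at all.
 */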
2461int chcr_aead_dma_map(struct device *dev,
2462 struct aead_request *req,
2463 unsigned short op_type)
2464{
2465 int error;
2466 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2467 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2468 unsigned int authsize = crypto_aead_authsize(tfm);
2469 int dst_size;
2470
2471 dst_size = req->assoclen + req->cryptlen + (op_type ?
2472 -authsize : authsize);
2473 if (!req->cryptlen || !dst_size)
2474 return 0;
2475 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2476 DMA_BIDIRECTIONAL);
2477 if (dma_mapping_error(dev, reqctx->iv_dma))
2478 return -ENOMEM;
2479 if (reqctx->b0_len)
2480 reqctx->b0_dma = reqctx->iv_dma + IV;
2481 else
2482 reqctx->b0_dma = 0;
2483 if (req->src == req->dst) {
2484 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2485 DMA_BIDIRECTIONAL);
2486 if (!error)
2487 goto err;
2488 } else {
2489 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2490 DMA_TO_DEVICE);
2491 if (!error)
2492 goto err;
2493 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2494 DMA_FROM_DEVICE);
2495 if (!error) {
2496 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2497 DMA_TO_DEVICE);
2498 goto err;
2499 }
2500 }
2501
2502 return 0;
2503err:
2504 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2505 return -ENOMEM;
2506}
2507
2508void chcr_aead_dma_unmap(struct device *dev,
2509 struct aead_request *req,
2510 unsigned short op_type)
2511{
2512 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2513 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2514 unsigned int authsize = crypto_aead_authsize(tfm);
2515 int dst_size;
2516
2517 dst_size = req->assoclen + req->cryptlen + (op_type ?
2518 -authsize : authsize);
2519 if (!req->cryptlen || !dst_size)
2520 return;
2521
2522 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2523 DMA_BIDIRECTIONAL);
2524 if (req->src == req->dst) {
2525 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2526 DMA_BIDIRECTIONAL);
2527 } else {
2528 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2529 DMA_TO_DEVICE);
2530 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2531 DMA_FROM_DEVICE);
2532 }
2533}
2534
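/*
 * chcr_add_aead_src_ent - emit the source data for an AEAD WR: copied
 * inline (preceded by B0 for CCM) when reqctx->imm is set, otherwise
 * appended as ULPTX SGL entries.
 */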
2535void chcr_add_aead_src_ent(struct aead_request *req,
2536 struct ulptx_sgl *ulptx)
2537{
2538 struct ulptx_walk ulp_walk;
2539 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2540
2541 if (reqctx->imm) {
2542 u8 *buf = (u8 *)ulptx;
2543
2544 if (reqctx->b0_len) {
2545 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2546 buf += reqctx->b0_len;
2547 }
2548 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2549 buf, req->cryptlen + req->assoclen, 0);
2550 } else {
2551 ulptx_walk_init(&ulp_walk, ulptx);
2552 if (reqctx->b0_len)
2553 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2554 reqctx->b0_dma);
2555 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2556 req->assoclen, 0);
2557 ulptx_walk_end(&ulp_walk);
2558 }
2559}
2560
2561void chcr_add_aead_dst_ent(struct aead_request *req,
2562 struct cpl_rx_phys_dsgl *phys_cpl,
2563 unsigned short qid)
2564{
2565 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2566 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2567 struct dsgl_walk dsgl_walk;
2568 unsigned int authsize = crypto_aead_authsize(tfm);
2569 struct chcr_context *ctx = a_ctx(tfm);
2570 u32 temp;
2571
2572 dsgl_walk_init(&dsgl_walk, phys_cpl);
2573 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2574 temp = req->assoclen + req->cryptlen +
2575 (reqctx->op ? -authsize : authsize);
2576 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2577 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2578}
2579
2580void chcr_add_cipher_src_ent(struct skcipher_request *req,
2581 void *ulptx,
2582 struct cipher_wr_param *wrparam)
2583{
2584 struct ulptx_walk ulp_walk;
2585 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2586 u8 *buf = ulptx;
2587
2588 memcpy(buf, reqctx->iv, IV);
2589 buf += IV;
2590 if (reqctx->imm) {
2591 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2592 buf, wrparam->bytes, reqctx->processed);
2593 } else {
2594 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2595 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2596 reqctx->src_ofst);
2597 reqctx->srcsg = ulp_walk.last_sg;
2598 reqctx->src_ofst = ulp_walk.last_sg_len;
2599 ulptx_walk_end(&ulp_walk);
2600 }
2601}
2602
2603void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2604 struct cpl_rx_phys_dsgl *phys_cpl,
2605 struct cipher_wr_param *wrparam,
2606 unsigned short qid)
2607{
2608 struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2609 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2610 struct chcr_context *ctx = c_ctx(tfm);
2611 struct dsgl_walk dsgl_walk;
2612
2613 dsgl_walk_init(&dsgl_walk, phys_cpl);
2614 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2615 reqctx->dst_ofst);
2616 reqctx->dstsg = dsgl_walk.last_sg;
2617 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2618
2619 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2620}
2621
2622void chcr_add_hash_src_ent(struct ahash_request *req,
2623 struct ulptx_sgl *ulptx,
2624 struct hash_wr_param *param)
2625{
2626 struct ulptx_walk ulp_walk;
2627 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2628
2629 if (reqctx->hctx_wr.imm) {
2630 u8 *buf = (u8 *)ulptx;
2631
2632 if (param->bfr_len) {
2633 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2634 buf += param->bfr_len;
2635 }
2636
2637 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2638 sg_nents(reqctx->hctx_wr.srcsg), buf,
2639 param->sg_len, 0);
2640 } else {
2641 ulptx_walk_init(&ulp_walk, ulptx);
2642 if (param->bfr_len)
2643 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2644 reqctx->hctx_wr.dma_addr);
2645 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2646 param->sg_len, reqctx->hctx_wr.src_ofst);
2647 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2648 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2649 ulptx_walk_end(&ulp_walk);
2650 }
2651}
2652
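/*
 * chcr_hash_dma_map/unmap - map req->src for device reads. Zero-length
 * requests have nothing to map; is_sg_map tracks the mapping so the
 * completion path knows whether to unmap.
 */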
2653int chcr_hash_dma_map(struct device *dev,
2654 struct ahash_request *req)
2655{
2656 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2657 int error = 0;
2658
2659 if (!req->nbytes)
2660 return 0;
2661 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2662 DMA_TO_DEVICE);
2663 if (!error)
2664 return -ENOMEM;
2665 req_ctx->hctx_wr.is_sg_map = 1;
2666 return 0;
2667}
2668
2669void chcr_hash_dma_unmap(struct device *dev,
2670 struct ahash_request *req)
2671{
2672 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2673
2674 if (!req->nbytes)
2675 return;
2676
2677 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2678 DMA_TO_DEVICE);
2679 req_ctx->hctx_wr.is_sg_map = 0;
2681}
2682
2683int chcr_cipher_dma_map(struct device *dev,
2684 struct skcipher_request *req)
2685{
2686 int error;
2687
2688 if (req->src == req->dst) {
2689 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2690 DMA_BIDIRECTIONAL);
2691 if (!error)
2692 goto err;
2693 } else {
2694 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2695 DMA_TO_DEVICE);
2696 if (!error)
2697 goto err;
2698 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2699 DMA_FROM_DEVICE);
2700 if (!error) {
2701 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2702 DMA_TO_DEVICE);
2703 goto err;
2704 }
2705 }
2706
2707 return 0;
2708err:
2709 return -ENOMEM;
2710}
2711
2712void chcr_cipher_dma_unmap(struct device *dev,
2713 struct skcipher_request *req)
2714{
2715 if (req->src == req->dst) {
2716 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2717 DMA_BIDIRECTIONAL);
2718 } else {
2719 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2720 DMA_TO_DEVICE);
2721 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2722 DMA_FROM_DEVICE);
2723 }
2724}
2725
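/*
 * set_msg_len - write msglen big-endian into a csize-byte field, zero
 * padded on the left (e.g. csize = 4, msglen = 80 gives 00 00 00 50).
 * Used for the trailing L bytes of the CCM B0 block; fails with
 * -EOVERFLOW when msglen does not fit.
 */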
2726static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2727{
2728 __be32 data;
2729
2730 memset(block, 0, csize);
2731 block += csize;
2732
2733 if (csize >= 4)
2734 csize = 4;
2735 else if (msglen > (unsigned int)(1 << (8 * csize)))
2736 return -EOVERFLOW;
2737
2738 data = cpu_to_be32(msglen);
2739 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2740
2741 return 0;
2742}
2743
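/*
 * generate_b0 - build the CCM B0 block in the scratch pad from the
 * formatted IV: the flags byte encodes L' and (M - 2) / 2 (bits 3-5)
 * and sets bit 6 when AAD is present; the message length goes in the
 * trailing L bytes.
 */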
2744static int generate_b0(struct aead_request *req, u8 *ivptr,
2745 unsigned short op_type)
2746{
2747 unsigned int l, lp, m;
2748 int rc;
2749 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2750 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2751 u8 *b0 = reqctx->scratch_pad;
2752
2753 m = crypto_aead_authsize(aead);
2754
2755 memcpy(b0, ivptr, 16);
2756
2757 lp = b0[0];
2758 l = lp + 1;
2759
2760 /* set m, bits 3-5 */
2761 *b0 |= (8 * ((m - 2) / 2));
2762
2763 /* set adata, bit 6, if associated data is used */
2764 if (req->assoclen)
2765 *b0 |= 64;
2766 rc = set_msg_len(b0 + 16 - l,
2767 (op_type == CHCR_DECRYPT_OP) ?
2768 req->cryptlen - m : req->cryptlen, l);
2769
2770 return rc;
2771}
2772
2773static inline int crypto_ccm_check_iv(const u8 *iv)
2774{
2775 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2776 if (iv[0] < 1 || iv[0] > 7)
2777 return -EINVAL;
2778
2779 return 0;
2780}
2781
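/*
 * ccm_format_packet - assemble the 16-byte CCM counter IV (RFC4309
 * fixes the flags byte to 3 and prepends the 3-byte salt to the 8-byte
 * IV), store the AAD length after B0, generate B0 itself, and zero the
 * counter field of the IV.
 */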
2782static int ccm_format_packet(struct aead_request *req,
2783 u8 *ivptr,
2784 unsigned int sub_type,
2785 unsigned short op_type,
2786 unsigned int assoclen)
2787{
2788 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2789 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2790 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2791 int rc = 0;
2792
2793 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2794 ivptr[0] = 3;
2795 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2796 memcpy(ivptr + 4, req->iv, 8);
2797 memset(ivptr + 12, 0, 4);
2798 } else {
2799 memcpy(ivptr, req->iv, 16);
2800 }
2801 if (assoclen)
2802 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2803 htons(assoclen);
2804
2805 rc = generate_b0(req, ivptr, op_type);
2806 /* zero the ctr value */
2807 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2808 return rc;
2809}
2810
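/*
 * fill_sec_cpl_for_aead - fill the security CPL for the CCM modes.
 * The ccm_xtra bytes (B0 plus the encoded AAD length field) shift all
 * auth/cipher offsets relative to the plain AEAD layout.
 */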
2811static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2812 unsigned int dst_size,
2813 struct aead_request *req,
2814 unsigned short op_type)
2815{
2816 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2817 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2818 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2819 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2820 unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2821 unsigned int ccm_xtra;
2822 unsigned char tag_offset = 0, auth_offset = 0;
2823 unsigned int assoclen;
2824
2825 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2826 assoclen = req->assoclen - 8;
2827 else
2828 assoclen = req->assoclen;
2829 ccm_xtra = CCM_B0_SIZE +
2830 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2831
2832 auth_offset = req->cryptlen ?
2833 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2834 if (op_type == CHCR_DECRYPT_OP) {
2835 if (crypto_aead_authsize(tfm) != req->cryptlen)
2836 tag_offset = crypto_aead_authsize(tfm);
2837 else
2838 auth_offset = 0;
2839 }
2840
2842 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2843 2, 1);
2844 sec_cpl->pldlen =
2845 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2846	/* For CCM, B0 is always present, so the AAD start offset is always 1 */
2847 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2848 1 + IV, IV + assoclen + ccm_xtra,
2849 req->assoclen + IV + 1 + ccm_xtra, 0);
2850
2851 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2852 auth_offset, tag_offset,
2853 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2854 crypto_aead_authsize(tfm));
2855 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2856 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2857 cipher_mode, mac_mode,
2858 aeadctx->hmac_ctrl, IV >> 1);
2859
2860 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2861 0, dst_size);
2862}
2863
2864static int aead_ccm_validate_input(unsigned short op_type,
2865 struct aead_request *req,
2866 struct chcr_aead_ctx *aeadctx,
2867 unsigned int sub_type)
2868{
2869 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2870 if (crypto_ccm_check_iv(req->iv)) {
2871 pr_err("CCM: IV check fails\n");
2872 return -EINVAL;
2873 }
2874 } else {
2875 if (req->assoclen != 16 && req->assoclen != 20) {
2876 pr_err("RFC4309: Invalid AAD length %d\n",
2877 req->assoclen);
2878 return -EINVAL;
2879 }
2880 }
2881 return 0;
2882}
2883
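/*
 * create_aead_ccm_wr - build the work request for CCM and RFC4309.
 * The key context carries the AES key twice (CBC-MAC and CTR use the
 * same key), and B0 travels ahead of the payload, so it is budgeted
 * into both the SGL counts and the immediate-data length.
 */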
2884static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2885 unsigned short qid,
2886 int size)
2887{
2888 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2889 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2890 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2891 struct sk_buff *skb = NULL;
2892 struct chcr_wr *chcr_req;
2893 struct cpl_rx_phys_dsgl *phys_cpl;
2894 struct ulptx_sgl *ulptx;
2895 unsigned int transhdr_len;
2896 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2897 unsigned int sub_type, assoclen = req->assoclen;
2898 unsigned int authsize = crypto_aead_authsize(tfm);
2899 int error = -EINVAL;
2900 u8 *ivptr;
2901 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2902 GFP_ATOMIC;
2903 struct adapter *adap = padap(a_ctx(tfm)->dev);
2904
2905 sub_type = get_aead_subtype(tfm);
2906 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2907 assoclen -= 8;
2908 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2909 error = chcr_aead_common_init(req);
2910 if (error)
2911 return ERR_PTR(error);
2912
2913 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2914 if (error)
2915 goto err;
2916 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2917 + (reqctx->op ? -authsize : authsize),
2918 CHCR_DST_SG_SIZE, 0);
2919 dnents += MIN_CCM_SG; // For IV and B0
2920 dst_size = get_space_for_phys_dsgl(dnents);
2921 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2922 CHCR_SRC_SG_SIZE, 0);
2923	snents += MIN_CCM_SG; // For B0
2924 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2925 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2926 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2927 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2928 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2929 reqctx->b0_len, 16) :
2930 (sgl_len(snents) * 8);
2931 transhdr_len += temp;
2932 transhdr_len = roundup(transhdr_len, 16);
2933
2934 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2935 reqctx->b0_len, transhdr_len, reqctx->op)) {
2936 atomic_inc(&adap->chcr_stats.fallback);
2937 chcr_aead_common_exit(req);
2938 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2939 }
2940 skb = alloc_skb(transhdr_len, flags);
2941
2942 if (!skb) {
2943 error = -ENOMEM;
2944 goto err;
2945 }
2946
2947 chcr_req = __skb_put_zero(skb, transhdr_len);
2948
2949 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2950
2951 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2952 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2953 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2954 aeadctx->key, aeadctx->enckey_len);
2955
2956 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2957 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2958 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2959 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2960 if (error)
2961 goto dstmap_fail;
2962 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2963 chcr_add_aead_src_ent(req, ulptx);
2964
2965 atomic_inc(&adap->chcr_stats.aead_rqst);
2966 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2967 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2968 reqctx->b0_len) : 0);
2969 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2970 transhdr_len, temp, 0);
2971 reqctx->skb = skb;
2972
2973 return skb;
2974dstmap_fail:
2975 kfree_skb(skb);
2976err:
2977 chcr_aead_common_exit(req);
2978 return ERR_PTR(error);
2979}
2980
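/*
 * create_gcm_wr - build the work request for GCM and RFC4106. The key
 * context carries the AES key followed by the GHASH subkey H, and the
 * IV is expanded to SALT | IV | 0x00000001 before being placed between
 * the destination DSGL and the source SGL.
 */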
2981static struct sk_buff *create_gcm_wr(struct aead_request *req,
2982 unsigned short qid,
2983 int size)
2984{
2985 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2986 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2987 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2988 struct sk_buff *skb = NULL;
2989 struct chcr_wr *chcr_req;
2990 struct cpl_rx_phys_dsgl *phys_cpl;
2991 struct ulptx_sgl *ulptx;
2992 unsigned int transhdr_len, dnents = 0, snents;
2993 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2994 unsigned int authsize = crypto_aead_authsize(tfm);
2995 int error = -EINVAL;
2996 u8 *ivptr;
2997 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2998 GFP_ATOMIC;
2999 struct adapter *adap = padap(a_ctx(tfm)->dev);
3000
3001 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3002 assoclen = req->assoclen - 8;
3003
3004 reqctx->b0_len = 0;
3005 error = chcr_aead_common_init(req);
3006 if (error)
3007 return ERR_PTR(error);
3008 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3009 (reqctx->op ? -authsize : authsize),
3010 CHCR_DST_SG_SIZE, 0);
3011 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3012 CHCR_SRC_SG_SIZE, 0);
3013 dnents += MIN_GCM_SG; // For IV
3014 dst_size = get_space_for_phys_dsgl(dnents);
3015 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3016 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3017 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3018 SGE_MAX_WR_LEN;
3019 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3020 (sgl_len(snents) * 8);
3021 transhdr_len += temp;
3022 transhdr_len = roundup(transhdr_len, 16);
3023 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3024 transhdr_len, reqctx->op)) {
3026 atomic_inc(&adap->chcr_stats.fallback);
3027 chcr_aead_common_exit(req);
3028 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3029 }
3030 skb = alloc_skb(transhdr_len, flags);
3031 if (!skb) {
3032 error = -ENOMEM;
3033 goto err;
3034 }
3035
3036 chcr_req = __skb_put_zero(skb, transhdr_len);
3037
3038	// Offset of tag from end
3039 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3040 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3041 a_ctx(tfm)->tx_chan_id, 2, 1);
3042 chcr_req->sec_cpl.pldlen =
3043 htonl(req->assoclen + IV + req->cryptlen);
3044 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3045 assoclen ? 1 + IV : 0,
3046 assoclen ? IV + assoclen : 0,
3047 req->assoclen + IV + 1, 0);
3048 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3049 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3050 temp, temp);
3051 chcr_req->sec_cpl.seqno_numivs =
3052 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3053 CHCR_ENCRYPT_OP) ? 1 : 0,
3054 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3055 CHCR_SCMD_AUTH_MODE_GHASH,
3056 aeadctx->hmac_ctrl, IV >> 1);
3057 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3058 0, 0, dst_size);
3059 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3060 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3061 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3062 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3063
3064 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3065 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3066 /* prepare a 16 byte iv */
3067 /* S A L T | IV | 0x00000001 */
3068 if (get_aead_subtype(tfm) ==
3069 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3070 memcpy(ivptr, aeadctx->salt, 4);
3071 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3072 } else {
3073 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3074 }
3075 *((unsigned int *)(ivptr + 12)) = htonl(0x01);
3076
3077 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3078
3079 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3080 chcr_add_aead_src_ent(req, ulptx);
3081 atomic_inc(&adap->chcr_stats.aead_rqst);
3082 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3083 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3084 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3085 transhdr_len, temp, reqctx->verify);
3086 reqctx->skb = skb;
3087 return skb;
3088
3089err:
3090 chcr_aead_common_exit(req);
3091 return ERR_PTR(error);
3092}
3093
3096static int chcr_aead_cra_init(struct crypto_aead *tfm)
3097{
3098 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3099 struct aead_alg *alg = crypto_aead_alg(tfm);
3100
3101 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3102 CRYPTO_ALG_NEED_FALLBACK |
3103 CRYPTO_ALG_ASYNC);
3104 if (IS_ERR(aeadctx->sw_cipher))
3105 return PTR_ERR(aeadctx->sw_cipher);
3106 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3107 sizeof(struct aead_request) +
3108 crypto_aead_reqsize(aeadctx->sw_cipher)));
3109 return chcr_device_init(a_ctx(tfm));
3110}
3111
3112static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3113{
3114 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3115
3116 crypto_free_aead(aeadctx->sw_cipher);
3117}
3118
3119static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3120 unsigned int authsize)
3121{
3122 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3123
3124 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3125 aeadctx->mayverify = VERIFY_HW;
3126 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3127}

3128static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3129 unsigned int authsize)
3130{
3131 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3132 u32 maxauth = crypto_aead_maxauthsize(tfm);
3133
3134	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3135	 * does not hold for SHA1, so the authsize == 12 check must come
3136	 * before the authsize == (maxauth >> 1) check.
3137	 */
3138 if (authsize == ICV_4) {
3139 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3140 aeadctx->mayverify = VERIFY_HW;
3141 } else if (authsize == ICV_6) {
3142 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3143 aeadctx->mayverify = VERIFY_HW;
3144 } else if (authsize == ICV_10) {
3145 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3146 aeadctx->mayverify = VERIFY_HW;
3147 } else if (authsize == ICV_12) {
3148 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3149 aeadctx->mayverify = VERIFY_HW;
3150 } else if (authsize == ICV_14) {
3151 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3152 aeadctx->mayverify = VERIFY_HW;
3153 } else if (authsize == (maxauth >> 1)) {
3154 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3155 aeadctx->mayverify = VERIFY_HW;
3156 } else if (authsize == maxauth) {
3157 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3158 aeadctx->mayverify = VERIFY_HW;
3159 } else {
3160 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3161 aeadctx->mayverify = VERIFY_SW;
3162 }
3163 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3164}
3165
3167static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3168{
3169 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3170
3171 switch (authsize) {
3172 case ICV_4:
3173 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3174 aeadctx->mayverify = VERIFY_HW;
3175 break;
3176 case ICV_8:
3177 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3178 aeadctx->mayverify = VERIFY_HW;
3179 break;
3180 case ICV_12:
3181 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3182 aeadctx->mayverify = VERIFY_HW;
3183 break;
3184 case ICV_14:
3185 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3186 aeadctx->mayverify = VERIFY_HW;
3187 break;
3188 case ICV_16:
3189 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3190 aeadctx->mayverify = VERIFY_HW;
3191 break;
3192 case ICV_13:
3193 case ICV_15:
3194 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3195 aeadctx->mayverify = VERIFY_SW;
3196 break;
3197 default:
3199 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3200 CRYPTO_TFM_RES_BAD_KEY_LEN);
3201 return -EINVAL;
3202 }
3203 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3204}
3205
3206static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3207 unsigned int authsize)
3208{
3209 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3210
3211 switch (authsize) {
3212 case ICV_8:
3213 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3214 aeadctx->mayverify = VERIFY_HW;
3215 break;
3216 case ICV_12:
3217 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3218 aeadctx->mayverify = VERIFY_HW;
3219 break;
3220 case ICV_16:
3221 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3222 aeadctx->mayverify = VERIFY_HW;
3223 break;
3224 default:
3225 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3226 CRYPTO_TFM_RES_BAD_KEY_LEN);
3227 return -EINVAL;
3228 }
3229 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3230}
3231
3232static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3233 unsigned int authsize)
3234{
3235 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3236
3237 switch (authsize) {
3238 case ICV_4:
3239 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3240 aeadctx->mayverify = VERIFY_HW;
3241 break;
3242 case ICV_6:
3243 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3244 aeadctx->mayverify = VERIFY_HW;
3245 break;
3246 case ICV_8:
3247 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3248 aeadctx->mayverify = VERIFY_HW;
3249 break;
3250 case ICV_10:
3251 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3252 aeadctx->mayverify = VERIFY_HW;
3253 break;
3254 case ICV_12:
3255 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3256 aeadctx->mayverify = VERIFY_HW;
3257 break;
3258 case ICV_14:
3259 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3260 aeadctx->mayverify = VERIFY_HW;
3261 break;
3262 case ICV_16:
3263 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3264 aeadctx->mayverify = VERIFY_HW;
3265 break;
3266 default:
3267 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3268 CRYPTO_TFM_RES_BAD_KEY_LEN);
3269 return -EINVAL;
3270 }
3271 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3272}
3273
3274static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3275 const u8 *key,
3276 unsigned int keylen)
3277{
3278 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3279 unsigned char ck_size, mk_size;
3280 int key_ctx_size = 0;
3281
3282 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3283 if (keylen == AES_KEYSIZE_128) {
3284 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3285 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3286 } else if (keylen == AES_KEYSIZE_192) {
3287 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3288 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3289 } else if (keylen == AES_KEYSIZE_256) {
3290 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3291 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3292 } else {
3293 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3294 CRYPTO_TFM_RES_BAD_KEY_LEN);
3295 aeadctx->enckey_len = 0;
3296 return -EINVAL;
3297 }
3298 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3299 key_ctx_size >> 4);
3300 memcpy(aeadctx->key, key, keylen);
3301 aeadctx->enckey_len = keylen;
3302
3303 return 0;
3304}
3305
3306static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3307 const u8 *key,
3308 unsigned int keylen)
3309{
3310 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3311 int error;
3312
3313 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3314 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3315 CRYPTO_TFM_REQ_MASK);
3316 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3317 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3318 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3319 CRYPTO_TFM_RES_MASK);
3320 if (error)
3321 return error;
3322 return chcr_ccm_common_setkey(aead, key, keylen);
3323}
3324
3325static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3326 unsigned int keylen)
3327{
3328 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3329 int error;
3330
3331 if (keylen < 3) {
3332 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3333 CRYPTO_TFM_RES_BAD_KEY_LEN);
3334 aeadctx->enckey_len = 0;
3335 return -EINVAL;
3336 }
3337 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3338 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3339 CRYPTO_TFM_REQ_MASK);
3340 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3341 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3342 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3343 CRYPTO_TFM_RES_MASK);
3344 if (error)
3345 return error;
3346 keylen -= 3;
3347 memcpy(aeadctx->salt, key + keylen, 3);
3348 return chcr_ccm_common_setkey(aead, key, keylen);
3349}
3350
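/*
 * chcr_gcm_setkey - program a GCM key. For RFC4106 the last four key
 * bytes are the nonce salt. The GHASH subkey H = AES_K(0^128) is
 * computed in software and stored after the AES key in the context.
 */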
3351static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3352 unsigned int keylen)
3353{
3354 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3355 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3356 unsigned int ck_size;
3357 int ret = 0, key_ctx_size = 0;
3358 struct crypto_aes_ctx aes;
3359
3360 aeadctx->enckey_len = 0;
3361 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3362 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3363 & CRYPTO_TFM_REQ_MASK);
3364 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3365 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3366 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3367 CRYPTO_TFM_RES_MASK);
3368 if (ret)
3369 goto out;
3370
3371 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3372 keylen > 3) {
3373 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3374 memcpy(aeadctx->salt, key + keylen, 4);
3375 }
3376 if (keylen == AES_KEYSIZE_128) {
3377 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3378 } else if (keylen == AES_KEYSIZE_192) {
3379 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3380 } else if (keylen == AES_KEYSIZE_256) {
3381 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3382 } else {
3383 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3384 CRYPTO_TFM_RES_BAD_KEY_LEN);
3385 pr_err("GCM: Invalid key length %d\n", keylen);
3386 ret = -EINVAL;
3387 goto out;
3388 }
3389
3390 memcpy(aeadctx->key, key, keylen);
3391 aeadctx->enckey_len = keylen;
3392 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3393 AEAD_H_SIZE;
3394 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3395 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3396 0, 0,
3397 key_ctx_size >> 4);
3398 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3399 * It will go in key context
3400 */
3401 ret = aes_expandkey(&aes, key, keylen);
3402 if (ret) {
3403 aeadctx->enckey_len = 0;
3404 goto out;
3405 }
3406 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3407 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3408 memzero_explicit(&aes, sizeof(aes));
3409
3410out:
3411 return ret;
3412}
3413
3414static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3415 unsigned int keylen)
3416{
3417 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3418 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3419	/* keys holds both the auth and the cipher key */
3420 struct crypto_authenc_keys keys;
3421 unsigned int bs, subtype;
3422 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3423 int err = 0, i, key_ctx_len = 0;
3424 unsigned char ck_size = 0;
3425 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3426 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3427 struct algo_param param;
3428 int align;
3429 u8 *o_ptr = NULL;
3430
3431 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3432 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3433 & CRYPTO_TFM_REQ_MASK);
3434 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3435 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3436 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3437 & CRYPTO_TFM_RES_MASK);
3438 if (err)
3439 goto out;
3440
3441 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3442 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3443 goto out;
3444 }
3445
3446	if (get_alg_config(&param, max_authsize)) {
3447 pr_err("chcr : Unsupported digest size\n");
3448 goto out;
3449 }
3450 subtype = get_aead_subtype(authenc);
3451 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3452 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3453 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3454 goto out;
3455 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3456 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3457 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3458 }
3459 if (keys.enckeylen == AES_KEYSIZE_128) {
3460 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3461 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3462 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3463 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3464 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3465 } else {
3466 pr_err("chcr : Unsupported cipher key\n");
3467 goto out;
3468 }
3469
3470	/* Copy only the encryption key. The authkey is used to generate
3471	 * h(ipad) and h(opad), so it is not needed again. authkeylen is
3472	 * the size of the hash digest.
3473	 */
3474 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3475 aeadctx->enckey_len = keys.enckeylen;
3476 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3477 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3478
3479 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3480 aeadctx->enckey_len << 3);
3481 }
3482 base_hash = chcr_alloc_shash(max_authsize);
3483 if (IS_ERR(base_hash)) {
3484 pr_err("chcr : Base driver cannot be loaded\n");
3485 aeadctx->enckey_len = 0;
3486 memzero_explicit(&keys, sizeof(keys));
3487 return -EINVAL;
3488 }
3489 {
3490 SHASH_DESC_ON_STACK(shash, base_hash);
3491
3492 shash->tfm = base_hash;
3493 bs = crypto_shash_blocksize(base_hash);
3494 align = KEYCTX_ALIGN_PAD(max_authsize);
3495 o_ptr = actx->h_iopad + param.result_size + align;
3496
3497 if (keys.authkeylen > bs) {
3498 err = crypto_shash_digest(shash, keys.authkey,
3499 keys.authkeylen,
3500 o_ptr);
3501 if (err) {
3502 pr_err("chcr : Base driver cannot be loaded\n");
3503 goto out;
3504 }
3505 keys.authkeylen = max_authsize;
3506 } else
3507 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3508
3509 /* Compute the ipad-digest*/
3510 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3511 memcpy(pad, o_ptr, keys.authkeylen);
3512 for (i = 0; i < bs >> 2; i++)
3513 *((unsigned int *)pad + i) ^= IPAD_DATA;
3514
3515 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3516 max_authsize))
3517 goto out;
3518 /* Compute the opad-digest */
3519 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3520 memcpy(pad, o_ptr, keys.authkeylen);
3521 for (i = 0; i < bs >> 2; i++)
3522 *((unsigned int *)pad + i) ^= OPAD_DATA;
3523
3524 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3525 goto out;
3526
3527 /* convert the ipad and opad digest to network order */
3528 chcr_change_order(actx->h_iopad, param.result_size);
3529 chcr_change_order(o_ptr, param.result_size);
3530 key_ctx_len = sizeof(struct _key_ctx) +
3531 roundup(keys.enckeylen, 16) +
3532 (param.result_size + align) * 2;
3533 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3534 0, 1, key_ctx_len >> 4);
3535 actx->auth_mode = param.auth_mode;
3536 chcr_free_shash(base_hash);
3537
3538 memzero_explicit(&keys, sizeof(keys));
3539 return 0;
3540 }
3541out:
3542 aeadctx->enckey_len = 0;
3543 memzero_explicit(&keys, sizeof(keys));
3544 if (!IS_ERR(base_hash))
3545 chcr_free_shash(base_hash);
3546 return -EINVAL;
3547}
3548
3549static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3550 const u8 *key, unsigned int keylen)
3551{
3552 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3553 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3554 struct crypto_authenc_keys keys;
3555 int err;
3556	/* keys holds both the auth and the cipher key */
3557 unsigned int subtype;
3558 int key_ctx_len = 0;
3559 unsigned char ck_size = 0;
3560
3561 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3562 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3563 & CRYPTO_TFM_REQ_MASK);
3564 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3565 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3566 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3567 & CRYPTO_TFM_RES_MASK);
3568 if (err)
3569 goto out;
3570
3571 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3572 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3573 goto out;
3574 }
3575 subtype = get_aead_subtype(authenc);
3576 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3577 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3578 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3579 goto out;
3580 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3581 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3582 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3583 }
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("Unsupported cipher key length %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("%s: no crypto device\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
		/* The device is detaching (lldi/padap already freed), so we
		 * cannot take a work-request reference; hand the request to
		 * the software fallback instead.
		 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			chcr_dec_wrcount(cdev);
			return -ENOSPC;
		}
	}
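
	/*
	 * Backpressure note: when the TX queue is full, callers that did not
	 * set CRYPTO_TFM_REQ_MAY_BACKLOG get -ENOSPC immediately; callers
	 * that did are still queued, and the final return value below becomes
	 * -EBUSY ("accepted, but backlogged") instead of -EINPROGRESS.
	 */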

	/* Build a work request from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
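	/*
	 * VERIFY_SW: the hardware returns the computed tag and the driver
	 * compares it against the received one in software, so "size"
	 * reserves room for a full maxauthsize digest in the response.
	 * VERIFY_HW lets the engine check the tag itself and report only
	 * pass/fail.
	 */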
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

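/*
 * driver_algs[] - template table for every algorithm this driver exposes.
 * Each entry carries the CRYPTO_ALG_TYPE_* class plus a driver-private
 * subtype in .type, an .is_registered flag for registration bookkeeping,
 * and the class-specific descriptor in the .alg union. Fields common to a
 * class (module owner, priority, flags, ops) are filled in once at
 * registration time by chcr_register_alg() below.
 */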
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_cbc_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "xts(aes)",
			.base.cra_driver_name = "xts-aes-chcr",
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_xts_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "ctr(aes)",
			.base.cra_driver_name = "ctr-aes-chcr",
			.base.cra_blocksize = 1,
			.init = chcr_init_tfm,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aes_ctr_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_SKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.skcipher = {
			.base.cra_name = "rfc3686(ctr(aes))",
			.base.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.base.cra_blocksize = 1,
			.init = chcr_rfc3686_init,
			.exit = chcr_exit_tfm,
			.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = chcr_aes_rfc3686_setkey,
			.encrypt = chcr_aes_encrypt,
			.decrypt = chcr_aes_decrypt,
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* AEAD algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms from the
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
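
/*
 * Context-size note: plain SHA transforms need only the common chcr_context;
 * HMAC transforms additionally embed a struct hmac_ctx for the precomputed
 * ipad/opad state. SZ_AHASH_REQ_CTX also serves as halg.statesize below,
 * which suggests chcr_ahash_export()/chcr_ahash_import() snapshot the whole
 * per-request context when a hash state is exported and resumed.
 */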

/*
 * chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
static int chcr_register_alg(void)
{
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			name = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s: algorithm registration failed\n", name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
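
/*
 * Selection note (informational): the generic crypto API picks the highest
 * cra_priority implementation registered under a given cra_name, so with a
 * sufficiently high CHCR_CRA_PRIORITY a request such as
 *
 *	crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * would resolve to "cbc-aes-chcr" once registration has succeeded, and to
 * the software implementation otherwise.
 */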

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After this,
 * the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms from the kernel.
 * This should be called once, when the last device goes down. After this,
 * the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
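
/*
 * Lifecycle sketch (informational, assuming the ULD callbacks in chcr_core.c
 * drive these entry points): the first Chelsio adapter probed calls
 * start_crypto() to publish the algorithms above; each accepted request then
 * flows through chcr_aead_op()/create_*_wr() onto the adapter's crypto
 * queues; and the last device going away calls stop_crypto() to unregister
 * everything again.
 */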