/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
                            struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int len = req->nbytes + creq->cache_ptr;

        if (!creq->last_req)
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;

        mv_cesa_req_dma_iter_init(&iter->base, len);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
        iter->src.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}
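
/*
 * Editor's note (added commentary, not from the original source): the
 * iterator pair above walks one request in SRAM-payload-sized chunks.
 * For a non-final request, mv_cesa_ahash_req_iter_init() rounds the total
 * length down to a whole number of hash blocks: assuming the 64-byte block
 * of MD5/SHA-1/SHA-256 (i.e. CESA_HASH_BLOCK_SIZE_MSK == 63), cache_ptr = 10
 * and nbytes = 130 give len = 140 & ~63 = 128, leaving 12 bytes to be
 * cached for the next update.
 */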

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
        req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
                                    &req->cache_dma);
        if (!req->cache)
                return -ENOMEM;

        return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->cache)
                return;

        dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
                      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
                                           gfp_t flags)
{
        if (req->padding)
                return 0;

        req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
                                      &req->padding_dma);
        if (!req->padding)
                return -ENOMEM;

        return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
        if (!req->padding)
                return;

        dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
                      req->padding_dma);
        req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
        mv_cesa_ahash_dma_free_cache(&creq->req.dma);
        mv_cesa_dma_cleanup(&creq->req.dma.base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
        unsigned int index, padlen;

        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

        return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
        unsigned int index, padlen;

        buf[0] = 0x80;
        /* Pad out to 56 mod 64 */
        index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
        padlen = mv_cesa_ahash_pad_len(creq);
        memset(buf + 1, 0, padlen - 1);

        if (creq->algo_le) {
                __le64 bits = cpu_to_le64(creq->len << 3);
                memcpy(buf + padlen, &bits, sizeof(bits));
        } else {
                __be64 bits = cpu_to_be64(creq->len << 3);
                memcpy(buf + padlen, &bits, sizeof(bits));
        }

        return padlen + 8;
}
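
/*
 * Worked example (editor's illustration, assuming the 64-byte block size
 * used by MD5/SHA-1/SHA-256): for creq->len = 120, index = 120 & 63 = 56,
 * so padlen = 64 + 56 - 56 = 64 and mv_cesa_ahash_pad_req() emits a
 * 72-byte trailer:
 *
 *   buf[0]      = 0x80
 *   buf[1..63]  = 0x00
 *   buf[64..71] = bit length 120 << 3 = 960, stored little endian for
 *                 MD5 and big endian for SHA, as in the standard padding.
 */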

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = sreq->base.engine;
        struct mv_cesa_op_ctx *op;
        unsigned int new_cache_ptr = 0;
        u32 frag_mode;
        size_t len;

        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                            creq->cache, creq->cache_ptr);

        len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
                    CESA_SA_SRAM_PAYLOAD_SIZE);

        if (!creq->last_req) {
                new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
                len &= ~CESA_HASH_BLOCK_SIZE_MSK;
        }

        if (len - creq->cache_ptr)
                sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
                                                   engine->sram +
                                                   CESA_SA_DATA_SRAM_OFFSET +
                                                   creq->cache_ptr,
                                                   len - creq->cache_ptr,
                                                   sreq->offset);

        op = &creq->op_tmpl;

        frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

        if (creq->last_req && sreq->offset == req->nbytes &&
            creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
                else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
                        frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
        }

        if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
            frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
                if (len &&
                    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
                        mv_cesa_set_mac_op_total_len(op, creq->len);
                } else {
                        int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

                        if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
                                len &= CESA_HASH_BLOCK_SIZE_MSK;
                                new_cache_ptr = 64 - trailerlen;
                                memcpy_fromio(creq->cache,
                                              engine->sram +
                                              CESA_SA_DATA_SRAM_OFFSET + len,
                                              new_cache_ptr);
                        } else {
                                len += mv_cesa_ahash_pad_req(creq,
                                                engine->sram + len +
                                                CESA_SA_DATA_SRAM_OFFSET);
                        }

                        if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
                                frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
                        else
                                frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
                }
        }

        mv_cesa_set_mac_op_frag_len(op, len);
        mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

        /* FIXME: only update enc_len field */
        memcpy_toio(engine->sram, op, sizeof(*op));

        if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
                mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->cache_ptr = new_cache_ptr;

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
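
/*
 * Editor's note on the SRAM layout staged above: the operation descriptor
 * is written at offset 0 of the engine SRAM and the payload (cached
 * left-over bytes first, then fresh scatterlist data) at
 * CESA_SA_DATA_SRAM_OFFSET. The fragment mode walks FIRST_FRAG -> MID_FRAG
 * -> LAST_FRAG (or NOT_FRAG when a final request fits in a single shot),
 * and any partial block is read back into creq->cache for the next step.
 */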

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

        if (sreq->offset < (req->nbytes - creq->cache_ptr))
                return -EINPROGRESS;

        return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

        mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
        struct mv_cesa_engine *engine = sreq->base.engine;

        sreq->offset = 0;
        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->req.dma.base);
        else
                mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        struct mv_cesa_engine *engine = creq->req.base.engine;
        unsigned int digsize;
        int ret, i;

        if (creq->req.base.type == CESA_DMA_REQ)
                ret = mv_cesa_dma_process(&creq->req.dma.base, status);
        else
                ret = mv_cesa_ahash_std_process(ahashreq, status);

        if (ret == -EINPROGRESS)
                return ret;

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
        for (i = 0; i < digsize / 4; i++)
                creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

        if (creq->cache_ptr)
                sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
                                   creq->cache,
                                   creq->cache_ptr,
                                   ahashreq->nbytes - creq->cache_ptr);

        if (creq->last_req) {
                /*
                 * The hardware's MD5 digest is in little endian format,
                 * but the SHA digests are in big endian format.
                 */
                if (creq->algo_le) {
                        __le32 *result = (void *)ahashreq->result;

                        for (i = 0; i < digsize / 4; i++)
                                result[i] = cpu_to_le32(creq->state[i]);
                } else {
                        __be32 *result = (void *)ahashreq->result;

                        for (i = 0; i < digsize / 4; i++)
                                result[i] = cpu_to_be32(creq->state[i]);
                }
        }

        return ret;
}
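
/*
 * Editor's illustration of the final byte swap above: on a little-endian
 * CPU, state[0] = 0x01234567 is stored as bytes 67 45 23 01 by
 * cpu_to_le32() (MD5, algo_le == true) and as 01 23 45 67 by
 * cpu_to_be32() (SHA-1/SHA-256).
 */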

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
                                  struct mv_cesa_engine *engine)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
        unsigned int digsize;
        int i;

        creq->req.base.engine = engine;

        if (creq->req.base.type == CESA_DMA_REQ)
                mv_cesa_ahash_dma_prepare(ahashreq);
        else
                mv_cesa_ahash_std_prepare(ahashreq);

        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
        for (i = 0; i < digsize / 4; i++)
                writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

        if (creq->last_req)
                mv_cesa_ahash_last_cleanup(ahashreq);

        mv_cesa_ahash_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
        .step = mv_cesa_ahash_step,
        .process = mv_cesa_ahash_process,
        .prepare = mv_cesa_ahash_prepare,
        .cleanup = mv_cesa_ahash_req_cleanup,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
                              struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        memset(creq, 0, sizeof(*creq));
        mv_cesa_update_op_cfg(tmpl,
                              CESA_SA_DESC_CFG_OP_MAC_ONLY |
                              CESA_SA_DESC_CFG_FIRST_FRAG,
                              CESA_SA_DESC_CFG_OP_MSK |
                              CESA_SA_DESC_CFG_FRAG_MSK);
        mv_cesa_set_mac_op_total_len(tmpl, 0);
        mv_cesa_set_mac_op_frag_len(tmpl, 0);
        creq->op_tmpl = *tmpl;
        creq->len = 0;
        creq->algo_le = algo_le;

        return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

        if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
                *cached = true;

                if (!req->nbytes)
                        return 0;

                sg_pcopy_to_buffer(req->src, creq->src_nents,
                                   creq->cache + creq->cache_ptr,
                                   req->nbytes, 0);

                creq->cache_ptr += req->nbytes;
        }

        return 0;
}
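
/*
 * Editor's illustration: with cache_ptr = 10 and nbytes = 20 on a
 * non-final request, 10 + 20 < 64, so the 20 new bytes are simply
 * appended to creq->cache, *cached is set, and no hardware operation
 * is queued.
 */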

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
                     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
                     gfp_t flags)
{
        struct mv_cesa_op_ctx *op;
        int ret;

        op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
        if (IS_ERR(op))
                return op;

        /* Set the operation block fragment length. */
        mv_cesa_set_mac_op_frag_len(op, frag_len);

        /* Append dummy desc to launch operation */
        ret = mv_cesa_dma_add_dummy_launch(chain, flags);
        if (ret)
                return ERR_PTR(ret);

        if (mv_cesa_mac_op_is_first_frag(tmpl))
                mv_cesa_update_op_cfg(tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        return op;
}
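
/*
 * Editor's note: once mv_cesa_dma_add_frag() has emitted a fragment from
 * a "first frag" template, the template itself is downgraded to
 * "mid frag", so every later fragment generated from it chains onto the
 * running hash instead of restarting it.
 */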

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
                            struct mv_cesa_ahash_dma_iter *dma_iter,
                            struct mv_cesa_ahash_req *creq,
                            gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        int ret;

        if (!creq->cache_ptr)
                return 0;

        ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
        if (ret)
                return ret;

        memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

        return mv_cesa_dma_add_data_transfer(chain,
                                             CESA_SA_DATA_SRAM_OFFSET,
                                             ahashdreq->cache_dma,
                                             creq->cache_ptr,
                                             CESA_TDMA_DST_IN_SRAM,
                                             flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
                           struct mv_cesa_ahash_dma_iter *dma_iter,
                           struct mv_cesa_ahash_req *creq,
                           unsigned int frag_len, gfp_t flags)
{
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        unsigned int len, trailerlen, padoff = 0;
        struct mv_cesa_op_ctx *op;
        int ret;

        /*
         * If the transfer is smaller than our maximum length, and we have
         * some data outstanding, we can ask the engine to finish the hash.
         */
        if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                mv_cesa_set_mac_op_total_len(op, creq->len);
                mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
                                      CESA_SA_DESC_CFG_NOT_FRAG :
                                      CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

                return op;
        }

        /*
         * The request is longer than the engine can handle, or we have
         * no data outstanding. Manually generate the padding, adding it
         * as a "mid" fragment.
         */
        ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
        if (ret)
                return ERR_PTR(ret);

        trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

        len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
        if (len) {
                ret = mv_cesa_dma_add_data_transfer(chain,
                                                CESA_SA_DATA_SRAM_OFFSET +
                                                frag_len,
                                                ahashdreq->padding_dma,
                                                len, CESA_TDMA_DST_IN_SRAM,
                                                flags);
                if (ret)
                        return ERR_PTR(ret);

                op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
                                          flags);
                if (IS_ERR(op))
                        return op;

                if (len == trailerlen)
                        return op;

                padoff += len;
        }

        ret = mv_cesa_dma_add_data_transfer(chain,
                                            CESA_SA_DATA_SRAM_OFFSET,
                                            ahashdreq->padding_dma +
                                            padoff,
                                            trailerlen - padoff,
                                            CESA_TDMA_DST_IN_SRAM,
                                            flags);
        if (ret)
                return ERR_PTR(ret);

        return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
                                    flags);
}
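
/*
 * Worked example for the split-padding path above (editor's illustration
 * with hypothetical sizes): if only 40 bytes of SRAM payload remain after
 * frag_len and the trailer is 72 bytes, padding[0..39] is appended to the
 * data and hashed as one fragment of frag_len + 40 bytes; the remaining
 * 32 trailer bytes (padoff = 40) are then transferred to the start of the
 * data area and hashed as a final fragment of their own.
 */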

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
        struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        int ret;

        dreq->chain.first = NULL;
        dreq->chain.last = NULL;

        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        mv_cesa_tdma_desc_iter_init(&dreq->chain);
        mv_cesa_ahash_req_iter_init(&iter, req);

        /*
         * Add the cache (left-over data from a previous block) first.
         * This will never overflow the SRAM size.
         */
        ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
        if (ret)
                goto err_free_tdma;

        if (iter.src.sg) {
                /*
                 * Add all the new data, inserting an operation block and
                 * launch command between each full SRAM block-worth of
                 * data. We intentionally do not add the final op block.
                 */
                while (true) {
                        ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
                                                           &iter.base,
                                                           &iter.src, flags);
                        if (ret)
                                goto err_free_tdma;

                        frag_len = iter.base.op_len;

                        if (!mv_cesa_ahash_req_iter_next_op(&iter))
                                break;

                        op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
                                                  frag_len, flags);
                        if (IS_ERR(op)) {
                                ret = PTR_ERR(op);
                                goto err_free_tdma;
                        }
                }
        } else {
                /* Account for the data that was in the cache. */
                frag_len = iter.base.op_len;
        }

        /*
         * At this point, frag_len indicates whether we have any data
         * outstanding which needs an operation. Queue up the final
         * operation, which depends on whether this is the final request.
         */
        if (creq->last_req)
                op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
                                                frag_len, flags);
        else if (frag_len)
                op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
                                          frag_len, flags);

        if (IS_ERR(op)) {
                ret = PTR_ERR(op);
                goto err_free_tdma;
        }

        if (op) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
                if (ret)
                        goto err_free_tdma;
        }

        if (!creq->last_req)
                creq->cache_ptr = req->nbytes + creq->cache_ptr -
                                  iter.base.len;
        else
                creq->cache_ptr = 0;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(dreq);
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
        mv_cesa_ahash_last_cleanup(req);

        return ret;
}
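
/*
 * Editor's summary of the TDMA chain built above:
 *
 *   [copy cached bytes to SRAM] ->
 *   ([copy one SRAM payload of data] -> [op descriptor + dummy launch])* ->
 *   [final op, with generated padding if this is the last request] ->
 *   [dummy end descriptor]
 *
 * Any source bytes beyond iter.base.len are re-cached for the next update.
 */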

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        int ret;

        if (cesa_dev->caps->has_tdma)
                creq->req.base.type = CESA_DMA_REQ;
        else
                creq->req.base.type = CESA_STD_REQ;

        creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG\n");
                return creq->src_nents;
        }

        ret = mv_cesa_ahash_cache_req(req, cached);
        if (ret)
                return ret;

        if (*cached)
                return 0;

        if (creq->req.base.type == CESA_DMA_REQ)
                ret = mv_cesa_ahash_dma_req_init(req);

        return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        bool cached = false;
        int ret;

        creq->len += req->nbytes;
        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        ret = mv_cesa_queue_req(&req->base);
        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
        bool cached = false;
        int ret;

        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;
        req->nbytes = 0;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        ret = mv_cesa_queue_req(&req->base);
        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
        bool cached = false;
        int ret;

        creq->len += req->nbytes;
        mv_cesa_set_mac_op_total_len(tmpl, creq->len);
        creq->last_req = true;

        ret = mv_cesa_ahash_req_init(req, &cached);
        if (ret)
                return ret;

        if (cached)
                return 0;

        ret = mv_cesa_queue_req(&req->base);
        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_ahash_cleanup(req);

        return ret;
}
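
/*
 * Editor's note: the three entry points above differ only in length
 * accounting: update() adds nbytes to the running total, final() marks
 * the request as last with nbytes forced to 0, and finup() does both in
 * a single submission; all of them short-circuit without touching the
 * hardware when the data still fits in the software cache.
 */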

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
                                u64 *len, void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;

        blocksize = crypto_ahash_blocksize(ahash);

        *len = creq->len;
        memcpy(hash, creq->state, digsize);
        memset(cache, 0, blocksize);
        memcpy(cache, creq->cache, creq->cache_ptr);

        return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
                                u64 len, const void *cache)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        unsigned int digsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize;
        unsigned int cache_ptr;
        int ret;

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        blocksize = crypto_ahash_blocksize(ahash);
        if (len >= blocksize)
                mv_cesa_update_op_cfg(&creq->op_tmpl,
                                      CESA_SA_DESC_CFG_MID_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);

        creq->len = len;
        memcpy(creq->state, hash, digsize);
        creq->cache_ptr = 0;

        cache_ptr = do_div(len, blocksize);
        if (!cache_ptr)
                return 0;

        memcpy(creq->cache, cache, cache_ptr);
        creq->cache_ptr = cache_ptr;

        return 0;
}
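
/*
 * Editor's illustration of the do_div() call above: do_div(len, blocksize)
 * divides len in place and returns the remainder, so with blocksize = 64
 * and len = 150 it returns cache_ptr = 22 (150 mod 64), i.e. the 22 bytes
 * of the partial block that must be restored into creq->cache.
 */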

static int mv_cesa_md5_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
        creq->state[0] = MD5_H0;
        creq->state[1] = MD5_H1;
        creq->state[2] = MD5_H2;
        creq->state[3] = MD5_H3;

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
        struct md5_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->hash,
                                    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
        const struct md5_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
                                    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
        .init = mv_cesa_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_md5_digest,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "md5",
                        .cra_driver_name = "mv-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};
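
/*
 * Usage sketch (editor's addition, not part of the driver): callers reach
 * this implementation through the generic ahash API, e.g.:
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("md5", 0, 0);
 *
 * The crypto core selects "mv-md5" over the generic software md5 when its
 * priority (300 above) is the highest registered for the "md5" name.
 */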

static int mv_cesa_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
        creq->state[0] = SHA1_H0;
        creq->state[1] = SHA1_H1;
        creq->state[2] = SHA1_H2;
        creq->state[3] = SHA1_H3;
        creq->state[4] = SHA1_H4;

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
        struct sha1_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
        const struct sha1_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
        .init = mv_cesa_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha1_digest,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "sha1",
                        .cra_driver_name = "mv-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
        creq->state[0] = SHA256_H0;
        creq->state[1] = SHA256_H1;
        creq->state[2] = SHA256_H2;
        creq->state[3] = SHA256_H3;
        creq->state[4] = SHA256_H4;
        creq->state[5] = SHA256_H5;
        creq->state[6] = SHA256_H6;
        creq->state[7] = SHA256_H7;

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
        struct sha256_state *out_state = out;

        return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
                                    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
        const struct sha256_state *in_state = in;

        return mv_cesa_ahash_import(req, in_state->state, in_state->count,
                                    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
        .init = mv_cesa_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_sha256_digest,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "sha256",
                        .cra_driver_name = "mv-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
                        .cra_init = mv_cesa_ahash_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

struct mv_cesa_ahash_result {
        struct completion completion;
        int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
                                        int error)
{
        struct mv_cesa_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
                                       void *state, unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   mv_cesa_hmac_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(req, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(req);
        if (ret)
                return ret;

        ret = crypto_ahash_update(req);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        ret = crypto_ahash_export(req, state);
        if (ret)
                return ret;

        return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
                                  const u8 *key, unsigned int keylen,
                                  u8 *ipad, u8 *opad,
                                  unsigned int blocksize)
{
        struct mv_cesa_ahash_result result;
        struct scatterlist sg;
        int ret;
        int i;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           mv_cesa_hmac_ahash_complete,
                                           &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(req, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(req);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Set the memory region to 0 to avoid any leak. */
                memset(keydup, 0, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= 0x36;
                opad[i] ^= 0x5c;
        }

        return 0;
}
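
/*
 * Editor's note: this is the standard HMAC key schedule from RFC 2104.
 * The key, zero-padded to one block (or first digested if longer than a
 * block), is XORed with 0x36 to form the inner pad and with 0x5c to form
 * the outer pad; e.g. key byte 0x0b gives ipad byte 0x0b ^ 0x36 = 0x3d
 * and opad byte 0x0b ^ 0x5c = 0x57.
 */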

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
                                const u8 *key, unsigned int keylen,
                                void *istate, void *ostate)
{
        struct ahash_request *req;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad = NULL;
        u8 *opad;
        int ret;

        tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);

        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_req;
        }

        opad = ipad + blocksize;

        ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
        if (ret)
                goto free_ipad;

        ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
        kfree(ipad);
free_req:
        ahash_request_free(req);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->base.ops = &mv_cesa_ahash_req_ops;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mv_cesa_ahash_req));
        return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, true);

        return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
                                    unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct md5_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
                ctx->iv[i] = be32_to_cpu(istate.hash[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

        return 0;
}
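
/*
 * Editor's note: ctx->iv holds both precomputed HMAC states back to back:
 * the H(K ^ ipad) words at indices 0..N-1 and the H(K ^ opad) words
 * starting at index 8, presumably a fixed offset chosen so the outer
 * state sits at the same place in the op context for MD5 (4 words) and
 * SHA (5 or 8 words) alike.
 */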

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_md5_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
        .init = mv_cesa_ahmac_md5_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_md5_digest,
        .setkey = mv_cesa_ahmac_md5_setkey,
        .export = mv_cesa_md5_export,
        .import = mv_cesa_md5_import,
        .halg = {
                .digestsize = MD5_DIGEST_SIZE,
                .statesize = sizeof(struct md5_state),
                .base = {
                        .cra_name = "hmac(md5)",
                        .cra_driver_name = "mv-hmac-md5",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha1_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha1_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
        .init = mv_cesa_ahmac_sha1_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha1_digest,
        .setkey = mv_cesa_ahmac_sha1_setkey,
        .export = mv_cesa_sha1_export,
        .import = mv_cesa_sha1_import,
        .halg = {
                .digestsize = SHA1_DIGEST_SIZE,
                .statesize = sizeof(struct sha1_state),
                .base = {
                        .cra_name = "hmac(sha1)",
                        .cra_driver_name = "mv-hmac-sha1",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA1_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
                                       unsigned int keylen)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct sha256_state istate, ostate;
        int ret, i;

        ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(istate.state); i++)
                ctx->iv[i] = be32_to_cpu(istate.state[i]);

        for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
                ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

        return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
        struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct mv_cesa_op_ctx tmpl = { };

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
        memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

        mv_cesa_ahash_init(req, &tmpl, false);

        return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
        int ret;

        ret = mv_cesa_ahmac_sha256_init(req);
        if (ret)
                return ret;

        return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
        .init = mv_cesa_ahmac_sha256_init,
        .update = mv_cesa_ahash_update,
        .final = mv_cesa_ahash_final,
        .finup = mv_cesa_ahash_finup,
        .digest = mv_cesa_ahmac_sha256_digest,
        .setkey = mv_cesa_ahmac_sha256_setkey,
        .export = mv_cesa_sha256_export,
        .import = mv_cesa_sha256_import,
        .halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize = sizeof(struct sha256_state),
                .base = {
                        .cra_name = "hmac(sha256)",
                        .cra_driver_name = "mv-hmac-sha256",
                        .cra_priority = 300,
                        .cra_flags = CRYPTO_ALG_ASYNC |
                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
                        .cra_blocksize = SHA256_BLOCK_SIZE,
                        .cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
                        .cra_init = mv_cesa_ahmac_cra_init,
                        .cra_module = THIS_MODULE,
                }
        }
};