/*
 * Cryptographic API.
 *
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/sha.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK		(sizeof(u32) - 1)
#define SHA_QUEUE_SIZE		512
#define SHA_TMP_BUF_SIZE	512
#define SHA_BUF_SIZE		((u32)PAGE_SIZE)

#define SHA_OP_UPDATE		1
#define SHA_OP_FINAL		2

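/* The low 17 bits of the command-token header hold the input data length */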
#define SHA_DATA_LEN_MSK	cpu_to_le32(GENMASK(16, 0))

/* SHA command token */
#define SHA_CT_SIZE		5
#define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
#define SHA_CMD0		cpu_to_le32(0x03020000)
#define SHA_CMD1		cpu_to_le32(0x21060000)
#define SHA_CMD2		cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
#define SHA_TFM_INNER_DIG	cpu_to_le32(0x1 << 21)
#define SHA_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define SHA_TFM_START		cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE	cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE	cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1		cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256		cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224		cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512		cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384		cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)	cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
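
/*
 * Summary of the first transform-control word, as encoded by the macros
 * above: bits [1:0] select the operation (0x2 = hash), bit 4 starts a
 * new hash, bit 5 continues from stored state, the state size in words
 * starts at bit 8, bit 19 stores the hash state back, bit 21 requests
 * the inner digest and bits [25:23] select the algorithm.
 */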

/* SHA flags */
#define SHA_FLAGS_BUSY		BIT(0)
#define SHA_FLAGS_FINAL		BIT(1)
#define SHA_FLAGS_FINUP		BIT(2)
#define SHA_FLAGS_SG		BIT(3)
#define SHA_FLAGS_ALGO_MSK	GENMASK(8, 4)
#define SHA_FLAGS_SHA1		BIT(4)
#define SHA_FLAGS_SHA224	BIT(5)
#define SHA_FLAGS_SHA256	BIT(6)
#define SHA_FLAGS_SHA384	BIT(7)
#define SHA_FLAGS_SHA512	BIT(8)
#define SHA_FLAGS_HMAC		BIT(9)
#define SHA_FLAGS_PAD		BIT(10)

/**
 * mtk_sha_ct is a set of hardware instructions (command token)
 * that controls the engine's SHA processing flow; it also holds
 * the first two words of the transform state.
 */
struct mtk_sha_ct {
	__le32 ctrl[2];
	__le32 cmd[3];
};

/**
 * mtk_sha_tfm defines the SHA transform state and stores the
 * result digest produced by the engine.
 */
struct mtk_sha_tfm {
	__le32 ctrl[2];
	__le32 digest[SIZE_IN_WORDS(SHA512_DIGEST_SIZE)];
};

/**
 * mtk_sha_info consists of the command token and transform state
 * of SHA; its role is similar to that of mtk_aes_info.
 */
struct mtk_sha_info {
	struct mtk_sha_ct ct;
	struct mtk_sha_tfm tfm;
};

struct mtk_sha_reqctx {
	struct mtk_sha_info info;
	unsigned long flags;
	unsigned long op;

	u64 digcnt;
	bool start;
	size_t bufcnt;
	dma_addr_t dma_addr;

	__le32 ct_hdr;		/* Command token header */
	u32 ct_size;		/* Command token size in words */
	dma_addr_t ct_dma;	/* DMA address of the command token */
	dma_addr_t tfm_dma;	/* DMA address of the transform state */

	/* Walk state */
	struct scatterlist *sg;
	u32 offset;	/* Offset in current sg */
	u32 total;	/* Total request */
	size_t ds;	/* Digest size */
	size_t bs;	/* Block size */

	u8 *buffer;
};

struct mtk_sha_hmac_ctx {
	struct crypto_shash *shash;
	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct mtk_sha_ctx {
	struct mtk_cryp *cryp;
	unsigned long flags;
	u8 id;
	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

	struct mtk_sha_hmac_ctx base[0];
};

struct mtk_sha_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_sha_drv mtk_sha = {
	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req);

static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_sha.lock);
	if (!tctx->cryp) {
		list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
			cryp = tmp;
			break;
		}
		tctx->cryp = cryp;
	} else {
		cryp = tctx->cryp;
	}

	/*
	 * Assign record ids to tfms in round-robin fashion so that
	 * each tfm is bound to its corresponding descriptor ring.
	 */
	tctx->id = cryp->rec;
	cryp->rec = !cryp->rec;

	spin_unlock_bh(&mtk_sha.lock);

	return cryp;
}

static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check whether count <= 0 because the buffer is
			 * full or because the sg length is 0. In the latter
			 * case, check if there is another sg in the list;
			 * a zero-length sg doesn't necessarily mean the
			 * end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
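/*
 * Worked example (SHA-256): for a 3-byte message, index = 3 and
 * padlen = 56 - 3 = 53, so the buffer receives 0x80, 52 zero bytes and
 * the 64-bit big-endian bit length (24 = 0x18), giving one full
 * 64-byte block.
 */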
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
	u32 index, padlen;
	u64 bits[2];
	u64 size = ctx->digcnt;

	size += ctx->bufcnt;
	size += len;

	bits[1] = cpu_to_be64(size << 3);
	bits[0] = cpu_to_be64(size >> 61);

	if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) {
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
	} else {
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
	}
}

/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
	struct mtk_sha_ct *ct = &ctx->info.ct;
	struct mtk_sha_tfm *tfm = &ctx->info.tfm;

	ctx->ct_hdr = SHA_CT_CTRL_HDR;
	ctx->ct_size = SHA_CT_SIZE;

	tfm->ctrl[0] = SHA_TFM_HASH | SHA_TFM_INNER_DIG |
		       SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA1:
		tfm->ctrl[0] |= SHA_TFM_SHA1;
		break;
	case SHA_FLAGS_SHA224:
		tfm->ctrl[0] |= SHA_TFM_SHA224;
		break;
	case SHA_FLAGS_SHA256:
		tfm->ctrl[0] |= SHA_TFM_SHA256;
		break;
	case SHA_FLAGS_SHA384:
		tfm->ctrl[0] |= SHA_TFM_SHA384;
		break;
	case SHA_FLAGS_SHA512:
		tfm->ctrl[0] |= SHA_TFM_SHA512;
		break;
	default:
		/* Should not happen... */
		return;
	}

	tfm->ctrl[1] = SHA_TFM_HASH_STORE;
	ct->ctrl[0] = tfm->ctrl[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
	ct->ctrl[1] = tfm->ctrl[1];

	ct->cmd[0] = SHA_CMD0;
	ct->cmd[1] = SHA_CMD1;
	ct->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}

/*
 * Update the input data length field of the transform information
 * and map it to the DMA region.
 */
static int mtk_sha_info_update(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       size_t len)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_sha_info *info = &ctx->info;
	struct mtk_sha_ct *ct = &info->ct;

	if (ctx->start)
		ctx->start = false;
	else
		ct->ctrl[0] &= ~SHA_TFM_START;

	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
	ctx->ct_hdr |= cpu_to_le32(len);
	ct->cmd[0] &= ~SHA_DATA_LEN_MSK;
	ct->cmd[0] |= cpu_to_le32(len);

	ctx->digcnt += len;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
		return -EINVAL;
	}
	/* The transform state immediately follows the command token */
	ctx->tfm_dma = ctx->ct_dma + sizeof(*ct);

	return 0;
}

/*
 * Because of a hardware limitation, the inner and outer digests must
 * be pre-calculated separately: the engine first processes the inner
 * hash, and the resulting digest is then hashed again. This complex
 * procedure limits HMAC performance, so a software shash fallback is
 * used for the outer hash.
 */
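/*
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)). The engine has
 * already produced the inner hash H((K ^ ipad) || m) in req->result;
 * the outer hash over opad || inner digest is computed below with a
 * synchronous software tfm.
 */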
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}

/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags = 0;
	ctx->ds = crypto_ahash_digestsize(tfm);

	switch (ctx->ds) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->bs = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buffer = tctx->buf;
	ctx->start = true;

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, ctx->bs);
		ctx->bufcnt = ctx->bs;
		ctx->flags |= SHA_FLAGS_HMAC;
	}

	return 0;
}

static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr, size_t len)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
	struct mtk_desc *res = ring->res_base + ring->res_pos;
	int err;

	err = mtk_sha_info_update(cryp, sha, len);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	res->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len);
	res->buf = cpu_to_le32(cryp->tmp_dma);

	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_LAST | MTK_DESC_BUF_LEN(len) |
		   MTK_DESC_CT_LEN(ctx->ct_size);

	cmd->buf = cpu_to_le32(addr);
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	if (++ring->cmd_pos == MTK_DESC_NUM)
		ring->cmd_pos = 0;

	ring->res_pos = ring->cmd_pos;
	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(1));

	return -EINPROGRESS;
}

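/*
 * Like mtk_sha_xmit(), but the transfer is split across two descriptor
 * pairs: the first (MTK_DESC_FIRST) covers len1 bytes of scatterlist
 * data, the second (MTK_DESC_LAST) covers len2 bytes from the padding
 * buffer at ctx->dma_addr.
 */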
static int mtk_sha_xmit2(struct mtk_cryp *cryp,
			 struct mtk_sha_rec *sha,
			 struct mtk_sha_reqctx *ctx,
			 size_t len1, size_t len2)
{
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd = ring->cmd_base + ring->cmd_pos;
	struct mtk_desc *res = ring->res_base + ring->res_pos;
	int err;

	err = mtk_sha_info_update(cryp, sha, len1 + len2);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	res->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST;
	res->buf = cpu_to_le32(cryp->tmp_dma);

	cmd->hdr = MTK_DESC_BUF_LEN(len1) | MTK_DESC_FIRST |
		   MTK_DESC_CT_LEN(ctx->ct_size);
	cmd->buf = cpu_to_le32(sg_dma_address(ctx->sg));
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	if (++ring->cmd_pos == MTK_DESC_NUM)
		ring->cmd_pos = 0;

	ring->res_pos = ring->cmd_pos;

	cmd = ring->cmd_base + ring->cmd_pos;
	res = ring->res_base + ring->res_pos;

	res->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
	res->buf = cpu_to_le32(cryp->tmp_dma);

	cmd->hdr = MTK_DESC_BUF_LEN(len2) | MTK_DESC_LAST;
	cmd->buf = cpu_to_le32(ctx->dma_addr);

	if (++ring->cmd_pos == MTK_DESC_NUM)
		ring->cmd_pos = 0;

	ring->res_pos = ring->cmd_pos;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(2));

	return -EINPROGRESS;
}

static int mtk_sha_dma_map(struct mtk_cryp *cryp,
			   struct mtk_sha_rec *sha,
			   struct mtk_sha_reqctx *ctx,
			   size_t count)
{
	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
				       SHA_BUF_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
		dev_err(cryp->dev, "dma map error\n");
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);
}

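/*
 * Slow path: data is copied through the linear bounce buffer and sent
 * from there. It is taken whenever the scatterlist is not suitable for
 * direct DMA, or when leftover bytes are already buffered.
 */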
static int mtk_sha_update_slow(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;
	u32 final;

	mtk_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

	if (final) {
		sha->flags |= SHA_FLAGS_FINAL;
		mtk_sha_fill_padding(ctx, 0);
	}

	if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		return mtk_sha_dma_map(cryp, sha, ctx, count);
	}
	return 0;
}

static int mtk_sha_update_start(struct mtk_cryp *cryp,
				struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	u32 len, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return mtk_sha_update_slow(cryp, sha);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return mtk_sha_update_slow(cryp, sha);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
		/* sg length is not a multiple of ctx->bs */
		return mtk_sha_update_slow(cryp, sha);

	len = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not a final update: length must be ctx->bs aligned */
			tail = len & (ctx->bs - 1);
			len -= tail;
		}
	}

	ctx->total -= len;
	ctx->offset = len; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		size_t count;

		tail = len & (ctx->bs - 1);
		len -= tail;
		ctx->total += tail;
		ctx->offset = len; /* offset where to start slow */

		sg = ctx->sg;
		mtk_sha_append_sg(ctx);
		mtk_sha_fill_padding(ctx, len);

		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
					       SHA_BUF_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
			dev_err(cryp->dev, "dma map bytes error\n");
			return -EINVAL;
		}

		sha->flags |= SHA_FLAGS_FINAL;
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		if (len == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count);

		} else {
			ctx->sg = sg;
			if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
				dev_err(cryp->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;
			return mtk_sha_xmit2(cryp, sha, ctx, len, count);
		}
	}

	if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(cryp->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len);
}

static int mtk_sha_final_req(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;

	mtk_sha_fill_padding(ctx, 0);

	sha->flags |= SHA_FLAGS_FINAL;
	count = ctx->bufcnt;
	ctx->bufcnt = 0;

	return mtk_sha_dma_map(cryp, sha, ctx, count);
}

/* Copy ready hash (+ finalize hmac) */
static int mtk_sha_finish(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *digest = ctx->info.tfm.digest;
	u32 *result = (u32 *)req->result;
	int i;

	/* Get the hash from the digest buffer */
	for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
		result[i] = le32_to_cpu(digest[i]);

	if (ctx->flags & SHA_FLAGS_HMAC)
		return mtk_sha_finish_hmac(req);

	return 0;
}

static void mtk_sha_finish_req(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       int err)
{
	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
		err = mtk_sha_finish(sha->req);

	sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

	sha->req->base.complete(&sha->req->base, err);

	/* Handle new request */
	mtk_sha_handle_queue(cryp, sha->id - RING2, NULL);
}

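/*
 * Standard crypto queue handling: enqueue the request and, if the
 * record is idle, dequeue the next one (notifying any backlogged
 * request with -EINPROGRESS) and start the transfer.
 */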
static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req)
{
	struct mtk_sha_rec *sha = cryp->sha[id];
	struct crypto_async_request *async_req, *backlog;
	struct mtk_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&sha->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&sha->queue, req);

	if (SHA_FLAGS_BUSY & sha->flags) {
		spin_unlock_irqrestore(&sha->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&sha->queue);
	async_req = crypto_dequeue_request(&sha->queue);
	if (async_req)
		sha->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&sha->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	ctx = ahash_request_ctx(req);

	sha->req = req;

	mtk_sha_info_init(ctx);

	if (ctx->op == SHA_OP_UPDATE) {
		err = mtk_sha_update_start(cryp, sha);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* No final() after finup() */
			err = mtk_sha_final_req(cryp, sha);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = mtk_sha_final_req(cryp, sha);
	}

	if (unlikely(err != -EINPROGRESS))
		/* Task will not finish it, so do it here */
		mtk_sha_finish_req(cryp, sha, err);

	return ret;
}

static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op = op;

	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}

static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_BIDIRECTIONAL);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(cryp->dev, ctx->dma_addr,
					 SHA_BUF_SIZE, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(cryp->dev, ctx->dma_addr,
				 SHA_BUF_SIZE, DMA_TO_DEVICE);
	}
}

static void mtk_sha_complete(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	int err = 0;

	err = mtk_sha_update_start(cryp, sha);
	if (err != -EINPROGRESS)
		mtk_sha_finish_req(cryp, sha, err);
}

static int mtk_sha_update(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
	    !(ctx->flags & SHA_FLAGS_FINUP))
		return mtk_sha_append_sg(ctx);

	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}

static int mtk_sha_final(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_PAD)
		return mtk_sha_finish(req);

	return mtk_sha_enqueue(req, SHA_OP_FINAL);
}

static int mtk_sha_finup(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = mtk_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() must always be called to clean up resources,
	 * even if update() failed.
	 */
	err2 = mtk_sha_final(req);

	return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
	return mtk_sha_init(req) ?: mtk_sha_finup(req);
}

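/*
 * HMAC key pre-processing (RFC 2104): keys longer than the block size
 * are first hashed down to the digest size, shorter keys are zero
 * padded, and the result is XORed with 0x36/0x5c to form ipad/opad.
 */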
static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  u32 keylen)
{
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	size_t bs = crypto_shash_blocksize(bctx->shash);
	size_t ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = crypto_shash_get_flags(bctx->shash) &
		       CRYPTO_TFM_REQ_MAY_SLEEP;

	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= 0x36;
		bctx->opad[i] ^= 0x5c;
	}

	return 0;
}

static int mtk_sha_export(struct ahash_request *req, void *out)
{
	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
				const char *alg_base)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_sha_find_dev(tctx);
	if (!cryp)
		return -ENODEV;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mtk_sha_reqctx));

	if (alg_base) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		tctx->flags |= SHA_FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("base driver %s could not be loaded.\n",
			       alg_base);

			return PTR_ERR(bctx->shash);
		}
	}
	return 0;
}

static int mtk_sha_cra_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, NULL);
}

static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha1");
}

static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha224");
}

static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha256");
}

static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha384");
}

static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha512");
}

static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "mtk-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha224",
		.cra_driver_name = "mtk-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "mtk-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha1)",
		.cra_driver_name = "mtk-hmac-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha1_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha224)",
		.cra_driver_name = "mtk-hmac-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha224_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha256)",
		.cra_driver_name = "mtk-hmac-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha256_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha384",
		.cra_driver_name = "mtk-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha512",
		.cra_driver_name = "mtk-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha384)",
		.cra_driver_name = "mtk-hmac-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha384_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha512)",
		.cra_driver_name = "mtk-hmac-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha512_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};
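
/*
 * Illustrative usage sketch (not part of this driver): once the
 * transforms above are registered, a client reaches them through the
 * generic ahash API. Here done_cb, priv, sg, digest and nbytes are
 * hypothetical caller state:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, priv);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req); // -EINPROGRESS once queued
 */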

static void mtk_sha_task0(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_sha_rec *sha = cryp->sha[0];

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}

static void mtk_sha_task1(unsigned long data)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)data;
	struct mtk_sha_rec *sha = cryp->sha[1];

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}

static irqreturn_t mtk_sha_ring2_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_sha_rec *sha = cryp->sha[0];
	u32 val = mtk_sha_read(cryp, RDR_STAT(RING2));

	mtk_sha_write(cryp, RDR_STAT(RING2), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(RING2), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(RING2),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

static irqreturn_t mtk_sha_ring3_irq(int irq, void *dev_id)
{
	struct mtk_cryp *cryp = (struct mtk_cryp *)dev_id;
	struct mtk_sha_rec *sha = cryp->sha[1];
	u32 val = mtk_sha_read(cryp, RDR_STAT(RING3));

	mtk_sha_write(cryp, RDR_STAT(RING3), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(RING3), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(RING3),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * Two SHA records are used to gain extra performance.
 * This is similar to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
	struct mtk_sha_rec **sha = cryp->sha;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
		if (!sha[i])
			goto err_cleanup;

		sha[i]->id = i + RING2;

		spin_lock_init(&sha[i]->lock);
		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
	}

	tasklet_init(&sha[0]->task, mtk_sha_task0, (unsigned long)cryp);
	tasklet_init(&sha[1]->task, mtk_sha_task1, (unsigned long)cryp);

	cryp->rec = 1;

	return 0;

err_cleanup:
	for (; i--; )
		kfree(sha[i]);
	return err;
}

static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->sha[i]->task);
		kfree(cryp->sha[i]);
	}
}

static void mtk_sha_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
}

static int mtk_sha_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
		err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
		if (err)
			goto err_sha_224_256_algs;
	}

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
		err = crypto_register_ahash(&algs_sha384_sha512[i]);
		if (err)
			goto err_sha_384_512_algs;
	}

	return 0;

err_sha_384_512_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
	i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	return err;
}

int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
	int err;

	INIT_LIST_HEAD(&cryp->sha_list);

	/* Initialize two hash records */
	err = mtk_sha_record_init(cryp);
	if (err)
		goto err_record;

	/* Ring2 is used by SHA record0 */
	err = devm_request_irq(cryp->dev, cryp->irq[RING2],
			       mtk_sha_ring2_irq, IRQF_TRIGGER_LOW,
			       "mtk-sha", cryp);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq0.\n");
		goto err_res;
	}

	/* Ring3 is used by SHA record1 */
	err = devm_request_irq(cryp->dev, cryp->irq[RING3],
			       mtk_sha_ring3_irq, IRQF_TRIGGER_LOW,
			       "mtk-sha", cryp);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq1.\n");
		goto err_res;
	}

	/* Enable ring2 and ring3 interrupt for hash */
	mtk_sha_write(cryp, AIC_ENABLE_SET(RING2), MTK_IRQ_RDR2);
	mtk_sha_write(cryp, AIC_ENABLE_SET(RING3), MTK_IRQ_RDR3);

	cryp->tmp = dma_alloc_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
				       &cryp->tmp_dma, GFP_KERNEL);
	if (!cryp->tmp) {
		dev_err(cryp->dev, "unable to allocate tmp buffer.\n");
		err = -EINVAL;
		goto err_res;
	}

	spin_lock(&mtk_sha.lock);
	list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
	spin_unlock(&mtk_sha.lock);

	err = mtk_sha_register_algs();
	if (err)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);
	dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
			  cryp->tmp, cryp->tmp_dma);
err_res:
	mtk_sha_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-sha initialization failed.\n");
	return err;
}

void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);

	mtk_sha_unregister_algs();
	dma_free_coherent(cryp->dev, SHA_TMP_BUF_SIZE,
			  cryp->tmp, cryp->tmp_dma);
	mtk_sha_record_free(cryp);
}