// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#define HASH_CR			0x00
#define HASH_DIN		0x04
#define HASH_STR		0x08
#define HASH_UX500_HREG(x)	(0x0c + ((x) * 0x04))
#define HASH_IMR		0x20
#define HASH_SR			0x24
#define HASH_CSR(x)		(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)		(0x310 + ((x) * 0x04))
#define HASH_HWCFGR		0x3F0
#define HASH_VER		0x3F4
#define HASH_ID			0x3F8

/* Control Register */
#define HASH_CR_INIT		BIT(2)
#define HASH_CR_DMAE		BIT(3)
#define HASH_CR_DATATYPE_POS	4
#define HASH_CR_MODE		BIT(6)
#define HASH_CR_MDMAT		BIT(13)
#define HASH_CR_DMAA		BIT(14)
#define HASH_CR_LKEY		BIT(16)

#define HASH_CR_ALGO_SHA1	0x0
#define HASH_CR_ALGO_MD5	0x80
#define HASH_CR_ALGO_SHA224	0x40000
#define HASH_CR_ALGO_SHA256	0x40080

#define HASH_CR_UX500_EMPTYMSG	BIT(20)
#define HASH_CR_UX500_ALGO_SHA1	BIT(7)
#define HASH_CR_UX500_ALGO_SHA256 0x0

/* Interrupt */
#define HASH_DINIE		BIT(0)
#define HASH_DCIE		BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	54

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK	GENMASK(4, 0)
#define HASH_STR_DCAL		BIT(8)

#define HASH_FLAGS_INIT		BIT(0)
#define HASH_FLAGS_OUTPUT_READY	BIT(1)
#define HASH_FLAGS_CPU		BIT(2)
#define HASH_FLAGS_DMA_READY	BIT(3)
#define HASH_FLAGS_DMA_ACTIVE	BIT(4)
#define HASH_FLAGS_HMAC_INIT	BIT(5)
#define HASH_FLAGS_HMAC_FINAL	BIT(6)
#define HASH_FLAGS_HMAC_KEY	BIT(7)

#define HASH_FLAGS_FINAL	BIT(15)
#define HASH_FLAGS_FINUP	BIT(16)
#define HASH_FLAGS_ALGO_MASK	GENMASK(21, 18)
#define HASH_FLAGS_MD5		BIT(18)
#define HASH_FLAGS_SHA1		BIT(19)
#define HASH_FLAGS_SHA224	BIT(20)
#define HASH_FLAGS_SHA256	BIT(21)
#define HASH_FLAGS_EMPTY	BIT(22)
#define HASH_FLAGS_HMAC		BIT(23)

#define HASH_OP_UPDATE		1
#define HASH_OP_FINAL		2
enum stm32_hash_data_format {
	HASH_DATA_32_BITS	= 0x0,
	HASH_DATA_16_BITS	= 0x1,
	HASH_DATA_8_BITS	= 0x2,
	HASH_DATA_1_BIT		= 0x3
};

#define HASH_BUFLEN		256
#define HASH_LONG_KEY		64
#define HASH_MAX_KEY_SIZE	(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH	16
#define HASH_DMA_THRESHOLD	50

#define HASH_AUTOSUSPEND_DELAY	50

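/*
 * struct stm32_hash_ctx - per-transform (tfm) context
 * @enginectx: crypto engine callbacks for this transform
 * @hdev: hash device instance bound to this transform
 * @xtfm: software shash fallback, used on Ux500 for empty messages
 * @flags: transform-level flags (HASH_FLAGS_HMAC for HMAC transforms)
 * @key: HMAC key material copied in by .setkey
 * @keylen: length of @key in bytes
 */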
struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	struct crypto_shash	*xtfm;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_state {
	u32			flags;

	u16			bufcnt;
	u16			buflen;

	u8 buffer[HASH_BUFLEN] __aligned(4);

	/* hash state */
	u32			hw_context[3 + HASH_CSR_REGISTER_NUMBER];
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	struct stm32_hash_state state;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
	bool				has_sr;
	bool				has_mdmat;
	bool				broken_emptymsg;
	bool				ux500;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;
	bool			polled;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};

struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* List protection access */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	/* The Ux500 lacks the special status register, we poll the DCAL bit instead */
	if (!hdev->pdata->has_sr)
		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
						  !(status & HASH_STR_DCAL), 10, 10000);

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}

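/*
 * NBLW tells the peripheral how many bits of the last 32-bit word written
 * to HASH_DIN are valid, so messages that are not a multiple of four bytes
 * are padded correctly: 8 bits per trailing byte, i.e. 8 * (length % 4).
 */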
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= (8U * ((length) % 4U));
	stm32_hash_write(hdev, HASH_STR, reg);
}

static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

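/*
 * Program the control register for a new operation: select the algorithm,
 * the input data type and, for HMAC, the mode and key length. This is done
 * once per request, when HASH_FLAGS_INIT is not yet set.
 */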
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_state *state = &rctx->state;

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (state->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA1;
			else
				reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			if (hdev->pdata->ux500)
				reg |= HASH_CR_UX500_ALGO_SHA256;
			else
				reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (state->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		if (!hdev->polled)
			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

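/*
 * Copy as much request data as fits from the scatterlist into the CPU
 * staging buffer, advancing the scatterlist cursor as entries are consumed.
 */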
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	struct stm32_hash_state *state = &rctx->state;
	size_t count;

	while ((state->bufcnt < state->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min_t(size_t, count, state->buflen - state->bufcnt);

		if (count <= 0) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
					 rctx->sg, rctx->offset, count, 0);

		state->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

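/*
 * Feed a buffer to the peripheral word by word over the CPU (PIO) path.
 * For a final block this also programs NBLW, starts the digest calculation
 * with DCAL and, in HMAC mode, writes the key again for the outer round.
 */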
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final) {
		hdev->flags |= HASH_FLAGS_FINAL;

		/* Do not process empty messages if hw is buggy. */
		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
		    hdev->pdata->broken_emptymsg) {
			state->flags |= HASH_FLAGS_EMPTY;
			return 0;
		}
	}

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev, length);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;

		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;
	u32 *preg = state->hw_context;
	int bufcnt, err = 0, final;
	int i;

	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);

	final = state->flags & HASH_FLAGS_FINAL;

	while ((rctx->total >= state->buflen) ||
	       (state->bufcnt + rctx->total >= state->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
		if (err)
			return err;
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = state->bufcnt;
		state->bufcnt = 0;
		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
	}

	if (!(hdev->flags & HASH_FLAGS_INIT))
		return 0;

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if (!hdev->pdata->ux500)
		*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	state->flags |= HASH_FLAGS_INIT;

	return err;
}

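/*
 * Push one scatterlist entry to HASH_DIN through the DMA engine and wait
 * (with a timeout) for the transfer to complete. MDMAT is only touched on
 * IP versions that implement it; when set it keeps the hash suspended
 * between chained DMA transfers so that more data can follow.
 */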
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (hdev->pdata->has_mdmat) {
		if (mdma)
			reg |= HASH_CR_MDMAT;
		else
			reg &= ~HASH_CR_MDMAT;
	}
	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

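/*
 * Stream the whole request through DMA, one scatterlist entry at a time.
 * When dma_mode == 1 the last entry is trimmed to a 16-byte boundary and
 * its tail is copied aside, to be written to HASH_DIN by the CPU once the
 * DMA part has finished (the exact constraint depends on the IP version).
 */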
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	u32 *buffer = (void *)rctx->state.buffer;
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev, rctx->total);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->state.buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			memset(buffer + ncp, 0,
			       DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

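/*
 * Start a new request: pick the algorithm from the digest size the crypto
 * API asked for and reset the software state. Everything that must survive
 * an export/import round trip lives in struct stm32_hash_state.
 */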
static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;

	rctx->hdev = hdev;

	state->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		state->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->state.bufcnt = 0;
	rctx->state.buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	if (ctx->flags & HASH_FLAGS_HMAC)
		state->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct stm32_hash_state *state = &rctx->state;

	if (!(state->flags & HASH_FLAGS_CPU))
		return stm32_hash_dma_send(hdev);

	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	int buflen = state->bufcnt;

	if (state->flags & HASH_FLAGS_FINUP)
		return stm32_hash_update_req(hdev);

	state->bufcnt = 0;

	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
}

static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;
	int ret;

	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
		ctx->keylen);

	if (!ctx->xtfm) {
		dev_err(hdev->dev, "no fallback engine\n");
		return;
	}

	if (ctx->keylen) {
		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
		if (ret) {
			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
			return;
		}
	}

	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
	if (ret)
		dev_err(hdev->dev, "shash digest error\n");
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;
	struct stm32_hash_dev *hdev = rctx->hdev;
	__be32 *hash = (void *)rctx->digest;
	unsigned int i, hashsize;

	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
		return stm32_hash_emptymsg_fallback(req);

	switch (state->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++) {
		if (hdev->pdata->ux500)
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_UX500_HREG(i)));
		else
			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
					      HASH_HREG(i)));
	}
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

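/*
 * Complete a request: copy out the digest if the final round succeeded,
 * release the runtime PM reference taken when the request was started and
 * hand the result back to the crypto engine.
 */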
static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

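/*
 * Crypto engine worker. If the request was suspended earlier
 * (HASH_FLAGS_INIT set in the saved state), the IMR/STR/CR registers and
 * the context swap registers are restored before processing resumes.
 */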
static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
		rctx->op, req->nbytes);

	pm_runtime_get_sync(hdev->dev);

	hdev->req = req;
	hdev->flags = 0;

	if (state->flags & HASH_FLAGS_INIT) {
		u32 *preg = rctx->state.hw_context;
		u32 reg;
		int i;

		if (!hdev->pdata->ux500)
			stm32_hash_write(hdev, HASH_IMR, *preg++);
		stm32_hash_write(hdev, HASH_STR, *preg++);
		stm32_hash_write(hdev, HASH_CR, *preg);
		reg = *preg++ | HASH_CR_INIT;
		stm32_hash_write(hdev, HASH_CR, reg);

		for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
			stm32_hash_write(hdev, HASH_CSR(i), *preg++);

		hdev->flags |= HASH_FLAGS_INIT;

		if (state->flags & HASH_FLAGS_HMAC)
			hdev->flags |= HASH_FLAGS_HMAC |
				       HASH_FLAGS_HMAC_KEY;
	}

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	/* If we have an IRQ, wait for that, else poll for completion */
	if (err == -EINPROGRESS && hdev->polled) {
		if (stm32_hash_wait_busy(hdev))
			err = -ETIMEDOUT;
		else {
			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
			err = 0;
		}
	}

	if (err != -EINPROGRESS)
	/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if ((state->bufcnt + rctx->total < state->buflen)) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_state *state = &rctx->state;

	state->flags |= HASH_FLAGS_FINAL;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_state *state = &rctx->state;

	if (!req->nbytes)
		goto out;

	state->flags |= HASH_FLAGS_FINUP;
	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		state->flags &= ~HASH_FLAGS_CPU;

out:
	return stm32_hash_final(req);
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

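/*
 * The complete request state (flags, staging buffer and saved context swap
 * registers) is kept in struct stm32_hash_state, so export/import reduce
 * to a plain memcpy of that structure.
 */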
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	memcpy(out, &rctx->state, sizeof(rctx->state));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	stm32_hash_init(req);
	memcpy(&rctx->state, in, sizeof(rctx->state));

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_shash *xtfm;

	/* The fallback is only needed on Ux500 */
	if (!hdev->pdata->ux500)
		return 0;

	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm)) {
		dev_err(hdev->dev, "failed to allocate %s fallback\n",
			name);
		return PTR_ERR(xtfm);
	}
	dev_info(hdev->dev, "allocated %s fallback\n", name);
	ctx->xtfm = xtfm;

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;

	return stm32_hash_init_fallback(tfm);
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->xtfm)
		crypto_free_shash(ctx->xtfm);
}

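/*
 * Interrupt handling is split in two: the hard handler below acknowledges
 * the "digest ready" status and masks further interrupts, then the
 * threaded handler finalizes the current request outside hard IRQ context.
 */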
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable IT */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

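/*
 * The ahash_alg tables below are what this driver exposes through the
 * generic crypto API. For reference, a kernel consumer would drive one of
 * these algorithms roughly as follows; this is an illustrative sketch of
 * standard crypto API usage (error handling trimmed, variable names
 * hypothetical), not code from this driver:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, src_sg, digest, nbytes);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */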
static struct ahash_alg algs_md5[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_state),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_exit = stm32_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	for (; i--; ) {
		for (; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list = algs_sha1,
		.size = ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list = algs_sha256,
		.size = ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.algs_info = stm32_hash_algs_info_ux500,
	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500 = true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list = algs_md5,
		.size = ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list = algs_sha1,
		.size = ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info = stm32_hash_algs_info_stm32f4,
	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr = true,
	.has_mdmat = true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list = algs_md5,
		.size = ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list = algs_sha1,
		.size = ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list = algs_sha224,
		.size = ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list = algs_sha256,
		.size = ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info = stm32_hash_algs_info_stm32f7,
	.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr = true,
	.has_mdmat = true,
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "stericsson,ux500-hash",
		.data = &stm32_hash_pdata_ux500,
	},
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, use polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_resume_and_get(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

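/*
 * Runtime PM simply gates the peripheral clock; the autosuspend delay set
 * in probe keeps the clock on across back-to-back requests.
 */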
#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe = stm32_hash_probe,
	.remove = stm32_hash_remove,
	.driver = {
		.name = "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table = stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");