// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale i.MX23/i.MX28 Data Co-Processor driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/clk.h>
#include <soc/fsl/dcp.h>

#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#define DCP_MAX_CHANS	4
#define DCP_BUF_SZ	PAGE_SIZE
#define DCP_SHA_PAY_SZ	64

#define DCP_ALIGNMENT	64
/*
 * Null hashes to align with hardware behaviour on i.MX6SL and ULL.
 * These are byte-reversed for consistency with the hardware output.
 */
static const uint8_t sha1_null_hash[] =
	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";

static const uint8_t sha256_null_hash[] =
	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
	uint32_t	next_cmd_addr;
	uint32_t	control0;
	uint32_t	control1;
	uint32_t	source;
	uint32_t	destination;
	uint32_t	size;
	uint32_t	payload;
	uint32_t	status;
};

/* Coherent aligned block for bounce buffering. */
struct dcp_coherent_block {
	uint8_t			aes_in_buf[DCP_BUF_SZ];
	uint8_t			aes_out_buf[DCP_BUF_SZ];
	uint8_t			sha_in_buf[DCP_BUF_SZ];
	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];

	uint8_t			aes_key[2 * AES_KEYSIZE_128];

	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
};

struct dcp {
	struct device			*dev;
	void __iomem			*base;

	uint32_t			caps;

	struct dcp_coherent_block	*coh;

	struct completion		completion[DCP_MAX_CHANS];
	spinlock_t			lock[DCP_MAX_CHANS];
	struct task_struct		*thread[DCP_MAX_CHANS];
	struct crypto_queue		queue[DCP_MAX_CHANS];
	struct clk			*dcp_clk;
};

enum dcp_chan {
	DCP_CHAN_HASH_SHA	= 0,
	DCP_CHAN_CRYPTO		= 2,
};

struct dcp_async_ctx {
	/* Common context */
	enum dcp_chan	chan;
	uint32_t	fill;

	/* SHA Hash-specific context */
	struct mutex			mutex;
	uint32_t			alg;
	unsigned int			hot:1;

	/* Crypto-specific context */
	struct crypto_skcipher		*fallback;
	unsigned int			key_len;
	uint8_t				key[AES_KEYSIZE_128];
	bool				key_referenced;
};

struct dcp_aes_req_ctx {
	unsigned int	enc:1;
	unsigned int	ecb:1;
	struct skcipher_request fallback_req;	// keep at the end
};

struct dcp_sha_req_ctx {
	unsigned int	init:1;
	unsigned int	fini:1;
};

struct dcp_export_state {
	struct dcp_sha_req_ctx	req_ctx;
	struct dcp_async_ctx	async_ctx;
};

/*
 * Only one instance of the MXS DCP can exist, due to the design of the
 * Linux Crypto API.
 */
static struct dcp *global_sdcp;

/* DCP register layout. */
#define MXS_DCP_CTRL				0x00
#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)

#define MXS_DCP_STAT				0x10
#define MXS_DCP_STAT_CLR			0x18
#define MXS_DCP_STAT_IRQ_MASK			0xf

#define MXS_DCP_CHANNELCTRL			0x20
#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff

#define MXS_DCP_CAPABILITY1			0x40
#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)

#define MXS_DCP_CONTEXT				0x50

#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))

#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))

#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))

/* DMA descriptor bits. */
#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
#define MXS_DCP_CONTROL0_OTP_KEY		(1 << 10)
#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)

#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)

#define MXS_DCP_CONTROL1_KEY_SELECT_SHIFT	8

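/*
 * Kick one descriptor into a DCP channel and wait for it to finish:
 * clear the channel status, point CMDPTR at the DMA-mapped descriptor,
 * then increment the channel semaphore to start the transfer.
 * Completion is signalled through the channel IRQ.
 */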
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
	int dma_err;
	struct dcp *sdcp = global_sdcp;
	const int chan = actx->chan;
	uint32_t stat;
	unsigned long ret;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
					      DMA_TO_DEVICE);

	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
	if (dma_err)
		return dma_err;

	reinit_completion(&sdcp->completion[chan]);

	/* Clear status register. */
	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));

	/* Load the DMA descriptor. */
	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));

	/* Increment the semaphore to start the DMA transfer. */
	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));

	ret = wait_for_completion_timeout(&sdcp->completion[chan],
					  msecs_to_jiffies(1000));
	if (!ret) {
		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
			chan, readl(sdcp->base + MXS_DCP_STAT));
		dma_err = -ETIMEDOUT;
		goto out;
	}

	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
	if (stat & 0xff) {
		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
			chan, stat);
		dma_err = -EINVAL;
	}

out:
	/* Unmap on all paths so error returns do not leak the mapping. */
	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);

	return dma_err;
}

/*
 * Encryption (AES128)
 */
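/*
 * Run a single DMA descriptor for one AES pass over the coherent bounce
 * buffers. The key (and, for CBC, the IV stored right after it) travels
 * via the payload pointer, unless a referenced key is selected, in
 * which case OTP_KEY and the CONTROL1 KEY_SELECT field name a hardware
 * key slot instead.
 */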
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
			   struct skcipher_request *req, int init)
{
	dma_addr_t key_phys = 0;
	dma_addr_t src_phys, dst_phys;
	struct dcp *sdcp = global_sdcp;
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	bool key_referenced = actx->key_referenced;
	int ret;

	if (!key_referenced) {
		key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
					  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
		ret = dma_mapping_error(sdcp->dev, key_phys);
		if (ret)
			return ret;
	}

	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
				  DCP_BUF_SZ, DMA_TO_DEVICE);
	ret = dma_mapping_error(sdcp->dev, src_phys);
	if (ret)
		goto err_src;

	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
				  DCP_BUF_SZ, DMA_FROM_DEVICE);
	ret = dma_mapping_error(sdcp->dev, dst_phys);
	if (ret)
		goto err_dst;

	if (actx->fill % AES_BLOCK_SIZE) {
		dev_err(sdcp->dev, "Invalid block size!\n");
		ret = -EINVAL;
		goto aes_done_run;
	}

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_CIPHER;

	if (key_referenced)
		/* Set OTP key bit to select the key via KEY_SELECT. */
		desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY;
	else
		/* Payload contains the key. */
		desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;

	if (rctx->enc)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
	if (init)
		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;

	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;

	if (rctx->ecb)
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
	else
		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;

	if (key_referenced)
		desc->control1 |= sdcp->coh->aes_key[0] << MXS_DCP_CONTROL1_KEY_SELECT_SHIFT;

	desc->next_cmd_addr = 0;
	desc->source = src_phys;
	desc->destination = dst_phys;
	desc->size = actx->fill;
	desc->payload = key_phys;
	desc->status = 0;

	ret = mxs_dcp_start_dma(actx);

aes_done_run:
	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
	if (!key_referenced)
		dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
				 DMA_TO_DEVICE);
	return ret;
}

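/*
 * Process one queued skcipher request: stage the source scatterlist
 * through the bounce buffer, run the engine, and scatter the results
 * back out, carrying the CBC IV across submissions.
 */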
static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct skcipher_request *req = skcipher_request_cast(arq);
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	int dst_nents = sg_nents(dst);

	const int out_off = DCP_BUF_SZ;
	uint8_t *in_buf = sdcp->coh->aes_in_buf;
	uint8_t *out_buf = sdcp->coh->aes_out_buf;

	uint32_t dst_off = 0;
	uint8_t *src_buf = NULL;
	uint32_t last_out_len = 0;

	uint8_t *key = sdcp->coh->aes_key;

	int ret = 0;
	unsigned int i, len, clen, tlen = 0;
	int init = 0;
	bool limit_hit = false;

	actx->fill = 0;

	/* Copy the key from the temporary location. */
	memcpy(key, actx->key, actx->key_len);

	if (!rctx->ecb) {
		/* Copy the CBC IV just past the key. */
		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
		/* CBC needs the INIT set. */
		init = 1;
	} else {
		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
	}

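	/*
	 * Walk the source scatterlist and stage it through the coherent
	 * bounce buffer in DCP_BUF_SZ chunks; each full chunk (or the
	 * final partial one) is run through the engine and then copied
	 * out to the destination scatterlist.
	 */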
	for_each_sg(req->src, src, sg_nents(req->src), i) {
		src_buf = sg_virt(src);
		len = sg_dma_len(src);
		tlen += len;
		limit_hit = tlen > req->cryptlen;

		if (limit_hit)
			len = req->cryptlen - (tlen - len);

		do {
			if (actx->fill + len > out_off)
				clen = out_off - actx->fill;
			else
				clen = len;

			memcpy(in_buf + actx->fill, src_buf, clen);
			len -= clen;
			src_buf += clen;
			actx->fill += clen;

			/*
			 * If we filled the buffer or this is the last SG,
			 * submit the buffer.
			 */
			if (actx->fill == out_off || sg_is_last(src) ||
			    limit_hit) {
				ret = mxs_dcp_run_aes(actx, req, init);
				if (ret)
					return ret;
				init = 0;

				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
						     actx->fill, dst_off);
				dst_off += actx->fill;
				last_out_len = actx->fill;
				actx->fill = 0;
			}
		} while (len);

		if (limit_hit)
			break;
	}

	/* Copy the IV for CBC for chaining */
	if (!rctx->ecb) {
		if (rctx->enc)
			memcpy(req->iv, out_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
		else
			memcpy(req->iv, in_buf + (last_out_len - AES_BLOCK_SIZE),
			       AES_BLOCK_SIZE);
	}

	return ret;
}

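/*
 * Per-channel worker: dequeue requests from the AES channel's crypto
 * queue and run them, signalling -EINPROGRESS for any request promoted
 * out of the backlog.
 */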
static int dcp_chan_thread_aes(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_CRYPTO;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;

	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

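/*
 * Hand the request to the software fallback skcipher; used whenever the
 * key size cannot be handled by the DCP hardware (anything but AES-128).
 */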
static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	if (enc)
		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		ret = crypto_skcipher_decrypt(&rctx->fallback_req);

	return ret;
}

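/*
 * Queue the request on the AES channel and wake the worker thread; the
 * software fallback is taken directly for unsupported key sizes.
 */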
static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
{
	struct dcp *sdcp = global_sdcp;
	struct crypto_async_request *arq = &req->base;
	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	int ret;

	if (unlikely(actx->key_len != AES_KEYSIZE_128 && !actx->key_referenced))
		return mxs_dcp_block_fallback(req, enc);

	rctx->enc = enc;
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

	return ret;
}

static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 1);
}

static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 1);
}

static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 0, 0);
}

static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mxs_dcp_aes_enqueue(req, 1, 0);
}

static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	/*
	 * AES-128 is supported by the hardware, so store the key into the
	 * temporary buffer and exit. We must use the temporary buffer here,
	 * since there can still be an operation in progress.
	 */
	actx->key_len = len;
	actx->key_referenced = false;
	if (len == AES_KEYSIZE_128) {
		memcpy(actx->key, key, len);
		return 0;
	}

	/*
	 * If the requested AES key size is not supported by the hardware,
	 * but is supported by the in-kernel software implementation, we use
	 * the software fallback.
	 */
	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(actx->fallback,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(actx->fallback, key, len);
}

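/*
 * "paes" keys carry no key material; the first byte of the blob selects
 * a DCP hardware key slot (or the unique/OTP key), which is later
 * written into the CONTROL1 KEY_SELECT field.
 */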
static int mxs_dcp_aes_setrefkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	if (len != DCP_PAES_KEYSIZE)
		return -EINVAL;

	switch (key[0]) {
	case DCP_PAES_KEY_SLOT0:
	case DCP_PAES_KEY_SLOT1:
	case DCP_PAES_KEY_SLOT2:
	case DCP_PAES_KEY_SLOT3:
	case DCP_PAES_KEY_UNIQUE:
	case DCP_PAES_KEY_OTP:
		memcpy(actx->key, key, len);
		actx->key_len = len;
		actx->key_referenced = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *blk;

	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	actx->fallback = blk;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
					 crypto_skcipher_reqsize(blk));
	return 0;
}

static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
{
	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(actx->fallback);
}

static int mxs_dcp_paes_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));

	return 0;
}

/*
 * Hashing (SHA1/SHA256)
 */
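/*
 * Run one hash pass over sha_in_buf. HASH_INIT/HASH_TERM mark the first
 * and last block of the stream; for the final block the digest payload
 * buffer is mapped and handed to the engine. A zero-length init+fini
 * request short-circuits to the precomputed null hashes above, matching
 * hardware behaviour.
 */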
static int mxs_dcp_run_sha(struct ahash_request *req)
{
	struct dcp *sdcp = global_sdcp;
	int ret;

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];

	dma_addr_t digest_phys = 0;
	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
					     DCP_BUF_SZ, DMA_TO_DEVICE);

	ret = dma_mapping_error(sdcp->dev, buf_phys);
	if (ret)
		return ret;

	/* Fill in the DMA descriptor. */
	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
			 MXS_DCP_CONTROL0_INTERRUPT |
			 MXS_DCP_CONTROL0_ENABLE_HASH;
	if (rctx->init)
		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;

	desc->control1 = actx->alg;
	desc->next_cmd_addr = 0;
	desc->source = buf_phys;
	desc->destination = 0;
	desc->size = actx->fill;
	desc->payload = 0;
	desc->status = 0;

	/*
	 * Align driver with hw behavior when generating null hashes
	 */
	if (rctx->init && rctx->fini && desc->size == 0) {
		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
		const uint8_t *sha_buf =
			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
			sha1_null_hash : sha256_null_hash;
		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
		ret = 0;
		goto done_run;
	}

	/* Set HASH_TERM bit for last transfer block. */
	if (rctx->fini) {
		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
		ret = dma_mapping_error(sdcp->dev, digest_phys);
		if (ret)
			goto done_run;

		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
		desc->payload = digest_phys;
	}

	ret = mxs_dcp_start_dma(actx);

	if (rctx->fini)
		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
				 DMA_FROM_DEVICE);

done_run:
	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);

	return ret;
}

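/*
 * Accumulate request data into sha_in_buf, submitting a pass to the
 * engine each time the buffer fills; on the final request, run the
 * terminating pass and copy out the (byte-reversed) digest.
 */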
static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
{
	struct dcp *sdcp = global_sdcp;

	struct ahash_request *req = ahash_request_cast(arq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	uint8_t *in_buf = sdcp->coh->sha_in_buf;
	uint8_t *out_buf = sdcp->coh->sha_out_buf;

	struct scatterlist *src;

	unsigned int i, len, clen, oft = 0;
	int ret;

	int fin = rctx->fini;

	if (fin)
		rctx->fini = 0;

	src = req->src;
	len = req->nbytes;

	while (len) {
		if (actx->fill + len > DCP_BUF_SZ)
			clen = DCP_BUF_SZ - actx->fill;
		else
			clen = len;

		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
					 0);

		len -= clen;
		oft += clen;
		actx->fill += clen;

		/*
		 * If we filled the buffer and still have some
		 * more data, submit the buffer.
		 */
		if (len && actx->fill == DCP_BUF_SZ) {
			ret = mxs_dcp_run_sha(req);
			if (ret)
				return ret;
			actx->fill = 0;
			rctx->init = 0;
		}
	}

	if (fin) {
		rctx->fini = 1;

		/* Submit whatever is left. */
		if (!req->result)
			return -EINVAL;

		ret = mxs_dcp_run_sha(req);
		if (ret)
			return ret;

		actx->fill = 0;

		/* The hardware emits the digest byte-reversed, so flip it. */
		for (i = 0; i < halg->digestsize; i++)
			req->result[i] = out_buf[halg->digestsize - i - 1];
	}

	return 0;
}

static int dcp_chan_thread_sha(void *data)
{
	struct dcp *sdcp = global_sdcp;
	const int chan = DCP_CHAN_HASH_SHA;

	struct crypto_async_request *backlog;
	struct crypto_async_request *arq;
	int ret;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (arq) {
			ret = dcp_sha_req_to_buf(arq);
			crypto_request_complete(arq, ret);
		}
	}

	return 0;
}

static int dcp_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);

	/*
	 * Start hashing session. The code below only inits the
	 * hashing session context, nothing more.
	 */
	memset(actx, 0, sizeof(*actx));

	if (strcmp(halg->base.cra_name, "sha1") == 0)
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
	else
		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;

	actx->fill = 0;
	actx->hot = 0;
	actx->chan = DCP_CHAN_HASH_SHA;

	mutex_init(&actx->mutex);

	return 0;
}

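/*
 * Common update/final path: mark the first submission of a session as
 * HASH_INIT ("hot"), then queue the request on the SHA channel and wake
 * the worker. Empty non-final requests are ignored.
 */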
static int dcp_sha_update_fx(struct ahash_request *req, int fini)
{
	struct dcp *sdcp = global_sdcp;

	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);

	int ret;

	/*
	 * Ignore requests that have no data in them and are not
	 * the trailing requests in the stream of requests.
	 */
	if (!req->nbytes && !fini)
		return 0;

	mutex_lock(&actx->mutex);

	rctx->fini = fini;

	if (!actx->hot) {
		actx->hot = 1;
		rctx->init = 1;
	}

	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

	return ret;
}

static int dcp_sha_update(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 0);
}

static int dcp_sha_final(struct ahash_request *req)
{
	ahash_request_set_crypt(req, NULL, req->result, 0);
	req->nbytes = 0;
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_finup(struct ahash_request *req)
{
	return dcp_sha_update_fx(req, 1);
}

static int dcp_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = dcp_sha_init(req);
	if (ret)
		return ret;

	return dcp_sha_finup(req);
}

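/*
 * Export/import simply snapshot the request and transform contexts into
 * the opaque dcp_export_state, so a partial hash can be suspended and
 * resumed later.
 */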
static int dcp_sha_import(struct ahash_request *req, const void *in)
{
	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
	const struct dcp_export_state *export = in;

	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
	memset(actx, 0, sizeof(struct dcp_async_ctx));
	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_export(struct ahash_request *req, void *out)
{
	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
	struct dcp_export_state *export = out;

	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));

	return 0;
}

static int dcp_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct dcp_sha_req_ctx));
	return 0;
}

static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

/* AES 128 ECB and AES 128 CBC */
static struct skcipher_alg dcp_aes_algs[] = {
	{
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-dcp",
		.base.cra_priority	= 400,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= mxs_dcp_aes_setkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_aes_fallback_init_tfm,
		.exit			= mxs_dcp_aes_fallback_exit_tfm,
	}, {
		.base.cra_name		= "ecb(paes)",
		.base.cra_driver_name	= "ecb-paes-dcp",
		.base.cra_priority	= 401,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= DCP_PAES_KEYSIZE,
		.max_keysize		= DCP_PAES_KEYSIZE,
		.setkey			= mxs_dcp_aes_setrefkey,
		.encrypt		= mxs_dcp_aes_ecb_encrypt,
		.decrypt		= mxs_dcp_aes_ecb_decrypt,
		.init			= mxs_dcp_paes_init_tfm,
	}, {
		.base.cra_name		= "cbc(paes)",
		.base.cra_driver_name	= "cbc-paes-dcp",
		.base.cra_priority	= 401,
		.base.cra_alignmask	= 15,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_INTERNAL,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= DCP_PAES_KEYSIZE,
		.max_keysize		= DCP_PAES_KEYSIZE,
		.setkey			= mxs_dcp_aes_setrefkey,
		.encrypt		= mxs_dcp_aes_cbc_encrypt,
		.decrypt		= mxs_dcp_aes_cbc_decrypt,
		.ivsize			= AES_BLOCK_SIZE,
		.init			= mxs_dcp_paes_init_tfm,
	},
};

/* SHA1 */
static struct ahash_alg dcp_sha1_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sha1-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

/* SHA256 */
static struct ahash_alg dcp_sha256_alg = {
	.init	= dcp_sha_init,
	.update	= dcp_sha_update,
	.final	= dcp_sha_final,
	.finup	= dcp_sha_finup,
	.digest	= dcp_sha_digest,
	.import	= dcp_sha_import,
	.export	= dcp_sha_export,
	.halg	= {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct dcp_export_state),
		.base		= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-dcp",
			.cra_priority		= 400,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
			.cra_module		= THIS_MODULE,
			.cra_init		= dcp_sha_cra_init,
			.cra_exit		= dcp_sha_cra_exit,
		},
	},
};

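/*
 * Both DCP interrupt lines share this handler: acknowledge the
 * per-channel bits in DCP_STAT and complete the matching waiters.
 */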
static irqreturn_t mxs_dcp_irq(int irq, void *context)
{
	struct dcp *sdcp = context;
	uint32_t stat;
	int i;

	stat = readl(sdcp->base + MXS_DCP_STAT);
	stat &= MXS_DCP_STAT_IRQ_MASK;
	if (!stat)
		return IRQ_NONE;

	/* Clear the interrupts. */
	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);

	/* Complete the DMA requests that finished. */
	for (i = 0; i < DCP_MAX_CHANS; i++)
		if (stat & (1 << i))
			complete(&sdcp->completion[i]);

	return IRQ_HANDLED;
}

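/*
 * Probe: map the block, hook up both IRQ lines, carve out the aligned
 * coherent helper block, reset and configure the DCP, spawn the two
 * channel threads, and register whichever algorithms the CAPABILITY1
 * register advertises.
 */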
static int mxs_dcp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dcp *sdcp = NULL;
	int i, ret;
	int dcp_vmi_irq, dcp_irq;

	if (global_sdcp) {
		dev_err(dev, "Only one DCP instance allowed!\n");
		return -ENODEV;
	}

	dcp_vmi_irq = platform_get_irq(pdev, 0);
	if (dcp_vmi_irq < 0)
		return dcp_vmi_irq;

	dcp_irq = platform_get_irq(pdev, 1);
	if (dcp_irq < 0)
		return dcp_irq;

	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
	if (!sdcp)
		return -ENOMEM;

	sdcp->dev = dev;
	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sdcp->base))
		return PTR_ERR(sdcp->base);

	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
			       "dcp-vmi-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
		return ret;
	}

	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
			       "dcp-irq", sdcp);
	if (ret) {
		dev_err(dev, "Failed to claim DCP IRQ!\n");
		return ret;
	}

	/* Allocate coherent helper block. */
	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
				 GFP_KERNEL);
	if (!sdcp->coh)
		return -ENOMEM;

	/* Re-align the structure so it fits the DCP constraints. */
	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);

	/* DCP clock is optional, only used on some SOCs */
	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
	if (IS_ERR(sdcp->dcp_clk))
		return PTR_ERR(sdcp->dcp_clk);

	/* Restart the DCP block. */
	ret = stmp_reset_block(sdcp->base);
	if (ret) {
		dev_err(dev, "Failed reset\n");
		return ret;
	}

	/* Initialize control register. */
	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
	       sdcp->base + MXS_DCP_CTRL);

	/* Enable all DCP DMA channels. */
	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
	       sdcp->base + MXS_DCP_CHANNELCTRL);

	/*
	 * We do not enable context switching. Give the context buffer a
	 * pointer to an illegal address so if context switching is
	 * inadvertently enabled, the DCP will return an error instead of
	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
	 * address will do.
	 */
	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
	for (i = 0; i < DCP_MAX_CHANS; i++)
		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);

	global_sdcp = sdcp;

	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

	/* Create the SHA and AES handler threads. */
	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
						      NULL, "mxs_dcp_chan/sha");
	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
		dev_err(dev, "Error starting SHA thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
		return ret;
	}

	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
						    NULL, "mxs_dcp_chan/aes");
	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
		dev_err(dev, "Error starting AES thread!\n");
		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
		goto err_destroy_sha_thread;
	}

	/* Register the various crypto algorithms. */
	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
		ret = crypto_register_skciphers(dcp_aes_algs,
						ARRAY_SIZE(dcp_aes_algs));
		if (ret) {
			/* Failed to register algorithm. */
			dev_err(dev, "Failed to register AES crypto!\n");
			goto err_destroy_aes_thread;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
		ret = crypto_register_ahash(&dcp_sha1_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha1_alg.halg.base.cra_name);
			goto err_unregister_aes;
		}
	}

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
		ret = crypto_register_ahash(&dcp_sha256_alg);
		if (ret) {
			dev_err(dev, "Failed to register %s hash!\n",
				dcp_sha256_alg.halg.base.cra_name);
			goto err_unregister_sha1;
		}
	}

	return 0;

err_unregister_sha1:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

err_unregister_aes:
	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

err_destroy_aes_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

err_destroy_sha_thread:
	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);

	return ret;
}

static void mxs_dcp_remove(struct platform_device *pdev)
{
	struct dcp *sdcp = platform_get_drvdata(pdev);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
		crypto_unregister_ahash(&dcp_sha256_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
		crypto_unregister_ahash(&dcp_sha1_alg);

	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));

	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);

	platform_set_drvdata(pdev, NULL);

	global_sdcp = NULL;
}

static const struct of_device_id mxs_dcp_dt_ids[] = {
	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);

static struct platform_driver mxs_dcp_driver = {
	.probe	= mxs_dcp_probe,
	.remove_new = mxs_dcp_remove,
	.driver	= {
		.name		= "mxs-dcp",
		.of_match_table	= mxs_dcp_dt_ids,
	},
};

module_platform_driver(mxs_dcp_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Freescale MXS DCP Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-dcp");