// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from the atmel-aes.c driver.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * | AES KEY   | 128/192/256 bits
 * |-----------|
 * | HASH KEY  | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * | IVs       | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode commands and control the engine's data path,
 * - coordinate hardware data fetch and store operations,
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
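
/*
 * Note: state[] is addressed in 32-bit words throughout this driver.
 * The AES key occupies the first ctx->keylen words; the block-cipher
 * modes store the IV right after the key, while GCM stores the GHASH
 * key H after the key and the IV/counter after that (see
 * mtk_aes_info_init() and mtk_aes_gcm_info_init()).
 */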

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

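/*
 * Return the number of padding bytes needed to round @len up to the next
 * AES block boundary, or 0 if @len is already block aligned.
 */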
static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

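/*
 * Walk @sg and decide whether it can be handed to the DMA engine as-is:
 * every entry must be 32-bit aligned and, except for the final entry,
 * a multiple of the AES block size. On success the last entry is trimmed
 * to end exactly at @len and the trimmed-off remainder is recorded in
 * @dma so that mtk_aes_restore_sg() can undo the change later.
 */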
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

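/*
 * Undo the scatterlist trimming done by mtk_aes_check_aligned(): put the
 * bytes that were split off the last mapped entry back onto its length.
 */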
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

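/*
 * Copy @size bytes of key/IV material into the transform state buffer,
 * fixing up endianness: the engine expects most state words in
 * little-endian order, while the GHASH key H is stored big-endian
 * (see mtk_aes_gcm_setkey()).
 */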
static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for the authentication tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

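	/*
	 * Pick the mode word and note the deliberate fallthrough below:
	 * CBC/OFB/CFB break out of the switch, write the IV into the state
	 * buffer and then share the CTR tail (which grows the transform
	 * record by one IV-sized block and emits AES_CMD2), while ECB
	 * jumps straight to the end since it has no IV at all.
	 */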
	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}

static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

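/*
 * Per-record request queue: enqueue the new request and, if the record is
 * idle, dequeue the next one, mark the record busy and kick the context's
 * start() handler. If the record is already busy the request simply stays
 * queued and is picked up by the queue tasklet on completion.
 */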
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

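/*
 * CTR requests are processed in chunks: each chunk ends either at the end
 * of the request or right where the 32-bit big-endian counter in IV word 3
 * would wrap. For example, with the counter at 0xfffffffe and four blocks
 * left, only two blocks are submitted; the counter is then advanced in
 * software (carrying into IV word 2) and the transfer resumes here.
 */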
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/*
 * Check the AES key length and cache the key; it is copied into the
 * transform state buffer when a request starts.
 */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

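	/*
	 * Encryption requests are queued on record/ring 0 and decryption
	 * requests on record/ring 1, so both directions can be processed
	 * in parallel (see the comment above mtk_aes_record_init()).
	 */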
	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}

static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

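/*
 * These algorithms are reached through the regular kernel crypto API,
 * e.g. crypto_alloc_skcipher("cbc(aes)", 0, 0) followed by setkey and an
 * skcipher request; the high cra_priority makes the crypto core prefer
 * this hardware implementation when it is available. The ctr(aes)
 * allocation in mtk_aes_gcm_setkey() below is an in-file example of that
 * usage pattern.
 */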
static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cbc_encrypt,
	.decrypt		= mtk_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ecb_encrypt,
	.decrypt		= mtk_aes_ecb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ctr_encrypt,
	.decrypt		= mtk_aes_ctr_decrypt,
	.init			= mtk_aes_ctr_init_tfm,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ofb_encrypt,
	.decrypt		= mtk_aes_ofb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cfb_encrypt,
	.decrypt		= mtk_aes_cfb_decrypt,
	.init			= mtk_aes_init_tfm,
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* TODO: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append the authentication tag to the output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	struct mtk_cryp *cryp;
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-compute the hash key H
 * for the GHASH operation by encrypting an all-zero block with the cipher
 * key. The result of that encryption must be stored in the transform
 * state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
	mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

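/*
 * Ring interrupt handler: acknowledge the result-ring status, reset the
 * processed-descriptor counter and re-arm the interrupt threshold, then
 * defer completion handling to the done tasklet.
 */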
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound and inbound data in parallel, which can improve performance in
 * most use cases, such as IPsec VPN, especially under heavy network
 * traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		/* buf spans 1 << AES_BUF_ORDER pages; free them all */
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_skcipher(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}