Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/crypto/mediatek/mtk-aes.c at v5.7-rc3 (1327 lines, 34 kB)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-aes.c drivers.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/skcipher.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE		512
#define AES_BUF_ORDER		2
#define AES_BUF_SIZE		((PAGE_SIZE << AES_BUF_ORDER) \
				& ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE	SIZE_IN_WORDS(AES_KEYSIZE_256 + \
				AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE		6

#define AES_CT_CTRL_HDR		cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR/OFB/CFB command token */
#define AES_CMD0		cpu_to_le32(0x05000000)
#define AES_CMD1		cpu_to_le32(0x2d060000)
#define AES_CMD2		cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0		cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1		cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2		cpu_to_le32(0x25000010)
#define AES_GCM_CMD3		cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4		cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5		cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6		cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT	cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN	cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT		cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN		cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define AES_TFM_128BITS		cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS		cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS		cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST	cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH		cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB		cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC		cpu_to_le32(0x1 << 0)
#define AES_TFM_OFB		cpu_to_le32(0x4 << 0)
#define AES_TFM_CFB128		cpu_to_le32(0x5 << 0)
#define AES_TFM_CTR_INIT	cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD	cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV		cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV		cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE	cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH	cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK	GENMASK(4, 0)
#define AES_FLAGS_ECB		BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CTR		BIT(2)
#define AES_FLAGS_OFB		BIT(3)
#define AES_FLAGS_CFB128	BIT(4)
#define AES_FLAGS_GCM		BIT(5)
#define AES_FLAGS_ENCRYPT	BIT(6)
#define AES_FLAGS_BUSY		BIT(7)

#define AES_AUTH_TAG_ERR	cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of cipher algorithm.
 * @state:	contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to do:
 * - Command decoding and control of the engine's data path.
 * - Coordinating hardware data fetch and store operations.
 * - Result token construction and output.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};
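
/*
 * Illustrative state layout for an AES-256 GCM transform, assuming
 * SIZE_IN_WORDS() converts bytes to 32-bit words:
 *
 *   state[0..7]    AES-256 key                (8 words)
 *   state[8..11]   GHASH key H = E_K(0^128)   (4 words)
 *   state[12..15]  IV words                   (4 words)
 *
 * which is exactly AES_MAX_STATE_BUF_SIZE (16 words). Shorter keys shift
 * the hash key and IVs down accordingly; see the ctx->keylen offsets used
 * throughout this file.
 */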

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 key[12];
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx	base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32	iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}
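
/*
 * Example of the helpers above, assuming AES_BLOCK_SIZE == 16:
 * mtk_aes_padlen(30) == 2 and mtk_aes_padlen(32) == 0. When a scatterlist
 * is directly usable for DMA, mtk_aes_check_aligned() trims the final
 * entry to the exact transfer length and stashes the cut-off byte count
 * in dma->remainder; mtk_aes_restore_sg() below puts those bytes back
 * once the transfer has finished. Unusable lists are instead bounced
 * through the pre-allocated aes->buf (see mtk_aes_dma()).
 */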

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}

static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}

static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
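
/*
 * A transfer thus consumes sg_len command and result descriptors from the
 * per-ring circular arrays (MTK_DESC_NUM entries each). Only the first
 * command descriptor carries the control-token and transform-record
 * pointers; writing the prepared counts to CDR_PREP_COUNT/RDR_PREP_COUNT
 * is what actually hands the descriptors to the engine, hence the wmb()
 * beforehand.
 */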

static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}

static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	case AES_FLAGS_OFB:
		info->tfm[1] = AES_TFM_OFB;
		break;
	case AES_FLAGS_CFB128:
		info->tfm[1] = AES_TFM_CFB128;
		break;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, (void *)req->iv,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
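
/*
 * Resulting command-token sizes from the switch above: ECB skips the IV
 * entirely (ct_size == 2, AES_CMD0/AES_CMD1 only); CBC, OFB and CFB128
 * copy the 16-byte request IV into the state and finish with AES_CMD2
 * (ct_size == 3); CTR also ends with AES_CMD2 but loads its running
 * counter from mtk_aes_ctr_transfer() instead of req->iv.
 */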

static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}

static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);
	/* Write key into state buffer */
	memcpy(ctx->info.state, ctx->key, sizeof(ctx->key));

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen);
}
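
/*
 * Lifecycle of one basic request, as wired above: mtk_aes_handle_queue()
 * dequeues and calls ctx->start (mtk_aes_start), which submits the
 * descriptors via mtk_aes_dma()/mtk_aes_map()/mtk_aes_xmit() and returns
 * -EINPROGRESS. The ring interrupt then schedules done_task, which unmaps
 * the DMA buffers and invokes aes->resume, here
 * mtk_aes_transfer_complete(), completing the request and kicking the
 * queue tasklet for the next one.
 */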

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}

static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->cryptlen)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->cryptlen - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr = 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}
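
/*
 * Worked example of the overflow split above: with the counter word at
 * 0xfffffffe and eight blocks left, end == 5 wraps below start, so this
 * pass is capped at datalen == AES_BLOCK_SIZE * 2 (the two counter values
 * left before the wrap). The IV is then stepped past 0xffffffff with
 * crypto_inc(), propagating the carry into the upper IV words, and the
 * next mtk_aes_ctr_transfer() call handles the remaining six blocks.
 */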

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct skcipher_request *req = skcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check and set the AES key into the transform state buffer */
static int mtk_aes_setkey(struct crypto_skcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);

	return 0;
}

static int mtk_aes_crypt(struct skcipher_request *req, u64 mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct mtk_aes_reqctx *rctx;
	struct mtk_cryp *cryp;

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}
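
/*
 * Note the record selection above: !(mode & AES_FLAGS_ENCRYPT) queues
 * encryption on record 0 (ring 0) and decryption on record 1 (ring 1),
 * so the two directions of a flow can run in parallel (see the comment
 * above mtk_aes_record_init() at the bottom of this file).
 */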

static int mtk_aes_ecb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_ofb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int mtk_aes_ofb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_OFB);
}

static int mtk_aes_cfb_encrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128);
}

static int mtk_aes_cfb_decrypt(struct skcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CFB128);
}

static int mtk_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cbc_encrypt,
	.decrypt		= mtk_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ecb_encrypt,
	.decrypt		= mtk_aes_ecb_decrypt,
	.init			= mtk_aes_init_tfm,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ctr_encrypt,
	.decrypt		= mtk_aes_ctr_decrypt,
	.init			= mtk_aes_ctr_init_tfm,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "ofb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_ofb_encrypt,
	.decrypt		= mtk_aes_ofb_decrypt,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "cfb-aes-mtk",
	.base.cra_priority	= 400,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct mtk_aes_ctx),
	.base.cra_alignmask	= 0xf,
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= mtk_aes_setkey,
	.encrypt		= mtk_aes_cfb_encrypt,
	.decrypt		= mtk_aes_cfb_decrypt,
},
};
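
/*
 * A minimal usage sketch (hypothetical caller, not part of this driver):
 * with this module loaded, cra_priority 400 should make these entries win
 * the generic algorithm names over the software implementations, e.g.:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * Note that the "ofb(aes)" and "cfb(aes)" entries above, unlike the
 * others, set no .init callback.
 */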

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}

static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
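
/*
 * GCM length bookkeeping example, assuming assoclen == 20, cryptlen ==
 * 100 and a 16-byte tag: encryption feeds len == 120 bytes through the
 * engine and reserves total == 136 so the hardware can append the tag
 * (matching the extra AES_BLOCK_SIZE added in mtk_aes_xmit()); for
 * decryption cryptlen already includes the tag, the engine recomputes and
 * compares it, and mtk_aes_gcm_tag_verify() only inspects the status bit.
 */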

static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	struct mtk_cryp *cryp;
	bool enc = !!(mode & AES_FLAGS_ENCRYPT);

	cryp = mtk_aes_find_dev(ctx);
	if (!cryp)
		return -ENODEV;

	/* Compute text length. */
	gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(cryp, enc, &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-calculate the key (H)
 * for the GHASH operation. The result of the encryption operation needs
 * to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;

	default:
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	mtk_aes_write_state_le(ctx->key, (const u32 *)key, keylen);
	mtk_aes_write_state_be(ctx->key + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}
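
/*
 * The ctr(aes) round trip above is a portable way to derive the GHASH
 * key H, the block-cipher encryption of an all-zero block (see the
 * layout comment at the top of this file): data is zeroed by kzalloc(),
 * one block is encrypted in place, and the result is stored big-endian
 * directly after the AES key words, where the engine expects it.
 */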

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= mtk_aes_gcm_setkey,
	.setauthsize	= mtk_aes_gcm_setauthsize,
	.encrypt	= mtk_aes_gcm_encrypt,
	.decrypt	= mtk_aes_gcm_decrypt,
	.init		= mtk_aes_gcm_init,
	.exit		= mtk_aes_gcm_exit,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-mtk",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask		= 0xf,
		.cra_module		= THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}

static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound and inbound data in parallel, which improves performance in
 * most use cases, such as IPsec VPN, especially under heavy network
 * traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		free_page((unsigned long)aes[i]->buf);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		free_page((unsigned long)cryp->aes[i]->buf);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_skcipher(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}
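
/*
 * mtk_cipher_alg_register() and mtk_cipher_alg_release() are the only
 * external entry points of this file (declared in mtk-platform.h); they
 * are presumably invoked by the EIP97 platform code during device probe
 * and removal.
 */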