Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

crypto: mediatek - remove obsolete driver

The crypto MediaTek driver has now been replaced by the inside-secure
driver. Remove this driver to avoid having duplicate drivers.

Signed-off-by: Vic Wu <vic.wu@mediatek.com>
Acked-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

authored by

Vic Wu and committed by
Herbert Xu
6a702fa5 0aa171e9

-3650
-15
drivers/crypto/Kconfig
··· 772 772 accelerator. Select this if you want to use the ZynqMP module 773 773 for AES algorithms. 774 774 775 - config CRYPTO_DEV_MEDIATEK 776 - tristate "MediaTek's EIP97 Cryptographic Engine driver" 777 - depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST 778 - select CRYPTO_LIB_AES 779 - select CRYPTO_AEAD 780 - select CRYPTO_SKCIPHER 781 - select CRYPTO_SHA1 782 - select CRYPTO_SHA256 783 - select CRYPTO_SHA512 784 - select CRYPTO_HMAC 785 - help 786 - This driver allows you to utilize the hardware crypto accelerator 787 - EIP97 which can be found on the MT7623 MT2701, MT8521p, etc .... 788 - Select this if you want to use it for AES/SHA1/SHA2 algorithms. 789 - 790 775 source "drivers/crypto/chelsio/Kconfig" 791 776 792 777 source "drivers/crypto/virtio/Kconfig"
-1
drivers/crypto/Makefile
··· 19 19 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o 20 20 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 21 21 obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/ 22 - obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/ 23 22 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o 24 23 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o 25 24 n2_crypto-y := n2_core.o n2_asm.o
-3
drivers/crypto/mediatek/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mtk-crypto.o 3 - mtk-crypto-objs:= mtk-platform.o mtk-aes.o mtk-sha.o
-1271
drivers/crypto/mediatek/mtk-aes.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Cryptographic API. 4 - * 5 - * Driver for EIP97 AES acceleration. 6 - * 7 - * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com> 8 - * 9 - * Some ideas are from atmel-aes.c drivers. 10 - */ 11 - 12 - #include <crypto/aes.h> 13 - #include <crypto/gcm.h> 14 - #include <crypto/internal/skcipher.h> 15 - #include "mtk-platform.h" 16 - 17 - #define AES_QUEUE_SIZE 512 18 - #define AES_BUF_ORDER 2 19 - #define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \ 20 - & ~(AES_BLOCK_SIZE - 1)) 21 - #define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \ 22 - AES_BLOCK_SIZE * 2) 23 - #define AES_MAX_CT_SIZE 6 24 - 25 - #define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) 26 - 27 - /* AES-CBC/ECB/CTR/OFB/CFB command token */ 28 - #define AES_CMD0 cpu_to_le32(0x05000000) 29 - #define AES_CMD1 cpu_to_le32(0x2d060000) 30 - #define AES_CMD2 cpu_to_le32(0xe4a63806) 31 - /* AES-GCM command token */ 32 - #define AES_GCM_CMD0 cpu_to_le32(0x0b000000) 33 - #define AES_GCM_CMD1 cpu_to_le32(0xa0800000) 34 - #define AES_GCM_CMD2 cpu_to_le32(0x25000010) 35 - #define AES_GCM_CMD3 cpu_to_le32(0x0f020000) 36 - #define AES_GCM_CMD4 cpu_to_le32(0x21e60000) 37 - #define AES_GCM_CMD5 cpu_to_le32(0x40e60000) 38 - #define AES_GCM_CMD6 cpu_to_le32(0xd0070000) 39 - 40 - /* AES transform information word 0 fields */ 41 - #define AES_TFM_BASIC_OUT cpu_to_le32(0x4 << 0) 42 - #define AES_TFM_BASIC_IN cpu_to_le32(0x5 << 0) 43 - #define AES_TFM_GCM_OUT cpu_to_le32(0x6 << 0) 44 - #define AES_TFM_GCM_IN cpu_to_le32(0xf << 0) 45 - #define AES_TFM_SIZE(x) cpu_to_le32((x) << 8) 46 - #define AES_TFM_128BITS cpu_to_le32(0xb << 16) 47 - #define AES_TFM_192BITS cpu_to_le32(0xd << 16) 48 - #define AES_TFM_256BITS cpu_to_le32(0xf << 16) 49 - #define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21) 50 - #define AES_TFM_GHASH cpu_to_le32(0x4 << 23) 51 - /* AES transform information word 1 fields */ 52 - #define AES_TFM_ECB cpu_to_le32(0x0 << 0) 53 - #define 
AES_TFM_CBC cpu_to_le32(0x1 << 0) 54 - #define AES_TFM_OFB cpu_to_le32(0x4 << 0) 55 - #define AES_TFM_CFB128 cpu_to_le32(0x5 << 0) 56 - #define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */ 57 - #define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */ 58 - #define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */ 59 - #define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */ 60 - #define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10) 61 - #define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) 62 - 63 - /* AES flags */ 64 - #define AES_FLAGS_CIPHER_MSK GENMASK(4, 0) 65 - #define AES_FLAGS_ECB BIT(0) 66 - #define AES_FLAGS_CBC BIT(1) 67 - #define AES_FLAGS_CTR BIT(2) 68 - #define AES_FLAGS_OFB BIT(3) 69 - #define AES_FLAGS_CFB128 BIT(4) 70 - #define AES_FLAGS_GCM BIT(5) 71 - #define AES_FLAGS_ENCRYPT BIT(6) 72 - #define AES_FLAGS_BUSY BIT(7) 73 - 74 - #define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) 75 - 76 - /** 77 - * mtk_aes_info - hardware information of AES 78 - * @cmd: command token, hardware instruction 79 - * @tfm: transform state of cipher algorithm. 80 - * @state: contains keys and initial vectors. 81 - * 82 - * Memory layout of GCM buffer: 83 - * /-----------\ 84 - * | AES KEY | 128/196/256 bits 85 - * |-----------| 86 - * | HASH KEY | a string 128 zero bits encrypted using the block cipher 87 - * |-----------| 88 - * | IVs | 4 * 4 bytes 89 - * \-----------/ 90 - * 91 - * The engine requires all these info to do: 92 - * - Commands decoding and control of the engine's data path. 93 - * - Coordinating hardware data fetch and store operations. 94 - * - Result token construction and output. 
95 - */ 96 - struct mtk_aes_info { 97 - __le32 cmd[AES_MAX_CT_SIZE]; 98 - __le32 tfm[2]; 99 - __le32 state[AES_MAX_STATE_BUF_SIZE]; 100 - }; 101 - 102 - struct mtk_aes_reqctx { 103 - u64 mode; 104 - }; 105 - 106 - struct mtk_aes_base_ctx { 107 - struct mtk_cryp *cryp; 108 - u32 keylen; 109 - __le32 key[12]; 110 - __le32 keymode; 111 - 112 - mtk_aes_fn start; 113 - 114 - struct mtk_aes_info info; 115 - dma_addr_t ct_dma; 116 - dma_addr_t tfm_dma; 117 - 118 - __le32 ct_hdr; 119 - u32 ct_size; 120 - }; 121 - 122 - struct mtk_aes_ctx { 123 - struct mtk_aes_base_ctx base; 124 - }; 125 - 126 - struct mtk_aes_ctr_ctx { 127 - struct mtk_aes_base_ctx base; 128 - 129 - __be32 iv[AES_BLOCK_SIZE / sizeof(u32)]; 130 - size_t offset; 131 - struct scatterlist src[2]; 132 - struct scatterlist dst[2]; 133 - }; 134 - 135 - struct mtk_aes_gcm_ctx { 136 - struct mtk_aes_base_ctx base; 137 - 138 - u32 authsize; 139 - size_t textlen; 140 - }; 141 - 142 - struct mtk_aes_drv { 143 - struct list_head dev_list; 144 - /* Device list lock */ 145 - spinlock_t lock; 146 - }; 147 - 148 - static struct mtk_aes_drv mtk_aes = { 149 - .dev_list = LIST_HEAD_INIT(mtk_aes.dev_list), 150 - .lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock), 151 - }; 152 - 153 - static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset) 154 - { 155 - return readl_relaxed(cryp->base + offset); 156 - } 157 - 158 - static inline void mtk_aes_write(struct mtk_cryp *cryp, 159 - u32 offset, u32 value) 160 - { 161 - writel_relaxed(value, cryp->base + offset); 162 - } 163 - 164 - static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx) 165 - { 166 - struct mtk_cryp *cryp = NULL; 167 - struct mtk_cryp *tmp; 168 - 169 - spin_lock_bh(&mtk_aes.lock); 170 - if (!ctx->cryp) { 171 - list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) { 172 - cryp = tmp; 173 - break; 174 - } 175 - ctx->cryp = cryp; 176 - } else { 177 - cryp = ctx->cryp; 178 - } 179 - spin_unlock_bh(&mtk_aes.lock); 180 - 181 - return cryp; 182 - } 183 - 
184 - static inline size_t mtk_aes_padlen(size_t len) 185 - { 186 - len &= AES_BLOCK_SIZE - 1; 187 - return len ? AES_BLOCK_SIZE - len : 0; 188 - } 189 - 190 - static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len, 191 - struct mtk_aes_dma *dma) 192 - { 193 - int nents; 194 - 195 - if (!IS_ALIGNED(len, AES_BLOCK_SIZE)) 196 - return false; 197 - 198 - for (nents = 0; sg; sg = sg_next(sg), ++nents) { 199 - if (!IS_ALIGNED(sg->offset, sizeof(u32))) 200 - return false; 201 - 202 - if (len <= sg->length) { 203 - if (!IS_ALIGNED(len, AES_BLOCK_SIZE)) 204 - return false; 205 - 206 - dma->nents = nents + 1; 207 - dma->remainder = sg->length - len; 208 - sg->length = len; 209 - return true; 210 - } 211 - 212 - if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) 213 - return false; 214 - 215 - len -= sg->length; 216 - } 217 - 218 - return false; 219 - } 220 - 221 - static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes, 222 - const struct mtk_aes_reqctx *rctx) 223 - { 224 - /* Clear all but persistent flags and set request flags. */ 225 - aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode; 226 - } 227 - 228 - static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma) 229 - { 230 - struct scatterlist *sg = dma->sg; 231 - int nents = dma->nents; 232 - 233 - if (!dma->remainder) 234 - return; 235 - 236 - while (--nents > 0 && sg) 237 - sg = sg_next(sg); 238 - 239 - if (!sg) 240 - return; 241 - 242 - sg->length += dma->remainder; 243 - } 244 - 245 - static inline int mtk_aes_complete(struct mtk_cryp *cryp, 246 - struct mtk_aes_rec *aes, 247 - int err) 248 - { 249 - aes->flags &= ~AES_FLAGS_BUSY; 250 - aes->areq->complete(aes->areq, err); 251 - /* Handle new request */ 252 - tasklet_schedule(&aes->queue_task); 253 - return err; 254 - } 255 - 256 - /* 257 - * Write descriptors for processing. This will configure the engine, load 258 - * the transform information and then start the packet processing. 
259 - */ 260 - static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 261 - { 262 - struct mtk_ring *ring = cryp->ring[aes->id]; 263 - struct mtk_desc *cmd = NULL, *res = NULL; 264 - struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg; 265 - u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len; 266 - int nents; 267 - 268 - /* Write command descriptors */ 269 - for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) { 270 - cmd = ring->cmd_next; 271 - cmd->hdr = MTK_DESC_BUF_LEN(ssg->length); 272 - cmd->buf = cpu_to_le32(sg_dma_address(ssg)); 273 - 274 - if (nents == 0) { 275 - cmd->hdr |= MTK_DESC_FIRST | 276 - MTK_DESC_CT_LEN(aes->ctx->ct_size); 277 - cmd->ct = cpu_to_le32(aes->ctx->ct_dma); 278 - cmd->ct_hdr = aes->ctx->ct_hdr; 279 - cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma); 280 - } 281 - 282 - /* Shift ring buffer and check boundary */ 283 - if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) 284 - ring->cmd_next = ring->cmd_base; 285 - } 286 - cmd->hdr |= MTK_DESC_LAST; 287 - 288 - /* Prepare result descriptors */ 289 - for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) { 290 - res = ring->res_next; 291 - res->hdr = MTK_DESC_BUF_LEN(dsg->length); 292 - res->buf = cpu_to_le32(sg_dma_address(dsg)); 293 - 294 - if (nents == 0) 295 - res->hdr |= MTK_DESC_FIRST; 296 - 297 - /* Shift ring buffer and check boundary */ 298 - if (++ring->res_next == ring->res_base + MTK_DESC_NUM) 299 - ring->res_next = ring->res_base; 300 - } 301 - res->hdr |= MTK_DESC_LAST; 302 - 303 - /* Pointer to current result descriptor */ 304 - ring->res_prev = res; 305 - 306 - /* Prepare enough space for authenticated tag */ 307 - if (aes->flags & AES_FLAGS_GCM) 308 - le32_add_cpu(&res->hdr, AES_BLOCK_SIZE); 309 - 310 - /* 311 - * Make sure that all changes to the DMA ring are done before we 312 - * start engine. 
313 - */ 314 - wmb(); 315 - /* Start DMA transfer */ 316 - mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen)); 317 - mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen)); 318 - 319 - return -EINPROGRESS; 320 - } 321 - 322 - static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 323 - { 324 - struct mtk_aes_base_ctx *ctx = aes->ctx; 325 - 326 - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), 327 - DMA_TO_DEVICE); 328 - 329 - if (aes->src.sg == aes->dst.sg) { 330 - dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, 331 - DMA_BIDIRECTIONAL); 332 - 333 - if (aes->src.sg != &aes->aligned_sg) 334 - mtk_aes_restore_sg(&aes->src); 335 - } else { 336 - dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents, 337 - DMA_FROM_DEVICE); 338 - 339 - if (aes->dst.sg != &aes->aligned_sg) 340 - mtk_aes_restore_sg(&aes->dst); 341 - 342 - dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, 343 - DMA_TO_DEVICE); 344 - 345 - if (aes->src.sg != &aes->aligned_sg) 346 - mtk_aes_restore_sg(&aes->src); 347 - } 348 - 349 - if (aes->dst.sg == &aes->aligned_sg) 350 - sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst), 351 - aes->buf, aes->total); 352 - } 353 - 354 - static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 355 - { 356 - struct mtk_aes_base_ctx *ctx = aes->ctx; 357 - struct mtk_aes_info *info = &ctx->info; 358 - 359 - ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), 360 - DMA_TO_DEVICE); 361 - if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) 362 - goto exit; 363 - 364 - ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd); 365 - 366 - if (aes->src.sg == aes->dst.sg) { 367 - aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, 368 - aes->src.nents, 369 - DMA_BIDIRECTIONAL); 370 - aes->dst.sg_len = aes->src.sg_len; 371 - if (unlikely(!aes->src.sg_len)) 372 - goto sg_map_err; 373 - } else { 374 - aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, 375 - aes->src.nents, DMA_TO_DEVICE); 376 - 
if (unlikely(!aes->src.sg_len)) 377 - goto sg_map_err; 378 - 379 - aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg, 380 - aes->dst.nents, DMA_FROM_DEVICE); 381 - if (unlikely(!aes->dst.sg_len)) { 382 - dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, 383 - DMA_TO_DEVICE); 384 - goto sg_map_err; 385 - } 386 - } 387 - 388 - return mtk_aes_xmit(cryp, aes); 389 - 390 - sg_map_err: 391 - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE); 392 - exit: 393 - return mtk_aes_complete(cryp, aes, -EINVAL); 394 - } 395 - 396 - /* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */ 397 - static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, 398 - size_t len) 399 - { 400 - struct skcipher_request *req = skcipher_request_cast(aes->areq); 401 - struct mtk_aes_base_ctx *ctx = aes->ctx; 402 - struct mtk_aes_info *info = &ctx->info; 403 - u32 cnt = 0; 404 - 405 - ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); 406 - info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len); 407 - info->cmd[cnt++] = AES_CMD1; 408 - 409 - info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode; 410 - if (aes->flags & AES_FLAGS_ENCRYPT) 411 - info->tfm[0] |= AES_TFM_BASIC_OUT; 412 - else 413 - info->tfm[0] |= AES_TFM_BASIC_IN; 414 - 415 - switch (aes->flags & AES_FLAGS_CIPHER_MSK) { 416 - case AES_FLAGS_CBC: 417 - info->tfm[1] = AES_TFM_CBC; 418 - break; 419 - case AES_FLAGS_ECB: 420 - info->tfm[1] = AES_TFM_ECB; 421 - goto ecb; 422 - case AES_FLAGS_CTR: 423 - info->tfm[1] = AES_TFM_CTR_LOAD; 424 - goto ctr; 425 - case AES_FLAGS_OFB: 426 - info->tfm[1] = AES_TFM_OFB; 427 - break; 428 - case AES_FLAGS_CFB128: 429 - info->tfm[1] = AES_TFM_CFB128; 430 - break; 431 - default: 432 - /* Should not happen... 
*/ 433 - return; 434 - } 435 - 436 - memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE); 437 - ctr: 438 - le32_add_cpu(&info->tfm[0], 439 - le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE)))); 440 - info->tfm[1] |= AES_TFM_FULL_IV; 441 - info->cmd[cnt++] = AES_CMD2; 442 - ecb: 443 - ctx->ct_size = cnt; 444 - } 445 - 446 - static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, 447 - struct scatterlist *src, struct scatterlist *dst, 448 - size_t len) 449 - { 450 - size_t padlen = 0; 451 - bool src_aligned, dst_aligned; 452 - 453 - aes->total = len; 454 - aes->src.sg = src; 455 - aes->dst.sg = dst; 456 - aes->real_dst = dst; 457 - 458 - src_aligned = mtk_aes_check_aligned(src, len, &aes->src); 459 - if (src == dst) 460 - dst_aligned = src_aligned; 461 - else 462 - dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst); 463 - 464 - if (!src_aligned || !dst_aligned) { 465 - padlen = mtk_aes_padlen(len); 466 - 467 - if (len + padlen > AES_BUF_SIZE) 468 - return mtk_aes_complete(cryp, aes, -ENOMEM); 469 - 470 - if (!src_aligned) { 471 - sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); 472 - aes->src.sg = &aes->aligned_sg; 473 - aes->src.nents = 1; 474 - aes->src.remainder = 0; 475 - } 476 - 477 - if (!dst_aligned) { 478 - aes->dst.sg = &aes->aligned_sg; 479 - aes->dst.nents = 1; 480 - aes->dst.remainder = 0; 481 - } 482 - 483 - sg_init_table(&aes->aligned_sg, 1); 484 - sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen); 485 - } 486 - 487 - mtk_aes_info_init(cryp, aes, len + padlen); 488 - 489 - return mtk_aes_map(cryp, aes); 490 - } 491 - 492 - static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id, 493 - struct crypto_async_request *new_areq) 494 - { 495 - struct mtk_aes_rec *aes = cryp->aes[id]; 496 - struct crypto_async_request *areq, *backlog; 497 - struct mtk_aes_base_ctx *ctx; 498 - unsigned long flags; 499 - int ret = 0; 500 - 501 - spin_lock_irqsave(&aes->lock, flags); 502 - if (new_areq) 503 - ret = 
crypto_enqueue_request(&aes->queue, new_areq); 504 - if (aes->flags & AES_FLAGS_BUSY) { 505 - spin_unlock_irqrestore(&aes->lock, flags); 506 - return ret; 507 - } 508 - backlog = crypto_get_backlog(&aes->queue); 509 - areq = crypto_dequeue_request(&aes->queue); 510 - if (areq) 511 - aes->flags |= AES_FLAGS_BUSY; 512 - spin_unlock_irqrestore(&aes->lock, flags); 513 - 514 - if (!areq) 515 - return ret; 516 - 517 - if (backlog) 518 - backlog->complete(backlog, -EINPROGRESS); 519 - 520 - ctx = crypto_tfm_ctx(areq->tfm); 521 - /* Write key into state buffer */ 522 - memcpy(ctx->info.state, ctx->key, sizeof(ctx->key)); 523 - 524 - aes->areq = areq; 525 - aes->ctx = ctx; 526 - 527 - return ctx->start(cryp, aes); 528 - } 529 - 530 - static int mtk_aes_transfer_complete(struct mtk_cryp *cryp, 531 - struct mtk_aes_rec *aes) 532 - { 533 - return mtk_aes_complete(cryp, aes, 0); 534 - } 535 - 536 - static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 537 - { 538 - struct skcipher_request *req = skcipher_request_cast(aes->areq); 539 - struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req); 540 - 541 - mtk_aes_set_mode(aes, rctx); 542 - aes->resume = mtk_aes_transfer_complete; 543 - 544 - return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen); 545 - } 546 - 547 - static inline struct mtk_aes_ctr_ctx * 548 - mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx) 549 - { 550 - return container_of(ctx, struct mtk_aes_ctr_ctx, base); 551 - } 552 - 553 - static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 554 - { 555 - struct mtk_aes_base_ctx *ctx = aes->ctx; 556 - struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx); 557 - struct skcipher_request *req = skcipher_request_cast(aes->areq); 558 - struct scatterlist *src, *dst; 559 - u32 start, end, ctr, blocks; 560 - size_t datalen; 561 - bool fragmented = false; 562 - 563 - /* Check for transfer completion. 
*/ 564 - cctx->offset += aes->total; 565 - if (cctx->offset >= req->cryptlen) 566 - return mtk_aes_transfer_complete(cryp, aes); 567 - 568 - /* Compute data length. */ 569 - datalen = req->cryptlen - cctx->offset; 570 - blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); 571 - ctr = be32_to_cpu(cctx->iv[3]); 572 - 573 - /* Check 32bit counter overflow. */ 574 - start = ctr; 575 - end = start + blocks - 1; 576 - if (end < start) { 577 - ctr = 0xffffffff; 578 - datalen = AES_BLOCK_SIZE * -start; 579 - fragmented = true; 580 - } 581 - 582 - /* Jump to offset. */ 583 - src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset); 584 - dst = ((req->src == req->dst) ? src : 585 - scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset)); 586 - 587 - /* Write IVs into transform state buffer. */ 588 - memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE); 589 - 590 - if (unlikely(fragmented)) { 591 - /* 592 - * Increment the counter manually to cope with the hardware 593 - * counter overflow. 594 - */ 595 - cctx->iv[3] = cpu_to_be32(ctr); 596 - crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE); 597 - } 598 - 599 - return mtk_aes_dma(cryp, aes, src, dst, datalen); 600 - } 601 - 602 - static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 603 - { 604 - struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx); 605 - struct skcipher_request *req = skcipher_request_cast(aes->areq); 606 - struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req); 607 - 608 - mtk_aes_set_mode(aes, rctx); 609 - 610 - memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE); 611 - cctx->offset = 0; 612 - aes->total = 0; 613 - aes->resume = mtk_aes_ctr_transfer; 614 - 615 - return mtk_aes_ctr_transfer(cryp, aes); 616 - } 617 - 618 - /* Check and set the AES key to transform state buffer */ 619 - static int mtk_aes_setkey(struct crypto_skcipher *tfm, 620 - const u8 *key, u32 keylen) 621 - { 622 - struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm); 623 - 624 - switch (keylen) { 625 - case 
AES_KEYSIZE_128: 626 - ctx->keymode = AES_TFM_128BITS; 627 - break; 628 - case AES_KEYSIZE_192: 629 - ctx->keymode = AES_TFM_192BITS; 630 - break; 631 - case AES_KEYSIZE_256: 632 - ctx->keymode = AES_TFM_256BITS; 633 - break; 634 - 635 - default: 636 - return -EINVAL; 637 - } 638 - 639 - ctx->keylen = SIZE_IN_WORDS(keylen); 640 - memcpy(ctx->key, key, keylen); 641 - 642 - return 0; 643 - } 644 - 645 - static int mtk_aes_crypt(struct skcipher_request *req, u64 mode) 646 - { 647 - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 648 - struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher); 649 - struct mtk_aes_reqctx *rctx; 650 - struct mtk_cryp *cryp; 651 - 652 - cryp = mtk_aes_find_dev(ctx); 653 - if (!cryp) 654 - return -ENODEV; 655 - 656 - rctx = skcipher_request_ctx(req); 657 - rctx->mode = mode; 658 - 659 - return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT), 660 - &req->base); 661 - } 662 - 663 - static int mtk_aes_ecb_encrypt(struct skcipher_request *req) 664 - { 665 - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB); 666 - } 667 - 668 - static int mtk_aes_ecb_decrypt(struct skcipher_request *req) 669 - { 670 - return mtk_aes_crypt(req, AES_FLAGS_ECB); 671 - } 672 - 673 - static int mtk_aes_cbc_encrypt(struct skcipher_request *req) 674 - { 675 - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); 676 - } 677 - 678 - static int mtk_aes_cbc_decrypt(struct skcipher_request *req) 679 - { 680 - return mtk_aes_crypt(req, AES_FLAGS_CBC); 681 - } 682 - 683 - static int mtk_aes_ctr_encrypt(struct skcipher_request *req) 684 - { 685 - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); 686 - } 687 - 688 - static int mtk_aes_ctr_decrypt(struct skcipher_request *req) 689 - { 690 - return mtk_aes_crypt(req, AES_FLAGS_CTR); 691 - } 692 - 693 - static int mtk_aes_ofb_encrypt(struct skcipher_request *req) 694 - { 695 - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); 696 - } 697 - 698 - static int 
mtk_aes_ofb_decrypt(struct skcipher_request *req) 699 - { 700 - return mtk_aes_crypt(req, AES_FLAGS_OFB); 701 - } 702 - 703 - static int mtk_aes_cfb_encrypt(struct skcipher_request *req) 704 - { 705 - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128); 706 - } 707 - 708 - static int mtk_aes_cfb_decrypt(struct skcipher_request *req) 709 - { 710 - return mtk_aes_crypt(req, AES_FLAGS_CFB128); 711 - } 712 - 713 - static int mtk_aes_init_tfm(struct crypto_skcipher *tfm) 714 - { 715 - struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 716 - 717 - crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx)); 718 - ctx->base.start = mtk_aes_start; 719 - return 0; 720 - } 721 - 722 - static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm) 723 - { 724 - struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 725 - 726 - crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx)); 727 - ctx->base.start = mtk_aes_ctr_start; 728 - return 0; 729 - } 730 - 731 - static struct skcipher_alg aes_algs[] = { 732 - { 733 - .base.cra_name = "cbc(aes)", 734 - .base.cra_driver_name = "cbc-aes-mtk", 735 - .base.cra_priority = 400, 736 - .base.cra_flags = CRYPTO_ALG_ASYNC, 737 - .base.cra_blocksize = AES_BLOCK_SIZE, 738 - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), 739 - .base.cra_alignmask = 0xf, 740 - .base.cra_module = THIS_MODULE, 741 - 742 - .min_keysize = AES_MIN_KEY_SIZE, 743 - .max_keysize = AES_MAX_KEY_SIZE, 744 - .setkey = mtk_aes_setkey, 745 - .encrypt = mtk_aes_cbc_encrypt, 746 - .decrypt = mtk_aes_cbc_decrypt, 747 - .ivsize = AES_BLOCK_SIZE, 748 - .init = mtk_aes_init_tfm, 749 - }, 750 - { 751 - .base.cra_name = "ecb(aes)", 752 - .base.cra_driver_name = "ecb-aes-mtk", 753 - .base.cra_priority = 400, 754 - .base.cra_flags = CRYPTO_ALG_ASYNC, 755 - .base.cra_blocksize = AES_BLOCK_SIZE, 756 - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), 757 - .base.cra_alignmask = 0xf, 758 - .base.cra_module = THIS_MODULE, 759 - 760 - .min_keysize = AES_MIN_KEY_SIZE, 
761 - .max_keysize = AES_MAX_KEY_SIZE, 762 - .setkey = mtk_aes_setkey, 763 - .encrypt = mtk_aes_ecb_encrypt, 764 - .decrypt = mtk_aes_ecb_decrypt, 765 - .init = mtk_aes_init_tfm, 766 - }, 767 - { 768 - .base.cra_name = "ctr(aes)", 769 - .base.cra_driver_name = "ctr-aes-mtk", 770 - .base.cra_priority = 400, 771 - .base.cra_flags = CRYPTO_ALG_ASYNC, 772 - .base.cra_blocksize = 1, 773 - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), 774 - .base.cra_alignmask = 0xf, 775 - .base.cra_module = THIS_MODULE, 776 - 777 - .min_keysize = AES_MIN_KEY_SIZE, 778 - .max_keysize = AES_MAX_KEY_SIZE, 779 - .ivsize = AES_BLOCK_SIZE, 780 - .setkey = mtk_aes_setkey, 781 - .encrypt = mtk_aes_ctr_encrypt, 782 - .decrypt = mtk_aes_ctr_decrypt, 783 - .init = mtk_aes_ctr_init_tfm, 784 - }, 785 - { 786 - .base.cra_name = "ofb(aes)", 787 - .base.cra_driver_name = "ofb-aes-mtk", 788 - .base.cra_priority = 400, 789 - .base.cra_flags = CRYPTO_ALG_ASYNC, 790 - .base.cra_blocksize = AES_BLOCK_SIZE, 791 - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), 792 - .base.cra_alignmask = 0xf, 793 - .base.cra_module = THIS_MODULE, 794 - 795 - .min_keysize = AES_MIN_KEY_SIZE, 796 - .max_keysize = AES_MAX_KEY_SIZE, 797 - .ivsize = AES_BLOCK_SIZE, 798 - .setkey = mtk_aes_setkey, 799 - .encrypt = mtk_aes_ofb_encrypt, 800 - .decrypt = mtk_aes_ofb_decrypt, 801 - }, 802 - { 803 - .base.cra_name = "cfb(aes)", 804 - .base.cra_driver_name = "cfb-aes-mtk", 805 - .base.cra_priority = 400, 806 - .base.cra_flags = CRYPTO_ALG_ASYNC, 807 - .base.cra_blocksize = 1, 808 - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), 809 - .base.cra_alignmask = 0xf, 810 - .base.cra_module = THIS_MODULE, 811 - 812 - .min_keysize = AES_MIN_KEY_SIZE, 813 - .max_keysize = AES_MAX_KEY_SIZE, 814 - .ivsize = AES_BLOCK_SIZE, 815 - .setkey = mtk_aes_setkey, 816 - .encrypt = mtk_aes_cfb_encrypt, 817 - .decrypt = mtk_aes_cfb_decrypt, 818 - }, 819 - }; 820 - 821 - static inline struct mtk_aes_gcm_ctx * 822 - mtk_aes_gcm_ctx_cast(struct 
mtk_aes_base_ctx *ctx) 823 - { 824 - return container_of(ctx, struct mtk_aes_gcm_ctx, base); 825 - } 826 - 827 - /* 828 - * Engine will verify and compare tag automatically, so we just need 829 - * to check returned status which stored in the result descriptor. 830 - */ 831 - static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp, 832 - struct mtk_aes_rec *aes) 833 - { 834 - __le32 status = cryp->ring[aes->id]->res_prev->ct; 835 - 836 - return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ? 837 - -EBADMSG : 0); 838 - } 839 - 840 - /* Initialize transform information of GCM mode */ 841 - static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp, 842 - struct mtk_aes_rec *aes, 843 - size_t len) 844 - { 845 - struct aead_request *req = aead_request_cast(aes->areq); 846 - struct mtk_aes_base_ctx *ctx = aes->ctx; 847 - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 848 - struct mtk_aes_info *info = &ctx->info; 849 - u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); 850 - u32 cnt = 0; 851 - 852 - ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); 853 - 854 - info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen); 855 - info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen); 856 - info->cmd[cnt++] = AES_GCM_CMD2; 857 - info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen); 858 - 859 - if (aes->flags & AES_FLAGS_ENCRYPT) { 860 - info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize); 861 - info->tfm[0] = AES_TFM_GCM_OUT; 862 - } else { 863 - info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize); 864 - info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize); 865 - info->tfm[0] = AES_TFM_GCM_IN; 866 - } 867 - ctx->ct_size = cnt; 868 - 869 - info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE( 870 - ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) | 871 - ctx->keymode; 872 - info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV | 873 - AES_TFM_ENC_HASH; 874 - 875 - 
memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE), 876 - req->iv, ivsize); 877 - } 878 - 879 - static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, 880 - struct scatterlist *src, struct scatterlist *dst, 881 - size_t len) 882 - { 883 - bool src_aligned, dst_aligned; 884 - 885 - aes->src.sg = src; 886 - aes->dst.sg = dst; 887 - aes->real_dst = dst; 888 - 889 - src_aligned = mtk_aes_check_aligned(src, len, &aes->src); 890 - if (src == dst) 891 - dst_aligned = src_aligned; 892 - else 893 - dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst); 894 - 895 - if (!src_aligned || !dst_aligned) { 896 - if (aes->total > AES_BUF_SIZE) 897 - return mtk_aes_complete(cryp, aes, -ENOMEM); 898 - 899 - if (!src_aligned) { 900 - sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); 901 - aes->src.sg = &aes->aligned_sg; 902 - aes->src.nents = 1; 903 - aes->src.remainder = 0; 904 - } 905 - 906 - if (!dst_aligned) { 907 - aes->dst.sg = &aes->aligned_sg; 908 - aes->dst.nents = 1; 909 - aes->dst.remainder = 0; 910 - } 911 - 912 - sg_init_table(&aes->aligned_sg, 1); 913 - sg_set_buf(&aes->aligned_sg, aes->buf, aes->total); 914 - } 915 - 916 - mtk_aes_gcm_info_init(cryp, aes, len); 917 - 918 - return mtk_aes_map(cryp, aes); 919 - } 920 - 921 - /* Todo: GMAC */ 922 - static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) 923 - { 924 - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx); 925 - struct aead_request *req = aead_request_cast(aes->areq); 926 - struct mtk_aes_reqctx *rctx = aead_request_ctx(req); 927 - u32 len = req->assoclen + req->cryptlen; 928 - 929 - mtk_aes_set_mode(aes, rctx); 930 - 931 - if (aes->flags & AES_FLAGS_ENCRYPT) { 932 - u32 tag[4]; 933 - 934 - aes->resume = mtk_aes_transfer_complete; 935 - /* Compute total process length. 
*/ 936 - aes->total = len + gctx->authsize; 937 - /* Hardware will append authenticated tag to output buffer */ 938 - scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); 939 - } else { 940 - aes->resume = mtk_aes_gcm_tag_verify; 941 - aes->total = len; 942 - } 943 - 944 - return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); 945 - } 946 - 947 - static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) 948 - { 949 - struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 950 - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 951 - struct mtk_aes_reqctx *rctx = aead_request_ctx(req); 952 - struct mtk_cryp *cryp; 953 - bool enc = !!(mode & AES_FLAGS_ENCRYPT); 954 - 955 - cryp = mtk_aes_find_dev(ctx); 956 - if (!cryp) 957 - return -ENODEV; 958 - 959 - /* Compute text length. */ 960 - gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize); 961 - 962 - /* Empty messages are not supported yet */ 963 - if (!gctx->textlen && !req->assoclen) 964 - return -EINVAL; 965 - 966 - rctx->mode = AES_FLAGS_GCM | mode; 967 - 968 - return mtk_aes_handle_queue(cryp, enc, &req->base); 969 - } 970 - 971 - /* 972 - * Because of the hardware limitation, we need to pre-calculate key(H) 973 - * for the GHASH operation. The result of the encryption operation 974 - * need to be stored in the transform state buffer. 
975 - */ 976 - static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, 977 - u32 keylen) 978 - { 979 - struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); 980 - union { 981 - u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)]; 982 - u8 x8[AES_BLOCK_SIZE]; 983 - } hash = {}; 984 - struct crypto_aes_ctx aes_ctx; 985 - int err; 986 - int i; 987 - 988 - switch (keylen) { 989 - case AES_KEYSIZE_128: 990 - ctx->keymode = AES_TFM_128BITS; 991 - break; 992 - case AES_KEYSIZE_192: 993 - ctx->keymode = AES_TFM_192BITS; 994 - break; 995 - case AES_KEYSIZE_256: 996 - ctx->keymode = AES_TFM_256BITS; 997 - break; 998 - 999 - default: 1000 - return -EINVAL; 1001 - } 1002 - 1003 - ctx->keylen = SIZE_IN_WORDS(keylen); 1004 - 1005 - err = aes_expandkey(&aes_ctx, key, keylen); 1006 - if (err) 1007 - return err; 1008 - 1009 - aes_encrypt(&aes_ctx, hash.x8, hash.x8); 1010 - memzero_explicit(&aes_ctx, sizeof(aes_ctx)); 1011 - 1012 - memcpy(ctx->key, key, keylen); 1013 - 1014 - /* Why do we need to do this? 
*/ 1015 - for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++) 1016 - hash.x32[i] = swab32(hash.x32[i]); 1017 - 1018 - memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE); 1019 - 1020 - return 0; 1021 - } 1022 - 1023 - static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead, 1024 - u32 authsize) 1025 - { 1026 - struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); 1027 - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); 1028 - 1029 - /* Same as crypto_gcm_authsize() from crypto/gcm.c */ 1030 - switch (authsize) { 1031 - case 8: 1032 - case 12: 1033 - case 16: 1034 - break; 1035 - default: 1036 - return -EINVAL; 1037 - } 1038 - 1039 - gctx->authsize = authsize; 1040 - return 0; 1041 - } 1042 - 1043 - static int mtk_aes_gcm_encrypt(struct aead_request *req) 1044 - { 1045 - return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT); 1046 - } 1047 - 1048 - static int mtk_aes_gcm_decrypt(struct aead_request *req) 1049 - { 1050 - return mtk_aes_gcm_crypt(req, 0); 1051 - } 1052 - 1053 - static int mtk_aes_gcm_init(struct crypto_aead *aead) 1054 - { 1055 - struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); 1056 - 1057 - crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx)); 1058 - ctx->base.start = mtk_aes_gcm_start; 1059 - return 0; 1060 - } 1061 - 1062 - static struct aead_alg aes_gcm_alg = { 1063 - .setkey = mtk_aes_gcm_setkey, 1064 - .setauthsize = mtk_aes_gcm_setauthsize, 1065 - .encrypt = mtk_aes_gcm_encrypt, 1066 - .decrypt = mtk_aes_gcm_decrypt, 1067 - .init = mtk_aes_gcm_init, 1068 - .ivsize = GCM_AES_IV_SIZE, 1069 - .maxauthsize = AES_BLOCK_SIZE, 1070 - 1071 - .base = { 1072 - .cra_name = "gcm(aes)", 1073 - .cra_driver_name = "gcm-aes-mtk", 1074 - .cra_priority = 400, 1075 - .cra_flags = CRYPTO_ALG_ASYNC, 1076 - .cra_blocksize = 1, 1077 - .cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx), 1078 - .cra_alignmask = 0xf, 1079 - .cra_module = THIS_MODULE, 1080 - }, 1081 - }; 1082 - 1083 - static void mtk_aes_queue_task(unsigned long data) 1084 - { 1085 - 
struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data; 1086 - 1087 - mtk_aes_handle_queue(aes->cryp, aes->id, NULL); 1088 - } 1089 - 1090 - static void mtk_aes_done_task(unsigned long data) 1091 - { 1092 - struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data; 1093 - struct mtk_cryp *cryp = aes->cryp; 1094 - 1095 - mtk_aes_unmap(cryp, aes); 1096 - aes->resume(cryp, aes); 1097 - } 1098 - 1099 - static irqreturn_t mtk_aes_irq(int irq, void *dev_id) 1100 - { 1101 - struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id; 1102 - struct mtk_cryp *cryp = aes->cryp; 1103 - u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id)); 1104 - 1105 - mtk_aes_write(cryp, RDR_STAT(aes->id), val); 1106 - 1107 - if (likely(AES_FLAGS_BUSY & aes->flags)) { 1108 - mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST); 1109 - mtk_aes_write(cryp, RDR_THRESH(aes->id), 1110 - MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE); 1111 - 1112 - tasklet_schedule(&aes->done_task); 1113 - } else { 1114 - dev_warn(cryp->dev, "AES interrupt when no active requests.\n"); 1115 - } 1116 - return IRQ_HANDLED; 1117 - } 1118 - 1119 - /* 1120 - * The purpose of creating encryption and decryption records is 1121 - * to process outbound/inbound data in parallel, it can improve 1122 - * performance in most use cases, such as IPSec VPN, especially 1123 - * under heavy network traffic. 
1124 - */ 1125 - static int mtk_aes_record_init(struct mtk_cryp *cryp) 1126 - { 1127 - struct mtk_aes_rec **aes = cryp->aes; 1128 - int i, err = -ENOMEM; 1129 - 1130 - for (i = 0; i < MTK_REC_NUM; i++) { 1131 - aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL); 1132 - if (!aes[i]) 1133 - goto err_cleanup; 1134 - 1135 - aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL, 1136 - AES_BUF_ORDER); 1137 - if (!aes[i]->buf) 1138 - goto err_cleanup; 1139 - 1140 - aes[i]->cryp = cryp; 1141 - 1142 - spin_lock_init(&aes[i]->lock); 1143 - crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE); 1144 - 1145 - tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task, 1146 - (unsigned long)aes[i]); 1147 - tasklet_init(&aes[i]->done_task, mtk_aes_done_task, 1148 - (unsigned long)aes[i]); 1149 - } 1150 - 1151 - /* Link to ring0 and ring1 respectively */ 1152 - aes[0]->id = MTK_RING0; 1153 - aes[1]->id = MTK_RING1; 1154 - 1155 - return 0; 1156 - 1157 - err_cleanup: 1158 - for (; i--; ) { 1159 - free_page((unsigned long)aes[i]->buf); 1160 - kfree(aes[i]); 1161 - } 1162 - 1163 - return err; 1164 - } 1165 - 1166 - static void mtk_aes_record_free(struct mtk_cryp *cryp) 1167 - { 1168 - int i; 1169 - 1170 - for (i = 0; i < MTK_REC_NUM; i++) { 1171 - tasklet_kill(&cryp->aes[i]->done_task); 1172 - tasklet_kill(&cryp->aes[i]->queue_task); 1173 - 1174 - free_page((unsigned long)cryp->aes[i]->buf); 1175 - kfree(cryp->aes[i]); 1176 - } 1177 - } 1178 - 1179 - static void mtk_aes_unregister_algs(void) 1180 - { 1181 - int i; 1182 - 1183 - crypto_unregister_aead(&aes_gcm_alg); 1184 - 1185 - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) 1186 - crypto_unregister_skcipher(&aes_algs[i]); 1187 - } 1188 - 1189 - static int mtk_aes_register_algs(void) 1190 - { 1191 - int err, i; 1192 - 1193 - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { 1194 - err = crypto_register_skcipher(&aes_algs[i]); 1195 - if (err) 1196 - goto err_aes_algs; 1197 - } 1198 - 1199 - err = crypto_register_aead(&aes_gcm_alg); 1200 - if (err) 1201 - goto 
err_aes_algs; 1202 - 1203 - return 0; 1204 - 1205 - err_aes_algs: 1206 - for (; i--; ) 1207 - crypto_unregister_skcipher(&aes_algs[i]); 1208 - 1209 - return err; 1210 - } 1211 - 1212 - int mtk_cipher_alg_register(struct mtk_cryp *cryp) 1213 - { 1214 - int ret; 1215 - 1216 - INIT_LIST_HEAD(&cryp->aes_list); 1217 - 1218 - /* Initialize two cipher records */ 1219 - ret = mtk_aes_record_init(cryp); 1220 - if (ret) 1221 - goto err_record; 1222 - 1223 - ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq, 1224 - 0, "mtk-aes", cryp->aes[0]); 1225 - if (ret) { 1226 - dev_err(cryp->dev, "unable to request AES irq.\n"); 1227 - goto err_res; 1228 - } 1229 - 1230 - ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq, 1231 - 0, "mtk-aes", cryp->aes[1]); 1232 - if (ret) { 1233 - dev_err(cryp->dev, "unable to request AES irq.\n"); 1234 - goto err_res; 1235 - } 1236 - 1237 - /* Enable ring0 and ring1 interrupt */ 1238 - mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0); 1239 - mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1); 1240 - 1241 - spin_lock(&mtk_aes.lock); 1242 - list_add_tail(&cryp->aes_list, &mtk_aes.dev_list); 1243 - spin_unlock(&mtk_aes.lock); 1244 - 1245 - ret = mtk_aes_register_algs(); 1246 - if (ret) 1247 - goto err_algs; 1248 - 1249 - return 0; 1250 - 1251 - err_algs: 1252 - spin_lock(&mtk_aes.lock); 1253 - list_del(&cryp->aes_list); 1254 - spin_unlock(&mtk_aes.lock); 1255 - err_res: 1256 - mtk_aes_record_free(cryp); 1257 - err_record: 1258 - 1259 - dev_err(cryp->dev, "mtk-aes initialization failed.\n"); 1260 - return ret; 1261 - } 1262 - 1263 - void mtk_cipher_alg_release(struct mtk_cryp *cryp) 1264 - { 1265 - spin_lock(&mtk_aes.lock); 1266 - list_del(&cryp->aes_list); 1267 - spin_unlock(&mtk_aes.lock); 1268 - 1269 - mtk_aes_unregister_algs(); 1270 - mtk_aes_record_free(cryp); 1271 - }
-586
drivers/crypto/mediatek/mtk-platform.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Driver for EIP97 cryptographic accelerator. 4 - * 5 - * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com> 6 - */ 7 - 8 - #include <linux/clk.h> 9 - #include <linux/init.h> 10 - #include <linux/kernel.h> 11 - #include <linux/module.h> 12 - #include <linux/mod_devicetable.h> 13 - #include <linux/platform_device.h> 14 - #include <linux/pm_runtime.h> 15 - #include "mtk-platform.h" 16 - 17 - #define MTK_BURST_SIZE_MSK GENMASK(7, 4) 18 - #define MTK_BURST_SIZE(x) ((x) << 4) 19 - #define MTK_DESC_SIZE(x) ((x) << 0) 20 - #define MTK_DESC_OFFSET(x) ((x) << 16) 21 - #define MTK_DESC_FETCH_SIZE(x) ((x) << 0) 22 - #define MTK_DESC_FETCH_THRESH(x) ((x) << 16) 23 - #define MTK_DESC_OVL_IRQ_EN BIT(25) 24 - #define MTK_DESC_ATP_PRESENT BIT(30) 25 - 26 - #define MTK_DFSE_IDLE GENMASK(3, 0) 27 - #define MTK_DFSE_THR_CTRL_EN BIT(30) 28 - #define MTK_DFSE_THR_CTRL_RESET BIT(31) 29 - #define MTK_DFSE_RING_ID(x) (((x) >> 12) & GENMASK(3, 0)) 30 - #define MTK_DFSE_MIN_DATA(x) ((x) << 0) 31 - #define MTK_DFSE_MAX_DATA(x) ((x) << 8) 32 - #define MTK_DFE_MIN_CTRL(x) ((x) << 16) 33 - #define MTK_DFE_MAX_CTRL(x) ((x) << 24) 34 - 35 - #define MTK_IN_BUF_MIN_THRESH(x) ((x) << 8) 36 - #define MTK_IN_BUF_MAX_THRESH(x) ((x) << 12) 37 - #define MTK_OUT_BUF_MIN_THRESH(x) ((x) << 0) 38 - #define MTK_OUT_BUF_MAX_THRESH(x) ((x) << 4) 39 - #define MTK_IN_TBUF_SIZE(x) (((x) >> 4) & GENMASK(3, 0)) 40 - #define MTK_IN_DBUF_SIZE(x) (((x) >> 8) & GENMASK(3, 0)) 41 - #define MTK_OUT_DBUF_SIZE(x) (((x) >> 16) & GENMASK(3, 0)) 42 - #define MTK_CMD_FIFO_SIZE(x) (((x) >> 8) & GENMASK(3, 0)) 43 - #define MTK_RES_FIFO_SIZE(x) (((x) >> 12) & GENMASK(3, 0)) 44 - 45 - #define MTK_PE_TK_LOC_AVL BIT(2) 46 - #define MTK_PE_PROC_HELD BIT(14) 47 - #define MTK_PE_TK_TIMEOUT_EN BIT(22) 48 - #define MTK_PE_INPUT_DMA_ERR BIT(0) 49 - #define MTK_PE_OUTPUT_DMA_ERR BIT(1) 50 - #define MTK_PE_PKT_PORC_ERR BIT(2) 51 - #define MTK_PE_PKT_TIMEOUT BIT(3) 52 - #define 
MTK_PE_FATAL_ERR BIT(14) 53 - #define MTK_PE_INPUT_DMA_ERR_EN BIT(16) 54 - #define MTK_PE_OUTPUT_DMA_ERR_EN BIT(17) 55 - #define MTK_PE_PKT_PORC_ERR_EN BIT(18) 56 - #define MTK_PE_PKT_TIMEOUT_EN BIT(19) 57 - #define MTK_PE_FATAL_ERR_EN BIT(30) 58 - #define MTK_PE_INT_OUT_EN BIT(31) 59 - 60 - #define MTK_HIA_SIGNATURE ((u16)0x35ca) 61 - #define MTK_HIA_DATA_WIDTH(x) (((x) >> 25) & GENMASK(1, 0)) 62 - #define MTK_HIA_DMA_LENGTH(x) (((x) >> 20) & GENMASK(4, 0)) 63 - #define MTK_CDR_STAT_CLR GENMASK(4, 0) 64 - #define MTK_RDR_STAT_CLR GENMASK(7, 0) 65 - 66 - #define MTK_AIC_INT_MSK GENMASK(5, 0) 67 - #define MTK_AIC_VER_MSK (GENMASK(15, 0) | GENMASK(27, 20)) 68 - #define MTK_AIC_VER11 0x011036c9 69 - #define MTK_AIC_VER12 0x012036c9 70 - #define MTK_AIC_G_CLR GENMASK(30, 20) 71 - 72 - /** 73 - * EIP97 is an integrated security subsystem to accelerate cryptographic 74 - * functions and protocols to offload the host processor. 75 - * Some important hardware modules are briefly introduced below: 76 - * 77 - * Host Interface Adapter(HIA) - the main interface between the host 78 - * system and the hardware subsystem. It is responsible for attaching 79 - * processing engine to the specific host bus interface and provides a 80 - * standardized software view for off loading tasks to the engine. 81 - * 82 - * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many 83 - * CD the host has prepared in the CDR. It monitors the fill level of its 84 - * CD-FIFO and if there's sufficient space for the next block of descriptors, 85 - * then it fires off a DMA request to fetch a block of CDs. 86 - * 87 - * Data fetch engine(DFE) - It is responsible for parsing the CD and 88 - * setting up the required control and packet data DMA transfers from 89 - * system memory to the processing engine. 
90 - * 91 - * Result Descriptor Ring Manager(RDR Manager) - same as CDR Manager, 92 - * but target is result descriptors, Moreover, it also handles the RD 93 - * updates under control of the DSE. For each packet data segment 94 - * processed, the DSE triggers the RDR Manager to write the updated RD. 95 - * If triggered to update, the RDR Manager sets up a DMA operation to 96 - * copy the RD from the DSE to the correct location in the RDR. 97 - * 98 - * Data Store Engine(DSE) - It is responsible for parsing the prepared RD 99 - * and setting up the required control and packet data DMA transfers from 100 - * the processing engine to system memory. 101 - * 102 - * Advanced Interrupt Controllers(AICs) - receive interrupt request signals 103 - * from various sources and combine them into one interrupt output. 104 - * The AICs are used by: 105 - * - One for the HIA global and processing engine interrupts. 106 - * - The others for the descriptor ring interrupts. 107 - */ 108 - 109 - /* Cryptographic engine capabilities */ 110 - struct mtk_sys_cap { 111 - /* host interface adapter */ 112 - u32 hia_ver; 113 - u32 hia_opt; 114 - /* packet engine */ 115 - u32 pkt_eng_opt; 116 - /* global hardware */ 117 - u32 hw_opt; 118 - }; 119 - 120 - static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask) 121 - { 122 - /* Assign rings to DFE/DSE thread and enable it */ 123 - writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL); 124 - writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL); 125 - } 126 - 127 - static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp, 128 - struct mtk_sys_cap *cap) 129 - { 130 - u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2; 131 - u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1; 132 - u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len); 133 - u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len); 134 - u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len); 135 - 136 - 
writel(MTK_DFSE_MIN_DATA(ipbuf - 1) | 137 - MTK_DFSE_MAX_DATA(ipbuf) | 138 - MTK_DFE_MIN_CTRL(itbuf - 1) | 139 - MTK_DFE_MAX_CTRL(itbuf), 140 - cryp->base + DFE_CFG); 141 - 142 - writel(MTK_DFSE_MIN_DATA(opbuf - 1) | 143 - MTK_DFSE_MAX_DATA(opbuf), 144 - cryp->base + DSE_CFG); 145 - 146 - writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) | 147 - MTK_IN_BUF_MAX_THRESH(ipbuf), 148 - cryp->base + PE_IN_DBUF_THRESH); 149 - 150 - writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) | 151 - MTK_IN_BUF_MAX_THRESH(itbuf), 152 - cryp->base + PE_IN_TBUF_THRESH); 153 - 154 - writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) | 155 - MTK_OUT_BUF_MAX_THRESH(opbuf), 156 - cryp->base + PE_OUT_DBUF_THRESH); 157 - 158 - writel(0, cryp->base + PE_OUT_TBUF_THRESH); 159 - writel(0, cryp->base + PE_OUT_BUF_CTRL); 160 - } 161 - 162 - static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp) 163 - { 164 - int ret = -EINVAL; 165 - u32 val; 166 - 167 - /* Check for completion of all DMA transfers */ 168 - val = readl(cryp->base + DFE_THR_STAT); 169 - if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) { 170 - val = readl(cryp->base + DSE_THR_STAT); 171 - if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) 172 - ret = 0; 173 - } 174 - 175 - if (!ret) { 176 - /* Take DFE/DSE thread out of reset */ 177 - writel(0, cryp->base + DFE_THR_CTRL); 178 - writel(0, cryp->base + DSE_THR_CTRL); 179 - } else { 180 - return -EBUSY; 181 - } 182 - 183 - return 0; 184 - } 185 - 186 - static int mtk_dfe_dse_reset(struct mtk_cryp *cryp) 187 - { 188 - /* Reset DSE/DFE and correct system priorities for all rings. 
*/ 189 - writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL); 190 - writel(0, cryp->base + DFE_PRIO_0); 191 - writel(0, cryp->base + DFE_PRIO_1); 192 - writel(0, cryp->base + DFE_PRIO_2); 193 - writel(0, cryp->base + DFE_PRIO_3); 194 - 195 - writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL); 196 - writel(0, cryp->base + DSE_PRIO_0); 197 - writel(0, cryp->base + DSE_PRIO_1); 198 - writel(0, cryp->base + DSE_PRIO_2); 199 - writel(0, cryp->base + DSE_PRIO_3); 200 - 201 - return mtk_dfe_dse_state_check(cryp); 202 - } 203 - 204 - static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp, 205 - int i, struct mtk_sys_cap *cap) 206 - { 207 - /* Full descriptor that fits FIFO minus one */ 208 - u32 count = 209 - ((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1; 210 - 211 - /* Temporarily disable external triggering */ 212 - writel(0, cryp->base + CDR_CFG(i)); 213 - 214 - /* Clear CDR count */ 215 - writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i)); 216 - writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i)); 217 - 218 - writel(0, cryp->base + CDR_PREP_PNTR(i)); 219 - writel(0, cryp->base + CDR_PROC_PNTR(i)); 220 - writel(0, cryp->base + CDR_DMA_CFG(i)); 221 - 222 - /* Configure CDR host address space */ 223 - writel(0, cryp->base + CDR_BASE_ADDR_HI(i)); 224 - writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i)); 225 - 226 - writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i)); 227 - 228 - /* Clear and disable all CDR interrupts */ 229 - writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i)); 230 - 231 - /* 232 - * Set command descriptor offset and enable additional 233 - * token present in descriptor. 
234 - */ 235 - writel(MTK_DESC_SIZE(MTK_DESC_SZ) | 236 - MTK_DESC_OFFSET(MTK_DESC_OFF) | 237 - MTK_DESC_ATP_PRESENT, 238 - cryp->base + CDR_DESC_SIZE(i)); 239 - 240 - writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) | 241 - MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ), 242 - cryp->base + CDR_CFG(i)); 243 - } 244 - 245 - static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp, 246 - int i, struct mtk_sys_cap *cap) 247 - { 248 - u32 rndup = 2; 249 - u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1; 250 - 251 - /* Temporarily disable external triggering */ 252 - writel(0, cryp->base + RDR_CFG(i)); 253 - 254 - /* Clear RDR count */ 255 - writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i)); 256 - writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i)); 257 - 258 - writel(0, cryp->base + RDR_PREP_PNTR(i)); 259 - writel(0, cryp->base + RDR_PROC_PNTR(i)); 260 - writel(0, cryp->base + RDR_DMA_CFG(i)); 261 - 262 - /* Configure RDR host address space */ 263 - writel(0, cryp->base + RDR_BASE_ADDR_HI(i)); 264 - writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i)); 265 - 266 - writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i)); 267 - writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i)); 268 - 269 - /* 270 - * RDR manager generates update interrupts on a per-completed-packet, 271 - * and the rd_proc_thresh_irq interrupt is fired when proc_pkt_count 272 - * for the RDR exceeds the number of packets. 273 - */ 274 - writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE, 275 - cryp->base + RDR_THRESH(i)); 276 - 277 - /* 278 - * Configure a threshold and time-out value for the processed 279 - * result descriptors (or complete packets) that are written to 280 - * the RDR. 281 - */ 282 - writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF), 283 - cryp->base + RDR_DESC_SIZE(i)); 284 - 285 - /* 286 - * Configure HIA fetch size and fetch threshold that are used to 287 - * fetch blocks of multiple descriptors. 
288 - */ 289 - writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) | 290 - MTK_DESC_FETCH_THRESH(count * rndup) | 291 - MTK_DESC_OVL_IRQ_EN, 292 - cryp->base + RDR_CFG(i)); 293 - } 294 - 295 - static int mtk_packet_engine_setup(struct mtk_cryp *cryp) 296 - { 297 - struct mtk_sys_cap cap; 298 - int i, err; 299 - u32 val; 300 - 301 - cap.hia_ver = readl(cryp->base + HIA_VERSION); 302 - cap.hia_opt = readl(cryp->base + HIA_OPTIONS); 303 - cap.hw_opt = readl(cryp->base + EIP97_OPTIONS); 304 - 305 - if (!(((u16)cap.hia_ver) == MTK_HIA_SIGNATURE)) 306 - return -EINVAL; 307 - 308 - /* Configure endianness conversion method for master (DMA) interface */ 309 - writel(0, cryp->base + EIP97_MST_CTRL); 310 - 311 - /* Set HIA burst size */ 312 - val = readl(cryp->base + HIA_MST_CTRL); 313 - val &= ~MTK_BURST_SIZE_MSK; 314 - val |= MTK_BURST_SIZE(5); 315 - writel(val, cryp->base + HIA_MST_CTRL); 316 - 317 - err = mtk_dfe_dse_reset(cryp); 318 - if (err) { 319 - dev_err(cryp->dev, "Failed to reset DFE and DSE.\n"); 320 - return err; 321 - } 322 - 323 - mtk_dfe_dse_buf_setup(cryp, &cap); 324 - 325 - /* Enable the 4 rings for the packet engines. 
*/ 326 - mtk_desc_ring_link(cryp, 0xf); 327 - 328 - for (i = 0; i < MTK_RING_MAX; i++) { 329 - mtk_cmd_desc_ring_setup(cryp, i, &cap); 330 - mtk_res_desc_ring_setup(cryp, i, &cap); 331 - } 332 - 333 - writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN, 334 - cryp->base + PE_TOKEN_CTRL_STAT); 335 - 336 - /* Clear all pending interrupts */ 337 - writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK); 338 - writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR | 339 - MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT | 340 - MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN | 341 - MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN | 342 - MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN | 343 - MTK_PE_INT_OUT_EN, 344 - cryp->base + PE_INTERRUPT_CTRL_STAT); 345 - 346 - return 0; 347 - } 348 - 349 - static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw) 350 - { 351 - u32 val; 352 - 353 - if (hw == MTK_RING_MAX) 354 - val = readl(cryp->base + AIC_G_VERSION); 355 - else 356 - val = readl(cryp->base + AIC_VERSION(hw)); 357 - 358 - val &= MTK_AIC_VER_MSK; 359 - if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12) 360 - return -ENXIO; 361 - 362 - if (hw == MTK_RING_MAX) 363 - val = readl(cryp->base + AIC_G_OPTIONS); 364 - else 365 - val = readl(cryp->base + AIC_OPTIONS(hw)); 366 - 367 - val &= MTK_AIC_INT_MSK; 368 - if (!val || val > 32) 369 - return -ENXIO; 370 - 371 - return 0; 372 - } 373 - 374 - static int mtk_aic_init(struct mtk_cryp *cryp, int hw) 375 - { 376 - int err; 377 - 378 - err = mtk_aic_cap_check(cryp, hw); 379 - if (err) 380 - return err; 381 - 382 - /* Disable all interrupts and set initial configuration */ 383 - if (hw == MTK_RING_MAX) { 384 - writel(0, cryp->base + AIC_G_ENABLE_CTRL); 385 - writel(0, cryp->base + AIC_G_POL_CTRL); 386 - writel(0, cryp->base + AIC_G_TYPE_CTRL); 387 - writel(0, cryp->base + AIC_G_ENABLE_SET); 388 - } else { 389 - writel(0, cryp->base + AIC_ENABLE_CTRL(hw)); 390 - writel(0, cryp->base + AIC_POL_CTRL(hw)); 391 - writel(0, cryp->base + 
AIC_TYPE_CTRL(hw)); 392 - writel(0, cryp->base + AIC_ENABLE_SET(hw)); 393 - } 394 - 395 - return 0; 396 - } 397 - 398 - static int mtk_accelerator_init(struct mtk_cryp *cryp) 399 - { 400 - int i, err; 401 - 402 - /* Initialize advanced interrupt controller(AIC) */ 403 - for (i = 0; i < MTK_IRQ_NUM; i++) { 404 - err = mtk_aic_init(cryp, i); 405 - if (err) { 406 - dev_err(cryp->dev, "Failed to initialize AIC.\n"); 407 - return err; 408 - } 409 - } 410 - 411 - /* Initialize packet engine */ 412 - err = mtk_packet_engine_setup(cryp); 413 - if (err) { 414 - dev_err(cryp->dev, "Failed to configure packet engine.\n"); 415 - return err; 416 - } 417 - 418 - return 0; 419 - } 420 - 421 - static void mtk_desc_dma_free(struct mtk_cryp *cryp) 422 - { 423 - int i; 424 - 425 - for (i = 0; i < MTK_RING_MAX; i++) { 426 - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 427 - cryp->ring[i]->res_base, 428 - cryp->ring[i]->res_dma); 429 - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 430 - cryp->ring[i]->cmd_base, 431 - cryp->ring[i]->cmd_dma); 432 - kfree(cryp->ring[i]); 433 - } 434 - } 435 - 436 - static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) 437 - { 438 - struct mtk_ring **ring = cryp->ring; 439 - int i; 440 - 441 - for (i = 0; i < MTK_RING_MAX; i++) { 442 - ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); 443 - if (!ring[i]) 444 - goto err_cleanup; 445 - 446 - ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, 447 - MTK_DESC_RING_SZ, 448 - &ring[i]->cmd_dma, 449 - GFP_KERNEL); 450 - if (!ring[i]->cmd_base) 451 - goto err_cleanup; 452 - 453 - ring[i]->res_base = dma_alloc_coherent(cryp->dev, 454 - MTK_DESC_RING_SZ, 455 - &ring[i]->res_dma, 456 - GFP_KERNEL); 457 - if (!ring[i]->res_base) 458 - goto err_cleanup; 459 - 460 - ring[i]->cmd_next = ring[i]->cmd_base; 461 - ring[i]->res_next = ring[i]->res_base; 462 - } 463 - return 0; 464 - 465 - err_cleanup: 466 - do { 467 - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 468 - ring[i]->res_base, ring[i]->res_dma); 469 - 
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, 470 - ring[i]->cmd_base, ring[i]->cmd_dma); 471 - kfree(ring[i]); 472 - } while (i--); 473 - return -ENOMEM; 474 - } 475 - 476 - static int mtk_crypto_probe(struct platform_device *pdev) 477 - { 478 - struct mtk_cryp *cryp; 479 - int i, err; 480 - 481 - cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); 482 - if (!cryp) 483 - return -ENOMEM; 484 - 485 - cryp->base = devm_platform_ioremap_resource(pdev, 0); 486 - if (IS_ERR(cryp->base)) 487 - return PTR_ERR(cryp->base); 488 - 489 - for (i = 0; i < MTK_IRQ_NUM; i++) { 490 - cryp->irq[i] = platform_get_irq(pdev, i); 491 - if (cryp->irq[i] < 0) 492 - return cryp->irq[i]; 493 - } 494 - 495 - cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); 496 - if (IS_ERR(cryp->clk_cryp)) 497 - return -EPROBE_DEFER; 498 - 499 - cryp->dev = &pdev->dev; 500 - pm_runtime_enable(cryp->dev); 501 - pm_runtime_get_sync(cryp->dev); 502 - 503 - err = clk_prepare_enable(cryp->clk_cryp); 504 - if (err) 505 - goto err_clk_cryp; 506 - 507 - /* Allocate four command/result descriptor rings */ 508 - err = mtk_desc_ring_alloc(cryp); 509 - if (err) { 510 - dev_err(cryp->dev, "Unable to allocate descriptor rings.\n"); 511 - goto err_resource; 512 - } 513 - 514 - /* Initialize hardware modules */ 515 - err = mtk_accelerator_init(cryp); 516 - if (err) { 517 - dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n"); 518 - goto err_engine; 519 - } 520 - 521 - err = mtk_cipher_alg_register(cryp); 522 - if (err) { 523 - dev_err(cryp->dev, "Unable to register cipher algorithm.\n"); 524 - goto err_cipher; 525 - } 526 - 527 - err = mtk_hash_alg_register(cryp); 528 - if (err) { 529 - dev_err(cryp->dev, "Unable to register hash algorithm.\n"); 530 - goto err_hash; 531 - } 532 - 533 - platform_set_drvdata(pdev, cryp); 534 - return 0; 535 - 536 - err_hash: 537 - mtk_cipher_alg_release(cryp); 538 - err_cipher: 539 - mtk_dfe_dse_reset(cryp); 540 - err_engine: 541 - mtk_desc_dma_free(cryp); 542 - 
err_resource: 543 - clk_disable_unprepare(cryp->clk_cryp); 544 - err_clk_cryp: 545 - pm_runtime_put_sync(cryp->dev); 546 - pm_runtime_disable(cryp->dev); 547 - 548 - return err; 549 - } 550 - 551 - static int mtk_crypto_remove(struct platform_device *pdev) 552 - { 553 - struct mtk_cryp *cryp = platform_get_drvdata(pdev); 554 - 555 - mtk_hash_alg_release(cryp); 556 - mtk_cipher_alg_release(cryp); 557 - mtk_desc_dma_free(cryp); 558 - 559 - clk_disable_unprepare(cryp->clk_cryp); 560 - 561 - pm_runtime_put_sync(cryp->dev); 562 - pm_runtime_disable(cryp->dev); 563 - platform_set_drvdata(pdev, NULL); 564 - 565 - return 0; 566 - } 567 - 568 - static const struct of_device_id of_crypto_id[] = { 569 - { .compatible = "mediatek,eip97-crypto" }, 570 - {}, 571 - }; 572 - MODULE_DEVICE_TABLE(of, of_crypto_id); 573 - 574 - static struct platform_driver mtk_crypto_driver = { 575 - .probe = mtk_crypto_probe, 576 - .remove = mtk_crypto_remove, 577 - .driver = { 578 - .name = "mtk-crypto", 579 - .of_match_table = of_crypto_id, 580 - }, 581 - }; 582 - module_platform_driver(mtk_crypto_driver); 583 - 584 - MODULE_LICENSE("GPL"); 585 - MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>"); 586 - MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
-231
drivers/crypto/mediatek/mtk-platform.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Driver for EIP97 cryptographic accelerator. 4 - * 5 - * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com> 6 - */ 7 - 8 - #ifndef __MTK_PLATFORM_H_ 9 - #define __MTK_PLATFORM_H_ 10 - 11 - #include <crypto/algapi.h> 12 - #include <crypto/internal/aead.h> 13 - #include <crypto/internal/hash.h> 14 - #include <crypto/scatterwalk.h> 15 - #include <crypto/skcipher.h> 16 - #include <linux/crypto.h> 17 - #include <linux/dma-mapping.h> 18 - #include <linux/interrupt.h> 19 - #include <linux/scatterlist.h> 20 - #include "mtk-regs.h" 21 - 22 - #define MTK_RDR_PROC_THRESH BIT(0) 23 - #define MTK_RDR_PROC_MODE BIT(23) 24 - #define MTK_CNT_RST BIT(31) 25 - #define MTK_IRQ_RDR0 BIT(1) 26 - #define MTK_IRQ_RDR1 BIT(3) 27 - #define MTK_IRQ_RDR2 BIT(5) 28 - #define MTK_IRQ_RDR3 BIT(7) 29 - 30 - #define SIZE_IN_WORDS(x) ((x) >> 2) 31 - 32 - /** 33 - * Ring 0/1 are used by AES encrypt and decrypt. 34 - * Ring 2/3 are used by SHA. 35 - */ 36 - enum { 37 - MTK_RING0, 38 - MTK_RING1, 39 - MTK_RING2, 40 - MTK_RING3, 41 - MTK_RING_MAX 42 - }; 43 - 44 - #define MTK_REC_NUM (MTK_RING_MAX / 2) 45 - #define MTK_IRQ_NUM 5 46 - 47 - /** 48 - * struct mtk_desc - DMA descriptor 49 - * @hdr: the descriptor control header 50 - * @buf: DMA address of input buffer segment 51 - * @ct: DMA address of command token that control operation flow 52 - * @ct_hdr: the command token control header 53 - * @tag: the user-defined field 54 - * @tfm: DMA address of transform state 55 - * @bound: align descriptors offset boundary 56 - * 57 - * Structure passed to the crypto engine to describe where source 58 - * data needs to be fetched and how it needs to be processed. 
59 - */ 60 - struct mtk_desc { 61 - __le32 hdr; 62 - __le32 buf; 63 - __le32 ct; 64 - __le32 ct_hdr; 65 - __le32 tag; 66 - __le32 tfm; 67 - __le32 bound[2]; 68 - }; 69 - 70 - #define MTK_DESC_NUM 512 71 - #define MTK_DESC_OFF SIZE_IN_WORDS(sizeof(struct mtk_desc)) 72 - #define MTK_DESC_SZ (MTK_DESC_OFF - 2) 73 - #define MTK_DESC_RING_SZ ((sizeof(struct mtk_desc) * MTK_DESC_NUM)) 74 - #define MTK_DESC_CNT(x) ((MTK_DESC_OFF * (x)) << 2) 75 - #define MTK_DESC_LAST cpu_to_le32(BIT(22)) 76 - #define MTK_DESC_FIRST cpu_to_le32(BIT(23)) 77 - #define MTK_DESC_BUF_LEN(x) cpu_to_le32(x) 78 - #define MTK_DESC_CT_LEN(x) cpu_to_le32((x) << 24) 79 - 80 - /** 81 - * struct mtk_ring - Descriptor ring 82 - * @cmd_base: pointer to command descriptor ring base 83 - * @cmd_next: pointer to the next command descriptor 84 - * @cmd_dma: DMA address of command descriptor ring 85 - * @res_base: pointer to result descriptor ring base 86 - * @res_next: pointer to the next result descriptor 87 - * @res_prev: pointer to the previous result descriptor 88 - * @res_dma: DMA address of result descriptor ring 89 - * 90 - * A descriptor ring is a circular buffer that is used to manage 91 - * one or more descriptors. There are two type of descriptor rings; 92 - * the command descriptor ring and result descriptor ring. 
93 - */ 94 - struct mtk_ring { 95 - struct mtk_desc *cmd_base; 96 - struct mtk_desc *cmd_next; 97 - dma_addr_t cmd_dma; 98 - struct mtk_desc *res_base; 99 - struct mtk_desc *res_next; 100 - struct mtk_desc *res_prev; 101 - dma_addr_t res_dma; 102 - }; 103 - 104 - /** 105 - * struct mtk_aes_dma - Structure that holds sg list info 106 - * @sg: pointer to scatter-gather list 107 - * @nents: number of entries in the sg list 108 - * @remainder: remainder of sg list 109 - * @sg_len: number of entries in the sg mapped list 110 - */ 111 - struct mtk_aes_dma { 112 - struct scatterlist *sg; 113 - int nents; 114 - u32 remainder; 115 - u32 sg_len; 116 - }; 117 - 118 - struct mtk_aes_base_ctx; 119 - struct mtk_aes_rec; 120 - struct mtk_cryp; 121 - 122 - typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes); 123 - 124 - /** 125 - * struct mtk_aes_rec - AES operation record 126 - * @cryp: pointer to Cryptographic device 127 - * @queue: crypto request queue 128 - * @areq: pointer to async request 129 - * @done_task: the tasklet is use in AES interrupt 130 - * @queue_task: the tasklet is used to dequeue request 131 - * @ctx: pointer to current context 132 - * @src: the structure that holds source sg list info 133 - * @dst: the structure that holds destination sg list info 134 - * @aligned_sg: the scatter list is use to alignment 135 - * @real_dst: pointer to the destination sg list 136 - * @resume: pointer to resume function 137 - * @total: request buffer length 138 - * @buf: pointer to page buffer 139 - * @id: the current use of ring 140 - * @flags: it's describing AES operation state 141 - * @lock: the async queue lock 142 - * 143 - * Structure used to record AES execution state. 
144 - */ 145 - struct mtk_aes_rec { 146 - struct mtk_cryp *cryp; 147 - struct crypto_queue queue; 148 - struct crypto_async_request *areq; 149 - struct tasklet_struct done_task; 150 - struct tasklet_struct queue_task; 151 - struct mtk_aes_base_ctx *ctx; 152 - struct mtk_aes_dma src; 153 - struct mtk_aes_dma dst; 154 - 155 - struct scatterlist aligned_sg; 156 - struct scatterlist *real_dst; 157 - 158 - mtk_aes_fn resume; 159 - 160 - size_t total; 161 - void *buf; 162 - 163 - u8 id; 164 - unsigned long flags; 165 - /* queue lock */ 166 - spinlock_t lock; 167 - }; 168 - 169 - /** 170 - * struct mtk_sha_rec - SHA operation record 171 - * @cryp: pointer to Cryptographic device 172 - * @queue: crypto request queue 173 - * @req: pointer to ahash request 174 - * @done_task: the tasklet is use in SHA interrupt 175 - * @queue_task: the tasklet is used to dequeue request 176 - * @id: the current use of ring 177 - * @flags: it's describing SHA operation state 178 - * @lock: the async queue lock 179 - * 180 - * Structure used to record SHA execution state. 181 - */ 182 - struct mtk_sha_rec { 183 - struct mtk_cryp *cryp; 184 - struct crypto_queue queue; 185 - struct ahash_request *req; 186 - struct tasklet_struct done_task; 187 - struct tasklet_struct queue_task; 188 - 189 - u8 id; 190 - unsigned long flags; 191 - /* queue lock */ 192 - spinlock_t lock; 193 - }; 194 - 195 - /** 196 - * struct mtk_cryp - Cryptographic device 197 - * @base: pointer to mapped register I/O base 198 - * @dev: pointer to device 199 - * @clk_cryp: pointer to crypto clock 200 - * @irq: global system and rings IRQ 201 - * @ring: pointer to descriptor rings 202 - * @aes: pointer to operation record of AES 203 - * @sha: pointer to operation record of SHA 204 - * @aes_list: device list of AES 205 - * @sha_list: device list of SHA 206 - * @rec: it's used to select SHA record for tfm 207 - * 208 - * Structure storing cryptographic device information. 
209 - */ 210 - struct mtk_cryp { 211 - void __iomem *base; 212 - struct device *dev; 213 - struct clk *clk_cryp; 214 - int irq[MTK_IRQ_NUM]; 215 - 216 - struct mtk_ring *ring[MTK_RING_MAX]; 217 - struct mtk_aes_rec *aes[MTK_REC_NUM]; 218 - struct mtk_sha_rec *sha[MTK_REC_NUM]; 219 - 220 - struct list_head aes_list; 221 - struct list_head sha_list; 222 - 223 - bool rec; 224 - }; 225 - 226 - int mtk_cipher_alg_register(struct mtk_cryp *cryp); 227 - void mtk_cipher_alg_release(struct mtk_cryp *cryp); 228 - int mtk_hash_alg_register(struct mtk_cryp *cryp); 229 - void mtk_hash_alg_release(struct mtk_cryp *cryp); 230 - 231 - #endif /* __MTK_PLATFORM_H_ */
-190
drivers/crypto/mediatek/mtk-regs.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Support for MediaTek cryptographic accelerator. 4 - * 5 - * Copyright (c) 2016 MediaTek Inc. 6 - * Author: Ryder Lee <ryder.lee@mediatek.com> 7 - */ 8 - 9 - #ifndef __MTK_REGS_H__ 10 - #define __MTK_REGS_H__ 11 - 12 - /* HIA, Command Descriptor Ring Manager */ 13 - #define CDR_BASE_ADDR_LO(x) (0x0 + ((x) << 12)) 14 - #define CDR_BASE_ADDR_HI(x) (0x4 + ((x) << 12)) 15 - #define CDR_DATA_BASE_ADDR_LO(x) (0x8 + ((x) << 12)) 16 - #define CDR_DATA_BASE_ADDR_HI(x) (0xC + ((x) << 12)) 17 - #define CDR_ACD_BASE_ADDR_LO(x) (0x10 + ((x) << 12)) 18 - #define CDR_ACD_BASE_ADDR_HI(x) (0x14 + ((x) << 12)) 19 - #define CDR_RING_SIZE(x) (0x18 + ((x) << 12)) 20 - #define CDR_DESC_SIZE(x) (0x1C + ((x) << 12)) 21 - #define CDR_CFG(x) (0x20 + ((x) << 12)) 22 - #define CDR_DMA_CFG(x) (0x24 + ((x) << 12)) 23 - #define CDR_THRESH(x) (0x28 + ((x) << 12)) 24 - #define CDR_PREP_COUNT(x) (0x2C + ((x) << 12)) 25 - #define CDR_PROC_COUNT(x) (0x30 + ((x) << 12)) 26 - #define CDR_PREP_PNTR(x) (0x34 + ((x) << 12)) 27 - #define CDR_PROC_PNTR(x) (0x38 + ((x) << 12)) 28 - #define CDR_STAT(x) (0x3C + ((x) << 12)) 29 - 30 - /* HIA, Result Descriptor Ring Manager */ 31 - #define RDR_BASE_ADDR_LO(x) (0x800 + ((x) << 12)) 32 - #define RDR_BASE_ADDR_HI(x) (0x804 + ((x) << 12)) 33 - #define RDR_DATA_BASE_ADDR_LO(x) (0x808 + ((x) << 12)) 34 - #define RDR_DATA_BASE_ADDR_HI(x) (0x80C + ((x) << 12)) 35 - #define RDR_ACD_BASE_ADDR_LO(x) (0x810 + ((x) << 12)) 36 - #define RDR_ACD_BASE_ADDR_HI(x) (0x814 + ((x) << 12)) 37 - #define RDR_RING_SIZE(x) (0x818 + ((x) << 12)) 38 - #define RDR_DESC_SIZE(x) (0x81C + ((x) << 12)) 39 - #define RDR_CFG(x) (0x820 + ((x) << 12)) 40 - #define RDR_DMA_CFG(x) (0x824 + ((x) << 12)) 41 - #define RDR_THRESH(x) (0x828 + ((x) << 12)) 42 - #define RDR_PREP_COUNT(x) (0x82C + ((x) << 12)) 43 - #define RDR_PROC_COUNT(x) (0x830 + ((x) << 12)) 44 - #define RDR_PREP_PNTR(x) (0x834 + ((x) << 12)) 45 - #define RDR_PROC_PNTR(x) 
(0x838 + ((x) << 12)) 46 - #define RDR_STAT(x) (0x83C + ((x) << 12)) 47 - 48 - /* HIA, Ring AIC */ 49 - #define AIC_POL_CTRL(x) (0xE000 - ((x) << 12)) 50 - #define AIC_TYPE_CTRL(x) (0xE004 - ((x) << 12)) 51 - #define AIC_ENABLE_CTRL(x) (0xE008 - ((x) << 12)) 52 - #define AIC_RAW_STAL(x) (0xE00C - ((x) << 12)) 53 - #define AIC_ENABLE_SET(x) (0xE00C - ((x) << 12)) 54 - #define AIC_ENABLED_STAT(x) (0xE010 - ((x) << 12)) 55 - #define AIC_ACK(x) (0xE010 - ((x) << 12)) 56 - #define AIC_ENABLE_CLR(x) (0xE014 - ((x) << 12)) 57 - #define AIC_OPTIONS(x) (0xE018 - ((x) << 12)) 58 - #define AIC_VERSION(x) (0xE01C - ((x) << 12)) 59 - 60 - /* HIA, Global AIC */ 61 - #define AIC_G_POL_CTRL 0xF800 62 - #define AIC_G_TYPE_CTRL 0xF804 63 - #define AIC_G_ENABLE_CTRL 0xF808 64 - #define AIC_G_RAW_STAT 0xF80C 65 - #define AIC_G_ENABLE_SET 0xF80C 66 - #define AIC_G_ENABLED_STAT 0xF810 67 - #define AIC_G_ACK 0xF810 68 - #define AIC_G_ENABLE_CLR 0xF814 69 - #define AIC_G_OPTIONS 0xF818 70 - #define AIC_G_VERSION 0xF81C 71 - 72 - /* HIA, Data Fetch Engine */ 73 - #define DFE_CFG 0xF000 74 - #define DFE_PRIO_0 0xF010 75 - #define DFE_PRIO_1 0xF014 76 - #define DFE_PRIO_2 0xF018 77 - #define DFE_PRIO_3 0xF01C 78 - 79 - /* HIA, Data Fetch Engine access monitoring for CDR */ 80 - #define DFE_RING_REGION_LO(x) (0xF080 + ((x) << 3)) 81 - #define DFE_RING_REGION_HI(x) (0xF084 + ((x) << 3)) 82 - 83 - /* HIA, Data Fetch Engine thread control and status for thread */ 84 - #define DFE_THR_CTRL 0xF200 85 - #define DFE_THR_STAT 0xF204 86 - #define DFE_THR_DESC_CTRL 0xF208 87 - #define DFE_THR_DESC_DPTR_LO 0xF210 88 - #define DFE_THR_DESC_DPTR_HI 0xF214 89 - #define DFE_THR_DESC_ACDPTR_LO 0xF218 90 - #define DFE_THR_DESC_ACDPTR_HI 0xF21C 91 - 92 - /* HIA, Data Store Engine */ 93 - #define DSE_CFG 0xF400 94 - #define DSE_PRIO_0 0xF410 95 - #define DSE_PRIO_1 0xF414 96 - #define DSE_PRIO_2 0xF418 97 - #define DSE_PRIO_3 0xF41C 98 - 99 - /* HIA, Data Store Engine access monitoring for RDR */ 100 - #define 
DSE_RING_REGION_LO(x) (0xF480 + ((x) << 3)) 101 - #define DSE_RING_REGION_HI(x) (0xF484 + ((x) << 3)) 102 - 103 - /* HIA, Data Store Engine thread control and status for thread */ 104 - #define DSE_THR_CTRL 0xF600 105 - #define DSE_THR_STAT 0xF604 106 - #define DSE_THR_DESC_CTRL 0xF608 107 - #define DSE_THR_DESC_DPTR_LO 0xF610 108 - #define DSE_THR_DESC_DPTR_HI 0xF614 109 - #define DSE_THR_DESC_S_DPTR_LO 0xF618 110 - #define DSE_THR_DESC_S_DPTR_HI 0xF61C 111 - #define DSE_THR_ERROR_STAT 0xF620 112 - 113 - /* HIA Global */ 114 - #define HIA_MST_CTRL 0xFFF4 115 - #define HIA_OPTIONS 0xFFF8 116 - #define HIA_VERSION 0xFFFC 117 - 118 - /* Processing Engine Input Side, Processing Engine */ 119 - #define PE_IN_DBUF_THRESH 0x10000 120 - #define PE_IN_TBUF_THRESH 0x10100 121 - 122 - /* Packet Engine Configuration / Status Registers */ 123 - #define PE_TOKEN_CTRL_STAT 0x11000 124 - #define PE_FUNCTION_EN 0x11004 125 - #define PE_CONTEXT_CTRL 0x11008 126 - #define PE_INTERRUPT_CTRL_STAT 0x11010 127 - #define PE_CONTEXT_STAT 0x1100C 128 - #define PE_OUT_TRANS_CTRL_STAT 0x11018 129 - #define PE_OUT_BUF_CTRL 0x1101C 130 - 131 - /* Packet Engine PRNG Registers */ 132 - #define PE_PRNG_STAT 0x11040 133 - #define PE_PRNG_CTRL 0x11044 134 - #define PE_PRNG_SEED_L 0x11048 135 - #define PE_PRNG_SEED_H 0x1104C 136 - #define PE_PRNG_KEY_0_L 0x11050 137 - #define PE_PRNG_KEY_0_H 0x11054 138 - #define PE_PRNG_KEY_1_L 0x11058 139 - #define PE_PRNG_KEY_1_H 0x1105C 140 - #define PE_PRNG_RES_0 0x11060 141 - #define PE_PRNG_RES_1 0x11064 142 - #define PE_PRNG_RES_2 0x11068 143 - #define PE_PRNG_RES_3 0x1106C 144 - #define PE_PRNG_LFSR_L 0x11070 145 - #define PE_PRNG_LFSR_H 0x11074 146 - 147 - /* Packet Engine AIC */ 148 - #define PE_EIP96_AIC_POL_CTRL 0x113C0 149 - #define PE_EIP96_AIC_TYPE_CTRL 0x113C4 150 - #define PE_EIP96_AIC_ENABLE_CTRL 0x113C8 151 - #define PE_EIP96_AIC_RAW_STAT 0x113CC 152 - #define PE_EIP96_AIC_ENABLE_SET 0x113CC 153 - #define PE_EIP96_AIC_ENABLED_STAT 0x113D0 154 - 
#define PE_EIP96_AIC_ACK 0x113D0 155 - #define PE_EIP96_AIC_ENABLE_CLR 0x113D4 156 - #define PE_EIP96_AIC_OPTIONS 0x113D8 157 - #define PE_EIP96_AIC_VERSION 0x113DC 158 - 159 - /* Packet Engine Options & Version Registers */ 160 - #define PE_EIP96_OPTIONS 0x113F8 161 - #define PE_EIP96_VERSION 0x113FC 162 - 163 - /* Processing Engine Output Side */ 164 - #define PE_OUT_DBUF_THRESH 0x11C00 165 - #define PE_OUT_TBUF_THRESH 0x11D00 166 - 167 - /* Processing Engine Local AIC */ 168 - #define PE_AIC_POL_CTRL 0x11F00 169 - #define PE_AIC_TYPE_CTRL 0x11F04 170 - #define PE_AIC_ENABLE_CTRL 0x11F08 171 - #define PE_AIC_RAW_STAT 0x11F0C 172 - #define PE_AIC_ENABLE_SET 0x11F0C 173 - #define PE_AIC_ENABLED_STAT 0x11F10 174 - #define PE_AIC_ENABLE_CLR 0x11F14 175 - #define PE_AIC_OPTIONS 0x11F18 176 - #define PE_AIC_VERSION 0x11F1C 177 - 178 - /* Processing Engine General Configuration and Version */ 179 - #define PE_IN_FLIGHT 0x11FF0 180 - #define PE_OPTIONS 0x11FF8 181 - #define PE_VERSION 0x11FFC 182 - 183 - /* EIP-97 - Global */ 184 - #define EIP97_CLOCK_STATE 0x1FFE4 185 - #define EIP97_FORCE_CLOCK_ON 0x1FFE8 186 - #define EIP97_FORCE_CLOCK_OFF 0x1FFEC 187 - #define EIP97_MST_CTRL 0x1FFF4 188 - #define EIP97_OPTIONS 0x1FFF8 189 - #define EIP97_VERSION 0x1FFFC 190 - #endif /* __MTK_REGS_H__ */
-1353
drivers/crypto/mediatek/mtk-sha.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-only 2 - /* 3 - * Cryptographic API. 4 - * 5 - * Driver for EIP97 SHA1/SHA2(HMAC) acceleration. 6 - * 7 - * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com> 8 - * 9 - * Some ideas are from atmel-sha.c and omap-sham.c drivers. 10 - */ 11 - 12 - #include <crypto/hmac.h> 13 - #include <crypto/sha1.h> 14 - #include <crypto/sha2.h> 15 - #include "mtk-platform.h" 16 - 17 - #define SHA_ALIGN_MSK (sizeof(u32) - 1) 18 - #define SHA_QUEUE_SIZE 512 19 - #define SHA_BUF_SIZE ((u32)PAGE_SIZE) 20 - 21 - #define SHA_OP_UPDATE 1 22 - #define SHA_OP_FINAL 2 23 - 24 - #define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0)) 25 - #define SHA_MAX_DIGEST_BUF_SIZE 32 26 - 27 - /* SHA command token */ 28 - #define SHA_CT_SIZE 5 29 - #define SHA_CT_CTRL_HDR cpu_to_le32(0x02220000) 30 - #define SHA_CMD0 cpu_to_le32(0x03020000) 31 - #define SHA_CMD1 cpu_to_le32(0x21060000) 32 - #define SHA_CMD2 cpu_to_le32(0xe0e63802) 33 - 34 - /* SHA transform information */ 35 - #define SHA_TFM_HASH cpu_to_le32(0x2 << 0) 36 - #define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8) 37 - #define SHA_TFM_START cpu_to_le32(0x1 << 4) 38 - #define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5) 39 - #define SHA_TFM_HASH_STORE cpu_to_le32(0x1 << 19) 40 - #define SHA_TFM_SHA1 cpu_to_le32(0x2 << 23) 41 - #define SHA_TFM_SHA256 cpu_to_le32(0x3 << 23) 42 - #define SHA_TFM_SHA224 cpu_to_le32(0x4 << 23) 43 - #define SHA_TFM_SHA512 cpu_to_le32(0x5 << 23) 44 - #define SHA_TFM_SHA384 cpu_to_le32(0x6 << 23) 45 - #define SHA_TFM_DIGEST(x) cpu_to_le32(((x) & GENMASK(3, 0)) << 24) 46 - 47 - /* SHA flags */ 48 - #define SHA_FLAGS_BUSY BIT(0) 49 - #define SHA_FLAGS_FINAL BIT(1) 50 - #define SHA_FLAGS_FINUP BIT(2) 51 - #define SHA_FLAGS_SG BIT(3) 52 - #define SHA_FLAGS_ALGO_MSK GENMASK(8, 4) 53 - #define SHA_FLAGS_SHA1 BIT(4) 54 - #define SHA_FLAGS_SHA224 BIT(5) 55 - #define SHA_FLAGS_SHA256 BIT(6) 56 - #define SHA_FLAGS_SHA384 BIT(7) 57 - #define SHA_FLAGS_SHA512 BIT(8) 58 - #define SHA_FLAGS_HMAC 
BIT(9) 59 - #define SHA_FLAGS_PAD BIT(10) 60 - 61 - /** 62 - * mtk_sha_info - hardware information of AES 63 - * @cmd: command token, hardware instruction 64 - * @tfm: transform state of cipher algorithm. 65 - * @state: contains keys and initial vectors. 66 - * 67 - */ 68 - struct mtk_sha_info { 69 - __le32 ctrl[2]; 70 - __le32 cmd[3]; 71 - __le32 tfm[2]; 72 - __le32 digest[SHA_MAX_DIGEST_BUF_SIZE]; 73 - }; 74 - 75 - struct mtk_sha_reqctx { 76 - struct mtk_sha_info info; 77 - unsigned long flags; 78 - unsigned long op; 79 - 80 - u64 digcnt; 81 - size_t bufcnt; 82 - dma_addr_t dma_addr; 83 - 84 - __le32 ct_hdr; 85 - u32 ct_size; 86 - dma_addr_t ct_dma; 87 - dma_addr_t tfm_dma; 88 - 89 - /* Walk state */ 90 - struct scatterlist *sg; 91 - u32 offset; /* Offset in current sg */ 92 - u32 total; /* Total request */ 93 - size_t ds; 94 - size_t bs; 95 - 96 - u8 *buffer; 97 - }; 98 - 99 - struct mtk_sha_hmac_ctx { 100 - struct crypto_shash *shash; 101 - u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 102 - u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); 103 - }; 104 - 105 - struct mtk_sha_ctx { 106 - struct mtk_cryp *cryp; 107 - unsigned long flags; 108 - u8 id; 109 - u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32)); 110 - 111 - struct mtk_sha_hmac_ctx base[]; 112 - }; 113 - 114 - struct mtk_sha_drv { 115 - struct list_head dev_list; 116 - /* Device list lock */ 117 - spinlock_t lock; 118 - }; 119 - 120 - static struct mtk_sha_drv mtk_sha = { 121 - .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list), 122 - .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock), 123 - }; 124 - 125 - static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id, 126 - struct ahash_request *req); 127 - 128 - static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset) 129 - { 130 - return readl_relaxed(cryp->base + offset); 131 - } 132 - 133 - static inline void mtk_sha_write(struct mtk_cryp *cryp, 134 - u32 offset, u32 value) 135 - { 136 - writel_relaxed(value, cryp->base + offset); 137 - } 138 - 139 - 
static inline void mtk_sha_ring_shift(struct mtk_ring *ring, 140 - struct mtk_desc **cmd_curr, 141 - struct mtk_desc **res_curr, 142 - int *count) 143 - { 144 - *cmd_curr = ring->cmd_next++; 145 - *res_curr = ring->res_next++; 146 - (*count)++; 147 - 148 - if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) { 149 - ring->cmd_next = ring->cmd_base; 150 - ring->res_next = ring->res_base; 151 - } 152 - } 153 - 154 - static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx) 155 - { 156 - struct mtk_cryp *cryp = NULL; 157 - struct mtk_cryp *tmp; 158 - 159 - spin_lock_bh(&mtk_sha.lock); 160 - if (!tctx->cryp) { 161 - list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) { 162 - cryp = tmp; 163 - break; 164 - } 165 - tctx->cryp = cryp; 166 - } else { 167 - cryp = tctx->cryp; 168 - } 169 - 170 - /* 171 - * Assign record id to tfm in round-robin fashion, and this 172 - * will help tfm to bind to corresponding descriptor rings. 173 - */ 174 - tctx->id = cryp->rec; 175 - cryp->rec = !cryp->rec; 176 - 177 - spin_unlock_bh(&mtk_sha.lock); 178 - 179 - return cryp; 180 - } 181 - 182 - static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx) 183 - { 184 - size_t count; 185 - 186 - while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) { 187 - count = min(ctx->sg->length - ctx->offset, ctx->total); 188 - count = min(count, SHA_BUF_SIZE - ctx->bufcnt); 189 - 190 - if (count <= 0) { 191 - /* 192 - * Check if count <= 0 because the buffer is full or 193 - * because the sg length is 0. In the latest case, 194 - * check if there is another sg in the list, a 0 length 195 - * sg doesn't necessarily mean the end of the sg list. 
196 - */ 197 - if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { 198 - ctx->sg = sg_next(ctx->sg); 199 - continue; 200 - } else { 201 - break; 202 - } 203 - } 204 - 205 - scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, 206 - ctx->offset, count, 0); 207 - 208 - ctx->bufcnt += count; 209 - ctx->offset += count; 210 - ctx->total -= count; 211 - 212 - if (ctx->offset == ctx->sg->length) { 213 - ctx->sg = sg_next(ctx->sg); 214 - if (ctx->sg) 215 - ctx->offset = 0; 216 - else 217 - ctx->total = 0; 218 - } 219 - } 220 - 221 - return 0; 222 - } 223 - 224 - /* 225 - * The purpose of this padding is to ensure that the padded message is a 226 - * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). 227 - * The bit "1" is appended at the end of the message followed by 228 - * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or 229 - * 128 bits block (SHA384/SHA512) equals to the message length in bits 230 - * is appended. 231 - * 232 - * For SHA1/SHA224/SHA256, padlen is calculated as followed: 233 - * - if message length < 56 bytes then padlen = 56 - message length 234 - * - else padlen = 64 + 56 - message length 235 - * 236 - * For SHA384/SHA512, padlen is calculated as followed: 237 - * - if message length < 112 bytes then padlen = 112 - message length 238 - * - else padlen = 128 + 112 - message length 239 - */ 240 - static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len) 241 - { 242 - u32 index, padlen; 243 - __be64 bits[2]; 244 - u64 size = ctx->digcnt; 245 - 246 - size += ctx->bufcnt; 247 - size += len; 248 - 249 - bits[1] = cpu_to_be64(size << 3); 250 - bits[0] = cpu_to_be64(size >> 61); 251 - 252 - switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { 253 - case SHA_FLAGS_SHA384: 254 - case SHA_FLAGS_SHA512: 255 - index = ctx->bufcnt & 0x7f; 256 - padlen = (index < 112) ? 
(112 - index) : ((128 + 112) - index); 257 - *(ctx->buffer + ctx->bufcnt) = 0x80; 258 - memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); 259 - memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); 260 - ctx->bufcnt += padlen + 16; 261 - ctx->flags |= SHA_FLAGS_PAD; 262 - break; 263 - 264 - default: 265 - index = ctx->bufcnt & 0x3f; 266 - padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); 267 - *(ctx->buffer + ctx->bufcnt) = 0x80; 268 - memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); 269 - memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); 270 - ctx->bufcnt += padlen + 8; 271 - ctx->flags |= SHA_FLAGS_PAD; 272 - break; 273 - } 274 - } 275 - 276 - /* Initialize basic transform information of SHA */ 277 - static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx) 278 - { 279 - struct mtk_sha_info *info = &ctx->info; 280 - 281 - ctx->ct_hdr = SHA_CT_CTRL_HDR; 282 - ctx->ct_size = SHA_CT_SIZE; 283 - 284 - info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds)); 285 - 286 - switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { 287 - case SHA_FLAGS_SHA1: 288 - info->tfm[0] |= SHA_TFM_SHA1; 289 - break; 290 - case SHA_FLAGS_SHA224: 291 - info->tfm[0] |= SHA_TFM_SHA224; 292 - break; 293 - case SHA_FLAGS_SHA256: 294 - info->tfm[0] |= SHA_TFM_SHA256; 295 - break; 296 - case SHA_FLAGS_SHA384: 297 - info->tfm[0] |= SHA_TFM_SHA384; 298 - break; 299 - case SHA_FLAGS_SHA512: 300 - info->tfm[0] |= SHA_TFM_SHA512; 301 - break; 302 - 303 - default: 304 - /* Should not happen... */ 305 - return; 306 - } 307 - 308 - info->tfm[1] = SHA_TFM_HASH_STORE; 309 - info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START; 310 - info->ctrl[1] = info->tfm[1]; 311 - 312 - info->cmd[0] = SHA_CMD0; 313 - info->cmd[1] = SHA_CMD1; 314 - info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); 315 - } 316 - 317 - /* 318 - * Update input data length field of transform information and 319 - * map it to DMA region. 
320 - */ 321 - static int mtk_sha_info_update(struct mtk_cryp *cryp, 322 - struct mtk_sha_rec *sha, 323 - size_t len1, size_t len2) 324 - { 325 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 326 - struct mtk_sha_info *info = &ctx->info; 327 - 328 - ctx->ct_hdr &= ~SHA_DATA_LEN_MSK; 329 - ctx->ct_hdr |= cpu_to_le32(len1 + len2); 330 - info->cmd[0] &= ~SHA_DATA_LEN_MSK; 331 - info->cmd[0] |= cpu_to_le32(len1 + len2); 332 - 333 - /* Setting SHA_TFM_START only for the first iteration */ 334 - if (ctx->digcnt) 335 - info->ctrl[0] &= ~SHA_TFM_START; 336 - 337 - ctx->digcnt += len1; 338 - 339 - ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), 340 - DMA_BIDIRECTIONAL); 341 - if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) { 342 - dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); 343 - return -EINVAL; 344 - } 345 - 346 - ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd); 347 - 348 - return 0; 349 - } 350 - 351 - /* 352 - * Because of hardware limitation, we must pre-calculate the inner 353 - * and outer digest that need to be processed firstly by engine, then 354 - * apply the result digest to the input message. These complex hashing 355 - * procedures limits HMAC performance, so we use fallback SW encoding. 
356 - */ 357 - static int mtk_sha_finish_hmac(struct ahash_request *req) 358 - { 359 - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 360 - struct mtk_sha_hmac_ctx *bctx = tctx->base; 361 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 362 - 363 - SHASH_DESC_ON_STACK(shash, bctx->shash); 364 - 365 - shash->tfm = bctx->shash; 366 - 367 - return crypto_shash_init(shash) ?: 368 - crypto_shash_update(shash, bctx->opad, ctx->bs) ?: 369 - crypto_shash_finup(shash, req->result, ctx->ds, req->result); 370 - } 371 - 372 - /* Initialize request context */ 373 - static int mtk_sha_init(struct ahash_request *req) 374 - { 375 - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 376 - struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm); 377 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 378 - 379 - ctx->flags = 0; 380 - ctx->ds = crypto_ahash_digestsize(tfm); 381 - 382 - switch (ctx->ds) { 383 - case SHA1_DIGEST_SIZE: 384 - ctx->flags |= SHA_FLAGS_SHA1; 385 - ctx->bs = SHA1_BLOCK_SIZE; 386 - break; 387 - case SHA224_DIGEST_SIZE: 388 - ctx->flags |= SHA_FLAGS_SHA224; 389 - ctx->bs = SHA224_BLOCK_SIZE; 390 - break; 391 - case SHA256_DIGEST_SIZE: 392 - ctx->flags |= SHA_FLAGS_SHA256; 393 - ctx->bs = SHA256_BLOCK_SIZE; 394 - break; 395 - case SHA384_DIGEST_SIZE: 396 - ctx->flags |= SHA_FLAGS_SHA384; 397 - ctx->bs = SHA384_BLOCK_SIZE; 398 - break; 399 - case SHA512_DIGEST_SIZE: 400 - ctx->flags |= SHA_FLAGS_SHA512; 401 - ctx->bs = SHA512_BLOCK_SIZE; 402 - break; 403 - default: 404 - return -EINVAL; 405 - } 406 - 407 - ctx->bufcnt = 0; 408 - ctx->digcnt = 0; 409 - ctx->buffer = tctx->buf; 410 - 411 - if (tctx->flags & SHA_FLAGS_HMAC) { 412 - struct mtk_sha_hmac_ctx *bctx = tctx->base; 413 - 414 - memcpy(ctx->buffer, bctx->ipad, ctx->bs); 415 - ctx->bufcnt = ctx->bs; 416 - ctx->flags |= SHA_FLAGS_HMAC; 417 - } 418 - 419 - return 0; 420 - } 421 - 422 - static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha, 423 - dma_addr_t addr1, size_t len1, 
424 - dma_addr_t addr2, size_t len2) 425 - { 426 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 427 - struct mtk_ring *ring = cryp->ring[sha->id]; 428 - struct mtk_desc *cmd, *res; 429 - int err, count = 0; 430 - 431 - err = mtk_sha_info_update(cryp, sha, len1, len2); 432 - if (err) 433 - return err; 434 - 435 - /* Fill in the command/result descriptors */ 436 - mtk_sha_ring_shift(ring, &cmd, &res, &count); 437 - 438 - res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1); 439 - cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) | 440 - MTK_DESC_CT_LEN(ctx->ct_size); 441 - cmd->buf = cpu_to_le32(addr1); 442 - cmd->ct = cpu_to_le32(ctx->ct_dma); 443 - cmd->ct_hdr = ctx->ct_hdr; 444 - cmd->tfm = cpu_to_le32(ctx->tfm_dma); 445 - 446 - if (len2) { 447 - mtk_sha_ring_shift(ring, &cmd, &res, &count); 448 - 449 - res->hdr = MTK_DESC_BUF_LEN(len2); 450 - cmd->hdr = MTK_DESC_BUF_LEN(len2); 451 - cmd->buf = cpu_to_le32(addr2); 452 - } 453 - 454 - cmd->hdr |= MTK_DESC_LAST; 455 - res->hdr |= MTK_DESC_LAST; 456 - 457 - /* 458 - * Make sure that all changes to the DMA ring are done before we 459 - * start engine. 
460 - */ 461 - wmb(); 462 - /* Start DMA transfer */ 463 - mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); 464 - mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); 465 - 466 - return -EINPROGRESS; 467 - } 468 - 469 - static int mtk_sha_dma_map(struct mtk_cryp *cryp, 470 - struct mtk_sha_rec *sha, 471 - struct mtk_sha_reqctx *ctx, 472 - size_t count) 473 - { 474 - ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, 475 - SHA_BUF_SIZE, DMA_TO_DEVICE); 476 - if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { 477 - dev_err(cryp->dev, "dma map error\n"); 478 - return -EINVAL; 479 - } 480 - 481 - ctx->flags &= ~SHA_FLAGS_SG; 482 - 483 - return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0); 484 - } 485 - 486 - static int mtk_sha_update_slow(struct mtk_cryp *cryp, 487 - struct mtk_sha_rec *sha) 488 - { 489 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 490 - size_t count; 491 - u32 final; 492 - 493 - mtk_sha_append_sg(ctx); 494 - 495 - final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; 496 - 497 - dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt); 498 - 499 - if (final) { 500 - sha->flags |= SHA_FLAGS_FINAL; 501 - mtk_sha_fill_padding(ctx, 0); 502 - } 503 - 504 - if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) { 505 - count = ctx->bufcnt; 506 - ctx->bufcnt = 0; 507 - 508 - return mtk_sha_dma_map(cryp, sha, ctx, count); 509 - } 510 - return 0; 511 - } 512 - 513 - static int mtk_sha_update_start(struct mtk_cryp *cryp, 514 - struct mtk_sha_rec *sha) 515 - { 516 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 517 - u32 len, final, tail; 518 - struct scatterlist *sg; 519 - 520 - if (!ctx->total) 521 - return 0; 522 - 523 - if (ctx->bufcnt || ctx->offset) 524 - return mtk_sha_update_slow(cryp, sha); 525 - 526 - sg = ctx->sg; 527 - 528 - if (!IS_ALIGNED(sg->offset, sizeof(u32))) 529 - return mtk_sha_update_slow(cryp, sha); 530 - 531 - if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, 
ctx->bs)) 532 - /* size is not ctx->bs aligned */ 533 - return mtk_sha_update_slow(cryp, sha); 534 - 535 - len = min(ctx->total, sg->length); 536 - 537 - if (sg_is_last(sg)) { 538 - if (!(ctx->flags & SHA_FLAGS_FINUP)) { 539 - /* not last sg must be ctx->bs aligned */ 540 - tail = len & (ctx->bs - 1); 541 - len -= tail; 542 - } 543 - } 544 - 545 - ctx->total -= len; 546 - ctx->offset = len; /* offset where to start slow */ 547 - 548 - final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; 549 - 550 - /* Add padding */ 551 - if (final) { 552 - size_t count; 553 - 554 - tail = len & (ctx->bs - 1); 555 - len -= tail; 556 - ctx->total += tail; 557 - ctx->offset = len; /* offset where to start slow */ 558 - 559 - sg = ctx->sg; 560 - mtk_sha_append_sg(ctx); 561 - mtk_sha_fill_padding(ctx, len); 562 - 563 - ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, 564 - SHA_BUF_SIZE, DMA_TO_DEVICE); 565 - if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { 566 - dev_err(cryp->dev, "dma map bytes error\n"); 567 - return -EINVAL; 568 - } 569 - 570 - sha->flags |= SHA_FLAGS_FINAL; 571 - count = ctx->bufcnt; 572 - ctx->bufcnt = 0; 573 - 574 - if (len == 0) { 575 - ctx->flags &= ~SHA_FLAGS_SG; 576 - return mtk_sha_xmit(cryp, sha, ctx->dma_addr, 577 - count, 0, 0); 578 - 579 - } else { 580 - ctx->sg = sg; 581 - if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { 582 - dev_err(cryp->dev, "dma_map_sg error\n"); 583 - return -EINVAL; 584 - } 585 - 586 - ctx->flags |= SHA_FLAGS_SG; 587 - return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), 588 - len, ctx->dma_addr, count); 589 - } 590 - } 591 - 592 - if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { 593 - dev_err(cryp->dev, "dma_map_sg error\n"); 594 - return -EINVAL; 595 - } 596 - 597 - ctx->flags |= SHA_FLAGS_SG; 598 - 599 - return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), 600 - len, 0, 0); 601 - } 602 - 603 - static int mtk_sha_final_req(struct mtk_cryp *cryp, 604 - struct mtk_sha_rec *sha) 605 - { 
606 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 607 - size_t count; 608 - 609 - mtk_sha_fill_padding(ctx, 0); 610 - 611 - sha->flags |= SHA_FLAGS_FINAL; 612 - count = ctx->bufcnt; 613 - ctx->bufcnt = 0; 614 - 615 - return mtk_sha_dma_map(cryp, sha, ctx, count); 616 - } 617 - 618 - /* Copy ready hash (+ finalize hmac) */ 619 - static int mtk_sha_finish(struct ahash_request *req) 620 - { 621 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 622 - __le32 *digest = ctx->info.digest; 623 - u32 *result = (u32 *)req->result; 624 - int i; 625 - 626 - /* Get the hash from the digest buffer */ 627 - for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++) 628 - result[i] = le32_to_cpu(digest[i]); 629 - 630 - if (ctx->flags & SHA_FLAGS_HMAC) 631 - return mtk_sha_finish_hmac(req); 632 - 633 - return 0; 634 - } 635 - 636 - static void mtk_sha_finish_req(struct mtk_cryp *cryp, 637 - struct mtk_sha_rec *sha, 638 - int err) 639 - { 640 - if (likely(!err && (SHA_FLAGS_FINAL & sha->flags))) 641 - err = mtk_sha_finish(sha->req); 642 - 643 - sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL); 644 - 645 - sha->req->base.complete(&sha->req->base, err); 646 - 647 - /* Handle new request */ 648 - tasklet_schedule(&sha->queue_task); 649 - } 650 - 651 - static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id, 652 - struct ahash_request *req) 653 - { 654 - struct mtk_sha_rec *sha = cryp->sha[id]; 655 - struct crypto_async_request *async_req, *backlog; 656 - struct mtk_sha_reqctx *ctx; 657 - unsigned long flags; 658 - int err = 0, ret = 0; 659 - 660 - spin_lock_irqsave(&sha->lock, flags); 661 - if (req) 662 - ret = ahash_enqueue_request(&sha->queue, req); 663 - 664 - if (SHA_FLAGS_BUSY & sha->flags) { 665 - spin_unlock_irqrestore(&sha->lock, flags); 666 - return ret; 667 - } 668 - 669 - backlog = crypto_get_backlog(&sha->queue); 670 - async_req = crypto_dequeue_request(&sha->queue); 671 - if (async_req) 672 - sha->flags |= SHA_FLAGS_BUSY; 673 - spin_unlock_irqrestore(&sha->lock, 
flags); 674 - 675 - if (!async_req) 676 - return ret; 677 - 678 - if (backlog) 679 - backlog->complete(backlog, -EINPROGRESS); 680 - 681 - req = ahash_request_cast(async_req); 682 - ctx = ahash_request_ctx(req); 683 - 684 - sha->req = req; 685 - 686 - mtk_sha_info_init(ctx); 687 - 688 - if (ctx->op == SHA_OP_UPDATE) { 689 - err = mtk_sha_update_start(cryp, sha); 690 - if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) 691 - /* No final() after finup() */ 692 - err = mtk_sha_final_req(cryp, sha); 693 - } else if (ctx->op == SHA_OP_FINAL) { 694 - err = mtk_sha_final_req(cryp, sha); 695 - } 696 - 697 - if (unlikely(err != -EINPROGRESS)) 698 - /* Task will not finish it, so do it here */ 699 - mtk_sha_finish_req(cryp, sha, err); 700 - 701 - return ret; 702 - } 703 - 704 - static int mtk_sha_enqueue(struct ahash_request *req, u32 op) 705 - { 706 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 707 - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); 708 - 709 - ctx->op = op; 710 - 711 - return mtk_sha_handle_queue(tctx->cryp, tctx->id, req); 712 - } 713 - 714 - static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha) 715 - { 716 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); 717 - 718 - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), 719 - DMA_BIDIRECTIONAL); 720 - 721 - if (ctx->flags & SHA_FLAGS_SG) { 722 - dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE); 723 - if (ctx->sg->length == ctx->offset) { 724 - ctx->sg = sg_next(ctx->sg); 725 - if (ctx->sg) 726 - ctx->offset = 0; 727 - } 728 - if (ctx->flags & SHA_FLAGS_PAD) { 729 - dma_unmap_single(cryp->dev, ctx->dma_addr, 730 - SHA_BUF_SIZE, DMA_TO_DEVICE); 731 - } 732 - } else 733 - dma_unmap_single(cryp->dev, ctx->dma_addr, 734 - SHA_BUF_SIZE, DMA_TO_DEVICE); 735 - } 736 - 737 - static void mtk_sha_complete(struct mtk_cryp *cryp, 738 - struct mtk_sha_rec *sha) 739 - { 740 - int err = 0; 741 - 742 - err = mtk_sha_update_start(cryp, sha); 743 - if (err != 
-EINPROGRESS) 744 - mtk_sha_finish_req(cryp, sha, err); 745 - } 746 - 747 - static int mtk_sha_update(struct ahash_request *req) 748 - { 749 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 750 - 751 - ctx->total = req->nbytes; 752 - ctx->sg = req->src; 753 - ctx->offset = 0; 754 - 755 - if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) && 756 - !(ctx->flags & SHA_FLAGS_FINUP)) 757 - return mtk_sha_append_sg(ctx); 758 - 759 - return mtk_sha_enqueue(req, SHA_OP_UPDATE); 760 - } 761 - 762 - static int mtk_sha_final(struct ahash_request *req) 763 - { 764 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 765 - 766 - ctx->flags |= SHA_FLAGS_FINUP; 767 - 768 - if (ctx->flags & SHA_FLAGS_PAD) 769 - return mtk_sha_finish(req); 770 - 771 - return mtk_sha_enqueue(req, SHA_OP_FINAL); 772 - } 773 - 774 - static int mtk_sha_finup(struct ahash_request *req) 775 - { 776 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 777 - int err1, err2; 778 - 779 - ctx->flags |= SHA_FLAGS_FINUP; 780 - 781 - err1 = mtk_sha_update(req); 782 - if (err1 == -EINPROGRESS || 783 - (err1 == -EBUSY && (ahash_request_flags(req) & 784 - CRYPTO_TFM_REQ_MAY_BACKLOG))) 785 - return err1; 786 - /* 787 - * final() has to be always called to cleanup resources 788 - * even if update() failed 789 - */ 790 - err2 = mtk_sha_final(req); 791 - 792 - return err1 ?: err2; 793 - } 794 - 795 - static int mtk_sha_digest(struct ahash_request *req) 796 - { 797 - return mtk_sha_init(req) ?: mtk_sha_finup(req); 798 - } 799 - 800 - static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key, 801 - u32 keylen) 802 - { 803 - struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm); 804 - struct mtk_sha_hmac_ctx *bctx = tctx->base; 805 - size_t bs = crypto_shash_blocksize(bctx->shash); 806 - size_t ds = crypto_shash_digestsize(bctx->shash); 807 - int err, i; 808 - 809 - if (keylen > bs) { 810 - err = crypto_shash_tfm_digest(bctx->shash, key, keylen, 811 - bctx->ipad); 812 - if (err) 813 - return err; 814 - keylen = 
ds; 815 - } else { 816 - memcpy(bctx->ipad, key, keylen); 817 - } 818 - 819 - memset(bctx->ipad + keylen, 0, bs - keylen); 820 - memcpy(bctx->opad, bctx->ipad, bs); 821 - 822 - for (i = 0; i < bs; i++) { 823 - bctx->ipad[i] ^= HMAC_IPAD_VALUE; 824 - bctx->opad[i] ^= HMAC_OPAD_VALUE; 825 - } 826 - 827 - return 0; 828 - } 829 - 830 - static int mtk_sha_export(struct ahash_request *req, void *out) 831 - { 832 - const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 833 - 834 - memcpy(out, ctx, sizeof(*ctx)); 835 - return 0; 836 - } 837 - 838 - static int mtk_sha_import(struct ahash_request *req, const void *in) 839 - { 840 - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); 841 - 842 - memcpy(ctx, in, sizeof(*ctx)); 843 - return 0; 844 - } 845 - 846 - static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm, 847 - const char *alg_base) 848 - { 849 - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm); 850 - struct mtk_cryp *cryp = NULL; 851 - 852 - cryp = mtk_sha_find_dev(tctx); 853 - if (!cryp) 854 - return -ENODEV; 855 - 856 - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 857 - sizeof(struct mtk_sha_reqctx)); 858 - 859 - if (alg_base) { 860 - struct mtk_sha_hmac_ctx *bctx = tctx->base; 861 - 862 - tctx->flags |= SHA_FLAGS_HMAC; 863 - bctx->shash = crypto_alloc_shash(alg_base, 0, 864 - CRYPTO_ALG_NEED_FALLBACK); 865 - if (IS_ERR(bctx->shash)) { 866 - pr_err("base driver %s could not be loaded.\n", 867 - alg_base); 868 - 869 - return PTR_ERR(bctx->shash); 870 - } 871 - } 872 - return 0; 873 - } 874 - 875 - static int mtk_sha_cra_init(struct crypto_tfm *tfm) 876 - { 877 - return mtk_sha_cra_init_alg(tfm, NULL); 878 - } 879 - 880 - static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm) 881 - { 882 - return mtk_sha_cra_init_alg(tfm, "sha1"); 883 - } 884 - 885 - static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm) 886 - { 887 - return mtk_sha_cra_init_alg(tfm, "sha224"); 888 - } 889 - 890 - static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm) 891 - { 
892 - return mtk_sha_cra_init_alg(tfm, "sha256"); 893 - } 894 - 895 - static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm) 896 - { 897 - return mtk_sha_cra_init_alg(tfm, "sha384"); 898 - } 899 - 900 - static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm) 901 - { 902 - return mtk_sha_cra_init_alg(tfm, "sha512"); 903 - } 904 - 905 - static void mtk_sha_cra_exit(struct crypto_tfm *tfm) 906 - { 907 - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm); 908 - 909 - if (tctx->flags & SHA_FLAGS_HMAC) { 910 - struct mtk_sha_hmac_ctx *bctx = tctx->base; 911 - 912 - crypto_free_shash(bctx->shash); 913 - } 914 - } 915 - 916 - static struct ahash_alg algs_sha1_sha224_sha256[] = { 917 - { 918 - .init = mtk_sha_init, 919 - .update = mtk_sha_update, 920 - .final = mtk_sha_final, 921 - .finup = mtk_sha_finup, 922 - .digest = mtk_sha_digest, 923 - .export = mtk_sha_export, 924 - .import = mtk_sha_import, 925 - .halg.digestsize = SHA1_DIGEST_SIZE, 926 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 927 - .halg.base = { 928 - .cra_name = "sha1", 929 - .cra_driver_name = "mtk-sha1", 930 - .cra_priority = 400, 931 - .cra_flags = CRYPTO_ALG_ASYNC, 932 - .cra_blocksize = SHA1_BLOCK_SIZE, 933 - .cra_ctxsize = sizeof(struct mtk_sha_ctx), 934 - .cra_alignmask = SHA_ALIGN_MSK, 935 - .cra_module = THIS_MODULE, 936 - .cra_init = mtk_sha_cra_init, 937 - .cra_exit = mtk_sha_cra_exit, 938 - } 939 - }, 940 - { 941 - .init = mtk_sha_init, 942 - .update = mtk_sha_update, 943 - .final = mtk_sha_final, 944 - .finup = mtk_sha_finup, 945 - .digest = mtk_sha_digest, 946 - .export = mtk_sha_export, 947 - .import = mtk_sha_import, 948 - .halg.digestsize = SHA224_DIGEST_SIZE, 949 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 950 - .halg.base = { 951 - .cra_name = "sha224", 952 - .cra_driver_name = "mtk-sha224", 953 - .cra_priority = 400, 954 - .cra_flags = CRYPTO_ALG_ASYNC, 955 - .cra_blocksize = SHA224_BLOCK_SIZE, 956 - .cra_ctxsize = sizeof(struct mtk_sha_ctx), 957 - .cra_alignmask = 
SHA_ALIGN_MSK, 958 - .cra_module = THIS_MODULE, 959 - .cra_init = mtk_sha_cra_init, 960 - .cra_exit = mtk_sha_cra_exit, 961 - } 962 - }, 963 - { 964 - .init = mtk_sha_init, 965 - .update = mtk_sha_update, 966 - .final = mtk_sha_final, 967 - .finup = mtk_sha_finup, 968 - .digest = mtk_sha_digest, 969 - .export = mtk_sha_export, 970 - .import = mtk_sha_import, 971 - .halg.digestsize = SHA256_DIGEST_SIZE, 972 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 973 - .halg.base = { 974 - .cra_name = "sha256", 975 - .cra_driver_name = "mtk-sha256", 976 - .cra_priority = 400, 977 - .cra_flags = CRYPTO_ALG_ASYNC, 978 - .cra_blocksize = SHA256_BLOCK_SIZE, 979 - .cra_ctxsize = sizeof(struct mtk_sha_ctx), 980 - .cra_alignmask = SHA_ALIGN_MSK, 981 - .cra_module = THIS_MODULE, 982 - .cra_init = mtk_sha_cra_init, 983 - .cra_exit = mtk_sha_cra_exit, 984 - } 985 - }, 986 - { 987 - .init = mtk_sha_init, 988 - .update = mtk_sha_update, 989 - .final = mtk_sha_final, 990 - .finup = mtk_sha_finup, 991 - .digest = mtk_sha_digest, 992 - .export = mtk_sha_export, 993 - .import = mtk_sha_import, 994 - .setkey = mtk_sha_setkey, 995 - .halg.digestsize = SHA1_DIGEST_SIZE, 996 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 997 - .halg.base = { 998 - .cra_name = "hmac(sha1)", 999 - .cra_driver_name = "mtk-hmac-sha1", 1000 - .cra_priority = 400, 1001 - .cra_flags = CRYPTO_ALG_ASYNC | 1002 - CRYPTO_ALG_NEED_FALLBACK, 1003 - .cra_blocksize = SHA1_BLOCK_SIZE, 1004 - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + 1005 - sizeof(struct mtk_sha_hmac_ctx), 1006 - .cra_alignmask = SHA_ALIGN_MSK, 1007 - .cra_module = THIS_MODULE, 1008 - .cra_init = mtk_sha_cra_sha1_init, 1009 - .cra_exit = mtk_sha_cra_exit, 1010 - } 1011 - }, 1012 - { 1013 - .init = mtk_sha_init, 1014 - .update = mtk_sha_update, 1015 - .final = mtk_sha_final, 1016 - .finup = mtk_sha_finup, 1017 - .digest = mtk_sha_digest, 1018 - .export = mtk_sha_export, 1019 - .import = mtk_sha_import, 1020 - .setkey = mtk_sha_setkey, 1021 - 
.halg.digestsize = SHA224_DIGEST_SIZE, 1022 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 1023 - .halg.base = { 1024 - .cra_name = "hmac(sha224)", 1025 - .cra_driver_name = "mtk-hmac-sha224", 1026 - .cra_priority = 400, 1027 - .cra_flags = CRYPTO_ALG_ASYNC | 1028 - CRYPTO_ALG_NEED_FALLBACK, 1029 - .cra_blocksize = SHA224_BLOCK_SIZE, 1030 - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + 1031 - sizeof(struct mtk_sha_hmac_ctx), 1032 - .cra_alignmask = SHA_ALIGN_MSK, 1033 - .cra_module = THIS_MODULE, 1034 - .cra_init = mtk_sha_cra_sha224_init, 1035 - .cra_exit = mtk_sha_cra_exit, 1036 - } 1037 - }, 1038 - { 1039 - .init = mtk_sha_init, 1040 - .update = mtk_sha_update, 1041 - .final = mtk_sha_final, 1042 - .finup = mtk_sha_finup, 1043 - .digest = mtk_sha_digest, 1044 - .export = mtk_sha_export, 1045 - .import = mtk_sha_import, 1046 - .setkey = mtk_sha_setkey, 1047 - .halg.digestsize = SHA256_DIGEST_SIZE, 1048 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 1049 - .halg.base = { 1050 - .cra_name = "hmac(sha256)", 1051 - .cra_driver_name = "mtk-hmac-sha256", 1052 - .cra_priority = 400, 1053 - .cra_flags = CRYPTO_ALG_ASYNC | 1054 - CRYPTO_ALG_NEED_FALLBACK, 1055 - .cra_blocksize = SHA256_BLOCK_SIZE, 1056 - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + 1057 - sizeof(struct mtk_sha_hmac_ctx), 1058 - .cra_alignmask = SHA_ALIGN_MSK, 1059 - .cra_module = THIS_MODULE, 1060 - .cra_init = mtk_sha_cra_sha256_init, 1061 - .cra_exit = mtk_sha_cra_exit, 1062 - } 1063 - }, 1064 - }; 1065 - 1066 - static struct ahash_alg algs_sha384_sha512[] = { 1067 - { 1068 - .init = mtk_sha_init, 1069 - .update = mtk_sha_update, 1070 - .final = mtk_sha_final, 1071 - .finup = mtk_sha_finup, 1072 - .digest = mtk_sha_digest, 1073 - .export = mtk_sha_export, 1074 - .import = mtk_sha_import, 1075 - .halg.digestsize = SHA384_DIGEST_SIZE, 1076 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 1077 - .halg.base = { 1078 - .cra_name = "sha384", 1079 - .cra_driver_name = "mtk-sha384", 1080 - .cra_priority 
= 400, 1081 - .cra_flags = CRYPTO_ALG_ASYNC, 1082 - .cra_blocksize = SHA384_BLOCK_SIZE, 1083 - .cra_ctxsize = sizeof(struct mtk_sha_ctx), 1084 - .cra_alignmask = SHA_ALIGN_MSK, 1085 - .cra_module = THIS_MODULE, 1086 - .cra_init = mtk_sha_cra_init, 1087 - .cra_exit = mtk_sha_cra_exit, 1088 - } 1089 - }, 1090 - { 1091 - .init = mtk_sha_init, 1092 - .update = mtk_sha_update, 1093 - .final = mtk_sha_final, 1094 - .finup = mtk_sha_finup, 1095 - .digest = mtk_sha_digest, 1096 - .export = mtk_sha_export, 1097 - .import = mtk_sha_import, 1098 - .halg.digestsize = SHA512_DIGEST_SIZE, 1099 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 1100 - .halg.base = { 1101 - .cra_name = "sha512", 1102 - .cra_driver_name = "mtk-sha512", 1103 - .cra_priority = 400, 1104 - .cra_flags = CRYPTO_ALG_ASYNC, 1105 - .cra_blocksize = SHA512_BLOCK_SIZE, 1106 - .cra_ctxsize = sizeof(struct mtk_sha_ctx), 1107 - .cra_alignmask = SHA_ALIGN_MSK, 1108 - .cra_module = THIS_MODULE, 1109 - .cra_init = mtk_sha_cra_init, 1110 - .cra_exit = mtk_sha_cra_exit, 1111 - } 1112 - }, 1113 - { 1114 - .init = mtk_sha_init, 1115 - .update = mtk_sha_update, 1116 - .final = mtk_sha_final, 1117 - .finup = mtk_sha_finup, 1118 - .digest = mtk_sha_digest, 1119 - .export = mtk_sha_export, 1120 - .import = mtk_sha_import, 1121 - .setkey = mtk_sha_setkey, 1122 - .halg.digestsize = SHA384_DIGEST_SIZE, 1123 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 1124 - .halg.base = { 1125 - .cra_name = "hmac(sha384)", 1126 - .cra_driver_name = "mtk-hmac-sha384", 1127 - .cra_priority = 400, 1128 - .cra_flags = CRYPTO_ALG_ASYNC | 1129 - CRYPTO_ALG_NEED_FALLBACK, 1130 - .cra_blocksize = SHA384_BLOCK_SIZE, 1131 - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + 1132 - sizeof(struct mtk_sha_hmac_ctx), 1133 - .cra_alignmask = SHA_ALIGN_MSK, 1134 - .cra_module = THIS_MODULE, 1135 - .cra_init = mtk_sha_cra_sha384_init, 1136 - .cra_exit = mtk_sha_cra_exit, 1137 - } 1138 - }, 1139 - { 1140 - .init = mtk_sha_init, 1141 - .update = 
mtk_sha_update, 1142 - .final = mtk_sha_final, 1143 - .finup = mtk_sha_finup, 1144 - .digest = mtk_sha_digest, 1145 - .export = mtk_sha_export, 1146 - .import = mtk_sha_import, 1147 - .setkey = mtk_sha_setkey, 1148 - .halg.digestsize = SHA512_DIGEST_SIZE, 1149 - .halg.statesize = sizeof(struct mtk_sha_reqctx), 1150 - .halg.base = { 1151 - .cra_name = "hmac(sha512)", 1152 - .cra_driver_name = "mtk-hmac-sha512", 1153 - .cra_priority = 400, 1154 - .cra_flags = CRYPTO_ALG_ASYNC | 1155 - CRYPTO_ALG_NEED_FALLBACK, 1156 - .cra_blocksize = SHA512_BLOCK_SIZE, 1157 - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + 1158 - sizeof(struct mtk_sha_hmac_ctx), 1159 - .cra_alignmask = SHA_ALIGN_MSK, 1160 - .cra_module = THIS_MODULE, 1161 - .cra_init = mtk_sha_cra_sha512_init, 1162 - .cra_exit = mtk_sha_cra_exit, 1163 - } 1164 - }, 1165 - }; 1166 - 1167 - static void mtk_sha_queue_task(unsigned long data) 1168 - { 1169 - struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; 1170 - 1171 - mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL); 1172 - } 1173 - 1174 - static void mtk_sha_done_task(unsigned long data) 1175 - { 1176 - struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; 1177 - struct mtk_cryp *cryp = sha->cryp; 1178 - 1179 - mtk_sha_unmap(cryp, sha); 1180 - mtk_sha_complete(cryp, sha); 1181 - } 1182 - 1183 - static irqreturn_t mtk_sha_irq(int irq, void *dev_id) 1184 - { 1185 - struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id; 1186 - struct mtk_cryp *cryp = sha->cryp; 1187 - u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id)); 1188 - 1189 - mtk_sha_write(cryp, RDR_STAT(sha->id), val); 1190 - 1191 - if (likely((SHA_FLAGS_BUSY & sha->flags))) { 1192 - mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST); 1193 - mtk_sha_write(cryp, RDR_THRESH(sha->id), 1194 - MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE); 1195 - 1196 - tasklet_schedule(&sha->done_task); 1197 - } else { 1198 - dev_warn(cryp->dev, "SHA interrupt when no active requests.\n"); 1199 - } 1200 - return 
IRQ_HANDLED; 1201 - } 1202 - 1203 - /* 1204 - * The purpose of two SHA records is used to get extra performance. 1205 - * It is similar to mtk_aes_record_init(). 1206 - */ 1207 - static int mtk_sha_record_init(struct mtk_cryp *cryp) 1208 - { 1209 - struct mtk_sha_rec **sha = cryp->sha; 1210 - int i, err = -ENOMEM; 1211 - 1212 - for (i = 0; i < MTK_REC_NUM; i++) { 1213 - sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL); 1214 - if (!sha[i]) 1215 - goto err_cleanup; 1216 - 1217 - sha[i]->cryp = cryp; 1218 - 1219 - spin_lock_init(&sha[i]->lock); 1220 - crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE); 1221 - 1222 - tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task, 1223 - (unsigned long)sha[i]); 1224 - tasklet_init(&sha[i]->done_task, mtk_sha_done_task, 1225 - (unsigned long)sha[i]); 1226 - } 1227 - 1228 - /* Link to ring2 and ring3 respectively */ 1229 - sha[0]->id = MTK_RING2; 1230 - sha[1]->id = MTK_RING3; 1231 - 1232 - cryp->rec = 1; 1233 - 1234 - return 0; 1235 - 1236 - err_cleanup: 1237 - for (; i--; ) 1238 - kfree(sha[i]); 1239 - return err; 1240 - } 1241 - 1242 - static void mtk_sha_record_free(struct mtk_cryp *cryp) 1243 - { 1244 - int i; 1245 - 1246 - for (i = 0; i < MTK_REC_NUM; i++) { 1247 - tasklet_kill(&cryp->sha[i]->done_task); 1248 - tasklet_kill(&cryp->sha[i]->queue_task); 1249 - 1250 - kfree(cryp->sha[i]); 1251 - } 1252 - } 1253 - 1254 - static void mtk_sha_unregister_algs(void) 1255 - { 1256 - int i; 1257 - 1258 - for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) 1259 - crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]); 1260 - 1261 - for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) 1262 - crypto_unregister_ahash(&algs_sha384_sha512[i]); 1263 - } 1264 - 1265 - static int mtk_sha_register_algs(void) 1266 - { 1267 - int err, i; 1268 - 1269 - for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) { 1270 - err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]); 1271 - if (err) 1272 - goto err_sha_224_256_algs; 1273 - } 1274 - 1275 - for 
(i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) { 1276 - err = crypto_register_ahash(&algs_sha384_sha512[i]); 1277 - if (err) 1278 - goto err_sha_384_512_algs; 1279 - } 1280 - 1281 - return 0; 1282 - 1283 - err_sha_384_512_algs: 1284 - for (; i--; ) 1285 - crypto_unregister_ahash(&algs_sha384_sha512[i]); 1286 - i = ARRAY_SIZE(algs_sha1_sha224_sha256); 1287 - err_sha_224_256_algs: 1288 - for (; i--; ) 1289 - crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]); 1290 - 1291 - return err; 1292 - } 1293 - 1294 - int mtk_hash_alg_register(struct mtk_cryp *cryp) 1295 - { 1296 - int err; 1297 - 1298 - INIT_LIST_HEAD(&cryp->sha_list); 1299 - 1300 - /* Initialize two hash records */ 1301 - err = mtk_sha_record_init(cryp); 1302 - if (err) 1303 - goto err_record; 1304 - 1305 - err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq, 1306 - 0, "mtk-sha", cryp->sha[0]); 1307 - if (err) { 1308 - dev_err(cryp->dev, "unable to request sha irq0.\n"); 1309 - goto err_res; 1310 - } 1311 - 1312 - err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq, 1313 - 0, "mtk-sha", cryp->sha[1]); 1314 - if (err) { 1315 - dev_err(cryp->dev, "unable to request sha irq1.\n"); 1316 - goto err_res; 1317 - } 1318 - 1319 - /* Enable ring2 and ring3 interrupt for hash */ 1320 - mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2); 1321 - mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3); 1322 - 1323 - spin_lock(&mtk_sha.lock); 1324 - list_add_tail(&cryp->sha_list, &mtk_sha.dev_list); 1325 - spin_unlock(&mtk_sha.lock); 1326 - 1327 - err = mtk_sha_register_algs(); 1328 - if (err) 1329 - goto err_algs; 1330 - 1331 - return 0; 1332 - 1333 - err_algs: 1334 - spin_lock(&mtk_sha.lock); 1335 - list_del(&cryp->sha_list); 1336 - spin_unlock(&mtk_sha.lock); 1337 - err_res: 1338 - mtk_sha_record_free(cryp); 1339 - err_record: 1340 - 1341 - dev_err(cryp->dev, "mtk-sha initialization failed.\n"); 1342 - return err; 1343 - } 1344 - 1345 - void mtk_hash_alg_release(struct 
mtk_cryp *cryp) 1346 - { 1347 - spin_lock(&mtk_sha.lock); 1348 - list_del(&cryp->sha_list); 1349 - spin_unlock(&mtk_sha.lock); 1350 - 1351 - mtk_sha_unregister_algs(); 1352 - mtk_sha_record_free(cryp); 1353 - }