Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git), at v5.11-rc1: 1353 lines, 34 kB
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK		(sizeof(u32) - 1)
#define SHA_QUEUE_SIZE		512
#define SHA_BUF_SIZE		((u32)PAGE_SIZE)

#define SHA_OP_UPDATE		1
#define SHA_OP_FINAL		2

#define SHA_DATA_LEN_MSK	cpu_to_le32(GENMASK(16, 0))
#define SHA_MAX_DIGEST_BUF_SIZE	32

/* SHA command token */
#define SHA_CT_SIZE		5
#define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
#define SHA_CMD0		cpu_to_le32(0x03020000)
#define SHA_CMD1		cpu_to_le32(0x21060000)
#define SHA_CMD2		cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
#define SHA_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define SHA_TFM_START		cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE	cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE	cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1		cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256		cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224		cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512		cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384		cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)	cpu_to_le32(((x) & GENMASK(3, 0)) << 24)

/* SHA flags */
#define SHA_FLAGS_BUSY		BIT(0)
#define SHA_FLAGS_FINAL		BIT(1)
#define SHA_FLAGS_FINUP		BIT(2)
#define SHA_FLAGS_SG		BIT(3)
#define SHA_FLAGS_ALGO_MSK	GENMASK(8, 4)
#define SHA_FLAGS_SHA1		BIT(4)
#define SHA_FLAGS_SHA224	BIT(5)
#define SHA_FLAGS_SHA256	BIT(6)
#define SHA_FLAGS_SHA384	BIT(7)
#define SHA_FLAGS_SHA512	BIT(8)
#define SHA_FLAGS_HMAC		BIT(9)
#define SHA_FLAGS_PAD		BIT(10)
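/*
 * Illustrative example (for exposition, not from the original source):
 * the SHA_TFM_* words above are combined by mtk_sha_info_init() below.
 * For a plain SHA-256 transform with a 32-byte digest, the first
 * transform word is built roughly as
 *
 *	tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(32)) | SHA_TFM_SHA256;
 *
 * i.e. hash mode (0x2) in the low bits, the digest size in 32-bit words
 * (8) at bit 8, and the algorithm identifier (0x3) at bit 23.
 */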
/**
 * struct mtk_sha_info - hardware information of SHA
 * @ctrl:	control bits of the hardware transform
 * @cmd:	command token, hardware instructions
 * @tfm:	transform state of the hash algorithm
 * @digest:	contains keys and initial vectors
 */
struct mtk_sha_info {
	__le32 ctrl[2];
	__le32 cmd[3];
	__le32 tfm[2];
	__le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};

struct mtk_sha_reqctx {
	struct mtk_sha_info info;
	unsigned long flags;
	unsigned long op;

	u64 digcnt;
	size_t bufcnt;
	dma_addr_t dma_addr;

	__le32 ct_hdr;
	u32 ct_size;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	/* Walk state */
	struct scatterlist *sg;
	u32 offset;	/* Offset in current sg */
	u32 total;	/* Total request */
	size_t ds;
	size_t bs;

	u8 *buffer;
};

struct mtk_sha_hmac_ctx {
	struct crypto_shash *shash;
	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct mtk_sha_ctx {
	struct mtk_cryp *cryp;
	unsigned long flags;
	u8 id;
	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

	struct mtk_sha_hmac_ctx base[];
};

struct mtk_sha_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_sha_drv mtk_sha = {
	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req);

static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
				      struct mtk_desc **cmd_curr,
				      struct mtk_desc **res_curr,
				      int *count)
{
	*cmd_curr = ring->cmd_next++;
	*res_curr = ring->res_next++;
	(*count)++;

	if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
		ring->cmd_next = ring->cmd_base;
		ring->res_next = ring->res_base;
	}
}

static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_sha.lock);
	if (!tctx->cryp) {
		list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
			cryp = tmp;
			break;
		}
		tctx->cryp = cryp;
	} else {
		cryp = tctx->cryp;
	}

	/*
	 * Assign a record id to the tfm in round-robin fashion; this
	 * helps the tfm bind to the corresponding descriptor ring.
	 */
	tctx->id = cryp->rec;
	cryp->rec = !cryp->rec;

	spin_unlock_bh(&mtk_sha.lock);

	return cryp;
}
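/*
 * Illustrative walk-through (for exposition): mtk_sha_ring_shift() above
 * hands out command/result descriptors in lock-step. With MTK_DESC_NUM
 * slots, the caller receives the current pair and both cursors advance;
 * once cmd_next steps past the last slot, cmd_next and res_next are reset
 * to the ring base together, so command and result rings always wrap in
 * sync.
 */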
static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list; a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
 * 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated from the message length
 * modulo 64 as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated from the message length
 * modulo 128 as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
	u32 index, padlen;
	__be64 bits[2];
	u64 size = ctx->digcnt;

	size += ctx->bufcnt;
	size += len;

	bits[1] = cpu_to_be64(size << 3);
	bits[0] = cpu_to_be64(size >> 61);

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
		break;

	default:
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
		break;
	}
}
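/*
 * Worked example (for exposition, not from the original source): hashing
 * a 3-byte message with SHA-256 in one step gives index = 3 and
 * padlen = 56 - 3 = 53. The buffer then holds the 3 message bytes, the
 * byte 0x80, 52 zero bytes, and the 64-bit big-endian bit length (24),
 * i.e. 3 + 53 + 8 = 64 bytes, exactly one SHA-256 block.
 */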
/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
	struct mtk_sha_info *info = &ctx->info;

	ctx->ct_hdr = SHA_CT_CTRL_HDR;
	ctx->ct_size = SHA_CT_SIZE;

	info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA1:
		info->tfm[0] |= SHA_TFM_SHA1;
		break;
	case SHA_FLAGS_SHA224:
		info->tfm[0] |= SHA_TFM_SHA224;
		break;
	case SHA_FLAGS_SHA256:
		info->tfm[0] |= SHA_TFM_SHA256;
		break;
	case SHA_FLAGS_SHA384:
		info->tfm[0] |= SHA_TFM_SHA384;
		break;
	case SHA_FLAGS_SHA512:
		info->tfm[0] |= SHA_TFM_SHA512;
		break;

	default:
		/* Should not happen... */
		return;
	}

	info->tfm[1] = SHA_TFM_HASH_STORE;
	info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
	info->ctrl[1] = info->tfm[1];

	info->cmd[0] = SHA_CMD0;
	info->cmd[1] = SHA_CMD1;
	info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}

/*
 * Update the input data length field of the transform information and
 * map it to the DMA region.
 */
static int mtk_sha_info_update(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       size_t len1, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_sha_info *info = &ctx->info;

	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
	ctx->ct_hdr |= cpu_to_le32(len1 + len2);
	info->cmd[0] &= ~SHA_DATA_LEN_MSK;
	info->cmd[0] |= cpu_to_le32(len1 + len2);

	/* Setting SHA_TFM_START only for the first iteration */
	if (ctx->digcnt)
		info->ctrl[0] &= ~SHA_TFM_START;

	ctx->digcnt += len1;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
		return -EINVAL;
	}

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);

	return 0;
}

/*
 * Because of a hardware limitation, the inner and outer digests must be
 * pre-calculated and processed by the engine first, and the resulting
 * digest is then applied to the input message. These complex hashing
 * procedures limit HMAC performance, so the final step falls back to a
 * software hash.
 */
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}
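/*
 * For reference (exposition only): the sequence above completes the
 * standard HMAC construction, HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)).
 * The engine has already produced the inner hash H((K' ^ ipad) || m) in
 * req->result (the ipad block is prepended in mtk_sha_init() below), and
 * the software shash computes the outer hash over the opad block followed
 * by that inner digest.
 */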
/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags = 0;
	ctx->ds = crypto_ahash_digestsize(tfm);

	switch (ctx->ds) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->bs = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buffer = tctx->buf;

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, ctx->bs);
		ctx->bufcnt = ctx->bs;
		ctx->flags |= SHA_FLAGS_HMAC;
	}

	return 0;
}

static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr1, size_t len1,
			dma_addr_t addr2, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd, *res;
	int err, count = 0;

	err = mtk_sha_info_update(cryp, sha, len1, len2);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	mtk_sha_ring_shift(ring, &cmd, &res, &count);

	res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
		   MTK_DESC_CT_LEN(ctx->ct_size);
	cmd->buf = cpu_to_le32(addr1);
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	if (len2) {
		mtk_sha_ring_shift(ring, &cmd, &res, &count);

		res->hdr = MTK_DESC_BUF_LEN(len2);
		cmd->hdr = MTK_DESC_BUF_LEN(len2);
		cmd->buf = cpu_to_le32(addr2);
	}

	cmd->hdr |= MTK_DESC_LAST;
	res->hdr |= MTK_DESC_LAST;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));

	return -EINPROGRESS;
}
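/*
 * Note (for exposition): mtk_sha_xmit() above links one or two
 * command/result descriptor pairs into a single chain. The first pair
 * carries MTK_DESC_FIRST plus the command token; when a second buffer
 * (len2) is present, typically the tail buffer holding the padding,
 * another pair is appended, and MTK_DESC_LAST marks whichever pair ends
 * the chain.
 */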
static int mtk_sha_dma_map(struct mtk_cryp *cryp,
			   struct mtk_sha_rec *sha,
			   struct mtk_sha_reqctx *ctx,
			   size_t count)
{
	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
				       SHA_BUF_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
		dev_err(cryp->dev, "dma map error\n");
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}

static int mtk_sha_update_slow(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;
	u32 final;

	mtk_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

	if (final) {
		sha->flags |= SHA_FLAGS_FINAL;
		mtk_sha_fill_padding(ctx, 0);
	}

	if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		return mtk_sha_dma_map(cryp, sha, ctx, count);
	}
	return 0;
}

static int mtk_sha_update_start(struct mtk_cryp *cryp,
				struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	u32 len, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return mtk_sha_update_slow(cryp, sha);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return mtk_sha_update_slow(cryp, sha);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
		/* size is not ctx->bs aligned */
		return mtk_sha_update_slow(cryp, sha);

	len = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->bs aligned */
			tail = len & (ctx->bs - 1);
			len -= tail;
		}
	}

	ctx->total -= len;
	ctx->offset = len; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		size_t count;

		tail = len & (ctx->bs - 1);
		len -= tail;
		ctx->total += tail;
		ctx->offset = len; /* offset where to start slow */

		sg = ctx->sg;
		mtk_sha_append_sg(ctx);
		mtk_sha_fill_padding(ctx, len);

		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
					       SHA_BUF_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
			dev_err(cryp->dev, "dma map bytes error\n");
			return -EINVAL;
		}

		sha->flags |= SHA_FLAGS_FINAL;
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		if (len == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
					    count, 0, 0);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
				dev_err(cryp->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;
			return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
					    len, ctx->dma_addr, count);
		}
	}

	if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(cryp->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
			    len, 0, 0);
}

static int mtk_sha_final_req(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;

	mtk_sha_fill_padding(ctx, 0);

	sha->flags |= SHA_FLAGS_FINAL;
	count = ctx->bufcnt;
	ctx->bufcnt = 0;

	return mtk_sha_dma_map(cryp, sha, ctx, count);
}

/* Copy ready hash (+ finalize hmac) */
static int mtk_sha_finish(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	__le32 *digest = ctx->info.digest;
	u32 *result = (u32 *)req->result;
	int i;

	/* Get the hash from the digest buffer */
	for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
		result[i] = le32_to_cpu(digest[i]);

	if (ctx->flags & SHA_FLAGS_HMAC)
		return mtk_sha_finish_hmac(req);

	return 0;
}

static void mtk_sha_finish_req(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       int err)
{
	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
		err = mtk_sha_finish(sha->req);

	sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

	sha->req->base.complete(&sha->req->base, err);

	/* Handle new request */
	tasklet_schedule(&sha->queue_task);
}

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req)
{
	struct mtk_sha_rec *sha = cryp->sha[id];
	struct crypto_async_request *async_req, *backlog;
	struct mtk_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&sha->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&sha->queue, req);

	if (SHA_FLAGS_BUSY & sha->flags) {
		spin_unlock_irqrestore(&sha->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&sha->queue);
	async_req = crypto_dequeue_request(&sha->queue);
	if (async_req)
		sha->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&sha->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	ctx = ahash_request_ctx(req);

	sha->req = req;

	mtk_sha_info_init(ctx);

	if (ctx->op == SHA_OP_UPDATE) {
		err = mtk_sha_update_start(cryp, sha);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* No final() after finup() */
			err = mtk_sha_final_req(cryp, sha);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = mtk_sha_final_req(cryp, sha);
	}

	if (unlikely(err != -EINPROGRESS))
		/* Task will not finish it, so do it here */
		mtk_sha_finish_req(cryp, sha, err);

	return ret;
}
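/*
 * Note (for exposition): mtk_sha_handle_queue() above follows the usual
 * crypto-queue contract: -EINPROGRESS from the start functions means the
 * hardware now owns the request and completion will be signalled from
 * mtk_sha_finish_req() via the done tasklet; any other value means the
 * request finished (or failed) synchronously and is completed here.
 */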
static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op = op;

	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}

static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_BIDIRECTIONAL);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(cryp->dev, ctx->dma_addr,
					 SHA_BUF_SIZE, DMA_TO_DEVICE);
		}
	} else
		dma_unmap_single(cryp->dev, ctx->dma_addr,
				 SHA_BUF_SIZE, DMA_TO_DEVICE);
}

static void mtk_sha_complete(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	int err = 0;

	err = mtk_sha_update_start(cryp, sha);
	if (err != -EINPROGRESS)
		mtk_sha_finish_req(cryp, sha, err);
}

static int mtk_sha_update(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
	    !(ctx->flags & SHA_FLAGS_FINUP))
		return mtk_sha_append_sg(ctx);

	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}

static int mtk_sha_final(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_PAD)
		return mtk_sha_finish(req);

	return mtk_sha_enqueue(req, SHA_OP_FINAL);
}

static int mtk_sha_finup(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = mtk_sha_update(req);
	if (err1 == -EINPROGRESS ||
	    (err1 == -EBUSY && (ahash_request_flags(req) &
				CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err1;
	/*
	 * final() always has to be called to clean up resources,
	 * even if update() failed.
	 */
	err2 = mtk_sha_final(req);

	return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
	return mtk_sha_init(req) ?: mtk_sha_finup(req);
}

static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  u32 keylen)
{
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	size_t bs = crypto_shash_blocksize(bctx->shash);
	size_t ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	if (keylen > bs) {
		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
					      bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
		bctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

static int mtk_sha_export(struct ahash_request *req, void *out)
{
	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}
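/*
 * Worked example (for exposition): in mtk_sha_setkey() above, with
 * HMAC_IPAD_VALUE = 0x36 and HMAC_OPAD_VALUE = 0x5c, a key K zero-padded
 * to the block size yields
 *
 *	ipad[i] = K[i] ^ 0x36;
 *	opad[i] = K[i] ^ 0x5c;
 *
 * exactly the K' ^ ipad and K' ^ opad blocks of RFC 2104. Keys longer
 * than the block size are first reduced to H(K) by
 * crypto_shash_tfm_digest().
 */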
static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
				const char *alg_base)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_sha_find_dev(tctx);
	if (!cryp)
		return -ENODEV;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mtk_sha_reqctx));

	if (alg_base) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		tctx->flags |= SHA_FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("base driver %s could not be loaded.\n",
			       alg_base);

			return PTR_ERR(bctx->shash);
		}
	}
	return 0;
}

static int mtk_sha_cra_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, NULL);
}

static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha1");
}

static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha224");
}

static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha256");
}

static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha384");
}

static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha512");
}

static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		crypto_free_shash(bctx->shash);
	}
}
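/*
 * Usage sketch (hypothetical caller, for exposition only): once the
 * algorithms below are registered, a kernel user could drive them through
 * the generic asynchronous hash API, e.g.:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);	// -EINPROGRESS if queued
 *
 * where my_done_cb and my_ctx are caller-supplied names. Error handling
 * and freeing (ahash_request_free(), crypto_free_ahash()) are omitted.
 */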
static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "mtk-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha224",
		.cra_driver_name = "mtk-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "mtk-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha1)",
		.cra_driver_name = "mtk-hmac-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha1_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha224)",
		.cra_driver_name = "mtk-hmac-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha224_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha256)",
		.cra_driver_name = "mtk-hmac-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha256_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha384",
		.cra_driver_name = "mtk-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha512",
		.cra_driver_name = "mtk-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha384)",
		.cra_driver_name = "mtk-hmac-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha384_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha512)",
		.cra_driver_name = "mtk-hmac-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha512_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};
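/*
 * Note (for exposition): the hmac(*) entries in the two arrays above set
 * CRYPTO_ALG_NEED_FALLBACK and carry a larger cra_ctxsize because each
 * HMAC tfm embeds a struct mtk_sha_hmac_ctx with a software shash; the
 * outer hash is finished in software by mtk_sha_finish_hmac().
 */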
static void mtk_sha_queue_task(unsigned long data)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;

	mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}

static void mtk_sha_done_task(unsigned long data)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
	struct mtk_cryp *cryp = sha->cryp;

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}

static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
	struct mtk_cryp *cryp = sha->cryp;
	u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));

	mtk_sha_write(cryp, RDR_STAT(sha->id), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(sha->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->done_task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
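/*
 * Note (assumption, for exposition): mtk_sha_irq() above reads RDR_STAT
 * and writes the value back, which on this engine appears to acknowledge
 * the ring interrupt (write-one-to-clear) before the done tasklet is
 * scheduled to unmap the DMA buffers and continue or complete the request.
 */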
/*
 * Two SHA records are used to get extra performance. This is similar
 * to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
	struct mtk_sha_rec **sha = cryp->sha;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
		if (!sha[i])
			goto err_cleanup;

		sha[i]->cryp = cryp;

		spin_lock_init(&sha[i]->lock);
		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);

		tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
			     (unsigned long)sha[i]);
		tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
			     (unsigned long)sha[i]);
	}

	/* Link to ring2 and ring3 respectively */
	sha[0]->id = MTK_RING2;
	sha[1]->id = MTK_RING3;

	cryp->rec = 1;

	return 0;

err_cleanup:
	for (; i--; )
		kfree(sha[i]);
	return err;
}

static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->sha[i]->done_task);
		tasklet_kill(&cryp->sha[i]->queue_task);

		kfree(cryp->sha[i]);
	}
}

static void mtk_sha_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
}

static int mtk_sha_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
		err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
		if (err)
			goto err_sha_224_256_algs;
	}

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
		err = crypto_register_ahash(&algs_sha384_sha512[i]);
		if (err)
			goto err_sha_384_512_algs;
	}

	return 0;

err_sha_384_512_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
	i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	return err;
}

int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
	int err;

	INIT_LIST_HEAD(&cryp->sha_list);

	/* Initialize two hash records */
	err = mtk_sha_record_init(cryp);
	if (err)
		goto err_record;

	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
			       0, "mtk-sha", cryp->sha[0]);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq0.\n");
		goto err_res;
	}

	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
			       0, "mtk-sha", cryp->sha[1]);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq1.\n");
		goto err_res;
	}

	/* Enable ring2 and ring3 interrupt for hash */
	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);

	spin_lock(&mtk_sha.lock);
	list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
	spin_unlock(&mtk_sha.lock);

	err = mtk_sha_register_algs();
	if (err)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);
err_res:
	mtk_sha_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-sha initialization failed.\n");
	return err;
}
void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);

	mtk_sha_unregister_algs();
	mtk_sha_record_free(cryp);
}