Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mtk-sha.c at v5.0-rc8 (1359 lines, 34 kB)

/*
 * Cryptographic API.
 *
 * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include "mtk-platform.h"

#define SHA_ALIGN_MSK		(sizeof(u32) - 1)
#define SHA_QUEUE_SIZE		512
#define SHA_BUF_SIZE		((u32)PAGE_SIZE)

#define SHA_OP_UPDATE		1
#define SHA_OP_FINAL		2

#define SHA_DATA_LEN_MSK	cpu_to_le32(GENMASK(16, 0))
#define SHA_MAX_DIGEST_BUF_SIZE	32

/* SHA command token */
#define SHA_CT_SIZE		5
#define SHA_CT_CTRL_HDR		cpu_to_le32(0x02220000)
#define SHA_CMD0		cpu_to_le32(0x03020000)
#define SHA_CMD1		cpu_to_le32(0x21060000)
#define SHA_CMD2		cpu_to_le32(0xe0e63802)

/* SHA transform information */
#define SHA_TFM_HASH		cpu_to_le32(0x2 << 0)
#define SHA_TFM_SIZE(x)		cpu_to_le32((x) << 8)
#define SHA_TFM_START		cpu_to_le32(0x1 << 4)
#define SHA_TFM_CONTINUE	cpu_to_le32(0x1 << 5)
#define SHA_TFM_HASH_STORE	cpu_to_le32(0x1 << 19)
#define SHA_TFM_SHA1		cpu_to_le32(0x2 << 23)
#define SHA_TFM_SHA256		cpu_to_le32(0x3 << 23)
#define SHA_TFM_SHA224		cpu_to_le32(0x4 << 23)
#define SHA_TFM_SHA512		cpu_to_le32(0x5 << 23)
#define SHA_TFM_SHA384		cpu_to_le32(0x6 << 23)
#define SHA_TFM_DIGEST(x)	cpu_to_le32(((x) & GENMASK(3, 0)) << 24)

/* SHA flags */
#define SHA_FLAGS_BUSY		BIT(0)
#define SHA_FLAGS_FINAL		BIT(1)
#define SHA_FLAGS_FINUP		BIT(2)
#define SHA_FLAGS_SG		BIT(3)
#define SHA_FLAGS_ALGO_MSK	GENMASK(8, 4)
#define SHA_FLAGS_SHA1		BIT(4)
#define SHA_FLAGS_SHA224	BIT(5)
#define SHA_FLAGS_SHA256	BIT(6)
#define SHA_FLAGS_SHA384	BIT(7)
#define SHA_FLAGS_SHA512	BIT(8)
#define SHA_FLAGS_HMAC		BIT(9)
#define SHA_FLAGS_PAD		BIT(10)
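
/*
 * Example of how the transform words combine (assuming SIZE_IN_WORDS()
 * from mtk-platform.h converts a byte count to 32-bit words): a plain
 * SHA-256 request with a 32-byte digest uses
 *
 *	tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(8) | SHA_TFM_SHA256;
 *
 * and the control word additionally carries SHA_TFM_START |
 * SHA_TFM_CONTINUE, with SHA_TFM_START cleared again once the first
 * data has been hashed. See mtk_sha_info_init() and
 * mtk_sha_info_update() below.
 */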

/**
 * struct mtk_sha_info - hardware information of SHA
 * @ctrl:	control token, hardware instruction
 * @cmd:	command token, hardware instruction
 * @tfm:	transform state of the hash algorithm
 * @digest:	contains keys, initial vectors and the resulting digest
 */
struct mtk_sha_info {
	__le32 ctrl[2];
	__le32 cmd[3];
	__le32 tfm[2];
	__le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
};

struct mtk_sha_reqctx {
	struct mtk_sha_info info;
	unsigned long flags;
	unsigned long op;

	u64 digcnt;
	size_t bufcnt;
	dma_addr_t dma_addr;

	__le32 ct_hdr;
	u32 ct_size;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	/* Walk state */
	struct scatterlist *sg;
	u32 offset;	/* Offset in current sg */
	u32 total;	/* Total request */
	size_t ds;
	size_t bs;

	u8 *buffer;
};

struct mtk_sha_hmac_ctx {
	struct crypto_shash *shash;
	u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
	u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct mtk_sha_ctx {
	struct mtk_cryp *cryp;
	unsigned long flags;
	u8 id;
	u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));

	struct mtk_sha_hmac_ctx base[0];
};

struct mtk_sha_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_sha_drv mtk_sha = {
	.dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
};

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req);

static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_sha_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}

static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
				      struct mtk_desc **cmd_curr,
				      struct mtk_desc **res_curr,
				      int *count)
{
	*cmd_curr = ring->cmd_next++;
	*res_curr = ring->res_next++;
	(*count)++;

	if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
		ring->cmd_next = ring->cmd_base;
		ring->res_next = ring->res_base;
	}
}

static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_sha.lock);
	if (!tctx->cryp) {
		list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
			cryp = tmp;
			break;
		}
		tctx->cryp = cryp;
	} else {
		cryp = tctx->cryp;
	}

	/*
	 * Assign record id to tfm in round-robin fashion, and this
	 * will help tfm to bind to corresponding descriptor rings.
	 */
	tctx->id = cryp->rec;
	cryp->rec = !cryp->rec;

	spin_unlock_bh(&mtk_sha.lock);

	return cryp;
}
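
/*
 * Note that the id assignment above simply ping-pongs cryp->rec between
 * 0 and 1, so successive transforms alternate between the two SHA
 * records, and hence between the two descriptor rings they are bound to.
 */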

static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, SHA_BUF_SIZE - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list; a 0 length
			 * sg doesn't necessarily mean the end of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
					 ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message, followed by
 * "padlen - 1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
 * a 128-bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
{
	u32 index, padlen;
	u64 bits[2];
	u64 size = ctx->digcnt;

	size += ctx->bufcnt;
	size += len;

	bits[1] = cpu_to_be64(size << 3);
	bits[0] = cpu_to_be64(size >> 61);

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
		break;

	default:
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
		break;
	}
}
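
/*
 * Worked example of the padding (SHA-256, 64-byte blocks): a 20-byte
 * message gives index = 20 and padlen = 56 - 20 = 36, so the padded
 * buffer is 20 + 36 + 8 = 64 bytes. A 60-byte message gives index = 60
 * and padlen = (64 + 56) - 60 = 60, so the padded buffer is
 * 60 + 60 + 8 = 128 bytes, i.e. two blocks.
 */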

/* Initialize basic transform information of SHA */
static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
{
	struct mtk_sha_info *info = &ctx->info;

	ctx->ct_hdr = SHA_CT_CTRL_HDR;
	ctx->ct_size = SHA_CT_SIZE;

	info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));

	switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
	case SHA_FLAGS_SHA1:
		info->tfm[0] |= SHA_TFM_SHA1;
		break;
	case SHA_FLAGS_SHA224:
		info->tfm[0] |= SHA_TFM_SHA224;
		break;
	case SHA_FLAGS_SHA256:
		info->tfm[0] |= SHA_TFM_SHA256;
		break;
	case SHA_FLAGS_SHA384:
		info->tfm[0] |= SHA_TFM_SHA384;
		break;
	case SHA_FLAGS_SHA512:
		info->tfm[0] |= SHA_TFM_SHA512;
		break;

	default:
		/* Should not happen... */
		return;
	}

	info->tfm[1] = SHA_TFM_HASH_STORE;
	info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
	info->ctrl[1] = info->tfm[1];

	info->cmd[0] = SHA_CMD0;
	info->cmd[1] = SHA_CMD1;
	info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
}

/*
 * Update the input data length field of the transform information and
 * map it to the DMA region.
 */
static int mtk_sha_info_update(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       size_t len1, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_sha_info *info = &ctx->info;

	ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
	ctx->ct_hdr |= cpu_to_le32(len1 + len2);
	info->cmd[0] &= ~SHA_DATA_LEN_MSK;
	info->cmd[0] |= cpu_to_le32(len1 + len2);

	/* Setting SHA_TFM_START only for the first iteration */
	if (ctx->digcnt)
		info->ctrl[0] &= ~SHA_TFM_START;

	ctx->digcnt += len1;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
		dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
		return -EINVAL;
	}

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);

	return 0;
}

/*
 * Because of a hardware limitation, the inner and outer digests must be
 * pre-calculated: the engine processes the inner hash first, and the
 * outer hash is then applied to the resulting digest. These extra
 * hashing steps limit HMAC performance, so we fall back to software
 * for the outer hash.
 */
static int mtk_sha_finish_hmac(struct ahash_request *req)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
	       crypto_shash_finup(shash, req->result, ctx->ds, req->result);
}
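
/*
 * For reference, the split above follows the standard HMAC construction
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * the engine computes the inner hash H((K ^ ipad) || m), since the ipad
 * block is prepended to the message in mtk_sha_init(), while the outer
 * hash over opad is finished in software here.
 */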

/* Initialize request context */
static int mtk_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags = 0;
	ctx->ds = crypto_ahash_digestsize(tfm);

	switch (ctx->ds) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->bs = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buffer = tctx->buf;

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		memcpy(ctx->buffer, bctx->ipad, ctx->bs);
		ctx->bufcnt = ctx->bs;
		ctx->flags |= SHA_FLAGS_HMAC;
	}

	return 0;
}

static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
			dma_addr_t addr1, size_t len1,
			dma_addr_t addr2, size_t len2)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	struct mtk_ring *ring = cryp->ring[sha->id];
	struct mtk_desc *cmd, *res;
	int err, count = 0;

	err = mtk_sha_info_update(cryp, sha, len1, len2);
	if (err)
		return err;

	/* Fill in the command/result descriptors */
	mtk_sha_ring_shift(ring, &cmd, &res, &count);

	res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
	cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
		   MTK_DESC_CT_LEN(ctx->ct_size);
	cmd->buf = cpu_to_le32(addr1);
	cmd->ct = cpu_to_le32(ctx->ct_dma);
	cmd->ct_hdr = ctx->ct_hdr;
	cmd->tfm = cpu_to_le32(ctx->tfm_dma);

	if (len2) {
		mtk_sha_ring_shift(ring, &cmd, &res, &count);

		res->hdr = MTK_DESC_BUF_LEN(len2);
		cmd->hdr = MTK_DESC_BUF_LEN(len2);
		cmd->buf = cpu_to_le32(addr2);
	}

	cmd->hdr |= MTK_DESC_LAST;
	res->hdr |= MTK_DESC_LAST;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start the engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
	mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));

	return -EINPROGRESS;
}
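
/*
 * Usage sketch for mtk_sha_xmit(): a final request that has both
 * scatterlist data and a padded tail in ctx->buffer submits two
 * buffers at once,
 *
 *	mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), len,
 *		     ctx->dma_addr, count);
 *
 * which consumes two command/result descriptor pairs; the first pair
 * carries MTK_DESC_FIRST and the last queued pair gets MTK_DESC_LAST.
 */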

static int mtk_sha_dma_map(struct mtk_cryp *cryp,
			   struct mtk_sha_rec *sha,
			   struct mtk_sha_reqctx *ctx,
			   size_t count)
{
	ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
				       SHA_BUF_SIZE, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
		dev_err(cryp->dev, "dma map error\n");
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
}

static int mtk_sha_update_slow(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;
	u32 final;

	mtk_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);

	if (final) {
		sha->flags |= SHA_FLAGS_FINAL;
		mtk_sha_fill_padding(ctx, 0);
	}

	if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		return mtk_sha_dma_map(cryp, sha, ctx, count);
	}
	return 0;
}

static int mtk_sha_update_start(struct mtk_cryp *cryp,
				struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	u32 len, final, tail;
	struct scatterlist *sg;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return mtk_sha_update_slow(cryp, sha);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return mtk_sha_update_slow(cryp, sha);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
		/* size is not ctx->bs aligned */
		return mtk_sha_update_slow(cryp, sha);

	len = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->bs aligned */
			tail = len & (ctx->bs - 1);
			len -= tail;
		}
	}

	ctx->total -= len;
	ctx->offset = len; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		size_t count;

		tail = len & (ctx->bs - 1);
		len -= tail;
		ctx->total += tail;
		ctx->offset = len; /* offset where to start slow */

		sg = ctx->sg;
		mtk_sha_append_sg(ctx);
		mtk_sha_fill_padding(ctx, len);

		ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
					       SHA_BUF_SIZE, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
			dev_err(cryp->dev, "dma map bytes error\n");
			return -EINVAL;
		}

		sha->flags |= SHA_FLAGS_FINAL;
		count = ctx->bufcnt;
		ctx->bufcnt = 0;

		if (len == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
					    count, 0, 0);

		} else {
			ctx->sg = sg;
			if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
				dev_err(cryp->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;
			return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
					    len, ctx->dma_addr, count);
		}
	}

	if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(cryp->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
			    len, 0, 0);
}

static int mtk_sha_final_req(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
	size_t count;

	mtk_sha_fill_padding(ctx, 0);

	sha->flags |= SHA_FLAGS_FINAL;
	count = ctx->bufcnt;
	ctx->bufcnt = 0;

	return mtk_sha_dma_map(cryp, sha, ctx, count);
}

/* Copy ready hash (+ finalize hmac) */
static int mtk_sha_finish(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	__le32 *digest = ctx->info.digest;
	u32 *result = (u32 *)req->result;
	int i;

	/* Get the hash from the digest buffer */
	for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
		result[i] = le32_to_cpu(digest[i]);

	if (ctx->flags & SHA_FLAGS_HMAC)
		return mtk_sha_finish_hmac(req);

	return 0;
}

static void mtk_sha_finish_req(struct mtk_cryp *cryp,
			       struct mtk_sha_rec *sha,
			       int err)
{
	if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
		err = mtk_sha_finish(sha->req);

	sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);

	sha->req->base.complete(&sha->req->base, err);

	/* Handle new request */
	tasklet_schedule(&sha->queue_task);
}

static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct ahash_request *req)
{
	struct mtk_sha_rec *sha = cryp->sha[id];
	struct crypto_async_request *async_req, *backlog;
	struct mtk_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&sha->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&sha->queue, req);

	if (SHA_FLAGS_BUSY & sha->flags) {
		spin_unlock_irqrestore(&sha->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&sha->queue);
	async_req = crypto_dequeue_request(&sha->queue);
	if (async_req)
		sha->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&sha->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	ctx = ahash_request_ctx(req);

	sha->req = req;

	mtk_sha_info_init(ctx);

	if (ctx->op == SHA_OP_UPDATE) {
		err = mtk_sha_update_start(cryp, sha);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
			/* No final() after finup() */
			err = mtk_sha_final_req(cryp, sha);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = mtk_sha_final_req(cryp, sha);
	}

	if (unlikely(err != -EINPROGRESS))
		/* Task will not finish it, so do it here */
		mtk_sha_finish_req(cryp, sha, err);

	return ret;
}
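
/*
 * Return-value conventions above: the status of ahash_enqueue_request()
 * (typically -EINPROGRESS, or -EBUSY if the queue is full and the
 * request was backlogged) is passed back to the caller, while a request
 * taken off the backlog is notified with -EINPROGRESS via its
 * completion callback, per the usual crypto API contract.
 */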

static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op = op;

	return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
}

static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_BIDIRECTIONAL);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(cryp->dev, ctx->dma_addr,
					 SHA_BUF_SIZE, DMA_TO_DEVICE);
		}
	} else
		dma_unmap_single(cryp->dev, ctx->dma_addr,
				 SHA_BUF_SIZE, DMA_TO_DEVICE);
}

static void mtk_sha_complete(struct mtk_cryp *cryp,
			     struct mtk_sha_rec *sha)
{
	int err = 0;

	err = mtk_sha_update_start(cryp, sha);
	if (err != -EINPROGRESS)
		mtk_sha_finish_req(cryp, sha, err);
}

static int mtk_sha_update(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
	    !(ctx->flags & SHA_FLAGS_FINUP))
		return mtk_sha_append_sg(ctx);

	return mtk_sha_enqueue(req, SHA_OP_UPDATE);
}

static int mtk_sha_final(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_PAD)
		return mtk_sha_finish(req);

	return mtk_sha_enqueue(req, SHA_OP_FINAL);
}

static int mtk_sha_finup(struct ahash_request *req)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = mtk_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() always has to be called to clean up resources,
	 * even if update() failed.
	 */
	err2 = mtk_sha_final(req);

	return err1 ?: err2;
}

static int mtk_sha_digest(struct ahash_request *req)
{
	return mtk_sha_init(req) ?: mtk_sha_finup(req);
}

static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
			  u32 keylen)
{
	struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct mtk_sha_hmac_ctx *bctx = tctx->base;
	size_t bs = crypto_shash_blocksize(bctx->shash);
	size_t ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;
	shash->flags = crypto_shash_get_flags(bctx->shash) &
		       CRYPTO_TFM_REQ_MAY_SLEEP;

	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);
	memcpy(bctx->opad, bctx->ipad, bs);

	for (i = 0; i < bs; i++) {
		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
		bctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
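
/*
 * Keying example for mtk_sha_setkey(): with hmac(sha256), a 100-byte
 * key is first replaced by its 32-byte digest since it exceeds the
 * 64-byte block size; the key is then zero-padded to the block size and
 * XORed with HMAC_IPAD_VALUE (0x36) and HMAC_OPAD_VALUE (0x5c) to
 * produce ipad and opad.
 */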

static int mtk_sha_export(struct ahash_request *req, void *out)
{
	const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int mtk_sha_import(struct ahash_request *req, const void *in)
{
	struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
				const char *alg_base)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_sha_find_dev(tctx);
	if (!cryp)
		return -ENODEV;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mtk_sha_reqctx));

	if (alg_base) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		tctx->flags |= SHA_FLAGS_HMAC;
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("base driver %s could not be loaded.\n",
			       alg_base);

			return PTR_ERR(bctx->shash);
		}
	}
	return 0;
}

static int mtk_sha_cra_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, NULL);
}

static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha1");
}

static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha224");
}

static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha256");
}

static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha384");
}

static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
{
	return mtk_sha_cra_init_alg(tfm, "sha512");
}

static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	if (tctx->flags & SHA_FLAGS_HMAC) {
		struct mtk_sha_hmac_ctx *bctx = tctx->base;

		crypto_free_shash(bctx->shash);
	}
}

static struct ahash_alg algs_sha1_sha224_sha256[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "mtk-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha224",
		.cra_driver_name = "mtk-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "mtk-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha1)",
		.cra_driver_name = "mtk-hmac-sha1",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha1_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA224_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha224)",
		.cra_driver_name = "mtk-hmac-sha224",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA224_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha224_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha256)",
		.cra_driver_name = "mtk-hmac-sha256",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha256_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha384",
		.cra_driver_name = "mtk-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "sha512",
		.cra_driver_name = "mtk-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA384_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha384)",
		.cra_driver_name = "mtk-hmac-sha384",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA384_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha384_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
{
	.init = mtk_sha_init,
	.update = mtk_sha_update,
	.final = mtk_sha_final,
	.finup = mtk_sha_finup,
	.digest = mtk_sha_digest,
	.export = mtk_sha_export,
	.import = mtk_sha_import,
	.setkey = mtk_sha_setkey,
	.halg.digestsize = SHA512_DIGEST_SIZE,
	.halg.statesize = sizeof(struct mtk_sha_reqctx),
	.halg.base = {
		.cra_name = "hmac(sha512)",
		.cra_driver_name = "mtk-hmac-sha512",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA512_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mtk_sha_ctx) +
			       sizeof(struct mtk_sha_hmac_ctx),
		.cra_alignmask = SHA_ALIGN_MSK,
		.cra_module = THIS_MODULE,
		.cra_init = mtk_sha_cra_sha512_init,
		.cra_exit = mtk_sha_cra_exit,
	}
},
};

static void mtk_sha_queue_task(unsigned long data)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;

	mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
}

static void mtk_sha_done_task(unsigned long data)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
	struct mtk_cryp *cryp = sha->cryp;

	mtk_sha_unmap(cryp, sha);
	mtk_sha_complete(cryp, sha);
}

static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
{
	struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
	struct mtk_cryp *cryp = sha->cryp;
	u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));

	mtk_sha_write(cryp, RDR_STAT(sha->id), val);

	if (likely((SHA_FLAGS_BUSY & sha->flags))) {
		mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
		mtk_sha_write(cryp, RDR_THRESH(sha->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&sha->done_task);
	} else {
		dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}
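
/*
 * Note that the interrupt handler above only acknowledges the ring and
 * schedules the done tasklet; DMA unmapping and request completion are
 * deferred to mtk_sha_done_task() in softirq context.
 */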

/*
 * Two SHA records are used to get extra performance.
 * This is similar to mtk_aes_record_init().
 */
static int mtk_sha_record_init(struct mtk_cryp *cryp)
{
	struct mtk_sha_rec **sha = cryp->sha;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
		if (!sha[i])
			goto err_cleanup;

		sha[i]->cryp = cryp;

		spin_lock_init(&sha[i]->lock);
		crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);

		tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
			     (unsigned long)sha[i]);
		tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
			     (unsigned long)sha[i]);
	}

	/* Link to ring2 and ring3 respectively */
	sha[0]->id = MTK_RING2;
	sha[1]->id = MTK_RING3;

	cryp->rec = 1;

	return 0;

err_cleanup:
	for (; i--; )
		kfree(sha[i]);
	return err;
}

static void mtk_sha_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->sha[i]->done_task);
		tasklet_kill(&cryp->sha[i]->queue_task);

		kfree(cryp->sha[i]);
	}
}

static void mtk_sha_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
}

static int mtk_sha_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
		err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
		if (err)
			goto err_sha_224_256_algs;
	}

	for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
		err = crypto_register_ahash(&algs_sha384_sha512[i]);
		if (err)
			goto err_sha_384_512_algs;
	}

	return 0;

err_sha_384_512_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha384_sha512[i]);
	i = ARRAY_SIZE(algs_sha1_sha224_sha256);
err_sha_224_256_algs:
	for (; i--; )
		crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);

	return err;
}
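
/*
 * The "for (; i--; )" unwind pattern above unregisters only what was
 * registered before the failure: e.g. an error at i = 2 in
 * algs_sha384_sha512 unregisters entries 1 and 0 of that array and then
 * the whole algs_sha1_sha224_sha256 array.
 */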

int mtk_hash_alg_register(struct mtk_cryp *cryp)
{
	int err;

	INIT_LIST_HEAD(&cryp->sha_list);

	/* Initialize two hash records */
	err = mtk_sha_record_init(cryp);
	if (err)
		goto err_record;

	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
			       0, "mtk-sha", cryp->sha[0]);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq0.\n");
		goto err_res;
	}

	err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
			       0, "mtk-sha", cryp->sha[1]);
	if (err) {
		dev_err(cryp->dev, "unable to request sha irq1.\n");
		goto err_res;
	}

	/* Enable ring2 and ring3 interrupt for hash */
	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
	mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);

	spin_lock(&mtk_sha.lock);
	list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
	spin_unlock(&mtk_sha.lock);

	err = mtk_sha_register_algs();
	if (err)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);
err_res:
	mtk_sha_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-sha initialization failed.\n");
	return err;
}

void mtk_hash_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_sha.lock);
	list_del(&cryp->sha_list);
	spin_unlock(&mtk_sha.lock);

	mtk_sha_unregister_algs();
	mtk_sha_record_free(cryp);
}