/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
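
/*
 * Generate the standard MD5/SHA1/SHA256 trailer: a 0x80 byte, zero
 * padding up to 56 mod 64, then the message length in bits as a 64-bit
 * value. MD5 stores the length little endian, SHA1/SHA256 big endian,
 * hence the creq->algo_le switch. For example, a 20 byte message gives
 * index = 20 and padlen = 36, so the function returns 44 (36 bytes of
 * padding plus the 8 byte length), completing a single 64 byte block.
 */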
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
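
/*
 * A standard (CPU-driven) request is complete once sreq->offset has
 * consumed every byte that is not held back in the cache; until then
 * the request is stepped again to process the next SRAM-sized chunk.
 */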
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
		/*
		 * The hardware's MD5 digest is little endian, but SHA
		 * digests are big endian.
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}
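
/*
 * Final per-request cleanup: release the padding buffer for a last
 * request, unmap and free the DMA resources, and for a partial request
 * stash the trailing non-block-aligned bytes in creq->cache so the next
 * update can prepend them.
 */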
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}
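
/*
 * Append an operation block covering frag_len bytes to the TDMA chain,
 * followed by a dummy descriptor that launches it. If the template still
 * describes a first fragment, demote it to a mid fragment so the blocks
 * that follow chain correctly.
 */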
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
					  CESA_SA_DESC_CFG_NOT_FRAG :
					  CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl,
					  frag_len + len, flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
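
/*
 * Build the complete TDMA chain for one request. The resulting chain is
 * roughly:
 *
 *   [cache xfer] -> ([data xfers] -> [op + launch])* -> [last op] -> [end]
 *
 * where the final operation either closes the hash (for a last request)
 * or leaves a mid fragment pending for the next update.
 */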
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, &iter, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends on whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
				       CESA_TDMA_BREAK_CHAIN);

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}
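
/*
 * Common entry point for update/final/finup: initialize the request
 * (it may be satisfied entirely from the cache), then select an engine
 * and queue the request on it.
 */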
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
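
/*
 * The generic export/import helpers above serialize the intermediate
 * digest, the total length and the partial-block cache into the software
 * md5/sha state structures. A caller could suspend and resume a partial
 * hash along these lines (a sketch only; async completion handling and
 * error checking omitted):
 *
 *	char state[sizeof(struct sha256_state)];
 *
 *	crypto_ahash_update(req);
 *	crypto_ahash_export(req, state);
 *	...
 *	crypto_ahash_import(req, state);
 *	crypto_ahash_final(req);
 */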
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
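
/*
 * SHA256 reuses the generic export/import logic; only the layout of the
 * software state (struct sha256_state, with its "buf" member) differs
 * from the MD5 and SHA1 variants.
 */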
static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
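
/*
 * HMAC support. With HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)),
 * the helpers below hash the ipad and opad blocks once at setkey time
 * and export the resulting intermediate digests, so each request only
 * pays for the inner and outer message processing.
 */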
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Zero the key duplicate to avoid leaking key material */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}
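
/*
 * hmac(md5): the inner and outer intermediate digests precomputed by
 * setkey and stored in ctx->iv are loaded into the operation template
 * as the hardware IV.
 */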
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
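
/*
 * One-shot hmac(sha256) digest: initialize the request and process the
 * whole message as a single finup, like the md5 and sha1 variants above.
 */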
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};