/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes;

	if (!creq->last_req)
		len = (len + creq->cache_ptr) & ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;

	creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				     &dreq->cache_dma);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}

static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}
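
/*
 * The cache buffer holds the partial hash block carried between
 * update() calls. DMA-backed requests allocate it from a DMA pool so
 * the TDMA engine can copy it into the SRAM directly; standard
 * requests only need a plain kzalloc'ed buffer.
 */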
static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int ret;

	if (creq->cache)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
	else
		ret = mv_cesa_ahash_std_alloc_cache(creq, flags);

	return ret;
}

static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
{
	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
		      creq->req.dma.cache_dma);
}

static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
	kfree(creq->cache);
}

static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
{
	if (!creq->cache)
		return;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_free_cache(creq);
	else
		mv_cesa_ahash_std_free_cache(creq);

	creq->cache = NULL;
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_dma_cleanup(&creq->req.dma.base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_free_cache(creq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
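
/*
 * The final block is padded the same way for MD5, SHA-1 and SHA-256:
 * a 0x80 byte, then zeroes up to 56 bytes mod 64, then 8 bytes holding
 * the total message length in bits. As an example, a 100 byte message
 * gives index = 100 % 64 = 36, padlen = 56 - 36 = 20, and
 * mv_cesa_ahash_pad_req() emits 20 + 8 = 28 trailer bytes.
 */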
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	__be64 bits = cpu_to_be64(creq->len << 3);
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);
	memcpy(buf + padlen, &bits, sizeof(bits));

	return padlen + 8;
}

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	if (creq->cache_ptr)
		memcpy(engine->sram + CESA_SA_DATA_SRAM_OFFSET, creq->cache,
		       creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy(creq->cache,
				       engine->sram +
				       CESA_SA_DATA_SRAM_OFFSET + len,
				       new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
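
/*
 * prepare() is called when a queued request is assigned to an engine:
 * the DMA variant only has to retarget the prebuilt TDMA chain, while
 * the standard variant resets its progress counter and copies the
 * operation template into the engine SRAM.
 */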
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma.base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma.base, status);
	else
		ret = mv_cesa_ahash_std_process(ahashreq, status);

	if (ret == -EINPROGRESS)
		return ret;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		for (i = 0; i < digsize / 4; i++) {
			/*
			 * Hardware provides MD5 digest in a different
			 * endianness than SHA-1 and SHA-256 ones.
			 */
			if (digsize == MD5_DIGEST_SIZE)
				creq->state[i] = cpu_to_le32(creq->state[i]);
			else
				creq->state[i] = cpu_to_be32(creq->state[i]);
		}

		memcpy(ahashreq->result, creq->state, digsize);
	}

	return ret;
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel(creq->state[i], engine->regs + CESA_IVDIG(i));
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
	    !creq->last_req) {
		ret = mv_cesa_ahash_alloc_cache(req);
		if (ret)
			return ret;
	}

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_op_ctx *op = NULL;
	int ret;

	if (!creq->cache_ptr)
		return NULL;

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->cache_dma,
					    creq->cache_ptr,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	if (!dma_iter->base.op_len) {
		op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_frag_len(op, creq->cache_ptr);

		/* Add dummy desc to launch crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(chain, flags);
		if (ret)
			return ERR_PTR(ret);
	}

	return op;
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_add_data(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	mv_cesa_set_mac_op_frag_len(op, dma_iter->base.op_len);

	if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) ==
	    CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	/* Add input transfers */
	ret = mv_cesa_dma_add_op_transfers(chain, &dma_iter->base,
					   &dma_iter->src, flags);
	if (ret)
		return ERR_PTR(ret);

	/* Add dummy desc to launch crypto operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	return op;
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   struct mv_cesa_op_ctx *op,
			   gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	int ret;

	if (!creq->last_req)
		return op;

	if (op && creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		u32 frag = CESA_SA_DESC_CFG_NOT_FRAG;

		if ((mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK) !=
		    CESA_SA_DESC_CFG_FIRST_FRAG)
			frag = CESA_SA_DESC_CFG_LAST_FRAG;

		mv_cesa_update_op_cfg(op, frag, CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	if (op) {
		len = min(CESA_SA_SRAM_PAYLOAD_SIZE - dma_iter->base.op_len,
			  trailerlen);
		if (len) {
			ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						dma_iter->base.op_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
			if (ret)
				return ERR_PTR(ret);

			mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
					      CESA_SA_DESC_CFG_FRAG_MSK);
			mv_cesa_set_mac_op_frag_len(op,
					dma_iter->base.op_len + len);
			padoff += len;
		}
	}

	if (padoff >= trailerlen)
		return op;

	if ((mv_cesa_get_op_cfg(&creq->op_tmpl) & CESA_SA_DESC_CFG_FRAG_MSK) !=
	    CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	op = mv_cesa_dma_add_op(chain, &creq->op_tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	mv_cesa_set_mac_op_frag_len(op, trailerlen - padoff);

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	/* Add dummy desc to launch crypto operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	return op;
}
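
/*
 * Build the TDMA descriptor chain for a DMA-backed request: map the
 * source scatterlist, transfer the cached partial block (if any) into
 * the engine SRAM, then queue full blocks of input data and, for the
 * last request, the software-generated padding, ending with a dummy
 * descriptor that waits for the operation to complete.
 */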
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
	struct mv_cesa_tdma_chain chain;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	int ret;

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	op = mv_cesa_ahash_dma_add_cache(&chain, &iter, creq, flags);
	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	do {
		if (!iter.base.op_len)
			break;

		op = mv_cesa_ahash_dma_add_data(&chain, &iter, creq, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
	} while (mv_cesa_ahash_req_iter_next_op(&iter));

	op = mv_cesa_ahash_dma_last_req(&chain, &iter, creq, op, flags);
	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (cesa_dev->caps->has_tdma)
		creq->req.base.type = CESA_DMA_REQ;
	else
		creq->req.base.type = CESA_STD_REQ;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}
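
/*
 * finup() combines a final update with the digest finalization: the
 * remaining bytes are accounted in the total length and the request is
 * flagged as the last one so padding gets appended.
 */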
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);

	out_state->byte_count = creq->len;
	memcpy(out_state->hash, creq->state, digsize);
	memset(out_state->block, 0, sizeof(out_state->block));
	if (creq->cache)
		memcpy(out_state->block, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int cache_ptr;
	int ret;

	creq->len = in_state->byte_count;
	memcpy(creq->state, in_state->hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = creq->len % sizeof(in_state->block);
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, in_state->block, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);

	out_state->count = creq->len;
	memcpy(out_state->state, creq->state, digsize);
	memset(out_state->buffer, 0, sizeof(out_state->buffer));
	if (creq->cache)
		memcpy(out_state->buffer, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int cache_ptr;
	int ret;

	creq->len = in_state->count;
	memcpy(creq->state, in_state->state, digsize);
	creq->cache_ptr = 0;

	cache_ptr = creq->len % SHA1_BLOCK_SIZE;
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, in_state->buffer, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int ds = crypto_ahash_digestsize(ahash);

	out_state->count = creq->len;
	memcpy(out_state->state, creq->state, ds);
	memset(out_state->buf, 0, sizeof(out_state->buf));
	if (creq->cache)
		memcpy(out_state->buf, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int cache_ptr;
	int ret;

	creq->len = in_state->count;
	memcpy(creq->state, in_state->state, digsize);
	creq->cache_ptr = 0;

	cache_ptr = creq->len % SHA256_BLOCK_SIZE;
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, in_state->buf, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};