/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;

	creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				     &dreq->cache_dma);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}

static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
						gfp_t flags)
{
	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
	if (!creq->cache)
		return -ENOMEM;

	return 0;
}

static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int ret;

	if (creq->cache)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
	else
		ret = mv_cesa_ahash_std_alloc_cache(creq, flags);

	return ret;
}

static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
{
	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
		      creq->req.dma.cache_dma);
}

static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
{
	kfree(creq->cache);
}

static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
{
	if (!creq->cache)
		return;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_free_cache(creq);
	else
		mv_cesa_ahash_std_free_cache(creq);

	creq->cache = NULL;
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_dma_cleanup(&creq->req.dma.base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_free_cache(creq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma.base;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma.base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->req.base.engine;
	unsigned int digsize;
	int ret, i;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma.base, status);
	else
		ret = mv_cesa_ahash_std_process(ahashreq, status);

	if (ret == -EINPROGRESS)
		return ret;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	return ret;
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	unsigned int digsize;
	int i;

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.prepare = mv_cesa_ahash_prepare,
	.cleanup = mv_cesa_ahash_req_cleanup,
};

static int mv_cesa_ahash_init(struct ahash_request *req,
			      struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;

	return 0;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
	    !creq->last_req) {
		ret = mv_cesa_ahash_alloc_cache(req);
		if (ret)
			return ret;
	}

	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
		*cached = true;

		if (!req->nbytes)
			return 0;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return 0;
}

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_dma_iter *dma_iter,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;

	if (!creq->cache_ptr)
		return 0;

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
				      CESA_SA_DESC_CFG_NOT_FRAG :
				      CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	struct mv_cesa_tdma_req *dreq = &ahashdreq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&dreq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&dreq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	int ret;

	if (cesa_dev->caps->has_tdma)
		creq->req.base.type = CESA_DMA_REQ;
	else
		creq->req.base.type = CESA_STD_REQ;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);

	ret = mv_cesa_ahash_cache_req(req, cached);
	if (ret)
		return ret;

	if (*cached)
		return 0;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_ahash_dma_req_init(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
	bool cached = false;
	int ret;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	if (creq->cache)
		memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	ret = mv_cesa_ahash_alloc_cache(req);
	if (ret)
		return ret;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};