/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
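
/*
 * Software padding below follows the usual MD5/SHA construction: one
 * 0x80 byte, zeroes up to 56 mod 64, then the message length in bits
 * as a 64-bit integer (little endian for MD5, big endian for the
 * SHAs). Worked example: for creq->len = 60, index = 60, so
 * padlen = 64 + 56 - 60 = 60 and pad_req() writes 60 + 8 = 68 trailer
 * bytes, extending the message to two full 64-byte blocks.
 */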

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int index, padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
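
/*
 * The engine describes a hash in fragments: FIRST_FRAG opens a new
 * digest, MID_FRAG continues it, LAST_FRAG closes it, and NOT_FRAG
 * processes a whole message in one operation. The step function below
 * promotes FIRST_FRAG to NOT_FRAG (or MID_FRAG to LAST_FRAG) when the
 * remaining data fits in SRAM, and demotes the operation back to a
 * first/mid fragment when the padding trailer does not fit, leaving
 * the trailer for a later step.
 */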

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	for (i = 0; i < digsize / 4; i++)
		writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}
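
/*
 * Dispatch helpers: engines with a TDMA unit walk a prebuilt
 * descriptor chain (CESA_DMA_REQ), while older engines use the "std"
 * path above, where the CPU copies each chunk into the engine SRAM
 * and launches the accelerator one step at a time.
 */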

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
	for (i = 0; i < digsize / 4; i++)
		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));

	if (creq->last_req) {
		/*
		 * Hardware's MD5 digest is in little endian format, but
		 * SHA in big endian format
		 */
		if (creq->algo_le) {
			__le32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_le32(creq->state[i]);
		} else {
			__be32 *result = (void *)ahashreq->result;

			for (i = 0; i < digsize / 4; i++)
				result[i] = cpu_to_be32(creq->state[i]);
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
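
/*
 * TDMA chain construction: a fragment is emitted by copying an
 * operation block into SRAM and appending a dummy "launch" descriptor
 * behind it. Once the first fragment of a template has been emitted,
 * the template itself is flipped to MID_FRAG so that every later
 * fragment continues the same digest.
 */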

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
				      CESA_SA_DESC_CFG_NOT_FRAG :
				      CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						CESA_SA_DATA_SRAM_OFFSET +
						frag_len,
						ahashdreq->padding_dma,
						len, CESA_TDMA_DST_IN_SRAM,
						flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
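
/*
 * A complete chain for one request is laid out as:
 *
 *   [cache xfer] -> ([data xfers] -> [op + launch])* -> final op -> end
 *
 * where the final operation is either a plain fragment (more data will
 * follow) or the closing sequence built by mv_cesa_ahash_dma_last_req().
 */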

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	if (op) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
				       CESA_TDMA_BREAK_CHAIN);

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}
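
/*
 * Common entry point for update/final/finup: initialize the request,
 * then hand it to the least-loaded engine. Requests small enough to
 * fit in the block-sized cache complete here without touching the
 * hardware.
 */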

static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}

static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}

static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
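
/*
 * Export/import serialize the intermediate state (digest words, total
 * length and the partial-block cache) using the generic md5_state,
 * sha1_state and sha256_state layouts, so a hash can be suspended and
 * resumed later, possibly by a different implementation.
 */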

static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}

static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
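
/*
 * Illustrative usage sketch (not part of the driver): these
 * algorithms are consumed through the generic ahash API. The callback
 * and context names below are placeholders.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("md5", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	ret = crypto_ahash_digest(req); (may return -EINPROGRESS)
 *
 * Because the algorithms are flagged CRYPTO_ALG_ASYNC, completion is
 * reported through the callback rather than the return value.
 */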

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
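
/*
 * HMAC support: rather than feeding the hardware the raw key, setkey
 * precomputes the inner and outer digest states from the ipad/opad
 * key blocks and loads them into the operation context as IVs.
 */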

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	return 0;
}
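
/*
 * Key preprocessing per RFC 2104: a key longer than the block size is
 * first digested down to digestsize bytes, then zero-padded; ipad and
 * opad are the padded key XORed with 0x36 and 0x5c respectively, and
 * hashing one block of each (mv_cesa_ahmac_iv_state_init()) yields
 * the inner and outer states.
 */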

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
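
/*
 * The SHA-1 and SHA-256 HMAC variants below mirror the MD5 ones; only
 * the MACM opcode, the state layout and the digest byte order differ
 * (the hardware produces MD5 words little endian and SHA words big
 * endian, hence the algo_le flag).
 */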

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};