// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#include "cesa.h"

struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};

static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}

static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}

static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}

static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}

static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}

static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}

static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}

static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}

static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}

static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);
		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}

static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (len &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				len += mv_cesa_ahash_pad_req(creq,
						engine->sram + len +
						CESA_SA_DATA_SRAM_OFFSET);
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
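	/*
	 * Write the SA configuration, make sure the accelerator is idle,
	 * then kick off the operation on accelerator 0.
	 */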
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}

static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}

static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}

static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = cpu_to_le32(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}

static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}

static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}

static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};

static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}

static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}

static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}

static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}

static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
				      CESA_SA_DESC_CFG_NOT_FRAG :
				      CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}

static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
711 */ 712 basereq->chain.first->flags |= CESA_TDMA_SET_STATE; 713 } 714 715 return 0; 716 717err_free_tdma: 718 mv_cesa_dma_cleanup(basereq); 719 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); 720 721err: 722 mv_cesa_ahash_last_cleanup(req); 723 724 return ret; 725} 726 727static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached) 728{ 729 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 730 731 creq->src_nents = sg_nents_for_len(req->src, req->nbytes); 732 if (creq->src_nents < 0) { 733 dev_err(cesa_dev->dev, "Invalid number of src SG"); 734 return creq->src_nents; 735 } 736 737 *cached = mv_cesa_ahash_cache_req(req); 738 739 if (*cached) 740 return 0; 741 742 if (cesa_dev->caps->has_tdma) 743 return mv_cesa_ahash_dma_req_init(req); 744 else 745 return 0; 746} 747 748static int mv_cesa_ahash_queue_req(struct ahash_request *req) 749{ 750 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 751 struct mv_cesa_engine *engine; 752 bool cached = false; 753 int ret; 754 755 ret = mv_cesa_ahash_req_init(req, &cached); 756 if (ret) 757 return ret; 758 759 if (cached) 760 return 0; 761 762 engine = mv_cesa_select_engine(req->nbytes); 763 mv_cesa_ahash_prepare(&req->base, engine); 764 765 ret = mv_cesa_queue_req(&req->base, &creq->base); 766 767 if (mv_cesa_req_needs_cleanup(&req->base, ret)) 768 mv_cesa_ahash_cleanup(req); 769 770 return ret; 771} 772 773static int mv_cesa_ahash_update(struct ahash_request *req) 774{ 775 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 776 777 creq->len += req->nbytes; 778 779 return mv_cesa_ahash_queue_req(req); 780} 781 782static int mv_cesa_ahash_final(struct ahash_request *req) 783{ 784 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 785 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; 786 787 mv_cesa_set_mac_op_total_len(tmpl, creq->len); 788 creq->last_req = true; 789 req->nbytes = 0; 790 791 return mv_cesa_ahash_queue_req(req); 792} 793 794static int mv_cesa_ahash_finup(struct ahash_request *req) 795{ 796 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 797 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl; 798 799 creq->len += req->nbytes; 800 mv_cesa_set_mac_op_total_len(tmpl, creq->len); 801 creq->last_req = true; 802 803 return mv_cesa_ahash_queue_req(req); 804} 805 806static int mv_cesa_ahash_export(struct ahash_request *req, void *hash, 807 u64 *len, void *cache) 808{ 809 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 810 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 811 unsigned int digsize = crypto_ahash_digestsize(ahash); 812 unsigned int blocksize; 813 814 blocksize = crypto_ahash_blocksize(ahash); 815 816 *len = creq->len; 817 memcpy(hash, creq->state, digsize); 818 memset(cache, 0, blocksize); 819 memcpy(cache, creq->cache, creq->cache_ptr); 820 821 return 0; 822} 823 824static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash, 825 u64 len, const void *cache) 826{ 827 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); 828 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); 829 unsigned int digsize = crypto_ahash_digestsize(ahash); 830 unsigned int blocksize; 831 unsigned int cache_ptr; 832 int ret; 833 834 ret = crypto_ahash_init(req); 835 if (ret) 836 return ret; 837 838 blocksize = crypto_ahash_blocksize(ahash); 839 if (len >= blocksize) 840 mv_cesa_update_op_cfg(&creq->op_tmpl, 841 CESA_SA_DESC_CFG_MID_FRAG, 842 CESA_SA_DESC_CFG_FRAG_MSK); 843 844 creq->len = len; 845 memcpy(creq->state, hash, digsize); 846 creq->cache_ptr = 0; 
	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}

static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}

static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}

static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}

static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}

static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}

static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}

static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}

static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}

static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}

struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};

static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}

static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		memset(keydup, 0, keylen);
		kfree(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}

static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}

static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));
	return 0;
}

static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}

static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = be32_to_cpu(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.hash[i]);

	return 0;
}

static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
					unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = be32_to_cpu(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = be32_to_cpu(ostate.state[i]);

	return 0;
}

static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}

static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}

struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};