// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
        struct mv_cesa_ctx base;
        u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
        struct mv_cesa_ctx base;
        struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
        struct mv_cesa_dma_iter base;
        struct mv_cesa_sg_dma_iter src;
        struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
                               struct skcipher_request *req)
{
        mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
        mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
        mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
        iter->src.op_offset = 0;
        iter->dst.op_offset = 0;

        return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (req->dst != req->src) {
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_TO_DEVICE);
        } else {
                dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                             DMA_BIDIRECTIONAL);
        }
        mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_cleanup(req);
}

static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len = min_t(size_t, req->cryptlen - sreq->offset,
                           CESA_SA_SRAM_PAYLOAD_SIZE);

        mv_cesa_adjust_op(engine, &sreq->op);
        memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

        len = sg_pcopy_to_buffer(req->src, creq->src_nents,
                                 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                                 len, sreq->offset);

        sreq->size = len;
        mv_cesa_set_crypt_op_len(&sreq->op, len);

        /* FIXME: only update enc_len field */
        if (!sreq->skip_ctx) {
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
                sreq->skip_ctx = true;
        } else {
                memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
        }

        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
        writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
        BUG_ON(readl(engine->regs + CESA_SA_CMD) &
               CESA_SA_CMD_EN_CESA_SA_ACCL0);
        writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
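
/*
 * Standard (non-TDMA) completion path: copy the chunk the engine just
 * processed out of its internal SRAM into the destination scatterlist,
 * and report -EINPROGRESS while part of the request still has to go
 * through mv_cesa_skcipher_std_step().
 */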
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
                                        u32 status)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_engine *engine = creq->base.engine;
        size_t len;

        len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
                                   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
                                   sreq->size, sreq->offset);

        sreq->offset += len;
        if (sreq->offset < req->cryptlen)
                return -EINPROGRESS;

        return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
                                    u32 status)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_req *basereq = &creq->base;

        if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
                return mv_cesa_skcipher_std_process(skreq, status);

        return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_dma_step(&creq->base);
        else
                mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_req *basereq = &creq->base;

        mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;

        sreq->size = 0;
        sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
                                            struct mv_cesa_engine *engine)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        creq->base.engine = engine;

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
                mv_cesa_skcipher_dma_prepare(skreq);
        else
                mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);

        mv_cesa_skcipher_cleanup(skreq);
}

static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
        struct skcipher_request *skreq = skcipher_request_cast(req);
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
        struct mv_cesa_engine *engine = creq->base.engine;
        unsigned int ivsize;

        atomic_sub(skreq->cryptlen, &engine->load);
        ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
                struct mv_cesa_req *basereq;

                basereq = &creq->base;
                memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
                       ivsize);
        } else {
                memcpy_fromio(skreq->iv,
                              engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
                              ivsize);
        }
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
        .step = mv_cesa_skcipher_step,
        .process = mv_cesa_skcipher_process,
        .cleanup = mv_cesa_skcipher_req_cleanup,
        .complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
        void *ctx = crypto_tfm_ctx(tfm);

        memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}
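
/*
 * Common tfm init: plug the skcipher request ops into the base context and
 * reserve room for a struct mv_cesa_skcipher_req in every request.
 */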
static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
        struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->ops = &mv_cesa_skcipher_req_ops;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct mv_cesa_skcipher_req));

        return 0;
}

static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        int remaining;
        int offset;
        int ret;
        int i;

        ret = crypto_aes_expand_key(&ctx->aes, key, len);
        if (ret) {
                crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return ret;
        }

        remaining = (ctx->aes.key_length - 16) / 4;
        offset = ctx->aes.key_length + 24 - remaining;
        for (i = 0; i < remaining; i++)
                ctx->aes.key_dec[4 + i] =
                        cpu_to_le32(ctx->aes.key_enc[offset + i]);

        return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
                              unsigned int len)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 tmp[DES_EXPKEY_WORDS];
        int ret;

        if (len != DES_KEY_SIZE) {
                crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        ret = des_ekey(tmp, key);
        if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }

        memcpy(ctx->key, key, DES_KEY_SIZE);

        return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
                                   const u8 *key, unsigned int len)
{
        struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
        int err;

        err = des3_verify_key(cipher, key);
        if (unlikely(err))
                return err;

        memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

        return 0;
}
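
/*
 * TDMA path: map the source/destination scatterlists and build a descriptor
 * chain covering the whole request. Each iteration queues an op descriptor,
 * the input transfers, a dummy descriptor that launches the crypto engine,
 * and the output transfers; a final result descriptor copies the updated IV
 * back from SRAM so mv_cesa_skcipher_complete() can return it.
 */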
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
                                         const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;
        struct mv_cesa_req *basereq = &creq->base;
        struct mv_cesa_skcipher_dma_iter iter;
        bool skip_ctx = false;
        int ret;

        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        if (req->src != req->dst) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
                if (!ret)
                        return -ENOMEM;

                ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                                 DMA_FROM_DEVICE);
                if (!ret) {
                        ret = -ENOMEM;
                        goto err_unmap_src;
                }
        } else {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_BIDIRECTIONAL);
                if (!ret)
                        return -ENOMEM;
        }

        mv_cesa_tdma_desc_iter_init(&basereq->chain);
        mv_cesa_skcipher_req_iter_init(&iter, req);

        do {
                struct mv_cesa_op_ctx *op;

                op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
                                        flags);
                if (IS_ERR(op)) {
                        ret = PTR_ERR(op);
                        goto err_free_tdma;
                }
                skip_ctx = true;

                mv_cesa_set_crypt_op_len(op, iter.base.op_len);

                /* Add input transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.src, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add dummy desc to launch the crypto operation */
                ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
                if (ret)
                        goto err_free_tdma;

                /* Add output transfers */
                ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
                                                   &iter.dst, flags);
                if (ret)
                        goto err_free_tdma;

        } while (mv_cesa_skcipher_req_iter_next_op(&iter));

        /* Add output data for IV */
        ret = mv_cesa_dma_add_result_op(&basereq->chain,
                                        CESA_SA_CFG_SRAM_OFFSET,
                                        CESA_SA_DATA_SRAM_OFFSET,
                                        CESA_TDMA_SRC_IN_SRAM, flags);

        if (ret)
                goto err_free_tdma;

        basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

        return 0;

err_free_tdma:
        mv_cesa_dma_cleanup(basereq);
        if (req->dst != req->src)
                dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
                             DMA_FROM_DEVICE);

err_unmap_src:
        dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
                     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

        return ret;
}
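
/*
 * Standard (non-TDMA) path: simply keep a copy of the op template in the
 * per-request context; mv_cesa_skcipher_std_step() then feeds the data to
 * the engine SRAM one chunk at a time.
 */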
static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
                              const struct mv_cesa_op_ctx *op_templ)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_skcipher_std_req *sreq = &creq->std;
        struct mv_cesa_req *basereq = &creq->base;

        sreq->op = *op_templ;
        sreq->skip_ctx = false;
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;

        return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
                                     struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned int blksize = crypto_skcipher_blocksize(tfm);
        int ret;

        if (!IS_ALIGNED(req->cryptlen, blksize))
                return -EINVAL;

        creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (creq->src_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of src SG");
                return creq->src_nents;
        }
        creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        if (creq->dst_nents < 0) {
                dev_err(cesa_dev->dev, "Invalid number of dst SG");
                return creq->dst_nents;
        }

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
                              CESA_SA_DESC_CFG_OP_MSK);

        if (cesa_dev->caps->has_tdma)
                ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
        else
                ret = mv_cesa_skcipher_std_req_init(req, tmpl);

        return ret;
}

static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
                                      struct mv_cesa_op_ctx *tmpl)
{
        int ret;
        struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
        struct mv_cesa_engine *engine;

        ret = mv_cesa_skcipher_req_init(req, tmpl);
        if (ret)
                return ret;

        engine = mv_cesa_select_engine(req->cryptlen);
        mv_cesa_skcipher_prepare(&req->base, engine);

        ret = mv_cesa_queue_req(&req->base, &creq->base);

        if (mv_cesa_req_needs_cleanup(&req->base, ret))
                mv_cesa_skcipher_cleanup(req);

        return ret;
}

static int mv_cesa_des_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des_op(req, &tmpl);
}
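
/*
 * The skcipher_alg definitions below are deliberately non-static: they are
 * picked up and registered with the crypto API by the core CESA driver.
 */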
struct skcipher_alg mv_cesa_ecb_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_ecb_des_encrypt,
        .decrypt = mv_cesa_ecb_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .base = {
                .cra_name = "ecb(des)",
                .cra_driver_name = "mv-ecb-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);

        memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

        return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
        .setkey = mv_cesa_des_setkey,
        .encrypt = mv_cesa_cbc_des_encrypt,
        .decrypt = mv_cesa_cbc_des_decrypt,
        .min_keysize = DES_KEY_SIZE,
        .max_keysize = DES_KEY_SIZE,
        .ivsize = DES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des)",
                .cra_driver_name = "mv-cbc-des",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_des3_op(struct skcipher_request *req,
                           struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
                              CESA_SA_DESC_CFG_CRYPTM_MSK);

        memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_ecb_des3_ede_encrypt,
        .decrypt = mv_cesa_ecb_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "ecb(des3_ede)",
                .cra_driver_name = "mv-ecb-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
                               struct mv_cesa_op_ctx *tmpl)
{
        memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

        return mv_cesa_des3_op(req, tmpl);
}
static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_CBC |
                           CESA_SA_DESC_CFG_3DES_EDE |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
        .setkey = mv_cesa_des3_ede_setkey,
        .encrypt = mv_cesa_cbc_des3_ede_encrypt,
        .decrypt = mv_cesa_cbc_des3_ede_decrypt,
        .min_keysize = DES3_EDE_KEY_SIZE,
        .max_keysize = DES3_EDE_KEY_SIZE,
        .ivsize = DES3_EDE_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(des3_ede)",
                .cra_driver_name = "mv-cbc-des3-ede",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};

static int mv_cesa_aes_op(struct skcipher_request *req,
                          struct mv_cesa_op_ctx *tmpl)
{
        struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        int i;
        u32 *key;
        u32 cfg;

        cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

        if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
                key = ctx->aes.key_dec;
        else
                key = ctx->aes.key_enc;

        for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
                tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

        if (ctx->aes.key_length == 24)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
        else if (ctx->aes.key_length == 32)
                cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

        mv_cesa_update_op_cfg(tmpl, cfg,
                              CESA_SA_DESC_CFG_CRYPTM_MSK |
                              CESA_SA_DESC_CFG_AES_LEN_MSK);

        return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl,
                           CESA_SA_DESC_CFG_CRYPTCM_ECB |
                           CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_ecb_aes_encrypt,
        .decrypt = mv_cesa_ecb_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .base = {
                .cra_name = "ecb(aes)",
                .cra_driver_name = "mv-ecb-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};
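
/*
 * CBC mode: the caller-provided IV is copied into the op template so the
 * engine can chain blocks; the updated IV is copied back to req->iv in
 * mv_cesa_skcipher_complete().
 */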
static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
                              struct mv_cesa_op_ctx *tmpl)
{
        mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
                              CESA_SA_DESC_CFG_CRYPTCM_MSK);
        memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

        return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
        struct mv_cesa_op_ctx tmpl;

        mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

        return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
        .setkey = mv_cesa_aes_setkey,
        .encrypt = mv_cesa_cbc_aes_encrypt,
        .decrypt = mv_cesa_cbc_aes_decrypt,
        .min_keysize = AES_MIN_KEY_SIZE,
        .max_keysize = AES_MAX_KEY_SIZE,
        .ivsize = AES_BLOCK_SIZE,
        .base = {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "mv-cbc-aes",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
                .cra_alignmask = 0,
                .cra_module = THIS_MODULE,
                .cra_init = mv_cesa_skcipher_cra_init,
                .cra_exit = mv_cesa_skcipher_cra_exit,
        },
};