Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at tag v5.1
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

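/*
 * Standard (CPU-driven) processing: copy the result of the last
 * SRAM-sized chunk from the engine SRAM back into the destination
 * scatterlist, and return -EINPROGRESS until the whole request has
 * been handled.
 */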
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

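/* Wipe the transformation context, including any key material. */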
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

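/*
 * DMA path: map the source/destination scatterlists and build the TDMA
 * descriptor chain. Each loop iteration adds an op descriptor (carrying
 * the full context only on the first iteration, see skip_ctx), the input
 * transfers, a dummy descriptor that launches the crypto operation, and
 * the output transfers. A final "result" op fetches the updated IV back
 * from SRAM.
 */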
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

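/*
 * Standard (non-DMA) path: keep the op template in the request; it is
 * copied into the engine SRAM chunk by chunk at step time.
 */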
static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

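/*
 * The skcipher_alg definitions below are the interface the rest of the
 * kernel sees. As a rough usage sketch (illustrative only, not part of
 * this driver; error handling and buffer setup omitted), a client would
 * go through the generic skcipher API:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("ecb(des)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, DES_KEY_SIZE);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, 0, my_done_cb, my_ctx);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, NULL);
 *	crypto_skcipher_encrypt(req);
 *
 * Because these algorithms are CRYPTO_ALG_ASYNC, encrypt/decrypt may
 * return -EINPROGRESS and complete later through the request callback.
 * my_done_cb, my_ctx, src_sg, dst_sg and len are placeholders.
 */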
struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

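/*
 * CBC 3DES: copy the IV into the op template; the CBC chaining and EDE
 * configuration bits are set by the encrypt/decrypt entry points below.
 */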
static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

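/*
 * CBC AES: switch the template to CBC chaining and copy the IV, then
 * reuse the common AES op setup (key schedule selection and key length
 * configuration bits).
 */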
static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};
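
/*
 * Registration of these skcipher_alg instances with the crypto API is
 * handled by the CESA platform driver core (cesa.c).
 */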