/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}

static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

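/*
 * Standard (PIO) processing path: once the engine has signalled
 * completion, copy the processed chunk back out of the engine SRAM and
 * advance the request offset. Returns -EINPROGRESS while data remains,
 * so that mv_cesa_ablkcipher_std_step() is run again for the next
 * CESA_SA_SRAM_PAYLOAD_SIZE-sized chunk.
 */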
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_ablkcipher_std_process(ablkreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

static void
mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(ablkreq->nbytes, &engine->load);
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
	} else {
		memcpy_fromio(ablkreq->info,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

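/*
 * Request operations exposed to the common CESA queueing code: both the
 * TDMA-backed and the PIO paths are dispatched from these hooks based
 * on the request type.
 */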
static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
	.complete = mv_cesa_ablkcipher_complete,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;

	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

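/*
 * Build the TDMA descriptor chain for a cipher request: for each
 * SRAM-sized chunk, queue an op descriptor (the full op context only on
 * the first iteration, thanks to skip_ctx), the input data transfers, a
 * dummy descriptor that launches the crypto operation, and the output
 * transfers. A final descriptor copies the updated IV out of SRAM so
 * that mv_cesa_ablkcipher_complete() can hand it back to the caller.
 */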
static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ablkcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;
	unsigned int ivsize;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
	ret = mv_cesa_dma_add_iv_op(&basereq->chain,
				    CESA_SA_CRYPT_IV_SRAM_OFFSET,
				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
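/*
 * Common request initialization: reject lengths that are not a multiple
 * of the cipher block size, validate the source and destination
 * scatterlists, force a crypt-only operation in the op template, and
 * pick the TDMA path when the engine supports it, falling back to the
 * PIO path otherwise.
 */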
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

static int mv_cesa_ablkcipher_queue_req(struct ablkcipher_request *req,
					struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ablkcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

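/*
 * The crypto_alg definitions below are plain ablkcipher algs; they are
 * expected to be registered by the platform driver through
 * crypto_register_alg(). A .cra_priority of 300 lets this hardware
 * implementation win over the generic software ciphers.
 */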
struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

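/*
 * As with the DES and AES CBC variants, the IV supplied in req->info is
 * copied into the op template before the request is queued; the IV
 * produced by the engine is copied back to req->info in
 * mv_cesa_ablkcipher_complete().
 */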
static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};

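/*
 * AES operations load either the encryption or the decryption key
 * schedule, depending on the direction bit in the op template, and
 * encode the key length (192 or 256 bits) in the descriptor config
 * word; 128-bit keys need no extra flag.
 */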
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_ablkcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};
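
/*
 * Usage sketch (not part of the driver): one way a v4.8-era in-kernel
 * client might drive the "cbc(aes)" implementation registered above,
 * going through the generic skcipher API rather than calling this file
 * directly. The example_* names are hypothetical and error handling is
 * trimmed; treat this as an illustration under those assumptions, not a
 * reference client.
 */
#if 0
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct example_result {
	struct completion completion;
	int err;
};

/* Completion callback: record the final status and wake the waiter. */
static void example_done(struct crypto_async_request *areq, int err)
{
	struct example_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;	/* backlogged request has started; keep waiting */

	res->err = err;
	complete(&res->completion);
}

static int example_encrypt_one_block(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct example_result res;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 *buf;
	int ret;

	/* "mv-cbc-aes" wins over the generic cipher via .cra_priority. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* Heap buffer: the driver may DMA-map it on the TDMA path. */
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_free_req;
	}

	init_completion(&res.completion);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      example_done, &res);
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	ret = crypto_skcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Asynchronous completion: wait for the callback. */
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	kfree(buf);
out_free_req:
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
#endif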