Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.33-rc5 · 606 lines · 14 kB
/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current crypt process
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @total_req_bytes:	total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int total_req_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct ablkcipher_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}
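/*
 * The indexing in compute_aes_dec_key(), spelled out: the schedule built
 * by crypto_aes_expand_key() holds 4 * (Nr + 1) 32-bit words, with
 * Nr = key_len / 4 + 6 rounds, so the final round key starts at word
 * 4 * Nr = key_len + 24 (word 40 for a 16-byte key). Decryption via the
 * equivalent inverse cipher is seeded from the tail of the schedule,
 * which the switch above copies into aes_dec_key in 4-word steps.
 */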
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* copy only what the caller supplied; a 128/192-bit key buffer may
	 * be shorter than AES_KEY_LEN */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}

static void setup_data_in(struct ablkcipher_request *req)
{
	int ret;
	void *buf;

	if (!cpg->p.sg_src_left) {
		ret = sg_miter_next(&cpg->p.src_sg_it);
		BUG_ON(!ret);
		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
		cpg->p.src_start = 0;
	}

	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);

	buf = cpg->p.src_sg_it.addr;
	buf += cpg->p.src_start;

	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);

	cpg->p.sg_src_left -= cpg->p.crypt_len;
	cpg->p.src_start += cpg->p.crypt_len;
}

static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in(req);
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}
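/*
 * dequeue_complete_req() runs in the queue thread once the interrupt
 * handler has marked the engine W_DEQUEUE: it drains the processed chunk
 * from the SRAM into the destination scatterlist, then either feeds the
 * engine the next chunk of the same request or completes the request.
 */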
static void dequeue_complete_req(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	void *buf;
	int ret;
	int sram_offset = 0;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

		/* advance within the SRAM output buffer as well, in case a
		 * dst scatterlist entry is smaller than crypt_len */
		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START + sram_offset,
				dst_copy);
		sram_offset += dst_copy;

		cpg->p.sg_dst_left -= dst_copy;
		cpg->p.crypt_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (cpg->p.crypt_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < req->nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		req->base.complete(&req->base, 0);
	}
}

static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;

	do {
		total_bytes -= sl[i].length;
		i++;

	} while (total_bytes > 0);

	return i;
}

static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	int num_sgs;

	cpg->cur_req = req;
	memset(&cpg->p, 0, sizeof(struct req_progress));

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
	mv_process_current_q(1);
}

static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		/* only set below while the engine is idle, so start NULL */
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct ablkcipher_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = ablkcipher_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}
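/*
 * Completion interrupt: the engine raises SEC_INT_ACCEL0_DONE once the
 * chunk in the SRAM has been processed. The handler acknowledges it,
 * moves the engine from BUSY to W_DEQUEUE and wakes the queue thread,
 * which picks the result up in dequeue_complete_req().
 */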
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

static struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};

static struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
	/* unwind in reverse order of setup */
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}
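/*
 * For context (not part of the original file): mv_probe() expects board
 * code to have registered a platform device named "mv_crypto" with named
 * "regs" and "sram" memory resources plus an interrupt. A sketch of such
 * glue follows; every address and the IRQ number are invented
 * placeholders, not real Orion5X values.
 */
#if 0	/* illustrative board glue only */
static struct resource mv_crypto_res[] = {
	{
		.name	= "regs",
		.start	= 0xf1090000,		/* placeholder */
		.end	= 0xf1090fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "sram",
		.start	= 0xf5000000,		/* placeholder */
		.end	= 0xf5001fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 28,			/* placeholder IRQ */
		.end	= 28,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mv_crypto_dev = {
	.name		= "mv_crypto",
	.id		= -1,
	.resource	= mv_crypto_res,
	.num_resources	= ARRAY_SIZE(mv_crypto_res),
};
#endif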
static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");
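For reference, here is roughly how a kernel-side caller of the 2.6.33-era ablkcipher API would exercise the "cbc(aes)" implementation this driver registers. This is a sketch, not part of the file: mv_cesa_demo, demo_result and demo_complete are invented names, the key and IV are all-zero placeholders, and error handling is trimmed. Because the algorithm is flagged CRYPTO_ALG_ASYNC, encrypt() normally returns -EINPROGRESS and the request finishes from the driver's queue thread, so the caller waits on a completion.

#include <crypto/aes.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct demo_result {
	struct completion completion;
	int err;
};

/* async callback; the driver invokes it via req->base.complete() */
static void demo_complete(struct crypto_async_request *req, int err)
{
	struct demo_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int mv_cesa_demo(void)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct demo_result res;
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* placeholder IV */
	static u8 buf[2 * AES_BLOCK_SIZE];	/* encrypted in place */
	int ret;

	/* cra_priority 300 lets "mv-cbc-aes" win over the software cbc(aes) */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	init_completion(&res.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					demo_complete, &res);
	sg_init_one(&sg, buf, sizeof(buf));
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}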