Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: Makefile - replace the use of <module>-objs with <module>-y
  crypto: hifn_795x - use cancel_delayed_work_sync()
  crypto: talitos - sparse check endian fixes
  crypto: talitos - fix checkpatch warning
  crypto: talitos - fix warning: 'alg' may be used uninitialized in this function
  crypto: cryptd - Adding the AEAD interface type support to cryptd
  crypto: n2_crypto - Niagara2 driver needs to depend upon CRYPTO_DES
  crypto: Kconfig - update broken web addresses
  crypto: omap-sham - Adjust DMA parameters
  crypto: fips - FIPS requires algorithm self-tests
  crypto: omap-aes - OMAP2/3 AES hw accelerator driver
  crypto: updates to enable omap aes
  padata: add missing __percpu markup in include/linux/padata.h
  MAINTAINERS: Add maintainer entries for padata/pcrypt

16 files changed, 1310 insertions(+), 38 deletions(-)

MAINTAINERS (+16)

···
 S:	Maintained
 F:	drivers/i2c/busses/i2c-pasemi.c
 
+PADATA PARALLEL EXECUTION MECHANISM
+M:	Steffen Klassert <steffen.klassert@secunet.com>
+L:	linux-kernel@vger.kernel.org
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	kernel/padata.c
+F:	include/linux/padata.h
+F:	Documentation/padata.txt
+
 PANASONIC LAPTOP ACPI EXTRAS DRIVER
 M:	Harald Welte <laforge@gnumonks.org>
 L:	platform-driver-x86@vger.kernel.org
···
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/pcnet32.c
+
+PCRYPT PARALLEL CRYPTO ENGINE
+M:	Steffen Klassert <steffen.klassert@secunet.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	crypto/pcrypt.c
+F:	include/crypto/pcrypt.h
 
 PER-TASK DELAY ACCOUNTING
 M:	Balbir Singh <balbir@linux.vnet.ibm.com>

arch/arm/mach-omap2/clock2420_data.c (+1 -1)

···
 	CLK(NULL,	"des_ick",	&des_ick,	CK_242X),
 	CLK("omap-sham",	"ick",	&sha_ick,	CK_242X),
 	CLK("omap_rng",	"ick",	&rng_ick,	CK_242X),
-	CLK(NULL,	"aes_ick",	&aes_ick,	CK_242X),
+	CLK("omap-aes",	"ick",	&aes_ick,	CK_242X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_242X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_242X),
 	CLK("musb_hdrc",	"fck",	&osc_ck,	CK_242X),

arch/arm/mach-omap2/clock2430_data.c (+1 -1)

···
 	CLK(NULL,	"des_ick",	&des_ick,	CK_243X),
 	CLK("omap-sham",	"ick",	&sha_ick,	CK_243X),
 	CLK("omap_rng",	"ick",	&rng_ick,	CK_243X),
-	CLK(NULL,	"aes_ick",	&aes_ick,	CK_243X),
+	CLK("omap-aes",	"ick",	&aes_ick,	CK_243X),
 	CLK(NULL,	"pka_ick",	&pka_ick,	CK_243X),
 	CLK(NULL,	"usb_fck",	&usb_fck,	CK_243X),
 	CLK("musb_hdrc",	"ick",	&usbhs_ick,	CK_243X),

arch/arm/mach-omap2/clock3xxx_data.c (+1 -1)

···
 	CLK(NULL,	"usbtll_ick",	&usbtll_ick,	CK_3430ES2 | CK_AM35XX),
 	CLK("mmci-omap-hs.2",	"ick",	&mmchs3_ick,	CK_3430ES2 | CK_AM35XX),
 	CLK(NULL,	"icr_ick",	&icr_ick,	CK_343X),
-	CLK(NULL,	"aes2_ick",	&aes2_ick,	CK_343X),
+	CLK("omap-aes",	"ick",	&aes2_ick,	CK_343X),
 	CLK("omap-sham",	"ick",	&sha12_ick,	CK_343X),
 	CLK(NULL,	"des2_ick",	&des2_ick,	CK_343X),
 	CLK("mmci-omap-hs.1",	"ick",	&mmchs2_ick,	CK_3XXX),

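
The three clock-table edits above are the same one-line change for each SoC generation: the AES interface clock is re-registered under the device name "omap-aes" instead of an anonymous entry, so the new driver can claim it without knowing which SoC-specific clock backs it. A minimal sketch of the lookup side, assuming a platform device actually named "omap-aes" (the helper name here is illustrative, not from this patch set):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* clkdev matches CLK("omap-aes", "ick", ...) against dev_name(&pdev->dev)
	 * plus the connection id "ick", so one clk_get() works on 2420/2430/34xx. */
	static int omap_aes_example_get_clk(struct platform_device *pdev)
	{
		struct clk *iclk = clk_get(&pdev->dev, "ick");

		if (IS_ERR(iclk))
			return PTR_ERR(iclk);

		clk_enable(iclk);	/* ungate the interface clock */
		/* ... access the AES registers here ... */
		clk_disable(iclk);
		clk_put(iclk);
		return 0;
	}
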
arch/arm/mach-omap2/devices.c (+71)

···
 static inline void omap_init_sham(void) { }
 #endif
 
+#if defined(CONFIG_CRYPTO_DEV_OMAP_AES) || defined(CONFIG_CRYPTO_DEV_OMAP_AES_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+static struct resource omap2_aes_resources[] = {
+	{
+		.start	= OMAP24XX_SEC_AES_BASE,
+		.end	= OMAP24XX_SEC_AES_BASE + 0x4C,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= OMAP24XX_DMA_AES_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	{
+		.start	= OMAP24XX_DMA_AES_RX,
+		.flags	= IORESOURCE_DMA,
+	}
+};
+static int omap2_aes_resources_sz = ARRAY_SIZE(omap2_aes_resources);
+#else
+#define omap2_aes_resources		NULL
+#define omap2_aes_resources_sz		0
+#endif
+
+#ifdef CONFIG_ARCH_OMAP34XX
+static struct resource omap3_aes_resources[] = {
+	{
+		.start	= OMAP34XX_SEC_AES_BASE,
+		.end	= OMAP34XX_SEC_AES_BASE + 0x4C,
+		.flags	= IORESOURCE_MEM,
+	},
+	{
+		.start	= OMAP34XX_DMA_AES2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	{
+		.start	= OMAP34XX_DMA_AES2_RX,
+		.flags	= IORESOURCE_DMA,
+	}
+};
+static int omap3_aes_resources_sz = ARRAY_SIZE(omap3_aes_resources);
+#else
+#define omap3_aes_resources		NULL
+#define omap3_aes_resources_sz		0
+#endif
+
+static struct platform_device aes_device = {
+	.name		= "omap-aes",
+	.id		= -1,
+};
+
+static void omap_init_aes(void)
+{
+	if (cpu_is_omap24xx()) {
+		aes_device.resource = omap2_aes_resources;
+		aes_device.num_resources = omap2_aes_resources_sz;
+	} else if (cpu_is_omap34xx()) {
+		aes_device.resource = omap3_aes_resources;
+		aes_device.num_resources = omap3_aes_resources_sz;
+	} else {
+		pr_err("%s: platform not supported\n", __func__);
+		return;
+	}
+	platform_device_register(&aes_device);
+}
+
+#else
+static inline void omap_init_aes(void) { }
+#endif
+
 /*-------------------------------------------------------------------------*/
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
···
 	omap_hdq_init();
 	omap_init_sti();
 	omap_init_sham();
+	omap_init_aes();
 	omap_init_vout();
 
 	return 0;

crypto/Kconfig (+10 -11)

···
 
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
-	depends on CRYPTO_ANSI_CPRNG
+	depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
 	help
 	  This options enables the fips boot option which is
 	  required if you want to system to operate in a FIPS 200
 	  certification. You should say no unless you know what
-	  this is. Note that CRYPTO_ANSI_CPRNG is required if this
-	  option is selected
+	  this is.
 
 config CRYPTO_ALGAPI
 	tristate
···
 	  RIPEMD-160 should be used.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD160
 	tristate "RIPEMD-160 digest algorithm"
···
 	  against RIPEMD-160.
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD256
 	tristate "RIPEMD-256 digest algorithm"
···
 	  (than RIPEMD-128).
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_RMD320
 	tristate "RIPEMD-320 digest algorithm"
···
 	  (than RIPEMD-160).
 
 	  Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
-	  See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
+	  See <http://homes.esat.kuleuven.be/~bosselae/ripemd160.html>
 
 config CRYPTO_SHA1
 	tristate "SHA1 digest algorithm"
···
 	  Whirlpool will be part of the ISO/IEC 10118-3:2003(E) standard
 
 	  See also:
-	  <http://planeta.terra.com.br/informatica/paulobarreto/WhirlpoolPage.html>
+	  <http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html>
 
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "GHASH digest algorithm (CLMUL-NI accelerated)"
···
 	  in the NESSIE competition.
 
 	  See also:
-	  <https://www.cosic.esat.kuleuven.ac.be/nessie/reports/>
-	  <http://planeta.terra.com.br/informatica/paulobarreto/AnubisPage.html>
+	  <https://www.cosic.esat.kuleuven.be/nessie/reports/>
+	  <http://www.larc.usp.br/~pbarreto/AnubisPage.html>
 
 config CRYPTO_ARC4
 	tristate "ARC4 cipher algorithm"
···
 	  on 32-bit processors. Khazad uses an 128 bit key size.
 
 	  See also:
-	  <http://planeta.terra.com.br/informatica/paulobarreto/KhazadPage.html>
+	  <http://www.larc.usp.br/~pbarreto/KhazadPage.html>
 
 config CRYPTO_SALSA20
 	tristate "Salsa20 stream cipher algorithm (EXPERIMENTAL)"

crypto/cryptd.c (+203 -3)

···
  *
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
···
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
 #include <linux/err.h>
···
 	struct cryptd_queue *queue;
 };
 
+struct aead_instance_ctx {
+	struct crypto_aead_spawn aead_spawn;
+	struct cryptd_queue *queue;
+};
+
 struct cryptd_blkcipher_ctx {
 	struct crypto_blkcipher *child;
 };
···
 struct cryptd_hash_request_ctx {
 	crypto_completion_t complete;
 	struct shash_desc desc;
+};
+
+struct cryptd_aead_ctx {
+	struct crypto_aead *child;
+};
+
+struct cryptd_aead_request_ctx {
+	crypto_completion_t complete;
 };
 
 static void cryptd_queue_worker(struct work_struct *work);
···
 	return err;
 }
 
+static void cryptd_aead_crypt(struct aead_request *req,
+			struct crypto_aead *child,
+			int err,
+			int (*crypt)(struct aead_request *req))
+{
+	struct cryptd_aead_request_ctx *rctx;
+	rctx = aead_request_ctx(req);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+	aead_request_set_tfm(req, child);
+	err = crypt( req );
+	req->base.complete = rctx->complete;
+out:
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+}
+
+static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+	struct crypto_aead *child = ctx->child;
+	struct aead_request *req;
+
+	req = container_of(areq, struct aead_request, base);
+	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+}
+
+static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
+	struct crypto_aead *child = ctx->child;
+	struct aead_request *req;
+
+	req = container_of(areq, struct aead_request, base);
+	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+}
+
+static int cryptd_aead_enqueue(struct aead_request *req,
+				    crypto_completion_t complete)
+{
+	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
+
+	rctx->complete = req->base.complete;
+	req->base.complete = complete;
+	return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
+{
+	return cryptd_aead_enqueue(req, cryptd_aead_encrypt );
+}
+
+static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
+{
+	return cryptd_aead_enqueue(req, cryptd_aead_decrypt );
+}
+
+static int cryptd_aead_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+	struct aead_instance_ctx *ictx = crypto_instance_ctx(inst);
+	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *cipher;
+
+	cipher = crypto_spawn_aead(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	crypto_aead_set_flags(cipher, CRYPTO_TFM_REQ_MAY_SLEEP);
+	ctx->child = cipher;
+	tfm->crt_aead.reqsize = sizeof(struct cryptd_aead_request_ctx);
+	return 0;
+}
+
+static void cryptd_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	crypto_free_aead(ctx->child);
+}
+
+static int cryptd_create_aead(struct crypto_template *tmpl,
+		              struct rtattr **tb,
+			      struct cryptd_queue *queue)
+{
+	struct aead_instance_ctx *ctx;
+	struct crypto_instance *inst;
+	struct crypto_alg *alg;
+	int err;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_AEAD,
+				CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(alg))
+		return PTR_ERR(alg);
+
+	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
+	err = PTR_ERR(inst);
+	if (IS_ERR(inst))
+		goto out_put_alg;
+
+	ctx = crypto_instance_ctx(inst);
+	ctx->queue = queue;
+
+	err = crypto_init_spawn(&ctx->aead_spawn.base, alg, inst,
+			CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+	if (err)
+		goto out_free_inst;
+
+	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+	inst->alg.cra_type = alg->cra_type;
+	inst->alg.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
+	inst->alg.cra_init = cryptd_aead_init_tfm;
+	inst->alg.cra_exit = cryptd_aead_exit_tfm;
+	inst->alg.cra_aead.setkey      = alg->cra_aead.setkey;
+	inst->alg.cra_aead.setauthsize = alg->cra_aead.setauthsize;
+	inst->alg.cra_aead.geniv       = alg->cra_aead.geniv;
+	inst->alg.cra_aead.ivsize      = alg->cra_aead.ivsize;
+	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+	inst->alg.cra_aead.encrypt     = cryptd_aead_encrypt_enqueue;
+	inst->alg.cra_aead.decrypt     = cryptd_aead_decrypt_enqueue;
+	inst->alg.cra_aead.givencrypt  = alg->cra_aead.givencrypt;
+	inst->alg.cra_aead.givdecrypt  = alg->cra_aead.givdecrypt;
+
+	err = crypto_register_instance(tmpl, inst);
+	if (err) {
+		crypto_drop_spawn(&ctx->aead_spawn.base);
+out_free_inst:
+		kfree(inst);
+	}
+out_put_alg:
+	crypto_mod_put(alg);
+	return err;
+}
+
 static struct cryptd_queue queue;
 
 static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
···
 		return cryptd_create_blkcipher(tmpl, tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
 		return cryptd_create_hash(tmpl, tb, &queue);
+	case CRYPTO_ALG_TYPE_AEAD:
+		return cryptd_create_aead(tmpl, tb, &queue);
 	}
 
 	return -EINVAL;
···
 {
 	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
 	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
+	struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
 
 	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AHASH:
 		crypto_drop_shash(&hctx->spawn);
 		kfree(ahash_instance(inst));
 		return;
+	case CRYPTO_ALG_TYPE_AEAD:
+		crypto_drop_spawn(&aead_ctx->aead_spawn.base);
+		kfree(inst);
+		return;
+	default:
+		crypto_drop_spawn(&ctx->spawn);
+		kfree(inst);
 	}
-
-	crypto_drop_spawn(&ctx->spawn);
-	kfree(inst);
 }
 
 static struct crypto_template cryptd_tmpl = {
···
 	crypto_free_ahash(&tfm->base);
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ahash);
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+				      u32 type, u32 mask)
+{
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct crypto_aead *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_aead(tfm);
+		return ERR_PTR(-EINVAL);
+	}
+	return __cryptd_aead_cast(tfm);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
+{
+	struct cryptd_aead_ctx *ctx;
+	ctx = crypto_aead_ctx(&tfm->base);
+	return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_aead_child);
+
+void cryptd_free_aead(struct cryptd_aead *tfm)
+{
+	crypto_free_aead(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_aead);
 
 static int __init cryptd_init(void)
 {

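
The AEAD plumbing above reuses cryptd's one core trick, already used for block ciphers and hashes: cryptd_aead_enqueue() parks the caller's completion callback in the request context and substitutes cryptd's own, and when the workqueue later runs the request, cryptd_aead_crypt() performs the real operation synchronously and then fires the saved callback under local_bh_disable(). A stripped-down userspace model of that callback swap (all names here are illustrative, not kernel API):

	typedef void (*completion_t)(void *req, int err);

	struct request {
		completion_t complete;		/* callback currently armed */
		struct request_ctx *rctx;	/* per-request private area */
	};

	struct request_ctx {
		completion_t saved;		/* the caller's original callback */
	};

	/* Enqueue side: remember the caller's callback, arm our own. */
	static void enqueue(struct request *req, completion_t worker_complete)
	{
		req->rctx->saved = req->complete;
		req->complete = worker_complete;
		/* ...hand req to a worker thread here... */
	}

	/* Worker side: do the deferred work, restore and call the saved callback. */
	static void worker_complete(void *r, int err)
	{
		struct request *req = r;

		/* ...run the underlying synchronous transform, updating err... */
		req->complete = req->rctx->saved;
		req->rctx->saved(req, err);
	}
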
drivers/crypto/Kconfig (+9)

···
 
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
+	select CRYPTO_DES
 	select CRYPTO_ALGAPI
 	depends on SPARC64
 	help
···
 	help
 	  OMAP processors have SHA1/MD5 hw accelerator. Select this if you
 	  want to use the OMAP module for SHA1/MD5 algorithms.
+
+config CRYPTO_DEV_OMAP_AES
+	tristate "Support for OMAP AES hw engine"
+	depends on ARCH_OMAP2 || ARCH_OMAP3
+	select CRYPTO_AES
+	help
+	  OMAP processors have AES module accelerator. Select this if you
+	  want to use the OMAP module for AES algorithms.
 
 endif # CRYPTO_HW

drivers/crypto/Makefile (+2 -1)

···
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
-n2_crypto-objs := n2_core.o n2_asm.o
+n2_crypto-y := n2_core.o n2_asm.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
+obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o

drivers/crypto/amcc/Makefile (+1 -1)

···
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-objs := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o

drivers/crypto/hifn_795x.c (+1 -2)

···
 	dev = pci_get_drvdata(pdev);
 
 	if (dev) {
-		cancel_delayed_work(&dev->work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&dev->work);
 
 		hifn_unregister_rng(dev);
 		hifn_unregister_alg(dev);

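
The hifn change is a teardown fix: cancel_delayed_work() only stops a pending timer, so the old code needed flush_scheduled_work() to wait out a handler that had already started, which also waits on every unrelated item in the shared workqueue. cancel_delayed_work_sync() cancels and waits for this one work item only. The general shape, as a sketch with a hypothetical device structure:

	#include <linux/workqueue.h>

	struct demo_dev {
		struct delayed_work work;	/* e.g. a periodic polling routine */
	};

	static void demo_remove(struct demo_dev *dev)
	{
		/* Cancels the timer if still pending AND waits for the handler if
		 * it is already executing; after this the callback cannot run, so
		 * it is safe to free resources the handler touches. */
		cancel_delayed_work_sync(&dev->work);
	}
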
drivers/crypto/omap-aes.c (new file, +948)

/*
 * Cryptographic API.
 *
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>

#include <plat/cpu.h>
#include <plat/dma.h>

/* OMAP TRM gives bitfields as start:end, where start is the higher bit
   number. For example 7:0 */
#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))

#define AES_REG_KEY(x)		(0x1C - ((x ^ 0x01) * 0x04))
#define AES_REG_IV(x)		(0x20 + ((x) * 0x04))

#define AES_REG_CTRL		0x30
#define AES_REG_CTRL_CTR_WIDTH	(1 << 7)
#define AES_REG_CTRL_CTR	(1 << 6)
#define AES_REG_CTRL_CBC	(1 << 5)
#define AES_REG_CTRL_KEY_SIZE	(3 << 3)
#define AES_REG_CTRL_DIRECTION	(1 << 2)
#define AES_REG_CTRL_INPUT_READY	(1 << 1)
#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)

#define AES_REG_DATA		0x34
#define AES_REG_DATA_N(x)	(0x34 + ((x) * 0x04))

#define AES_REG_REV		0x44
#define AES_REG_REV_MAJOR	0xF0
#define AES_REG_REV_MINOR	0x0F

#define AES_REG_MASK		0x48
#define AES_REG_MASK_SIDLE	(1 << 6)
#define AES_REG_MASK_START	(1 << 5)
#define AES_REG_MASK_DMA_OUT_EN	(1 << 3)
#define AES_REG_MASK_DMA_IN_EN	(1 << 2)
#define AES_REG_MASK_SOFTRESET	(1 << 1)
#define AES_REG_AUTOIDLE	(1 << 0)

#define AES_REG_SYSSTATUS	0x4C
#define AES_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define DEFAULT_TIMEOUT		(5*HZ)

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)

#define FLAGS_NEW_KEY		BIT(4)
#define FLAGS_NEW_IV		BIT(5)
#define FLAGS_INIT		BIT(6)
#define FLAGS_FAST		BIT(7)
#define FLAGS_BUSY		8

struct omap_aes_ctx {
	struct omap_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
	unsigned long	flags;
};

struct omap_aes_reqctx {
	unsigned long mode;
};

#define OMAP_AES_QUEUE_LENGTH	1
#define OMAP_AES_CACHE_SIZE	0

struct omap_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;
	struct clk		*iclk;
	struct omap_aes_ctx	*ctx;
	struct device		*dev;
	unsigned long		flags;

	u32			*iv;
	u32			ctrl;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	task;

	struct ablkcipher_request	*req;
	size_t				total;
	struct scatterlist		*in_sg;
	size_t				in_offset;
	struct scatterlist		*out_sg;
	size_t				out_offset;

	size_t			buflen;
	void			*buf_in;
	size_t			dma_size;
	int			dma_in;
	int			dma_lch_in;
	dma_addr_t		dma_addr_in;
	void			*buf_out;
	int			dma_out;
	int			dma_lch_out;
	dma_addr_t		dma_addr_out;
};

/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
				  u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_aes_read(dd, offset);
	val &= ~mask;
	val |= value;
	omap_aes_write(dd, offset, val);
}

static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_aes_write(dd, offset, *value);
}

static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT;

	while (!(omap_aes_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout)) {
			dev_err(dd->dev, "omap-aes timeout\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
	int err = 0;

	clk_enable(dd->iclk);
	if (!(dd->flags & FLAGS_INIT)) {
		/* is it necessary to reset before every operation? */
		omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
					AES_REG_MASK_SOFTRESET);
		/*
		 * prevent OCP bus error (SRESP) in case an access to the module
		 * is performed while the module is coming out of soft reset
		 */
		__asm__ __volatile__("nop");
		__asm__ __volatile__("nop");

		err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
				AES_REG_SYSSTATUS_RESETDONE);
		if (!err)
			dd->flags |= FLAGS_INIT;
	}

	return err;
}

static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
{
	clk_disable(dd->iclk);
}

static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
	unsigned int key32;
	int i;
	u32 val, mask;

	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
	if (dd->flags & FLAGS_CBC)
		val |= AES_REG_CTRL_CBC;
	if (dd->flags & FLAGS_ENCRYPT)
		val |= AES_REG_CTRL_DIRECTION;

	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
	   !(dd->ctx->flags & FLAGS_NEW_KEY))
		goto out;

	/* only need to write control registers for new settings */

	dd->ctrl = val;

	val = 0;
	if (dd->dma_lch_out >= 0)
		val |= AES_REG_MASK_DMA_OUT_EN;
	if (dd->dma_lch_in >= 0)
		val |= AES_REG_MASK_DMA_IN_EN;

	mask = AES_REG_MASK_DMA_IN_EN | AES_REG_MASK_DMA_OUT_EN;

	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);

	pr_debug("Set key\n");
	key32 = dd->ctx->keylen / sizeof(u32);
	/* set a key */
	for (i = 0; i < key32; i++) {
		omap_aes_write(dd, AES_REG_KEY(i),
			__le32_to_cpu(dd->ctx->key[i]));
	}
	dd->ctx->flags &= ~FLAGS_NEW_KEY;

	if (dd->flags & FLAGS_NEW_IV) {
		pr_debug("Set IV\n");
		omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
		dd->flags &= ~FLAGS_NEW_IV;
	}

	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
			AES_REG_CTRL_KEY_SIZE;

	omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);

out:
	/* start DMA or disable idle mode */
	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
			    AES_REG_MASK_START);
}

static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
{
	struct omap_aes_dev *dd = NULL, *tmp;

	spin_lock_bh(&list_lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &dev_list, list) {
			/* FIXME: take fist available aes core */
			dd = tmp;
			break;
		}
		ctx->dd = dd;
	} else {
		/* already found before */
		dd = ctx->dd;
	}
	spin_unlock_bh(&list_lock);

	return dd;
}

static void omap_aes_dma_callback(int lch, u16 ch_status, void *data)
{
	struct omap_aes_dev *dd = data;

	if (lch == dd->dma_lch_out)
		tasklet_schedule(&dd->task);
}

static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
	int err = -ENOMEM;

	dd->dma_lch_out = -1;
	dd->dma_lch_in = -1;

	dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, OMAP_AES_CACHE_SIZE);
	dd->buflen = PAGE_SIZE << OMAP_AES_CACHE_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf_in || !dd->buf_out) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		goto err_alloc;
	}

	/* MAP here */
	dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in, dd->buflen,
					 DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_in;
	}

	dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out, dd->buflen,
					  DMA_FROM_DEVICE);
	if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
		dev_err(dd->dev, "dma %d bytes error\n", dd->buflen);
		err = -EINVAL;
		goto err_map_out;
	}

	err = omap_request_dma(dd->dma_in, "omap-aes-rx",
			       omap_aes_dma_callback, dd, &dd->dma_lch_in);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		goto err_dma_in;
	}
	err = omap_request_dma(dd->dma_out, "omap-aes-tx",
			       omap_aes_dma_callback, dd, &dd->dma_lch_out);
	if (err) {
		dev_err(dd->dev, "Unable to request DMA channel\n");
		goto err_dma_out;
	}

	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
				 dd->phys_base + AES_REG_DATA, 0, 4);

	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);

	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
				dd->phys_base + AES_REG_DATA, 0, 4);

	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);

	return 0;

err_dma_out:
	omap_free_dma(dd->dma_lch_in);
err_dma_in:
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
err_map_out:
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
err_map_in:
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
err_alloc:
	if (err)
		pr_err("error: %d\n", err);
	return err;
}

static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
	omap_free_dma(dd->dma_lch_out);
	omap_free_dma(dd->dma_lch_in);
	dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
			 DMA_FROM_DEVICE);
	dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen, DMA_TO_DEVICE);
	free_pages((unsigned long)dd->buf_out, OMAP_AES_CACHE_SIZE);
	free_pages((unsigned long)dd->buf_in, OMAP_AES_CACHE_SIZE);
}

static void sg_copy_buf(void *buf, struct scatterlist *sg,
			unsigned int start, unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_advance(&walk, start);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
		   size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}

static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
			      dma_addr_t dma_addr_out, int length)
{
	struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct omap_aes_dev *dd = ctx->dd;
	int len32;

	pr_debug("len: %d\n", length);

	dd->dma_size = length;

	if (!(dd->flags & FLAGS_FAST))
		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
					   DMA_TO_DEVICE);

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	/* IN */
	omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32,
				     len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in,
					OMAP_DMA_DST_SYNC);

	omap_set_dma_src_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_POST_INC,
				dma_addr_in, 0, 0);

	/* OUT */
	omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32,
				     len32, 1, OMAP_DMA_SYNC_PACKET,
					dd->dma_out, OMAP_DMA_SRC_SYNC);

	omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_addr_out, 0, 0);

	omap_start_dma(dd->dma_lch_in);
	omap_start_dma(dd->dma_lch_out);

	omap_aes_write_ctrl(dd);

	return 0;
}

static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(
					crypto_ablkcipher_reqtfm(dd->req));
	int err, fast = 0, in, out;
	size_t count;
	dma_addr_t addr_in, addr_out;

	pr_debug("total: %d\n", dd->total);

	if (sg_is_last(dd->in_sg) && sg_is_last(dd->out_sg)) {
		/* check for alignment */
		in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32));
		out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32));

		fast = in && out;
	}

	if (fast)  {
		count = min(dd->total, sg_dma_len(dd->in_sg));
		count = min(count, sg_dma_len(dd->out_sg));

		if (count != dd->total)
			return -EINVAL;

		pr_debug("fast\n");

		err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			return -EINVAL;
		}

		err = dma_map_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		if (!err) {
			dev_err(dd->dev, "dma_map_sg() error\n");
			dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
			return -EINVAL;
		}

		addr_in = sg_dma_address(dd->in_sg);
		addr_out = sg_dma_address(dd->out_sg);

		dd->flags |= FLAGS_FAST;

	} else {
		/* use cache buffers */
		count = sg_copy(&dd->in_sg, &dd->in_offset, dd->buf_in,
				dd->buflen, dd->total, 0);

		addr_in = dd->dma_addr_in;
		addr_out = dd->dma_addr_out;

		dd->flags &= ~FLAGS_FAST;

	}

	dd->total -= count;

	err = omap_aes_hw_init(dd);

	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);

	return err;
}

static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
	struct omap_aes_ctx *ctx;

	pr_debug("err: %d\n", err);

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));

	if (!dd->total)
		dd->req->base.complete(&dd->req->base, err);
}

static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
	int err = 0;
	size_t count;

	pr_debug("total: %d\n", dd->total);

	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);

	omap_aes_hw_cleanup(dd);

	omap_stop_dma(dd->dma_lch_in);
	omap_stop_dma(dd->dma_lch_out);

	if (dd->flags & FLAGS_FAST) {
		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
	} else {
		dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
					   dd->dma_size, DMA_FROM_DEVICE);

		/* copy data */
		count = sg_copy(&dd->out_sg, &dd->out_offset, dd->buf_out,
				dd->buflen, dd->dma_size, 1);
		if (count != dd->dma_size) {
			err = -EINVAL;
			pr_err("not all data converted: %u\n", count);
		}
	}

	if (err || !dd->total)
		omap_aes_finish_req(dd, err);

	return err;
}

static int omap_aes_handle_req(struct omap_aes_dev *dd)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_aes_ctx *ctx;
	struct omap_aes_reqctx *rctx;
	struct ablkcipher_request *req;
	unsigned long flags;

	if (dd->total)
		goto start;

	spin_lock_irqsave(&dd->lock, flags);
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (!async_req)
		clear_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return 0;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	pr_debug("get new req\n");

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_offset = 0;
	dd->in_sg = req->src;
	dd->out_offset = 0;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	dd->iv = req->info;
	if ((dd->flags & FLAGS_CBC) && dd->iv)
		dd->flags |= FLAGS_NEW_IV;
	else
		dd->flags &= ~FLAGS_NEW_IV;

	ctx->dd = dd;
	if (dd->ctx != ctx) {
		/* assign new context to device */
		dd->ctx = ctx;
		ctx->flags |= FLAGS_NEW_KEY;
	}

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
		pr_err("request size is not exact amount of AES blocks\n");

start:
	return omap_aes_crypt_dma_start(dd);
}

static void omap_aes_task(unsigned long data)
{
	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
	int err;

	pr_debug("enter\n");

	err = omap_aes_crypt_dma_stop(dd);

	err = omap_aes_handle_req(dd);

	pr_debug("exit\n");
}

static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct omap_aes_dev *dd;
	unsigned long flags;
	int err;

	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
		  !!(mode & FLAGS_ENCRYPT),
		  !!(mode & FLAGS_CBC));

	dd = omap_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	spin_lock_irqsave(&dd->lock, flags);
	err = ablkcipher_enqueue_request(&dd->queue, req);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
		omap_aes_handle_req(dd);

	pr_debug("exit\n");

	return err;
}

/* ********************** ALG API ************************************ */

static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256)
		return -EINVAL;

	pr_debug("enter, keylen: %d\n", keylen);

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	ctx->flags |= FLAGS_NEW_KEY;

	return 0;
}

static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");

	tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

	return 0;
}

static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
	pr_debug("enter\n");
}

/* ********************** ALGS ************************************ */

static struct crypto_alg algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_ecb_encrypt,
		.decrypt	= omap_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-omap",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= omap_aes_cra_init,
	.cra_exit		= omap_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= omap_aes_setkey,
		.encrypt	= omap_aes_cbc_encrypt,
		.decrypt	= omap_aes_cbc_decrypt,
	}
}
};

static int omap_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_aes_dev *dd;
	struct resource *res;
	int err = -ENOMEM, i, j;
	u32 reg;

	dd = kzalloc(sizeof(struct omap_aes_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		goto err_data;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	spin_lock_init(&dd->lock);
	crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "invalid resource type\n");
		err = -ENODEV;
		goto err_res;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res)
		dev_info(dev, "no DMA info\n");
	else
		dd->dma_out = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!res)
		dev_info(dev, "no DMA info\n");
	else
		dd->dma_in = res->start;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (!dd->iclk) {
		dev_err(dev, "clock intialization failed.\n");
		err = -ENODEV;
		goto err_res;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto err_io;
	}

	clk_enable(dd->iclk);
	reg = omap_aes_read(dd, AES_REG_REV);
	dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
	clk_disable(dd->iclk);

	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);

	err = omap_aes_dma_init(dd);
	if (err)
		goto err_dma;

	INIT_LIST_HEAD(&dd->list);
	spin_lock(&list_lock);
	list_add_tail(&dd->list, &dev_list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		pr_debug("i: %d\n", i);
		INIT_LIST_HEAD(&algs[i].cra_list);
		err = crypto_register_alg(&algs[i]);
		if (err)
			goto err_algs;
	}

	pr_info("probe() done\n");

	return 0;
err_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&algs[j]);
	omap_aes_dma_cleanup(dd);
err_dma:
	tasklet_kill(&dd->task);
	iounmap(dd->io_base);
err_io:
	clk_put(dd->iclk);
err_res:
	kfree(dd);
	dd = NULL;
err_data:
	dev_err(dev, "initialization failed.\n");
	return err;
}

static int omap_aes_remove(struct platform_device *pdev)
{
	struct omap_aes_dev *dd = platform_get_drvdata(pdev);
	int i;

	if (!dd)
		return -ENODEV;

	spin_lock(&list_lock);
	list_del(&dd->list);
	spin_unlock(&list_lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_alg(&algs[i]);

	tasklet_kill(&dd->task);
	omap_aes_dma_cleanup(dd);
	iounmap(dd->io_base);
	clk_put(dd->iclk);
	kfree(dd);
	dd = NULL;

	return 0;
}

static struct platform_driver omap_aes_driver = {
	.probe	= omap_aes_probe,
	.remove	= omap_aes_remove,
	.driver	= {
		.name	= "omap-aes",
		.owner	= THIS_MODULE,
	},
};

static int __init omap_aes_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-aes");

	if (!cpu_class_is_omap2() || omap_type() != OMAP2_DEVICE_TYPE_SEC) {
		pr_err("Unsupported cpu\n");
		return -ENODEV;
	}

	return platform_driver_register(&omap_aes_driver);
}

static void __exit omap_aes_mod_exit(void)
{
	platform_driver_unregister(&omap_aes_driver);
}

module_init(omap_aes_mod_init);
module_exit(omap_aes_mod_exit);

MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");

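
With the driver registered, in-kernel "ecb(aes)" and "cbc(aes)" users get the accelerator transparently: the omap implementations register at priority 100, and the crypto core resolves generic names to the highest-priority implementation available. A hedged sketch of a caller using the async ablkcipher interface of this kernel generation (error paths trimmed; a real caller must also handle -EINPROGRESS/-EBUSY with a completion callback, which is omitted here):

	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <crypto/aes.h>

	/* Encrypt one AES block in place with cbc(aes); this picks cbc-aes-omap
	 * when the hardware driver is present and outranks the software code. */
	static int example_cbc_aes_encrypt(u8 *key, unsigned int keylen,
					   u8 *iv, struct scatterlist *sg)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		int err;

		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_ablkcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		ablkcipher_request_set_crypt(req, sg, sg, AES_BLOCK_SIZE, iv);
		err = crypto_ablkcipher_encrypt(req);	/* async: may be -EINPROGRESS */

		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return err;
	}
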
drivers/crypto/omap-sham.c (+5 -1)

···
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
-			1, OMAP_DMA_SYNC_PACKET, dd->dma, OMAP_DMA_DST_SYNC);
+			1, OMAP_DMA_SYNC_PACKET, dd->dma,
+				OMAP_DMA_DST_SYNC_PREFETCH);
 
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 				dma_addr, 0, 0);
···
 
 	omap_set_dma_dest_burst_mode(dd->dma_lch,
 			OMAP_DMA_DATA_BURST_16);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_4);
 
 	return 0;
 }

drivers/crypto/talitos.c (+15 -14)

···
 static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 {
 	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
-	talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
+	talitos_ptr->eptr = upper_32_bits(dma_addr);
 }
 
 /*
···
 
 	/* GO! */
 	wmb();
-	out_be32(priv->reg + TALITOS_FF(ch),
-		 cpu_to_be32(upper_32_bits(request->dma_desc)));
+	out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
 	out_be32(priv->reg + TALITOS_FF_LO(ch),
-		 cpu_to_be32(lower_32_bits(request->dma_desc)));
+		 lower_32_bits(request->dma_desc));
 
 	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 
···
 	ahash_init(areq);
 	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
 
-	req_ctx->hw_context[0] = cpu_to_be32(SHA224_H0);
-	req_ctx->hw_context[1] = cpu_to_be32(SHA224_H1);
-	req_ctx->hw_context[2] = cpu_to_be32(SHA224_H2);
-	req_ctx->hw_context[3] = cpu_to_be32(SHA224_H3);
-	req_ctx->hw_context[4] = cpu_to_be32(SHA224_H4);
-	req_ctx->hw_context[5] = cpu_to_be32(SHA224_H5);
-	req_ctx->hw_context[6] = cpu_to_be32(SHA224_H6);
-	req_ctx->hw_context[7] = cpu_to_be32(SHA224_H7);
+	req_ctx->hw_context[0] = SHA224_H0;
+	req_ctx->hw_context[1] = SHA224_H1;
+	req_ctx->hw_context[2] = SHA224_H2;
+	req_ctx->hw_context[3] = SHA224_H3;
+	req_ctx->hw_context[4] = SHA224_H4;
+	req_ctx->hw_context[5] = SHA224_H5;
+	req_ctx->hw_context[6] = SHA224_H6;
+	req_ctx->hw_context[7] = SHA224_H7;
 
 	/* init 64-bit count */
 	req_ctx->hw_context[8] = 0;
···
 	talitos_unregister_rng(dev);
 
 	for (i = 0; i < priv->num_channels; i++)
-		if (priv->chan[i].fifo)
-			kfree(priv->chan[i].fifo);
+		kfree(priv->chan[i].fifo);
 
 	kfree(priv->chan);
 
···
 			DESC_HDR_MODE0_MDEU_SHA256;
 		}
 		break;
+	default:
+		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+		return ERR_PTR(-EINVAL);
 	}
 
 	alg->cra_module = THIS_MODULE;

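
The endian fixes are what a sparse run with `make C=2 CF=-D__CHECK_ENDIAN__` flags: out_be32() and the hardware-context words already own the byte-order conversion, so pre-wrapping the value in cpu_to_be32() is a type error. It is harmless on the big-endian parts talitos ships on, where cpu_to_be32() is an identity, but the same pattern double-swaps anywhere little-endian. A small userspace model of that failure mode (names illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for cpu_to_be32() on a little-endian host: unconditional swap. */
	static uint32_t swap32(uint32_t v)
	{
		return (v << 24) | ((v & 0xff00) << 8) |
		       ((v >> 8) & 0xff00) | (v >> 24);
	}

	/* Stand-in for out_be32(): the accessor performs the conversion itself. */
	static void out_be32_model(uint32_t *reg, uint32_t v)
	{
		*reg = swap32(v);
	}

	int main(void)
	{
		uint32_t reg, val = 0x12345678;

		out_be32_model(&reg, val);		/* correct: exactly one swap */
		printf("once:  %08x\n", reg);		/* 78563412 */

		out_be32_model(&reg, swap32(val));	/* the old talitos pattern */
		printf("twice: %08x\n", reg);		/* 12345678: no conversion at all */
		return 0;
	}
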
include/crypto/cryptd.h (+24)

···
 /*
  * Software async crypto daemon
+ *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
  */
 
 #ifndef _CRYPTO_CRYPT_H
···
 struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
 void cryptd_free_ahash(struct cryptd_ahash *tfm);
+
+struct cryptd_aead {
+	struct crypto_aead base;
+};
+
+static inline struct cryptd_aead *__cryptd_aead_cast(
+	struct crypto_aead *tfm)
+{
+	return (struct cryptd_aead *)tfm;
+}
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+					  u32 type, u32 mask);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+
+void cryptd_free_aead(struct cryptd_aead *tfm);
 
 #endif

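
The new cryptd_*_aead() entry points mirror the existing ahash helpers and are the public face of the cryptd changes above: a user that may be called from irq context wraps a synchronous-only AEAD as "cryptd(...)" and lets the daemon execute it in process context. A hedged sketch of the intended call sequence; the child algorithm name below is purely illustrative:

	#include <crypto/cryptd.h>
	#include <linux/err.h>

	static struct cryptd_aead *example_wrap_aead(void)
	{
		struct cryptd_aead *cryptd_tfm;

		/* Ask the crypto API to instantiate cryptd(<child>). */
		cryptd_tfm = cryptd_alloc_aead("__example-gcm-aes", 0, 0);
		if (IS_ERR(cryptd_tfm))
			return cryptd_tfm;

		/* &cryptd_tfm->base is an ordinary crypto_aead: requests queued
		 * on it run in the cryptd workqueue.  cryptd_aead_child() exposes
		 * the wrapped transform for direct synchronous calls when the
		 * context allows it, and cryptd_free_aead() releases the stack. */
		return cryptd_tfm;
	}
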
include/linux/padata.h (+2 -2)

···
  */
 struct parallel_data {
 	struct padata_instance		*pinst;
-	struct padata_parallel_queue	*pqueue;
-	struct padata_serial_queue	*squeue;
+	struct padata_parallel_queue	__percpu *pqueue;
+	struct padata_serial_queue	__percpu *squeue;
 	atomic_t			seq_nr;
 	atomic_t			reorder_objects;
 	atomic_t			refcnt;

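
__percpu is a sparse-only annotation (it expands to nothing in a normal build): it places the pointer in a separate address space so that dereferencing it directly, or mixing it with plain pointers, gets flagged, documenting that pqueue/squeue come from alloc_percpu() and must be accessed via per_cpu_ptr(). A minimal sketch of the pattern the markup enforces, with a hypothetical structure:

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	struct example_queue {
		int backlog;
	};

	/* sparse tracks this pointer in the percpu address space */
	static struct example_queue __percpu *queues;

	static int example_init(void)
	{
		int cpu;

		queues = alloc_percpu(struct example_queue);
		if (!queues)
			return -ENOMEM;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(queues, cpu)->backlog = 0;	/* correct access */

		/* queues->backlog = 0;  <-- sparse: incorrect address space */
		return 0;
	}
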