Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
- Made x86 ablk_helper generic for ARM
- Phase out chainiv in favour of eseqiv (affects IPsec)
- Fixed aes-cbc IV corruption on s390
- Added constant-time crypto_memneq which replaces memcmp
- Fixed aes-ctr in omap-aes
- Added OMAP3 ROM RNG support
- Add PRNG support for MSM SoCs
- Add and use Job Ring API in caam
- Misc fixes

[ NOTE! This pull request was sent within the merge window, but Herbert
has some questionable email sending setup that makes him public enemy
#1 as far as gmail is concerned. So most of his emails seem to be
trapped by gmail as spam, resulting in me not seeing them. - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (49 commits)
crypto: s390 - Fix aes-cbc IV corruption
crypto: omap-aes - Fix CTR mode counter length
crypto: omap-sham - Add missing modalias
padata: make the sequence counter an atomic_t
crypto: caam - Modify the interface layers to use JR API's
crypto: caam - Add API's to allocate/free Job Rings
crypto: caam - Add Platform driver for Job Ring
hwrng: msm - Add PRNG support for MSM SoCs
ARM: DT: msm: Add Qualcomm's PRNG driver binding document
crypto: skcipher - Use eseqiv even on UP machines
crypto: talitos - Simplify key parsing
crypto: picoxcell - Simplify and harden key parsing
crypto: ixp4xx - Simplify and harden key parsing
crypto: authencesn - Simplify key parsing
crypto: authenc - Export key parsing helper function
crypto: mv_cesa: remove deprecated IRQF_DISABLED
hwrng: OMAP3 ROM Random Number Generator support
crypto: sha256_ssse3 - also test for BMI2
crypto: mv_cesa - Remove redundant of_match_ptr
crypto: sahara - Remove redundant of_match_ptr
...

+1448 -645
+17
Documentation/devicetree/bindings/rng/qcom,prng.txt
··· 1 + Qualcomm MSM pseudo random number generator. 2 + 3 + Required properties: 4 + 5 + - compatible : should be "qcom,prng" 6 + - reg : specifies base physical address and size of the registers map 7 + - clocks : phandle to clock-controller plus clock-specifier pair 8 + - clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block 9 + 10 + Example: 11 + 12 + rng@f9bff000 { 13 + compatible = "qcom,prng"; 14 + reg = <0xf9bff000 0x200>; 15 + clocks = <&clock GCC_PRNG_AHB_CLK>; 16 + clock-names = "core"; 17 + };
-10
arch/arm/mach-tegra/fuse.c
··· 209 209 tegra_sku_id, tegra_cpu_process_id, 210 210 tegra_core_process_id); 211 211 } 212 - 213 - unsigned long long tegra_chip_uid(void) 214 - { 215 - unsigned long long lo, hi; 216 - 217 - lo = tegra_fuse_readl(FUSE_UID_LOW); 218 - hi = tegra_fuse_readl(FUSE_UID_HIGH); 219 - return (hi << 32ull) | lo; 220 - } 221 - EXPORT_SYMBOL(tegra_chip_uid);
+12 -7
arch/s390/crypto/aes_s390.c
··· 35 35 static char keylen_flag; 36 36 37 37 struct s390_aes_ctx { 38 - u8 iv[AES_BLOCK_SIZE]; 39 38 u8 key[AES_MAX_KEY_SIZE]; 40 39 long enc; 41 40 long dec; ··· 440 441 return aes_set_key(tfm, in_key, key_len); 441 442 } 442 443 443 - static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, 444 + static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, 444 445 struct blkcipher_walk *walk) 445 446 { 447 + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); 446 448 int ret = blkcipher_walk_virt(desc, walk); 447 449 unsigned int nbytes = walk->nbytes; 450 + struct { 451 + u8 iv[AES_BLOCK_SIZE]; 452 + u8 key[AES_MAX_KEY_SIZE]; 453 + } param; 448 454 449 455 if (!nbytes) 450 456 goto out; 451 457 452 - memcpy(param, walk->iv, AES_BLOCK_SIZE); 458 + memcpy(param.iv, walk->iv, AES_BLOCK_SIZE); 459 + memcpy(param.key, sctx->key, sctx->key_len); 453 460 do { 454 461 /* only use complete blocks */ 455 462 unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); 456 463 u8 *out = walk->dst.virt.addr; 457 464 u8 *in = walk->src.virt.addr; 458 465 459 - ret = crypt_s390_kmc(func, param, out, in, n); 466 + ret = crypt_s390_kmc(func, &param, out, in, n); 460 467 if (ret < 0 || ret != n) 461 468 return -EIO; 462 469 463 470 nbytes &= AES_BLOCK_SIZE - 1; 464 471 ret = blkcipher_walk_done(desc, walk, nbytes); 465 472 } while ((nbytes = walk->nbytes)); 466 - memcpy(walk->iv, param, AES_BLOCK_SIZE); 473 + memcpy(walk->iv, param.iv, AES_BLOCK_SIZE); 467 474 468 475 out: 469 476 return ret; ··· 486 481 return fallback_blk_enc(desc, dst, src, nbytes); 487 482 488 483 blkcipher_walk_init(&walk, dst, src, nbytes); 489 - return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); 484 + return cbc_aes_crypt(desc, sctx->enc, &walk); 490 485 } 491 486 492 487 static int cbc_aes_decrypt(struct blkcipher_desc *desc, ··· 500 495 return fallback_blk_dec(desc, dst, src, nbytes); 501 496 502 497 blkcipher_walk_init(&walk, dst, src, nbytes); 503 - return 
cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); 498 + return cbc_aes_crypt(desc, sctx->dec, &walk); 504 499 } 505 500 506 501 static struct crypto_alg cbc_aes_alg = {
+2 -1
arch/x86/crypto/Makefile
··· 3 3 # 4 4 5 5 avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) 6 + avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ 7 + $(comma)4)$(comma)%ymm2,yes,no) 6 8 7 - obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o 8 9 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o 9 10 10 11 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
+7 -6
arch/x86/crypto/ablk_helper.c crypto/ablk_helper.c
··· 28 28 #include <linux/crypto.h> 29 29 #include <linux/init.h> 30 30 #include <linux/module.h> 31 + #include <linux/hardirq.h> 31 32 #include <crypto/algapi.h> 32 33 #include <crypto/cryptd.h> 33 - #include <asm/i387.h> 34 - #include <asm/crypto/ablk_helper.h> 34 + #include <crypto/ablk_helper.h> 35 + #include <asm/simd.h> 35 36 36 37 int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key, 37 38 unsigned int key_len) ··· 71 70 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 72 71 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 73 72 74 - if (!irq_fpu_usable()) { 73 + if (!may_use_simd()) { 75 74 struct ablkcipher_request *cryptd_req = 76 75 ablkcipher_request_ctx(req); 77 76 78 - memcpy(cryptd_req, req, sizeof(*req)); 77 + *cryptd_req = *req; 79 78 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 80 79 81 80 return crypto_ablkcipher_encrypt(cryptd_req); ··· 90 89 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 91 90 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); 92 91 93 - if (!irq_fpu_usable()) { 92 + if (!may_use_simd()) { 94 93 struct ablkcipher_request *cryptd_req = 95 94 ablkcipher_request_ctx(req); 96 95 97 - memcpy(cryptd_req, req, sizeof(*req)); 96 + *cryptd_req = *req; 98 97 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); 99 98 100 99 return crypto_ablkcipher_decrypt(cryptd_req);
+1 -1
arch/x86/crypto/aesni-intel_glue.c
··· 34 34 #include <asm/cpu_device_id.h> 35 35 #include <asm/i387.h> 36 36 #include <asm/crypto/aes.h> 37 - #include <asm/crypto/ablk_helper.h> 37 + #include <crypto/ablk_helper.h> 38 38 #include <crypto/scatterwalk.h> 39 39 #include <crypto/internal/aead.h> 40 40 #include <linux/workqueue.h>
+1 -1
arch/x86/crypto/camellia_aesni_avx2_glue.c
··· 14 14 #include <linux/types.h> 15 15 #include <linux/crypto.h> 16 16 #include <linux/err.h> 17 + #include <crypto/ablk_helper.h> 17 18 #include <crypto/algapi.h> 18 19 #include <crypto/ctr.h> 19 20 #include <crypto/lrw.h> ··· 22 21 #include <asm/xcr.h> 23 22 #include <asm/xsave.h> 24 23 #include <asm/crypto/camellia.h> 25 - #include <asm/crypto/ablk_helper.h> 26 24 #include <asm/crypto/glue_helper.h> 27 25 28 26 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
+1 -1
arch/x86/crypto/camellia_aesni_avx_glue.c
··· 14 14 #include <linux/types.h> 15 15 #include <linux/crypto.h> 16 16 #include <linux/err.h> 17 + #include <crypto/ablk_helper.h> 17 18 #include <crypto/algapi.h> 18 19 #include <crypto/ctr.h> 19 20 #include <crypto/lrw.h> ··· 22 21 #include <asm/xcr.h> 23 22 #include <asm/xsave.h> 24 23 #include <asm/crypto/camellia.h> 25 - #include <asm/crypto/ablk_helper.h> 26 24 #include <asm/crypto/glue_helper.h> 27 25 28 26 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
+1 -1
arch/x86/crypto/cast5_avx_glue.c
··· 26 26 #include <linux/types.h> 27 27 #include <linux/crypto.h> 28 28 #include <linux/err.h> 29 + #include <crypto/ablk_helper.h> 29 30 #include <crypto/algapi.h> 30 31 #include <crypto/cast5.h> 31 32 #include <crypto/cryptd.h> 32 33 #include <crypto/ctr.h> 33 34 #include <asm/xcr.h> 34 35 #include <asm/xsave.h> 35 - #include <asm/crypto/ablk_helper.h> 36 36 #include <asm/crypto/glue_helper.h> 37 37 38 38 #define CAST5_PARALLEL_BLOCKS 16
+1 -1
arch/x86/crypto/cast6_avx_glue.c
··· 28 28 #include <linux/types.h> 29 29 #include <linux/crypto.h> 30 30 #include <linux/err.h> 31 + #include <crypto/ablk_helper.h> 31 32 #include <crypto/algapi.h> 32 33 #include <crypto/cast6.h> 33 34 #include <crypto/cryptd.h> ··· 38 37 #include <crypto/xts.h> 39 38 #include <asm/xcr.h> 40 39 #include <asm/xsave.h> 41 - #include <asm/crypto/ablk_helper.h> 42 40 #include <asm/crypto/glue_helper.h> 43 41 44 42 #define CAST6_PARALLEL_BLOCKS 8
+1 -1
arch/x86/crypto/serpent_avx2_glue.c
··· 14 14 #include <linux/types.h> 15 15 #include <linux/crypto.h> 16 16 #include <linux/err.h> 17 + #include <crypto/ablk_helper.h> 17 18 #include <crypto/algapi.h> 18 19 #include <crypto/ctr.h> 19 20 #include <crypto/lrw.h> ··· 23 22 #include <asm/xcr.h> 24 23 #include <asm/xsave.h> 25 24 #include <asm/crypto/serpent-avx.h> 26 - #include <asm/crypto/ablk_helper.h> 27 25 #include <asm/crypto/glue_helper.h> 28 26 29 27 #define SERPENT_AVX2_PARALLEL_BLOCKS 16
+1 -1
arch/x86/crypto/serpent_avx_glue.c
··· 28 28 #include <linux/types.h> 29 29 #include <linux/crypto.h> 30 30 #include <linux/err.h> 31 + #include <crypto/ablk_helper.h> 31 32 #include <crypto/algapi.h> 32 33 #include <crypto/serpent.h> 33 34 #include <crypto/cryptd.h> ··· 39 38 #include <asm/xcr.h> 40 39 #include <asm/xsave.h> 41 40 #include <asm/crypto/serpent-avx.h> 42 - #include <asm/crypto/ablk_helper.h> 43 41 #include <asm/crypto/glue_helper.h> 44 42 45 43 /* 8-way parallel cipher functions */
+1 -1
arch/x86/crypto/serpent_sse2_glue.c
··· 34 34 #include <linux/types.h> 35 35 #include <linux/crypto.h> 36 36 #include <linux/err.h> 37 + #include <crypto/ablk_helper.h> 37 38 #include <crypto/algapi.h> 38 39 #include <crypto/serpent.h> 39 40 #include <crypto/cryptd.h> ··· 43 42 #include <crypto/lrw.h> 44 43 #include <crypto/xts.h> 45 44 #include <asm/crypto/serpent-sse2.h> 46 - #include <asm/crypto/ablk_helper.h> 47 45 #include <asm/crypto/glue_helper.h> 48 46 49 47 static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
+2 -2
arch/x86/crypto/sha256_ssse3_glue.c
··· 281 281 /* allow AVX to override SSSE3, it's a little faster */ 282 282 if (avx_usable()) { 283 283 #ifdef CONFIG_AS_AVX2 284 - if (boot_cpu_has(X86_FEATURE_AVX2)) 284 + if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2)) 285 285 sha256_transform_asm = sha256_transform_rorx; 286 286 else 287 287 #endif ··· 319 319 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated"); 320 320 321 321 MODULE_ALIAS("sha256"); 322 - MODULE_ALIAS("sha384"); 322 + MODULE_ALIAS("sha224");
+1 -1
arch/x86/crypto/twofish_avx_glue.c
··· 28 28 #include <linux/types.h> 29 29 #include <linux/crypto.h> 30 30 #include <linux/err.h> 31 + #include <crypto/ablk_helper.h> 31 32 #include <crypto/algapi.h> 32 33 #include <crypto/twofish.h> 33 34 #include <crypto/cryptd.h> ··· 40 39 #include <asm/xcr.h> 41 40 #include <asm/xsave.h> 42 41 #include <asm/crypto/twofish.h> 43 - #include <asm/crypto/ablk_helper.h> 44 42 #include <asm/crypto/glue_helper.h> 45 43 #include <crypto/scatterwalk.h> 46 44 #include <linux/workqueue.h>
arch/x86/include/asm/crypto/ablk_helper.h include/crypto/ablk_helper.h
+11
arch/x86/include/asm/simd.h
··· 1 + 2 + #include <asm/i387.h> 3 + 4 + /* 5 + * may_use_simd - whether it is allowable at this time to issue SIMD 6 + * instructions or access the SIMD register file 7 + */ 8 + static __must_check inline bool may_use_simd(void) 9 + { 10 + return irq_fpu_usable(); 11 + }
+11 -12
crypto/Kconfig
··· 174 174 help 175 175 Quick & dirty crypto test module. 176 176 177 - config CRYPTO_ABLK_HELPER_X86 177 + config CRYPTO_ABLK_HELPER 178 178 tristate 179 - depends on X86 180 179 select CRYPTO_CRYPTD 181 180 182 181 config CRYPTO_GLUE_HELPER_X86 ··· 694 695 select CRYPTO_AES_X86_64 if 64BIT 695 696 select CRYPTO_AES_586 if !64BIT 696 697 select CRYPTO_CRYPTD 697 - select CRYPTO_ABLK_HELPER_X86 698 + select CRYPTO_ABLK_HELPER 698 699 select CRYPTO_ALGAPI 699 700 select CRYPTO_GLUE_HELPER_X86 if 64BIT 700 701 select CRYPTO_LRW ··· 894 895 depends on CRYPTO 895 896 select CRYPTO_ALGAPI 896 897 select CRYPTO_CRYPTD 897 - select CRYPTO_ABLK_HELPER_X86 898 + select CRYPTO_ABLK_HELPER 898 899 select CRYPTO_GLUE_HELPER_X86 899 900 select CRYPTO_CAMELLIA_X86_64 900 901 select CRYPTO_LRW ··· 916 917 depends on CRYPTO 917 918 select CRYPTO_ALGAPI 918 919 select CRYPTO_CRYPTD 919 - select CRYPTO_ABLK_HELPER_X86 920 + select CRYPTO_ABLK_HELPER 920 921 select CRYPTO_GLUE_HELPER_X86 921 922 select CRYPTO_CAMELLIA_X86_64 922 923 select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 ··· 968 969 depends on X86 && 64BIT 969 970 select CRYPTO_ALGAPI 970 971 select CRYPTO_CRYPTD 971 - select CRYPTO_ABLK_HELPER_X86 972 + select CRYPTO_ABLK_HELPER 972 973 select CRYPTO_CAST_COMMON 973 974 select CRYPTO_CAST5 974 975 help ··· 991 992 depends on X86 && 64BIT 992 993 select CRYPTO_ALGAPI 993 994 select CRYPTO_CRYPTD 994 - select CRYPTO_ABLK_HELPER_X86 995 + select CRYPTO_ABLK_HELPER 995 996 select CRYPTO_GLUE_HELPER_X86 996 997 select CRYPTO_CAST_COMMON 997 998 select CRYPTO_CAST6 ··· 1109 1110 depends on X86 && 64BIT 1110 1111 select CRYPTO_ALGAPI 1111 1112 select CRYPTO_CRYPTD 1112 - select CRYPTO_ABLK_HELPER_X86 1113 + select CRYPTO_ABLK_HELPER 1113 1114 select CRYPTO_GLUE_HELPER_X86 1114 1115 select CRYPTO_SERPENT 1115 1116 select CRYPTO_LRW ··· 1131 1132 depends on X86 && !64BIT 1132 1133 select CRYPTO_ALGAPI 1133 1134 select CRYPTO_CRYPTD 1134 - select CRYPTO_ABLK_HELPER_X86 1135 + select 
CRYPTO_ABLK_HELPER 1135 1136 select CRYPTO_GLUE_HELPER_X86 1136 1137 select CRYPTO_SERPENT 1137 1138 select CRYPTO_LRW ··· 1153 1154 depends on X86 && 64BIT 1154 1155 select CRYPTO_ALGAPI 1155 1156 select CRYPTO_CRYPTD 1156 - select CRYPTO_ABLK_HELPER_X86 1157 + select CRYPTO_ABLK_HELPER 1157 1158 select CRYPTO_GLUE_HELPER_X86 1158 1159 select CRYPTO_SERPENT 1159 1160 select CRYPTO_LRW ··· 1175 1176 depends on X86 && 64BIT 1176 1177 select CRYPTO_ALGAPI 1177 1178 select CRYPTO_CRYPTD 1178 - select CRYPTO_ABLK_HELPER_X86 1179 + select CRYPTO_ABLK_HELPER 1179 1180 select CRYPTO_GLUE_HELPER_X86 1180 1181 select CRYPTO_SERPENT 1181 1182 select CRYPTO_SERPENT_AVX_X86_64 ··· 1291 1292 depends on X86 && 64BIT 1292 1293 select CRYPTO_ALGAPI 1293 1294 select CRYPTO_CRYPTD 1294 - select CRYPTO_ABLK_HELPER_X86 1295 + select CRYPTO_ABLK_HELPER 1295 1296 select CRYPTO_GLUE_HELPER_X86 1296 1297 select CRYPTO_TWOFISH_COMMON 1297 1298 select CRYPTO_TWOFISH_X86_64
+7 -1
crypto/Makefile
··· 2 2 # Cryptographic API 3 3 # 4 4 5 + # memneq MUST be built with -Os or -O0 to prevent early-return optimizations 6 + # that will defeat memneq's actual purpose to prevent timing attacks. 7 + CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3 8 + CFLAGS_memneq.o := -Os 9 + 5 10 obj-$(CONFIG_CRYPTO) += crypto.o 6 - crypto-y := api.o cipher.o compress.o 11 + crypto-y := api.o cipher.o compress.o memneq.o 7 12 8 13 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o 9 14 ··· 110 105 obj-$(CONFIG_ASYNC_CORE) += async_tx/ 111 106 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ 112 107 obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o 108 + obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
+1 -20
crypto/ablkcipher.c
··· 16 16 #include <crypto/internal/skcipher.h> 17 17 #include <linux/cpumask.h> 18 18 #include <linux/err.h> 19 - #include <linux/init.h> 20 19 #include <linux/kernel.h> 21 - #include <linux/module.h> 22 20 #include <linux/rtnetlink.h> 23 21 #include <linux/sched.h> 24 22 #include <linux/slab.h> ··· 27 29 #include <crypto/scatterwalk.h> 28 30 29 31 #include "internal.h" 30 - 31 - static const char *skcipher_default_geniv __read_mostly; 32 32 33 33 struct ablkcipher_buffer { 34 34 struct list_head entry; ··· 523 527 alg->cra_blocksize) 524 528 return "chainiv"; 525 529 526 - return alg->cra_flags & CRYPTO_ALG_ASYNC ? 527 - "eseqiv" : skcipher_default_geniv; 530 + return "eseqiv"; 528 531 } 529 532 530 533 static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) ··· 704 709 return ERR_PTR(err); 705 710 } 706 711 EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); 707 - 708 - static int __init skcipher_module_init(void) 709 - { 710 - skcipher_default_geniv = num_possible_cpus() > 1 ? 711 - "eseqiv" : "chainiv"; 712 - return 0; 713 - } 714 - 715 - static void skcipher_module_exit(void) 716 - { 717 - } 718 - 719 - module_init(skcipher_module_init); 720 - module_exit(skcipher_module_exit);
+2 -2
crypto/ansi_cprng.c
··· 230 230 */ 231 231 if (byte_count < DEFAULT_BLK_SZ) { 232 232 empty_rbuf: 233 - for (; ctx->rand_data_valid < DEFAULT_BLK_SZ; 234 - ctx->rand_data_valid++) { 233 + while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { 235 234 *ptr = ctx->rand_data[ctx->rand_data_valid]; 236 235 ptr++; 237 236 byte_count--; 237 + ctx->rand_data_valid++; 238 238 if (byte_count == 0) 239 239 goto done; 240 240 }
+3 -2
crypto/asymmetric_keys/rsa.c
··· 13 13 #include <linux/module.h> 14 14 #include <linux/kernel.h> 15 15 #include <linux/slab.h> 16 + #include <crypto/algapi.h> 16 17 #include "public_key.h" 17 18 18 19 MODULE_LICENSE("GPL"); ··· 190 189 } 191 190 } 192 191 193 - if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) { 192 + if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) { 194 193 kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]"); 195 194 return -EBADMSG; 196 195 } 197 196 198 - if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) { 197 + if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) { 199 198 kleave(" = -EKEYREJECTED [EM[T] hash mismatch]"); 200 199 return -EKEYREJECTED; 201 200 }
+33 -21
crypto/authenc.c
··· 52 52 aead_request_complete(req, err); 53 53 } 54 54 55 - static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 56 - unsigned int keylen) 55 + int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, 56 + unsigned int keylen) 57 57 { 58 - unsigned int authkeylen; 59 - unsigned int enckeylen; 60 - struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 61 - struct crypto_ahash *auth = ctx->auth; 62 - struct crypto_ablkcipher *enc = ctx->enc; 63 - struct rtattr *rta = (void *)key; 58 + struct rtattr *rta = (struct rtattr *)key; 64 59 struct crypto_authenc_key_param *param; 65 - int err = -EINVAL; 66 60 67 61 if (!RTA_OK(rta, keylen)) 68 - goto badkey; 62 + return -EINVAL; 69 63 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 70 - goto badkey; 64 + return -EINVAL; 71 65 if (RTA_PAYLOAD(rta) < sizeof(*param)) 72 - goto badkey; 66 + return -EINVAL; 73 67 74 68 param = RTA_DATA(rta); 75 - enckeylen = be32_to_cpu(param->enckeylen); 69 + keys->enckeylen = be32_to_cpu(param->enckeylen); 76 70 77 71 key += RTA_ALIGN(rta->rta_len); 78 72 keylen -= RTA_ALIGN(rta->rta_len); 79 73 80 - if (keylen < enckeylen) 81 - goto badkey; 74 + if (keylen < keys->enckeylen) 75 + return -EINVAL; 82 76 83 - authkeylen = keylen - enckeylen; 77 + keys->authkeylen = keylen - keys->enckeylen; 78 + keys->authkey = key; 79 + keys->enckey = key + keys->authkeylen; 80 + 81 + return 0; 82 + } 83 + EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys); 84 + 85 + static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 86 + unsigned int keylen) 87 + { 88 + struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 89 + struct crypto_ahash *auth = ctx->auth; 90 + struct crypto_ablkcipher *enc = ctx->enc; 91 + struct crypto_authenc_keys keys; 92 + int err = -EINVAL; 93 + 94 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 95 + goto badkey; 84 96 85 97 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 86 98 
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) & 87 99 CRYPTO_TFM_REQ_MASK); 88 - err = crypto_ahash_setkey(auth, key, authkeylen); 100 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); 89 101 crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) & 90 102 CRYPTO_TFM_RES_MASK); 91 103 ··· 107 95 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 108 96 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) & 109 97 CRYPTO_TFM_REQ_MASK); 110 - err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); 98 + err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); 111 99 crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) & 112 100 CRYPTO_TFM_RES_MASK); 113 101 ··· 200 188 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 201 189 authsize, 0); 202 190 203 - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 191 + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 204 192 if (err) 205 193 goto out; 206 194 ··· 239 227 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 240 228 authsize, 0); 241 229 242 - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 230 + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 243 231 if (err) 244 232 goto out; 245 233 ··· 474 462 ihash = ohash + authsize; 475 463 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 476 464 authsize, 0); 477 - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; 465 + return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; 478 466 } 479 467 480 468 static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
+8 -26
crypto/authencesn.c
··· 59 59 static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key, 60 60 unsigned int keylen) 61 61 { 62 - unsigned int authkeylen; 63 - unsigned int enckeylen; 64 62 struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn); 65 63 struct crypto_ahash *auth = ctx->auth; 66 64 struct crypto_ablkcipher *enc = ctx->enc; 67 - struct rtattr *rta = (void *)key; 68 - struct crypto_authenc_key_param *param; 65 + struct crypto_authenc_keys keys; 69 66 int err = -EINVAL; 70 67 71 - if (!RTA_OK(rta, keylen)) 68 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 72 69 goto badkey; 73 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 74 - goto badkey; 75 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 76 - goto badkey; 77 - 78 - param = RTA_DATA(rta); 79 - enckeylen = be32_to_cpu(param->enckeylen); 80 - 81 - key += RTA_ALIGN(rta->rta_len); 82 - keylen -= RTA_ALIGN(rta->rta_len); 83 - 84 - if (keylen < enckeylen) 85 - goto badkey; 86 - 87 - authkeylen = keylen - enckeylen; 88 70 89 71 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 90 72 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) & 91 73 CRYPTO_TFM_REQ_MASK); 92 - err = crypto_ahash_setkey(auth, key, authkeylen); 74 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); 93 75 crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) & 94 76 CRYPTO_TFM_RES_MASK); 95 77 ··· 81 99 crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); 82 100 crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) & 83 101 CRYPTO_TFM_REQ_MASK); 84 - err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen); 102 + err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen); 85 103 crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) & 86 104 CRYPTO_TFM_RES_MASK); 87 105 ··· 229 247 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 230 248 authsize, 0); 231 249 232 - err = memcmp(ihash, ahreq->result, authsize) ? 
-EBADMSG : 0; 250 + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 233 251 if (err) 234 252 goto out; 235 253 ··· 278 296 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 279 297 authsize, 0); 280 298 281 - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 299 + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 282 300 if (err) 283 301 goto out; 284 302 ··· 318 336 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 319 337 authsize, 0); 320 338 321 - err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 339 + err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0; 322 340 if (err) 323 341 goto out; 324 342 ··· 550 568 ihash = ohash + authsize; 551 569 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, 552 570 authsize, 0); 553 - return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0; 571 + return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0; 554 572 } 555 573 556 574 static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
+2 -2
crypto/ccm.c
··· 363 363 364 364 if (!err) { 365 365 err = crypto_ccm_auth(req, req->dst, cryptlen); 366 - if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize)) 366 + if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize)) 367 367 err = -EBADMSG; 368 368 } 369 369 aead_request_complete(req, err); ··· 422 422 return err; 423 423 424 424 /* verify */ 425 - if (memcmp(authtag, odata, authsize)) 425 + if (crypto_memneq(authtag, odata, authsize)) 426 426 return -EBADMSG; 427 427 428 428 return err;
+1 -1
crypto/gcm.c
··· 582 582 583 583 crypto_xor(auth_tag, iauth_tag, 16); 584 584 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); 585 - return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; 585 + return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; 586 586 } 587 587 588 588 static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
+138
crypto/memneq.c
··· 1 + /* 2 + * Constant-time equality testing of memory regions. 3 + * 4 + * Authors: 5 + * 6 + * James Yonan <james@openvpn.net> 7 + * Daniel Borkmann <dborkman@redhat.com> 8 + * 9 + * This file is provided under a dual BSD/GPLv2 license. When using or 10 + * redistributing this file, you may do so under either license. 11 + * 12 + * GPL LICENSE SUMMARY 13 + * 14 + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 15 + * 16 + * This program is free software; you can redistribute it and/or modify 17 + * it under the terms of version 2 of the GNU General Public License as 18 + * published by the Free Software Foundation. 19 + * 20 + * This program is distributed in the hope that it will be useful, but 21 + * WITHOUT ANY WARRANTY; without even the implied warranty of 22 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 23 + * General Public License for more details. 24 + * 25 + * You should have received a copy of the GNU General Public License 26 + * along with this program; if not, write to the Free Software 27 + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 28 + * The full GNU General Public License is included in this distribution 29 + * in the file called LICENSE.GPL. 30 + * 31 + * BSD LICENSE 32 + * 33 + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. 34 + * 35 + * Redistribution and use in source and binary forms, with or without 36 + * modification, are permitted provided that the following conditions 37 + * are met: 38 + * 39 + * * Redistributions of source code must retain the above copyright 40 + * notice, this list of conditions and the following disclaimer. 41 + * * Redistributions in binary form must reproduce the above copyright 42 + * notice, this list of conditions and the following disclaimer in 43 + * the documentation and/or other materials provided with the 44 + * distribution. 
45 + * * Neither the name of OpenVPN Technologies nor the names of its 46 + * contributors may be used to endorse or promote products derived 47 + * from this software without specific prior written permission. 48 + * 49 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 50 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 51 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 52 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 53 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 54 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 55 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 56 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 57 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 58 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 59 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
60 + */ 61 + 62 + #include <crypto/algapi.h> 63 + 64 + #ifndef __HAVE_ARCH_CRYPTO_MEMNEQ 65 + 66 + /* Generic path for arbitrary size */ 67 + static inline unsigned long 68 + __crypto_memneq_generic(const void *a, const void *b, size_t size) 69 + { 70 + unsigned long neq = 0; 71 + 72 + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 73 + while (size >= sizeof(unsigned long)) { 74 + neq |= *(unsigned long *)a ^ *(unsigned long *)b; 75 + a += sizeof(unsigned long); 76 + b += sizeof(unsigned long); 77 + size -= sizeof(unsigned long); 78 + } 79 + #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ 80 + while (size > 0) { 81 + neq |= *(unsigned char *)a ^ *(unsigned char *)b; 82 + a += 1; 83 + b += 1; 84 + size -= 1; 85 + } 86 + return neq; 87 + } 88 + 89 + /* Loop-free fast-path for frequently used 16-byte size */ 90 + static inline unsigned long __crypto_memneq_16(const void *a, const void *b) 91 + { 92 + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 93 + if (sizeof(unsigned long) == 8) 94 + return ((*(unsigned long *)(a) ^ *(unsigned long *)(b)) 95 + | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8))); 96 + else if (sizeof(unsigned int) == 4) 97 + return ((*(unsigned int *)(a) ^ *(unsigned int *)(b)) 98 + | (*(unsigned int *)(a+4) ^ *(unsigned int *)(b+4)) 99 + | (*(unsigned int *)(a+8) ^ *(unsigned int *)(b+8)) 100 + | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12))); 101 + else 102 + #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ 103 + return ((*(unsigned char *)(a) ^ *(unsigned char *)(b)) 104 + | (*(unsigned char *)(a+1) ^ *(unsigned char *)(b+1)) 105 + | (*(unsigned char *)(a+2) ^ *(unsigned char *)(b+2)) 106 + | (*(unsigned char *)(a+3) ^ *(unsigned char *)(b+3)) 107 + | (*(unsigned char *)(a+4) ^ *(unsigned char *)(b+4)) 108 + | (*(unsigned char *)(a+5) ^ *(unsigned char *)(b+5)) 109 + | (*(unsigned char *)(a+6) ^ *(unsigned char *)(b+6)) 110 + | (*(unsigned char *)(a+7) ^ *(unsigned char *)(b+7)) 111 + | (*(unsigned char *)(a+8) ^ *(unsigned 
char *)(b+8)) 112 + | (*(unsigned char *)(a+9) ^ *(unsigned char *)(b+9)) 113 + | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10)) 114 + | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11)) 115 + | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12)) 116 + | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13)) 117 + | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14)) 118 + | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15))); 119 + } 120 + 121 + /* Compare two areas of memory without leaking timing information, 122 + * and with special optimizations for common sizes. Users should 123 + * not call this function directly, but should instead use 124 + * crypto_memneq defined in crypto/algapi.h. 125 + */ 126 + noinline unsigned long __crypto_memneq(const void *a, const void *b, 127 + size_t size) 128 + { 129 + switch (size) { 130 + case 16: 131 + return __crypto_memneq_16(a, b); 132 + default: 133 + return __crypto_memneq_generic(a, b, size); 134 + } 135 + } 136 + EXPORT_SYMBOL(__crypto_memneq); 137 + 138 + #endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
+25
drivers/char/hw_random/Kconfig
··· 165 165 166 166 If unsure, say Y. 167 167 168 + config HW_RANDOM_OMAP3_ROM 169 + tristate "OMAP3 ROM Random Number Generator support" 170 + depends on HW_RANDOM && ARCH_OMAP3 171 + default HW_RANDOM 172 + ---help--- 173 + This driver provides kernel-side support for the Random Number 174 + Generator hardware found on OMAP34xx processors. 175 + 176 + To compile this driver as a module, choose M here: the 177 + module will be called omap3-rom-rng. 178 + 179 + If unsure, say Y. 180 + 168 181 config HW_RANDOM_OCTEON 169 182 tristate "Octeon Random Number Generator support" 170 183 depends on HW_RANDOM && CAVIUM_OCTEON_SOC ··· 338 325 339 326 To compile this driver as a module, choose M here: the 340 327 module will be called tpm-rng. 328 + 329 + If unsure, say Y. 330 + 331 + config HW_RANDOM_MSM 332 + tristate "Qualcomm MSM Random Number Generator support" 333 + depends on HW_RANDOM && ARCH_MSM 334 + ---help--- 335 + This driver provides kernel-side support for the Random Number 336 + Generator hardware found on Qualcomm MSM SoCs. 337 + 338 + To compile this driver as a module, choose M here. the 339 + module will be called msm-rng. 341 340 342 341 If unsure, say Y.
+2
drivers/char/hw_random/Makefile
··· 15 15 obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 16 16 obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 17 17 obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 18 + obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o 18 19 obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o 19 20 obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o 20 21 obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o ··· 29 28 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o 30 29 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o 31 30 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o 31 + obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
+197
drivers/char/hw_random/msm-rng.c
··· 1 + /* 2 + * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + */ 14 + #include <linux/clk.h> 15 + #include <linux/err.h> 16 + #include <linux/hw_random.h> 17 + #include <linux/io.h> 18 + #include <linux/module.h> 19 + #include <linux/of.h> 20 + #include <linux/platform_device.h> 21 + 22 + /* Device specific register offsets */ 23 + #define PRNG_DATA_OUT 0x0000 24 + #define PRNG_STATUS 0x0004 25 + #define PRNG_LFSR_CFG 0x0100 26 + #define PRNG_CONFIG 0x0104 27 + 28 + /* Device specific register masks and config values */ 29 + #define PRNG_LFSR_CFG_MASK 0x0000ffff 30 + #define PRNG_LFSR_CFG_CLOCKS 0x0000dddd 31 + #define PRNG_CONFIG_HW_ENABLE BIT(1) 32 + #define PRNG_STATUS_DATA_AVAIL BIT(0) 33 + 34 + #define MAX_HW_FIFO_DEPTH 16 35 + #define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) 36 + #define WORD_SZ 4 37 + 38 + struct msm_rng { 39 + void __iomem *base; 40 + struct clk *clk; 41 + struct hwrng hwrng; 42 + }; 43 + 44 + #define to_msm_rng(p) container_of(p, struct msm_rng, hwrng) 45 + 46 + static int msm_rng_enable(struct hwrng *hwrng, int enable) 47 + { 48 + struct msm_rng *rng = to_msm_rng(hwrng); 49 + u32 val; 50 + int ret; 51 + 52 + ret = clk_prepare_enable(rng->clk); 53 + if (ret) 54 + return ret; 55 + 56 + if (enable) { 57 + /* Enable PRNG only if it is not already enabled */ 58 + val = readl_relaxed(rng->base + PRNG_CONFIG); 59 + if (val & PRNG_CONFIG_HW_ENABLE) 60 + goto already_enabled; 61 + 62 + val = readl_relaxed(rng->base + PRNG_LFSR_CFG); 63 
+ val &= ~PRNG_LFSR_CFG_MASK; 64 + val |= PRNG_LFSR_CFG_CLOCKS; 65 + writel(val, rng->base + PRNG_LFSR_CFG); 66 + 67 + val = readl_relaxed(rng->base + PRNG_CONFIG); 68 + val |= PRNG_CONFIG_HW_ENABLE; 69 + writel(val, rng->base + PRNG_CONFIG); 70 + } else { 71 + val = readl_relaxed(rng->base + PRNG_CONFIG); 72 + val &= ~PRNG_CONFIG_HW_ENABLE; 73 + writel(val, rng->base + PRNG_CONFIG); 74 + } 75 + 76 + already_enabled: 77 + clk_disable_unprepare(rng->clk); 78 + return 0; 79 + } 80 + 81 + static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait) 82 + { 83 + struct msm_rng *rng = to_msm_rng(hwrng); 84 + size_t currsize = 0; 85 + u32 *retdata = data; 86 + size_t maxsize; 87 + int ret; 88 + u32 val; 89 + 90 + /* calculate max size bytes to transfer back to caller */ 91 + maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max); 92 + 93 + /* no room for word data */ 94 + if (maxsize < WORD_SZ) 95 + return 0; 96 + 97 + ret = clk_prepare_enable(rng->clk); 98 + if (ret) 99 + return ret; 100 + 101 + /* read random data from hardware */ 102 + do { 103 + val = readl_relaxed(rng->base + PRNG_STATUS); 104 + if (!(val & PRNG_STATUS_DATA_AVAIL)) 105 + break; 106 + 107 + val = readl_relaxed(rng->base + PRNG_DATA_OUT); 108 + if (!val) 109 + break; 110 + 111 + *retdata++ = val; 112 + currsize += WORD_SZ; 113 + 114 + /* make sure we stay on 32bit boundary */ 115 + if ((maxsize - currsize) < WORD_SZ) 116 + break; 117 + } while (currsize < maxsize); 118 + 119 + clk_disable_unprepare(rng->clk); 120 + 121 + return currsize; 122 + } 123 + 124 + static int msm_rng_init(struct hwrng *hwrng) 125 + { 126 + return msm_rng_enable(hwrng, 1); 127 + } 128 + 129 + static void msm_rng_cleanup(struct hwrng *hwrng) 130 + { 131 + msm_rng_enable(hwrng, 0); 132 + } 133 + 134 + static int msm_rng_probe(struct platform_device *pdev) 135 + { 136 + struct resource *res; 137 + struct msm_rng *rng; 138 + int ret; 139 + 140 + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); 141 + if (!rng) 
142 + return -ENOMEM; 143 + 144 + platform_set_drvdata(pdev, rng); 145 + 146 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 147 + rng->base = devm_ioremap_resource(&pdev->dev, res); 148 + if (IS_ERR(rng->base)) 149 + return PTR_ERR(rng->base); 150 + 151 + rng->clk = devm_clk_get(&pdev->dev, "core"); 152 + if (IS_ERR(rng->clk)) 153 + return PTR_ERR(rng->clk); 154 + 155 + rng->hwrng.name = KBUILD_MODNAME, 156 + rng->hwrng.init = msm_rng_init, 157 + rng->hwrng.cleanup = msm_rng_cleanup, 158 + rng->hwrng.read = msm_rng_read, 159 + 160 + ret = hwrng_register(&rng->hwrng); 161 + if (ret) { 162 + dev_err(&pdev->dev, "failed to register hwrng\n"); 163 + return ret; 164 + } 165 + 166 + return 0; 167 + } 168 + 169 + static int msm_rng_remove(struct platform_device *pdev) 170 + { 171 + struct msm_rng *rng = platform_get_drvdata(pdev); 172 + 173 + hwrng_unregister(&rng->hwrng); 174 + return 0; 175 + } 176 + 177 + static const struct of_device_id msm_rng_of_match[] = { 178 + { .compatible = "qcom,prng", }, 179 + {} 180 + }; 181 + MODULE_DEVICE_TABLE(of, msm_rng_of_match); 182 + 183 + static struct platform_driver msm_rng_driver = { 184 + .probe = msm_rng_probe, 185 + .remove = msm_rng_remove, 186 + .driver = { 187 + .name = KBUILD_MODNAME, 188 + .owner = THIS_MODULE, 189 + .of_match_table = of_match_ptr(msm_rng_of_match), 190 + } 191 + }; 192 + module_platform_driver(msm_rng_driver); 193 + 194 + MODULE_ALIAS("platform:" KBUILD_MODNAME); 195 + MODULE_AUTHOR("The Linux Foundation"); 196 + MODULE_DESCRIPTION("Qualcomm MSM random number generator driver"); 197 + MODULE_LICENSE("GPL v2");
+141
drivers/char/hw_random/omap3-rom-rng.c
··· 1 + /* 2 + * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family 3 + * 4 + * Copyright (C) 2009 Nokia Corporation 5 + * Author: Juha Yrjola <juha.yrjola@solidboot.com> 6 + * 7 + * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com> 8 + * 9 + * This file is licensed under the terms of the GNU General Public 10 + * License version 2. This program is licensed "as is" without any 11 + * warranty of any kind, whether express or implied. 12 + */ 13 + 14 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 + 16 + #include <linux/module.h> 17 + #include <linux/init.h> 18 + #include <linux/random.h> 19 + #include <linux/hw_random.h> 20 + #include <linux/timer.h> 21 + #include <linux/clk.h> 22 + #include <linux/err.h> 23 + #include <linux/platform_device.h> 24 + 25 + #define RNG_RESET 0x01 26 + #define RNG_GEN_PRNG_HW_INIT 0x02 27 + #define RNG_GEN_HW 0x08 28 + 29 + /* param1: ptr, param2: count, param3: flag */ 30 + static u32 (*omap3_rom_rng_call)(u32, u32, u32); 31 + 32 + static struct timer_list idle_timer; 33 + static int rng_idle; 34 + static struct clk *rng_clk; 35 + 36 + static void omap3_rom_rng_idle(unsigned long data) 37 + { 38 + int r; 39 + 40 + r = omap3_rom_rng_call(0, 0, RNG_RESET); 41 + if (r != 0) { 42 + pr_err("reset failed: %d\n", r); 43 + return; 44 + } 45 + clk_disable_unprepare(rng_clk); 46 + rng_idle = 1; 47 + } 48 + 49 + static int omap3_rom_rng_get_random(void *buf, unsigned int count) 50 + { 51 + u32 r; 52 + u32 ptr; 53 + 54 + del_timer_sync(&idle_timer); 55 + if (rng_idle) { 56 + clk_prepare_enable(rng_clk); 57 + r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); 58 + if (r != 0) { 59 + clk_disable_unprepare(rng_clk); 60 + pr_err("HW init failed: %d\n", r); 61 + return -EIO; 62 + } 63 + rng_idle = 0; 64 + } 65 + 66 + ptr = virt_to_phys(buf); 67 + r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW); 68 + mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500)); 69 + if (r != 0) 70 + return -EINVAL; 71 + return 0; 72 + } 73 + 74 + static int 
omap3_rom_rng_data_present(struct hwrng *rng, int wait) 75 + { 76 + return 1; 77 + } 78 + 79 + static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data) 80 + { 81 + int r; 82 + 83 + r = omap3_rom_rng_get_random(data, 4); 84 + if (r < 0) 85 + return r; 86 + return 4; 87 + } 88 + 89 + static struct hwrng omap3_rom_rng_ops = { 90 + .name = "omap3-rom", 91 + .data_present = omap3_rom_rng_data_present, 92 + .data_read = omap3_rom_rng_data_read, 93 + }; 94 + 95 + static int omap3_rom_rng_probe(struct platform_device *pdev) 96 + { 97 + pr_info("initializing\n"); 98 + 99 + omap3_rom_rng_call = pdev->dev.platform_data; 100 + if (!omap3_rom_rng_call) { 101 + pr_err("omap3_rom_rng_call is NULL\n"); 102 + return -EINVAL; 103 + } 104 + 105 + setup_timer(&idle_timer, omap3_rom_rng_idle, 0); 106 + rng_clk = clk_get(&pdev->dev, "ick"); 107 + if (IS_ERR(rng_clk)) { 108 + pr_err("unable to get RNG clock\n"); 109 + return PTR_ERR(rng_clk); 110 + } 111 + 112 + /* Leave the RNG in reset state. */ 113 + clk_prepare_enable(rng_clk); 114 + omap3_rom_rng_idle(0); 115 + 116 + return hwrng_register(&omap3_rom_rng_ops); 117 + } 118 + 119 + static int omap3_rom_rng_remove(struct platform_device *pdev) 120 + { 121 + hwrng_unregister(&omap3_rom_rng_ops); 122 + clk_disable_unprepare(rng_clk); 123 + clk_put(rng_clk); 124 + return 0; 125 + } 126 + 127 + static struct platform_driver omap3_rom_rng_driver = { 128 + .driver = { 129 + .name = "omap3-rom-rng", 130 + .owner = THIS_MODULE, 131 + }, 132 + .probe = omap3_rom_rng_probe, 133 + .remove = omap3_rom_rng_remove, 134 + }; 135 + 136 + module_platform_driver(omap3_rom_rng_driver); 137 + 138 + MODULE_ALIAS("platform:omap3-rom-rng"); 139 + MODULE_AUTHOR("Juha Yrjola"); 140 + MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); 141 + MODULE_LICENSE("GPL");
+2 -3
drivers/char/hw_random/pseries-rng.c
··· 24 24 #include <linux/hw_random.h> 25 25 #include <asm/vio.h> 26 26 27 - #define MODULE_NAME "pseries-rng" 28 27 29 28 static int pseries_rng_data_read(struct hwrng *rng, u32 *data) 30 29 { ··· 54 55 }; 55 56 56 57 static struct hwrng pseries_rng = { 57 - .name = MODULE_NAME, 58 + .name = KBUILD_MODNAME, 58 59 .data_read = pseries_rng_data_read, 59 60 }; 60 61 ··· 77 78 MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids); 78 79 79 80 static struct vio_driver pseries_rng_driver = { 80 - .name = MODULE_NAME, 81 + .name = KBUILD_MODNAME, 81 82 .probe = pseries_rng_probe, 82 83 .remove = pseries_rng_remove, 83 84 .get_desired_dma = pseries_rng_get_desired_dma,
+1 -1
drivers/char/hw_random/via-rng.c
··· 221 221 module_init(mod_init); 222 222 module_exit(mod_exit); 223 223 224 - static struct x86_cpu_id via_rng_cpu_id[] = { 224 + static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { 225 225 X86_FEATURE_MATCH(X86_FEATURE_XSTORE), 226 226 {} 227 227 };
+19 -6
drivers/crypto/caam/Kconfig
··· 4 4 help 5 5 Enables the driver module for Freescale's Cryptographic Accelerator 6 6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 7 - This module adds a job ring operation interface, and configures h/w 7 + This module creates job ring devices, and configures h/w 8 8 to operate as a DPAA component automatically, depending 9 9 on h/w feature availability. 10 10 11 11 To compile this driver as a module, choose M here: the module 12 12 will be called caam. 13 13 14 + config CRYPTO_DEV_FSL_CAAM_JR 15 + tristate "Freescale CAAM Job Ring driver backend" 16 + depends on CRYPTO_DEV_FSL_CAAM 17 + default y 18 + help 19 + Enables the driver module for Job Rings which are part of 20 + Freescale's Cryptographic Accelerator 21 + and Assurance Module (CAAM). This module adds a job ring operation 22 + interface. 23 + 24 + To compile this driver as a module, choose M here: the module 25 + will be called caam_jr. 26 + 14 27 config CRYPTO_DEV_FSL_CAAM_RINGSIZE 15 28 int "Job Ring size" 16 - depends on CRYPTO_DEV_FSL_CAAM 29 + depends on CRYPTO_DEV_FSL_CAAM_JR 17 30 range 2 9 18 31 default "9" 19 32 help ··· 44 31 45 32 config CRYPTO_DEV_FSL_CAAM_INTC 46 33 bool "Job Ring interrupt coalescing" 47 - depends on CRYPTO_DEV_FSL_CAAM 34 + depends on CRYPTO_DEV_FSL_CAAM_JR 48 35 default n 49 36 help 50 37 Enable the Job Ring's interrupt coalescing feature. 
··· 75 62 76 63 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API 77 64 tristate "Register algorithm implementations with the Crypto API" 78 - depends on CRYPTO_DEV_FSL_CAAM 65 + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR 79 66 default y 80 67 select CRYPTO_ALGAPI 81 68 select CRYPTO_AUTHENC ··· 89 76 90 77 config CRYPTO_DEV_FSL_CAAM_AHASH_API 91 78 tristate "Register hash algorithm implementations with Crypto API" 92 - depends on CRYPTO_DEV_FSL_CAAM 79 + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR 93 80 default y 94 81 select CRYPTO_HASH 95 82 help ··· 101 88 102 89 config CRYPTO_DEV_FSL_CAAM_RNG_API 103 90 tristate "Register caam device for hwrng API" 104 - depends on CRYPTO_DEV_FSL_CAAM 91 + depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR 105 92 default y 106 93 select CRYPTO_RNG 107 94 select HW_RANDOM
+3 -1
drivers/crypto/caam/Makefile
··· 6 6 endif 7 7 8 8 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o 9 + obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o 9 10 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o 10 11 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o 11 12 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o 12 13 13 - caam-objs := ctrl.o jr.o error.o key_gen.o 14 + caam-objs := ctrl.o 15 + caam_jr-objs := jr.o key_gen.o error.o
+20 -63
drivers/crypto/caam/caamalg.c
··· 86 86 #else 87 87 #define debug(format, arg...) 88 88 #endif 89 + static struct list_head alg_list; 89 90 90 91 /* Set DK bit in class 1 operation if shared */ 91 92 static inline void append_dec_op1(u32 *desc, u32 type) ··· 2058 2057 2059 2058 struct caam_crypto_alg { 2060 2059 struct list_head entry; 2061 - struct device *ctrldev; 2062 2060 int class1_alg_type; 2063 2061 int class2_alg_type; 2064 2062 int alg_op; ··· 2070 2070 struct caam_crypto_alg *caam_alg = 2071 2071 container_of(alg, struct caam_crypto_alg, crypto_alg); 2072 2072 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 2073 - struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev); 2074 - int tgt_jr = atomic_inc_return(&priv->tfm_count); 2075 2073 2076 - /* 2077 - * distribute tfms across job rings to ensure in-order 2078 - * crypto request processing per tfm 2079 - */ 2080 - ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs]; 2074 + ctx->jrdev = caam_jr_alloc(); 2075 + if (IS_ERR(ctx->jrdev)) { 2076 + pr_err("Job Ring Device allocation for transform failed\n"); 2077 + return PTR_ERR(ctx->jrdev); 2078 + } 2081 2079 2082 2080 /* copy descriptor header template value */ 2083 2081 ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type; ··· 2102 2104 dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, 2103 2105 desc_bytes(ctx->sh_desc_givenc), 2104 2106 DMA_TO_DEVICE); 2107 + 2108 + caam_jr_free(ctx->jrdev); 2105 2109 } 2106 2110 2107 2111 static void __exit caam_algapi_exit(void) 2108 2112 { 2109 2113 2110 - struct device_node *dev_node; 2111 - struct platform_device *pdev; 2112 - struct device *ctrldev; 2113 - struct caam_drv_private *priv; 2114 2114 struct caam_crypto_alg *t_alg, *n; 2115 2115 2116 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2117 - if (!dev_node) { 2118 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 2119 - if (!dev_node) 2120 - return; 2121 - } 2122 - 2123 - pdev = of_find_device_by_node(dev_node); 2124 - if 
(!pdev) 2116 + if (!alg_list.next) 2125 2117 return; 2126 2118 2127 - ctrldev = &pdev->dev; 2128 - of_node_put(dev_node); 2129 - priv = dev_get_drvdata(ctrldev); 2130 - 2131 - if (!priv->alg_list.next) 2132 - return; 2133 - 2134 - list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 2119 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) { 2135 2120 crypto_unregister_alg(&t_alg->crypto_alg); 2136 2121 list_del(&t_alg->entry); 2137 2122 kfree(t_alg); 2138 2123 } 2139 2124 } 2140 2125 2141 - static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev, 2142 - struct caam_alg_template 2126 + static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template 2143 2127 *template) 2144 2128 { 2145 2129 struct caam_crypto_alg *t_alg; ··· 2129 2149 2130 2150 t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL); 2131 2151 if (!t_alg) { 2132 - dev_err(ctrldev, "failed to allocate t_alg\n"); 2152 + pr_err("failed to allocate t_alg\n"); 2133 2153 return ERR_PTR(-ENOMEM); 2134 2154 } 2135 2155 ··· 2161 2181 t_alg->class1_alg_type = template->class1_alg_type; 2162 2182 t_alg->class2_alg_type = template->class2_alg_type; 2163 2183 t_alg->alg_op = template->alg_op; 2164 - t_alg->ctrldev = ctrldev; 2165 2184 2166 2185 return t_alg; 2167 2186 } 2168 2187 2169 2188 static int __init caam_algapi_init(void) 2170 2189 { 2171 - struct device_node *dev_node; 2172 - struct platform_device *pdev; 2173 - struct device *ctrldev; 2174 - struct caam_drv_private *priv; 2175 2190 int i = 0, err = 0; 2176 2191 2177 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 2178 - if (!dev_node) { 2179 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 2180 - if (!dev_node) 2181 - return -ENODEV; 2182 - } 2183 - 2184 - pdev = of_find_device_by_node(dev_node); 2185 - if (!pdev) 2186 - return -ENODEV; 2187 - 2188 - ctrldev = &pdev->dev; 2189 - priv = dev_get_drvdata(ctrldev); 2190 - of_node_put(dev_node); 2191 - 2192 - 
INIT_LIST_HEAD(&priv->alg_list); 2193 - 2194 - atomic_set(&priv->tfm_count, -1); 2192 + INIT_LIST_HEAD(&alg_list); 2195 2193 2196 2194 /* register crypto algorithms the device supports */ 2197 2195 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 2198 2196 /* TODO: check if h/w supports alg */ 2199 2197 struct caam_crypto_alg *t_alg; 2200 2198 2201 - t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); 2199 + t_alg = caam_alg_alloc(&driver_algs[i]); 2202 2200 if (IS_ERR(t_alg)) { 2203 2201 err = PTR_ERR(t_alg); 2204 - dev_warn(ctrldev, "%s alg allocation failed\n", 2205 - driver_algs[i].driver_name); 2202 + pr_warn("%s alg allocation failed\n", 2203 + driver_algs[i].driver_name); 2206 2204 continue; 2207 2205 } 2208 2206 2209 2207 err = crypto_register_alg(&t_alg->crypto_alg); 2210 2208 if (err) { 2211 - dev_warn(ctrldev, "%s alg registration failed\n", 2209 + pr_warn("%s alg registration failed\n", 2212 2210 t_alg->crypto_alg.cra_driver_name); 2213 2211 kfree(t_alg); 2214 2212 } else 2215 - list_add_tail(&t_alg->entry, &priv->alg_list); 2213 + list_add_tail(&t_alg->entry, &alg_list); 2216 2214 } 2217 - if (!list_empty(&priv->alg_list)) 2218 - dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", 2219 - (char *)of_get_property(dev_node, "compatible", NULL)); 2215 + if (!list_empty(&alg_list)) 2216 + pr_info("caam algorithms registered in /proc/crypto\n"); 2220 2217 2221 2218 return err; 2222 2219 }
+26 -62
drivers/crypto/caam/caamhash.c
··· 94 94 #define debug(format, arg...) 95 95 #endif 96 96 97 + 98 + static struct list_head hash_list; 99 + 97 100 /* ahash per-session context */ 98 101 struct caam_hash_ctx { 99 102 struct device *jrdev; ··· 1656 1653 1657 1654 struct caam_hash_alg { 1658 1655 struct list_head entry; 1659 - struct device *ctrldev; 1660 1656 int alg_type; 1661 1657 int alg_op; 1662 1658 struct ahash_alg ahash_alg; ··· 1672 1670 struct caam_hash_alg *caam_hash = 1673 1671 container_of(alg, struct caam_hash_alg, ahash_alg); 1674 1672 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); 1675 - struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev); 1676 1673 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ 1677 1674 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, 1678 1675 HASH_MSG_LEN + SHA1_DIGEST_SIZE, ··· 1679 1678 HASH_MSG_LEN + SHA256_DIGEST_SIZE, 1680 1679 HASH_MSG_LEN + 64, 1681 1680 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1682 - int tgt_jr = atomic_inc_return(&priv->tfm_count); 1683 1681 int ret = 0; 1684 1682 1685 1683 /* 1686 - * distribute tfms across job rings to ensure in-order 1684 + * Get a Job ring from Job Ring driver to ensure in-order 1687 1685 * crypto request processing per tfm 1688 1686 */ 1689 - ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs]; 1690 - 1687 + ctx->jrdev = caam_jr_alloc(); 1688 + if (IS_ERR(ctx->jrdev)) { 1689 + pr_err("Job Ring Device allocation for transform failed\n"); 1690 + return PTR_ERR(ctx->jrdev); 1691 + } 1691 1692 /* copy descriptor header template value */ 1692 1693 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; 1693 1694 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; ··· 1732 1729 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) 1733 1730 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, 1734 1731 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); 1732 + 1733 + caam_jr_free(ctx->jrdev); 1735 1734 } 1736 1735 1737 1736 static void __exit 
caam_algapi_hash_exit(void) 1738 1737 { 1739 - struct device_node *dev_node; 1740 - struct platform_device *pdev; 1741 - struct device *ctrldev; 1742 - struct caam_drv_private *priv; 1743 1738 struct caam_hash_alg *t_alg, *n; 1744 1739 1745 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1746 - if (!dev_node) { 1747 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1748 - if (!dev_node) 1749 - return; 1750 - } 1751 - 1752 - pdev = of_find_device_by_node(dev_node); 1753 - if (!pdev) 1740 + if (!hash_list.next) 1754 1741 return; 1755 1742 1756 - ctrldev = &pdev->dev; 1757 - of_node_put(dev_node); 1758 - priv = dev_get_drvdata(ctrldev); 1759 - 1760 - if (!priv->hash_list.next) 1761 - return; 1762 - 1763 - list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) { 1743 + list_for_each_entry_safe(t_alg, n, &hash_list, entry) { 1764 1744 crypto_unregister_ahash(&t_alg->ahash_alg); 1765 1745 list_del(&t_alg->entry); 1766 1746 kfree(t_alg); ··· 1751 1765 } 1752 1766 1753 1767 static struct caam_hash_alg * 1754 - caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template, 1768 + caam_hash_alloc(struct caam_hash_template *template, 1755 1769 bool keyed) 1756 1770 { 1757 1771 struct caam_hash_alg *t_alg; ··· 1760 1774 1761 1775 t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL); 1762 1776 if (!t_alg) { 1763 - dev_err(ctrldev, "failed to allocate t_alg\n"); 1777 + pr_err("failed to allocate t_alg\n"); 1764 1778 return ERR_PTR(-ENOMEM); 1765 1779 } 1766 1780 ··· 1791 1805 1792 1806 t_alg->alg_type = template->alg_type; 1793 1807 t_alg->alg_op = template->alg_op; 1794 - t_alg->ctrldev = ctrldev; 1795 1808 1796 1809 return t_alg; 1797 1810 } 1798 1811 1799 1812 static int __init caam_algapi_hash_init(void) 1800 1813 { 1801 - struct device_node *dev_node; 1802 - struct platform_device *pdev; 1803 - struct device *ctrldev; 1804 - struct caam_drv_private *priv; 1805 1814 int i = 0, err = 0; 1806 1815 1807 - dev_node = 
of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 1808 - if (!dev_node) { 1809 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 1810 - if (!dev_node) 1811 - return -ENODEV; 1812 - } 1813 - 1814 - pdev = of_find_device_by_node(dev_node); 1815 - if (!pdev) 1816 - return -ENODEV; 1817 - 1818 - ctrldev = &pdev->dev; 1819 - priv = dev_get_drvdata(ctrldev); 1820 - of_node_put(dev_node); 1821 - 1822 - INIT_LIST_HEAD(&priv->hash_list); 1823 - 1824 - atomic_set(&priv->tfm_count, -1); 1816 + INIT_LIST_HEAD(&hash_list); 1825 1817 1826 1818 /* register crypto algorithms the device supports */ 1827 1819 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { ··· 1807 1843 struct caam_hash_alg *t_alg; 1808 1844 1809 1845 /* register hmac version */ 1810 - t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true); 1846 + t_alg = caam_hash_alloc(&driver_hash[i], true); 1811 1847 if (IS_ERR(t_alg)) { 1812 1848 err = PTR_ERR(t_alg); 1813 - dev_warn(ctrldev, "%s alg allocation failed\n", 1814 - driver_hash[i].driver_name); 1849 + pr_warn("%s alg allocation failed\n", 1850 + driver_hash[i].driver_name); 1815 1851 continue; 1816 1852 } 1817 1853 1818 1854 err = crypto_register_ahash(&t_alg->ahash_alg); 1819 1855 if (err) { 1820 - dev_warn(ctrldev, "%s alg registration failed\n", 1856 + pr_warn("%s alg registration failed\n", 1821 1857 t_alg->ahash_alg.halg.base.cra_driver_name); 1822 1858 kfree(t_alg); 1823 1859 } else 1824 - list_add_tail(&t_alg->entry, &priv->hash_list); 1860 + list_add_tail(&t_alg->entry, &hash_list); 1825 1861 1826 1862 /* register unkeyed version */ 1827 - t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false); 1863 + t_alg = caam_hash_alloc(&driver_hash[i], false); 1828 1864 if (IS_ERR(t_alg)) { 1829 1865 err = PTR_ERR(t_alg); 1830 - dev_warn(ctrldev, "%s alg allocation failed\n", 1831 - driver_hash[i].driver_name); 1866 + pr_warn("%s alg allocation failed\n", 1867 + driver_hash[i].driver_name); 1832 1868 continue; 1833 1869 } 1834 1870 1835 1871 err = 
crypto_register_ahash(&t_alg->ahash_alg); 1836 1872 if (err) { 1837 - dev_warn(ctrldev, "%s alg registration failed\n", 1873 + pr_warn("%s alg registration failed\n", 1838 1874 t_alg->ahash_alg.halg.base.cra_driver_name); 1839 1875 kfree(t_alg); 1840 1876 } else 1841 - list_add_tail(&t_alg->entry, &priv->hash_list); 1877 + list_add_tail(&t_alg->entry, &hash_list); 1842 1878 } 1843 1879 1844 1880 return err;
+8 -19
drivers/crypto/caam/caamrng.c
··· 273 273 274 274 static void __exit caam_rng_exit(void) 275 275 { 276 + caam_jr_free(rng_ctx.jrdev); 276 277 hwrng_unregister(&caam_rng); 277 278 } 278 279 279 280 static int __init caam_rng_init(void) 280 281 { 281 - struct device_node *dev_node; 282 - struct platform_device *pdev; 283 - struct device *ctrldev; 284 - struct caam_drv_private *priv; 282 + struct device *dev; 285 283 286 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); 287 - if (!dev_node) { 288 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0"); 289 - if (!dev_node) 290 - return -ENODEV; 284 + dev = caam_jr_alloc(); 285 + if (IS_ERR(dev)) { 286 + pr_err("Job Ring Device allocation for transform failed\n"); 287 + return PTR_ERR(dev); 291 288 } 292 289 293 - pdev = of_find_device_by_node(dev_node); 294 - if (!pdev) 295 - return -ENODEV; 290 + caam_init_rng(&rng_ctx, dev); 296 291 297 - ctrldev = &pdev->dev; 298 - priv = dev_get_drvdata(ctrldev); 299 - of_node_put(dev_node); 300 - 301 - caam_init_rng(&rng_ctx, priv->jrdev[0]); 302 - 303 - dev_info(priv->jrdev[0], "registering rng-caam\n"); 292 + dev_info(dev, "registering rng-caam\n"); 304 293 return hwrng_register(&caam_rng); 305 294 } 306 295
+332 -88
drivers/crypto/caam/ctrl.c
··· 16 16 #include "error.h" 17 17 #include "ctrl.h" 18 18 19 - static int caam_remove(struct platform_device *pdev) 20 - { 21 - struct device *ctrldev; 22 - struct caam_drv_private *ctrlpriv; 23 - struct caam_drv_private_jr *jrpriv; 24 - struct caam_full __iomem *topregs; 25 - int ring, ret = 0; 26 - 27 - ctrldev = &pdev->dev; 28 - ctrlpriv = dev_get_drvdata(ctrldev); 29 - topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; 30 - 31 - /* shut down JobRs */ 32 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { 33 - ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]); 34 - jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]); 35 - irq_dispose_mapping(jrpriv->irq); 36 - } 37 - 38 - /* Shut down debug views */ 39 - #ifdef CONFIG_DEBUG_FS 40 - debugfs_remove_recursive(ctrlpriv->dfs_root); 41 - #endif 42 - 43 - /* Unmap controller region */ 44 - iounmap(&topregs->ctrl); 45 - 46 - kfree(ctrlpriv->jrdev); 47 - kfree(ctrlpriv); 48 - 49 - return ret; 50 - } 51 - 52 19 /* 53 20 * Descriptor to instantiate RNG State Handle 0 in normal mode and 54 21 * load the JDKEK, TDKEK and TDSK registers 55 22 */ 56 - static void build_instantiation_desc(u32 *desc) 23 + static void build_instantiation_desc(u32 *desc, int handle, int do_sk) 57 24 { 58 - u32 *jump_cmd; 25 + u32 *jump_cmd, op_flags; 59 26 60 27 init_job_desc(desc, 0); 61 28 29 + op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 30 + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT; 31 + 62 32 /* INIT RNG in non-test mode */ 63 - append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 64 - OP_ALG_AS_INIT); 33 + append_operation(desc, op_flags); 65 34 66 - /* wait for done */ 67 - jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); 68 - set_jump_tgt_here(desc, jump_cmd); 35 + if (!handle && do_sk) { 36 + /* 37 + * For SH0, Secure Keys must be generated as well 38 + */ 69 39 70 - /* 71 - * load 1 to clear written reg: 72 - * resets the done interrupt and returns the RNG to idle. 
73 - */ 74 - append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); 40 + /* wait for done */ 41 + jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); 42 + set_jump_tgt_here(desc, jump_cmd); 75 43 76 - /* generate secure keys (non-test) */ 77 - append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 78 - OP_ALG_RNG4_SK); 44 + /* 45 + * load 1 to clear written reg: 46 + * resets the done interrrupt and returns the RNG to idle. 47 + */ 48 + append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); 49 + 50 + /* Initialize State Handle */ 51 + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 52 + OP_ALG_AAI_RNG4_SK); 53 + } 54 + 55 + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); 79 56 } 80 57 81 - static int instantiate_rng(struct device *ctrldev) 58 + /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */ 59 + static void build_deinstantiation_desc(u32 *desc, int handle) 60 + { 61 + init_job_desc(desc, 0); 62 + 63 + /* Uninstantiate State Handle 0 */ 64 + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | 65 + (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); 66 + 67 + append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); 68 + } 69 + 70 + /* 71 + * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of 72 + * the software (no JR/QI used). 
73 + * @ctrldev - pointer to device 74 + * @status - descriptor status, after being run 75 + * 76 + * Return: - 0 if no error occurred 77 + * - -ENODEV if the DECO couldn't be acquired 78 + * - -EAGAIN if an error occurred while executing the descriptor 79 + */ 80 + static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, 81 + u32 *status) 82 82 { 83 83 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 84 84 struct caam_full __iomem *topregs; 85 85 unsigned int timeout = 100000; 86 - u32 *desc; 87 - int i, ret = 0; 88 - 89 - desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA); 90 - if (!desc) { 91 - dev_err(ctrldev, "can't allocate RNG init descriptor memory\n"); 92 - return -ENOMEM; 93 - } 94 - build_instantiation_desc(desc); 86 + u32 deco_dbg_reg, flags; 87 + int i; 95 88 96 89 /* Set the bit to request direct access to DECO0 */ 97 90 topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; ··· 96 103 97 104 if (!timeout) { 98 105 dev_err(ctrldev, "failed to acquire DECO 0\n"); 99 - ret = -EIO; 100 - goto out; 106 + clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); 107 + return -ENODEV; 101 108 } 102 109 103 110 for (i = 0; i < desc_len(desc); i++) 104 - topregs->deco.descbuf[i] = *(desc + i); 111 + wr_reg32(&topregs->deco.descbuf[i], *(desc + i)); 105 112 106 - wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR); 113 + flags = DECO_JQCR_WHL; 114 + /* 115 + * If the descriptor length is longer than 4 words, then the 116 + * FOUR bit in JRCTRL register must be set. 
117 + */ 118 + if (desc_len(desc) >= 4) 119 + flags |= DECO_JQCR_FOUR; 120 + 121 + /* Instruct the DECO to execute it */ 122 + wr_reg32(&topregs->deco.jr_ctl_hi, flags); 107 123 108 124 timeout = 10000000; 109 - while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) && 110 - --timeout) 125 + do { 126 + deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg); 127 + /* 128 + * If an error occurred in the descriptor, then 129 + * the DECO status field will be set to 0x0D 130 + */ 131 + if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) == 132 + DESC_DBG_DECO_STAT_HOST_ERR) 133 + break; 111 134 cpu_relax(); 135 + } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); 112 136 113 - if (!timeout) { 114 - dev_err(ctrldev, "failed to instantiate RNG\n"); 115 - ret = -EIO; 137 + *status = rd_reg32(&topregs->deco.op_status_hi) & 138 + DECO_OP_STATUS_HI_ERR_MASK; 139 + 140 + /* Mark the DECO as free */ 141 + clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); 142 + 143 + if (!timeout) 144 + return -EAGAIN; 145 + 146 + return 0; 147 + } 148 + 149 + /* 150 + * instantiate_rng - builds and executes a descriptor on DECO0, 151 + * which initializes the RNG block. 152 + * @ctrldev - pointer to device 153 + * @state_handle_mask - bitmask containing the instantiation status 154 + * for the RNG4 state handles which exist in 155 + * the RNG4 block: 1 if it's been instantiated 156 + * by an external entry, 0 otherwise. 157 + * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; 158 + * Caution: this can be done only once; if the keys need to be 159 + * regenerated, a POR is required 160 + * 161 + * Return: - 0 if no error occurred 162 + * - -ENOMEM if there isn't enough memory to allocate the descriptor 163 + * - -ENODEV if DECO0 couldn't be acquired 164 + * - -EAGAIN if an error occurred when executing the descriptor 165 + * f.i. there was a RNG hardware error due to not "good enough" 166 + * entropy being acquired. 
167 + */ 168 + static int instantiate_rng(struct device *ctrldev, int state_handle_mask, 169 + int gen_sk) 170 + { 171 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); 172 + struct caam_full __iomem *topregs; 173 + struct rng4tst __iomem *r4tst; 174 + u32 *desc, status, rdsta_val; 175 + int ret = 0, sh_idx; 176 + 177 + topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; 178 + r4tst = &topregs->ctrl.r4tst[0]; 179 + 180 + desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); 181 + if (!desc) 182 + return -ENOMEM; 183 + 184 + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { 185 + /* 186 + * If the corresponding bit is set, this state handle 187 + * was initialized by somebody else, so it's left alone. 188 + */ 189 + if ((1 << sh_idx) & state_handle_mask) 190 + continue; 191 + 192 + /* Create the descriptor for instantiating RNG State Handle */ 193 + build_instantiation_desc(desc, sh_idx, gen_sk); 194 + 195 + /* Try to run it through DECO0 */ 196 + ret = run_descriptor_deco0(ctrldev, desc, &status); 197 + 198 + /* 199 + * If ret is not 0, or descriptor status is not 0, then 200 + * something went wrong. No need to try the next state 201 + * handle (if available), bail out here. 202 + * Also, if for some reason, the State Handle didn't get 203 + * instantiated although the descriptor has finished 204 + * without any error (HW optimizations for later 205 + * CAAM eras), then try again. 
206 + */ 207 + rdsta_val = 208 + rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK; 209 + if (status || !(rdsta_val & (1 << sh_idx))) 210 + ret = -EAGAIN; 211 + if (ret) 212 + break; 213 + 214 + dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); 215 + /* Clear the contents before recreating the descriptor */ 216 + memset(desc, 0x00, CAAM_CMD_SZ * 7); 116 217 } 117 218 118 - clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE); 119 - out: 120 219 kfree(desc); 220 + 121 221 return ret; 122 222 } 123 223 124 224 /* 125 - * By default, the TRNG runs for 200 clocks per sample; 126 - * 1600 clocks per sample generates better entropy. 225 + * deinstantiate_rng - builds and executes a descriptor on DECO0, 226 + * which deinitializes the RNG block. 227 + * @ctrldev - pointer to device 228 + * @state_handle_mask - bitmask containing the instantiation status 229 + * for the RNG4 state handles which exist in 230 + * the RNG4 block: 1 if it's been instantiated 231 + * 232 + * Return: - 0 if no error occurred 233 + * - -ENOMEM if there isn't enough memory to allocate the descriptor 234 + * - -ENODEV if DECO0 couldn't be acquired 235 + * - -EAGAIN if an error occurred when executing the descriptor 127 236 */ 128 - static void kick_trng(struct platform_device *pdev) 237 + static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) 238 + { 239 + u32 *desc, status; 240 + int sh_idx, ret = 0; 241 + 242 + desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); 243 + if (!desc) 244 + return -ENOMEM; 245 + 246 + for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { 247 + /* 248 + * If the corresponding bit is set, then it means the state 249 + * handle was initialized by us, and thus it needs to be 250 + * deinitialized as well 251 + */ 252 + if ((1 << sh_idx) & state_handle_mask) { 253 + /* 254 + * Create the descriptor for deinstantiating this state 255 + * handle 256 + */ 257 + build_deinstantiation_desc(desc, sh_idx); 258 + 259 + /* Try to run it through DECO0 */ 260 + 
ret = run_descriptor_deco0(ctrldev, desc, &status); 261 + 262 + if (ret || status) { 263 + dev_err(ctrldev, 264 + "Failed to deinstantiate RNG4 SH%d\n", 265 + sh_idx); 266 + break; 267 + } 268 + dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); 269 + } 270 + } 271 + 272 + kfree(desc); 273 + 274 + return ret; 275 + } 276 + 277 + static int caam_remove(struct platform_device *pdev) 278 + { 279 + struct device *ctrldev; 280 + struct caam_drv_private *ctrlpriv; 281 + struct caam_full __iomem *topregs; 282 + int ring, ret = 0; 283 + 284 + ctrldev = &pdev->dev; 285 + ctrlpriv = dev_get_drvdata(ctrldev); 286 + topregs = (struct caam_full __iomem *)ctrlpriv->ctrl; 287 + 288 + /* Remove platform devices for JobRs */ 289 + for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) { 290 + if (ctrlpriv->jrpdev[ring]) 291 + of_device_unregister(ctrlpriv->jrpdev[ring]); 292 + } 293 + 294 + /* De-initialize RNG state handles initialized by this driver. */ 295 + if (ctrlpriv->rng4_sh_init) 296 + deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); 297 + 298 + /* Shut down debug views */ 299 + #ifdef CONFIG_DEBUG_FS 300 + debugfs_remove_recursive(ctrlpriv->dfs_root); 301 + #endif 302 + 303 + /* Unmap controller region */ 304 + iounmap(&topregs->ctrl); 305 + 306 + kfree(ctrlpriv->jrpdev); 307 + kfree(ctrlpriv); 308 + 309 + return ret; 310 + } 311 + 312 + /* 313 + * kick_trng - sets the various parameters for enabling the initialization 314 + * of the RNG4 block in CAAM 315 + * @pdev - pointer to the platform device 316 + * @ent_delay - Defines the length (in system clocks) of each entropy sample. 
317 + */ 318 + static void kick_trng(struct platform_device *pdev, int ent_delay) 129 319 { 130 320 struct device *ctrldev = &pdev->dev; 131 321 struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); ··· 321 145 322 146 /* put RNG4 into program mode */ 323 147 setbits32(&r4tst->rtmctl, RTMCTL_PRGM); 324 - /* 1600 clocks per sample */ 148 + 149 + /* 150 + * Performance-wise, it does not make sense to 151 + * set the delay to a value that is lower 152 + * than the last one that worked (i.e. the state handles 153 + * were instantiated properly. Thus, instead of wasting 154 + * time trying to set the values controlling the sample 155 + * frequency, the function simply returns. 156 + */ 157 + val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK) 158 + >> RTSDCTL_ENT_DLY_SHIFT; 159 + if (ent_delay <= val) { 160 + /* put RNG4 into run mode */ 161 + clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); 162 + return; 163 + } 164 + 325 165 val = rd_reg32(&r4tst->rtsdctl); 326 - val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT); 166 + val = (val & ~RTSDCTL_ENT_DLY_MASK) | 167 + (ent_delay << RTSDCTL_ENT_DLY_SHIFT); 327 168 wr_reg32(&r4tst->rtsdctl, val); 328 - /* min. freq. count */ 329 - wr_reg32(&r4tst->rtfrqmin, 400); 330 - /* max. freq. count */ 331 - wr_reg32(&r4tst->rtfrqmax, 6400); 169 + /* min. freq. count, equal to 1/4 of the entropy sample length */ 170 + wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2); 171 + /* max. freq. 
count, equal to 8 times the entropy sample length */ 172 + wr_reg32(&r4tst->rtfrqmax, ent_delay << 3); 332 173 /* put RNG4 into run mode */ 333 174 clrbits32(&r4tst->rtmctl, RTMCTL_PRGM); 334 175 } ··· 386 193 /* Probe routine for CAAM top (controller) level */ 387 194 static int caam_probe(struct platform_device *pdev) 388 195 { 389 - int ret, ring, rspec; 196 + int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 390 197 u64 caam_id; 391 198 struct device *dev; 392 199 struct device_node *nprop, *np; ··· 451 258 rspec++; 452 259 } 453 260 454 - ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL); 455 - if (ctrlpriv->jrdev == NULL) { 261 + ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec, 262 + GFP_KERNEL); 263 + if (ctrlpriv->jrpdev == NULL) { 456 264 iounmap(&topregs->ctrl); 457 265 return -ENOMEM; 458 266 } ··· 461 267 ring = 0; 462 268 ctrlpriv->total_jobrs = 0; 463 269 for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") { 464 - caam_jr_probe(pdev, np, ring); 270 + ctrlpriv->jrpdev[ring] = 271 + of_platform_device_create(np, NULL, dev); 272 + if (!ctrlpriv->jrpdev[ring]) { 273 + pr_warn("JR%d Platform device creation error\n", ring); 274 + continue; 275 + } 465 276 ctrlpriv->total_jobrs++; 466 277 ring++; 467 278 } 468 279 if (!ring) { 469 280 for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") { 470 - caam_jr_probe(pdev, np, ring); 281 + ctrlpriv->jrpdev[ring] = 282 + of_platform_device_create(np, NULL, dev); 283 + if (!ctrlpriv->jrpdev[ring]) { 284 + pr_warn("JR%d Platform device creation error\n", 285 + ring); 286 + continue; 287 + } 471 288 ctrlpriv->total_jobrs++; 472 289 ring++; 473 290 } ··· 504 299 505 300 /* 506 301 * If SEC has RNG version >= 4 and RNG state handle has not been 507 - * already instantiated ,do RNG instantiation 302 + * already instantiated, do RNG instantiation 508 303 */ 509 - if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 && 510 - 
!(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) { 511 - kick_trng(pdev); 512 - ret = instantiate_rng(dev); 304 + if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) { 305 + ctrlpriv->rng4_sh_init = 306 + rd_reg32(&topregs->ctrl.r4tst[0].rdsta); 307 + /* 308 + * If the secure keys (TDKEK, JDKEK, TDSK), were already 309 + * generated, signal this to the function that is instantiating 310 + * the state handles. An error would occur if RNG4 attempts 311 + * to regenerate these keys before the next POR. 312 + */ 313 + gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; 314 + ctrlpriv->rng4_sh_init &= RDSTA_IFMASK; 315 + do { 316 + int inst_handles = 317 + rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & 318 + RDSTA_IFMASK; 319 + /* 320 + * If either SH were instantiated by somebody else 321 + * (e.g. u-boot) then it is assumed that the entropy 322 + * parameters are properly set and thus the function 323 + * setting these (kick_trng(...)) is skipped. 324 + * Also, if a handle was instantiated, do not change 325 + * the TRNG parameters. 326 + */ 327 + if (!(ctrlpriv->rng4_sh_init || inst_handles)) { 328 + kick_trng(pdev, ent_delay); 329 + ent_delay += 400; 330 + } 331 + /* 332 + * if instantiate_rng(...) fails, the loop will rerun 333 + * and the kick_trng(...) function will modify the 334 + * upper and lower limits of the entropy sampling 335 + * interval, leading to a successful initialization of 336 + * the RNG. 
337 + */ 338 + ret = instantiate_rng(dev, inst_handles, 339 + gen_sk); 340 + } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); 513 341 if (ret) { 342 + dev_err(dev, "failed to instantiate RNG"); 514 343 caam_remove(pdev); 515 344 return ret; 516 345 } 346 + /* 347 + * Set handles init'ed by this module as the complement of the 348 + * already initialized ones 349 + */ 350 + ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK; 517 351 518 352 /* Enable RDB bit so that RNG works faster */ 519 353 setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
+9 -8
drivers/crypto/caam/desc.h
··· 1155 1155 1156 1156 /* randomizer AAI set */ 1157 1157 #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT) 1158 - #define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT) 1159 - #define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT) 1158 + #define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT) 1159 + #define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT) 1160 + 1161 + /* RNG4 AAI set */ 1162 + #define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT) 1163 + #define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT) 1164 + #define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT) 1165 + #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT) 1166 + #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT) 1160 1167 1161 1168 /* hmac/smac AAI set */ 1162 1169 #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT) ··· 1184 1177 #define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT) 1185 1178 #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT) 1186 1179 #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT) 1187 - 1188 - /* RNG4 set */ 1189 - #define OP_ALG_RNG4_SHIFT 4 1190 - #define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT) 1191 - 1192 - #define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT) 1193 1180 1194 1181 #define OP_ALG_AS_SHIFT 2 1195 1182 #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+11 -9
drivers/crypto/caam/intern.h
··· 37 37 38 38 /* Private sub-storage for a single JobR */ 39 39 struct caam_drv_private_jr { 40 - struct device *parentdev; /* points back to controller dev */ 41 - struct platform_device *jr_pdev;/* points to platform device for JR */ 40 + struct list_head list_node; /* Job Ring device list */ 41 + struct device *dev; 42 42 int ridx; 43 43 struct caam_job_ring __iomem *rregs; /* JobR's register space */ 44 44 struct tasklet_struct irqtask; 45 45 int irq; /* One per queue */ 46 + 47 + /* Number of scatterlist crypt transforms active on the JobR */ 48 + atomic_t tfm_count ____cacheline_aligned; 46 49 47 50 /* Job ring info */ 48 51 int ringsize; /* Size of rings (assume input = output) */ ··· 66 63 struct caam_drv_private { 67 64 68 65 struct device *dev; 69 - struct device **jrdev; /* Alloc'ed array per sub-device */ 66 + struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ 70 67 struct platform_device *pdev; 71 68 72 69 /* Physical-presence section */ ··· 83 80 u8 qi_present; /* Nonzero if QI present in device */ 84 81 int secvio_irq; /* Security violation interrupt number */ 85 82 86 - /* which jr allocated to scatterlist crypto */ 87 - atomic_t tfm_count ____cacheline_aligned; 88 - /* list of registered crypto algorithms (mk generic context handle?) */ 89 - struct list_head alg_list; 90 - /* list of registered hash algorithms (mk generic context handle?) */ 91 - struct list_head hash_list; 83 + #define RNG4_MAX_HANDLES 2 84 + /* RNG4 block */ 85 + u32 rng4_sh_init; /* This bitmap shows which of the State 86 + Handles of the RNG4 block are initialized 87 + by this driver */ 92 88 93 89 /* 94 90 * debugfs entries for developer view into driver/device
+233 -110
drivers/crypto/caam/jr.c
··· 13 13 #include "desc.h" 14 14 #include "intern.h" 15 15 16 + struct jr_driver_data { 17 + /* List of Physical JobR's with the Driver */ 18 + struct list_head jr_list; 19 + spinlock_t jr_alloc_lock; /* jr_list lock */ 20 + } ____cacheline_aligned; 21 + 22 + static struct jr_driver_data driver_data; 23 + 24 + static int caam_reset_hw_jr(struct device *dev) 25 + { 26 + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 27 + unsigned int timeout = 100000; 28 + 29 + /* 30 + * mask interrupts since we are going to poll 31 + * for reset completion status 32 + */ 33 + setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 34 + 35 + /* initiate flush (required prior to reset) */ 36 + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); 37 + while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == 38 + JRINT_ERR_HALT_INPROGRESS) && --timeout) 39 + cpu_relax(); 40 + 41 + if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != 42 + JRINT_ERR_HALT_COMPLETE || timeout == 0) { 43 + dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); 44 + return -EIO; 45 + } 46 + 47 + /* initiate reset */ 48 + timeout = 100000; 49 + wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); 50 + while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) 51 + cpu_relax(); 52 + 53 + if (timeout == 0) { 54 + dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); 55 + return -EIO; 56 + } 57 + 58 + /* unmask interrupts */ 59 + clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 60 + 61 + return 0; 62 + } 63 + 64 + /* 65 + * Shutdown JobR independent of platform property code 66 + */ 67 + int caam_jr_shutdown(struct device *dev) 68 + { 69 + struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 70 + dma_addr_t inpbusaddr, outbusaddr; 71 + int ret; 72 + 73 + ret = caam_reset_hw_jr(dev); 74 + 75 + tasklet_kill(&jrp->irqtask); 76 + 77 + /* Release interrupt */ 78 + free_irq(jrp->irq, dev); 79 + 80 + /* Free rings */ 81 + inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); 82 + outbusaddr 
= rd_reg64(&jrp->rregs->outring_base); 83 + dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, 84 + jrp->inpring, inpbusaddr); 85 + dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, 86 + jrp->outring, outbusaddr); 87 + kfree(jrp->entinfo); 88 + 89 + return ret; 90 + } 91 + 92 + static int caam_jr_remove(struct platform_device *pdev) 93 + { 94 + int ret; 95 + struct device *jrdev; 96 + struct caam_drv_private_jr *jrpriv; 97 + 98 + jrdev = &pdev->dev; 99 + jrpriv = dev_get_drvdata(jrdev); 100 + 101 + /* 102 + * Return EBUSY if job ring already allocated. 103 + */ 104 + if (atomic_read(&jrpriv->tfm_count)) { 105 + dev_err(jrdev, "Device is busy\n"); 106 + return -EBUSY; 107 + } 108 + 109 + /* Remove the node from Physical JobR list maintained by driver */ 110 + spin_lock(&driver_data.jr_alloc_lock); 111 + list_del(&jrpriv->list_node); 112 + spin_unlock(&driver_data.jr_alloc_lock); 113 + 114 + /* Release ring */ 115 + ret = caam_jr_shutdown(jrdev); 116 + if (ret) 117 + dev_err(jrdev, "Failed to shut down job ring\n"); 118 + irq_dispose_mapping(jrpriv->irq); 119 + 120 + return ret; 121 + } 122 + 16 123 /* Main per-ring interrupt handler */ 17 124 static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) 18 125 { ··· 235 128 } 236 129 237 130 /** 131 + * caam_jr_alloc() - Alloc a job ring for someone to use as needed. 132 + * 133 + * returns : pointer to the newly allocated physical 134 + * JobR dev can be written to if successful. 
135 + **/ 136 + struct device *caam_jr_alloc(void) 137 + { 138 + struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; 139 + struct device *dev = NULL; 140 + int min_tfm_cnt = INT_MAX; 141 + int tfm_cnt; 142 + 143 + spin_lock(&driver_data.jr_alloc_lock); 144 + 145 + if (list_empty(&driver_data.jr_list)) { 146 + spin_unlock(&driver_data.jr_alloc_lock); 147 + return ERR_PTR(-ENODEV); 148 + } 149 + 150 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { 151 + tfm_cnt = atomic_read(&jrpriv->tfm_count); 152 + if (tfm_cnt < min_tfm_cnt) { 153 + min_tfm_cnt = tfm_cnt; 154 + min_jrpriv = jrpriv; 155 + } 156 + if (!min_tfm_cnt) 157 + break; 158 + } 159 + 160 + if (min_jrpriv) { 161 + atomic_inc(&min_jrpriv->tfm_count); 162 + dev = min_jrpriv->dev; 163 + } 164 + spin_unlock(&driver_data.jr_alloc_lock); 165 + 166 + return dev; 167 + } 168 + EXPORT_SYMBOL(caam_jr_alloc); 169 + 170 + /** 171 + * caam_jr_free() - Free the Job Ring 172 + * @rdev - points to the dev that identifies the Job ring to 173 + * be released. 174 + **/ 175 + void caam_jr_free(struct device *rdev) 176 + { 177 + struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); 178 + 179 + atomic_dec(&jrpriv->tfm_count); 180 + } 181 + EXPORT_SYMBOL(caam_jr_free); 182 + 183 + /** 238 184 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK, 239 185 * -EBUSY if the queue is full, -EIO if it cannot map the caller's 240 186 * descriptor. 
··· 367 207 } 368 208 EXPORT_SYMBOL(caam_jr_enqueue); 369 209 370 - static int caam_reset_hw_jr(struct device *dev) 371 - { 372 - struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 373 - unsigned int timeout = 100000; 374 - 375 - /* 376 - * mask interrupts since we are going to poll 377 - * for reset completion status 378 - */ 379 - setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 380 - 381 - /* initiate flush (required prior to reset) */ 382 - wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); 383 - while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == 384 - JRINT_ERR_HALT_INPROGRESS) && --timeout) 385 - cpu_relax(); 386 - 387 - if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != 388 - JRINT_ERR_HALT_COMPLETE || timeout == 0) { 389 - dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); 390 - return -EIO; 391 - } 392 - 393 - /* initiate reset */ 394 - timeout = 100000; 395 - wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); 396 - while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) 397 - cpu_relax(); 398 - 399 - if (timeout == 0) { 400 - dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); 401 - return -EIO; 402 - } 403 - 404 - /* unmask interrupts */ 405 - clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK); 406 - 407 - return 0; 408 - } 409 - 410 210 /* 411 211 * Init JobR independent of platform property detection 412 212 */ ··· 382 262 383 263 /* Connect job ring interrupt handler. 
*/ 384 264 error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, 385 - "caam-jobr", dev); 265 + dev_name(dev), dev); 386 266 if (error) { 387 267 dev_err(dev, "can't connect JobR %d interrupt (%d)\n", 388 268 jrp->ridx, jrp->irq); ··· 438 318 return 0; 439 319 } 440 320 441 - /* 442 - * Shutdown JobR independent of platform property code 443 - */ 444 - int caam_jr_shutdown(struct device *dev) 445 - { 446 - struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 447 - dma_addr_t inpbusaddr, outbusaddr; 448 - int ret; 449 - 450 - ret = caam_reset_hw_jr(dev); 451 - 452 - tasklet_kill(&jrp->irqtask); 453 - 454 - /* Release interrupt */ 455 - free_irq(jrp->irq, dev); 456 - 457 - /* Free rings */ 458 - inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); 459 - outbusaddr = rd_reg64(&jrp->rregs->outring_base); 460 - dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH, 461 - jrp->inpring, inpbusaddr); 462 - dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH, 463 - jrp->outring, outbusaddr); 464 - kfree(jrp->entinfo); 465 - of_device_unregister(jrp->jr_pdev); 466 - 467 - return ret; 468 - } 469 321 470 322 /* 471 - * Probe routine for each detected JobR subsystem. It assumes that 472 - * property detection was picked up externally. 323 + * Probe routine for each detected JobR subsystem. 
473 324 */ 474 - int caam_jr_probe(struct platform_device *pdev, struct device_node *np, 475 - int ring) 325 + static int caam_jr_probe(struct platform_device *pdev) 476 326 { 477 - struct device *ctrldev, *jrdev; 478 - struct platform_device *jr_pdev; 479 - struct caam_drv_private *ctrlpriv; 327 + struct device *jrdev; 328 + struct device_node *nprop; 329 + struct caam_job_ring __iomem *ctrl; 480 330 struct caam_drv_private_jr *jrpriv; 481 - u32 *jroffset; 331 + static int total_jobrs; 482 332 int error; 483 333 484 - ctrldev = &pdev->dev; 485 - ctrlpriv = dev_get_drvdata(ctrldev); 486 - 334 + jrdev = &pdev->dev; 487 335 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), 488 336 GFP_KERNEL); 489 - if (jrpriv == NULL) { 490 - dev_err(ctrldev, "can't alloc private mem for job ring %d\n", 491 - ring); 337 + if (!jrpriv) 338 + return -ENOMEM; 339 + 340 + dev_set_drvdata(jrdev, jrpriv); 341 + 342 + /* save ring identity relative to detection */ 343 + jrpriv->ridx = total_jobrs++; 344 + 345 + nprop = pdev->dev.of_node; 346 + /* Get configuration properties from device tree */ 347 + /* First, get register page */ 348 + ctrl = of_iomap(nprop, 0); 349 + if (!ctrl) { 350 + dev_err(jrdev, "of_iomap() failed\n"); 492 351 return -ENOMEM; 493 352 } 494 - jrpriv->parentdev = ctrldev; /* point back to parent */ 495 - jrpriv->ridx = ring; /* save ring identity relative to detection */ 496 353 497 - /* 498 - * Derive a pointer to the detected JobRs regs 499 - * Driver has already iomapped the entire space, we just 500 - * need to add in the offset to this JobR. 
Don't know if I 501 - * like this long-term, but it'll run 502 - */ 503 - jroffset = (u32 *)of_get_property(np, "reg", NULL); 504 - jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl 505 - + *jroffset); 506 - 507 - /* Build a local dev for each detected queue */ 508 - jr_pdev = of_platform_device_create(np, NULL, ctrldev); 509 - if (jr_pdev == NULL) { 510 - kfree(jrpriv); 511 - return -EINVAL; 512 - } 513 - 514 - jrpriv->jr_pdev = jr_pdev; 515 - jrdev = &jr_pdev->dev; 516 - dev_set_drvdata(jrdev, jrpriv); 517 - ctrlpriv->jrdev[ring] = jrdev; 354 + jrpriv->rregs = (struct caam_job_ring __force *)ctrl; 518 355 519 356 if (sizeof(dma_addr_t) == sizeof(u64)) 520 - if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring")) 357 + if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring")) 521 358 dma_set_mask(jrdev, DMA_BIT_MASK(40)); 522 359 else 523 360 dma_set_mask(jrdev, DMA_BIT_MASK(36)); ··· 482 405 dma_set_mask(jrdev, DMA_BIT_MASK(32)); 483 406 484 407 /* Identify the interrupt */ 485 - jrpriv->irq = irq_of_parse_and_map(np, 0); 408 + jrpriv->irq = irq_of_parse_and_map(nprop, 0); 486 409 487 410 /* Now do the platform independent part */ 488 411 error = caam_jr_init(jrdev); /* now turn on hardware */ 489 412 if (error) { 490 - of_device_unregister(jr_pdev); 491 413 kfree(jrpriv); 492 414 return error; 493 415 } 494 416 495 - return error; 417 + jrpriv->dev = jrdev; 418 + spin_lock(&driver_data.jr_alloc_lock); 419 + list_add_tail(&jrpriv->list_node, &driver_data.jr_list); 420 + spin_unlock(&driver_data.jr_alloc_lock); 421 + 422 + atomic_set(&jrpriv->tfm_count, 0); 423 + 424 + return 0; 496 425 } 426 + 427 + static struct of_device_id caam_jr_match[] = { 428 + { 429 + .compatible = "fsl,sec-v4.0-job-ring", 430 + }, 431 + { 432 + .compatible = "fsl,sec4.0-job-ring", 433 + }, 434 + {}, 435 + }; 436 + MODULE_DEVICE_TABLE(of, caam_jr_match); 437 + 438 + static struct platform_driver caam_jr_driver = { 439 + .driver = { 440 + .name = "caam_jr", 441 + 
.owner = THIS_MODULE, 442 + .of_match_table = caam_jr_match, 443 + }, 444 + .probe = caam_jr_probe, 445 + .remove = caam_jr_remove, 446 + }; 447 + 448 + static int __init jr_driver_init(void) 449 + { 450 + spin_lock_init(&driver_data.jr_alloc_lock); 451 + INIT_LIST_HEAD(&driver_data.jr_list); 452 + return platform_driver_register(&caam_jr_driver); 453 + } 454 + 455 + static void __exit jr_driver_exit(void) 456 + { 457 + platform_driver_unregister(&caam_jr_driver); 458 + } 459 + 460 + module_init(jr_driver_init); 461 + module_exit(jr_driver_exit); 462 + 463 + MODULE_LICENSE("GPL"); 464 + MODULE_DESCRIPTION("FSL CAAM JR request backend"); 465 + MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
+2 -3
drivers/crypto/caam/jr.h
··· 8 8 #define JR_H 9 9 10 10 /* Prototypes for backend-level services exposed to APIs */ 11 + struct device *caam_jr_alloc(void); 12 + void caam_jr_free(struct device *rdev); 11 13 int caam_jr_enqueue(struct device *dev, u32 *desc, 12 14 void (*cbk)(struct device *dev, u32 *desc, u32 status, 13 15 void *areq), 14 16 void *areq); 15 17 16 - extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np, 17 - int ring); 18 - extern int caam_jr_shutdown(struct device *dev); 19 18 #endif /* JR_H */
+11 -3
drivers/crypto/caam/regs.h
··· 245 245 246 246 /* RNG4 TRNG test registers */ 247 247 struct rng4tst { 248 - #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ 248 + #define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */ 249 249 u32 rtmctl; /* misc. control register */ 250 250 u32 rtscmisc; /* statistical check misc. register */ 251 251 u32 rtpkrrng; /* poker range register */ ··· 255 255 }; 256 256 #define RTSDCTL_ENT_DLY_SHIFT 16 257 257 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT) 258 + #define RTSDCTL_ENT_DLY_MIN 1200 259 + #define RTSDCTL_ENT_DLY_MAX 12800 258 260 u32 rtsdctl; /* seed control register */ 259 261 union { 260 262 u32 rtsblim; /* PRGM=1: sparse bit limit register */ ··· 268 266 u32 rtfrqcnt; /* PRGM=0: freq. count register */ 269 267 }; 270 268 u32 rsvd1[40]; 269 + #define RDSTA_SKVT 0x80000000 270 + #define RDSTA_SKVN 0x40000000 271 271 #define RDSTA_IF0 0x00000001 272 + #define RDSTA_IF1 0x00000002 273 + #define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0) 272 274 u32 rdsta; 273 275 u32 rsvd2[15]; 274 276 }; ··· 698 692 u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */ 699 693 u32 jr_ctl_lo; 700 694 u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */ 695 + #define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF 701 696 u32 op_status_hi; /* DxOPSTA - DECO Operation Status */ 702 697 u32 op_status_lo; 703 698 u32 rsvd24[2]; ··· 713 706 u32 rsvd29[48]; 714 707 u32 descbuf[64]; /* DxDESB - Descriptor buffer */ 715 708 u32 rscvd30[193]; 709 + #define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000 710 + #define DESC_DBG_DECO_STAT_VALID 0x80000000 711 + #define DESC_DBG_DECO_STAT_MASK 0x00F00000 716 712 u32 desc_dbg; /* DxDDR - DECO Debug Register */ 717 713 u32 rsvd31[126]; 718 714 }; 719 715 720 - /* DECO DBG Register Valid Bit*/ 721 - #define DECO_DBG_VALID 0x80000000 722 716 #define DECO_JQCR_WHL 0x20000000 723 717 #define DECO_JQCR_FOUR 0x10000000 724 718
+25 -9
drivers/crypto/caam/sg_sw_sec4.h
··· 117 117 return nents; 118 118 } 119 119 120 + /* Map SG page in kernel virtual address space and copy */ 121 + static inline void sg_map_copy(u8 *dest, struct scatterlist *sg, 122 + int len, int offset) 123 + { 124 + u8 *mapped_addr; 125 + 126 + /* 127 + * Page here can be user-space pinned using get_user_pages 128 + * Same must be kmapped before use and kunmapped subsequently 129 + */ 130 + mapped_addr = kmap_atomic(sg_page(sg)); 131 + memcpy(dest, mapped_addr + offset, len); 132 + kunmap_atomic(mapped_addr); 133 + } 134 + 120 135 /* Copy from len bytes of sg to dest, starting from beginning */ 121 136 static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) 122 137 { ··· 139 124 int cpy_index = 0, next_cpy_index = current_sg->length; 140 125 141 126 while (next_cpy_index < len) { 142 - memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), 143 - current_sg->length); 127 + sg_map_copy(dest + cpy_index, current_sg, current_sg->length, 128 + current_sg->offset); 144 129 current_sg = scatterwalk_sg_next(current_sg); 145 130 cpy_index = next_cpy_index; 146 131 next_cpy_index += current_sg->length; 147 132 } 148 133 if (cpy_index < len) 149 - memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), 150 - len - cpy_index); 134 + sg_map_copy(dest + cpy_index, current_sg, len-cpy_index, 135 + current_sg->offset); 151 136 } 152 137 153 138 /* Copy sg data, from to_skip to end, to dest */ ··· 155 140 int to_skip, unsigned int end) 156 141 { 157 142 struct scatterlist *current_sg = sg; 158 - int sg_index, cpy_index; 143 + int sg_index, cpy_index, offset; 159 144 160 145 sg_index = current_sg->length; 161 146 while (sg_index <= to_skip) { ··· 163 148 sg_index += current_sg->length; 164 149 } 165 150 cpy_index = sg_index - to_skip; 166 - memcpy(dest, (u8 *) sg_virt(current_sg) + 167 - current_sg->length - cpy_index, cpy_index); 168 - current_sg = scatterwalk_sg_next(current_sg); 169 - if (end - sg_index) 151 + offset = current_sg->offset + 
current_sg->length - cpy_index; 152 + sg_map_copy(dest, current_sg, cpy_index, offset); 153 + if (end - sg_index) { 154 + current_sg = scatterwalk_sg_next(current_sg); 170 155 sg_copy(dest + cpy_index, current_sg, end - sg_index); 156 + } 171 157 }
+22 -31
drivers/crypto/dcp.c
··· 733 733 platform_set_drvdata(pdev, dev); 734 734 735 735 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 736 - if (!r) { 737 - dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n"); 738 - return -ENXIO; 739 - } 740 - dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start, 741 - resource_size(r)); 736 + dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r); 737 + if (IS_ERR(dev->dcp_regs_base)) 738 + return PTR_ERR(dev->dcp_regs_base); 742 739 743 740 dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL); 744 741 udelay(10); ··· 759 762 return -EIO; 760 763 } 761 764 dev->dcp_vmi_irq = r->start; 762 - ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev); 765 + ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0, 766 + "dcp", dev); 763 767 if (ret != 0) { 764 768 dev_err(&pdev->dev, "can't request_irq (0)\n"); 765 769 return -EIO; ··· 769 771 r = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 770 772 if (!r) { 771 773 dev_err(&pdev->dev, "can't get IRQ resource (1)\n"); 772 - ret = -EIO; 773 - goto err_free_irq0; 774 + return -EIO; 774 775 } 775 776 dev->dcp_irq = r->start; 776 - ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev); 777 + ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp", 778 + dev); 777 779 if (ret != 0) { 778 780 dev_err(&pdev->dev, "can't request_irq (1)\n"); 779 - ret = -EIO; 780 - goto err_free_irq0; 781 + return -EIO; 781 782 } 782 783 783 784 dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev, ··· 785 788 GFP_KERNEL); 786 789 if (!dev->hw_pkg[0]) { 787 790 dev_err(&pdev->dev, "Could not allocate hw descriptors\n"); 788 - ret = -ENOMEM; 789 - goto err_free_irq1; 791 + return -ENOMEM; 790 792 } 791 793 792 794 for (i = 1; i < DCP_MAX_PKG; i++) { ··· 844 848 for (j = 0; j < i; j++) 845 849 crypto_unregister_alg(&algs[j]); 846 850 err_free_key_iv: 851 + tasklet_kill(&dev->done_task); 852 + tasklet_kill(&dev->queue_task); 847 853 dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, 848 
854 dev->payload_base_dma); 849 855 err_free_hw_packet: 850 856 dma_free_coherent(&pdev->dev, DCP_MAX_PKG * 851 857 sizeof(struct dcp_hw_packet), dev->hw_pkg[0], 852 858 dev->hw_phys_pkg); 853 - err_free_irq1: 854 - free_irq(dev->dcp_irq, dev); 855 - err_free_irq0: 856 - free_irq(dev->dcp_vmi_irq, dev); 857 859 858 860 return ret; 859 861 } ··· 862 868 int j; 863 869 dev = platform_get_drvdata(pdev); 864 870 865 - dma_free_coherent(&pdev->dev, 866 - DCP_MAX_PKG * sizeof(struct dcp_hw_packet), 867 - dev->hw_pkg[0], dev->hw_phys_pkg); 868 - 869 - dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, 870 - dev->payload_base_dma); 871 - 872 - free_irq(dev->dcp_irq, dev); 873 - free_irq(dev->dcp_vmi_irq, dev); 874 - 875 - tasklet_kill(&dev->done_task); 876 - tasklet_kill(&dev->queue_task); 871 + misc_deregister(&dev->dcp_bootstream_misc); 877 872 878 873 for (j = 0; j < ARRAY_SIZE(algs); j++) 879 874 crypto_unregister_alg(&algs[j]); 880 875 881 - misc_deregister(&dev->dcp_bootstream_misc); 876 + tasklet_kill(&dev->done_task); 877 + tasklet_kill(&dev->queue_task); 878 + 879 + dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base, 880 + dev->payload_base_dma); 881 + 882 + dma_free_coherent(&pdev->dev, 883 + DCP_MAX_PKG * sizeof(struct dcp_hw_packet), 884 + dev->hw_pkg[0], dev->hw_phys_pkg); 882 885 883 886 return 0; 884 887 }
+10 -18
drivers/crypto/ixp4xx_crypto.c
··· 1149 1149 unsigned int keylen) 1150 1150 { 1151 1151 struct ixp_ctx *ctx = crypto_aead_ctx(tfm); 1152 - struct rtattr *rta = (struct rtattr *)key; 1153 - struct crypto_authenc_key_param *param; 1152 + struct crypto_authenc_keys keys; 1154 1153 1155 - if (!RTA_OK(rta, keylen)) 1156 - goto badkey; 1157 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 1158 - goto badkey; 1159 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 1154 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 1160 1155 goto badkey; 1161 1156 1162 - param = RTA_DATA(rta); 1163 - ctx->enckey_len = be32_to_cpu(param->enckeylen); 1164 - 1165 - key += RTA_ALIGN(rta->rta_len); 1166 - keylen -= RTA_ALIGN(rta->rta_len); 1167 - 1168 - if (keylen < ctx->enckey_len) 1157 + if (keys.authkeylen > sizeof(ctx->authkey)) 1169 1158 goto badkey; 1170 1159 1171 - ctx->authkey_len = keylen - ctx->enckey_len; 1172 - memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len); 1173 - memcpy(ctx->authkey, key, ctx->authkey_len); 1160 + if (keys.enckeylen > sizeof(ctx->enckey)) 1161 + goto badkey; 1162 + 1163 + memcpy(ctx->authkey, keys.authkey, keys.authkeylen); 1164 + memcpy(ctx->enckey, keys.enckey, keys.enckeylen); 1165 + ctx->authkey_len = keys.authkeylen; 1166 + ctx->enckey_len = keys.enckeylen; 1174 1167 1175 1168 return aead_setup(tfm, crypto_aead_authsize(tfm)); 1176 1169 badkey: 1177 - ctx->enckey_len = 0; 1178 1170 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1179 1171 return -EINVAL; 1180 1172 }
+7 -7
drivers/crypto/mv_cesa.c
··· 907 907 return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE); 908 908 } 909 909 910 - irqreturn_t crypto_int(int irq, void *priv) 910 + static irqreturn_t crypto_int(int irq, void *priv) 911 911 { 912 912 u32 val; 913 913 ··· 928 928 return IRQ_HANDLED; 929 929 } 930 930 931 - struct crypto_alg mv_aes_alg_ecb = { 931 + static struct crypto_alg mv_aes_alg_ecb = { 932 932 .cra_name = "ecb(aes)", 933 933 .cra_driver_name = "mv-ecb-aes", 934 934 .cra_priority = 300, ··· 951 951 }, 952 952 }; 953 953 954 - struct crypto_alg mv_aes_alg_cbc = { 954 + static struct crypto_alg mv_aes_alg_cbc = { 955 955 .cra_name = "cbc(aes)", 956 956 .cra_driver_name = "mv-cbc-aes", 957 957 .cra_priority = 300, ··· 975 975 }, 976 976 }; 977 977 978 - struct ahash_alg mv_sha1_alg = { 978 + static struct ahash_alg mv_sha1_alg = { 979 979 .init = mv_hash_init, 980 980 .update = mv_hash_update, 981 981 .final = mv_hash_final, ··· 999 999 } 1000 1000 }; 1001 1001 1002 - struct ahash_alg mv_hmac_sha1_alg = { 1002 + static struct ahash_alg mv_hmac_sha1_alg = { 1003 1003 .init = mv_hash_init, 1004 1004 .update = mv_hash_update, 1005 1005 .final = mv_hash_final, ··· 1084 1084 goto err_unmap_sram; 1085 1085 } 1086 1086 1087 - ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), 1087 + ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev), 1088 1088 cp); 1089 1089 if (ret) 1090 1090 goto err_thread; ··· 1187 1187 .driver = { 1188 1188 .owner = THIS_MODULE, 1189 1189 .name = "mv_crypto", 1190 - .of_match_table = of_match_ptr(mv_cesa_of_match_table), 1190 + .of_match_table = mv_cesa_of_match_table, 1191 1191 }, 1192 1192 }; 1193 1193 MODULE_ALIAS("platform:mv_crypto");
+3 -3
drivers/crypto/omap-aes.c
··· 275 275 if (dd->flags & FLAGS_CBC) 276 276 val |= AES_REG_CTRL_CBC; 277 277 if (dd->flags & FLAGS_CTR) { 278 - val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_32; 278 + val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128; 279 279 mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK; 280 280 } 281 281 if (dd->flags & FLAGS_ENCRYPT) ··· 554 554 return err; 555 555 } 556 556 557 - int omap_aes_check_aligned(struct scatterlist *sg) 557 + static int omap_aes_check_aligned(struct scatterlist *sg) 558 558 { 559 559 while (sg) { 560 560 if (!IS_ALIGNED(sg->offset, 4)) ··· 566 566 return 0; 567 567 } 568 568 569 - int omap_aes_copy_sgs(struct omap_aes_dev *dd) 569 + static int omap_aes_copy_sgs(struct omap_aes_dev *dd) 570 570 { 571 571 void *buf_in, *buf_out; 572 572 int pages;
+1
drivers/crypto/omap-sham.c
··· 2033 2033 MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); 2034 2034 MODULE_LICENSE("GPL v2"); 2035 2035 MODULE_AUTHOR("Dmitry Kasatkin"); 2036 + MODULE_ALIAS("platform:omap-sham");
+8 -24
drivers/crypto/picoxcell_crypto.c
··· 495 495 { 496 496 struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm); 497 497 struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg); 498 - struct rtattr *rta = (void *)key; 499 - struct crypto_authenc_key_param *param; 500 - unsigned int authkeylen, enckeylen; 498 + struct crypto_authenc_keys keys; 501 499 int err = -EINVAL; 502 500 503 - if (!RTA_OK(rta, keylen)) 501 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 504 502 goto badkey; 505 503 506 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 504 + if (keys.enckeylen > AES_MAX_KEY_SIZE) 507 505 goto badkey; 508 506 509 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 510 - goto badkey; 511 - 512 - param = RTA_DATA(rta); 513 - enckeylen = be32_to_cpu(param->enckeylen); 514 - 515 - key += RTA_ALIGN(rta->rta_len); 516 - keylen -= RTA_ALIGN(rta->rta_len); 517 - 518 - if (keylen < enckeylen) 519 - goto badkey; 520 - 521 - authkeylen = keylen - enckeylen; 522 - 523 - if (enckeylen > AES_MAX_KEY_SIZE) 507 + if (keys.authkeylen > sizeof(ctx->hash_ctx)) 524 508 goto badkey; 525 509 526 510 if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) == 527 511 SPA_CTRL_CIPH_ALG_AES) 528 - err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen); 512 + err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen); 529 513 else 530 - err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen); 514 + err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen); 531 515 532 516 if (err) 533 517 goto badkey; 534 518 535 - memcpy(ctx->hash_ctx, key, authkeylen); 536 - ctx->hash_key_len = authkeylen; 519 + memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen); 520 + ctx->hash_key_len = keys.authkeylen; 537 521 538 522 return 0; 539 523
+1 -1
drivers/crypto/sahara.c
··· 1058 1058 .driver = { 1059 1059 .name = SAHARA_NAME, 1060 1060 .owner = THIS_MODULE, 1061 - .of_match_table = of_match_ptr(sahara_dt_ids), 1061 + .of_match_table = sahara_dt_ids, 1062 1062 }, 1063 1063 .id_table = sahara_platform_ids, 1064 1064 };
+8 -27
drivers/crypto/talitos.c
··· 673 673 const u8 *key, unsigned int keylen) 674 674 { 675 675 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 676 - struct rtattr *rta = (void *)key; 677 - struct crypto_authenc_key_param *param; 678 - unsigned int authkeylen; 679 - unsigned int enckeylen; 676 + struct crypto_authenc_keys keys; 680 677 681 - if (!RTA_OK(rta, keylen)) 678 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 682 679 goto badkey; 683 680 684 - if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 681 + if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) 685 682 goto badkey; 686 683 687 - if (RTA_PAYLOAD(rta) < sizeof(*param)) 688 - goto badkey; 684 + memcpy(ctx->key, keys.authkey, keys.authkeylen); 685 + memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); 689 686 690 - param = RTA_DATA(rta); 691 - enckeylen = be32_to_cpu(param->enckeylen); 692 - 693 - key += RTA_ALIGN(rta->rta_len); 694 - keylen -= RTA_ALIGN(rta->rta_len); 695 - 696 - if (keylen < enckeylen) 697 - goto badkey; 698 - 699 - authkeylen = keylen - enckeylen; 700 - 701 - if (keylen > TALITOS_MAX_KEY_SIZE) 702 - goto badkey; 703 - 704 - memcpy(&ctx->key, key, keylen); 705 - 706 - ctx->keylen = keylen; 707 - ctx->enckeylen = enckeylen; 708 - ctx->authkeylen = authkeylen; 687 + ctx->keylen = keys.authkeylen + keys.enckeylen; 688 + ctx->enckeylen = keys.enckeylen; 689 + ctx->authkeylen = keys.authkeylen; 709 690 710 691 return 0; 711 692
+8 -18
drivers/crypto/tegra-aes.c
··· 27 27 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 28 28 */ 29 29 30 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 + 30 32 #include <linux/module.h> 31 33 #include <linux/init.h> 32 34 #include <linux/errno.h> ··· 200 198 static void aes_workqueue_handler(struct work_struct *work); 201 199 static DECLARE_WORK(aes_work, aes_workqueue_handler); 202 200 static struct workqueue_struct *aes_wq; 203 - 204 - extern unsigned long long tegra_chip_uid(void); 205 201 206 202 static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset) 207 203 { ··· 713 713 struct tegra_aes_dev *dd = aes_dev; 714 714 struct tegra_aes_ctx *ctx = &rng_ctx; 715 715 struct tegra_aes_slot *key_slot; 716 - struct timespec ts; 717 716 int ret = 0; 718 - u64 nsec, tmp[2]; 717 + u8 tmp[16]; /* 16 bytes = 128 bits of entropy */ 719 718 u8 *dt; 720 719 721 720 if (!ctx || !dd) { 722 - dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n", 721 + pr_err("ctx=0x%x, dd=0x%x\n", 723 722 (unsigned int)ctx, (unsigned int)dd); 724 723 return -EINVAL; 725 724 } ··· 777 778 if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) { 778 779 dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128; 779 780 } else { 780 - getnstimeofday(&ts); 781 - nsec = timespec_to_ns(&ts); 782 - do_div(nsec, 1000); 783 - nsec ^= dd->ctr << 56; 784 - dd->ctr++; 785 - tmp[0] = nsec; 786 - tmp[1] = tegra_chip_uid(); 787 - dt = (u8 *)tmp; 781 + get_random_bytes(tmp, sizeof(tmp)); 782 + dt = tmp; 788 783 } 789 784 memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ); 790 785 ··· 797 804 return 0; 798 805 } 799 806 800 - void tegra_aes_cra_exit(struct crypto_tfm *tfm) 807 + static void tegra_aes_cra_exit(struct crypto_tfm *tfm) 801 808 { 802 809 struct tegra_aes_ctx *ctx = 803 810 crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm); ··· 917 924 } 918 925 919 926 /* Initialize the vde clock */ 920 - dd->aes_clk = clk_get(dev, "vde"); 927 + dd->aes_clk = devm_clk_get(dev, "vde"); 921 928 if (IS_ERR(dd->aes_clk)) { 922 929 dev_err(dev, 
"iclock intialization failed.\n"); 923 930 err = -ENODEV; ··· 1026 1033 if (dd->buf_out) 1027 1034 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, 1028 1035 dd->buf_out, dd->dma_buf_out); 1029 - if (!IS_ERR(dd->aes_clk)) 1030 - clk_put(dd->aes_clk); 1031 1036 if (aes_wq) 1032 1037 destroy_workqueue(aes_wq); 1033 1038 spin_lock(&list_lock); ··· 1059 1068 dd->buf_in, dd->dma_buf_in); 1060 1069 dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES, 1061 1070 dd->buf_out, dd->dma_buf_out); 1062 - clk_put(dd->aes_clk); 1063 1071 aes_dev = NULL; 1064 1072 1065 1073 return 0;
+14
include/asm-generic/simd.h
··· 1 + 2 + #include <linux/hardirq.h> 3 + 4 + /* 5 + * may_use_simd - whether it is allowable at this time to issue SIMD 6 + * instructions or access the SIMD register file 7 + * 8 + * As architectures typically don't preserve the SIMD register file when 9 + * taking an interrupt, !in_interrupt() should be a reasonable default. 10 + */ 11 + static __must_check inline bool may_use_simd(void) 12 + { 13 + return !in_interrupt(); 14 + }
+17 -1
include/crypto/algapi.h
··· 386 386 return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC; 387 387 } 388 388 389 - #endif /* _CRYPTO_ALGAPI_H */ 389 + noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size); 390 390 391 + /** 392 + * crypto_memneq - Compare two areas of memory without leaking 393 + * timing information. 394 + * 395 + * @a: One area of memory 396 + * @b: Another area of memory 397 + * @size: The size of the area. 398 + * 399 + * Returns 0 when data is equal, 1 otherwise. 400 + */ 401 + static inline int crypto_memneq(const void *a, const void *b, size_t size) 402 + { 403 + return __crypto_memneq(a, b, size) != 0UL ? 1 : 0; 404 + } 405 + 406 + #endif /* _CRYPTO_ALGAPI_H */
+11 -1
include/crypto/authenc.h
··· 23 23 __be32 enckeylen; 24 24 }; 25 25 26 - #endif /* _CRYPTO_AUTHENC_H */ 26 + struct crypto_authenc_keys { 27 + const u8 *authkey; 28 + const u8 *enckey; 27 29 30 + unsigned int authkeylen; 31 + unsigned int enckeylen; 32 + }; 33 + 34 + int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, 35 + unsigned int keylen); 36 + 37 + #endif /* _CRYPTO_AUTHENC_H */
+1 -2
include/linux/padata.h
··· 129 129 struct padata_serial_queue __percpu *squeue; 130 130 atomic_t reorder_objects; 131 131 atomic_t refcnt; 132 + atomic_t seq_nr; 132 133 struct padata_cpumask cpumask; 133 134 spinlock_t lock ____cacheline_aligned; 134 - spinlock_t seq_lock; 135 - unsigned int seq_nr; 136 135 unsigned int processed; 137 136 struct timer_list timer; 138 137 };
+4 -5
kernel/padata.c
··· 46 46 47 47 static int padata_cpu_hash(struct parallel_data *pd) 48 48 { 49 + unsigned int seq_nr; 49 50 int cpu_index; 50 51 51 52 /* ··· 54 53 * seq_nr mod. number of cpus in use. 55 54 */ 56 55 57 - spin_lock(&pd->seq_lock); 58 - cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu); 59 - pd->seq_nr++; 60 - spin_unlock(&pd->seq_lock); 56 + seq_nr = atomic_inc_return(&pd->seq_nr); 57 + cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); 61 58 62 59 return padata_index_to_cpu(pd, cpu_index); 63 60 } ··· 428 429 padata_init_pqueues(pd); 429 430 padata_init_squeues(pd); 430 431 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); 431 - pd->seq_nr = 0; 432 + atomic_set(&pd->seq_nr, -1); 432 433 atomic_set(&pd->reorder_objects, 0); 433 434 atomic_set(&pd->refcnt, 0); 434 435 pd->pinst = pinst;