Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
"API:
- Enforce the setting of keys for keyed aead/hash/skcipher
algorithms.
- Add multibuf speed tests in tcrypt.

Algorithms:
- Improve performance of sha3-generic.
- Add native sha512 support on arm64.
- Add v8.2 Crypto Extensions version of sha3/sm3 on arm64.
- Avoid hmac nesting by requiring underlying algorithm to be unkeyed.
- Add cryptd_max_cpu_qlen module parameter to cryptd.

Drivers:
- Add support for EIP97 engine in inside-secure.
- Add inline IPsec support to chelsio.
- Add RevB core support to crypto4xx.
- Fix AEAD ICV check in crypto4xx.
- Add stm32 crypto driver.
- Add support for BCM63xx platforms in bcm2835 and remove bcm63xx.
- Add Derived Key Protocol (DKP) support in caam.
- Add Samsung Exynos True RNG driver.
- Add support for Exynos5250+ SoCs in exynos PRNG driver"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (166 commits)
crypto: picoxcell - Fix error handling in spacc_probe()
crypto: arm64/sha512 - fix/improve new v8.2 Crypto Extensions code
crypto: arm64/sm3 - new v8.2 Crypto Extensions implementation
crypto: arm64/sha3 - new v8.2 Crypto Extensions implementation
crypto: testmgr - add new testcases for sha3
crypto: sha3-generic - export init/update/final routines
crypto: sha3-generic - simplify code
crypto: sha3-generic - rewrite KECCAK transform to help the compiler optimize
crypto: sha3-generic - fixes for alignment and big endian operation
crypto: aesni - handle zero length dst buffer
crypto: artpec6 - remove select on non-existing CRYPTO_SHA384
hwrng: bcm2835 - Remove redundant dev_err call in bcm2835_rng_probe()
crypto: stm32 - remove redundant dev_err call in stm32_cryp_probe()
crypto: axis - remove unnecessary platform_get_resource() error check
crypto: testmgr - test misuse of result in ahash
crypto: inside-secure - make function safexcel_try_push_requests static
crypto: aes-generic - fix aes-generic regression on powerpc
crypto: chelsio - Fix indentation warning
crypto: arm64/sha1-ce - get rid of literal pool
crypto: arm64/sha2-ce - move the round constant table to .rodata section
...

+7494 -2705
+22
Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
··· 1 + Arm TrustZone CryptoCell cryptographic engine 2 + 3 + Required properties: 4 + - compatible: Should be "arm,cryptocell-712-ree". 5 + - reg: Base physical address of the engine and length of memory mapped region. 6 + - interrupts: Interrupt number for the device. 7 + 8 + Optional properties: 9 + - interrupt-parent: The phandle for the interrupt controller that services 10 + interrupts for this device. 11 + - clocks: Reference to the crypto engine clock. 12 + - dma-coherent: Present if dma operations are coherent. 13 + 14 + Examples: 15 + 16 + arm_cc712: crypto@80000000 { 17 + compatible = "arm,cryptocell-712-ree"; 18 + interrupt-parent = <&intc>; 19 + interrupts = < 0 30 4 >; 20 + reg = < 0x80000000 0x10000 >; 21 + 22 + };
+2 -1
Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
··· 1 1 Inside Secure SafeXcel cryptographic engine 2 2 3 3 Required properties: 4 - - compatible: Should be "inside-secure,safexcel-eip197". 4 + - compatible: Should be "inside-secure,safexcel-eip197" or 5 + "inside-secure,safexcel-eip97". 5 6 - reg: Base physical address of the engine and length of memory mapped region. 6 7 - interrupts: Interrupt numbers for the rings and engine. 7 8 - interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
+3 -1
Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
··· 2 2 3 3 Required properties: 4 4 5 - - compatible : Should be "samsung,exynos4-rng". 5 + - compatible : One of: 6 + - "samsung,exynos4-rng" for Exynos4210 and Exynos4412 7 + - "samsung,exynos5250-prng" for Exynos5250+ 6 8 - reg : Specifies base physical address and size of the registers map. 7 9 - clocks : Phandle to clock-controller plus clock-specifier pair. 8 10 - clock-names : "secss" as a clock name.
+19
Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
··· 1 + * STMicroelectronics STM32 CRYP 2 + 3 + Required properties: 4 + - compatible: Should be "st,stm32f756-cryp". 5 + - reg: The address and length of the peripheral registers space 6 + - clocks: The input clock of the CRYP instance 7 + - interrupts: The CRYP interrupt 8 + 9 + Optional properties: 10 + - resets: The input reset of the CRYP instance 11 + 12 + Example: 13 + crypto@50060000 { 14 + compatible = "st,stm32f756-cryp"; 15 + reg = <0x50060000 0x400>; 16 + interrupts = <79>; 17 + clocks = <&rcc 0 STM32F7_AHB2_CLOCK(CRYP)>; 18 + resets = <&rcc STM32F7_AHB2_RESET(CRYP)>; 19 + };
+19 -3
Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
··· 1 - BCM2835 Random number generator 1 + BCM2835/6368 Random number generator 2 2 3 3 Required properties: 4 4 5 - - compatible : should be "brcm,bcm2835-rng" or "brcm,bcm-nsp-rng" or 6 - "brcm,bcm5301x-rng" 5 + - compatible : should be one of 6 + "brcm,bcm2835-rng" 7 + "brcm,bcm-nsp-rng" 8 + "brcm,bcm5301x-rng" or 9 + "brcm,bcm6368-rng" 7 10 - reg : Specifies base physical address and size of the registers. 11 + 12 + Optional properties: 13 + 14 + - clocks : phandle to clock-controller plus clock-specifier pair 15 + - clock-names : "ipsec" as a clock name 8 16 9 17 Example: 10 18 ··· 24 16 rng@18033000 { 25 17 compatible = "brcm,bcm-nsp-rng"; 26 18 reg = <0x18033000 0x14>; 19 + }; 20 + 21 + random: rng@10004180 { 22 + compatible = "brcm,bcm6368-rng"; 23 + reg = <0x10004180 0x14>; 24 + 25 + clocks = <&periph_clk 18>; 26 + clock-names = "ipsec"; 27 27 };
-17
Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
··· 1 - BCM6368 Random number generator 2 - 3 - Required properties: 4 - 5 - - compatible : should be "brcm,bcm6368-rng" 6 - - reg : Specifies base physical address and size of the registers 7 - - clocks : phandle to clock-controller plus clock-specifier pair 8 - - clock-names : "ipsec" as a clock name 9 - 10 - Example: 11 - random: rng@10004180 { 12 - compatible = "brcm,bcm6368-rng"; 13 - reg = <0x10004180 0x14>; 14 - 15 - clocks = <&periph_clk 18>; 16 - clock-names = "ipsec"; 17 - };
+8
MAINTAINERS
··· 11964 11964 F: drivers/crypto/exynos-rng.c 11965 11965 F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt 11966 11966 11967 + SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER 11968 + M: Łukasz Stelmach <l.stelmach@samsung.com> 11969 + L: linux-samsung-soc@vger.kernel.org 11970 + S: Maintained 11971 + F: drivers/char/hw_random/exynos-trng.c 11972 + F: Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt 11973 + 11967 11974 SAMSUNG FRAMEBUFFER DRIVER 11968 11975 M: Jingoo Han <jingoohan1@gmail.com> 11969 11976 L: linux-fbdev@vger.kernel.org ··· 12033 12026 SAMSUNG S5P Security SubSystem (SSS) DRIVER 12034 12027 M: Krzysztof Kozlowski <krzk@kernel.org> 12035 12028 M: Vladimir Zapolskiy <vz@mleia.com> 12029 + M: Kamil Konieczny <k.konieczny@partner.samsung.com> 12036 12030 L: linux-crypto@vger.kernel.org 12037 12031 L: linux-samsung-soc@vger.kernel.org 12038 12032 S: Maintained
+4 -6
arch/arm/crypto/aes-neonbs-glue.c
··· 181 181 struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm); 182 182 183 183 ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0); 184 - if (IS_ERR(ctx->enc_tfm)) 185 - return PTR_ERR(ctx->enc_tfm); 186 - return 0; 184 + 185 + return PTR_ERR_OR_ZERO(ctx->enc_tfm); 187 186 } 188 187 189 188 static void cbc_exit(struct crypto_tfm *tfm) ··· 257 258 struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); 258 259 259 260 ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0); 260 - if (IS_ERR(ctx->tweak_tfm)) 261 - return PTR_ERR(ctx->tweak_tfm); 262 - return 0; 261 + 262 + return PTR_ERR_OR_ZERO(ctx->tweak_tfm); 263 263 } 264 264 265 265 static void xts_exit(struct crypto_tfm *tfm)
+2
arch/arm/crypto/crc32-ce-glue.c
··· 188 188 .base.cra_name = "crc32", 189 189 .base.cra_driver_name = "crc32-arm-ce", 190 190 .base.cra_priority = 200, 191 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 191 192 .base.cra_blocksize = 1, 192 193 .base.cra_module = THIS_MODULE, 193 194 }, { ··· 204 203 .base.cra_name = "crc32c", 205 204 .base.cra_driver_name = "crc32c-arm-ce", 206 205 .base.cra_priority = 200, 206 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 207 207 .base.cra_blocksize = 1, 208 208 .base.cra_module = THIS_MODULE, 209 209 } };
+18
arch/arm64/crypto/Kconfig
··· 29 29 select CRYPTO_HASH 30 30 select CRYPTO_SHA256_ARM64 31 31 32 + config CRYPTO_SHA512_ARM64_CE 33 + tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)" 34 + depends on KERNEL_MODE_NEON 35 + select CRYPTO_HASH 36 + select CRYPTO_SHA512_ARM64 37 + 38 + config CRYPTO_SHA3_ARM64 39 + tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)" 40 + depends on KERNEL_MODE_NEON 41 + select CRYPTO_HASH 42 + select CRYPTO_SHA3 43 + 44 + config CRYPTO_SM3_ARM64_CE 45 + tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)" 46 + depends on KERNEL_MODE_NEON 47 + select CRYPTO_HASH 48 + select CRYPTO_SM3 49 + 32 50 config CRYPTO_GHASH_ARM64_CE 33 51 tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" 34 52 depends on KERNEL_MODE_NEON
+10 -1
arch/arm64/crypto/Makefile
··· 14 14 obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o 15 15 sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o 16 16 17 + obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o 18 + sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o 19 + 20 + obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o 21 + sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o 22 + 23 + obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o 24 + sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o 25 + 17 26 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o 18 27 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o 19 28 ··· 33 24 crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o 34 25 35 26 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o 36 - CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto 27 + aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o 37 28 38 29 obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o 39 30 aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
+12 -103
arch/arm64/crypto/aes-ce-cipher.c arch/arm64/crypto/aes-ce-glue.c
··· 29 29 u8 b[AES_BLOCK_SIZE]; 30 30 }; 31 31 32 + asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 33 + asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds); 34 + 35 + asmlinkage u32 __aes_ce_sub(u32 l); 36 + asmlinkage void __aes_ce_invert(struct aes_block *out, 37 + const struct aes_block *in); 38 + 32 39 static int num_rounds(struct crypto_aes_ctx *ctx) 33 40 { 34 41 /* ··· 51 44 static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) 52 45 { 53 46 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); 54 - struct aes_block *out = (struct aes_block *)dst; 55 - struct aes_block const *in = (struct aes_block *)src; 56 - void *dummy0; 57 - int dummy1; 58 47 59 48 if (!may_use_simd()) { 60 49 __aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); ··· 58 55 } 59 56 60 57 kernel_neon_begin(); 61 - 62 - __asm__(" ld1 {v0.16b}, %[in] ;" 63 - " ld1 {v1.4s}, [%[key]], #16 ;" 64 - " cmp %w[rounds], #10 ;" 65 - " bmi 0f ;" 66 - " bne 3f ;" 67 - " mov v3.16b, v1.16b ;" 68 - " b 2f ;" 69 - "0: mov v2.16b, v1.16b ;" 70 - " ld1 {v3.4s}, [%[key]], #16 ;" 71 - "1: aese v0.16b, v2.16b ;" 72 - " aesmc v0.16b, v0.16b ;" 73 - "2: ld1 {v1.4s}, [%[key]], #16 ;" 74 - " aese v0.16b, v3.16b ;" 75 - " aesmc v0.16b, v0.16b ;" 76 - "3: ld1 {v2.4s}, [%[key]], #16 ;" 77 - " subs %w[rounds], %w[rounds], #3 ;" 78 - " aese v0.16b, v1.16b ;" 79 - " aesmc v0.16b, v0.16b ;" 80 - " ld1 {v3.4s}, [%[key]], #16 ;" 81 - " bpl 1b ;" 82 - " aese v0.16b, v2.16b ;" 83 - " eor v0.16b, v0.16b, v3.16b ;" 84 - " st1 {v0.16b}, %[out] ;" 85 - 86 - : [out] "=Q"(*out), 87 - [key] "=r"(dummy0), 88 - [rounds] "=r"(dummy1) 89 - : [in] "Q"(*in), 90 - "1"(ctx->key_enc), 91 - "2"(num_rounds(ctx) - 2) 92 - : "cc"); 93 - 58 + __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx)); 94 59 kernel_neon_end(); 95 60 } 96 61 97 62 static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[]) 98 63 { 99 64 struct crypto_aes_ctx 
*ctx = crypto_tfm_ctx(tfm); 100 - struct aes_block *out = (struct aes_block *)dst; 101 - struct aes_block const *in = (struct aes_block *)src; 102 - void *dummy0; 103 - int dummy1; 104 65 105 66 if (!may_use_simd()) { 106 67 __aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); ··· 72 105 } 73 106 74 107 kernel_neon_begin(); 75 - 76 - __asm__(" ld1 {v0.16b}, %[in] ;" 77 - " ld1 {v1.4s}, [%[key]], #16 ;" 78 - " cmp %w[rounds], #10 ;" 79 - " bmi 0f ;" 80 - " bne 3f ;" 81 - " mov v3.16b, v1.16b ;" 82 - " b 2f ;" 83 - "0: mov v2.16b, v1.16b ;" 84 - " ld1 {v3.4s}, [%[key]], #16 ;" 85 - "1: aesd v0.16b, v2.16b ;" 86 - " aesimc v0.16b, v0.16b ;" 87 - "2: ld1 {v1.4s}, [%[key]], #16 ;" 88 - " aesd v0.16b, v3.16b ;" 89 - " aesimc v0.16b, v0.16b ;" 90 - "3: ld1 {v2.4s}, [%[key]], #16 ;" 91 - " subs %w[rounds], %w[rounds], #3 ;" 92 - " aesd v0.16b, v1.16b ;" 93 - " aesimc v0.16b, v0.16b ;" 94 - " ld1 {v3.4s}, [%[key]], #16 ;" 95 - " bpl 1b ;" 96 - " aesd v0.16b, v2.16b ;" 97 - " eor v0.16b, v0.16b, v3.16b ;" 98 - " st1 {v0.16b}, %[out] ;" 99 - 100 - : [out] "=Q"(*out), 101 - [key] "=r"(dummy0), 102 - [rounds] "=r"(dummy1) 103 - : [in] "Q"(*in), 104 - "1"(ctx->key_dec), 105 - "2"(num_rounds(ctx) - 2) 106 - : "cc"); 107 - 108 + __aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx)); 108 109 kernel_neon_end(); 109 - } 110 - 111 - /* 112 - * aes_sub() - use the aese instruction to perform the AES sbox substitution 113 - * on each byte in 'input' 114 - */ 115 - static u32 aes_sub(u32 input) 116 - { 117 - u32 ret; 118 - 119 - __asm__("dup v1.4s, %w[in] ;" 120 - "movi v0.16b, #0 ;" 121 - "aese v0.16b, v1.16b ;" 122 - "umov %w[out], v0.4s[0] ;" 123 - 124 - : [out] "=r"(ret) 125 - : [in] "r"(input) 126 - : "v0","v1"); 127 - 128 - return ret; 129 110 } 130 111 131 112 int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, ··· 104 189 u32 *rki = ctx->key_enc + (i * kwords); 105 190 u32 *rko = rki + kwords; 106 191 107 - rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ 
rcon[i] ^ rki[0]; 192 + rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0]; 108 193 rko[1] = rko[0] ^ rki[1]; 109 194 rko[2] = rko[1] ^ rki[2]; 110 195 rko[3] = rko[2] ^ rki[3]; ··· 117 202 } else if (key_len == AES_KEYSIZE_256) { 118 203 if (i >= 6) 119 204 break; 120 - rko[4] = aes_sub(rko[3]) ^ rki[4]; 205 + rko[4] = __aes_ce_sub(rko[3]) ^ rki[4]; 121 206 rko[5] = rko[4] ^ rki[5]; 122 207 rko[6] = rko[5] ^ rki[6]; 123 208 rko[7] = rko[6] ^ rki[7]; ··· 136 221 137 222 key_dec[0] = key_enc[j]; 138 223 for (i = 1, j--; j > 0; i++, j--) 139 - __asm__("ld1 {v0.4s}, %[in] ;" 140 - "aesimc v1.16b, v0.16b ;" 141 - "st1 {v1.4s}, %[out] ;" 142 - 143 - : [out] "=Q"(key_dec[i]) 144 - : [in] "Q"(key_enc[j]) 145 - : "v0","v1"); 224 + __aes_ce_invert(key_dec + i, key_enc + j); 146 225 key_dec[i] = key_enc[0]; 147 226 148 227 kernel_neon_end();
+87
arch/arm64/crypto/aes-ce-core.S
··· 1 + /* 2 + * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #include <linux/linkage.h> 10 + #include <asm/assembler.h> 11 + 12 + .arch armv8-a+crypto 13 + 14 + ENTRY(__aes_ce_encrypt) 15 + sub w3, w3, #2 16 + ld1 {v0.16b}, [x2] 17 + ld1 {v1.4s}, [x0], #16 18 + cmp w3, #10 19 + bmi 0f 20 + bne 3f 21 + mov v3.16b, v1.16b 22 + b 2f 23 + 0: mov v2.16b, v1.16b 24 + ld1 {v3.4s}, [x0], #16 25 + 1: aese v0.16b, v2.16b 26 + aesmc v0.16b, v0.16b 27 + 2: ld1 {v1.4s}, [x0], #16 28 + aese v0.16b, v3.16b 29 + aesmc v0.16b, v0.16b 30 + 3: ld1 {v2.4s}, [x0], #16 31 + subs w3, w3, #3 32 + aese v0.16b, v1.16b 33 + aesmc v0.16b, v0.16b 34 + ld1 {v3.4s}, [x0], #16 35 + bpl 1b 36 + aese v0.16b, v2.16b 37 + eor v0.16b, v0.16b, v3.16b 38 + st1 {v0.16b}, [x1] 39 + ret 40 + ENDPROC(__aes_ce_encrypt) 41 + 42 + ENTRY(__aes_ce_decrypt) 43 + sub w3, w3, #2 44 + ld1 {v0.16b}, [x2] 45 + ld1 {v1.4s}, [x0], #16 46 + cmp w3, #10 47 + bmi 0f 48 + bne 3f 49 + mov v3.16b, v1.16b 50 + b 2f 51 + 0: mov v2.16b, v1.16b 52 + ld1 {v3.4s}, [x0], #16 53 + 1: aesd v0.16b, v2.16b 54 + aesimc v0.16b, v0.16b 55 + 2: ld1 {v1.4s}, [x0], #16 56 + aesd v0.16b, v3.16b 57 + aesimc v0.16b, v0.16b 58 + 3: ld1 {v2.4s}, [x0], #16 59 + subs w3, w3, #3 60 + aesd v0.16b, v1.16b 61 + aesimc v0.16b, v0.16b 62 + ld1 {v3.4s}, [x0], #16 63 + bpl 1b 64 + aesd v0.16b, v2.16b 65 + eor v0.16b, v0.16b, v3.16b 66 + st1 {v0.16b}, [x1] 67 + ret 68 + ENDPROC(__aes_ce_decrypt) 69 + 70 + /* 71 + * __aes_ce_sub() - use the aese instruction to perform the AES sbox 72 + * substitution on each byte in 'input' 73 + */ 74 + ENTRY(__aes_ce_sub) 75 + dup v1.4s, w0 76 + movi v0.16b, #0 77 + aese v0.16b, v1.16b 78 + umov w0, v0.s[0] 79 + ret 80 + ENDPROC(__aes_ce_sub) 81 + 82 + ENTRY(__aes_ce_invert) 83 + ld1 
{v0.4s}, [x1] 84 + aesimc v1.16b, v0.16b 85 + st1 {v1.4s}, [x0] 86 + ret 87 + ENDPROC(__aes_ce_invert)
+10 -9
arch/arm64/crypto/aes-cipher-core.S
··· 125 125 ret 126 126 .endm 127 127 128 + ENTRY(__aes_arm64_encrypt) 129 + do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 130 + ENDPROC(__aes_arm64_encrypt) 131 + 132 + .align 5 133 + ENTRY(__aes_arm64_decrypt) 134 + do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0 135 + ENDPROC(__aes_arm64_decrypt) 136 + 137 + .section ".rodata", "a" 128 138 .align L1_CACHE_SHIFT 129 139 .type __aes_arm64_inverse_sbox, %object 130 140 __aes_arm64_inverse_sbox: ··· 171 161 .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26 172 162 .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d 173 163 .size __aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox 174 - 175 - ENTRY(__aes_arm64_encrypt) 176 - do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2 177 - ENDPROC(__aes_arm64_encrypt) 178 - 179 - .align 5 180 - ENTRY(__aes_arm64_decrypt) 181 - do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0 182 - ENDPROC(__aes_arm64_decrypt)
+1
arch/arm64/crypto/aes-glue.c
··· 665 665 666 666 unregister_simds: 667 667 aes_exit(); 668 + return err; 668 669 unregister_ciphers: 669 670 crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs)); 670 671 return err;
+4 -4
arch/arm64/crypto/aes-neon.S
··· 32 32 33 33 /* preload the entire Sbox */ 34 34 .macro prepare, sbox, shiftrows, temp 35 - adr \temp, \sbox 36 35 movi v12.16b, #0x1b 37 - ldr q13, \shiftrows 38 - ldr q14, .Lror32by8 36 + ldr_l q13, \shiftrows, \temp 37 + ldr_l q14, .Lror32by8, \temp 38 + adr_l \temp, \sbox 39 39 ld1 {v16.16b-v19.16b}, [\temp], #64 40 40 ld1 {v20.16b-v23.16b}, [\temp], #64 41 41 ld1 {v24.16b-v27.16b}, [\temp], #64 ··· 272 272 273 273 #include "aes-modes.S" 274 274 275 - .text 275 + .section ".rodata", "a" 276 276 .align 6 277 277 .LForward_Sbox: 278 278 .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+4 -3
arch/arm64/crypto/crc32-ce-core.S
··· 50 50 #include <linux/linkage.h> 51 51 #include <asm/assembler.h> 52 52 53 - .text 53 + .section ".rodata", "a" 54 54 .align 6 55 55 .cpu generic+crypto+crc 56 56 ··· 115 115 * uint crc32_pmull_le(unsigned char const *buffer, 116 116 * size_t len, uint crc32) 117 117 */ 118 + .text 118 119 ENTRY(crc32_pmull_le) 119 - adr x3, .Lcrc32_constants 120 + adr_l x3, .Lcrc32_constants 120 121 b 0f 121 122 122 123 ENTRY(crc32c_pmull_le) 123 - adr x3, .Lcrc32c_constants 124 + adr_l x3, .Lcrc32c_constants 124 125 125 126 0: bic LEN, LEN, #15 126 127 ld1 {v1.16b-v4.16b}, [BUF], #0x40
+2
arch/arm64/crypto/crc32-ce-glue.c
··· 185 185 .base.cra_name = "crc32", 186 186 .base.cra_driver_name = "crc32-arm64-ce", 187 187 .base.cra_priority = 200, 188 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 188 189 .base.cra_blocksize = 1, 189 190 .base.cra_module = THIS_MODULE, 190 191 }, { ··· 201 200 .base.cra_name = "crc32c", 202 201 .base.cra_driver_name = "crc32c-arm64-ce", 203 202 .base.cra_priority = 200, 203 + .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 204 204 .base.cra_blocksize = 1, 205 205 .base.cra_module = THIS_MODULE, 206 206 } };
+9 -8
arch/arm64/crypto/crct10dif-ce-core.S
··· 128 128 // XOR the initial_crc value 129 129 eor v0.16b, v0.16b, v10.16b 130 130 131 - ldr q10, rk3 // xmm10 has rk3 and rk4 131 + ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 132 132 // type of pmull instruction 133 133 // will determine which constant to use 134 134 ··· 184 184 // fold the 8 vector registers to 1 vector register with different 185 185 // constants 186 186 187 - ldr q10, rk9 187 + ldr_l q10, rk9, x8 188 188 189 189 .macro fold16, reg, rk 190 190 pmull v8.1q, \reg\().1d, v10.1d 191 191 pmull2 \reg\().1q, \reg\().2d, v10.2d 192 192 .ifnb \rk 193 - ldr q10, \rk 193 + ldr_l q10, \rk, x8 194 194 .endif 195 195 eor v7.16b, v7.16b, v8.16b 196 196 eor v7.16b, v7.16b, \reg\().16b ··· 251 251 252 252 // get rid of the extra data that was loaded before 253 253 // load the shift constant 254 - adr x4, tbl_shf_table + 16 254 + adr_l x4, tbl_shf_table + 16 255 255 sub x4, x4, arg3 256 256 ld1 {v0.16b}, [x4] 257 257 ··· 275 275 276 276 _128_done: 277 277 // compute crc of a 128-bit value 278 - ldr q10, rk5 // rk5 and rk6 in xmm10 278 + ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 279 279 280 280 // 64b fold 281 281 ext v0.16b, vzr.16b, v7.16b, #8 ··· 291 291 292 292 // barrett reduction 293 293 _barrett: 294 - ldr q10, rk7 294 + ldr_l q10, rk7, x8 295 295 mov v0.d[0], v7.d[1] 296 296 297 297 pmull v0.1q, v0.1d, v10.1d ··· 321 321 b.eq _128_done // exactly 16 left 322 322 b.lt _less_than_16_left 323 323 324 - ldr q10, rk1 // rk1 and rk2 in xmm10 324 + ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 325 325 326 326 // update the counter. 
subtract 32 instead of 16 to save one 327 327 // instruction from the loop ··· 333 333 334 334 _less_than_16_left: 335 335 // shl r9, 4 336 - adr x0, tbl_shf_table + 16 336 + adr_l x0, tbl_shf_table + 16 337 337 sub x0, x0, arg3 338 338 ld1 {v0.16b}, [x0] 339 339 movi v9.16b, #0x80 ··· 345 345 // precomputed constants 346 346 // these constants are precomputed from the poly: 347 347 // 0x8bb70000 (0x8bb7 scaled to 32 bits) 348 + .section ".rodata", "a" 348 349 .align 4 349 350 // Q = 0x18BB70000 350 351 // rk1 = 2^(32*3) mod Q << 32
+9 -11
arch/arm64/crypto/sha1-ce-core.S
··· 58 58 sha1su1 v\s0\().4s, v\s3\().4s 59 59 .endm 60 60 61 - /* 62 - * The SHA1 round constants 63 - */ 64 - .align 4 65 - .Lsha1_rcon: 66 - .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 61 + .macro loadrc, k, val, tmp 62 + movz \tmp, :abs_g0_nc:\val 63 + movk \tmp, :abs_g1:\val 64 + dup \k, \tmp 65 + .endm 67 66 68 67 /* 69 68 * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, ··· 70 71 */ 71 72 ENTRY(sha1_ce_transform) 72 73 /* load round constants */ 73 - adr x6, .Lsha1_rcon 74 - ld1r {k0.4s}, [x6], #4 75 - ld1r {k1.4s}, [x6], #4 76 - ld1r {k2.4s}, [x6], #4 77 - ld1r {k3.4s}, [x6] 74 + loadrc k0.4s, 0x5a827999, w6 75 + loadrc k1.4s, 0x6ed9eba1, w6 76 + loadrc k2.4s, 0x8f1bbcdc, w6 77 + loadrc k3.4s, 0xca62c1d6, w6 78 78 79 79 /* load state */ 80 80 ld1 {dgav.4s}, [x0]
+3 -1
arch/arm64/crypto/sha2-ce-core.S
··· 53 53 /* 54 54 * The SHA-256 round constants 55 55 */ 56 + .section ".rodata", "a" 56 57 .align 4 57 58 .Lsha2_rcon: 58 59 .word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 ··· 77 76 * void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src, 78 77 * int blocks) 79 78 */ 79 + .text 80 80 ENTRY(sha2_ce_transform) 81 81 /* load round constants */ 82 - adr x8, .Lsha2_rcon 82 + adr_l x8, .Lsha2_rcon 83 83 ld1 { v0.4s- v3.4s}, [x8], #64 84 84 ld1 { v4.4s- v7.4s}, [x8], #64 85 85 ld1 { v8.4s-v11.4s}, [x8], #64
+210
arch/arm64/crypto/sha3-ce-core.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions 4 + * 5 + * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/linkage.h> 13 + #include <asm/assembler.h> 14 + 15 + .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 16 + .set .Lv\b\().2d, \b 17 + .set .Lv\b\().16b, \b 18 + .endr 19 + 20 + /* 21 + * ARMv8.2 Crypto Extensions instructions 22 + */ 23 + .macro eor3, rd, rn, rm, ra 24 + .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) 25 + .endm 26 + 27 + .macro rax1, rd, rn, rm 28 + .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16) 29 + .endm 30 + 31 + .macro bcax, rd, rn, rm, ra 32 + .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) 33 + .endm 34 + 35 + .macro xar, rd, rn, rm, imm6 36 + .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16) 37 + .endm 38 + 39 + /* 40 + * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size) 41 + */ 42 + .text 43 + ENTRY(sha3_ce_transform) 44 + /* load state */ 45 + add x8, x0, #32 46 + ld1 { v0.1d- v3.1d}, [x0] 47 + ld1 { v4.1d- v7.1d}, [x8], #32 48 + ld1 { v8.1d-v11.1d}, [x8], #32 49 + ld1 {v12.1d-v15.1d}, [x8], #32 50 + ld1 {v16.1d-v19.1d}, [x8], #32 51 + ld1 {v20.1d-v23.1d}, [x8], #32 52 + ld1 {v24.1d}, [x8] 53 + 54 + 0: sub w2, w2, #1 55 + mov w8, #24 56 + adr_l x9, .Lsha3_rcon 57 + 58 + /* load input */ 59 + ld1 {v25.8b-v28.8b}, [x1], #32 60 + ld1 {v29.8b-v31.8b}, [x1], #24 61 + eor v0.8b, v0.8b, v25.8b 62 + eor v1.8b, v1.8b, v26.8b 63 + eor v2.8b, v2.8b, v27.8b 64 + eor v3.8b, v3.8b, v28.8b 65 + eor v4.8b, v4.8b, v29.8b 66 + eor v5.8b, v5.8b, v30.8b 67 + eor v6.8b, v6.8b, v31.8b 68 + 69 + 
tbnz x3, #6, 2f // SHA3-512 70 + 71 + ld1 {v25.8b-v28.8b}, [x1], #32 72 + ld1 {v29.8b-v30.8b}, [x1], #16 73 + eor v7.8b, v7.8b, v25.8b 74 + eor v8.8b, v8.8b, v26.8b 75 + eor v9.8b, v9.8b, v27.8b 76 + eor v10.8b, v10.8b, v28.8b 77 + eor v11.8b, v11.8b, v29.8b 78 + eor v12.8b, v12.8b, v30.8b 79 + 80 + tbnz x3, #4, 1f // SHA3-384 or SHA3-224 81 + 82 + // SHA3-256 83 + ld1 {v25.8b-v28.8b}, [x1], #32 84 + eor v13.8b, v13.8b, v25.8b 85 + eor v14.8b, v14.8b, v26.8b 86 + eor v15.8b, v15.8b, v27.8b 87 + eor v16.8b, v16.8b, v28.8b 88 + b 3f 89 + 90 + 1: tbz x3, #2, 3f // bit 2 cleared? SHA-384 91 + 92 + // SHA3-224 93 + ld1 {v25.8b-v28.8b}, [x1], #32 94 + ld1 {v29.8b}, [x1], #8 95 + eor v13.8b, v13.8b, v25.8b 96 + eor v14.8b, v14.8b, v26.8b 97 + eor v15.8b, v15.8b, v27.8b 98 + eor v16.8b, v16.8b, v28.8b 99 + eor v17.8b, v17.8b, v29.8b 100 + b 3f 101 + 102 + // SHA3-512 103 + 2: ld1 {v25.8b-v26.8b}, [x1], #16 104 + eor v7.8b, v7.8b, v25.8b 105 + eor v8.8b, v8.8b, v26.8b 106 + 107 + 3: sub w8, w8, #1 108 + 109 + eor3 v29.16b, v4.16b, v9.16b, v14.16b 110 + eor3 v26.16b, v1.16b, v6.16b, v11.16b 111 + eor3 v28.16b, v3.16b, v8.16b, v13.16b 112 + eor3 v25.16b, v0.16b, v5.16b, v10.16b 113 + eor3 v27.16b, v2.16b, v7.16b, v12.16b 114 + eor3 v29.16b, v29.16b, v19.16b, v24.16b 115 + eor3 v26.16b, v26.16b, v16.16b, v21.16b 116 + eor3 v28.16b, v28.16b, v18.16b, v23.16b 117 + eor3 v25.16b, v25.16b, v15.16b, v20.16b 118 + eor3 v27.16b, v27.16b, v17.16b, v22.16b 119 + 120 + rax1 v30.2d, v29.2d, v26.2d // bc[0] 121 + rax1 v26.2d, v26.2d, v28.2d // bc[2] 122 + rax1 v28.2d, v28.2d, v25.2d // bc[4] 123 + rax1 v25.2d, v25.2d, v27.2d // bc[1] 124 + rax1 v27.2d, v27.2d, v29.2d // bc[3] 125 + 126 + eor v0.16b, v0.16b, v30.16b 127 + xar v29.2d, v1.2d, v25.2d, (64 - 1) 128 + xar v1.2d, v6.2d, v25.2d, (64 - 44) 129 + xar v6.2d, v9.2d, v28.2d, (64 - 20) 130 + xar v9.2d, v22.2d, v26.2d, (64 - 61) 131 + xar v22.2d, v14.2d, v28.2d, (64 - 39) 132 + xar v14.2d, v20.2d, v30.2d, (64 - 18) 133 + xar v31.2d, 
v2.2d, v26.2d, (64 - 62) 134 + xar v2.2d, v12.2d, v26.2d, (64 - 43) 135 + xar v12.2d, v13.2d, v27.2d, (64 - 25) 136 + xar v13.2d, v19.2d, v28.2d, (64 - 8) 137 + xar v19.2d, v23.2d, v27.2d, (64 - 56) 138 + xar v23.2d, v15.2d, v30.2d, (64 - 41) 139 + xar v15.2d, v4.2d, v28.2d, (64 - 27) 140 + xar v28.2d, v24.2d, v28.2d, (64 - 14) 141 + xar v24.2d, v21.2d, v25.2d, (64 - 2) 142 + xar v8.2d, v8.2d, v27.2d, (64 - 55) 143 + xar v4.2d, v16.2d, v25.2d, (64 - 45) 144 + xar v16.2d, v5.2d, v30.2d, (64 - 36) 145 + xar v5.2d, v3.2d, v27.2d, (64 - 28) 146 + xar v27.2d, v18.2d, v27.2d, (64 - 21) 147 + xar v3.2d, v17.2d, v26.2d, (64 - 15) 148 + xar v25.2d, v11.2d, v25.2d, (64 - 10) 149 + xar v26.2d, v7.2d, v26.2d, (64 - 6) 150 + xar v30.2d, v10.2d, v30.2d, (64 - 3) 151 + 152 + bcax v20.16b, v31.16b, v22.16b, v8.16b 153 + bcax v21.16b, v8.16b, v23.16b, v22.16b 154 + bcax v22.16b, v22.16b, v24.16b, v23.16b 155 + bcax v23.16b, v23.16b, v31.16b, v24.16b 156 + bcax v24.16b, v24.16b, v8.16b, v31.16b 157 + 158 + ld1r {v31.2d}, [x9], #8 159 + 160 + bcax v17.16b, v25.16b, v19.16b, v3.16b 161 + bcax v18.16b, v3.16b, v15.16b, v19.16b 162 + bcax v19.16b, v19.16b, v16.16b, v15.16b 163 + bcax v15.16b, v15.16b, v25.16b, v16.16b 164 + bcax v16.16b, v16.16b, v3.16b, v25.16b 165 + 166 + bcax v10.16b, v29.16b, v12.16b, v26.16b 167 + bcax v11.16b, v26.16b, v13.16b, v12.16b 168 + bcax v12.16b, v12.16b, v14.16b, v13.16b 169 + bcax v13.16b, v13.16b, v29.16b, v14.16b 170 + bcax v14.16b, v14.16b, v26.16b, v29.16b 171 + 172 + bcax v7.16b, v30.16b, v9.16b, v4.16b 173 + bcax v8.16b, v4.16b, v5.16b, v9.16b 174 + bcax v9.16b, v9.16b, v6.16b, v5.16b 175 + bcax v5.16b, v5.16b, v30.16b, v6.16b 176 + bcax v6.16b, v6.16b, v4.16b, v30.16b 177 + 178 + bcax v3.16b, v27.16b, v0.16b, v28.16b 179 + bcax v4.16b, v28.16b, v1.16b, v0.16b 180 + bcax v0.16b, v0.16b, v2.16b, v1.16b 181 + bcax v1.16b, v1.16b, v27.16b, v2.16b 182 + bcax v2.16b, v2.16b, v28.16b, v27.16b 183 + 184 + eor v0.16b, v0.16b, v31.16b 185 + 186 + cbnz w8, 
3b 187 + cbnz w2, 0b 188 + 189 + /* save state */ 190 + st1 { v0.1d- v3.1d}, [x0], #32 191 + st1 { v4.1d- v7.1d}, [x0], #32 192 + st1 { v8.1d-v11.1d}, [x0], #32 193 + st1 {v12.1d-v15.1d}, [x0], #32 194 + st1 {v16.1d-v19.1d}, [x0], #32 195 + st1 {v20.1d-v23.1d}, [x0], #32 196 + st1 {v24.1d}, [x0] 197 + ret 198 + ENDPROC(sha3_ce_transform) 199 + 200 + .section ".rodata", "a" 201 + .align 8 202 + .Lsha3_rcon: 203 + .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a 204 + .quad 0x8000000080008000, 0x000000000000808b, 0x0000000080000001 205 + .quad 0x8000000080008081, 0x8000000000008009, 0x000000000000008a 206 + .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a 207 + .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089 208 + .quad 0x8000000000008003, 0x8000000000008002, 0x8000000000000080 209 + .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081 210 + .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
+161
arch/arm64/crypto/sha3-ce-glue.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * sha3-ce-glue.c - core SHA-3 transform using v8.2 Crypto Extensions 4 + * 5 + * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <asm/hwcap.h> 13 + #include <asm/neon.h> 14 + #include <asm/simd.h> 15 + #include <asm/unaligned.h> 16 + #include <crypto/internal/hash.h> 17 + #include <crypto/sha3.h> 18 + #include <linux/cpufeature.h> 19 + #include <linux/crypto.h> 20 + #include <linux/module.h> 21 + 22 + MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions"); 23 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 24 + MODULE_LICENSE("GPL v2"); 25 + 26 + asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks, 27 + int md_len); 28 + 29 + static int sha3_update(struct shash_desc *desc, const u8 *data, 30 + unsigned int len) 31 + { 32 + struct sha3_state *sctx = shash_desc_ctx(desc); 33 + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); 34 + 35 + if (!may_use_simd()) 36 + return crypto_sha3_update(desc, data, len); 37 + 38 + if ((sctx->partial + len) >= sctx->rsiz) { 39 + int blocks; 40 + 41 + if (sctx->partial) { 42 + int p = sctx->rsiz - sctx->partial; 43 + 44 + memcpy(sctx->buf + sctx->partial, data, p); 45 + kernel_neon_begin(); 46 + sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size); 47 + kernel_neon_end(); 48 + 49 + data += p; 50 + len -= p; 51 + sctx->partial = 0; 52 + } 53 + 54 + blocks = len / sctx->rsiz; 55 + len %= sctx->rsiz; 56 + 57 + if (blocks) { 58 + kernel_neon_begin(); 59 + sha3_ce_transform(sctx->st, data, blocks, digest_size); 60 + kernel_neon_end(); 61 + data += blocks * sctx->rsiz; 62 + } 63 + } 64 + 65 + if (len) { 66 + memcpy(sctx->buf + sctx->partial, data, len); 67 + sctx->partial += len; 68 + } 69 
+ return 0; 70 + } 71 + 72 + static int sha3_final(struct shash_desc *desc, u8 *out) 73 + { 74 + struct sha3_state *sctx = shash_desc_ctx(desc); 75 + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); 76 + __le64 *digest = (__le64 *)out; 77 + int i; 78 + 79 + if (!may_use_simd()) 80 + return crypto_sha3_final(desc, out); 81 + 82 + sctx->buf[sctx->partial++] = 0x06; 83 + memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial); 84 + sctx->buf[sctx->rsiz - 1] |= 0x80; 85 + 86 + kernel_neon_begin(); 87 + sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size); 88 + kernel_neon_end(); 89 + 90 + for (i = 0; i < digest_size / 8; i++) 91 + put_unaligned_le64(sctx->st[i], digest++); 92 + 93 + if (digest_size & 4) 94 + put_unaligned_le32(sctx->st[i], (__le32 *)digest); 95 + 96 + *sctx = (struct sha3_state){}; 97 + return 0; 98 + } 99 + 100 + static struct shash_alg algs[] = { { 101 + .digestsize = SHA3_224_DIGEST_SIZE, 102 + .init = crypto_sha3_init, 103 + .update = sha3_update, 104 + .final = sha3_final, 105 + .descsize = sizeof(struct sha3_state), 106 + .base.cra_name = "sha3-224", 107 + .base.cra_driver_name = "sha3-224-ce", 108 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 109 + .base.cra_blocksize = SHA3_224_BLOCK_SIZE, 110 + .base.cra_module = THIS_MODULE, 111 + .base.cra_priority = 200, 112 + }, { 113 + .digestsize = SHA3_256_DIGEST_SIZE, 114 + .init = crypto_sha3_init, 115 + .update = sha3_update, 116 + .final = sha3_final, 117 + .descsize = sizeof(struct sha3_state), 118 + .base.cra_name = "sha3-256", 119 + .base.cra_driver_name = "sha3-256-ce", 120 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 121 + .base.cra_blocksize = SHA3_256_BLOCK_SIZE, 122 + .base.cra_module = THIS_MODULE, 123 + .base.cra_priority = 200, 124 + }, { 125 + .digestsize = SHA3_384_DIGEST_SIZE, 126 + .init = crypto_sha3_init, 127 + .update = sha3_update, 128 + .final = sha3_final, 129 + .descsize = sizeof(struct sha3_state), 130 + .base.cra_name = "sha3-384", 131 + 
.base.cra_driver_name = "sha3-384-ce", 132 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 133 + .base.cra_blocksize = SHA3_384_BLOCK_SIZE, 134 + .base.cra_module = THIS_MODULE, 135 + .base.cra_priority = 200, 136 + }, { 137 + .digestsize = SHA3_512_DIGEST_SIZE, 138 + .init = crypto_sha3_init, 139 + .update = sha3_update, 140 + .final = sha3_final, 141 + .descsize = sizeof(struct sha3_state), 142 + .base.cra_name = "sha3-512", 143 + .base.cra_driver_name = "sha3-512-ce", 144 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 145 + .base.cra_blocksize = SHA3_512_BLOCK_SIZE, 146 + .base.cra_module = THIS_MODULE, 147 + .base.cra_priority = 200, 148 + } }; 149 + 150 + static int __init sha3_neon_mod_init(void) 151 + { 152 + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); 153 + } 154 + 155 + static void __exit sha3_neon_mod_fini(void) 156 + { 157 + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); 158 + } 159 + 160 + module_cpu_feature_match(SHA3, sha3_neon_mod_init); 161 + module_exit(sha3_neon_mod_fini);
+204
arch/arm64/crypto/sha512-ce-core.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions 4 + * 5 + * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/linkage.h> 13 + #include <asm/assembler.h> 14 + 15 + .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19 16 + .set .Lq\b, \b 17 + .set .Lv\b\().2d, \b 18 + .endr 19 + 20 + .macro sha512h, rd, rn, rm 21 + .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) 22 + .endm 23 + 24 + .macro sha512h2, rd, rn, rm 25 + .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) 26 + .endm 27 + 28 + .macro sha512su0, rd, rn 29 + .inst 0xcec08000 | .L\rd | (.L\rn << 5) 30 + .endm 31 + 32 + .macro sha512su1, rd, rn, rm 33 + .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16) 34 + .endm 35 + 36 + /* 37 + * The SHA-512 round constants 38 + */ 39 + .section ".rodata", "a" 40 + .align 4 41 + .Lsha512_rcon: 42 + .quad 0x428a2f98d728ae22, 0x7137449123ef65cd 43 + .quad 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc 44 + .quad 0x3956c25bf348b538, 0x59f111f1b605d019 45 + .quad 0x923f82a4af194f9b, 0xab1c5ed5da6d8118 46 + .quad 0xd807aa98a3030242, 0x12835b0145706fbe 47 + .quad 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2 48 + .quad 0x72be5d74f27b896f, 0x80deb1fe3b1696b1 49 + .quad 0x9bdc06a725c71235, 0xc19bf174cf692694 50 + .quad 0xe49b69c19ef14ad2, 0xefbe4786384f25e3 51 + .quad 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65 52 + .quad 0x2de92c6f592b0275, 0x4a7484aa6ea6e483 53 + .quad 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5 54 + .quad 0x983e5152ee66dfab, 0xa831c66d2db43210 55 + .quad 0xb00327c898fb213f, 0xbf597fc7beef0ee4 56 + .quad 0xc6e00bf33da88fc2, 0xd5a79147930aa725 57 + .quad 0x06ca6351e003826f, 0x142929670a0e6e70 58 + .quad 0x27b70a8546d22ffc, 0x2e1b21385c26c926 
59 + .quad 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df 60 + .quad 0x650a73548baf63de, 0x766a0abb3c77b2a8 61 + .quad 0x81c2c92e47edaee6, 0x92722c851482353b 62 + .quad 0xa2bfe8a14cf10364, 0xa81a664bbc423001 63 + .quad 0xc24b8b70d0f89791, 0xc76c51a30654be30 64 + .quad 0xd192e819d6ef5218, 0xd69906245565a910 65 + .quad 0xf40e35855771202a, 0x106aa07032bbd1b8 66 + .quad 0x19a4c116b8d2d0c8, 0x1e376c085141ab53 67 + .quad 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8 68 + .quad 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb 69 + .quad 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3 70 + .quad 0x748f82ee5defb2fc, 0x78a5636f43172f60 71 + .quad 0x84c87814a1f0ab72, 0x8cc702081a6439ec 72 + .quad 0x90befffa23631e28, 0xa4506cebde82bde9 73 + .quad 0xbef9a3f7b2c67915, 0xc67178f2e372532b 74 + .quad 0xca273eceea26619c, 0xd186b8c721c0c207 75 + .quad 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178 76 + .quad 0x06f067aa72176fba, 0x0a637dc5a2c898a6 77 + .quad 0x113f9804bef90dae, 0x1b710b35131c471b 78 + .quad 0x28db77f523047d84, 0x32caab7b40c72493 79 + .quad 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c 80 + .quad 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a 81 + .quad 0x5fcb6fab3ad6faec, 0x6c44198c4a475817 82 + 83 + .macro dround, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4 84 + .ifnb \rc1 85 + ld1 {v\rc1\().2d}, [x4], #16 86 + .endif 87 + add v5.2d, v\rc0\().2d, v\in0\().2d 88 + ext v6.16b, v\i2\().16b, v\i3\().16b, #8 89 + ext v5.16b, v5.16b, v5.16b, #8 90 + ext v7.16b, v\i1\().16b, v\i2\().16b, #8 91 + add v\i3\().2d, v\i3\().2d, v5.2d 92 + .ifnb \in1 93 + ext v5.16b, v\in3\().16b, v\in4\().16b, #8 94 + sha512su0 v\in0\().2d, v\in1\().2d 95 + .endif 96 + sha512h q\i3, q6, v7.2d 97 + .ifnb \in1 98 + sha512su1 v\in0\().2d, v\in2\().2d, v5.2d 99 + .endif 100 + add v\i4\().2d, v\i1\().2d, v\i3\().2d 101 + sha512h2 q\i3, q\i1, v\i0\().2d 102 + .endm 103 + 104 + /* 105 + * void sha512_ce_transform(struct sha512_state *sst, u8 const *src, 106 + * int blocks) 107 + */ 108 + .text 109 + ENTRY(sha512_ce_transform) 110 + /* load state */ 
111 + ld1 {v8.2d-v11.2d}, [x0] 112 + 113 + /* load first 4 round constants */ 114 + adr_l x3, .Lsha512_rcon 115 + ld1 {v20.2d-v23.2d}, [x3], #64 116 + 117 + /* load input */ 118 + 0: ld1 {v12.2d-v15.2d}, [x1], #64 119 + ld1 {v16.2d-v19.2d}, [x1], #64 120 + sub w2, w2, #1 121 + 122 + CPU_LE( rev64 v12.16b, v12.16b ) 123 + CPU_LE( rev64 v13.16b, v13.16b ) 124 + CPU_LE( rev64 v14.16b, v14.16b ) 125 + CPU_LE( rev64 v15.16b, v15.16b ) 126 + CPU_LE( rev64 v16.16b, v16.16b ) 127 + CPU_LE( rev64 v17.16b, v17.16b ) 128 + CPU_LE( rev64 v18.16b, v18.16b ) 129 + CPU_LE( rev64 v19.16b, v19.16b ) 130 + 131 + mov x4, x3 // rc pointer 132 + 133 + mov v0.16b, v8.16b 134 + mov v1.16b, v9.16b 135 + mov v2.16b, v10.16b 136 + mov v3.16b, v11.16b 137 + 138 + // v0 ab cd -- ef gh ab 139 + // v1 cd -- ef gh ab cd 140 + // v2 ef gh ab cd -- ef 141 + // v3 gh ab cd -- ef gh 142 + // v4 -- ef gh ab cd -- 143 + 144 + dround 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17 145 + dround 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18 146 + dround 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19 147 + dround 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12 148 + dround 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13 149 + 150 + dround 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14 151 + dround 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15 152 + dround 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16 153 + dround 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17 154 + dround 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18 155 + 156 + dround 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19 157 + dround 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12 158 + dround 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13 159 + dround 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14 160 + dround 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15 161 + 162 + dround 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16 163 + dround 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17 164 + dround 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18 165 + dround 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19 166 + dround 1, 4, 3, 0, 2, 31, 27, 15, 
16, 14, 19, 12 167 + 168 + dround 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13 169 + dround 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14 170 + dround 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15 171 + dround 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16 172 + dround 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17 173 + 174 + dround 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18 175 + dround 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19 176 + dround 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12 177 + dround 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13 178 + dround 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14 179 + 180 + dround 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15 181 + dround 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16 182 + dround 2, 3, 1, 4, 0, 28, 24, 12 183 + dround 4, 2, 0, 1, 3, 29, 25, 13 184 + dround 1, 4, 3, 0, 2, 30, 26, 14 185 + 186 + dround 0, 1, 2, 3, 4, 31, 27, 15 187 + dround 3, 0, 4, 2, 1, 24, , 16 188 + dround 2, 3, 1, 4, 0, 25, , 17 189 + dround 4, 2, 0, 1, 3, 26, , 18 190 + dround 1, 4, 3, 0, 2, 27, , 19 191 + 192 + /* update state */ 193 + add v8.2d, v8.2d, v0.2d 194 + add v9.2d, v9.2d, v1.2d 195 + add v10.2d, v10.2d, v2.2d 196 + add v11.2d, v11.2d, v3.2d 197 + 198 + /* handled all input blocks? */ 199 + cbnz w2, 0b 200 + 201 + /* store new state */ 202 + 3: st1 {v8.2d-v11.2d}, [x0] 203 + ret 204 + ENDPROC(sha512_ce_transform)
+119
arch/arm64/crypto/sha512-ce-glue.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * sha512-ce-glue.c - SHA-384/SHA-512 using ARMv8 Crypto Extensions 4 + * 5 + * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <asm/neon.h> 13 + #include <asm/simd.h> 14 + #include <asm/unaligned.h> 15 + #include <crypto/internal/hash.h> 16 + #include <crypto/sha.h> 17 + #include <crypto/sha512_base.h> 18 + #include <linux/cpufeature.h> 19 + #include <linux/crypto.h> 20 + #include <linux/module.h> 21 + 22 + MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions"); 23 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 24 + MODULE_LICENSE("GPL v2"); 25 + 26 + asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src, 27 + int blocks); 28 + 29 + asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks); 30 + 31 + static int sha512_ce_update(struct shash_desc *desc, const u8 *data, 32 + unsigned int len) 33 + { 34 + if (!may_use_simd()) 35 + return sha512_base_do_update(desc, data, len, 36 + (sha512_block_fn *)sha512_block_data_order); 37 + 38 + kernel_neon_begin(); 39 + sha512_base_do_update(desc, data, len, 40 + (sha512_block_fn *)sha512_ce_transform); 41 + kernel_neon_end(); 42 + 43 + return 0; 44 + } 45 + 46 + static int sha512_ce_finup(struct shash_desc *desc, const u8 *data, 47 + unsigned int len, u8 *out) 48 + { 49 + if (!may_use_simd()) { 50 + if (len) 51 + sha512_base_do_update(desc, data, len, 52 + (sha512_block_fn *)sha512_block_data_order); 53 + sha512_base_do_finalize(desc, 54 + (sha512_block_fn *)sha512_block_data_order); 55 + return sha512_base_finish(desc, out); 56 + } 57 + 58 + kernel_neon_begin(); 59 + sha512_base_do_update(desc, data, len, 60 + (sha512_block_fn *)sha512_ce_transform); 
61 + sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform); 62 + kernel_neon_end(); 63 + return sha512_base_finish(desc, out); 64 + } 65 + 66 + static int sha512_ce_final(struct shash_desc *desc, u8 *out) 67 + { 68 + if (!may_use_simd()) { 69 + sha512_base_do_finalize(desc, 70 + (sha512_block_fn *)sha512_block_data_order); 71 + return sha512_base_finish(desc, out); 72 + } 73 + 74 + kernel_neon_begin(); 75 + sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform); 76 + kernel_neon_end(); 77 + return sha512_base_finish(desc, out); 78 + } 79 + 80 + static struct shash_alg algs[] = { { 81 + .init = sha384_base_init, 82 + .update = sha512_ce_update, 83 + .final = sha512_ce_final, 84 + .finup = sha512_ce_finup, 85 + .descsize = sizeof(struct sha512_state), 86 + .digestsize = SHA384_DIGEST_SIZE, 87 + .base.cra_name = "sha384", 88 + .base.cra_driver_name = "sha384-ce", 89 + .base.cra_priority = 200, 90 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 91 + .base.cra_blocksize = SHA512_BLOCK_SIZE, 92 + .base.cra_module = THIS_MODULE, 93 + }, { 94 + .init = sha512_base_init, 95 + .update = sha512_ce_update, 96 + .final = sha512_ce_final, 97 + .finup = sha512_ce_finup, 98 + .descsize = sizeof(struct sha512_state), 99 + .digestsize = SHA512_DIGEST_SIZE, 100 + .base.cra_name = "sha512", 101 + .base.cra_driver_name = "sha512-ce", 102 + .base.cra_priority = 200, 103 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 104 + .base.cra_blocksize = SHA512_BLOCK_SIZE, 105 + .base.cra_module = THIS_MODULE, 106 + } }; 107 + 108 + static int __init sha512_ce_mod_init(void) 109 + { 110 + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); 111 + } 112 + 113 + static void __exit sha512_ce_mod_fini(void) 114 + { 115 + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); 116 + } 117 + 118 + module_cpu_feature_match(SHA512, sha512_ce_mod_init); 119 + module_exit(sha512_ce_mod_fini);
+1
arch/arm64/crypto/sha512-glue.c
··· 27 27 28 28 asmlinkage void sha512_block_data_order(u32 *digest, const void *data, 29 29 unsigned int num_blks); 30 + EXPORT_SYMBOL(sha512_block_data_order); 30 31 31 32 static int sha512_update(struct shash_desc *desc, const u8 *data, 32 33 unsigned int len)
+141
arch/arm64/crypto/sm3-ce-core.S
··· 1 + /* 2 + * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions 3 + * 4 + * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/linkage.h> 12 + #include <asm/assembler.h> 13 + 14 + .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 15 + .set .Lv\b\().4s, \b 16 + .endr 17 + 18 + .macro sm3partw1, rd, rn, rm 19 + .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16) 20 + .endm 21 + 22 + .macro sm3partw2, rd, rn, rm 23 + .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16) 24 + .endm 25 + 26 + .macro sm3ss1, rd, rn, rm, ra 27 + .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16) 28 + .endm 29 + 30 + .macro sm3tt1a, rd, rn, rm, imm2 31 + .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) 32 + .endm 33 + 34 + .macro sm3tt1b, rd, rn, rm, imm2 35 + .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) 36 + .endm 37 + 38 + .macro sm3tt2a, rd, rn, rm, imm2 39 + .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) 40 + .endm 41 + 42 + .macro sm3tt2b, rd, rn, rm, imm2 43 + .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16) 44 + .endm 45 + 46 + .macro round, ab, s0, t0, t1, i 47 + sm3ss1 v5.4s, v8.4s, \t0\().4s, v9.4s 48 + shl \t1\().4s, \t0\().4s, #1 49 + sri \t1\().4s, \t0\().4s, #31 50 + sm3tt1\ab v8.4s, v5.4s, v10.4s, \i 51 + sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i 52 + .endm 53 + 54 + .macro qround, ab, s0, s1, s2, s3, s4 55 + .ifnb \s4 56 + ext \s4\().16b, \s1\().16b, \s2\().16b, #12 57 + ext v6.16b, \s0\().16b, \s1\().16b, #12 58 + ext v7.16b, \s2\().16b, \s3\().16b, #8 59 + sm3partw1 \s4\().4s, \s0\().4s, \s3\().4s 60 + .endif 61 + 62 + eor v10.16b, \s0\().16b, \s1\().16b 63 + 64 + round \ab, \s0, v11, v12, 0 
65 + round \ab, \s0, v12, v11, 1 66 + round \ab, \s0, v11, v12, 2 67 + round \ab, \s0, v12, v11, 3 68 + 69 + .ifnb \s4 70 + sm3partw2 \s4\().4s, v7.4s, v6.4s 71 + .endif 72 + .endm 73 + 74 + /* 75 + * void sm3_ce_transform(struct sm3_state *sst, u8 const *src, 76 + * int blocks) 77 + */ 78 + .text 79 + ENTRY(sm3_ce_transform) 80 + /* load state */ 81 + ld1 {v8.4s-v9.4s}, [x0] 82 + rev64 v8.4s, v8.4s 83 + rev64 v9.4s, v9.4s 84 + ext v8.16b, v8.16b, v8.16b, #8 85 + ext v9.16b, v9.16b, v9.16b, #8 86 + 87 + adr_l x8, .Lt 88 + ldp s13, s14, [x8] 89 + 90 + /* load input */ 91 + 0: ld1 {v0.16b-v3.16b}, [x1], #64 92 + sub w2, w2, #1 93 + 94 + mov v15.16b, v8.16b 95 + mov v16.16b, v9.16b 96 + 97 + CPU_LE( rev32 v0.16b, v0.16b ) 98 + CPU_LE( rev32 v1.16b, v1.16b ) 99 + CPU_LE( rev32 v2.16b, v2.16b ) 100 + CPU_LE( rev32 v3.16b, v3.16b ) 101 + 102 + ext v11.16b, v13.16b, v13.16b, #4 103 + 104 + qround a, v0, v1, v2, v3, v4 105 + qround a, v1, v2, v3, v4, v0 106 + qround a, v2, v3, v4, v0, v1 107 + qround a, v3, v4, v0, v1, v2 108 + 109 + ext v11.16b, v14.16b, v14.16b, #4 110 + 111 + qround b, v4, v0, v1, v2, v3 112 + qround b, v0, v1, v2, v3, v4 113 + qround b, v1, v2, v3, v4, v0 114 + qround b, v2, v3, v4, v0, v1 115 + qround b, v3, v4, v0, v1, v2 116 + qround b, v4, v0, v1, v2, v3 117 + qround b, v0, v1, v2, v3, v4 118 + qround b, v1, v2, v3, v4, v0 119 + qround b, v2, v3, v4, v0, v1 120 + qround b, v3, v4 121 + qround b, v4, v0 122 + qround b, v0, v1 123 + 124 + eor v8.16b, v8.16b, v15.16b 125 + eor v9.16b, v9.16b, v16.16b 126 + 127 + /* handled all input blocks? */ 128 + cbnz w2, 0b 129 + 130 + /* save state */ 131 + rev64 v8.4s, v8.4s 132 + rev64 v9.4s, v9.4s 133 + ext v8.16b, v8.16b, v8.16b, #8 134 + ext v9.16b, v9.16b, v9.16b, #8 135 + st1 {v8.4s-v9.4s}, [x0] 136 + ret 137 + ENDPROC(sm3_ce_transform) 138 + 139 + .section ".rodata", "a" 140 + .align 3 141 + .Lt: .word 0x79cc4519, 0x9d8a7a87
+92
arch/arm64/crypto/sm3-ce-glue.c
··· 1 + /* 2 + * sm3-ce-glue.c - SM3 secure hash using ARMv8.2 Crypto Extensions 3 + * 4 + * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <asm/neon.h> 12 + #include <asm/simd.h> 13 + #include <asm/unaligned.h> 14 + #include <crypto/internal/hash.h> 15 + #include <crypto/sm3.h> 16 + #include <crypto/sm3_base.h> 17 + #include <linux/cpufeature.h> 18 + #include <linux/crypto.h> 19 + #include <linux/module.h> 20 + 21 + MODULE_DESCRIPTION("SM3 secure hash using ARMv8 Crypto Extensions"); 22 + MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); 23 + MODULE_LICENSE("GPL v2"); 24 + 25 + asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src, 26 + int blocks); 27 + 28 + static int sm3_ce_update(struct shash_desc *desc, const u8 *data, 29 + unsigned int len) 30 + { 31 + if (!may_use_simd()) 32 + return crypto_sm3_update(desc, data, len); 33 + 34 + kernel_neon_begin(); 35 + sm3_base_do_update(desc, data, len, sm3_ce_transform); 36 + kernel_neon_end(); 37 + 38 + return 0; 39 + } 40 + 41 + static int sm3_ce_final(struct shash_desc *desc, u8 *out) 42 + { 43 + if (!may_use_simd()) 44 + return crypto_sm3_finup(desc, NULL, 0, out); 45 + 46 + kernel_neon_begin(); 47 + sm3_base_do_finalize(desc, sm3_ce_transform); 48 + kernel_neon_end(); 49 + 50 + return sm3_base_finish(desc, out); 51 + } 52 + 53 + static int sm3_ce_finup(struct shash_desc *desc, const u8 *data, 54 + unsigned int len, u8 *out) 55 + { 56 + if (!may_use_simd()) 57 + return crypto_sm3_finup(desc, data, len, out); 58 + 59 + kernel_neon_begin(); 60 + sm3_base_do_update(desc, data, len, sm3_ce_transform); 61 + kernel_neon_end(); 62 + 63 + return sm3_ce_final(desc, out); 64 + } 65 + 66 + static struct shash_alg sm3_alg = { 67 + .digestsize = SM3_DIGEST_SIZE, 68 + 
.init = sm3_base_init, 69 + .update = sm3_ce_update, 70 + .final = sm3_ce_final, 71 + .finup = sm3_ce_finup, 72 + .descsize = sizeof(struct sm3_state), 73 + .base.cra_name = "sm3", 74 + .base.cra_driver_name = "sm3-ce", 75 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 76 + .base.cra_blocksize = SM3_BLOCK_SIZE, 77 + .base.cra_module = THIS_MODULE, 78 + .base.cra_priority = 200, 79 + }; 80 + 81 + static int __init sm3_ce_mod_init(void) 82 + { 83 + return crypto_register_shash(&sm3_alg); 84 + } 85 + 86 + static void __exit sm3_ce_mod_fini(void) 87 + { 88 + crypto_unregister_shash(&sm3_alg); 89 + } 90 + 91 + module_cpu_feature_match(SM3, sm3_ce_mod_init); 92 + module_exit(sm3_ce_mod_fini);
+1
arch/powerpc/crypto/crc32c-vpmsum_glue.c
··· 141 141 .cra_name = "crc32c", 142 142 .cra_driver_name = "crc32c-vpmsum", 143 143 .cra_priority = 200, 144 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 144 145 .cra_blocksize = CHKSUM_BLOCK_SIZE, 145 146 .cra_ctxsize = sizeof(u32), 146 147 .cra_module = THIS_MODULE,
+3
arch/s390/crypto/crc32-vx.c
··· 239 239 .cra_name = "crc32", 240 240 .cra_driver_name = "crc32-vx", 241 241 .cra_priority = 200, 242 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 242 243 .cra_blocksize = CRC32_BLOCK_SIZE, 243 244 .cra_ctxsize = sizeof(struct crc_ctx), 244 245 .cra_module = THIS_MODULE, ··· 260 259 .cra_name = "crc32be", 261 260 .cra_driver_name = "crc32be-vx", 262 261 .cra_priority = 200, 262 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 263 263 .cra_blocksize = CRC32_BLOCK_SIZE, 264 264 .cra_ctxsize = sizeof(struct crc_ctx), 265 265 .cra_module = THIS_MODULE, ··· 281 279 .cra_name = "crc32c", 282 280 .cra_driver_name = "crc32c-vx", 283 281 .cra_priority = 200, 282 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 284 283 .cra_blocksize = CRC32_BLOCK_SIZE, 285 284 .cra_ctxsize = sizeof(struct crc_ctx), 286 285 .cra_module = THIS_MODULE,
+1
arch/sparc/crypto/crc32c_glue.c
··· 133 133 .cra_name = "crc32c", 134 134 .cra_driver_name = "crc32c-sparc64", 135 135 .cra_priority = SPARC_CR_OPCODE_PRIORITY, 136 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 136 137 .cra_blocksize = CHKSUM_BLOCK_SIZE, 137 138 .cra_ctxsize = sizeof(u32), 138 139 .cra_alignmask = 7,
+57 -142
arch/x86/crypto/aesni-intel_asm.S
··· 90 90 ALL_F: .octa 0xffffffffffffffffffffffffffffffff 91 91 .octa 0x00000000000000000000000000000000 92 92 93 - .section .rodata 94 - .align 16 95 - .type aad_shift_arr, @object 96 - .size aad_shift_arr, 272 97 - aad_shift_arr: 98 - .octa 0xffffffffffffffffffffffffffffffff 99 - .octa 0xffffffffffffffffffffffffffffff0C 100 - .octa 0xffffffffffffffffffffffffffff0D0C 101 - .octa 0xffffffffffffffffffffffffff0E0D0C 102 - .octa 0xffffffffffffffffffffffff0F0E0D0C 103 - .octa 0xffffffffffffffffffffff0C0B0A0908 104 - .octa 0xffffffffffffffffffff0D0C0B0A0908 105 - .octa 0xffffffffffffffffff0E0D0C0B0A0908 106 - .octa 0xffffffffffffffff0F0E0D0C0B0A0908 107 - .octa 0xffffffffffffff0C0B0A090807060504 108 - .octa 0xffffffffffff0D0C0B0A090807060504 109 - .octa 0xffffffffff0E0D0C0B0A090807060504 110 - .octa 0xffffffff0F0E0D0C0B0A090807060504 111 - .octa 0xffffff0C0B0A09080706050403020100 112 - .octa 0xffff0D0C0B0A09080706050403020100 113 - .octa 0xff0E0D0C0B0A09080706050403020100 114 - .octa 0x0F0E0D0C0B0A09080706050403020100 115 - 116 - 117 93 .text 118 94 119 95 ··· 233 257 pxor \TMP1, \GH # result is in TMP1 234 258 .endm 235 259 260 + # Reads DLEN bytes starting at DPTR and stores in XMMDst 261 + # where 0 < DLEN < 16 262 + # Clobbers %rax, DLEN and XMM1 263 + .macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst 264 + cmp $8, \DLEN 265 + jl _read_lt8_\@ 266 + mov (\DPTR), %rax 267 + MOVQ_R64_XMM %rax, \XMMDst 268 + sub $8, \DLEN 269 + jz _done_read_partial_block_\@ 270 + xor %eax, %eax 271 + _read_next_byte_\@: 272 + shl $8, %rax 273 + mov 7(\DPTR, \DLEN, 1), %al 274 + dec \DLEN 275 + jnz _read_next_byte_\@ 276 + MOVQ_R64_XMM %rax, \XMM1 277 + pslldq $8, \XMM1 278 + por \XMM1, \XMMDst 279 + jmp _done_read_partial_block_\@ 280 + _read_lt8_\@: 281 + xor %eax, %eax 282 + _read_next_byte_lt8_\@: 283 + shl $8, %rax 284 + mov -1(\DPTR, \DLEN, 1), %al 285 + dec \DLEN 286 + jnz _read_next_byte_lt8_\@ 287 + MOVQ_R64_XMM %rax, \XMMDst 288 + _done_read_partial_block_\@: 289 + .endm 290 + 
236 291 /* 237 292 * if a = number of total plaintext bytes 238 293 * b = floor(a/16) ··· 280 273 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation 281 274 MOVADQ SHUF_MASK(%rip), %xmm14 282 275 mov arg7, %r10 # %r10 = AAD 283 - mov arg8, %r12 # %r12 = aadLen 284 - mov %r12, %r11 276 + mov arg8, %r11 # %r11 = aadLen 285 277 pxor %xmm\i, %xmm\i 286 278 pxor \XMM2, \XMM2 287 279 288 280 cmp $16, %r11 289 - jl _get_AAD_rest8\num_initial_blocks\operation 281 + jl _get_AAD_rest\num_initial_blocks\operation 290 282 _get_AAD_blocks\num_initial_blocks\operation: 291 283 movdqu (%r10), %xmm\i 292 284 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 293 285 pxor %xmm\i, \XMM2 294 286 GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 295 287 add $16, %r10 296 - sub $16, %r12 297 288 sub $16, %r11 298 289 cmp $16, %r11 299 290 jge _get_AAD_blocks\num_initial_blocks\operation 300 291 301 292 movdqu \XMM2, %xmm\i 293 + 294 + /* read the last <16B of AAD */ 295 + _get_AAD_rest\num_initial_blocks\operation: 302 296 cmp $0, %r11 303 297 je _get_AAD_done\num_initial_blocks\operation 304 298 305 - pxor %xmm\i,%xmm\i 306 - 307 - /* read the last <16B of AAD. 
since we have at least 4B of 308 - data right after the AAD (the ICV, and maybe some CT), we can 309 - read 4B/8B blocks safely, and then get rid of the extra stuff */ 310 - _get_AAD_rest8\num_initial_blocks\operation: 311 - cmp $4, %r11 312 - jle _get_AAD_rest4\num_initial_blocks\operation 313 - movq (%r10), \TMP1 314 - add $8, %r10 315 - sub $8, %r11 316 - pslldq $8, \TMP1 317 - psrldq $8, %xmm\i 318 - pxor \TMP1, %xmm\i 319 - jmp _get_AAD_rest8\num_initial_blocks\operation 320 - _get_AAD_rest4\num_initial_blocks\operation: 321 - cmp $0, %r11 322 - jle _get_AAD_rest0\num_initial_blocks\operation 323 - mov (%r10), %eax 324 - movq %rax, \TMP1 325 - add $4, %r10 326 - sub $4, %r10 327 - pslldq $12, \TMP1 328 - psrldq $4, %xmm\i 329 - pxor \TMP1, %xmm\i 330 - _get_AAD_rest0\num_initial_blocks\operation: 331 - /* finalize: shift out the extra bytes we read, and align 332 - left. since pslldq can only shift by an immediate, we use 333 - vpshufb and an array of shuffle masks */ 334 - movq %r12, %r11 335 - salq $4, %r11 336 - movdqu aad_shift_arr(%r11), \TMP1 337 - PSHUFB_XMM \TMP1, %xmm\i 338 - _get_AAD_rest_final\num_initial_blocks\operation: 299 + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i 339 300 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 340 301 pxor \XMM2, %xmm\i 341 302 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 ··· 507 532 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation 508 533 MOVADQ SHUF_MASK(%rip), %xmm14 509 534 mov arg7, %r10 # %r10 = AAD 510 - mov arg8, %r12 # %r12 = aadLen 511 - mov %r12, %r11 535 + mov arg8, %r11 # %r11 = aadLen 512 536 pxor %xmm\i, %xmm\i 513 537 pxor \XMM2, \XMM2 514 538 515 539 cmp $16, %r11 516 - jl _get_AAD_rest8\num_initial_blocks\operation 540 + jl _get_AAD_rest\num_initial_blocks\operation 517 541 _get_AAD_blocks\num_initial_blocks\operation: 518 542 movdqu (%r10), %xmm\i 519 543 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 520 544 pxor %xmm\i, \XMM2 521 545 GHASH_MUL \XMM2, \TMP3, \TMP1, 
\TMP2, \TMP4, \TMP5, \XMM1 522 546 add $16, %r10 523 - sub $16, %r12 524 547 sub $16, %r11 525 548 cmp $16, %r11 526 549 jge _get_AAD_blocks\num_initial_blocks\operation 527 550 528 551 movdqu \XMM2, %xmm\i 552 + 553 + /* read the last <16B of AAD */ 554 + _get_AAD_rest\num_initial_blocks\operation: 529 555 cmp $0, %r11 530 556 je _get_AAD_done\num_initial_blocks\operation 531 557 532 - pxor %xmm\i,%xmm\i 533 - 534 - /* read the last <16B of AAD. since we have at least 4B of 535 - data right after the AAD (the ICV, and maybe some PT), we can 536 - read 4B/8B blocks safely, and then get rid of the extra stuff */ 537 - _get_AAD_rest8\num_initial_blocks\operation: 538 - cmp $4, %r11 539 - jle _get_AAD_rest4\num_initial_blocks\operation 540 - movq (%r10), \TMP1 541 - add $8, %r10 542 - sub $8, %r11 543 - pslldq $8, \TMP1 544 - psrldq $8, %xmm\i 545 - pxor \TMP1, %xmm\i 546 - jmp _get_AAD_rest8\num_initial_blocks\operation 547 - _get_AAD_rest4\num_initial_blocks\operation: 548 - cmp $0, %r11 549 - jle _get_AAD_rest0\num_initial_blocks\operation 550 - mov (%r10), %eax 551 - movq %rax, \TMP1 552 - add $4, %r10 553 - sub $4, %r10 554 - pslldq $12, \TMP1 555 - psrldq $4, %xmm\i 556 - pxor \TMP1, %xmm\i 557 - _get_AAD_rest0\num_initial_blocks\operation: 558 - /* finalize: shift out the extra bytes we read, and align 559 - left. since pslldq can only shift by an immediate, we use 560 - vpshufb and an array of shuffle masks */ 561 - movq %r12, %r11 562 - salq $4, %r11 563 - movdqu aad_shift_arr(%r11), \TMP1 564 - PSHUFB_XMM \TMP1, %xmm\i 565 - _get_AAD_rest_final\num_initial_blocks\operation: 558 + READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i 566 559 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data 567 560 pxor \XMM2, %xmm\i 568 561 GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1 ··· 1329 1386 * 1330 1387 * AAD Format with 64-bit Extended Sequence Number 1331 1388 * 1332 - * aadLen: 1333 - * from the definition of the spec, aadLen can only be 8 or 12 bytes. 
1334 - * The code supports 16 too but for other sizes, the code will fail. 1335 - * 1336 - * TLen: 1337 - * from the definition of the spec, TLen can only be 8, 12 or 16 bytes. 1338 - * For other sizes, the code will fail. 1339 - * 1340 1389 * poly = x^128 + x^127 + x^126 + x^121 + 1 1341 1390 * 1342 1391 *****************************************************************************/ ··· 1422 1487 PSHUFB_XMM %xmm10, %xmm0 1423 1488 1424 1489 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn) 1425 - sub $16, %r11 1426 - add %r13, %r11 1427 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block 1428 - lea SHIFT_MASK+16(%rip), %r12 1429 - sub %r13, %r12 1430 - # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes 1431 - # (%r13 is the number of bytes in plaintext mod 16) 1432 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask 1433 - PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes 1434 1490 1491 + lea (%arg3,%r11,1), %r10 1492 + mov %r13, %r12 1493 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 1494 + 1495 + lea ALL_F+16(%rip), %r12 1496 + sub %r13, %r12 1435 1497 movdqa %xmm1, %xmm2 1436 1498 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn) 1437 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 1499 + movdqu (%r12), %xmm1 1438 1500 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0 1439 1501 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0 1440 1502 pand %xmm1, %xmm2 ··· 1440 1508 1441 1509 pxor %xmm2, %xmm8 1442 1510 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 1443 - # GHASH computation for the last <16 byte block 1444 - sub %r13, %r11 1445 - add $16, %r11 1446 1511 1447 1512 # output %r13 bytes 1448 1513 MOVQ_R64_XMM %xmm0, %rax ··· 1593 1664 * 1594 1665 * AAD Format with 64-bit Extended Sequence Number 1595 1666 * 1596 - * aadLen: 1597 - * from the definition of the spec, aadLen can only be 8 or 12 bytes. 1598 - * The code supports 16 too but for other sizes, the code will fail. 
1599 - * 1600 - * TLen: 1601 - * from the definition of the spec, TLen can only be 8, 12 or 16 bytes. 1602 - * For other sizes, the code will fail. 1603 - * 1604 1667 * poly = x^128 + x^127 + x^126 + x^121 + 1 1605 1668 ***************************************************************************/ 1606 1669 ENTRY(aesni_gcm_enc) ··· 1685 1764 movdqa SHUF_MASK(%rip), %xmm10 1686 1765 PSHUFB_XMM %xmm10, %xmm0 1687 1766 1688 - 1689 1767 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn) 1690 - sub $16, %r11 1691 - add %r13, %r11 1692 - movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks 1693 - lea SHIFT_MASK+16(%rip), %r12 1768 + 1769 + lea (%arg3,%r11,1), %r10 1770 + mov %r13, %r12 1771 + READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1 1772 + 1773 + lea ALL_F+16(%rip), %r12 1694 1774 sub %r13, %r12 1695 - # adjust the shuffle mask pointer to be able to shift 16-r13 bytes 1696 - # (%r13 is the number of bytes in plaintext mod 16) 1697 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask 1698 - PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte 1699 1775 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn) 1700 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1 1776 + movdqu (%r12), %xmm1 1701 1777 # get the appropriate mask to mask out top 16-r13 bytes of xmm0 1702 1778 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0 1703 1779 movdqa SHUF_MASK(%rip), %xmm10 ··· 1703 1785 pxor %xmm0, %xmm8 1704 1786 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6 1705 1787 # GHASH computation for the last <16 byte block 1706 - sub %r13, %r11 1707 - add $16, %r11 1708 - 1709 1788 movdqa SHUF_MASK(%rip), %xmm10 1710 1789 PSHUFB_XMM %xmm10, %xmm0 1711 1790
+56 -14
arch/x86/crypto/aesni-intel_glue.c
··· 690 690 rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); 691 691 } 692 692 693 - static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key, 694 - unsigned int key_len) 693 + static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key, 694 + unsigned int key_len) 695 695 { 696 696 struct cryptd_aead **ctx = crypto_aead_ctx(parent); 697 697 struct cryptd_aead *cryptd_tfm = *ctx; ··· 716 716 717 717 /* This is the Integrity Check Value (aka the authentication tag length and can 718 718 * be 8, 12 or 16 bytes long. */ 719 - static int rfc4106_set_authsize(struct crypto_aead *parent, 720 - unsigned int authsize) 719 + static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent, 720 + unsigned int authsize) 721 721 { 722 722 struct cryptd_aead **ctx = crypto_aead_ctx(parent); 723 723 struct cryptd_aead *cryptd_tfm = *ctx; ··· 824 824 if (sg_is_last(req->src) && 825 825 (!PageHighMem(sg_page(req->src)) || 826 826 req->src->offset + req->src->length <= PAGE_SIZE) && 827 - sg_is_last(req->dst) && 827 + sg_is_last(req->dst) && req->dst->length && 828 828 (!PageHighMem(sg_page(req->dst)) || 829 829 req->dst->offset + req->dst->length <= PAGE_SIZE)) { 830 830 one_entry_in_sg = 1; ··· 929 929 aes_ctx); 930 930 } 931 931 932 - static int rfc4106_encrypt(struct aead_request *req) 932 + static int gcmaes_wrapper_encrypt(struct aead_request *req) 933 933 { 934 934 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 935 935 struct cryptd_aead **ctx = crypto_aead_ctx(tfm); ··· 945 945 return crypto_aead_encrypt(req); 946 946 } 947 947 948 - static int rfc4106_decrypt(struct aead_request *req) 948 + static int gcmaes_wrapper_decrypt(struct aead_request *req) 949 949 { 950 950 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 951 951 struct cryptd_aead **ctx = crypto_aead_ctx(tfm); ··· 1117 1117 { 1118 1118 __be32 counter = cpu_to_be32(1); 1119 1119 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1120 - struct aesni_rfc4106_gcm_ctx *ctx = 
aesni_rfc4106_gcm_ctx_get(tfm); 1120 + struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm); 1121 1121 void *aes_ctx = &(ctx->aes_key_expanded); 1122 1122 u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN))); 1123 1123 ··· 1126 1126 1127 1127 return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv, 1128 1128 aes_ctx); 1129 + } 1130 + 1131 + static int generic_gcmaes_init(struct crypto_aead *aead) 1132 + { 1133 + struct cryptd_aead *cryptd_tfm; 1134 + struct cryptd_aead **ctx = crypto_aead_ctx(aead); 1135 + 1136 + cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni", 1137 + CRYPTO_ALG_INTERNAL, 1138 + CRYPTO_ALG_INTERNAL); 1139 + if (IS_ERR(cryptd_tfm)) 1140 + return PTR_ERR(cryptd_tfm); 1141 + 1142 + *ctx = cryptd_tfm; 1143 + crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base)); 1144 + 1145 + return 0; 1146 + } 1147 + 1148 + static void generic_gcmaes_exit(struct crypto_aead *aead) 1149 + { 1150 + struct cryptd_aead **ctx = crypto_aead_ctx(aead); 1151 + 1152 + cryptd_free_aead(*ctx); 1129 1153 } 1130 1154 1131 1155 static struct aead_alg aesni_aead_algs[] = { { ··· 1171 1147 }, { 1172 1148 .init = rfc4106_init, 1173 1149 .exit = rfc4106_exit, 1174 - .setkey = rfc4106_set_key, 1175 - .setauthsize = rfc4106_set_authsize, 1176 - .encrypt = rfc4106_encrypt, 1177 - .decrypt = rfc4106_decrypt, 1150 + .setkey = gcmaes_wrapper_set_key, 1151 + .setauthsize = gcmaes_wrapper_set_authsize, 1152 + .encrypt = gcmaes_wrapper_encrypt, 1153 + .decrypt = gcmaes_wrapper_decrypt, 1178 1154 .ivsize = GCM_RFC4106_IV_SIZE, 1179 1155 .maxauthsize = 16, 1180 1156 .base = { ··· 1194 1170 .ivsize = GCM_AES_IV_SIZE, 1195 1171 .maxauthsize = 16, 1196 1172 .base = { 1173 + .cra_name = "__generic-gcm-aes-aesni", 1174 + .cra_driver_name = "__driver-generic-gcm-aes-aesni", 1175 + .cra_priority = 0, 1176 + .cra_flags = CRYPTO_ALG_INTERNAL, 1177 + .cra_blocksize = 1, 1178 + .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), 1179 + .cra_alignmask = 
AESNI_ALIGN - 1, 1180 + .cra_module = THIS_MODULE, 1181 + }, 1182 + }, { 1183 + .init = generic_gcmaes_init, 1184 + .exit = generic_gcmaes_exit, 1185 + .setkey = gcmaes_wrapper_set_key, 1186 + .setauthsize = gcmaes_wrapper_set_authsize, 1187 + .encrypt = gcmaes_wrapper_encrypt, 1188 + .decrypt = gcmaes_wrapper_decrypt, 1189 + .ivsize = GCM_AES_IV_SIZE, 1190 + .maxauthsize = 16, 1191 + .base = { 1197 1192 .cra_name = "gcm(aes)", 1198 1193 .cra_driver_name = "generic-gcm-aesni", 1199 1194 .cra_priority = 400, 1200 1195 .cra_flags = CRYPTO_ALG_ASYNC, 1201 1196 .cra_blocksize = 1, 1202 - .cra_ctxsize = sizeof(struct generic_gcmaes_ctx), 1203 - .cra_alignmask = AESNI_ALIGN - 1, 1197 + .cra_ctxsize = sizeof(struct cryptd_aead *), 1204 1198 .cra_module = THIS_MODULE, 1205 1199 }, 1206 1200 } };
-1
arch/x86/crypto/chacha20_glue.c
··· 107 107 .base.cra_priority = 300, 108 108 .base.cra_blocksize = 1, 109 109 .base.cra_ctxsize = sizeof(struct chacha20_ctx), 110 - .base.cra_alignmask = sizeof(u32) - 1, 111 110 .base.cra_module = THIS_MODULE, 112 111 113 112 .min_keysize = CHACHA20_KEY_SIZE,
+1
arch/x86/crypto/crc32-pclmul_glue.c
··· 162 162 .cra_name = "crc32", 163 163 .cra_driver_name = "crc32-pclmul", 164 164 .cra_priority = 200, 165 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 165 166 .cra_blocksize = CHKSUM_BLOCK_SIZE, 166 167 .cra_ctxsize = sizeof(u32), 167 168 .cra_module = THIS_MODULE,
+1
arch/x86/crypto/crc32c-intel_glue.c
··· 226 226 .cra_name = "crc32c", 227 227 .cra_driver_name = "crc32c-intel", 228 228 .cra_priority = 200, 229 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 229 230 .cra_blocksize = CHKSUM_BLOCK_SIZE, 230 231 .cra_ctxsize = sizeof(u32), 231 232 .cra_module = THIS_MODULE,
-2
arch/x86/crypto/poly1305_glue.c
··· 164 164 .init = poly1305_simd_init, 165 165 .update = poly1305_simd_update, 166 166 .final = crypto_poly1305_final, 167 - .setkey = crypto_poly1305_setkey, 168 167 .descsize = sizeof(struct poly1305_simd_desc_ctx), 169 168 .base = { 170 169 .cra_name = "poly1305", 171 170 .cra_driver_name = "poly1305-simd", 172 171 .cra_priority = 300, 173 172 .cra_flags = CRYPTO_ALG_TYPE_SHASH, 174 - .cra_alignmask = sizeof(u32) - 1, 175 173 .cra_blocksize = POLY1305_BLOCK_SIZE, 176 174 .cra_module = THIS_MODULE, 177 175 },
+4 -180
arch/x86/crypto/salsa20-i586-asm_32.S
··· 1 - # salsa20_pm.s version 20051229 2 - # D. J. Bernstein 3 - # Public domain. 1 + # Derived from: 2 + # salsa20_pm.s version 20051229 3 + # D. J. Bernstein 4 + # Public domain. 4 5 5 6 #include <linux/linkage.h> 6 7 ··· 936 935 # goto bytesatleast1 937 936 jmp ._bytesatleast1 938 937 ENDPROC(salsa20_encrypt_bytes) 939 - 940 - # enter salsa20_keysetup 941 - ENTRY(salsa20_keysetup) 942 - mov %esp,%eax 943 - and $31,%eax 944 - add $256,%eax 945 - sub %eax,%esp 946 - # eax_stack = eax 947 - movl %eax,64(%esp) 948 - # ebx_stack = ebx 949 - movl %ebx,68(%esp) 950 - # esi_stack = esi 951 - movl %esi,72(%esp) 952 - # edi_stack = edi 953 - movl %edi,76(%esp) 954 - # ebp_stack = ebp 955 - movl %ebp,80(%esp) 956 - # k = arg2 957 - movl 8(%esp,%eax),%ecx 958 - # kbits = arg3 959 - movl 12(%esp,%eax),%edx 960 - # x = arg1 961 - movl 4(%esp,%eax),%eax 962 - # in1 = *(uint32 *) (k + 0) 963 - movl 0(%ecx),%ebx 964 - # in2 = *(uint32 *) (k + 4) 965 - movl 4(%ecx),%esi 966 - # in3 = *(uint32 *) (k + 8) 967 - movl 8(%ecx),%edi 968 - # in4 = *(uint32 *) (k + 12) 969 - movl 12(%ecx),%ebp 970 - # *(uint32 *) (x + 4) = in1 971 - movl %ebx,4(%eax) 972 - # *(uint32 *) (x + 8) = in2 973 - movl %esi,8(%eax) 974 - # *(uint32 *) (x + 12) = in3 975 - movl %edi,12(%eax) 976 - # *(uint32 *) (x + 16) = in4 977 - movl %ebp,16(%eax) 978 - # kbits - 256 979 - cmp $256,%edx 980 - # goto kbits128 if unsigned< 981 - jb ._kbits128 982 - ._kbits256: 983 - # in11 = *(uint32 *) (k + 16) 984 - movl 16(%ecx),%edx 985 - # in12 = *(uint32 *) (k + 20) 986 - movl 20(%ecx),%ebx 987 - # in13 = *(uint32 *) (k + 24) 988 - movl 24(%ecx),%esi 989 - # in14 = *(uint32 *) (k + 28) 990 - movl 28(%ecx),%ecx 991 - # *(uint32 *) (x + 44) = in11 992 - movl %edx,44(%eax) 993 - # *(uint32 *) (x + 48) = in12 994 - movl %ebx,48(%eax) 995 - # *(uint32 *) (x + 52) = in13 996 - movl %esi,52(%eax) 997 - # *(uint32 *) (x + 56) = in14 998 - movl %ecx,56(%eax) 999 - # in0 = 1634760805 1000 - mov $1634760805,%ecx 1001 - # in5 = 
857760878 1002 - mov $857760878,%edx 1003 - # in10 = 2036477234 1004 - mov $2036477234,%ebx 1005 - # in15 = 1797285236 1006 - mov $1797285236,%esi 1007 - # *(uint32 *) (x + 0) = in0 1008 - movl %ecx,0(%eax) 1009 - # *(uint32 *) (x + 20) = in5 1010 - movl %edx,20(%eax) 1011 - # *(uint32 *) (x + 40) = in10 1012 - movl %ebx,40(%eax) 1013 - # *(uint32 *) (x + 60) = in15 1014 - movl %esi,60(%eax) 1015 - # goto keysetupdone 1016 - jmp ._keysetupdone 1017 - ._kbits128: 1018 - # in11 = *(uint32 *) (k + 0) 1019 - movl 0(%ecx),%edx 1020 - # in12 = *(uint32 *) (k + 4) 1021 - movl 4(%ecx),%ebx 1022 - # in13 = *(uint32 *) (k + 8) 1023 - movl 8(%ecx),%esi 1024 - # in14 = *(uint32 *) (k + 12) 1025 - movl 12(%ecx),%ecx 1026 - # *(uint32 *) (x + 44) = in11 1027 - movl %edx,44(%eax) 1028 - # *(uint32 *) (x + 48) = in12 1029 - movl %ebx,48(%eax) 1030 - # *(uint32 *) (x + 52) = in13 1031 - movl %esi,52(%eax) 1032 - # *(uint32 *) (x + 56) = in14 1033 - movl %ecx,56(%eax) 1034 - # in0 = 1634760805 1035 - mov $1634760805,%ecx 1036 - # in5 = 824206446 1037 - mov $824206446,%edx 1038 - # in10 = 2036477238 1039 - mov $2036477238,%ebx 1040 - # in15 = 1797285236 1041 - mov $1797285236,%esi 1042 - # *(uint32 *) (x + 0) = in0 1043 - movl %ecx,0(%eax) 1044 - # *(uint32 *) (x + 20) = in5 1045 - movl %edx,20(%eax) 1046 - # *(uint32 *) (x + 40) = in10 1047 - movl %ebx,40(%eax) 1048 - # *(uint32 *) (x + 60) = in15 1049 - movl %esi,60(%eax) 1050 - ._keysetupdone: 1051 - # eax = eax_stack 1052 - movl 64(%esp),%eax 1053 - # ebx = ebx_stack 1054 - movl 68(%esp),%ebx 1055 - # esi = esi_stack 1056 - movl 72(%esp),%esi 1057 - # edi = edi_stack 1058 - movl 76(%esp),%edi 1059 - # ebp = ebp_stack 1060 - movl 80(%esp),%ebp 1061 - # leave 1062 - add %eax,%esp 1063 - ret 1064 - ENDPROC(salsa20_keysetup) 1065 - 1066 - # enter salsa20_ivsetup 1067 - ENTRY(salsa20_ivsetup) 1068 - mov %esp,%eax 1069 - and $31,%eax 1070 - add $256,%eax 1071 - sub %eax,%esp 1072 - # eax_stack = eax 1073 - movl %eax,64(%esp) 1074 - # 
ebx_stack = ebx 1075 - movl %ebx,68(%esp) 1076 - # esi_stack = esi 1077 - movl %esi,72(%esp) 1078 - # edi_stack = edi 1079 - movl %edi,76(%esp) 1080 - # ebp_stack = ebp 1081 - movl %ebp,80(%esp) 1082 - # iv = arg2 1083 - movl 8(%esp,%eax),%ecx 1084 - # x = arg1 1085 - movl 4(%esp,%eax),%eax 1086 - # in6 = *(uint32 *) (iv + 0) 1087 - movl 0(%ecx),%edx 1088 - # in7 = *(uint32 *) (iv + 4) 1089 - movl 4(%ecx),%ecx 1090 - # in8 = 0 1091 - mov $0,%ebx 1092 - # in9 = 0 1093 - mov $0,%esi 1094 - # *(uint32 *) (x + 24) = in6 1095 - movl %edx,24(%eax) 1096 - # *(uint32 *) (x + 28) = in7 1097 - movl %ecx,28(%eax) 1098 - # *(uint32 *) (x + 32) = in8 1099 - movl %ebx,32(%eax) 1100 - # *(uint32 *) (x + 36) = in9 1101 - movl %esi,36(%eax) 1102 - # eax = eax_stack 1103 - movl 64(%esp),%eax 1104 - # ebx = ebx_stack 1105 - movl 68(%esp),%ebx 1106 - # esi = esi_stack 1107 - movl 72(%esp),%esi 1108 - # edi = edi_stack 1109 - movl 76(%esp),%edi 1110 - # ebp = ebp_stack 1111 - movl 80(%esp),%ebp 1112 - # leave 1113 - add %eax,%esp 1114 - ret 1115 - ENDPROC(salsa20_ivsetup)
-114
arch/x86/crypto/salsa20-x86_64-asm_64.S
··· 803 803 # goto bytesatleast1 804 804 jmp ._bytesatleast1 805 805 ENDPROC(salsa20_encrypt_bytes) 806 - 807 - # enter salsa20_keysetup 808 - ENTRY(salsa20_keysetup) 809 - mov %rsp,%r11 810 - and $31,%r11 811 - add $256,%r11 812 - sub %r11,%rsp 813 - # k = arg2 814 - mov %rsi,%rsi 815 - # kbits = arg3 816 - mov %rdx,%rdx 817 - # x = arg1 818 - mov %rdi,%rdi 819 - # in0 = *(uint64 *) (k + 0) 820 - movq 0(%rsi),%r8 821 - # in2 = *(uint64 *) (k + 8) 822 - movq 8(%rsi),%r9 823 - # *(uint64 *) (x + 4) = in0 824 - movq %r8,4(%rdi) 825 - # *(uint64 *) (x + 12) = in2 826 - movq %r9,12(%rdi) 827 - # unsigned<? kbits - 256 828 - cmp $256,%rdx 829 - # comment:fp stack unchanged by jump 830 - # goto kbits128 if unsigned< 831 - jb ._kbits128 832 - # kbits256: 833 - ._kbits256: 834 - # in10 = *(uint64 *) (k + 16) 835 - movq 16(%rsi),%rdx 836 - # in12 = *(uint64 *) (k + 24) 837 - movq 24(%rsi),%rsi 838 - # *(uint64 *) (x + 44) = in10 839 - movq %rdx,44(%rdi) 840 - # *(uint64 *) (x + 52) = in12 841 - movq %rsi,52(%rdi) 842 - # in0 = 1634760805 843 - mov $1634760805,%rsi 844 - # in4 = 857760878 845 - mov $857760878,%rdx 846 - # in10 = 2036477234 847 - mov $2036477234,%rcx 848 - # in14 = 1797285236 849 - mov $1797285236,%r8 850 - # *(uint32 *) (x + 0) = in0 851 - movl %esi,0(%rdi) 852 - # *(uint32 *) (x + 20) = in4 853 - movl %edx,20(%rdi) 854 - # *(uint32 *) (x + 40) = in10 855 - movl %ecx,40(%rdi) 856 - # *(uint32 *) (x + 60) = in14 857 - movl %r8d,60(%rdi) 858 - # comment:fp stack unchanged by jump 859 - # goto keysetupdone 860 - jmp ._keysetupdone 861 - # kbits128: 862 - ._kbits128: 863 - # in10 = *(uint64 *) (k + 0) 864 - movq 0(%rsi),%rdx 865 - # in12 = *(uint64 *) (k + 8) 866 - movq 8(%rsi),%rsi 867 - # *(uint64 *) (x + 44) = in10 868 - movq %rdx,44(%rdi) 869 - # *(uint64 *) (x + 52) = in12 870 - movq %rsi,52(%rdi) 871 - # in0 = 1634760805 872 - mov $1634760805,%rsi 873 - # in4 = 824206446 874 - mov $824206446,%rdx 875 - # in10 = 2036477238 876 - mov $2036477238,%rcx 877 - # 
in14 = 1797285236 878 - mov $1797285236,%r8 879 - # *(uint32 *) (x + 0) = in0 880 - movl %esi,0(%rdi) 881 - # *(uint32 *) (x + 20) = in4 882 - movl %edx,20(%rdi) 883 - # *(uint32 *) (x + 40) = in10 884 - movl %ecx,40(%rdi) 885 - # *(uint32 *) (x + 60) = in14 886 - movl %r8d,60(%rdi) 887 - # keysetupdone: 888 - ._keysetupdone: 889 - # leave 890 - add %r11,%rsp 891 - mov %rdi,%rax 892 - mov %rsi,%rdx 893 - ret 894 - ENDPROC(salsa20_keysetup) 895 - 896 - # enter salsa20_ivsetup 897 - ENTRY(salsa20_ivsetup) 898 - mov %rsp,%r11 899 - and $31,%r11 900 - add $256,%r11 901 - sub %r11,%rsp 902 - # iv = arg2 903 - mov %rsi,%rsi 904 - # x = arg1 905 - mov %rdi,%rdi 906 - # in6 = *(uint64 *) (iv + 0) 907 - movq 0(%rsi),%rsi 908 - # in8 = 0 909 - mov $0,%r8 910 - # *(uint64 *) (x + 24) = in6 911 - movq %rsi,24(%rdi) 912 - # *(uint64 *) (x + 32) = in8 913 - movq %r8,32(%rdi) 914 - # leave 915 - add %r11,%rsp 916 - mov %rdi,%rax 917 - mov %rsi,%rdx 918 - ret 919 - ENDPROC(salsa20_ivsetup)
+40 -65
arch/x86/crypto/salsa20_glue.c
··· 11 11 * - x86-64 version, renamed as salsa20-x86_64-asm_64.S 12 12 * available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s> 13 13 * 14 + * Also modified to set up the initial state using the generic C code rather 15 + * than in assembly. 16 + * 14 17 * This program is free software; you can redistribute it and/or modify it 15 18 * under the terms of the GNU General Public License as published by the Free 16 19 * Software Foundation; either version 2 of the License, or (at your option) ··· 21 18 * 22 19 */ 23 20 24 - #include <crypto/algapi.h> 21 + #include <asm/unaligned.h> 22 + #include <crypto/internal/skcipher.h> 23 + #include <crypto/salsa20.h> 25 24 #include <linux/module.h> 26 - #include <linux/crypto.h> 27 25 28 - #define SALSA20_IV_SIZE 8U 29 - #define SALSA20_MIN_KEY_SIZE 16U 30 - #define SALSA20_MAX_KEY_SIZE 32U 26 + asmlinkage void salsa20_encrypt_bytes(u32 state[16], const u8 *src, u8 *dst, 27 + u32 bytes); 31 28 32 - struct salsa20_ctx 29 + static int salsa20_asm_crypt(struct skcipher_request *req) 33 30 { 34 - u32 input[16]; 35 - }; 36 - 37 - asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, 38 - u32 keysize, u32 ivsize); 39 - asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv); 40 - asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, 41 - const u8 *src, u8 *dst, u32 bytes); 42 - 43 - static int setkey(struct crypto_tfm *tfm, const u8 *key, 44 - unsigned int keysize) 45 - { 46 - struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); 47 - salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8); 48 - return 0; 49 - } 50 - 51 - static int encrypt(struct blkcipher_desc *desc, 52 - struct scatterlist *dst, struct scatterlist *src, 53 - unsigned int nbytes) 54 - { 55 - struct blkcipher_walk walk; 56 - struct crypto_blkcipher *tfm = desc->tfm; 57 - struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); 31 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 32 + const struct salsa20_ctx *ctx = 
crypto_skcipher_ctx(tfm); 33 + struct skcipher_walk walk; 34 + u32 state[16]; 58 35 int err; 59 36 60 - blkcipher_walk_init(&walk, dst, src, nbytes); 61 - err = blkcipher_walk_virt_block(desc, &walk, 64); 37 + err = skcipher_walk_virt(&walk, req, true); 62 38 63 - salsa20_ivsetup(ctx, walk.iv); 39 + crypto_salsa20_init(state, ctx, walk.iv); 64 40 65 - while (walk.nbytes >= 64) { 66 - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, 67 - walk.dst.virt.addr, 68 - walk.nbytes - (walk.nbytes % 64)); 69 - err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); 70 - } 41 + while (walk.nbytes > 0) { 42 + unsigned int nbytes = walk.nbytes; 71 43 72 - if (walk.nbytes) { 73 - salsa20_encrypt_bytes(ctx, walk.src.virt.addr, 74 - walk.dst.virt.addr, walk.nbytes); 75 - err = blkcipher_walk_done(desc, &walk, 0); 44 + if (nbytes < walk.total) 45 + nbytes = round_down(nbytes, walk.stride); 46 + 47 + salsa20_encrypt_bytes(state, walk.src.virt.addr, 48 + walk.dst.virt.addr, nbytes); 49 + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); 76 50 } 77 51 78 52 return err; 79 53 } 80 54 81 - static struct crypto_alg alg = { 82 - .cra_name = "salsa20", 83 - .cra_driver_name = "salsa20-asm", 84 - .cra_priority = 200, 85 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 86 - .cra_type = &crypto_blkcipher_type, 87 - .cra_blocksize = 1, 88 - .cra_ctxsize = sizeof(struct salsa20_ctx), 89 - .cra_alignmask = 3, 90 - .cra_module = THIS_MODULE, 91 - .cra_u = { 92 - .blkcipher = { 93 - .setkey = setkey, 94 - .encrypt = encrypt, 95 - .decrypt = encrypt, 96 - .min_keysize = SALSA20_MIN_KEY_SIZE, 97 - .max_keysize = SALSA20_MAX_KEY_SIZE, 98 - .ivsize = SALSA20_IV_SIZE, 99 - } 100 - } 55 + static struct skcipher_alg alg = { 56 + .base.cra_name = "salsa20", 57 + .base.cra_driver_name = "salsa20-asm", 58 + .base.cra_priority = 200, 59 + .base.cra_blocksize = 1, 60 + .base.cra_ctxsize = sizeof(struct salsa20_ctx), 61 + .base.cra_module = THIS_MODULE, 62 + 63 + .min_keysize = SALSA20_MIN_KEY_SIZE, 64 + 
.max_keysize = SALSA20_MAX_KEY_SIZE, 65 + .ivsize = SALSA20_IV_SIZE, 66 + .chunksize = SALSA20_BLOCK_SIZE, 67 + .setkey = crypto_salsa20_setkey, 68 + .encrypt = salsa20_asm_crypt, 69 + .decrypt = salsa20_asm_crypt, 101 70 }; 102 71 103 72 static int __init init(void) 104 73 { 105 - return crypto_register_alg(&alg); 74 + return crypto_register_skcipher(&alg); 106 75 } 107 76 108 77 static void __exit fini(void) 109 78 { 110 - crypto_unregister_alg(&alg); 79 + crypto_unregister_skcipher(&alg); 111 80 } 112 81 113 82 module_init(init);
+60 -52
arch/x86/crypto/twofish-x86_64-asm_64-3way.S
··· 55 55 #define RAB1bl %bl 56 56 #define RAB2bl %cl 57 57 58 + #define CD0 0x0(%rsp) 59 + #define CD1 0x8(%rsp) 60 + #define CD2 0x10(%rsp) 61 + 62 + # used only before/after all rounds 58 63 #define RCD0 %r8 59 64 #define RCD1 %r9 60 65 #define RCD2 %r10 61 66 62 - #define RCD0d %r8d 63 - #define RCD1d %r9d 64 - #define RCD2d %r10d 67 + # used only during rounds 68 + #define RX0 %r8 69 + #define RX1 %r9 70 + #define RX2 %r10 65 71 66 - #define RX0 %rbp 67 - #define RX1 %r11 68 - #define RX2 %r12 72 + #define RX0d %r8d 73 + #define RX1d %r9d 74 + #define RX2d %r10d 69 75 70 - #define RX0d %ebp 71 - #define RX1d %r11d 72 - #define RX2d %r12d 76 + #define RY0 %r11 77 + #define RY1 %r12 78 + #define RY2 %r13 73 79 74 - #define RY0 %r13 75 - #define RY1 %r14 76 - #define RY2 %r15 77 - 78 - #define RY0d %r13d 79 - #define RY1d %r14d 80 - #define RY2d %r15d 80 + #define RY0d %r11d 81 + #define RY1d %r12d 82 + #define RY2d %r13d 81 83 82 84 #define RT0 %rdx 83 85 #define RT1 %rsi ··· 87 85 #define RT0d %edx 88 86 #define RT1d %esi 89 87 88 + #define RT1bl %sil 89 + 90 90 #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \ 91 91 movzbl ab ## bl, tmp2 ## d; \ 92 92 movzbl ab ## bh, tmp1 ## d; \ 93 93 rorq $(rot), ab; \ 94 94 op1##l T0(CTX, tmp2, 4), dst ## d; \ 95 95 op2##l T1(CTX, tmp1, 4), dst ## d; 96 + 97 + #define swap_ab_with_cd(ab, cd, tmp) \ 98 + movq cd, tmp; \ 99 + movq ab, cd; \ 100 + movq tmp, ab; 96 101 97 102 /* 98 103 * Combined G1 & G2 function. 
Reordered with help of rotates to have moves ··· 119 110 /* G1,2 && G2,2 */ \ 120 111 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \ 121 112 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \ 122 - xchgq cd ## 0, ab ## 0; \ 113 + swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \ 123 114 \ 124 115 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \ 125 116 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \ 126 - xchgq cd ## 1, ab ## 1; \ 117 + swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \ 127 118 \ 128 119 do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \ 129 120 do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \ 130 - xchgq cd ## 2, ab ## 2; 121 + swap_ab_with_cd(ab ## 2, cd ## 2, RT0); 131 122 132 123 #define enc_round_end(ab, x, y, n) \ 133 124 addl y ## d, x ## d; \ ··· 176 167 #define decrypt_cycle3(ba, dc, n) \ 177 168 decrypt_round3(ba, dc, (n*2)+1); \ 178 169 decrypt_round3(ba, dc, (n*2)); 170 + 171 + #define push_cd() \ 172 + pushq RCD2; \ 173 + pushq RCD1; \ 174 + pushq RCD0; 175 + 176 + #define pop_cd() \ 177 + popq RCD0; \ 178 + popq RCD1; \ 179 + popq RCD2; 179 180 180 181 #define inpack3(in, n, xy, m) \ 181 182 movq 4*(n)(in), xy ## 0; \ ··· 242 223 * %rdx: src, RIO 243 224 * %rcx: bool, if true: xor output 244 225 */ 245 - pushq %r15; 246 - pushq %r14; 247 226 pushq %r13; 248 227 pushq %r12; 249 - pushq %rbp; 250 228 pushq %rbx; 251 229 252 230 pushq %rcx; /* bool xor */ ··· 251 235 252 236 inpack_enc3(); 253 237 254 - encrypt_cycle3(RAB, RCD, 0); 255 - encrypt_cycle3(RAB, RCD, 1); 256 - encrypt_cycle3(RAB, RCD, 2); 257 - encrypt_cycle3(RAB, RCD, 3); 258 - encrypt_cycle3(RAB, RCD, 4); 259 - encrypt_cycle3(RAB, RCD, 5); 260 - encrypt_cycle3(RAB, RCD, 6); 261 - encrypt_cycle3(RAB, RCD, 7); 238 + push_cd(); 239 + encrypt_cycle3(RAB, CD, 0); 240 + encrypt_cycle3(RAB, CD, 1); 241 + encrypt_cycle3(RAB, CD, 2); 242 + encrypt_cycle3(RAB, CD, 3); 243 + encrypt_cycle3(RAB, CD, 4); 
244 + encrypt_cycle3(RAB, CD, 5); 245 + encrypt_cycle3(RAB, CD, 6); 246 + encrypt_cycle3(RAB, CD, 7); 247 + pop_cd(); 262 248 263 249 popq RIO; /* dst */ 264 - popq %rbp; /* bool xor */ 250 + popq RT1; /* bool xor */ 265 251 266 - testb %bpl, %bpl; 252 + testb RT1bl, RT1bl; 267 253 jnz .L__enc_xor3; 268 254 269 255 outunpack_enc3(mov); 270 256 271 257 popq %rbx; 272 - popq %rbp; 273 258 popq %r12; 274 259 popq %r13; 275 - popq %r14; 276 - popq %r15; 277 260 ret; 278 261 279 262 .L__enc_xor3: 280 263 outunpack_enc3(xor); 281 264 282 265 popq %rbx; 283 - popq %rbp; 284 266 popq %r12; 285 267 popq %r13; 286 - popq %r14; 287 - popq %r15; 288 268 ret; 289 269 ENDPROC(__twofish_enc_blk_3way) 290 270 ··· 290 278 * %rsi: dst 291 279 * %rdx: src, RIO 292 280 */ 293 - pushq %r15; 294 - pushq %r14; 295 281 pushq %r13; 296 282 pushq %r12; 297 - pushq %rbp; 298 283 pushq %rbx; 299 284 300 285 pushq %rsi; /* dst */ 301 286 302 287 inpack_dec3(); 303 288 304 - decrypt_cycle3(RAB, RCD, 7); 305 - decrypt_cycle3(RAB, RCD, 6); 306 - decrypt_cycle3(RAB, RCD, 5); 307 - decrypt_cycle3(RAB, RCD, 4); 308 - decrypt_cycle3(RAB, RCD, 3); 309 - decrypt_cycle3(RAB, RCD, 2); 310 - decrypt_cycle3(RAB, RCD, 1); 311 - decrypt_cycle3(RAB, RCD, 0); 289 + push_cd(); 290 + decrypt_cycle3(RAB, CD, 7); 291 + decrypt_cycle3(RAB, CD, 6); 292 + decrypt_cycle3(RAB, CD, 5); 293 + decrypt_cycle3(RAB, CD, 4); 294 + decrypt_cycle3(RAB, CD, 3); 295 + decrypt_cycle3(RAB, CD, 2); 296 + decrypt_cycle3(RAB, CD, 1); 297 + decrypt_cycle3(RAB, CD, 0); 298 + pop_cd(); 312 299 313 300 popq RIO; /* dst */ 314 301 315 302 outunpack_dec3(); 316 303 317 304 popq %rbx; 318 - popq %rbp; 319 305 popq %r12; 320 306 popq %r13; 321 - popq %r14; 322 - popq %r15; 323 307 ret; 324 308 ENDPROC(twofish_dec_blk_3way)
+3 -1
crypto/Kconfig
··· 131 131 132 132 config CRYPTO_ECDH 133 133 tristate "ECDH algorithm" 134 - select CRYTPO_KPP 134 + select CRYPTO_KPP 135 135 select CRYPTO_RNG_DEFAULT 136 136 help 137 137 Generic implementation of the ECDH algorithm ··· 1340 1340 tristate "Salsa20 stream cipher algorithm (i586)" 1341 1341 depends on (X86 || UML_X86) && !64BIT 1342 1342 select CRYPTO_BLKCIPHER 1343 + select CRYPTO_SALSA20 1343 1344 help 1344 1345 Salsa20 stream cipher algorithm. 1345 1346 ··· 1354 1353 tristate "Salsa20 stream cipher algorithm (x86_64)" 1355 1354 depends on (X86 || UML_X86) && 64BIT 1356 1355 select CRYPTO_BLKCIPHER 1356 + select CRYPTO_SALSA20 1357 1357 help 1358 1358 Salsa20 stream cipher algorithm. 1359 1359
+1
crypto/Makefile
··· 99 99 obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o 100 100 CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 101 101 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o 102 + CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 102 103 obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o 103 104 obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o 104 105 obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
+1 -4
crypto/ablk_helper.c
··· 18 18 * GNU General Public License for more details. 19 19 * 20 20 * You should have received a copy of the GNU General Public License 21 - * along with this program; if not, write to the Free Software 22 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 23 - * USA 21 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 24 22 * 25 23 */ 26 24 ··· 26 28 #include <linux/crypto.h> 27 29 #include <linux/init.h> 28 30 #include <linux/module.h> 29 - #include <linux/hardirq.h> 30 31 #include <crypto/algapi.h> 31 32 #include <crypto/cryptd.h> 32 33 #include <crypto/ablk_helper.h>
+14 -5
crypto/aead.c
··· 54 54 const u8 *key, unsigned int keylen) 55 55 { 56 56 unsigned long alignmask = crypto_aead_alignmask(tfm); 57 + int err; 57 58 58 59 if ((unsigned long)key & alignmask) 59 - return setkey_unaligned(tfm, key, keylen); 60 + err = setkey_unaligned(tfm, key, keylen); 61 + else 62 + err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen); 60 63 61 - return crypto_aead_alg(tfm)->setkey(tfm, key, keylen); 64 + if (err) 65 + return err; 66 + 67 + crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 68 + return 0; 62 69 } 63 70 EXPORT_SYMBOL_GPL(crypto_aead_setkey); 64 71 ··· 99 92 { 100 93 struct crypto_aead *aead = __crypto_aead_cast(tfm); 101 94 struct aead_alg *alg = crypto_aead_alg(aead); 95 + 96 + crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY); 102 97 103 98 aead->authsize = alg->maxauthsize; 104 99 ··· 304 295 if (err) 305 296 goto out; 306 297 307 - ctx->sknull = crypto_get_default_null_skcipher2(); 298 + ctx->sknull = crypto_get_default_null_skcipher(); 308 299 err = PTR_ERR(ctx->sknull); 309 300 if (IS_ERR(ctx->sknull)) 310 301 goto out; ··· 324 315 return err; 325 316 326 317 drop_null: 327 - crypto_put_default_null_skcipher2(); 318 + crypto_put_default_null_skcipher(); 328 319 goto out; 329 320 } 330 321 EXPORT_SYMBOL_GPL(aead_init_geniv); ··· 334 325 struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); 335 326 336 327 crypto_free_aead(ctx->child); 337 - crypto_put_default_null_skcipher2(); 328 + crypto_put_default_null_skcipher(); 338 329 } 339 330 EXPORT_SYMBOL_GPL(aead_exit_geniv); 340 331
+6 -4
crypto/af_alg.c
··· 150 150 151 151 static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 152 152 { 153 - const u32 forbidden = CRYPTO_ALG_INTERNAL; 153 + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; 154 154 struct sock *sk = sock->sk; 155 155 struct alg_sock *ask = alg_sk(sk); 156 156 struct sockaddr_alg *sa = (void *)uaddr; 157 157 const struct af_alg_type *type; 158 158 void *private; 159 159 int err; 160 + 161 + /* If caller uses non-allowed flag, return error. */ 162 + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) 163 + return -EINVAL; 160 164 161 165 if (sock->state == SS_CONNECTED) 162 166 return -EINVAL; ··· 180 176 if (IS_ERR(type)) 181 177 return PTR_ERR(type); 182 178 183 - private = type->bind(sa->salg_name, 184 - sa->salg_feat & ~forbidden, 185 - sa->salg_mask & ~forbidden); 179 + private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); 186 180 if (IS_ERR(private)) { 187 181 module_put(type->owner); 188 182 return PTR_ERR(private);
+28 -5
crypto/ahash.c
··· 193 193 unsigned int keylen) 194 194 { 195 195 unsigned long alignmask = crypto_ahash_alignmask(tfm); 196 + int err; 196 197 197 198 if ((unsigned long)key & alignmask) 198 - return ahash_setkey_unaligned(tfm, key, keylen); 199 + err = ahash_setkey_unaligned(tfm, key, keylen); 200 + else 201 + err = tfm->setkey(tfm, key, keylen); 199 202 200 - return tfm->setkey(tfm, key, keylen); 203 + if (err) 204 + return err; 205 + 206 + crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 207 + return 0; 201 208 } 202 209 EXPORT_SYMBOL_GPL(crypto_ahash_setkey); 203 210 ··· 375 368 376 369 int crypto_ahash_digest(struct ahash_request *req) 377 370 { 378 - return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); 371 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 372 + 373 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 374 + return -ENOKEY; 375 + 376 + return crypto_ahash_op(req, tfm->digest); 379 377 } 380 378 EXPORT_SYMBOL_GPL(crypto_ahash_digest); 381 379 ··· 462 450 struct ahash_alg *alg = crypto_ahash_alg(hash); 463 451 464 452 hash->setkey = ahash_nosetkey; 465 - hash->has_setkey = false; 466 453 hash->export = ahash_no_export; 467 454 hash->import = ahash_no_import; 468 455 ··· 476 465 477 466 if (alg->setkey) { 478 467 hash->setkey = alg->setkey; 479 - hash->has_setkey = true; 468 + if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) 469 + crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); 480 470 } 481 471 if (alg->export) 482 472 hash->export = alg->export; ··· 660 648 return IS_ERR(alg) ? 
ERR_CAST(alg) : __crypto_hash_alg_common(alg); 661 649 } 662 650 EXPORT_SYMBOL_GPL(ahash_attr_alg); 651 + 652 + bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg) 653 + { 654 + struct crypto_alg *alg = &halg->base; 655 + 656 + if (alg->cra_type != &crypto_ahash_type) 657 + return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg)); 658 + 659 + return __crypto_ahash_alg(alg)->setkey != NULL; 660 + } 661 + EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey); 663 662 664 663 MODULE_LICENSE("GPL"); 665 664 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
+4 -9
crypto/algapi.c
··· 62 62 if (alg->cra_priority < 0) 63 63 return -EINVAL; 64 64 65 - atomic_set(&alg->cra_refcnt, 1); 65 + refcount_set(&alg->cra_refcnt, 1); 66 66 67 67 return crypto_set_driver_name(alg); 68 68 } ··· 123 123 if (!tmpl || !crypto_tmpl_get(tmpl)) 124 124 return; 125 125 126 - crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); 127 126 list_move(&inst->alg.cra_list, list); 128 127 hlist_del(&inst->list); 129 128 inst->alg.cra_destroy = crypto_destroy_instance; ··· 235 236 if (!larval->adult) 236 237 goto free_larval; 237 238 238 - atomic_set(&larval->alg.cra_refcnt, 1); 239 + refcount_set(&larval->alg.cra_refcnt, 1); 239 240 memcpy(larval->alg.cra_driver_name, alg->cra_driver_name, 240 241 CRYPTO_MAX_ALG_NAME); 241 242 larval->alg.cra_priority = alg->cra_priority; ··· 391 392 392 393 alg->cra_flags |= CRYPTO_ALG_DEAD; 393 394 394 - crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); 395 395 list_del_init(&alg->cra_list); 396 396 crypto_remove_spawns(alg, list, NULL); 397 397 ··· 409 411 if (ret) 410 412 return ret; 411 413 412 - BUG_ON(atomic_read(&alg->cra_refcnt) != 1); 414 + BUG_ON(refcount_read(&alg->cra_refcnt) != 1); 413 415 if (alg->cra_destroy) 414 416 alg->cra_destroy(alg); 415 417 ··· 468 470 } 469 471 470 472 list_add(&tmpl->list, &crypto_template_list); 471 - crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl); 472 473 err = 0; 473 474 out: 474 475 up_write(&crypto_alg_sem); ··· 494 497 BUG_ON(err); 495 498 } 496 499 497 - crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl); 498 - 499 500 up_write(&crypto_alg_sem); 500 501 501 502 hlist_for_each_entry_safe(inst, n, list, list) { 502 - BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); 503 + BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1); 503 504 crypto_free_instance(inst); 504 505 } 505 506 crypto_remove_final(&users);
+5 -10
crypto/algif_aead.c
··· 42 42 43 43 struct aead_tfm { 44 44 struct crypto_aead *aead; 45 - bool has_key; 46 45 struct crypto_skcipher *null_tfm; 47 46 }; 48 47 ··· 397 398 398 399 err = -ENOKEY; 399 400 lock_sock_nested(psk, SINGLE_DEPTH_NESTING); 400 - if (!tfm->has_key) 401 + if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) 401 402 goto unlock; 402 403 403 404 if (!pask->refcnt++) ··· 490 491 return ERR_CAST(aead); 491 492 } 492 493 493 - null_tfm = crypto_get_default_null_skcipher2(); 494 + null_tfm = crypto_get_default_null_skcipher(); 494 495 if (IS_ERR(null_tfm)) { 495 496 crypto_free_aead(aead); 496 497 kfree(tfm); ··· 508 509 struct aead_tfm *tfm = private; 509 510 510 511 crypto_free_aead(tfm->aead); 511 - crypto_put_default_null_skcipher2(); 512 + crypto_put_default_null_skcipher(); 512 513 kfree(tfm); 513 514 } 514 515 ··· 522 523 static int aead_setkey(void *private, const u8 *key, unsigned int keylen) 523 524 { 524 525 struct aead_tfm *tfm = private; 525 - int err; 526 526 527 - err = crypto_aead_setkey(tfm->aead, key, keylen); 528 - tfm->has_key = !err; 529 - 530 - return err; 527 + return crypto_aead_setkey(tfm->aead, key, keylen); 531 528 } 532 529 533 530 static void aead_sock_destruct(struct sock *sk) ··· 584 589 { 585 590 struct aead_tfm *tfm = private; 586 591 587 - if (!tfm->has_key) 592 + if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY) 588 593 return -ENOKEY; 589 594 590 595 return aead_accept_parent_nokey(private, sk);
+11 -41
crypto/algif_hash.c
··· 34 34 struct ahash_request req; 35 35 }; 36 36 37 - struct algif_hash_tfm { 38 - struct crypto_ahash *hash; 39 - bool has_key; 40 - }; 41 - 42 37 static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx) 43 38 { 44 39 unsigned ds; ··· 302 307 int err = 0; 303 308 struct sock *psk; 304 309 struct alg_sock *pask; 305 - struct algif_hash_tfm *tfm; 310 + struct crypto_ahash *tfm; 306 311 struct sock *sk = sock->sk; 307 312 struct alg_sock *ask = alg_sk(sk); 308 313 ··· 316 321 317 322 err = -ENOKEY; 318 323 lock_sock_nested(psk, SINGLE_DEPTH_NESTING); 319 - if (!tfm->has_key) 324 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 320 325 goto unlock; 321 326 322 327 if (!pask->refcnt++) ··· 407 412 408 413 static void *hash_bind(const char *name, u32 type, u32 mask) 409 414 { 410 - struct algif_hash_tfm *tfm; 411 - struct crypto_ahash *hash; 412 - 413 - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); 414 - if (!tfm) 415 - return ERR_PTR(-ENOMEM); 416 - 417 - hash = crypto_alloc_ahash(name, type, mask); 418 - if (IS_ERR(hash)) { 419 - kfree(tfm); 420 - return ERR_CAST(hash); 421 - } 422 - 423 - tfm->hash = hash; 424 - 425 - return tfm; 415 + return crypto_alloc_ahash(name, type, mask); 426 416 } 427 417 428 418 static void hash_release(void *private) 429 419 { 430 - struct algif_hash_tfm *tfm = private; 431 - 432 - crypto_free_ahash(tfm->hash); 433 - kfree(tfm); 420 + crypto_free_ahash(private); 434 421 } 435 422 436 423 static int hash_setkey(void *private, const u8 *key, unsigned int keylen) 437 424 { 438 - struct algif_hash_tfm *tfm = private; 439 - int err; 440 - 441 - err = crypto_ahash_setkey(tfm->hash, key, keylen); 442 - tfm->has_key = !err; 443 - 444 - return err; 425 + return crypto_ahash_setkey(private, key, keylen); 445 426 } 446 427 447 428 static void hash_sock_destruct(struct sock *sk) ··· 432 461 433 462 static int hash_accept_parent_nokey(void *private, struct sock *sk) 434 463 { 435 - struct hash_ctx *ctx; 464 + struct crypto_ahash *tfm = 
private; 436 465 struct alg_sock *ask = alg_sk(sk); 437 - struct algif_hash_tfm *tfm = private; 438 - struct crypto_ahash *hash = tfm->hash; 439 - unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); 466 + struct hash_ctx *ctx; 467 + unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm); 440 468 441 469 ctx = sock_kmalloc(sk, len, GFP_KERNEL); 442 470 if (!ctx) ··· 448 478 449 479 ask->private = ctx; 450 480 451 - ahash_request_set_tfm(&ctx->req, hash); 481 + ahash_request_set_tfm(&ctx->req, tfm); 452 482 ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, 453 483 crypto_req_done, &ctx->wait); 454 484 ··· 459 489 460 490 static int hash_accept_parent(void *private, struct sock *sk) 461 491 { 462 - struct algif_hash_tfm *tfm = private; 492 + struct crypto_ahash *tfm = private; 463 493 464 - if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) 494 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 465 495 return -ENOKEY; 466 496 467 497 return hash_accept_parent_nokey(private, sk);
+13 -46
crypto/algif_skcipher.c
··· 38 38 #include <linux/net.h> 39 39 #include <net/sock.h> 40 40 41 - struct skcipher_tfm { 42 - struct crypto_skcipher *skcipher; 43 - bool has_key; 44 - }; 45 - 46 41 static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, 47 42 size_t size) 48 43 { ··· 45 50 struct alg_sock *ask = alg_sk(sk); 46 51 struct sock *psk = ask->parent; 47 52 struct alg_sock *pask = alg_sk(psk); 48 - struct skcipher_tfm *skc = pask->private; 49 - struct crypto_skcipher *tfm = skc->skcipher; 53 + struct crypto_skcipher *tfm = pask->private; 50 54 unsigned ivsize = crypto_skcipher_ivsize(tfm); 51 55 52 56 return af_alg_sendmsg(sock, msg, size, ivsize); ··· 59 65 struct sock *psk = ask->parent; 60 66 struct alg_sock *pask = alg_sk(psk); 61 67 struct af_alg_ctx *ctx = ask->private; 62 - struct skcipher_tfm *skc = pask->private; 63 - struct crypto_skcipher *tfm = skc->skcipher; 68 + struct crypto_skcipher *tfm = pask->private; 64 69 unsigned int bs = crypto_skcipher_blocksize(tfm); 65 70 struct af_alg_async_req *areq; 66 71 int err = 0; ··· 213 220 int err = 0; 214 221 struct sock *psk; 215 222 struct alg_sock *pask; 216 - struct skcipher_tfm *tfm; 223 + struct crypto_skcipher *tfm; 217 224 struct sock *sk = sock->sk; 218 225 struct alg_sock *ask = alg_sk(sk); 219 226 ··· 227 234 228 235 err = -ENOKEY; 229 236 lock_sock_nested(psk, SINGLE_DEPTH_NESTING); 230 - if (!tfm->has_key) 237 + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 231 238 goto unlock; 232 239 233 240 if (!pask->refcnt++) ··· 306 313 307 314 static void *skcipher_bind(const char *name, u32 type, u32 mask) 308 315 { 309 - struct skcipher_tfm *tfm; 310 - struct crypto_skcipher *skcipher; 311 - 312 - tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); 313 - if (!tfm) 314 - return ERR_PTR(-ENOMEM); 315 - 316 - skcipher = crypto_alloc_skcipher(name, type, mask); 317 - if (IS_ERR(skcipher)) { 318 - kfree(tfm); 319 - return ERR_CAST(skcipher); 320 - } 321 - 322 - tfm->skcipher = skcipher; 323 - 324 - return tfm; 316 
+ return crypto_alloc_skcipher(name, type, mask); 325 317 } 326 318 327 319 static void skcipher_release(void *private) 328 320 { 329 - struct skcipher_tfm *tfm = private; 330 - 331 - crypto_free_skcipher(tfm->skcipher); 332 - kfree(tfm); 321 + crypto_free_skcipher(private); 333 322 } 334 323 335 324 static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen) 336 325 { 337 - struct skcipher_tfm *tfm = private; 338 - int err; 339 - 340 - err = crypto_skcipher_setkey(tfm->skcipher, key, keylen); 341 - tfm->has_key = !err; 342 - 343 - return err; 326 + return crypto_skcipher_setkey(private, key, keylen); 344 327 } 345 328 346 329 static void skcipher_sock_destruct(struct sock *sk) ··· 325 356 struct af_alg_ctx *ctx = ask->private; 326 357 struct sock *psk = ask->parent; 327 358 struct alg_sock *pask = alg_sk(psk); 328 - struct skcipher_tfm *skc = pask->private; 329 - struct crypto_skcipher *tfm = skc->skcipher; 359 + struct crypto_skcipher *tfm = pask->private; 330 360 331 361 af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 332 362 sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm)); ··· 337 369 { 338 370 struct af_alg_ctx *ctx; 339 371 struct alg_sock *ask = alg_sk(sk); 340 - struct skcipher_tfm *tfm = private; 341 - struct crypto_skcipher *skcipher = tfm->skcipher; 372 + struct crypto_skcipher *tfm = private; 342 373 unsigned int len = sizeof(*ctx); 343 374 344 375 ctx = sock_kmalloc(sk, len, GFP_KERNEL); 345 376 if (!ctx) 346 377 return -ENOMEM; 347 378 348 - ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher), 379 + ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm), 349 380 GFP_KERNEL); 350 381 if (!ctx->iv) { 351 382 sock_kfree_s(sk, ctx, len); 352 383 return -ENOMEM; 353 384 } 354 385 355 - memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher)); 386 + memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm)); 356 387 357 388 INIT_LIST_HEAD(&ctx->tsgl_list); 358 389 ctx->len = len; ··· 371 404 372 405 static int skcipher_accept_parent(void 
*private, struct sock *sk) 373 406 { 374 - struct skcipher_tfm *tfm = private; 407 + struct crypto_skcipher *tfm = private; 375 408 376 - if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher)) 409 + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 377 410 return -ENOKEY; 378 411 379 412 return skcipher_accept_parent_nokey(private, sk);
+3 -3
crypto/api.c
··· 137 137 if (IS_ERR(larval)) 138 138 return ERR_CAST(larval); 139 139 140 - atomic_set(&larval->alg.cra_refcnt, 2); 140 + refcount_set(&larval->alg.cra_refcnt, 2); 141 141 142 142 down_write(&crypto_alg_sem); 143 143 alg = __crypto_alg_lookup(name, type, mask); ··· 205 205 } 206 206 EXPORT_SYMBOL_GPL(crypto_alg_lookup); 207 207 208 - struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask) 208 + static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, 209 + u32 mask) 209 210 { 210 211 struct crypto_alg *alg; 211 212 ··· 232 231 233 232 return crypto_larval_add(name, type, mask); 234 233 } 235 - EXPORT_SYMBOL_GPL(crypto_larval_lookup); 236 234 237 235 int crypto_probing_notify(unsigned long val, void *v) 238 236 {
+2 -2
crypto/authenc.c
··· 329 329 if (IS_ERR(enc)) 330 330 goto err_free_ahash; 331 331 332 - null = crypto_get_default_null_skcipher2(); 332 + null = crypto_get_default_null_skcipher(); 333 333 err = PTR_ERR(null); 334 334 if (IS_ERR(null)) 335 335 goto err_free_skcipher; ··· 363 363 364 364 crypto_free_ahash(ctx->auth); 365 365 crypto_free_skcipher(ctx->enc); 366 - crypto_put_default_null_skcipher2(); 366 + crypto_put_default_null_skcipher(); 367 367 } 368 368 369 369 static void crypto_authenc_free(struct aead_instance *inst)
+2 -2
crypto/authencesn.c
··· 352 352 if (IS_ERR(enc)) 353 353 goto err_free_ahash; 354 354 355 - null = crypto_get_default_null_skcipher2(); 355 + null = crypto_get_default_null_skcipher(); 356 356 err = PTR_ERR(null); 357 357 if (IS_ERR(null)) 358 358 goto err_free_skcipher; ··· 389 389 390 390 crypto_free_ahash(ctx->auth); 391 391 crypto_free_skcipher(ctx->enc); 392 - crypto_put_default_null_skcipher2(); 392 + crypto_put_default_null_skcipher(); 393 393 } 394 394 395 395 static void crypto_authenc_esn_free(struct aead_instance *inst)
-1
crypto/blkcipher.c
··· 18 18 #include <crypto/internal/skcipher.h> 19 19 #include <crypto/scatterwalk.h> 20 20 #include <linux/errno.h> 21 - #include <linux/hardirq.h> 22 21 #include <linux/kernel.h> 23 22 #include <linux/module.h> 24 23 #include <linux/seq_file.h>
+1 -2
crypto/camellia_generic.c
··· 13 13 * GNU General Public License for more details. 14 14 * 15 15 * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 17 */ 19 18 20 19 /*
+1 -2
crypto/cast5_generic.c
··· 16 16 * any later version. 17 17 * 18 18 * You should have received a copy of the GNU General Public License 19 - * along with this program; if not, write to the Free Software 20 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 19 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 21 20 */ 22 21 23 22
+1 -2
crypto/cast6_generic.c
··· 13 13 * any later version. 14 14 * 15 15 * You should have received a copy of the GNU General Public License 16 - * along with this program; if not, write to the Free Software 17 - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 17 */ 19 18 20 19
+13 -20
crypto/chacha20_generic.c
··· 9 9 * (at your option) any later version. 10 10 */ 11 11 12 + #include <asm/unaligned.h> 12 13 #include <crypto/algapi.h> 13 14 #include <crypto/chacha20.h> 14 15 #include <crypto/internal/skcipher.h> 15 16 #include <linux/module.h> 16 17 17 - static inline u32 le32_to_cpuvp(const void *p) 18 - { 19 - return le32_to_cpup(p); 20 - } 21 - 22 18 static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src, 23 19 unsigned int bytes) 24 20 { 25 - u8 stream[CHACHA20_BLOCK_SIZE]; 21 + u32 stream[CHACHA20_BLOCK_WORDS]; 26 22 27 23 if (dst != src) 28 24 memcpy(dst, src, bytes); 29 25 30 26 while (bytes >= CHACHA20_BLOCK_SIZE) { 31 27 chacha20_block(state, stream); 32 - crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE); 28 + crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE); 33 29 bytes -= CHACHA20_BLOCK_SIZE; 34 30 dst += CHACHA20_BLOCK_SIZE; 35 31 } 36 32 if (bytes) { 37 33 chacha20_block(state, stream); 38 - crypto_xor(dst, stream, bytes); 34 + crypto_xor(dst, (const u8 *)stream, bytes); 39 35 } 40 36 } 41 37 42 38 void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv) 43 39 { 44 - static const char constant[16] = "expand 32-byte k"; 45 - 46 - state[0] = le32_to_cpuvp(constant + 0); 47 - state[1] = le32_to_cpuvp(constant + 4); 48 - state[2] = le32_to_cpuvp(constant + 8); 49 - state[3] = le32_to_cpuvp(constant + 12); 40 + state[0] = 0x61707865; /* "expa" */ 41 + state[1] = 0x3320646e; /* "nd 3" */ 42 + state[2] = 0x79622d32; /* "2-by" */ 43 + state[3] = 0x6b206574; /* "te k" */ 50 44 state[4] = ctx->key[0]; 51 45 state[5] = ctx->key[1]; 52 46 state[6] = ctx->key[2]; ··· 49 55 state[9] = ctx->key[5]; 50 56 state[10] = ctx->key[6]; 51 57 state[11] = ctx->key[7]; 52 - state[12] = le32_to_cpuvp(iv + 0); 53 - state[13] = le32_to_cpuvp(iv + 4); 54 - state[14] = le32_to_cpuvp(iv + 8); 55 - state[15] = le32_to_cpuvp(iv + 12); 58 + state[12] = get_unaligned_le32(iv + 0); 59 + state[13] = get_unaligned_le32(iv + 4); 60 + state[14] = get_unaligned_le32(iv + 
8); 61 + state[15] = get_unaligned_le32(iv + 12); 56 62 } 57 63 EXPORT_SYMBOL_GPL(crypto_chacha20_init); 58 64 ··· 66 72 return -EINVAL; 67 73 68 74 for (i = 0; i < ARRAY_SIZE(ctx->key); i++) 69 - ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32)); 75 + ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); 70 76 71 77 return 0; 72 78 } ··· 105 111 .base.cra_priority = 100, 106 112 .base.cra_blocksize = 1, 107 113 .base.cra_ctxsize = sizeof(struct chacha20_ctx), 108 - .base.cra_alignmask = sizeof(u32) - 1, 109 114 .base.cra_module = THIS_MODULE, 110 115 111 116 .min_keysize = CHACHA20_KEY_SIZE,
+1
crypto/crc32_generic.c
··· 133 133 .cra_name = "crc32", 134 134 .cra_driver_name = "crc32-generic", 135 135 .cra_priority = 100, 136 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 136 137 .cra_blocksize = CHKSUM_BLOCK_SIZE, 137 138 .cra_ctxsize = sizeof(u32), 138 139 .cra_module = THIS_MODULE,
+1
crypto/crc32c_generic.c
··· 146 146 .cra_name = "crc32c", 147 147 .cra_driver_name = "crc32c-generic", 148 148 .cra_priority = 100, 149 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 149 150 .cra_blocksize = CHKSUM_BLOCK_SIZE, 150 151 .cra_alignmask = 3, 151 152 .cra_ctxsize = sizeof(struct chksum_ctx),
+10 -7
crypto/cryptd.c
··· 32 32 #include <linux/sched.h> 33 33 #include <linux/slab.h> 34 34 35 - #define CRYPTD_MAX_CPU_QLEN 1000 35 + static unsigned int cryptd_max_cpu_qlen = 1000; 36 + module_param(cryptd_max_cpu_qlen, uint, 0); 37 + MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); 36 38 37 39 struct cryptd_cpu_queue { 38 40 struct crypto_queue queue; ··· 118 116 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); 119 117 INIT_WORK(&cpu_queue->work, cryptd_queue_worker); 120 118 } 119 + pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); 121 120 return 0; 122 121 } 123 122 ··· 896 893 if (err) 897 894 goto out_free_inst; 898 895 899 - type = CRYPTO_ALG_ASYNC; 900 - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) 901 - type |= CRYPTO_ALG_INTERNAL; 902 - inst->alg.halg.base.cra_flags = type; 896 + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | 897 + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | 898 + CRYPTO_ALG_OPTIONAL_KEY)); 903 899 904 900 inst->alg.halg.digestsize = salg->digestsize; 905 901 inst->alg.halg.statesize = salg->statesize; ··· 913 911 inst->alg.finup = cryptd_hash_finup_enqueue; 914 912 inst->alg.export = cryptd_hash_export; 915 913 inst->alg.import = cryptd_hash_import; 916 - inst->alg.setkey = cryptd_hash_setkey; 914 + if (crypto_shash_alg_has_setkey(salg)) 915 + inst->alg.setkey = cryptd_hash_setkey; 917 916 inst->alg.digest = cryptd_hash_digest_enqueue; 918 917 919 918 err = ahash_register_instance(tmpl, inst); ··· 1375 1372 { 1376 1373 int err; 1377 1374 1378 - err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); 1375 + err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); 1379 1376 if (err) 1380 1377 return err; 1381 1378
+2 -2
crypto/crypto_user.c
··· 169 169 ualg->cru_type = 0; 170 170 ualg->cru_mask = 0; 171 171 ualg->cru_flags = alg->cra_flags; 172 - ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); 172 + ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); 173 173 174 174 if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) 175 175 goto nla_put_failure; ··· 387 387 goto drop_alg; 388 388 389 389 err = -EBUSY; 390 - if (atomic_read(&alg->cra_refcnt) > 2) 390 + if (refcount_read(&alg->cra_refcnt) > 2) 391 391 goto drop_alg; 392 392 393 393 err = crypto_unregister_instance((struct crypto_instance *)alg);
+1 -1
crypto/ecc.c
··· 964 964 * DRBG with a security strength of 256. 965 965 */ 966 966 if (crypto_get_default_rng()) 967 - err = -EFAULT; 967 + return -EFAULT; 968 968 969 969 err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes); 970 970 crypto_put_default_rng();
-5
crypto/echainiv.c
··· 118 118 struct rtattr **tb) 119 119 { 120 120 struct aead_instance *inst; 121 - struct crypto_aead_spawn *spawn; 122 - struct aead_alg *alg; 123 121 int err; 124 122 125 123 inst = aead_geniv_alloc(tmpl, tb, 0, 0); 126 124 127 125 if (IS_ERR(inst)) 128 126 return PTR_ERR(inst); 129 - 130 - spawn = aead_instance_ctx(inst); 131 - alg = crypto_spawn_aead_alg(spawn); 132 127 133 128 err = -EINVAL; 134 129 if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
+2 -2
crypto/gcm.c
··· 1101 1101 if (IS_ERR(aead)) 1102 1102 return PTR_ERR(aead); 1103 1103 1104 - null = crypto_get_default_null_skcipher2(); 1104 + null = crypto_get_default_null_skcipher(); 1105 1105 err = PTR_ERR(null); 1106 1106 if (IS_ERR(null)) 1107 1107 goto err_free_aead; ··· 1129 1129 struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); 1130 1130 1131 1131 crypto_free_aead(ctx->child); 1132 - crypto_put_default_null_skcipher2(); 1132 + crypto_put_default_null_skcipher(); 1133 1133 } 1134 1134 1135 1135 static void crypto_rfc4543_free(struct aead_instance *inst)
-2
crypto/gf128mul.c
··· 160 160 { 161 161 u64 a = le64_to_cpu(x->a); 162 162 u64 b = le64_to_cpu(x->b); 163 - 164 - /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */ 165 163 u64 _tt = gf128mul_table_be[a >> 56]; 166 164 167 165 r->a = cpu_to_le64((a << 8) | (b >> 56));
-6
crypto/ghash-generic.c
··· 56 56 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 57 57 u8 *dst = dctx->buffer; 58 58 59 - if (!ctx->gf128) 60 - return -ENOKEY; 61 - 62 59 if (dctx->bytes) { 63 60 int n = min(srclen, dctx->bytes); 64 61 u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); ··· 107 110 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); 108 111 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); 109 112 u8 *buf = dctx->buffer; 110 - 111 - if (!ctx->gf128) 112 - return -ENOKEY; 113 113 114 114 ghash_flush(ctx, dctx); 115 115 memcpy(dst, buf, GHASH_BLOCK_SIZE);
+2 -6
crypto/internal.h
··· 30 30 enum { 31 31 CRYPTO_MSG_ALG_REQUEST, 32 32 CRYPTO_MSG_ALG_REGISTER, 33 - CRYPTO_MSG_ALG_UNREGISTER, 34 - CRYPTO_MSG_TMPL_REGISTER, 35 - CRYPTO_MSG_TMPL_UNREGISTER, 36 33 }; 37 34 38 35 struct crypto_instance; ··· 75 78 76 79 struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask); 77 80 void crypto_larval_kill(struct crypto_alg *alg); 78 - struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask); 79 81 void crypto_alg_tested(const char *name, int err); 80 82 81 83 void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, ··· 102 106 103 107 static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) 104 108 { 105 - atomic_inc(&alg->cra_refcnt); 109 + refcount_inc(&alg->cra_refcnt); 106 110 return alg; 107 111 } 108 112 109 113 static inline void crypto_alg_put(struct crypto_alg *alg) 110 114 { 111 - if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) 115 + if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) 112 116 alg->cra_destroy(alg); 113 117 } 114 118
+2 -2
crypto/keywrap.c
··· 188 188 } 189 189 190 190 /* Perform authentication check */ 191 - if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6)) 191 + if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL)) 192 192 ret = -EBADMSG; 193 193 194 194 memzero_explicit(&block, sizeof(struct crypto_kw_block)); ··· 221 221 * Place the predefined IV into block A -- for encrypt, the caller 222 222 * does not need to provide an IV, but he needs to fetch the final IV. 223 223 */ 224 - block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6); 224 + block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL); 225 225 226 226 /* 227 227 * src scatterlist is read-only. dst scatterlist is r/w. During the
+5 -6
crypto/mcryptd.c
··· 26 26 #include <linux/sched.h> 27 27 #include <linux/sched/stat.h> 28 28 #include <linux/slab.h> 29 - #include <linux/hardirq.h> 30 29 31 30 #define MCRYPTD_MAX_CPU_QLEN 100 32 31 #define MCRYPTD_BATCH 9 ··· 516 517 if (err) 517 518 goto out_free_inst; 518 519 519 - type = CRYPTO_ALG_ASYNC; 520 - if (alg->cra_flags & CRYPTO_ALG_INTERNAL) 521 - type |= CRYPTO_ALG_INTERNAL; 522 - inst->alg.halg.base.cra_flags = type; 520 + inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | 521 + (alg->cra_flags & (CRYPTO_ALG_INTERNAL | 522 + CRYPTO_ALG_OPTIONAL_KEY)); 523 523 524 524 inst->alg.halg.digestsize = halg->digestsize; 525 525 inst->alg.halg.statesize = halg->statesize; ··· 533 535 inst->alg.finup = mcryptd_hash_finup_enqueue; 534 536 inst->alg.export = mcryptd_hash_export; 535 537 inst->alg.import = mcryptd_hash_import; 536 - inst->alg.setkey = mcryptd_hash_setkey; 538 + if (crypto_hash_alg_has_setkey(halg)) 539 + inst->alg.setkey = mcryptd_hash_setkey; 537 540 inst->alg.digest = mcryptd_hash_digest_enqueue; 538 541 539 542 err = ahash_register_instance(tmpl, inst);
+9 -18
crypto/poly1305_generic.c
··· 47 47 } 48 48 EXPORT_SYMBOL_GPL(crypto_poly1305_init); 49 49 50 - int crypto_poly1305_setkey(struct crypto_shash *tfm, 51 - const u8 *key, unsigned int keylen) 52 - { 53 - /* Poly1305 requires a unique key for each tag, which implies that 54 - * we can't set it on the tfm that gets accessed by multiple users 55 - * simultaneously. Instead we expect the key as the first 32 bytes in 56 - * the update() call. */ 57 - return -ENOTSUPP; 58 - } 59 - EXPORT_SYMBOL_GPL(crypto_poly1305_setkey); 60 - 61 50 static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key) 62 51 { 63 52 /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ ··· 65 76 dctx->s[3] = get_unaligned_le32(key + 12); 66 77 } 67 78 79 + /* 80 + * Poly1305 requires a unique key for each tag, which implies that we can't set 81 + * it on the tfm that gets accessed by multiple users simultaneously. Instead we 82 + * expect the key as the first 32 bytes in the update() call. 83 + */ 68 84 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, 69 85 const u8 *src, unsigned int srclen) 70 86 { ··· 204 210 int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) 205 211 { 206 212 struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); 207 - __le32 *mac = (__le32 *)dst; 208 213 u32 h0, h1, h2, h3, h4; 209 214 u32 g0, g1, g2, g3, g4; 210 215 u32 mask; ··· 260 267 h3 = (h3 >> 18) | (h4 << 8); 261 268 262 269 /* mac = (h + s) % (2^128) */ 263 - f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f); 264 - f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f); 265 - f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f); 266 - f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f); 270 + f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0); 271 + f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4); 272 + f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8); 273 + f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12); 267 274 268 275 return 
0; 269 276 } ··· 274 281 .init = crypto_poly1305_init, 275 282 .update = crypto_poly1305_update, 276 283 .final = crypto_poly1305_final, 277 - .setkey = crypto_poly1305_setkey, 278 284 .descsize = sizeof(struct poly1305_desc_ctx), 279 285 .base = { 280 286 .cra_name = "poly1305", 281 287 .cra_driver_name = "poly1305-generic", 282 288 .cra_priority = 100, 283 289 .cra_flags = CRYPTO_ALG_TYPE_SHASH, 284 - .cra_alignmask = sizeof(u32) - 1, 285 290 .cra_blocksize = POLY1305_BLOCK_SIZE, 286 291 .cra_module = THIS_MODULE, 287 292 },
+1 -1
crypto/proc.c
··· 46 46 seq_printf(m, "driver : %s\n", alg->cra_driver_name); 47 47 seq_printf(m, "module : %s\n", module_name(alg->cra_module)); 48 48 seq_printf(m, "priority : %d\n", alg->cra_priority); 49 - seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt)); 49 + seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt)); 50 50 seq_printf(m, "selftest : %s\n", 51 51 (alg->cra_flags & CRYPTO_ALG_TESTED) ? 52 52 "passed" : "unknown");
+105 -143
crypto/salsa20_generic.c
··· 19 19 * 20 20 */ 21 21 22 - #include <linux/init.h> 22 + #include <asm/unaligned.h> 23 + #include <crypto/internal/skcipher.h> 24 + #include <crypto/salsa20.h> 23 25 #include <linux/module.h> 24 - #include <linux/errno.h> 25 - #include <linux/crypto.h> 26 - #include <linux/types.h> 27 - #include <linux/bitops.h> 28 - #include <crypto/algapi.h> 29 - #include <asm/byteorder.h> 30 26 31 - #define SALSA20_IV_SIZE 8U 32 - #define SALSA20_MIN_KEY_SIZE 16U 33 - #define SALSA20_MAX_KEY_SIZE 32U 34 - 35 - /* 36 - * Start of code taken from D. J. Bernstein's reference implementation. 37 - * With some modifications and optimizations made to suit our needs. 38 - */ 39 - 40 - /* 41 - salsa20-ref.c version 20051118 42 - D. J. Bernstein 43 - Public domain. 44 - */ 45 - 46 - #define U32TO8_LITTLE(p, v) \ 47 - { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \ 48 - (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; } 49 - #define U8TO32_LITTLE(p) \ 50 - (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \ 51 - ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) ) 52 - 53 - struct salsa20_ctx 54 - { 55 - u32 input[16]; 56 - }; 57 - 58 - static void salsa20_wordtobyte(u8 output[64], const u32 input[16]) 27 + static void salsa20_block(u32 *state, __le32 *stream) 59 28 { 60 29 u32 x[16]; 61 30 int i; 62 31 63 - memcpy(x, input, sizeof(x)); 64 - for (i = 20; i > 0; i -= 2) { 32 + memcpy(x, state, sizeof(x)); 33 + 34 + for (i = 0; i < 20; i += 2) { 65 35 x[ 4] ^= rol32((x[ 0] + x[12]), 7); 66 36 x[ 8] ^= rol32((x[ 4] + x[ 0]), 9); 67 37 x[12] ^= rol32((x[ 8] + x[ 4]), 13); ··· 65 95 x[14] ^= rol32((x[13] + x[12]), 13); 66 96 x[15] ^= rol32((x[14] + x[13]), 18); 67 97 } 68 - for (i = 0; i < 16; ++i) 69 - x[i] += input[i]; 70 - for (i = 0; i < 16; ++i) 71 - U32TO8_LITTLE(output + 4 * i,x[i]); 98 + 99 + for (i = 0; i < 16; i++) 100 + stream[i] = cpu_to_le32(x[i] + state[i]); 101 + 102 + if (++state[8] == 0) 103 + state[9]++; 72 104 } 73 105 74 - static const char sigma[16] = "expand 32-byte 
k"; 75 - static const char tau[16] = "expand 16-byte k"; 76 - 77 - static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) 106 + static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src, 107 + unsigned int bytes) 78 108 { 79 - const char *constants; 80 - 81 - ctx->input[1] = U8TO32_LITTLE(k + 0); 82 - ctx->input[2] = U8TO32_LITTLE(k + 4); 83 - ctx->input[3] = U8TO32_LITTLE(k + 8); 84 - ctx->input[4] = U8TO32_LITTLE(k + 12); 85 - if (kbytes == 32) { /* recommended */ 86 - k += 16; 87 - constants = sigma; 88 - } else { /* kbytes == 16 */ 89 - constants = tau; 90 - } 91 - ctx->input[11] = U8TO32_LITTLE(k + 0); 92 - ctx->input[12] = U8TO32_LITTLE(k + 4); 93 - ctx->input[13] = U8TO32_LITTLE(k + 8); 94 - ctx->input[14] = U8TO32_LITTLE(k + 12); 95 - ctx->input[0] = U8TO32_LITTLE(constants + 0); 96 - ctx->input[5] = U8TO32_LITTLE(constants + 4); 97 - ctx->input[10] = U8TO32_LITTLE(constants + 8); 98 - ctx->input[15] = U8TO32_LITTLE(constants + 12); 99 - } 100 - 101 - static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) 102 - { 103 - ctx->input[6] = U8TO32_LITTLE(iv + 0); 104 - ctx->input[7] = U8TO32_LITTLE(iv + 4); 105 - ctx->input[8] = 0; 106 - ctx->input[9] = 0; 107 - } 108 - 109 - static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, 110 - const u8 *src, unsigned int bytes) 111 - { 112 - u8 buf[64]; 109 + __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)]; 113 110 114 111 if (dst != src) 115 112 memcpy(dst, src, bytes); 116 113 117 - while (bytes) { 118 - salsa20_wordtobyte(buf, ctx->input); 119 - 120 - ctx->input[8]++; 121 - if (!ctx->input[8]) 122 - ctx->input[9]++; 123 - 124 - if (bytes <= 64) { 125 - crypto_xor(dst, buf, bytes); 126 - return; 127 - } 128 - 129 - crypto_xor(dst, buf, 64); 130 - bytes -= 64; 131 - dst += 64; 114 + while (bytes >= SALSA20_BLOCK_SIZE) { 115 + salsa20_block(state, stream); 116 + crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE); 117 + bytes -= SALSA20_BLOCK_SIZE; 118 + dst += 
SALSA20_BLOCK_SIZE; 119 + } 120 + if (bytes) { 121 + salsa20_block(state, stream); 122 + crypto_xor(dst, (const u8 *)stream, bytes); 132 123 } 133 124 } 134 125 135 - /* 136 - * End of code taken from D. J. Bernstein's reference implementation. 137 - */ 138 - 139 - static int setkey(struct crypto_tfm *tfm, const u8 *key, 140 - unsigned int keysize) 126 + void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, 127 + const u8 *iv) 141 128 { 142 - struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); 143 - salsa20_keysetup(ctx, key, keysize); 129 + memcpy(state, ctx->initial_state, sizeof(ctx->initial_state)); 130 + state[6] = get_unaligned_le32(iv + 0); 131 + state[7] = get_unaligned_le32(iv + 4); 132 + } 133 + EXPORT_SYMBOL_GPL(crypto_salsa20_init); 134 + 135 + int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, 136 + unsigned int keysize) 137 + { 138 + static const char sigma[16] = "expand 32-byte k"; 139 + static const char tau[16] = "expand 16-byte k"; 140 + struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); 141 + const char *constants; 142 + 143 + if (keysize != SALSA20_MIN_KEY_SIZE && 144 + keysize != SALSA20_MAX_KEY_SIZE) 145 + return -EINVAL; 146 + 147 + ctx->initial_state[1] = get_unaligned_le32(key + 0); 148 + ctx->initial_state[2] = get_unaligned_le32(key + 4); 149 + ctx->initial_state[3] = get_unaligned_le32(key + 8); 150 + ctx->initial_state[4] = get_unaligned_le32(key + 12); 151 + if (keysize == 32) { /* recommended */ 152 + key += 16; 153 + constants = sigma; 154 + } else { /* keysize == 16 */ 155 + constants = tau; 156 + } 157 + ctx->initial_state[11] = get_unaligned_le32(key + 0); 158 + ctx->initial_state[12] = get_unaligned_le32(key + 4); 159 + ctx->initial_state[13] = get_unaligned_le32(key + 8); 160 + ctx->initial_state[14] = get_unaligned_le32(key + 12); 161 + ctx->initial_state[0] = get_unaligned_le32(constants + 0); 162 + ctx->initial_state[5] = get_unaligned_le32(constants + 4); 163 + ctx->initial_state[10] = 
get_unaligned_le32(constants + 8); 164 + ctx->initial_state[15] = get_unaligned_le32(constants + 12); 165 + 166 + /* space for the nonce; it will be overridden for each request */ 167 + ctx->initial_state[6] = 0; 168 + ctx->initial_state[7] = 0; 169 + 170 + /* initial block number */ 171 + ctx->initial_state[8] = 0; 172 + ctx->initial_state[9] = 0; 173 + 144 174 return 0; 145 175 } 176 + EXPORT_SYMBOL_GPL(crypto_salsa20_setkey); 146 177 147 - static int encrypt(struct blkcipher_desc *desc, 148 - struct scatterlist *dst, struct scatterlist *src, 149 - unsigned int nbytes) 178 + static int salsa20_crypt(struct skcipher_request *req) 150 179 { 151 - struct blkcipher_walk walk; 152 - struct crypto_blkcipher *tfm = desc->tfm; 153 - struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); 180 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 181 + const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm); 182 + struct skcipher_walk walk; 183 + u32 state[16]; 154 184 int err; 155 185 156 - blkcipher_walk_init(&walk, dst, src, nbytes); 157 - err = blkcipher_walk_virt_block(desc, &walk, 64); 186 + err = skcipher_walk_virt(&walk, req, true); 158 187 159 - salsa20_ivsetup(ctx, walk.iv); 188 + crypto_salsa20_init(state, ctx, walk.iv); 160 189 161 - while (walk.nbytes >= 64) { 162 - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, 163 - walk.src.virt.addr, 164 - walk.nbytes - (walk.nbytes % 64)); 165 - err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); 166 - } 190 + while (walk.nbytes > 0) { 191 + unsigned int nbytes = walk.nbytes; 167 192 168 - if (walk.nbytes) { 169 - salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, 170 - walk.src.virt.addr, walk.nbytes); 171 - err = blkcipher_walk_done(desc, &walk, 0); 193 + if (nbytes < walk.total) 194 + nbytes = round_down(nbytes, walk.stride); 195 + 196 + salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, 197 + nbytes); 198 + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); 172 199 } 173 200 174 201 return err; 
175 202 } 176 203 177 - static struct crypto_alg alg = { 178 - .cra_name = "salsa20", 179 - .cra_driver_name = "salsa20-generic", 180 - .cra_priority = 100, 181 - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, 182 - .cra_type = &crypto_blkcipher_type, 183 - .cra_blocksize = 1, 184 - .cra_ctxsize = sizeof(struct salsa20_ctx), 185 - .cra_alignmask = 3, 186 - .cra_module = THIS_MODULE, 187 - .cra_u = { 188 - .blkcipher = { 189 - .setkey = setkey, 190 - .encrypt = encrypt, 191 - .decrypt = encrypt, 192 - .min_keysize = SALSA20_MIN_KEY_SIZE, 193 - .max_keysize = SALSA20_MAX_KEY_SIZE, 194 - .ivsize = SALSA20_IV_SIZE, 195 - } 196 - } 204 + static struct skcipher_alg alg = { 205 + .base.cra_name = "salsa20", 206 + .base.cra_driver_name = "salsa20-generic", 207 + .base.cra_priority = 100, 208 + .base.cra_blocksize = 1, 209 + .base.cra_ctxsize = sizeof(struct salsa20_ctx), 210 + .base.cra_module = THIS_MODULE, 211 + 212 + .min_keysize = SALSA20_MIN_KEY_SIZE, 213 + .max_keysize = SALSA20_MAX_KEY_SIZE, 214 + .ivsize = SALSA20_IV_SIZE, 215 + .chunksize = SALSA20_BLOCK_SIZE, 216 + .setkey = crypto_salsa20_setkey, 217 + .encrypt = salsa20_crypt, 218 + .decrypt = salsa20_crypt, 197 219 }; 198 220 199 221 static int __init salsa20_generic_mod_init(void) 200 222 { 201 - return crypto_register_alg(&alg); 223 + return crypto_register_skcipher(&alg); 202 224 } 203 225 204 226 static void __exit salsa20_generic_mod_fini(void) 205 227 { 206 - crypto_unregister_alg(&alg); 228 + crypto_unregister_skcipher(&alg); 207 229 } 208 230 209 231 module_init(salsa20_generic_mod_init);
-5
crypto/seqiv.c
··· 144 144 static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb) 145 145 { 146 146 struct aead_instance *inst; 147 - struct crypto_aead_spawn *spawn; 148 - struct aead_alg *alg; 149 147 int err; 150 148 151 149 inst = aead_geniv_alloc(tmpl, tb, 0, 0); 152 150 153 151 if (IS_ERR(inst)) 154 152 return PTR_ERR(inst); 155 - 156 - spawn = aead_instance_ctx(inst); 157 - alg = crypto_spawn_aead_alg(spawn); 158 153 159 154 err = -EINVAL; 160 155 if (inst->alg.ivsize != sizeof(u64))
+163 -167
crypto/sha3_generic.c
··· 5 5 * http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf 6 6 * 7 7 * SHA-3 code by Jeff Garzik <jeff@garzik.org> 8 + * Ard Biesheuvel <ard.biesheuvel@linaro.org> 8 9 * 9 10 * This program is free software; you can redistribute it and/or modify it 10 11 * under the terms of the GNU General Public License as published by the Free ··· 18 17 #include <linux/module.h> 19 18 #include <linux/types.h> 20 19 #include <crypto/sha3.h> 21 - #include <asm/byteorder.h> 20 + #include <asm/unaligned.h> 22 21 23 22 #define KECCAK_ROUNDS 24 24 - 25 - #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) 26 23 27 24 static const u64 keccakf_rndc[24] = { 28 25 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, ··· 33 34 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL 34 35 }; 35 36 36 - static const int keccakf_rotc[24] = { 37 - 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 38 - 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44 39 - }; 40 - 41 - static const int keccakf_piln[24] = { 42 - 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 43 - 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1 44 - }; 45 - 46 37 /* update the state with given number of rounds */ 47 38 48 - static void keccakf(u64 st[25]) 39 + static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25]) 49 40 { 50 - int i, j, round; 51 - u64 t, bc[5]; 41 + u64 t[5], tt, bc[5]; 42 + int round; 52 43 53 44 for (round = 0; round < KECCAK_ROUNDS; round++) { 54 45 55 46 /* Theta */ 56 - for (i = 0; i < 5; i++) 57 - bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] 58 - ^ st[i + 20]; 47 + bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; 48 + bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; 49 + bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; 50 + bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; 51 + bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; 59 52 60 - for (i = 0; i < 5; i++) { 61 - t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); 62 - for (j = 0; j < 25; j += 5) 63 - st[j + 
i] ^= t; 64 - } 53 + t[0] = bc[4] ^ rol64(bc[1], 1); 54 + t[1] = bc[0] ^ rol64(bc[2], 1); 55 + t[2] = bc[1] ^ rol64(bc[3], 1); 56 + t[3] = bc[2] ^ rol64(bc[4], 1); 57 + t[4] = bc[3] ^ rol64(bc[0], 1); 58 + 59 + st[0] ^= t[0]; 65 60 66 61 /* Rho Pi */ 67 - t = st[1]; 68 - for (i = 0; i < 24; i++) { 69 - j = keccakf_piln[i]; 70 - bc[0] = st[j]; 71 - st[j] = ROTL64(t, keccakf_rotc[i]); 72 - t = bc[0]; 73 - } 62 + tt = st[1]; 63 + st[ 1] = rol64(st[ 6] ^ t[1], 44); 64 + st[ 6] = rol64(st[ 9] ^ t[4], 20); 65 + st[ 9] = rol64(st[22] ^ t[2], 61); 66 + st[22] = rol64(st[14] ^ t[4], 39); 67 + st[14] = rol64(st[20] ^ t[0], 18); 68 + st[20] = rol64(st[ 2] ^ t[2], 62); 69 + st[ 2] = rol64(st[12] ^ t[2], 43); 70 + st[12] = rol64(st[13] ^ t[3], 25); 71 + st[13] = rol64(st[19] ^ t[4], 8); 72 + st[19] = rol64(st[23] ^ t[3], 56); 73 + st[23] = rol64(st[15] ^ t[0], 41); 74 + st[15] = rol64(st[ 4] ^ t[4], 27); 75 + st[ 4] = rol64(st[24] ^ t[4], 14); 76 + st[24] = rol64(st[21] ^ t[1], 2); 77 + st[21] = rol64(st[ 8] ^ t[3], 55); 78 + st[ 8] = rol64(st[16] ^ t[1], 45); 79 + st[16] = rol64(st[ 5] ^ t[0], 36); 80 + st[ 5] = rol64(st[ 3] ^ t[3], 28); 81 + st[ 3] = rol64(st[18] ^ t[3], 21); 82 + st[18] = rol64(st[17] ^ t[2], 15); 83 + st[17] = rol64(st[11] ^ t[1], 10); 84 + st[11] = rol64(st[ 7] ^ t[2], 6); 85 + st[ 7] = rol64(st[10] ^ t[0], 3); 86 + st[10] = rol64( tt ^ t[1], 1); 74 87 75 88 /* Chi */ 76 - for (j = 0; j < 25; j += 5) { 77 - for (i = 0; i < 5; i++) 78 - bc[i] = st[j + i]; 79 - for (i = 0; i < 5; i++) 80 - st[j + i] ^= (~bc[(i + 1) % 5]) & 81 - bc[(i + 2) % 5]; 82 - } 89 + bc[ 0] = ~st[ 1] & st[ 2]; 90 + bc[ 1] = ~st[ 2] & st[ 3]; 91 + bc[ 2] = ~st[ 3] & st[ 4]; 92 + bc[ 3] = ~st[ 4] & st[ 0]; 93 + bc[ 4] = ~st[ 0] & st[ 1]; 94 + st[ 0] ^= bc[ 0]; 95 + st[ 1] ^= bc[ 1]; 96 + st[ 2] ^= bc[ 2]; 97 + st[ 3] ^= bc[ 3]; 98 + st[ 4] ^= bc[ 4]; 99 + 100 + bc[ 0] = ~st[ 6] & st[ 7]; 101 + bc[ 1] = ~st[ 7] & st[ 8]; 102 + bc[ 2] = ~st[ 8] & st[ 9]; 103 + bc[ 3] = ~st[ 9] & st[ 5]; 
104 + bc[ 4] = ~st[ 5] & st[ 6]; 105 + st[ 5] ^= bc[ 0]; 106 + st[ 6] ^= bc[ 1]; 107 + st[ 7] ^= bc[ 2]; 108 + st[ 8] ^= bc[ 3]; 109 + st[ 9] ^= bc[ 4]; 110 + 111 + bc[ 0] = ~st[11] & st[12]; 112 + bc[ 1] = ~st[12] & st[13]; 113 + bc[ 2] = ~st[13] & st[14]; 114 + bc[ 3] = ~st[14] & st[10]; 115 + bc[ 4] = ~st[10] & st[11]; 116 + st[10] ^= bc[ 0]; 117 + st[11] ^= bc[ 1]; 118 + st[12] ^= bc[ 2]; 119 + st[13] ^= bc[ 3]; 120 + st[14] ^= bc[ 4]; 121 + 122 + bc[ 0] = ~st[16] & st[17]; 123 + bc[ 1] = ~st[17] & st[18]; 124 + bc[ 2] = ~st[18] & st[19]; 125 + bc[ 3] = ~st[19] & st[15]; 126 + bc[ 4] = ~st[15] & st[16]; 127 + st[15] ^= bc[ 0]; 128 + st[16] ^= bc[ 1]; 129 + st[17] ^= bc[ 2]; 130 + st[18] ^= bc[ 3]; 131 + st[19] ^= bc[ 4]; 132 + 133 + bc[ 0] = ~st[21] & st[22]; 134 + bc[ 1] = ~st[22] & st[23]; 135 + bc[ 2] = ~st[23] & st[24]; 136 + bc[ 3] = ~st[24] & st[20]; 137 + bc[ 4] = ~st[20] & st[21]; 138 + st[20] ^= bc[ 0]; 139 + st[21] ^= bc[ 1]; 140 + st[22] ^= bc[ 2]; 141 + st[23] ^= bc[ 3]; 142 + st[24] ^= bc[ 4]; 83 143 84 144 /* Iota */ 85 145 st[0] ^= keccakf_rndc[round]; 86 146 } 87 147 } 88 148 89 - static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz) 149 + int crypto_sha3_init(struct shash_desc *desc) 90 150 { 91 - memset(sctx, 0, sizeof(*sctx)); 92 - sctx->md_len = digest_sz; 93 - sctx->rsiz = 200 - 2 * digest_sz; 151 + struct sha3_state *sctx = shash_desc_ctx(desc); 152 + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); 153 + 154 + sctx->rsiz = 200 - 2 * digest_size; 94 155 sctx->rsizw = sctx->rsiz / 8; 95 - } 156 + sctx->partial = 0; 96 157 97 - static int sha3_224_init(struct shash_desc *desc) 98 - { 99 - struct sha3_state *sctx = shash_desc_ctx(desc); 100 - 101 - sha3_init(sctx, SHA3_224_DIGEST_SIZE); 158 + memset(sctx->st, 0, sizeof(sctx->st)); 102 159 return 0; 103 160 } 161 + EXPORT_SYMBOL(crypto_sha3_init); 104 162 105 - static int sha3_256_init(struct shash_desc *desc) 106 - { 107 - struct sha3_state *sctx = 
shash_desc_ctx(desc); 108 - 109 - sha3_init(sctx, SHA3_256_DIGEST_SIZE); 110 - return 0; 111 - } 112 - 113 - static int sha3_384_init(struct shash_desc *desc) 114 - { 115 - struct sha3_state *sctx = shash_desc_ctx(desc); 116 - 117 - sha3_init(sctx, SHA3_384_DIGEST_SIZE); 118 - return 0; 119 - } 120 - 121 - static int sha3_512_init(struct shash_desc *desc) 122 - { 123 - struct sha3_state *sctx = shash_desc_ctx(desc); 124 - 125 - sha3_init(sctx, SHA3_512_DIGEST_SIZE); 126 - return 0; 127 - } 128 - 129 - static int sha3_update(struct shash_desc *desc, const u8 *data, 163 + int crypto_sha3_update(struct shash_desc *desc, const u8 *data, 130 164 unsigned int len) 131 165 { 132 166 struct sha3_state *sctx = shash_desc_ctx(desc); ··· 181 149 unsigned int i; 182 150 183 151 for (i = 0; i < sctx->rsizw; i++) 184 - sctx->st[i] ^= ((u64 *) src)[i]; 152 + sctx->st[i] ^= get_unaligned_le64(src + 8 * i); 185 153 keccakf(sctx->st); 186 154 187 155 done += sctx->rsiz; ··· 195 163 196 164 return 0; 197 165 } 166 + EXPORT_SYMBOL(crypto_sha3_update); 198 167 199 - static int sha3_final(struct shash_desc *desc, u8 *out) 168 + int crypto_sha3_final(struct shash_desc *desc, u8 *out) 200 169 { 201 170 struct sha3_state *sctx = shash_desc_ctx(desc); 202 171 unsigned int i, inlen = sctx->partial; 172 + unsigned int digest_size = crypto_shash_digestsize(desc->tfm); 173 + __le64 *digest = (__le64 *)out; 203 174 204 175 sctx->buf[inlen++] = 0x06; 205 176 memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); 206 177 sctx->buf[sctx->rsiz - 1] |= 0x80; 207 178 208 179 for (i = 0; i < sctx->rsizw; i++) 209 - sctx->st[i] ^= ((u64 *) sctx->buf)[i]; 180 + sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); 210 181 211 182 keccakf(sctx->st); 212 183 213 - for (i = 0; i < sctx->rsizw; i++) 214 - sctx->st[i] = cpu_to_le64(sctx->st[i]); 184 + for (i = 0; i < digest_size / 8; i++) 185 + put_unaligned_le64(sctx->st[i], digest++); 215 186 216 - memcpy(out, sctx->st, sctx->md_len); 187 + if (digest_size & 4) 
188 + put_unaligned_le32(sctx->st[i], (__le32 *)digest); 217 189 218 190 memset(sctx, 0, sizeof(*sctx)); 219 191 return 0; 220 192 } 193 + EXPORT_SYMBOL(crypto_sha3_final); 221 194 222 - static struct shash_alg sha3_224 = { 223 - .digestsize = SHA3_224_DIGEST_SIZE, 224 - .init = sha3_224_init, 225 - .update = sha3_update, 226 - .final = sha3_final, 227 - .descsize = sizeof(struct sha3_state), 228 - .base = { 229 - .cra_name = "sha3-224", 230 - .cra_driver_name = "sha3-224-generic", 231 - .cra_flags = CRYPTO_ALG_TYPE_SHASH, 232 - .cra_blocksize = SHA3_224_BLOCK_SIZE, 233 - .cra_module = THIS_MODULE, 234 - } 235 - }; 236 - 237 - static struct shash_alg sha3_256 = { 238 - .digestsize = SHA3_256_DIGEST_SIZE, 239 - .init = sha3_256_init, 240 - .update = sha3_update, 241 - .final = sha3_final, 242 - .descsize = sizeof(struct sha3_state), 243 - .base = { 244 - .cra_name = "sha3-256", 245 - .cra_driver_name = "sha3-256-generic", 246 - .cra_flags = CRYPTO_ALG_TYPE_SHASH, 247 - .cra_blocksize = SHA3_256_BLOCK_SIZE, 248 - .cra_module = THIS_MODULE, 249 - } 250 - }; 251 - 252 - static struct shash_alg sha3_384 = { 253 - .digestsize = SHA3_384_DIGEST_SIZE, 254 - .init = sha3_384_init, 255 - .update = sha3_update, 256 - .final = sha3_final, 257 - .descsize = sizeof(struct sha3_state), 258 - .base = { 259 - .cra_name = "sha3-384", 260 - .cra_driver_name = "sha3-384-generic", 261 - .cra_flags = CRYPTO_ALG_TYPE_SHASH, 262 - .cra_blocksize = SHA3_384_BLOCK_SIZE, 263 - .cra_module = THIS_MODULE, 264 - } 265 - }; 266 - 267 - static struct shash_alg sha3_512 = { 268 - .digestsize = SHA3_512_DIGEST_SIZE, 269 - .init = sha3_512_init, 270 - .update = sha3_update, 271 - .final = sha3_final, 272 - .descsize = sizeof(struct sha3_state), 273 - .base = { 274 - .cra_name = "sha3-512", 275 - .cra_driver_name = "sha3-512-generic", 276 - .cra_flags = CRYPTO_ALG_TYPE_SHASH, 277 - .cra_blocksize = SHA3_512_BLOCK_SIZE, 278 - .cra_module = THIS_MODULE, 279 - } 280 - }; 195 + static struct shash_alg 
algs[] = { { 196 + .digestsize = SHA3_224_DIGEST_SIZE, 197 + .init = crypto_sha3_init, 198 + .update = crypto_sha3_update, 199 + .final = crypto_sha3_final, 200 + .descsize = sizeof(struct sha3_state), 201 + .base.cra_name = "sha3-224", 202 + .base.cra_driver_name = "sha3-224-generic", 203 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 204 + .base.cra_blocksize = SHA3_224_BLOCK_SIZE, 205 + .base.cra_module = THIS_MODULE, 206 + }, { 207 + .digestsize = SHA3_256_DIGEST_SIZE, 208 + .init = crypto_sha3_init, 209 + .update = crypto_sha3_update, 210 + .final = crypto_sha3_final, 211 + .descsize = sizeof(struct sha3_state), 212 + .base.cra_name = "sha3-256", 213 + .base.cra_driver_name = "sha3-256-generic", 214 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 215 + .base.cra_blocksize = SHA3_256_BLOCK_SIZE, 216 + .base.cra_module = THIS_MODULE, 217 + }, { 218 + .digestsize = SHA3_384_DIGEST_SIZE, 219 + .init = crypto_sha3_init, 220 + .update = crypto_sha3_update, 221 + .final = crypto_sha3_final, 222 + .descsize = sizeof(struct sha3_state), 223 + .base.cra_name = "sha3-384", 224 + .base.cra_driver_name = "sha3-384-generic", 225 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 226 + .base.cra_blocksize = SHA3_384_BLOCK_SIZE, 227 + .base.cra_module = THIS_MODULE, 228 + }, { 229 + .digestsize = SHA3_512_DIGEST_SIZE, 230 + .init = crypto_sha3_init, 231 + .update = crypto_sha3_update, 232 + .final = crypto_sha3_final, 233 + .descsize = sizeof(struct sha3_state), 234 + .base.cra_name = "sha3-512", 235 + .base.cra_driver_name = "sha3-512-generic", 236 + .base.cra_flags = CRYPTO_ALG_TYPE_SHASH, 237 + .base.cra_blocksize = SHA3_512_BLOCK_SIZE, 238 + .base.cra_module = THIS_MODULE, 239 + } }; 281 240 282 241 static int __init sha3_generic_mod_init(void) 283 242 { 284 - int ret; 285 - 286 - ret = crypto_register_shash(&sha3_224); 287 - if (ret < 0) 288 - goto err_out; 289 - ret = crypto_register_shash(&sha3_256); 290 - if (ret < 0) 291 - goto err_out_224; 292 - ret = 
crypto_register_shash(&sha3_384); 293 - if (ret < 0) 294 - goto err_out_256; 295 - ret = crypto_register_shash(&sha3_512); 296 - if (ret < 0) 297 - goto err_out_384; 298 - 299 - return 0; 300 - 301 - err_out_384: 302 - crypto_unregister_shash(&sha3_384); 303 - err_out_256: 304 - crypto_unregister_shash(&sha3_256); 305 - err_out_224: 306 - crypto_unregister_shash(&sha3_224); 307 - err_out: 308 - return ret; 243 + return crypto_register_shashes(algs, ARRAY_SIZE(algs)); 309 244 } 310 245 311 246 static void __exit sha3_generic_mod_fini(void) 312 247 { 313 - crypto_unregister_shash(&sha3_224); 314 - crypto_unregister_shash(&sha3_256); 315 - crypto_unregister_shash(&sha3_384); 316 - crypto_unregister_shash(&sha3_512); 248 + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); 317 249 } 318 250 319 251 module_init(sha3_generic_mod_init);
+21 -4
crypto/shash.c
··· 58 58 { 59 59 struct shash_alg *shash = crypto_shash_alg(tfm); 60 60 unsigned long alignmask = crypto_shash_alignmask(tfm); 61 + int err; 61 62 62 63 if ((unsigned long)key & alignmask) 63 - return shash_setkey_unaligned(tfm, key, keylen); 64 + err = shash_setkey_unaligned(tfm, key, keylen); 65 + else 66 + err = shash->setkey(tfm, key, keylen); 64 67 65 - return shash->setkey(tfm, key, keylen); 68 + if (err) 69 + return err; 70 + 71 + crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 72 + return 0; 66 73 } 67 74 EXPORT_SYMBOL_GPL(crypto_shash_setkey); 68 75 ··· 187 180 struct crypto_shash *tfm = desc->tfm; 188 181 struct shash_alg *shash = crypto_shash_alg(tfm); 189 182 unsigned long alignmask = crypto_shash_alignmask(tfm); 183 + 184 + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 185 + return -ENOKEY; 190 186 191 187 if (((unsigned long)data | (unsigned long)out) & alignmask) 192 188 return shash_digest_unaligned(desc, data, len, out); ··· 370 360 crt->digest = shash_async_digest; 371 361 crt->setkey = shash_async_setkey; 372 362 373 - crt->has_setkey = alg->setkey != shash_no_setkey; 363 + crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & 364 + CRYPTO_TFM_NEED_KEY); 374 365 375 366 if (alg->export) 376 367 crt->export = shash_async_export; ··· 386 375 static int crypto_shash_init_tfm(struct crypto_tfm *tfm) 387 376 { 388 377 struct crypto_shash *hash = __crypto_shash_cast(tfm); 378 + struct shash_alg *alg = crypto_shash_alg(hash); 389 379 390 - hash->descsize = crypto_shash_alg(hash)->descsize; 380 + hash->descsize = alg->descsize; 381 + 382 + if (crypto_shash_alg_has_setkey(alg) && 383 + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) 384 + crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); 385 + 391 386 return 0; 392 387 } 393 388
+1 -3
crypto/simd.c
··· 19 19 * GNU General Public License for more details. 20 20 * 21 21 * You should have received a copy of the GNU General Public License 22 - * along with this program; if not, write to the Free Software 23 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 24 - * USA 22 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 25 23 * 26 24 */ 27 25
+26 -4
crypto/skcipher.c
··· 598 598 err = crypto_blkcipher_setkey(blkcipher, key, keylen); 599 599 crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & 600 600 CRYPTO_TFM_RES_MASK); 601 + if (err) 602 + return err; 601 603 602 - return err; 604 + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 605 + return 0; 603 606 } 604 607 605 608 static int skcipher_crypt_blkcipher(struct skcipher_request *req, ··· 677 674 skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); 678 675 skcipher->keysize = calg->cra_blkcipher.max_keysize; 679 676 677 + if (skcipher->keysize) 678 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); 679 + 680 680 return 0; 681 681 } 682 682 ··· 698 692 crypto_skcipher_set_flags(tfm, 699 693 crypto_ablkcipher_get_flags(ablkcipher) & 700 694 CRYPTO_TFM_RES_MASK); 695 + if (err) 696 + return err; 701 697 702 - return err; 698 + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 699 + return 0; 703 700 } 704 701 705 702 static int skcipher_crypt_ablkcipher(struct skcipher_request *req, ··· 776 767 sizeof(struct ablkcipher_request); 777 768 skcipher->keysize = calg->cra_ablkcipher.max_keysize; 778 769 770 + if (skcipher->keysize) 771 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); 772 + 779 773 return 0; 780 774 } 781 775 ··· 808 796 { 809 797 struct skcipher_alg *cipher = crypto_skcipher_alg(tfm); 810 798 unsigned long alignmask = crypto_skcipher_alignmask(tfm); 799 + int err; 811 800 812 801 if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { 813 802 crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); ··· 816 803 } 817 804 818 805 if ((unsigned long)key & alignmask) 819 - return skcipher_setkey_unaligned(tfm, key, keylen); 806 + err = skcipher_setkey_unaligned(tfm, key, keylen); 807 + else 808 + err = cipher->setkey(tfm, key, keylen); 820 809 821 - return cipher->setkey(tfm, key, keylen); 810 + if (err) 811 + return err; 812 + 813 + crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); 814 + return 0; 
822 815 } 823 816 824 817 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm) ··· 852 833 skcipher->decrypt = alg->decrypt; 853 834 skcipher->ivsize = alg->ivsize; 854 835 skcipher->keysize = alg->max_keysize; 836 + 837 + if (skcipher->keysize) 838 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); 855 839 856 840 if (alg->exit) 857 841 skcipher->base.exit = crypto_skcipher_exit_tfm;
+979 -104
crypto/tcrypt.c
··· 67 67 static u32 type; 68 68 static u32 mask; 69 69 static int mode; 70 + static u32 num_mb = 8; 70 71 static char *tvmem[TVMEMSIZE]; 71 72 72 73 static char *check[] = { ··· 80 79 NULL 81 80 }; 82 81 82 + static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; 83 + static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; 84 + 85 + #define XBUFSIZE 8 86 + #define MAX_IVLEN 32 87 + 88 + static int testmgr_alloc_buf(char *buf[XBUFSIZE]) 89 + { 90 + int i; 91 + 92 + for (i = 0; i < XBUFSIZE; i++) { 93 + buf[i] = (void *)__get_free_page(GFP_KERNEL); 94 + if (!buf[i]) 95 + goto err_free_buf; 96 + } 97 + 98 + return 0; 99 + 100 + err_free_buf: 101 + while (i-- > 0) 102 + free_page((unsigned long)buf[i]); 103 + 104 + return -ENOMEM; 105 + } 106 + 107 + static void testmgr_free_buf(char *buf[XBUFSIZE]) 108 + { 109 + int i; 110 + 111 + for (i = 0; i < XBUFSIZE; i++) 112 + free_page((unsigned long)buf[i]); 113 + } 114 + 115 + static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], 116 + unsigned int buflen, const void *assoc, 117 + unsigned int aad_size) 118 + { 119 + int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; 120 + int k, rem; 121 + 122 + if (np > XBUFSIZE) { 123 + rem = PAGE_SIZE; 124 + np = XBUFSIZE; 125 + } else { 126 + rem = buflen % PAGE_SIZE; 127 + } 128 + 129 + sg_init_table(sg, np + 1); 130 + 131 + sg_set_buf(&sg[0], assoc, aad_size); 132 + 133 + if (rem) 134 + np--; 135 + for (k = 0; k < np; k++) 136 + sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); 137 + 138 + if (rem) 139 + sg_set_buf(&sg[k + 1], xbuf[k], rem); 140 + } 141 + 83 142 static inline int do_one_aead_op(struct aead_request *req, int ret) 84 143 { 85 144 struct crypto_wait *wait = req->base.data; 86 145 87 146 return crypto_wait_req(ret, wait); 147 + } 148 + 149 + struct test_mb_aead_data { 150 + struct scatterlist sg[XBUFSIZE]; 151 + struct scatterlist sgout[XBUFSIZE]; 152 + struct aead_request *req; 153 + struct crypto_wait wait; 154 + char *xbuf[XBUFSIZE]; 155 + 
char *xoutbuf[XBUFSIZE]; 156 + char *axbuf[XBUFSIZE]; 157 + }; 158 + 159 + static int do_mult_aead_op(struct test_mb_aead_data *data, int enc, 160 + u32 num_mb) 161 + { 162 + int i, rc[num_mb], err = 0; 163 + 164 + /* Fire up a bunch of concurrent requests */ 165 + for (i = 0; i < num_mb; i++) { 166 + if (enc == ENCRYPT) 167 + rc[i] = crypto_aead_encrypt(data[i].req); 168 + else 169 + rc[i] = crypto_aead_decrypt(data[i].req); 170 + } 171 + 172 + /* Wait for all requests to finish */ 173 + for (i = 0; i < num_mb; i++) { 174 + rc[i] = crypto_wait_req(rc[i], &data[i].wait); 175 + 176 + if (rc[i]) { 177 + pr_info("concurrent request %d error %d\n", i, rc[i]); 178 + err = rc[i]; 179 + } 180 + } 181 + 182 + return err; 183 + } 184 + 185 + static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc, 186 + int blen, int secs, u32 num_mb) 187 + { 188 + unsigned long start, end; 189 + int bcount; 190 + int ret; 191 + 192 + for (start = jiffies, end = start + secs * HZ, bcount = 0; 193 + time_before(jiffies, end); bcount++) { 194 + ret = do_mult_aead_op(data, enc, num_mb); 195 + if (ret) 196 + return ret; 197 + } 198 + 199 + pr_cont("%d operations in %d seconds (%ld bytes)\n", 200 + bcount * num_mb, secs, (long)bcount * blen * num_mb); 201 + return 0; 202 + } 203 + 204 + static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc, 205 + int blen, u32 num_mb) 206 + { 207 + unsigned long cycles = 0; 208 + int ret = 0; 209 + int i; 210 + 211 + /* Warm-up run. */ 212 + for (i = 0; i < 4; i++) { 213 + ret = do_mult_aead_op(data, enc, num_mb); 214 + if (ret) 215 + goto out; 216 + } 217 + 218 + /* The real thing. 
*/ 219 + for (i = 0; i < 8; i++) { 220 + cycles_t start, end; 221 + 222 + start = get_cycles(); 223 + ret = do_mult_aead_op(data, enc, num_mb); 224 + end = get_cycles(); 225 + 226 + if (ret) 227 + goto out; 228 + 229 + cycles += end - start; 230 + } 231 + 232 + out: 233 + if (ret == 0) 234 + pr_cont("1 operation in %lu cycles (%d bytes)\n", 235 + (cycles + 4) / (8 * num_mb), blen); 236 + 237 + return ret; 238 + } 239 + 240 + static void test_mb_aead_speed(const char *algo, int enc, int secs, 241 + struct aead_speed_template *template, 242 + unsigned int tcount, u8 authsize, 243 + unsigned int aad_size, u8 *keysize, u32 num_mb) 244 + { 245 + struct test_mb_aead_data *data; 246 + struct crypto_aead *tfm; 247 + unsigned int i, j, iv_len; 248 + const char *key; 249 + const char *e; 250 + void *assoc; 251 + u32 *b_size; 252 + char *iv; 253 + int ret; 254 + 255 + 256 + if (aad_size >= PAGE_SIZE) { 257 + pr_err("associate data length (%u) too big\n", aad_size); 258 + return; 259 + } 260 + 261 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL); 262 + if (!iv) 263 + return; 264 + 265 + if (enc == ENCRYPT) 266 + e = "encryption"; 267 + else 268 + e = "decryption"; 269 + 270 + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); 271 + if (!data) 272 + goto out_free_iv; 273 + 274 + tfm = crypto_alloc_aead(algo, 0, 0); 275 + if (IS_ERR(tfm)) { 276 + pr_err("failed to load transform for %s: %ld\n", 277 + algo, PTR_ERR(tfm)); 278 + goto out_free_data; 279 + } 280 + 281 + ret = crypto_aead_setauthsize(tfm, authsize); 282 + 283 + for (i = 0; i < num_mb; ++i) 284 + if (testmgr_alloc_buf(data[i].xbuf)) { 285 + while (i--) 286 + testmgr_free_buf(data[i].xbuf); 287 + goto out_free_tfm; 288 + } 289 + 290 + for (i = 0; i < num_mb; ++i) 291 + if (testmgr_alloc_buf(data[i].axbuf)) { 292 + while (i--) 293 + testmgr_free_buf(data[i].axbuf); 294 + goto out_free_xbuf; 295 + } 296 + 297 + for (i = 0; i < num_mb; ++i) 298 + if (testmgr_alloc_buf(data[i].xoutbuf)) { 299 + while (i--) 300 + 
testmgr_free_buf(data[i].xoutbuf); 301 + goto out_free_axbuf; 302 + } 303 + 304 + for (i = 0; i < num_mb; ++i) { 305 + data[i].req = aead_request_alloc(tfm, GFP_KERNEL); 306 + if (!data[i].req) { 307 + pr_err("alg: skcipher: Failed to allocate request for %s\n", 308 + algo); 309 + while (i--) 310 + aead_request_free(data[i].req); 311 + goto out_free_xoutbuf; 312 + } 313 + } 314 + 315 + for (i = 0; i < num_mb; ++i) { 316 + crypto_init_wait(&data[i].wait); 317 + aead_request_set_callback(data[i].req, 318 + CRYPTO_TFM_REQ_MAY_BACKLOG, 319 + crypto_req_done, &data[i].wait); 320 + } 321 + 322 + pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, 323 + get_driver_name(crypto_aead, tfm), e); 324 + 325 + i = 0; 326 + do { 327 + b_size = aead_sizes; 328 + do { 329 + if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) { 330 + pr_err("template (%u) too big for buffer (%lu)\n", 331 + authsize + *b_size, 332 + XBUFSIZE * PAGE_SIZE); 333 + goto out; 334 + } 335 + 336 + pr_info("test %u (%d bit key, %d byte blocks): ", i, 337 + *keysize * 8, *b_size); 338 + 339 + /* Set up tfm global state, i.e. the key */ 340 + 341 + memset(tvmem[0], 0xff, PAGE_SIZE); 342 + key = tvmem[0]; 343 + for (j = 0; j < tcount; j++) { 344 + if (template[j].klen == *keysize) { 345 + key = template[j].key; 346 + break; 347 + } 348 + } 349 + 350 + crypto_aead_clear_flags(tfm, ~0); 351 + 352 + ret = crypto_aead_setkey(tfm, key, *keysize); 353 + if (ret) { 354 + pr_err("setkey() failed flags=%x\n", 355 + crypto_aead_get_flags(tfm)); 356 + goto out; 357 + } 358 + 359 + iv_len = crypto_aead_ivsize(tfm); 360 + if (iv_len) 361 + memset(iv, 0xff, iv_len); 362 + 363 + /* Now setup per request stuff, i.e. buffers */ 364 + 365 + for (j = 0; j < num_mb; ++j) { 366 + struct test_mb_aead_data *cur = &data[j]; 367 + 368 + assoc = cur->axbuf[0]; 369 + memset(assoc, 0xff, aad_size); 370 + 371 + sg_init_aead(cur->sg, cur->xbuf, 372 + *b_size + (enc ? 
0 : authsize), 373 + assoc, aad_size); 374 + 375 + sg_init_aead(cur->sgout, cur->xoutbuf, 376 + *b_size + (enc ? authsize : 0), 377 + assoc, aad_size); 378 + 379 + aead_request_set_ad(cur->req, aad_size); 380 + 381 + if (!enc) { 382 + 383 + aead_request_set_crypt(cur->req, 384 + cur->sgout, 385 + cur->sg, 386 + *b_size, iv); 387 + ret = crypto_aead_encrypt(cur->req); 388 + ret = do_one_aead_op(cur->req, ret); 389 + 390 + if (ret) { 391 + pr_err("calculating auth failed failed (%d)\n", 392 + ret); 393 + break; 394 + } 395 + } 396 + 397 + aead_request_set_crypt(cur->req, cur->sg, 398 + cur->sgout, *b_size + 399 + (enc ? 0 : authsize), 400 + iv); 401 + 402 + } 403 + 404 + if (secs) 405 + ret = test_mb_aead_jiffies(data, enc, *b_size, 406 + secs, num_mb); 407 + else 408 + ret = test_mb_aead_cycles(data, enc, *b_size, 409 + num_mb); 410 + 411 + if (ret) { 412 + pr_err("%s() failed return code=%d\n", e, ret); 413 + break; 414 + } 415 + b_size++; 416 + i++; 417 + } while (*b_size); 418 + keysize++; 419 + } while (*keysize); 420 + 421 + out: 422 + for (i = 0; i < num_mb; ++i) 423 + aead_request_free(data[i].req); 424 + out_free_xoutbuf: 425 + for (i = 0; i < num_mb; ++i) 426 + testmgr_free_buf(data[i].xoutbuf); 427 + out_free_axbuf: 428 + for (i = 0; i < num_mb; ++i) 429 + testmgr_free_buf(data[i].axbuf); 430 + out_free_xbuf: 431 + for (i = 0; i < num_mb; ++i) 432 + testmgr_free_buf(data[i].xbuf); 433 + out_free_tfm: 434 + crypto_free_aead(tfm); 435 + out_free_data: 436 + kfree(data); 437 + out_free_iv: 438 + kfree(iv); 88 439 } 89 440 90 441 static int test_aead_jiffies(struct aead_request *req, int enc, ··· 502 149 (cycles + 4) / 8, blen); 503 150 504 151 return ret; 505 - } 506 - 507 - static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 }; 508 - static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 }; 509 - 510 - #define XBUFSIZE 8 511 - #define MAX_IVLEN 32 512 - 513 - static int testmgr_alloc_buf(char *buf[XBUFSIZE]) 514 - { 515 - int i; 516 - 
517 - for (i = 0; i < XBUFSIZE; i++) { 518 - buf[i] = (void *)__get_free_page(GFP_KERNEL); 519 - if (!buf[i]) 520 - goto err_free_buf; 521 - } 522 - 523 - return 0; 524 - 525 - err_free_buf: 526 - while (i-- > 0) 527 - free_page((unsigned long)buf[i]); 528 - 529 - return -ENOMEM; 530 - } 531 - 532 - static void testmgr_free_buf(char *buf[XBUFSIZE]) 533 - { 534 - int i; 535 - 536 - for (i = 0; i < XBUFSIZE; i++) 537 - free_page((unsigned long)buf[i]); 538 - } 539 - 540 - static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], 541 - unsigned int buflen) 542 - { 543 - int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE; 544 - int k, rem; 545 - 546 - if (np > XBUFSIZE) { 547 - rem = PAGE_SIZE; 548 - np = XBUFSIZE; 549 - } else { 550 - rem = buflen % PAGE_SIZE; 551 - } 552 - 553 - sg_init_table(sg, np + 1); 554 - np--; 555 - for (k = 0; k < np; k++) 556 - sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE); 557 - 558 - sg_set_buf(&sg[k + 1], xbuf[k], rem); 559 152 } 560 153 561 154 static void test_aead_speed(const char *algo, int enc, unsigned int secs, ··· 615 316 goto out; 616 317 } 617 318 618 - sg_init_aead(sg, xbuf, 619 - *b_size + (enc ? 0 : authsize)); 319 + sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize), 320 + assoc, aad_size); 620 321 621 322 sg_init_aead(sgout, xoutbuf, 622 - *b_size + (enc ? authsize : 0)); 323 + *b_size + (enc ? 
authsize : 0), assoc, 324 + aad_size); 623 325 624 - sg_set_buf(&sg[0], assoc, aad_size); 625 - sg_set_buf(&sgout[0], assoc, aad_size); 326 + aead_request_set_ad(req, aad_size); 327 + 328 + if (!enc) { 329 + 330 + /* 331 + * For decryption we need a proper auth so 332 + * we do the encryption path once with buffers 333 + * reversed (input <-> output) to calculate it 334 + */ 335 + aead_request_set_crypt(req, sgout, sg, 336 + *b_size, iv); 337 + ret = do_one_aead_op(req, 338 + crypto_aead_encrypt(req)); 339 + 340 + if (ret) { 341 + pr_err("calculating auth failed failed (%d)\n", 342 + ret); 343 + break; 344 + } 345 + } 626 346 627 347 aead_request_set_crypt(req, sg, sgout, 628 348 *b_size + (enc ? 0 : authsize), 629 349 iv); 630 - aead_request_set_ad(req, aad_size); 631 350 632 351 if (secs) 633 352 ret = test_aead_jiffies(req, enc, *b_size, ··· 698 381 } 699 382 700 383 struct test_mb_ahash_data { 701 - struct scatterlist sg[TVMEMSIZE]; 384 + struct scatterlist sg[XBUFSIZE]; 702 385 char result[64]; 703 386 struct ahash_request *req; 704 387 struct crypto_wait wait; 705 388 char *xbuf[XBUFSIZE]; 706 389 }; 707 390 708 - static void test_mb_ahash_speed(const char *algo, unsigned int sec, 709 - struct hash_speed *speed) 391 + static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb) 392 + { 393 + int i, rc[num_mb], err = 0; 394 + 395 + /* Fire up a bunch of concurrent requests */ 396 + for (i = 0; i < num_mb; i++) 397 + rc[i] = crypto_ahash_digest(data[i].req); 398 + 399 + /* Wait for all requests to finish */ 400 + for (i = 0; i < num_mb; i++) { 401 + rc[i] = crypto_wait_req(rc[i], &data[i].wait); 402 + 403 + if (rc[i]) { 404 + pr_info("concurrent request %d error %d\n", i, rc[i]); 405 + err = rc[i]; 406 + } 407 + } 408 + 409 + return err; 410 + } 411 + 412 + static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, 413 + int secs, u32 num_mb) 414 + { 415 + unsigned long start, end; 416 + int bcount; 417 + int ret; 418 + 419 + 
for (start = jiffies, end = start + secs * HZ, bcount = 0; 420 + time_before(jiffies, end); bcount++) { 421 + ret = do_mult_ahash_op(data, num_mb); 422 + if (ret) 423 + return ret; 424 + } 425 + 426 + pr_cont("%d operations in %d seconds (%ld bytes)\n", 427 + bcount * num_mb, secs, (long)bcount * blen * num_mb); 428 + return 0; 429 + } 430 + 431 + static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, 432 + u32 num_mb) 433 + { 434 + unsigned long cycles = 0; 435 + int ret = 0; 436 + int i; 437 + 438 + /* Warm-up run. */ 439 + for (i = 0; i < 4; i++) { 440 + ret = do_mult_ahash_op(data, num_mb); 441 + if (ret) 442 + goto out; 443 + } 444 + 445 + /* The real thing. */ 446 + for (i = 0; i < 8; i++) { 447 + cycles_t start, end; 448 + 449 + start = get_cycles(); 450 + ret = do_mult_ahash_op(data, num_mb); 451 + end = get_cycles(); 452 + 453 + if (ret) 454 + goto out; 455 + 456 + cycles += end - start; 457 + } 458 + 459 + out: 460 + if (ret == 0) 461 + pr_cont("1 operation in %lu cycles (%d bytes)\n", 462 + (cycles + 4) / (8 * num_mb), blen); 463 + 464 + return ret; 465 + } 466 + 467 + static void test_mb_ahash_speed(const char *algo, unsigned int secs, 468 + struct hash_speed *speed, u32 num_mb) 710 469 { 711 470 struct test_mb_ahash_data *data; 712 471 struct crypto_ahash *tfm; 713 - unsigned long start, end; 714 - unsigned long cycles; 715 472 unsigned int i, j, k; 716 473 int ret; 717 474 718 - data = kzalloc(sizeof(*data) * 8, GFP_KERNEL); 475 + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); 719 476 if (!data) 720 477 return; 721 478 ··· 800 409 goto free_data; 801 410 } 802 411 803 - for (i = 0; i < 8; ++i) { 412 + for (i = 0; i < num_mb; ++i) { 804 413 if (testmgr_alloc_buf(data[i].xbuf)) 805 414 goto out; 806 415 ··· 815 424 816 425 ahash_request_set_callback(data[i].req, 0, crypto_req_done, 817 426 &data[i].wait); 818 - test_hash_sg_init(data[i].sg); 427 + 428 + sg_init_table(data[i].sg, XBUFSIZE); 429 + for (j = 0; j < XBUFSIZE; j++) { 
430 + sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE); 431 + memset(data[i].xbuf[j], 0xff, PAGE_SIZE); 432 + } 819 433 } 820 434 821 435 pr_info("\ntesting speed of multibuffer %s (%s)\n", algo, ··· 831 435 if (speed[i].blen != speed[i].plen) 832 436 continue; 833 437 834 - if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) { 438 + if (speed[i].blen > XBUFSIZE * PAGE_SIZE) { 835 439 pr_err("template (%u) too big for tvmem (%lu)\n", 836 - speed[i].blen, TVMEMSIZE * PAGE_SIZE); 440 + speed[i].blen, XBUFSIZE * PAGE_SIZE); 837 441 goto out; 838 442 } 839 443 840 444 if (speed[i].klen) 841 445 crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen); 842 446 843 - for (k = 0; k < 8; k++) 447 + for (k = 0; k < num_mb; k++) 844 448 ahash_request_set_crypt(data[k].req, data[k].sg, 845 449 data[k].result, speed[i].blen); 846 450 ··· 849 453 i, speed[i].blen, speed[i].plen, 850 454 speed[i].blen / speed[i].plen); 851 455 852 - start = get_cycles(); 456 + if (secs) 457 + ret = test_mb_ahash_jiffies(data, speed[i].blen, secs, 458 + num_mb); 459 + else 460 + ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb); 853 461 854 - for (k = 0; k < 8; k++) { 855 - ret = crypto_ahash_digest(data[k].req); 856 - if (ret == -EINPROGRESS) { 857 - ret = 0; 858 - continue; 859 - } 860 - 861 - if (ret) 862 - break; 863 - 864 - crypto_req_done(&data[k].req->base, 0); 865 - } 866 - 867 - for (j = 0; j < k; j++) { 868 - struct crypto_wait *wait = &data[j].wait; 869 - int wait_ret; 870 - 871 - wait_ret = crypto_wait_req(-EINPROGRESS, wait); 872 - if (wait_ret) 873 - ret = wait_ret; 874 - } 875 - 876 - end = get_cycles(); 877 - cycles = end - start; 878 - pr_cont("%6lu cycles/operation, %4lu cycles/byte\n", 879 - cycles, cycles / (8 * speed[i].blen)); 880 462 881 463 if (ret) { 882 464 pr_err("At least one hashing failed ret=%d\n", ret); ··· 863 489 } 864 490 865 491 out: 866 - for (k = 0; k < 8; ++k) 492 + for (k = 0; k < num_mb; ++k) 867 493 ahash_request_free(data[k].req); 868 494 869 - for (k = 0; k 
< 8; ++k) 495 + for (k = 0; k < num_mb; ++k) 870 496 testmgr_free_buf(data[k].xbuf); 871 497 872 498 crypto_free_ahash(tfm); ··· 1108 734 struct hash_speed *speed) 1109 735 { 1110 736 return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC); 737 + } 738 + 739 + struct test_mb_skcipher_data { 740 + struct scatterlist sg[XBUFSIZE]; 741 + struct skcipher_request *req; 742 + struct crypto_wait wait; 743 + char *xbuf[XBUFSIZE]; 744 + }; 745 + 746 + static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc, 747 + u32 num_mb) 748 + { 749 + int i, rc[num_mb], err = 0; 750 + 751 + /* Fire up a bunch of concurrent requests */ 752 + for (i = 0; i < num_mb; i++) { 753 + if (enc == ENCRYPT) 754 + rc[i] = crypto_skcipher_encrypt(data[i].req); 755 + else 756 + rc[i] = crypto_skcipher_decrypt(data[i].req); 757 + } 758 + 759 + /* Wait for all requests to finish */ 760 + for (i = 0; i < num_mb; i++) { 761 + rc[i] = crypto_wait_req(rc[i], &data[i].wait); 762 + 763 + if (rc[i]) { 764 + pr_info("concurrent request %d error %d\n", i, rc[i]); 765 + err = rc[i]; 766 + } 767 + } 768 + 769 + return err; 770 + } 771 + 772 + static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc, 773 + int blen, int secs, u32 num_mb) 774 + { 775 + unsigned long start, end; 776 + int bcount; 777 + int ret; 778 + 779 + for (start = jiffies, end = start + secs * HZ, bcount = 0; 780 + time_before(jiffies, end); bcount++) { 781 + ret = do_mult_acipher_op(data, enc, num_mb); 782 + if (ret) 783 + return ret; 784 + } 785 + 786 + pr_cont("%d operations in %d seconds (%ld bytes)\n", 787 + bcount * num_mb, secs, (long)bcount * blen * num_mb); 788 + return 0; 789 + } 790 + 791 + static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc, 792 + int blen, u32 num_mb) 793 + { 794 + unsigned long cycles = 0; 795 + int ret = 0; 796 + int i; 797 + 798 + /* Warm-up run. 
*/ 799 + for (i = 0; i < 4; i++) { 800 + ret = do_mult_acipher_op(data, enc, num_mb); 801 + if (ret) 802 + goto out; 803 + } 804 + 805 + /* The real thing. */ 806 + for (i = 0; i < 8; i++) { 807 + cycles_t start, end; 808 + 809 + start = get_cycles(); 810 + ret = do_mult_acipher_op(data, enc, num_mb); 811 + end = get_cycles(); 812 + 813 + if (ret) 814 + goto out; 815 + 816 + cycles += end - start; 817 + } 818 + 819 + out: 820 + if (ret == 0) 821 + pr_cont("1 operation in %lu cycles (%d bytes)\n", 822 + (cycles + 4) / (8 * num_mb), blen); 823 + 824 + return ret; 825 + } 826 + 827 + static void test_mb_skcipher_speed(const char *algo, int enc, int secs, 828 + struct cipher_speed_template *template, 829 + unsigned int tcount, u8 *keysize, u32 num_mb) 830 + { 831 + struct test_mb_skcipher_data *data; 832 + struct crypto_skcipher *tfm; 833 + unsigned int i, j, iv_len; 834 + const char *key; 835 + const char *e; 836 + u32 *b_size; 837 + char iv[128]; 838 + int ret; 839 + 840 + if (enc == ENCRYPT) 841 + e = "encryption"; 842 + else 843 + e = "decryption"; 844 + 845 + data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL); 846 + if (!data) 847 + return; 848 + 849 + tfm = crypto_alloc_skcipher(algo, 0, 0); 850 + if (IS_ERR(tfm)) { 851 + pr_err("failed to load transform for %s: %ld\n", 852 + algo, PTR_ERR(tfm)); 853 + goto out_free_data; 854 + } 855 + 856 + for (i = 0; i < num_mb; ++i) 857 + if (testmgr_alloc_buf(data[i].xbuf)) { 858 + while (i--) 859 + testmgr_free_buf(data[i].xbuf); 860 + goto out_free_tfm; 861 + } 862 + 863 + 864 + for (i = 0; i < num_mb; ++i) 865 + if (testmgr_alloc_buf(data[i].xbuf)) { 866 + while (i--) 867 + testmgr_free_buf(data[i].xbuf); 868 + goto out_free_tfm; 869 + } 870 + 871 + 872 + for (i = 0; i < num_mb; ++i) { 873 + data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL); 874 + if (!data[i].req) { 875 + pr_err("alg: skcipher: Failed to allocate request for %s\n", 876 + algo); 877 + while (i--) 878 + skcipher_request_free(data[i].req); 879 + goto 
out_free_xbuf; 880 + } 881 + } 882 + 883 + for (i = 0; i < num_mb; ++i) { 884 + skcipher_request_set_callback(data[i].req, 885 + CRYPTO_TFM_REQ_MAY_BACKLOG, 886 + crypto_req_done, &data[i].wait); 887 + crypto_init_wait(&data[i].wait); 888 + } 889 + 890 + pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo, 891 + get_driver_name(crypto_skcipher, tfm), e); 892 + 893 + i = 0; 894 + do { 895 + b_size = block_sizes; 896 + do { 897 + if (*b_size > XBUFSIZE * PAGE_SIZE) { 898 + pr_err("template (%u) too big for buffer (%lu)\n", 899 + *b_size, XBUFSIZE * PAGE_SIZE); 900 + goto out; 901 + } 902 + 903 + pr_info("test %u (%d bit key, %d byte blocks): ", i, 904 + *keysize * 8, *b_size); 905 + 906 + /* Set up tfm global state, i.e. the key */ 907 + 908 + memset(tvmem[0], 0xff, PAGE_SIZE); 909 + key = tvmem[0]; 910 + for (j = 0; j < tcount; j++) { 911 + if (template[j].klen == *keysize) { 912 + key = template[j].key; 913 + break; 914 + } 915 + } 916 + 917 + crypto_skcipher_clear_flags(tfm, ~0); 918 + 919 + ret = crypto_skcipher_setkey(tfm, key, *keysize); 920 + if (ret) { 921 + pr_err("setkey() failed flags=%x\n", 922 + crypto_skcipher_get_flags(tfm)); 923 + goto out; 924 + } 925 + 926 + iv_len = crypto_skcipher_ivsize(tfm); 927 + if (iv_len) 928 + memset(&iv, 0xff, iv_len); 929 + 930 + /* Now setup per request stuff, i.e. 
buffers */ 931 + 932 + for (j = 0; j < num_mb; ++j) { 933 + struct test_mb_skcipher_data *cur = &data[j]; 934 + unsigned int k = *b_size; 935 + unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE); 936 + unsigned int p = 0; 937 + 938 + sg_init_table(cur->sg, pages); 939 + 940 + while (k > PAGE_SIZE) { 941 + sg_set_buf(cur->sg + p, cur->xbuf[p], 942 + PAGE_SIZE); 943 + memset(cur->xbuf[p], 0xff, PAGE_SIZE); 944 + p++; 945 + k -= PAGE_SIZE; 946 + } 947 + 948 + sg_set_buf(cur->sg + p, cur->xbuf[p], k); 949 + memset(cur->xbuf[p], 0xff, k); 950 + 951 + skcipher_request_set_crypt(cur->req, cur->sg, 952 + cur->sg, *b_size, 953 + iv); 954 + } 955 + 956 + if (secs) 957 + ret = test_mb_acipher_jiffies(data, enc, 958 + *b_size, secs, 959 + num_mb); 960 + else 961 + ret = test_mb_acipher_cycles(data, enc, 962 + *b_size, num_mb); 963 + 964 + if (ret) { 965 + pr_err("%s() failed flags=%x\n", e, 966 + crypto_skcipher_get_flags(tfm)); 967 + break; 968 + } 969 + b_size++; 970 + i++; 971 + } while (*b_size); 972 + keysize++; 973 + } while (*keysize); 974 + 975 + out: 976 + for (i = 0; i < num_mb; ++i) 977 + skcipher_request_free(data[i].req); 978 + out_free_xbuf: 979 + for (i = 0; i < num_mb; ++i) 980 + testmgr_free_buf(data[i].xbuf); 981 + out_free_tfm: 982 + crypto_free_skcipher(tfm); 983 + out_free_data: 984 + kfree(data); 1111 985 } 1112 986 1113 987 static inline int do_one_acipher_op(struct skcipher_request *req, int ret) ··· 2179 1557 NULL, 0, 16, 16, aead_speed_template_20); 2180 1558 test_aead_speed("gcm(aes)", ENCRYPT, sec, 2181 1559 NULL, 0, 16, 8, speed_template_16_24_32); 1560 + test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, 1561 + NULL, 0, 16, 16, aead_speed_template_20); 1562 + test_aead_speed("gcm(aes)", DECRYPT, sec, 1563 + NULL, 0, 16, 8, speed_template_16_24_32); 2182 1564 break; 2183 1565 2184 1566 case 212: 2185 1567 test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, 1568 + NULL, 0, 16, 16, aead_speed_template_19); 1569 + test_aead_speed("rfc4309(ccm(aes))", 
DECRYPT, sec, 2186 1570 NULL, 0, 16, 16, aead_speed_template_19); 2187 1571 break; 2188 1572 2189 1573 case 213: 2190 1574 test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec, 2191 1575 NULL, 0, 16, 8, aead_speed_template_36); 1576 + test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec, 1577 + NULL, 0, 16, 8, aead_speed_template_36); 2192 1578 break; 2193 1579 2194 1580 case 214: 2195 1581 test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0, 2196 1582 speed_template_32); 1583 + break; 1584 + 1585 + case 215: 1586 + test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL, 1587 + 0, 16, 16, aead_speed_template_20, num_mb); 1588 + test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8, 1589 + speed_template_16_24_32, num_mb); 1590 + test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL, 1591 + 0, 16, 16, aead_speed_template_20, num_mb); 1592 + test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8, 1593 + speed_template_16_24_32, num_mb); 1594 + break; 1595 + 1596 + case 216: 1597 + test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0, 1598 + 16, 16, aead_speed_template_19, num_mb); 1599 + test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0, 1600 + 16, 16, aead_speed_template_19, num_mb); 1601 + break; 1602 + 1603 + case 217: 1604 + test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, 1605 + sec, NULL, 0, 16, 8, aead_speed_template_36, 1606 + num_mb); 1607 + test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, 1608 + sec, NULL, 0, 16, 8, aead_speed_template_36, 1609 + num_mb); 2197 1610 break; 2198 1611 2199 1612 case 300: ··· 2435 1778 if (mode > 400 && mode < 500) break; 2436 1779 /* fall through */ 2437 1780 case 422: 2438 - test_mb_ahash_speed("sha1", sec, generic_hash_speed_template); 1781 + test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, 1782 + num_mb); 2439 1783 if (mode > 400 && mode < 500) break; 2440 1784 /* fall through */ 2441 1785 case 423: 2442 - 
test_mb_ahash_speed("sha256", sec, generic_hash_speed_template); 1786 + test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, 1787 + num_mb); 2443 1788 if (mode > 400 && mode < 500) break; 2444 1789 /* fall through */ 2445 1790 case 424: 2446 - test_mb_ahash_speed("sha512", sec, generic_hash_speed_template); 1791 + test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, 1792 + num_mb); 2447 1793 if (mode > 400 && mode < 500) break; 2448 1794 /* fall through */ 2449 1795 case 425: 2450 - test_mb_ahash_speed("sm3", sec, generic_hash_speed_template); 1796 + test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, 1797 + num_mb); 2451 1798 if (mode > 400 && mode < 500) break; 2452 1799 /* fall through */ 2453 1800 case 499: ··· 2669 2008 speed_template_8_32); 2670 2009 break; 2671 2010 2011 + case 600: 2012 + test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, 2013 + speed_template_16_24_32, num_mb); 2014 + test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, 2015 + speed_template_16_24_32, num_mb); 2016 + test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0, 2017 + speed_template_16_24_32, num_mb); 2018 + test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0, 2019 + speed_template_16_24_32, num_mb); 2020 + test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0, 2021 + speed_template_32_40_48, num_mb); 2022 + test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0, 2023 + speed_template_32_40_48, num_mb); 2024 + test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0, 2025 + speed_template_32_64, num_mb); 2026 + test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0, 2027 + speed_template_32_64, num_mb); 2028 + test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0, 2029 + speed_template_16_24_32, num_mb); 2030 + test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0, 2031 + speed_template_16_24_32, num_mb); 2032 + test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0, 2033 + speed_template_16_24_32, 
num_mb); 2034 + test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0, 2035 + speed_template_16_24_32, num_mb); 2036 + test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0, 2037 + speed_template_16_24_32, num_mb); 2038 + test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0, 2039 + speed_template_16_24_32, num_mb); 2040 + test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0, 2041 + speed_template_16_24_32, num_mb); 2042 + test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0, 2043 + speed_template_16_24_32, num_mb); 2044 + test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 2045 + 0, speed_template_20_28_36, num_mb); 2046 + test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 2047 + 0, speed_template_20_28_36, num_mb); 2048 + break; 2049 + 2050 + case 601: 2051 + test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec, 2052 + des3_speed_template, DES3_SPEED_VECTORS, 2053 + speed_template_24, num_mb); 2054 + test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec, 2055 + des3_speed_template, DES3_SPEED_VECTORS, 2056 + speed_template_24, num_mb); 2057 + test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec, 2058 + des3_speed_template, DES3_SPEED_VECTORS, 2059 + speed_template_24, num_mb); 2060 + test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec, 2061 + des3_speed_template, DES3_SPEED_VECTORS, 2062 + speed_template_24, num_mb); 2063 + test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec, 2064 + des3_speed_template, DES3_SPEED_VECTORS, 2065 + speed_template_24, num_mb); 2066 + test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec, 2067 + des3_speed_template, DES3_SPEED_VECTORS, 2068 + speed_template_24, num_mb); 2069 + test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec, 2070 + des3_speed_template, DES3_SPEED_VECTORS, 2071 + speed_template_24, num_mb); 2072 + test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec, 2073 + des3_speed_template, DES3_SPEED_VECTORS, 2074 + speed_template_24, num_mb); 2075 + break; 2076 + 2077 + case 602: 
2078 + test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0, 2079 + speed_template_8, num_mb); 2080 + test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0, 2081 + speed_template_8, num_mb); 2082 + test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0, 2083 + speed_template_8, num_mb); 2084 + test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, 2085 + speed_template_8, num_mb); 2086 + test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0, 2087 + speed_template_8, num_mb); 2088 + test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0, 2089 + speed_template_8, num_mb); 2090 + test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0, 2091 + speed_template_8, num_mb); 2092 + test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0, 2093 + speed_template_8, num_mb); 2094 + break; 2095 + 2096 + case 603: 2097 + test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0, 2098 + speed_template_16_32, num_mb); 2099 + test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0, 2100 + speed_template_16_32, num_mb); 2101 + test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0, 2102 + speed_template_16_32, num_mb); 2103 + test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0, 2104 + speed_template_16_32, num_mb); 2105 + test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0, 2106 + speed_template_16_32, num_mb); 2107 + test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0, 2108 + speed_template_16_32, num_mb); 2109 + test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0, 2110 + speed_template_32_48, num_mb); 2111 + test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0, 2112 + speed_template_32_48, num_mb); 2113 + test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0, 2114 + speed_template_32_64, num_mb); 2115 + test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0, 2116 + speed_template_32_64, num_mb); 2117 + break; 2118 + 2119 + case 604: 2120 + test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, 
NULL, 0, 2121 + speed_template_16_24_32, num_mb); 2122 + test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0, 2123 + speed_template_16_24_32, num_mb); 2124 + test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0, 2125 + speed_template_16_24_32, num_mb); 2126 + test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, 2127 + speed_template_16_24_32, num_mb); 2128 + test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0, 2129 + speed_template_16_24_32, num_mb); 2130 + test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0, 2131 + speed_template_16_24_32, num_mb); 2132 + test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0, 2133 + speed_template_32_40_48, num_mb); 2134 + test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0, 2135 + speed_template_32_40_48, num_mb); 2136 + test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0, 2137 + speed_template_32_48_64, num_mb); 2138 + test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0, 2139 + speed_template_32_48_64, num_mb); 2140 + break; 2141 + 2142 + case 605: 2143 + test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, 2144 + speed_template_8, num_mb); 2145 + break; 2146 + 2147 + case 606: 2148 + test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0, 2149 + speed_template_8_16, num_mb); 2150 + test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0, 2151 + speed_template_8_16, num_mb); 2152 + test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0, 2153 + speed_template_8_16, num_mb); 2154 + test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0, 2155 + speed_template_8_16, num_mb); 2156 + test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0, 2157 + speed_template_8_16, num_mb); 2158 + test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0, 2159 + speed_template_8_16, num_mb); 2160 + break; 2161 + 2162 + case 607: 2163 + test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0, 2164 + speed_template_16_32, num_mb); 2165 + 
test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0, 2166 + speed_template_16_32, num_mb); 2167 + test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0, 2168 + speed_template_16_32, num_mb); 2169 + test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0, 2170 + speed_template_16_32, num_mb); 2171 + test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0, 2172 + speed_template_16_32, num_mb); 2173 + test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0, 2174 + speed_template_16_32, num_mb); 2175 + test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0, 2176 + speed_template_32_48, num_mb); 2177 + test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0, 2178 + speed_template_32_48, num_mb); 2179 + test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0, 2180 + speed_template_32_64, num_mb); 2181 + test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0, 2182 + speed_template_32_64, num_mb); 2183 + break; 2184 + 2185 + case 608: 2186 + test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0, 2187 + speed_template_16_32, num_mb); 2188 + test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0, 2189 + speed_template_16_32, num_mb); 2190 + test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0, 2191 + speed_template_16_32, num_mb); 2192 + test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0, 2193 + speed_template_16_32, num_mb); 2194 + test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0, 2195 + speed_template_16_32, num_mb); 2196 + test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0, 2197 + speed_template_16_32, num_mb); 2198 + test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0, 2199 + speed_template_32_48, num_mb); 2200 + test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0, 2201 + speed_template_32_48, num_mb); 2202 + test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0, 2203 + speed_template_32_64, num_mb); 2204 + test_mb_skcipher_speed("xts(camellia)", 
DECRYPT, sec, NULL, 0, 2205 + speed_template_32_64, num_mb); 2206 + break; 2207 + 2208 + case 609: 2209 + test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0, 2210 + speed_template_8_32, num_mb); 2211 + test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0, 2212 + speed_template_8_32, num_mb); 2213 + test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0, 2214 + speed_template_8_32, num_mb); 2215 + test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, 2216 + speed_template_8_32, num_mb); 2217 + test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0, 2218 + speed_template_8_32, num_mb); 2219 + test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0, 2220 + speed_template_8_32, num_mb); 2221 + break; 2222 + 2672 2223 case 1000: 2673 2224 test_available(); 2674 2225 break; ··· 2942 2069 module_param(sec, uint, 0); 2943 2070 MODULE_PARM_DESC(sec, "Length in seconds of speed tests " 2944 2071 "(defaults to zero which uses CPU cycles instead)"); 2072 + module_param(num_mb, uint, 0000); 2073 + MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)"); 2945 2074 2946 2075 MODULE_LICENSE("GPL"); 2947 2076 MODULE_DESCRIPTION("Quick & dirty crypto testing module");
+40 -1
crypto/testmgr.c
··· 177 177 free_page((unsigned long)buf[i]); 178 178 } 179 179 180 + static int ahash_guard_result(char *result, char c, int size) 181 + { 182 + int i; 183 + 184 + for (i = 0; i < size; i++) { 185 + if (result[i] != c) 186 + return -EINVAL; 187 + } 188 + 189 + return 0; 190 + } 191 + 180 192 static int ahash_partial_update(struct ahash_request **preq, 181 193 struct crypto_ahash *tfm, const struct hash_testvec *template, 182 194 void *hash_buff, int k, int temp, struct scatterlist *sg, ··· 197 185 char *state; 198 186 struct ahash_request *req; 199 187 int statesize, ret = -EINVAL; 200 - const char guard[] = { 0x00, 0xba, 0xad, 0x00 }; 188 + static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 }; 189 + int digestsize = crypto_ahash_digestsize(tfm); 201 190 202 191 req = *preq; 203 192 statesize = crypto_ahash_statesize( ··· 209 196 goto out_nostate; 210 197 } 211 198 memcpy(state + statesize, guard, sizeof(guard)); 199 + memset(result, 1, digestsize); 212 200 ret = crypto_ahash_export(req, state); 213 201 WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); 214 202 if (ret) { 215 203 pr_err("alg: hash: Failed to export() for %s\n", algo); 204 + goto out; 205 + } 206 + ret = ahash_guard_result(result, 1, digestsize); 207 + if (ret) { 208 + pr_err("alg: hash: Failed, export used req->result for %s\n", 209 + algo); 216 210 goto out; 217 211 } 218 212 ahash_request_free(req); ··· 239 219 ret = crypto_ahash_import(req, state); 240 220 if (ret) { 241 221 pr_err("alg: hash: Failed to import() for %s\n", algo); 222 + goto out; 223 + } 224 + ret = ahash_guard_result(result, 1, digestsize); 225 + if (ret) { 226 + pr_err("alg: hash: Failed, import used req->result for %s\n", 227 + algo); 242 228 goto out; 243 229 } 244 230 ret = crypto_wait_req(crypto_ahash_update(req), wait); ··· 342 316 goto out; 343 317 } 344 318 } else { 319 + memset(result, 1, digest_size); 345 320 ret = crypto_wait_req(crypto_ahash_init(req), &wait); 346 321 if (ret) { 347 322 
pr_err("alg: hash: init failed on test %d " 348 323 "for %s: ret=%d\n", j, algo, -ret); 349 324 goto out; 350 325 } 326 + ret = ahash_guard_result(result, 1, digest_size); 327 + if (ret) { 328 + pr_err("alg: hash: init failed on test %d " 329 + "for %s: used req->result\n", j, algo); 330 + goto out; 331 + } 351 332 ret = crypto_wait_req(crypto_ahash_update(req), &wait); 352 333 if (ret) { 353 334 pr_err("alg: hash: update failed on test %d " 354 335 "for %s: ret=%d\n", j, algo, -ret); 336 + goto out; 337 + } 338 + ret = ahash_guard_result(result, 1, digest_size); 339 + if (ret) { 340 + pr_err("alg: hash: update failed on test %d " 341 + "for %s: used req->result\n", j, algo); 355 342 goto out; 356 343 } 357 344 ret = crypto_wait_req(crypto_ahash_final(req), &wait);
+550
crypto/testmgr.h
··· 1052 1052 "\xc9\xfd\x55\x74\x49\x44\x79\xba" 1053 1053 "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" 1054 1054 "\xd0\xfc\xce\x33", 1055 + .np = 2, 1056 + .tap = { 28, 28 }, 1057 + }, { 1058 + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" 1059 + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" 1060 + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" 1061 + "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" 1062 + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" 1063 + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" 1064 + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" 1065 + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" 1066 + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" 1067 + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" 1068 + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" 1069 + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" 1070 + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" 1071 + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" 1072 + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" 1073 + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" 1074 + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" 1075 + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" 1076 + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" 1077 + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" 1078 + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" 1079 + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" 1080 + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" 1081 + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" 1082 + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" 1083 + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" 1084 + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" 1085 + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" 1086 + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" 1087 + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" 1088 + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" 1089 + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" 1090 + "\x53\xea\x81\x18\x8c\x23\xba\x2e" 1091 + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" 1092 + "\x37\xce\x42\xd9\x70\x07\x7b\x12" 1093 + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" 1094 + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" 1095 + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" 1096 + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" 1097 + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" 1098 + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" 1099 + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" 1100 + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" 
1101 + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" 1102 + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" 1103 + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" 1104 + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" 1105 + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" 1106 + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" 1107 + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" 1108 + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" 1109 + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" 1110 + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" 1111 + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" 1112 + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" 1113 + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" 1114 + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" 1115 + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" 1116 + "\x38\xcf\x43\xda\x71\x08\x7c\x13" 1117 + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" 1118 + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" 1119 + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" 1120 + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" 1121 + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" 1122 + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" 1123 + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" 1124 + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" 1125 + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" 1126 + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" 1127 + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" 1128 + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" 1129 + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" 1130 + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" 1131 + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" 1132 + "\xef\x63\xfa\x91\x05\x9c\x33\xca" 1133 + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" 1134 + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" 1135 + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" 1136 + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" 1137 + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" 1138 + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" 1139 + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" 1140 + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" 1141 + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" 1142 + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" 1143 + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" 1144 + "\xde\x75\x0c\x80\x17\xae\x22\xb9" 1145 + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" 1146 + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" 1147 + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" 1148 + 
"\x83\x1a\xb1\x25\xbc\x53\xea\x5e" 1149 + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" 1150 + "\x67\xfe\x72\x09\xa0\x14\xab\x42" 1151 + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" 1152 + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" 1153 + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" 1154 + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" 1155 + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" 1156 + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" 1157 + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" 1158 + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" 1159 + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" 1160 + "\x95\x09\xa0\x37\xce\x42\xd9\x70" 1161 + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" 1162 + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" 1163 + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" 1164 + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" 1165 + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" 1166 + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" 1167 + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" 1168 + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" 1169 + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" 1170 + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" 1171 + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" 1172 + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" 1173 + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" 1174 + "\x68\xff\x73\x0a\xa1\x15\xac\x43" 1175 + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" 1176 + "\x29\xc0\x57\xee\x62\xf9\x90\x04" 1177 + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" 1178 + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" 1179 + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" 1180 + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" 1181 + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" 1182 + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" 1183 + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" 1184 + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" 1185 + "\x08\x7c\x13\xaa\x1e\xb5\x4c", 1186 + .psize = 1023, 1187 + .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26" 1188 + "\xc3\x88\x20\x71\x15\x06\xe8\x2d" 1189 + "\xa3\x92\x44\xab\x3e\xe7\xff\x86" 1190 + "\xb6\x79\x10\x72", 1055 1191 }, 1056 1192 }; 1057 1193 ··· 1213 1077 "\x49\x10\x03\x76\xa8\x23\x5e\x2c" 1214 1078 "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" 1215 1079 "\xdb\x32\xdd\x97\x49\x6d\x33\x76", 1080 + .np = 2, 1081 + .tap = { 28, 28 }, 1082 + }, 
{ 1083 + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" 1084 + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" 1085 + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" 1086 + "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" 1087 + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" 1088 + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" 1089 + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" 1090 + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" 1091 + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" 1092 + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" 1093 + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" 1094 + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" 1095 + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" 1096 + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" 1097 + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" 1098 + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" 1099 + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" 1100 + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" 1101 + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" 1102 + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" 1103 + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" 1104 + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" 1105 + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" 1106 + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" 1107 + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" 1108 + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" 1109 + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" 1110 + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" 1111 + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" 1112 + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" 1113 + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" 1114 + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" 1115 + "\x53\xea\x81\x18\x8c\x23\xba\x2e" 1116 + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" 1117 + "\x37\xce\x42\xd9\x70\x07\x7b\x12" 1118 + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" 1119 + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" 1120 + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" 1121 + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" 1122 + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" 1123 + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" 1124 + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" 1125 + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" 1126 + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" 1127 + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" 1128 + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" 1129 + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" 1130 + 
"\x98\x2f\xc6\x3a\xd1\x68\xff\x73" 1131 + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" 1132 + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" 1133 + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" 1134 + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" 1135 + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" 1136 + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" 1137 + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" 1138 + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" 1139 + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" 1140 + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" 1141 + "\x38\xcf\x43\xda\x71\x08\x7c\x13" 1142 + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" 1143 + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" 1144 + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" 1145 + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" 1146 + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" 1147 + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" 1148 + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" 1149 + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" 1150 + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" 1151 + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" 1152 + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" 1153 + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" 1154 + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" 1155 + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" 1156 + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" 1157 + "\xef\x63\xfa\x91\x05\x9c\x33\xca" 1158 + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" 1159 + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" 1160 + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" 1161 + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" 1162 + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" 1163 + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" 1164 + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" 1165 + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" 1166 + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" 1167 + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" 1168 + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" 1169 + "\xde\x75\x0c\x80\x17\xae\x22\xb9" 1170 + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" 1171 + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" 1172 + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" 1173 + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" 1174 + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" 1175 + "\x67\xfe\x72\x09\xa0\x14\xab\x42" 1176 + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" 1177 + 
"\x28\xbf\x56\xed\x61\xf8\x8f\x03" 1178 + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" 1179 + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" 1180 + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" 1181 + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" 1182 + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" 1183 + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" 1184 + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" 1185 + "\x95\x09\xa0\x37\xce\x42\xd9\x70" 1186 + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" 1187 + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" 1188 + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" 1189 + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" 1190 + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" 1191 + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" 1192 + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" 1193 + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" 1194 + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" 1195 + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" 1196 + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" 1197 + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" 1198 + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" 1199 + "\x68\xff\x73\x0a\xa1\x15\xac\x43" 1200 + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" 1201 + "\x29\xc0\x57\xee\x62\xf9\x90\x04" 1202 + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" 1203 + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" 1204 + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" 1205 + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" 1206 + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" 1207 + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" 1208 + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" 1209 + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" 1210 + "\x08\x7c\x13\xaa\x1e\xb5\x4c", 1211 + .psize = 1023, 1212 + .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71" 1213 + "\xf7\xfa\x80\xf5\xea\x11\x03\xb1" 1214 + "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7" 1215 + "\x8a\x97\xbb\xf2\x07\x08\x38\x30", 1216 1216 }, 1217 1217 }; 1218 1218 ··· 1381 1109 "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" 1382 1110 "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" 1383 1111 "\x9e\xef\x51\xac\xd0\x65\x7c\x22", 1112 + .np = 2, 1113 + .tap = { 28, 28 }, 1114 + }, { 1115 + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" 1116 + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" 1117 + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" 1118 + 
"\x3b\xd2\x69\x00\x74\x0b\xa2\x16" 1119 + "\xad\x44\xdb\x4f\xe6\x7d\x14\x88" 1120 + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" 1121 + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" 1122 + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" 1123 + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" 1124 + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" 1125 + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" 1126 + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" 1127 + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" 1128 + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" 1129 + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" 1130 + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" 1131 + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" 1132 + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" 1133 + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" 1134 + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" 1135 + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" 1136 + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" 1137 + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" 1138 + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" 1139 + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" 1140 + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" 1141 + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" 1142 + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" 1143 + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" 1144 + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" 1145 + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" 1146 + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" 1147 + "\x53\xea\x81\x18\x8c\x23\xba\x2e" 1148 + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" 1149 + "\x37\xce\x42\xd9\x70\x07\x7b\x12" 1150 + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" 1151 + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" 1152 + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" 1153 + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" 1154 + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" 1155 + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" 1156 + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" 1157 + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" 1158 + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" 1159 + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" 1160 + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" 1161 + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" 1162 + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" 1163 + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" 1164 + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" 1165 + 
"\xee\x62\xf9\x90\x04\x9b\x32\xc9" 1166 + "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" 1167 + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" 1168 + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" 1169 + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" 1170 + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" 1171 + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" 1172 + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" 1173 + "\x38\xcf\x43\xda\x71\x08\x7c\x13" 1174 + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" 1175 + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" 1176 + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" 1177 + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" 1178 + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" 1179 + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" 1180 + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" 1181 + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" 1182 + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" 1183 + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" 1184 + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" 1185 + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" 1186 + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" 1187 + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" 1188 + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" 1189 + "\xef\x63\xfa\x91\x05\x9c\x33\xca" 1190 + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" 1191 + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" 1192 + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" 1193 + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" 1194 + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" 1195 + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" 1196 + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" 1197 + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" 1198 + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" 1199 + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" 1200 + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" 1201 + "\xde\x75\x0c\x80\x17\xae\x22\xb9" 1202 + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" 1203 + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" 1204 + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" 1205 + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" 1206 + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" 1207 + "\x67\xfe\x72\x09\xa0\x14\xab\x42" 1208 + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" 1209 + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" 1210 + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" 1211 + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" 1212 + 
"\x7e\x15\x89\x20\xb7\x2b\xc2\x59" 1213 + "\xf0\x64\xfb\x92\x06\x9d\x34\xcb" 1214 + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" 1215 + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" 1216 + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" 1217 + "\x95\x09\xa0\x37\xce\x42\xd9\x70" 1218 + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" 1219 + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" 1220 + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" 1221 + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" 1222 + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" 1223 + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" 1224 + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" 1225 + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" 1226 + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" 1227 + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" 1228 + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" 1229 + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" 1230 + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" 1231 + "\x68\xff\x73\x0a\xa1\x15\xac\x43" 1232 + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" 1233 + "\x29\xc0\x57\xee\x62\xf9\x90\x04" 1234 + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" 1235 + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" 1236 + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" 1237 + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" 1238 + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" 1239 + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" 1240 + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" 1241 + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" 1242 + "\x08\x7c\x13\xaa\x1e\xb5\x4c", 1243 + .psize = 1023, 1244 + .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71" 1245 + "\xcf\xca\x30\x85\x9b\xc1\x25\xc7" 1246 + "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b" 1247 + "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51" 1248 + "\xd2\x21\xae\x2d\xc7\xae\x8c\x40" 1249 + "\xb9\xe6\x56\x48\x03\xcd\x88\x6b", 1384 1250 }, 1385 1251 }; 1386 1252 ··· 1557 1147 "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" 1558 1148 "\x46\xb5\x33\xb4\x9c\x03\x0d\x99" 1559 1149 "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", 1150 + .np = 2, 1151 + .tap = { 28, 28 }, 1152 + }, { 1153 + .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" 1154 + "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" 1155 + "\xec\x60\xf7\x8e\x02\x99\x30\xc7" 1156 + "\x3b\xd2\x69\x00\x74\x0b\xa2\x16" 1157 + 
"\xad\x44\xdb\x4f\xe6\x7d\x14\x88" 1158 + "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa" 1159 + "\x91\x05\x9c\x33\xca\x3e\xd5\x6c" 1160 + "\x03\x77\x0e\xa5\x19\xb0\x47\xde" 1161 + "\x52\xe9\x80\x17\x8b\x22\xb9\x2d" 1162 + "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f" 1163 + "\x36\xcd\x41\xd8\x6f\x06\x7a\x11" 1164 + "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83" 1165 + "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5" 1166 + "\x69\x00\x97\x0b\xa2\x39\xd0\x44" 1167 + "\xdb\x72\x09\x7d\x14\xab\x1f\xb6" 1168 + "\x4d\xe4\x58\xef\x86\x1d\x91\x28" 1169 + "\xbf\x33\xca\x61\xf8\x6c\x03\x9a" 1170 + "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c" 1171 + "\x80\x17\xae\x22\xb9\x50\xe7\x5b" 1172 + "\xf2\x89\x20\x94\x2b\xc2\x36\xcd" 1173 + "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f" 1174 + "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1" 1175 + "\x25\xbc\x53\xea\x5e\xf5\x8c\x00" 1176 + "\x97\x2e\xc5\x39\xd0\x67\xfe\x72" 1177 + "\x09\xa0\x14\xab\x42\xd9\x4d\xe4" 1178 + "\x7b\x12\x86\x1d\xb4\x28\xbf\x56" 1179 + "\xed\x61\xf8\x8f\x03\x9a\x31\xc8" 1180 + "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17" 1181 + "\xae\x45\xdc\x50\xe7\x7e\x15\x89" 1182 + "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb" 1183 + "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d" 1184 + "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf" 1185 + "\x53\xea\x81\x18\x8c\x23\xba\x2e" 1186 + "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0" 1187 + "\x37\xce\x42\xd9\x70\x07\x7b\x12" 1188 + "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84" 1189 + "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6" 1190 + "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45" 1191 + "\xdc\x73\x0a\x7e\x15\xac\x20\xb7" 1192 + "\x4e\xe5\x59\xf0\x87\x1e\x92\x29" 1193 + "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b" 1194 + "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d" 1195 + "\x81\x18\xaf\x23\xba\x51\xe8\x5c" 1196 + "\xf3\x8a\x21\x95\x2c\xc3\x37\xce" 1197 + "\x65\xfc\x70\x07\x9e\x12\xa9\x40" 1198 + "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2" 1199 + "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01" 1200 + "\x98\x2f\xc6\x3a\xd1\x68\xff\x73" 1201 + "\x0a\xa1\x15\xac\x43\xda\x4e\xe5" 1202 + "\x7c\x13\x87\x1e\xb5\x29\xc0\x57" 1203 + "\xee\x62\xf9\x90\x04\x9b\x32\xc9" 1204 + 
"\x3d\xd4\x6b\x02\x76\x0d\xa4\x18" 1205 + "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a" 1206 + "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc" 1207 + "\x93\x07\x9e\x35\xcc\x40\xd7\x6e" 1208 + "\x05\x79\x10\xa7\x1b\xb2\x49\xe0" 1209 + "\x54\xeb\x82\x19\x8d\x24\xbb\x2f" 1210 + "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1" 1211 + "\x38\xcf\x43\xda\x71\x08\x7c\x13" 1212 + "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85" 1213 + "\x1c\x90\x27\xbe\x32\xc9\x60\xf7" 1214 + "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46" 1215 + "\xdd\x74\x0b\x7f\x16\xad\x21\xb8" 1216 + "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a" 1217 + "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c" 1218 + "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e" 1219 + "\x82\x19\xb0\x24\xbb\x52\xe9\x5d" 1220 + "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf" 1221 + "\x66\xfd\x71\x08\x9f\x13\xaa\x41" 1222 + "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3" 1223 + "\x27\xbe\x55\xec\x60\xf7\x8e\x02" 1224 + "\x99\x30\xc7\x3b\xd2\x69\x00\x74" 1225 + "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6" 1226 + "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58" 1227 + "\xef\x63\xfa\x91\x05\x9c\x33\xca" 1228 + "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19" 1229 + "\xb0\x47\xde\x52\xe9\x80\x17\x8b" 1230 + "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd" 1231 + "\x94\x08\x9f\x36\xcd\x41\xd8\x6f" 1232 + "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1" 1233 + "\x55\xec\x83\x1a\x8e\x25\xbc\x30" 1234 + "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2" 1235 + "\x39\xd0\x44\xdb\x72\x09\x7d\x14" 1236 + "\xab\x1f\xb6\x4d\xe4\x58\xef\x86" 1237 + "\x1d\x91\x28\xbf\x33\xca\x61\xf8" 1238 + "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47" 1239 + "\xde\x75\x0c\x80\x17\xae\x22\xb9" 1240 + "\x50\xe7\x5b\xf2\x89\x20\x94\x2b" 1241 + "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d" 1242 + "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f" 1243 + "\x83\x1a\xb1\x25\xbc\x53\xea\x5e" 1244 + "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0" 1245 + "\x67\xfe\x72\x09\xa0\x14\xab\x42" 1246 + "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4" 1247 + "\x28\xbf\x56\xed\x61\xf8\x8f\x03" 1248 + "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75" 1249 + "\x0c\xa3\x17\xae\x45\xdc\x50\xe7" 1250 + "\x7e\x15\x89\x20\xb7\x2b\xc2\x59" 1251 + 
"\xf0\x64\xfb\x92\x06\x9d\x34\xcb" 1252 + "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a" 1253 + "\xb1\x48\xdf\x53\xea\x81\x18\x8c" 1254 + "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe" 1255 + "\x95\x09\xa0\x37\xce\x42\xd9\x70" 1256 + "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2" 1257 + "\x56\xed\x84\x1b\x8f\x26\xbd\x31" 1258 + "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3" 1259 + "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15" 1260 + "\xac\x20\xb7\x4e\xe5\x59\xf0\x87" 1261 + "\x1e\x92\x29\xc0\x34\xcb\x62\xf9" 1262 + "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48" 1263 + "\xdf\x76\x0d\x81\x18\xaf\x23\xba" 1264 + "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c" 1265 + "\xc3\x37\xce\x65\xfc\x70\x07\x9e" 1266 + "\x12\xa9\x40\xd7\x4b\xe2\x79\x10" 1267 + "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f" 1268 + "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1" 1269 + "\x68\xff\x73\x0a\xa1\x15\xac\x43" 1270 + "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5" 1271 + "\x29\xc0\x57\xee\x62\xf9\x90\x04" 1272 + "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76" 1273 + "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8" 1274 + "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a" 1275 + "\xf1\x65\xfc\x93\x07\x9e\x35\xcc" 1276 + "\x40\xd7\x6e\x05\x79\x10\xa7\x1b" 1277 + "\xb2\x49\xe0\x54\xeb\x82\x19\x8d" 1278 + "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff" 1279 + "\x96\x0a\xa1\x38\xcf\x43\xda\x71" 1280 + "\x08\x7c\x13\xaa\x1e\xb5\x4c", 1281 + .psize = 1023, 1282 + .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde" 1283 + "\xf0\xc6\x42\x17\xd7\xb2\x26\x47" 1284 + "\x90\x28\xa6\x84\xe8\x49\x7a\x86" 1285 + "\xd6\xb8\x9e\xf8\x07\x59\x21\x03" 1286 + "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0" 1287 + "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b" 1288 + "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41" 1289 + "\x32\xc2\x8e\x50\x91\x86\x90\xfb", 1560 1290 }, 1561 1291 }; 1562 1292
+2 -3
crypto/twofish_common.c
··· 24 24 * GNU General Public License for more details. 25 25 * 26 26 * You should have received a copy of the GNU General Public License 27 - * along with this program; if not, write to the Free Software 28 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 29 - * USA 27 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 28 + * 30 29 * 31 30 * This code is a "clean room" implementation, written from the paper 32 31 * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
+2 -3
crypto/twofish_generic.c
··· 23 23 * GNU General Public License for more details. 24 24 * 25 25 * You should have received a copy of the GNU General Public License 26 - * along with this program; if not, write to the Free Software 27 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 28 - * USA 26 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 27 + * 29 28 * 30 29 * This code is a "clean room" implementation, written from the paper 31 30 * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
+1 -2
crypto/xcbc.c
··· 12 12 * GNU General Public License for more details. 13 13 * 14 14 * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 16 * 18 17 * Author: 19 18 * Kazunori Miyazawa <miyazawa@linux-ipv6.org>
+16 -16
drivers/char/hw_random/Kconfig
··· 73 73 74 74 If unsure, say Y. 75 75 76 - config HW_RANDOM_BCM63XX 77 - tristate "Broadcom BCM63xx Random Number Generator support" 78 - depends on BCM63XX || BMIPS_GENERIC 79 - default HW_RANDOM 80 - ---help--- 81 - This driver provides kernel-side support for the Random Number 82 - Generator hardware found on the Broadcom BCM63xx SoCs. 83 - 84 - To compile this driver as a module, choose M here: the 85 - module will be called bcm63xx-rng 86 - 87 - If unusure, say Y. 88 - 89 76 config HW_RANDOM_BCM2835 90 - tristate "Broadcom BCM2835 Random Number Generator support" 91 - depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X 77 + tristate "Broadcom BCM2835/BCM63xx Random Number Generator support" 78 + depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \ 79 + ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC 92 80 default HW_RANDOM 93 81 ---help--- 94 82 This driver provides kernel-side support for the Random Number 95 - Generator hardware found on the Broadcom BCM2835 SoCs. 83 + Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs. 96 84 97 85 To compile this driver as a module, choose M here: the 98 86 module will be called bcm2835-rng ··· 424 436 425 437 If unsure, say Y. 426 438 439 + config HW_RANDOM_EXYNOS 440 + tristate "Samsung Exynos True Random Number Generator support" 441 + depends on ARCH_EXYNOS || COMPILE_TEST 442 + default HW_RANDOM 443 + ---help--- 444 + This driver provides support for the True Random Number 445 + Generator available in Exynos SoCs. 446 + 447 + To compile this driver as a module, choose M here: the module 448 + will be called exynos-trng. 449 + 450 + If unsure, say Y. 427 451 endif # HW_RANDOM 428 452 429 453 config UML_RANDOM
+1 -1
drivers/char/hw_random/Makefile
··· 9 9 obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o 10 10 obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o 11 11 obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o 12 - obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o 13 12 obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o 14 13 obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o 15 14 n2-rng-y := n2-drv.o n2-asm.o 16 15 obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o 16 + obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o 17 17 obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o 18 18 obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o 19 19 obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
+118 -53
drivers/char/hw_random/bcm2835-rng.c
··· 15 15 #include <linux/of_platform.h> 16 16 #include <linux/platform_device.h> 17 17 #include <linux/printk.h> 18 + #include <linux/clk.h> 18 19 19 20 #define RNG_CTRL 0x0 20 21 #define RNG_STATUS 0x4 ··· 30 29 31 30 #define RNG_INT_OFF 0x1 32 31 33 - static void __init nsp_rng_init(void __iomem *base) 34 - { 35 - u32 val; 32 + struct bcm2835_rng_priv { 33 + struct hwrng rng; 34 + void __iomem *base; 35 + bool mask_interrupts; 36 + struct clk *clk; 37 + }; 36 38 37 - /* mask the interrupt */ 38 - val = readl(base + RNG_INT_MASK); 39 - val |= RNG_INT_OFF; 40 - writel(val, base + RNG_INT_MASK); 39 + static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) 40 + { 41 + return container_of(rng, struct bcm2835_rng_priv, rng); 42 + } 43 + 44 + static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset) 45 + { 46 + /* MIPS chips strapped for BE will automagically configure the 47 + * peripheral registers for CPU-native byte order. 48 + */ 49 + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 50 + return __raw_readl(priv->base + offset); 51 + else 52 + return readl(priv->base + offset); 53 + } 54 + 55 + static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val, 56 + u32 offset) 57 + { 58 + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) 59 + __raw_writel(val, priv->base + offset); 60 + else 61 + writel(val, priv->base + offset); 41 62 } 42 63 43 64 static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, 44 65 bool wait) 45 66 { 46 - void __iomem *rng_base = (void __iomem *)rng->priv; 67 + struct bcm2835_rng_priv *priv = to_rng_priv(rng); 47 68 u32 max_words = max / sizeof(u32); 48 69 u32 num_words, count; 49 70 50 - while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) { 71 + while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { 51 72 if (!wait) 52 73 return 0; 53 74 cpu_relax(); 54 75 } 55 76 56 - num_words = readl(rng_base + RNG_STATUS) >> 24; 77 + num_words = rng_readl(priv, RNG_STATUS) 
>> 24; 57 78 if (num_words > max_words) 58 79 num_words = max_words; 59 80 60 81 for (count = 0; count < num_words; count++) 61 - ((u32 *)buf)[count] = readl(rng_base + RNG_DATA); 82 + ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA); 62 83 63 84 return num_words * sizeof(u32); 64 85 } 65 86 66 - static struct hwrng bcm2835_rng_ops = { 67 - .name = "bcm2835", 68 - .read = bcm2835_rng_read, 87 + static int bcm2835_rng_init(struct hwrng *rng) 88 + { 89 + struct bcm2835_rng_priv *priv = to_rng_priv(rng); 90 + int ret = 0; 91 + u32 val; 92 + 93 + if (!IS_ERR(priv->clk)) { 94 + ret = clk_prepare_enable(priv->clk); 95 + if (ret) 96 + return ret; 97 + } 98 + 99 + if (priv->mask_interrupts) { 100 + /* mask the interrupt */ 101 + val = rng_readl(priv, RNG_INT_MASK); 102 + val |= RNG_INT_OFF; 103 + rng_writel(priv, val, RNG_INT_MASK); 104 + } 105 + 106 + /* set warm-up count & enable */ 107 + rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS); 108 + rng_writel(priv, RNG_RBGEN, RNG_CTRL); 109 + 110 + return ret; 111 + } 112 + 113 + static void bcm2835_rng_cleanup(struct hwrng *rng) 114 + { 115 + struct bcm2835_rng_priv *priv = to_rng_priv(rng); 116 + 117 + /* disable rng hardware */ 118 + rng_writel(priv, 0, RNG_CTRL); 119 + 120 + if (!IS_ERR(priv->clk)) 121 + clk_disable_unprepare(priv->clk); 122 + } 123 + 124 + struct bcm2835_rng_of_data { 125 + bool mask_interrupts; 126 + }; 127 + 128 + static const struct bcm2835_rng_of_data nsp_rng_of_data = { 129 + .mask_interrupts = true, 69 130 }; 70 131 71 132 static const struct of_device_id bcm2835_rng_of_match[] = { 72 133 { .compatible = "brcm,bcm2835-rng"}, 73 - { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init}, 74 - { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init}, 134 + { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data }, 135 + { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data }, 136 + { .compatible = "brcm,bcm6368-rng"}, 75 137 {}, 76 138 }; 77 139 78 140 static int bcm2835_rng_probe(struct 
platform_device *pdev) 79 141 { 142 + const struct bcm2835_rng_of_data *of_data; 80 143 struct device *dev = &pdev->dev; 81 144 struct device_node *np = dev->of_node; 82 - void (*rng_setup)(void __iomem *base); 83 145 const struct of_device_id *rng_id; 84 - void __iomem *rng_base; 146 + struct bcm2835_rng_priv *priv; 147 + struct resource *r; 85 148 int err; 86 149 150 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 151 + if (!priv) 152 + return -ENOMEM; 153 + 154 + platform_set_drvdata(pdev, priv); 155 + 156 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 157 + 87 158 /* map peripheral */ 88 - rng_base = of_iomap(np, 0); 89 - if (!rng_base) { 90 - dev_err(dev, "failed to remap rng regs"); 91 - return -ENODEV; 92 - } 93 - bcm2835_rng_ops.priv = (unsigned long)rng_base; 159 + priv->base = devm_ioremap_resource(dev, r); 160 + if (IS_ERR(priv->base)) 161 + return PTR_ERR(priv->base); 162 + 163 + /* Clock is optional on most platforms */ 164 + priv->clk = devm_clk_get(dev, NULL); 165 + 166 + priv->rng.name = pdev->name; 167 + priv->rng.init = bcm2835_rng_init; 168 + priv->rng.read = bcm2835_rng_read; 169 + priv->rng.cleanup = bcm2835_rng_cleanup; 94 170 95 171 rng_id = of_match_node(bcm2835_rng_of_match, np); 96 - if (!rng_id) { 97 - iounmap(rng_base); 172 + if (!rng_id) 98 173 return -EINVAL; 99 - } 100 - /* Check for rng init function, execute it */ 101 - rng_setup = rng_id->data; 102 - if (rng_setup) 103 - rng_setup(rng_base); 104 174 105 - /* set warm-up count & enable */ 106 - __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS); 107 - __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL); 175 + /* Check for rng init function, execute it */ 176 + of_data = rng_id->data; 177 + if (of_data) 178 + priv->mask_interrupts = of_data->mask_interrupts; 108 179 109 180 /* register driver */ 110 - err = hwrng_register(&bcm2835_rng_ops); 111 - if (err) { 181 + err = devm_hwrng_register(dev, &priv->rng); 182 + if (err) 112 183 dev_err(dev, "hwrng registration failed\n"); 
113 - iounmap(rng_base); 114 - } else 184 + else 115 185 dev_info(dev, "hwrng registered\n"); 116 186 117 187 return err; 118 188 } 119 189 120 - static int bcm2835_rng_remove(struct platform_device *pdev) 121 - { 122 - void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv; 123 - 124 - /* disable rng hardware */ 125 - __raw_writel(0, rng_base + RNG_CTRL); 126 - 127 - /* unregister driver */ 128 - hwrng_unregister(&bcm2835_rng_ops); 129 - iounmap(rng_base); 130 - 131 - return 0; 132 - } 133 - 134 190 MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match); 191 + 192 + static struct platform_device_id bcm2835_rng_devtype[] = { 193 + { .name = "bcm2835-rng" }, 194 + { .name = "bcm63xx-rng" }, 195 + { /* sentinel */ } 196 + }; 197 + MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype); 135 198 136 199 static struct platform_driver bcm2835_rng_driver = { 137 200 .driver = { ··· 203 138 .of_match_table = bcm2835_rng_of_match, 204 139 }, 205 140 .probe = bcm2835_rng_probe, 206 - .remove = bcm2835_rng_remove, 141 + .id_table = bcm2835_rng_devtype, 207 142 }; 208 143 module_platform_driver(bcm2835_rng_driver); 209 144
-154
drivers/char/hw_random/bcm63xx-rng.c
··· 1 - /* 2 - * Broadcom BCM63xx Random Number Generator support 3 - * 4 - * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org> 5 - * Copyright (C) 2009, Broadcom Corporation 6 - * 7 - */ 8 - #include <linux/module.h> 9 - #include <linux/slab.h> 10 - #include <linux/io.h> 11 - #include <linux/err.h> 12 - #include <linux/clk.h> 13 - #include <linux/platform_device.h> 14 - #include <linux/hw_random.h> 15 - #include <linux/of.h> 16 - 17 - #define RNG_CTRL 0x00 18 - #define RNG_EN (1 << 0) 19 - 20 - #define RNG_STAT 0x04 21 - #define RNG_AVAIL_MASK (0xff000000) 22 - 23 - #define RNG_DATA 0x08 24 - #define RNG_THRES 0x0c 25 - #define RNG_MASK 0x10 26 - 27 - struct bcm63xx_rng_priv { 28 - struct hwrng rng; 29 - struct clk *clk; 30 - void __iomem *regs; 31 - }; 32 - 33 - #define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng) 34 - 35 - static int bcm63xx_rng_init(struct hwrng *rng) 36 - { 37 - struct bcm63xx_rng_priv *priv = to_rng_priv(rng); 38 - u32 val; 39 - int error; 40 - 41 - error = clk_prepare_enable(priv->clk); 42 - if (error) 43 - return error; 44 - 45 - val = __raw_readl(priv->regs + RNG_CTRL); 46 - val |= RNG_EN; 47 - __raw_writel(val, priv->regs + RNG_CTRL); 48 - 49 - return 0; 50 - } 51 - 52 - static void bcm63xx_rng_cleanup(struct hwrng *rng) 53 - { 54 - struct bcm63xx_rng_priv *priv = to_rng_priv(rng); 55 - u32 val; 56 - 57 - val = __raw_readl(priv->regs + RNG_CTRL); 58 - val &= ~RNG_EN; 59 - __raw_writel(val, priv->regs + RNG_CTRL); 60 - 61 - clk_disable_unprepare(priv->clk); 62 - } 63 - 64 - static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) 65 - { 66 - struct bcm63xx_rng_priv *priv = to_rng_priv(rng); 67 - 68 - return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK; 69 - } 70 - 71 - static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data) 72 - { 73 - struct bcm63xx_rng_priv *priv = to_rng_priv(rng); 74 - 75 - *data = __raw_readl(priv->regs + RNG_DATA); 76 - 77 - return 4; 78 - } 79 - 80 - static int 
bcm63xx_rng_probe(struct platform_device *pdev) 81 - { 82 - struct resource *r; 83 - int ret; 84 - struct bcm63xx_rng_priv *priv; 85 - 86 - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 87 - if (!r) { 88 - dev_err(&pdev->dev, "no iomem resource\n"); 89 - return -ENXIO; 90 - } 91 - 92 - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 93 - if (!priv) 94 - return -ENOMEM; 95 - 96 - priv->rng.name = pdev->name; 97 - priv->rng.init = bcm63xx_rng_init; 98 - priv->rng.cleanup = bcm63xx_rng_cleanup; 99 - priv->rng.data_present = bcm63xx_rng_data_present; 100 - priv->rng.data_read = bcm63xx_rng_data_read; 101 - 102 - priv->clk = devm_clk_get(&pdev->dev, "ipsec"); 103 - if (IS_ERR(priv->clk)) { 104 - ret = PTR_ERR(priv->clk); 105 - dev_err(&pdev->dev, "no clock for device: %d\n", ret); 106 - return ret; 107 - } 108 - 109 - if (!devm_request_mem_region(&pdev->dev, r->start, 110 - resource_size(r), pdev->name)) { 111 - dev_err(&pdev->dev, "request mem failed"); 112 - return -EBUSY; 113 - } 114 - 115 - priv->regs = devm_ioremap_nocache(&pdev->dev, r->start, 116 - resource_size(r)); 117 - if (!priv->regs) { 118 - dev_err(&pdev->dev, "ioremap failed"); 119 - return -ENOMEM; 120 - } 121 - 122 - ret = devm_hwrng_register(&pdev->dev, &priv->rng); 123 - if (ret) { 124 - dev_err(&pdev->dev, "failed to register rng device: %d\n", 125 - ret); 126 - return ret; 127 - } 128 - 129 - dev_info(&pdev->dev, "registered RNG driver\n"); 130 - 131 - return 0; 132 - } 133 - 134 - #ifdef CONFIG_OF 135 - static const struct of_device_id bcm63xx_rng_of_match[] = { 136 - { .compatible = "brcm,bcm6368-rng", }, 137 - {}, 138 - }; 139 - MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match); 140 - #endif 141 - 142 - static struct platform_driver bcm63xx_rng_driver = { 143 - .probe = bcm63xx_rng_probe, 144 - .driver = { 145 - .name = "bcm63xx-rng", 146 - .of_match_table = of_match_ptr(bcm63xx_rng_of_match), 147 - }, 148 - }; 149 - 150 - module_platform_driver(bcm63xx_rng_driver); 151 - 152 - 
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); 153 - MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver"); 154 - MODULE_LICENSE("GPL");
+4
drivers/char/hw_random/core.c
··· 306 306 ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng)); 307 307 if (!ret) 308 308 cur_rng_set_by_user = 0; 309 + } else { 310 + drop_current_rng(); 311 + cur_rng_set_by_user = 0; 312 + ret = 0; 309 313 } 310 314 311 315 return ret;
+235
drivers/char/hw_random/exynos-trng.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * RNG driver for Exynos TRNGs 4 + * 5 + * Author: Łukasz Stelmach <l.stelmach@samsung.com> 6 + * 7 + * Copyright 2017 (c) Samsung Electronics Software, Inc. 8 + * 9 + * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by 10 + * Krzysztof Kozłowski <krzk@kernel.org> 11 + */ 12 + 13 + #include <linux/clk.h> 14 + #include <linux/crypto.h> 15 + #include <linux/delay.h> 16 + #include <linux/err.h> 17 + #include <linux/hw_random.h> 18 + #include <linux/io.h> 19 + #include <linux/iopoll.h> 20 + #include <linux/kernel.h> 21 + #include <linux/module.h> 22 + #include <linux/platform_device.h> 23 + #include <linux/pm_runtime.h> 24 + 25 + #define EXYNOS_TRNG_CLKDIV (0x0) 26 + 27 + #define EXYNOS_TRNG_CTRL (0x20) 28 + #define EXYNOS_TRNG_CTRL_RNGEN BIT(31) 29 + 30 + #define EXYNOS_TRNG_POST_CTRL (0x30) 31 + #define EXYNOS_TRNG_ONLINE_CTRL (0x40) 32 + #define EXYNOS_TRNG_ONLINE_STAT (0x44) 33 + #define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48) 34 + #define EXYNOS_TRNG_FIFO_CTRL (0x50) 35 + #define EXYNOS_TRNG_FIFO_0 (0x80) 36 + #define EXYNOS_TRNG_FIFO_1 (0x84) 37 + #define EXYNOS_TRNG_FIFO_2 (0x88) 38 + #define EXYNOS_TRNG_FIFO_3 (0x8c) 39 + #define EXYNOS_TRNG_FIFO_4 (0x90) 40 + #define EXYNOS_TRNG_FIFO_5 (0x94) 41 + #define EXYNOS_TRNG_FIFO_6 (0x98) 42 + #define EXYNOS_TRNG_FIFO_7 (0x9c) 43 + #define EXYNOS_TRNG_FIFO_LEN (8) 44 + #define EXYNOS_TRNG_CLOCK_RATE (500000) 45 + 46 + 47 + struct exynos_trng_dev { 48 + struct device *dev; 49 + void __iomem *mem; 50 + struct clk *clk; 51 + struct hwrng rng; 52 + }; 53 + 54 + static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max, 55 + bool wait) 56 + { 57 + struct exynos_trng_dev *trng; 58 + int val; 59 + 60 + max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4)); 61 + 62 + trng = (struct exynos_trng_dev *)rng->priv; 63 + 64 + writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL); 65 + val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val, 66 + 
val == 0, 200, 1000000); 67 + if (val < 0) 68 + return val; 69 + 70 + memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max); 71 + 72 + return max; 73 + } 74 + 75 + static int exynos_trng_init(struct hwrng *rng) 76 + { 77 + struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv; 78 + unsigned long sss_rate; 79 + u32 val; 80 + 81 + sss_rate = clk_get_rate(trng->clk); 82 + 83 + /* 84 + * For most TRNG circuits the clock frequency of under 500 kHz 85 + * is safe. 86 + */ 87 + val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2); 88 + if (val > 0x7fff) { 89 + dev_err(trng->dev, "clock divider too large: %d", val); 90 + return -ERANGE; 91 + } 92 + val = val << 1; 93 + writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV); 94 + 95 + /* Enable the generator. */ 96 + val = EXYNOS_TRNG_CTRL_RNGEN; 97 + writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL); 98 + 99 + /* 100 + * Disable post-processing. /dev/hwrng is supposed to deliver 101 + * unprocessed data. 102 + */ 103 + writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL); 104 + 105 + return 0; 106 + } 107 + 108 + static int exynos_trng_probe(struct platform_device *pdev) 109 + { 110 + struct exynos_trng_dev *trng; 111 + struct resource *res; 112 + int ret = -ENOMEM; 113 + 114 + trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); 115 + if (!trng) 116 + return ret; 117 + 118 + trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev), 119 + GFP_KERNEL); 120 + if (!trng->rng.name) 121 + return ret; 122 + 123 + trng->rng.init = exynos_trng_init; 124 + trng->rng.read = exynos_trng_do_read; 125 + trng->rng.priv = (unsigned long) trng; 126 + 127 + platform_set_drvdata(pdev, trng); 128 + trng->dev = &pdev->dev; 129 + 130 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 131 + trng->mem = devm_ioremap_resource(&pdev->dev, res); 132 + if (IS_ERR(trng->mem)) 133 + return PTR_ERR(trng->mem); 134 + 135 + pm_runtime_enable(&pdev->dev); 136 + ret = pm_runtime_get_sync(&pdev->dev); 137 + if (ret < 0) { 138 + 
dev_err(&pdev->dev, "Could not get runtime PM.\n"); 139 + goto err_pm_get; 140 + } 141 + 142 + trng->clk = devm_clk_get(&pdev->dev, "secss"); 143 + if (IS_ERR(trng->clk)) { 144 + ret = PTR_ERR(trng->clk); 145 + dev_err(&pdev->dev, "Could not get clock.\n"); 146 + goto err_clock; 147 + } 148 + 149 + ret = clk_prepare_enable(trng->clk); 150 + if (ret) { 151 + dev_err(&pdev->dev, "Could not enable the clk.\n"); 152 + goto err_clock; 153 + } 154 + 155 + ret = hwrng_register(&trng->rng); 156 + if (ret) { 157 + dev_err(&pdev->dev, "Could not register hwrng device.\n"); 158 + goto err_register; 159 + } 160 + 161 + dev_info(&pdev->dev, "Exynos True Random Number Generator.\n"); 162 + 163 + return 0; 164 + 165 + err_register: 166 + clk_disable_unprepare(trng->clk); 167 + 168 + err_clock: 169 + pm_runtime_put_sync(&pdev->dev); 170 + 171 + err_pm_get: 172 + pm_runtime_disable(&pdev->dev); 173 + 174 + return ret; 175 + } 176 + 177 + static int exynos_trng_remove(struct platform_device *pdev) 178 + { 179 + struct exynos_trng_dev *trng = platform_get_drvdata(pdev); 180 + 181 + hwrng_unregister(&trng->rng); 182 + clk_disable_unprepare(trng->clk); 183 + 184 + pm_runtime_put_sync(&pdev->dev); 185 + pm_runtime_disable(&pdev->dev); 186 + 187 + return 0; 188 + } 189 + 190 + static int __maybe_unused exynos_trng_suspend(struct device *dev) 191 + { 192 + pm_runtime_put_sync(dev); 193 + 194 + return 0; 195 + } 196 + 197 + static int __maybe_unused exynos_trng_resume(struct device *dev) 198 + { 199 + int ret; 200 + 201 + ret = pm_runtime_get_sync(dev); 202 + if (ret < 0) { 203 + dev_err(dev, "Could not get runtime PM.\n"); 204 + pm_runtime_put_noidle(dev); 205 + return ret; 206 + } 207 + 208 + return 0; 209 + } 210 + 211 + static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend, 212 + exynos_trng_resume); 213 + 214 + static const struct of_device_id exynos_trng_dt_match[] = { 215 + { 216 + .compatible = "samsung,exynos5250-trng", 217 + }, 218 + { }, 219 + }; 220 + 
MODULE_DEVICE_TABLE(of, exynos_trng_dt_match); 221 + 222 + static struct platform_driver exynos_trng_driver = { 223 + .driver = { 224 + .name = "exynos-trng", 225 + .pm = &exynos_trng_pm_ops, 226 + .of_match_table = exynos_trng_dt_match, 227 + }, 228 + .probe = exynos_trng_probe, 229 + .remove = exynos_trng_remove, 230 + }; 231 + 232 + module_platform_driver(exynos_trng_driver); 233 + MODULE_AUTHOR("Łukasz Stelmach"); 234 + MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips"); 235 + MODULE_LICENSE("GPL v2");
+3 -10
drivers/char/hw_random/imx-rngc.c
··· 282 282 return 0; 283 283 } 284 284 285 - #ifdef CONFIG_PM 286 - static int imx_rngc_suspend(struct device *dev) 285 + static int __maybe_unused imx_rngc_suspend(struct device *dev) 287 286 { 288 287 struct imx_rngc *rngc = dev_get_drvdata(dev); 289 288 ··· 291 292 return 0; 292 293 } 293 294 294 - static int imx_rngc_resume(struct device *dev) 295 + static int __maybe_unused imx_rngc_resume(struct device *dev) 295 296 { 296 297 struct imx_rngc *rngc = dev_get_drvdata(dev); 297 298 ··· 300 301 return 0; 301 302 } 302 303 303 - static const struct dev_pm_ops imx_rngc_pm_ops = { 304 - .suspend = imx_rngc_suspend, 305 - .resume = imx_rngc_resume, 306 - }; 307 - #endif 304 + SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); 308 305 309 306 static const struct of_device_id imx_rngc_dt_ids[] = { 310 307 { .compatible = "fsl,imx25-rngb", .data = NULL, }, ··· 311 316 static struct platform_driver imx_rngc_driver = { 312 317 .driver = { 313 318 .name = "imx_rngc", 314 - #ifdef CONFIG_PM 315 319 .pm = &imx_rngc_pm_ops, 316 - #endif 317 320 .of_match_table = imx_rngc_dt_ids, 318 321 }, 319 322 .remove = __exit_p(imx_rngc_remove),
+1
drivers/char/hw_random/mtk-rng.c
··· 135 135 #endif 136 136 priv->rng.read = mtk_rng_read; 137 137 priv->rng.priv = (unsigned long)&pdev->dev; 138 + priv->rng.quality = 900; 138 139 139 140 priv->clk = devm_clk_get(&pdev->dev, "rng"); 140 141 if (IS_ERR(priv->clk)) {
+12 -12
drivers/char/random.c
··· 431 431 static int crng_init_cnt = 0; 432 432 #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) 433 433 static void _extract_crng(struct crng_state *crng, 434 - __u8 out[CHACHA20_BLOCK_SIZE]); 434 + __u32 out[CHACHA20_BLOCK_WORDS]); 435 435 static void _crng_backtrack_protect(struct crng_state *crng, 436 - __u8 tmp[CHACHA20_BLOCK_SIZE], int used); 436 + __u32 tmp[CHACHA20_BLOCK_WORDS], int used); 437 437 static void process_random_ready_list(void); 438 438 static void _get_random_bytes(void *buf, int nbytes); 439 439 ··· 817 817 unsigned long flags; 818 818 int i, num; 819 819 union { 820 - __u8 block[CHACHA20_BLOCK_SIZE]; 820 + __u32 block[CHACHA20_BLOCK_WORDS]; 821 821 __u32 key[8]; 822 822 } buf; 823 823 ··· 851 851 } 852 852 853 853 static void _extract_crng(struct crng_state *crng, 854 - __u8 out[CHACHA20_BLOCK_SIZE]) 854 + __u32 out[CHACHA20_BLOCK_WORDS]) 855 855 { 856 856 unsigned long v, flags; 857 857 ··· 867 867 spin_unlock_irqrestore(&crng->lock, flags); 868 868 } 869 869 870 - static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE]) 870 + static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS]) 871 871 { 872 872 struct crng_state *crng = NULL; 873 873 ··· 885 885 * enough) to mutate the CRNG key to provide backtracking protection. 
886 886 */ 887 887 static void _crng_backtrack_protect(struct crng_state *crng, 888 - __u8 tmp[CHACHA20_BLOCK_SIZE], int used) 888 + __u32 tmp[CHACHA20_BLOCK_WORDS], int used) 889 889 { 890 890 unsigned long flags; 891 891 __u32 *s, *d; ··· 897 897 used = 0; 898 898 } 899 899 spin_lock_irqsave(&crng->lock, flags); 900 - s = (__u32 *) &tmp[used]; 900 + s = &tmp[used / sizeof(__u32)]; 901 901 d = &crng->state[4]; 902 902 for (i=0; i < 8; i++) 903 903 *d++ ^= *s++; 904 904 spin_unlock_irqrestore(&crng->lock, flags); 905 905 } 906 906 907 - static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used) 907 + static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used) 908 908 { 909 909 struct crng_state *crng = NULL; 910 910 ··· 920 920 static ssize_t extract_crng_user(void __user *buf, size_t nbytes) 921 921 { 922 922 ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE; 923 - __u8 tmp[CHACHA20_BLOCK_SIZE]; 923 + __u32 tmp[CHACHA20_BLOCK_WORDS]; 924 924 int large_request = (nbytes > 256); 925 925 926 926 while (nbytes) { ··· 1507 1507 */ 1508 1508 static void _get_random_bytes(void *buf, int nbytes) 1509 1509 { 1510 - __u8 tmp[CHACHA20_BLOCK_SIZE]; 1510 + __u32 tmp[CHACHA20_BLOCK_WORDS]; 1511 1511 1512 1512 trace_get_random_bytes(nbytes, _RET_IP_); 1513 1513 ··· 2114 2114 if (use_lock) 2115 2115 read_lock_irqsave(&batched_entropy_reset_lock, flags); 2116 2116 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { 2117 - extract_crng((u8 *)batch->entropy_u64); 2117 + extract_crng((__u32 *)batch->entropy_u64); 2118 2118 batch->position = 0; 2119 2119 } 2120 2120 ret = batch->entropy_u64[batch->position++]; ··· 2144 2144 if (use_lock) 2145 2145 read_lock_irqsave(&batched_entropy_reset_lock, flags); 2146 2146 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { 2147 - extract_crng((u8 *)batch->entropy_u32); 2147 + extract_crng(batch->entropy_u32); 2148 2148 batch->position = 0; 2149 2149 } 2150 2150 ret = 
batch->entropy_u32[batch->position++];
-1
drivers/crypto/Kconfig
··· 723 723 select CRYPTO_HASH 724 724 select CRYPTO_SHA1 725 725 select CRYPTO_SHA256 726 - select CRYPTO_SHA384 727 726 select CRYPTO_SHA512 728 727 help 729 728 Enables the driver for the on-chip crypto accelerator
+1 -5
drivers/crypto/amcc/crypto4xx_alg.c
··· 256 256 if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3)) 257 257 return true; 258 258 259 - /* CCM - fix CBC MAC mismatch in special case */ 260 - if (is_ccm && decrypt && !req->assoclen) 261 - return true; 262 - 263 259 return false; 264 260 } 265 261 ··· 326 330 sa = (struct dynamic_sa_ctl *) ctx->sa_in; 327 331 sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2); 328 332 329 - set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV, 333 + set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, 330 334 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE, 331 335 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC, 332 336 SA_CIPHER_ALG_AES,
+82 -49
drivers/crypto/amcc/crypto4xx_core.c
··· 128 128 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); 129 129 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT); 130 130 writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG); 131 - writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN); 131 + if (dev->is_revb) { 132 + writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10, 133 + dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT); 134 + writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT, 135 + dev->ce_base + CRYPTO4XX_INT_EN); 136 + } else { 137 + writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN); 138 + } 132 139 } 133 140 134 141 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size) ··· 282 275 */ 283 276 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) 284 277 { 285 - dev->gdr = dma_alloc_coherent(dev->core_dev->device, 286 - sizeof(struct ce_gd) * PPC4XX_NUM_GD, 287 - &dev->gdr_pa, GFP_ATOMIC); 278 + dev->gdr = dma_zalloc_coherent(dev->core_dev->device, 279 + sizeof(struct ce_gd) * PPC4XX_NUM_GD, 280 + &dev->gdr_pa, GFP_ATOMIC); 288 281 if (!dev->gdr) 289 282 return -ENOMEM; 290 - 291 - memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD); 292 283 293 284 return 0; 294 285 } ··· 575 570 struct pd_uinfo *pd_uinfo, 576 571 struct ce_pd *pd) 577 572 { 578 - struct aead_request *aead_req; 579 - struct crypto4xx_ctx *ctx; 573 + struct aead_request *aead_req = container_of(pd_uinfo->async_req, 574 + struct aead_request, base); 580 575 struct scatterlist *dst = pd_uinfo->dest_va; 576 + size_t cp_len = crypto_aead_authsize( 577 + crypto_aead_reqtfm(aead_req)); 578 + u32 icv[cp_len]; 581 579 int err = 0; 582 - 583 - aead_req = container_of(pd_uinfo->async_req, struct aead_request, 584 - base); 585 - ctx = crypto_tfm_ctx(aead_req->base.tfm); 586 580 587 581 if (pd_uinfo->using_sd) { 588 582 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ··· 594 590 595 591 if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) { 596 592 /* append icv at the end */ 597 - size_t 
cp_len = crypto_aead_authsize( 598 - crypto_aead_reqtfm(aead_req)); 599 - u32 icv[cp_len]; 600 - 601 593 crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest, 602 594 cp_len); 603 595 604 596 scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen, 605 597 cp_len, 1); 598 + } else { 599 + /* check icv at the end */ 600 + scatterwalk_map_and_copy(icv, aead_req->src, 601 + aead_req->assoclen + aead_req->cryptlen - 602 + cp_len, cp_len, 0); 603 + 604 + crypto4xx_memcpy_from_le32(icv, icv, cp_len); 605 + 606 + if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len)) 607 + err = -EBADMSG; 606 608 } 607 609 608 610 crypto4xx_ret_sg_desc(dev, pd_uinfo); 609 611 610 612 if (pd->pd_ctl.bf.status & 0xff) { 611 - if (pd->pd_ctl.bf.status & 0x1) { 612 - /* authentication error */ 613 - err = -EBADMSG; 614 - } else { 615 - if (!__ratelimit(&dev->aead_ratelimit)) { 616 - if (pd->pd_ctl.bf.status & 2) 617 - pr_err("pad fail error\n"); 618 - if (pd->pd_ctl.bf.status & 4) 619 - pr_err("seqnum fail\n"); 620 - if (pd->pd_ctl.bf.status & 8) 621 - pr_err("error _notify\n"); 622 - pr_err("aead return err status = 0x%02x\n", 623 - pd->pd_ctl.bf.status & 0xff); 624 - pr_err("pd pad_ctl = 0x%08x\n", 625 - pd->pd_ctl.bf.pd_pad_ctl); 626 - } 627 - err = -EINVAL; 613 + if (!__ratelimit(&dev->aead_ratelimit)) { 614 + if (pd->pd_ctl.bf.status & 2) 615 + pr_err("pad fail error\n"); 616 + if (pd->pd_ctl.bf.status & 4) 617 + pr_err("seqnum fail\n"); 618 + if (pd->pd_ctl.bf.status & 8) 619 + pr_err("error _notify\n"); 620 + pr_err("aead return err status = 0x%02x\n", 621 + pd->pd_ctl.bf.status & 0xff); 622 + pr_err("pd pad_ctl = 0x%08x\n", 623 + pd->pd_ctl.bf.pd_pad_ctl); 628 624 } 625 + err = -EINVAL; 629 626 } 630 627 631 628 if (pd_uinfo->state & PD_ENTRY_BUSY) ··· 1075 1070 /** 1076 1071 * Top Half of isr. 
1077 1072 */ 1078 - static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) 1073 + static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data, 1074 + u32 clr_val) 1079 1075 { 1080 1076 struct device *dev = (struct device *)data; 1081 1077 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev); 1082 1078 1083 - if (!core_dev->dev->ce_base) 1084 - return 0; 1085 - 1086 - writel(PPC4XX_INTERRUPT_CLR, 1087 - core_dev->dev->ce_base + CRYPTO4XX_INT_CLR); 1079 + writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR); 1088 1080 tasklet_schedule(&core_dev->tasklet); 1089 1081 1090 1082 return IRQ_HANDLED; 1083 + } 1084 + 1085 + static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data) 1086 + { 1087 + return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR); 1088 + } 1089 + 1090 + static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data) 1091 + { 1092 + return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR | 1093 + PPC4XX_TMO_ERR_INT); 1091 1094 } 1092 1095 1093 1096 /** ··· 1279 1266 struct resource res; 1280 1267 struct device *dev = &ofdev->dev; 1281 1268 struct crypto4xx_core_device *core_dev; 1269 + u32 pvr; 1270 + bool is_revb = true; 1282 1271 1283 1272 rc = of_address_to_resource(ofdev->dev.of_node, 0, &res); 1284 1273 if (rc) ··· 1297 1282 mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET); 1298 1283 mtdcri(SDR0, PPC405EX_SDR0_SRST, 1299 1284 mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET); 1285 + is_revb = false; 1300 1286 } else if (of_find_compatible_node(NULL, NULL, 1301 1287 "amcc,ppc460sx-crypto")) { 1302 1288 mtdcri(SDR0, PPC460SX_SDR0_SRST, ··· 1320 1304 if (!core_dev->dev) 1321 1305 goto err_alloc_dev; 1322 1306 1307 + /* 1308 + * Older version of 460EX/GT have a hardware bug. 
1309 + * Hence they do not support H/W based security intr coalescing 1310 + */ 1311 + pvr = mfspr(SPRN_PVR); 1312 + if (is_revb && ((pvr >> 4) == 0x130218A)) { 1313 + u32 min = PVR_MIN(pvr); 1314 + 1315 + if (min < 4) { 1316 + dev_info(dev, "RevA detected - disable interrupt coalescing\n"); 1317 + is_revb = false; 1318 + } 1319 + } 1320 + 1323 1321 core_dev->dev->core_dev = core_dev; 1322 + core_dev->dev->is_revb = is_revb; 1324 1323 core_dev->device = dev; 1325 1324 spin_lock_init(&core_dev->lock); 1326 1325 INIT_LIST_HEAD(&core_dev->dev->alg_list); ··· 1356 1325 tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb, 1357 1326 (unsigned long) dev); 1358 1327 1359 - /* Register for Crypto isr, Crypto Engine IRQ */ 1360 - core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); 1361 - rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0, 1362 - core_dev->dev->name, dev); 1363 - if (rc) 1364 - goto err_request_irq; 1365 - 1366 1328 core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0); 1367 1329 if (!core_dev->dev->ce_base) { 1368 1330 dev_err(dev, "failed to of_iomap\n"); 1369 1331 rc = -ENOMEM; 1370 1332 goto err_iomap; 1371 1333 } 1334 + 1335 + /* Register for Crypto isr, Crypto Engine IRQ */ 1336 + core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); 1337 + rc = request_irq(core_dev->irq, is_revb ? 
1338 + crypto4xx_ce_interrupt_handler_revb : 1339 + crypto4xx_ce_interrupt_handler, 0, 1340 + KBUILD_MODNAME, dev); 1341 + if (rc) 1342 + goto err_request_irq; 1372 1343 1373 1344 /* need to setup pdr, rdr, gdr and sdr before this */ 1374 1345 crypto4xx_hw_init(core_dev->dev); ··· 1385 1352 return 0; 1386 1353 1387 1354 err_start_dev: 1388 - iounmap(core_dev->dev->ce_base); 1389 - err_iomap: 1390 1355 free_irq(core_dev->irq, dev); 1391 1356 err_request_irq: 1392 1357 irq_dispose_mapping(core_dev->irq); 1358 + iounmap(core_dev->dev->ce_base); 1359 + err_iomap: 1393 1360 tasklet_kill(&core_dev->tasklet); 1394 1361 err_build_sdr: 1395 1362 crypto4xx_destroy_sdr(core_dev->dev); ··· 1430 1397 1431 1398 static struct platform_driver crypto4xx_driver = { 1432 1399 .driver = { 1433 - .name = MODULE_NAME, 1400 + .name = KBUILD_MODNAME, 1434 1401 .of_match_table = crypto4xx_match, 1435 1402 }, 1436 1403 .probe = crypto4xx_probe,
+1 -3
drivers/crypto/amcc/crypto4xx_core.h
··· 28 28 #include "crypto4xx_reg_def.h" 29 29 #include "crypto4xx_sa.h" 30 30 31 - #define MODULE_NAME "crypto4xx" 32 - 33 31 #define PPC460SX_SDR0_SRST 0x201 34 32 #define PPC405EX_SDR0_SRST 0x200 35 33 #define PPC460EX_SDR0_SRST 0x201 ··· 80 82 81 83 struct crypto4xx_device { 82 84 struct crypto4xx_core_device *core_dev; 83 - char *name; 84 85 void __iomem *ce_base; 85 86 void __iomem *trng_base; 86 87 ··· 106 109 struct list_head alg_list; /* List of algorithm supported 107 110 by this device */ 108 111 struct ratelimit_state aead_ratelimit; 112 + bool is_revb; 109 113 }; 110 114 111 115 struct crypto4xx_core_device {
+3 -1
drivers/crypto/amcc/crypto4xx_reg_def.h
··· 121 121 #define PPC4XX_PD_SIZE 6 122 122 #define PPC4XX_CTX_DONE_INT 0x2000 123 123 #define PPC4XX_PD_DONE_INT 0x8000 124 + #define PPC4XX_TMO_ERR_INT 0x40000 124 125 #define PPC4XX_BYTE_ORDER 0x22222 125 126 #define PPC4XX_INTERRUPT_CLR 0x3ffff 126 127 #define PPC4XX_PRNG_CTRL_AUTO_EN 0x3 127 128 #define PPC4XX_DC_3DES_EN 1 128 129 #define PPC4XX_TRNG_EN 0x00020000 129 - #define PPC4XX_INT_DESCR_CNT 4 130 + #define PPC4XX_INT_DESCR_CNT 7 130 131 #define PPC4XX_INT_TIMEOUT_CNT 0 132 + #define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF 131 133 #define PPC4XX_INT_CFG 1 132 134 /** 133 135 * all follow define are ad hoc
+1 -1
drivers/crypto/amcc/crypto4xx_trng.c
··· 92 92 if (!rng) 93 93 goto err_out; 94 94 95 - rng->name = MODULE_NAME; 95 + rng->name = KBUILD_MODNAME; 96 96 rng->data_present = ppc4xx_trng_data_present; 97 97 rng->data_read = ppc4xx_trng_data_read; 98 98 rng->priv = (unsigned long) dev;
+3 -5
drivers/crypto/axis/artpec6_crypto.c
··· 22 22 #include <linux/slab.h> 23 23 24 24 #include <crypto/aes.h> 25 + #include <crypto/gcm.h> 25 26 #include <crypto/internal/aead.h> 26 27 #include <crypto/internal/hash.h> 27 28 #include <crypto/internal/skcipher.h> ··· 1935 1934 1936 1935 memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); 1937 1936 // The HW omits the initial increment of the counter field. 1938 - crypto_inc(req_ctx->hw_ctx.J0+12, 4); 1937 + memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4); 1939 1938 1940 1939 ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, 1941 1940 sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); ··· 2957 2956 .setkey = artpec6_crypto_aead_set_key, 2958 2957 .encrypt = artpec6_crypto_aead_encrypt, 2959 2958 .decrypt = artpec6_crypto_aead_decrypt, 2960 - .ivsize = AES_BLOCK_SIZE, 2959 + .ivsize = GCM_AES_IV_SIZE, 2961 2960 .maxauthsize = AES_BLOCK_SIZE, 2962 2961 2963 2962 .base = { ··· 3042 3041 variant = (enum artpec6_crypto_variant)match->data; 3043 3042 3044 3043 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3045 - if (!res) 3046 - return -ENODEV; 3047 - 3048 3044 base = devm_ioremap_resource(&pdev->dev, res); 3049 3045 if (IS_ERR(base)) 3050 3046 return PTR_ERR(base);
-1
drivers/crypto/bcm/cipher.c
··· 42 42 #include <crypto/authenc.h> 43 43 #include <crypto/skcipher.h> 44 44 #include <crypto/hash.h> 45 - #include <crypto/aes.h> 46 45 #include <crypto/sha3.h> 47 46 48 47 #include "util.h"
+2 -1
drivers/crypto/bfin_crc.c
··· 494 494 .cra_driver_name = DRIVER_NAME, 495 495 .cra_priority = 100, 496 496 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 497 - CRYPTO_ALG_ASYNC, 497 + CRYPTO_ALG_ASYNC | 498 + CRYPTO_ALG_OPTIONAL_KEY, 498 499 .cra_blocksize = CHKSUM_BLOCK_SIZE, 499 500 .cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx), 500 501 .cra_alignmask = 3,
+81 -39
drivers/crypto/caam/caamalg.c
··· 108 108 dma_addr_t sh_desc_dec_dma; 109 109 dma_addr_t sh_desc_givenc_dma; 110 110 dma_addr_t key_dma; 111 + enum dma_data_direction dir; 111 112 struct device *jrdev; 112 113 struct alginfo adata; 113 114 struct alginfo cdata; ··· 119 118 { 120 119 struct caam_ctx *ctx = crypto_aead_ctx(aead); 121 120 struct device *jrdev = ctx->jrdev; 121 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 122 122 u32 *desc; 123 123 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - 124 124 ctx->adata.keylen_pad; ··· 138 136 139 137 /* aead_encrypt shared descriptor */ 140 138 desc = ctx->sh_desc_enc; 141 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize); 139 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, 140 + ctrlpriv->era); 142 141 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 143 - desc_bytes(desc), DMA_TO_DEVICE); 142 + desc_bytes(desc), ctx->dir); 144 143 145 144 /* 146 145 * Job Descriptor and Shared Descriptors ··· 157 154 158 155 /* aead_decrypt shared descriptor */ 159 156 desc = ctx->sh_desc_dec; 160 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize); 157 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, 158 + ctrlpriv->era); 161 159 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 162 - desc_bytes(desc), DMA_TO_DEVICE); 160 + desc_bytes(desc), ctx->dir); 163 161 164 162 return 0; 165 163 } ··· 172 168 unsigned int ivsize = crypto_aead_ivsize(aead); 173 169 struct caam_ctx *ctx = crypto_aead_ctx(aead); 174 170 struct device *jrdev = ctx->jrdev; 171 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 175 172 u32 ctx1_iv_off = 0; 176 173 u32 *desc, *nonce = NULL; 177 174 u32 inl_mask; ··· 239 234 desc = ctx->sh_desc_enc; 240 235 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, 241 236 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, 242 - false); 237 + false, ctrlpriv->era); 243 238 dma_sync_single_for_device(jrdev, 
ctx->sh_desc_enc_dma, 244 - desc_bytes(desc), DMA_TO_DEVICE); 239 + desc_bytes(desc), ctx->dir); 245 240 246 241 skip_enc: 247 242 /* ··· 271 266 desc = ctx->sh_desc_dec; 272 267 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, 273 268 ctx->authsize, alg->caam.geniv, is_rfc3686, 274 - nonce, ctx1_iv_off, false); 269 + nonce, ctx1_iv_off, false, ctrlpriv->era); 275 270 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 276 - desc_bytes(desc), DMA_TO_DEVICE); 271 + desc_bytes(desc), ctx->dir); 277 272 278 273 if (!alg->caam.geniv) 279 274 goto skip_givenc; ··· 305 300 desc = ctx->sh_desc_enc; 306 301 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, 307 302 ctx->authsize, is_rfc3686, nonce, 308 - ctx1_iv_off, false); 303 + ctx1_iv_off, false, ctrlpriv->era); 309 304 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 310 - desc_bytes(desc), DMA_TO_DEVICE); 305 + desc_bytes(desc), ctx->dir); 311 306 312 307 skip_givenc: 313 308 return 0; ··· 351 346 desc = ctx->sh_desc_enc; 352 347 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize); 353 348 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 354 - desc_bytes(desc), DMA_TO_DEVICE); 349 + desc_bytes(desc), ctx->dir); 355 350 356 351 /* 357 352 * Job Descriptor and Shared Descriptors ··· 368 363 desc = ctx->sh_desc_dec; 369 364 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize); 370 365 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 371 - desc_bytes(desc), DMA_TO_DEVICE); 366 + desc_bytes(desc), ctx->dir); 372 367 373 368 return 0; 374 369 } ··· 410 405 desc = ctx->sh_desc_enc; 411 406 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize); 412 407 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 413 - desc_bytes(desc), DMA_TO_DEVICE); 408 + desc_bytes(desc), ctx->dir); 414 409 415 410 /* 416 411 * Job Descriptor and Shared Descriptors ··· 427 422 desc = ctx->sh_desc_dec; 428 423 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize); 429 424 
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 430 - desc_bytes(desc), DMA_TO_DEVICE); 425 + desc_bytes(desc), ctx->dir); 431 426 432 427 return 0; 433 428 } ··· 470 465 desc = ctx->sh_desc_enc; 471 466 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize); 472 467 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 473 - desc_bytes(desc), DMA_TO_DEVICE); 468 + desc_bytes(desc), ctx->dir); 474 469 475 470 /* 476 471 * Job Descriptor and Shared Descriptors ··· 487 482 desc = ctx->sh_desc_dec; 488 483 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize); 489 484 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 490 - desc_bytes(desc), DMA_TO_DEVICE); 485 + desc_bytes(desc), ctx->dir); 491 486 492 487 return 0; 493 488 } ··· 508 503 { 509 504 struct caam_ctx *ctx = crypto_aead_ctx(aead); 510 505 struct device *jrdev = ctx->jrdev; 506 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 511 507 struct crypto_authenc_keys keys; 512 508 int ret = 0; 513 509 ··· 523 517 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 524 518 #endif 525 519 520 + /* 521 + * If DKP is supported, use it in the shared descriptor to generate 522 + * the split key. 
523 + */ 524 + if (ctrlpriv->era >= 6) { 525 + ctx->adata.keylen = keys.authkeylen; 526 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 527 + OP_ALG_ALGSEL_MASK); 528 + 529 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 530 + goto badkey; 531 + 532 + memcpy(ctx->key, keys.authkey, keys.authkeylen); 533 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 534 + keys.enckeylen); 535 + dma_sync_single_for_device(jrdev, ctx->key_dma, 536 + ctx->adata.keylen_pad + 537 + keys.enckeylen, ctx->dir); 538 + goto skip_split_key; 539 + } 540 + 526 541 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, 527 542 keys.authkeylen, CAAM_MAX_KEY_SIZE - 528 543 keys.enckeylen); ··· 554 527 /* postpend encryption key to auth split key */ 555 528 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 556 529 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 557 - keys.enckeylen, DMA_TO_DEVICE); 530 + keys.enckeylen, ctx->dir); 558 531 #ifdef DEBUG 559 532 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 560 533 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 561 534 ctx->adata.keylen_pad + keys.enckeylen, 1); 562 535 #endif 536 + 537 + skip_split_key: 563 538 ctx->cdata.keylen = keys.enckeylen; 564 539 return aead_set_sh_desc(aead); 565 540 badkey: ··· 581 552 #endif 582 553 583 554 memcpy(ctx->key, key, keylen); 584 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 555 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); 585 556 ctx->cdata.keylen = keylen; 586 557 587 558 return gcm_set_sh_desc(aead); ··· 609 580 */ 610 581 ctx->cdata.keylen = keylen - 4; 611 582 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 612 - DMA_TO_DEVICE); 583 + ctx->dir); 613 584 return rfc4106_set_sh_desc(aead); 614 585 } 615 586 ··· 635 606 */ 636 607 ctx->cdata.keylen = keylen - 4; 637 608 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, 
638 - DMA_TO_DEVICE); 609 + ctx->dir); 639 610 return rfc4543_set_sh_desc(aead); 640 611 } 641 612 ··· 654 625 const bool is_rfc3686 = (ctr_mode && 655 626 (strstr(alg_name, "rfc3686") != NULL)); 656 627 657 - memcpy(ctx->key, key, keylen); 658 628 #ifdef DEBUG 659 629 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ", 660 630 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ··· 676 648 keylen -= CTR_RFC3686_NONCE_SIZE; 677 649 } 678 650 679 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 680 651 ctx->cdata.keylen = keylen; 681 - ctx->cdata.key_virt = ctx->key; 652 + ctx->cdata.key_virt = key; 682 653 ctx->cdata.key_inline = true; 683 654 684 655 /* ablkcipher_encrypt shared descriptor */ ··· 685 658 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, 686 659 ctx1_iv_off); 687 660 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 688 - desc_bytes(desc), DMA_TO_DEVICE); 661 + desc_bytes(desc), ctx->dir); 689 662 690 663 /* ablkcipher_decrypt shared descriptor */ 691 664 desc = ctx->sh_desc_dec; 692 665 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, 693 666 ctx1_iv_off); 694 667 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 695 - desc_bytes(desc), DMA_TO_DEVICE); 668 + desc_bytes(desc), ctx->dir); 696 669 697 670 /* ablkcipher_givencrypt shared descriptor */ 698 671 desc = ctx->sh_desc_givenc; 699 672 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686, 700 673 ctx1_iv_off); 701 674 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma, 702 - desc_bytes(desc), DMA_TO_DEVICE); 675 + desc_bytes(desc), ctx->dir); 703 676 704 677 return 0; 705 678 } ··· 718 691 return -EINVAL; 719 692 } 720 693 721 - memcpy(ctx->key, key, keylen); 722 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 723 694 ctx->cdata.keylen = keylen; 724 - ctx->cdata.key_virt = ctx->key; 695 + ctx->cdata.key_virt = key; 725 696 ctx->cdata.key_inline = true; 726 697 727 698 
/* xts_ablkcipher_encrypt shared descriptor */ 728 699 desc = ctx->sh_desc_enc; 729 700 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata); 730 701 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, 731 - desc_bytes(desc), DMA_TO_DEVICE); 702 + desc_bytes(desc), ctx->dir); 732 703 733 704 /* xts_ablkcipher_decrypt shared descriptor */ 734 705 desc = ctx->sh_desc_dec; 735 706 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata); 736 707 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, 737 - desc_bytes(desc), DMA_TO_DEVICE); 708 + desc_bytes(desc), ctx->dir); 738 709 739 710 return 0; 740 711 } ··· 1004 979 append_seq_out_ptr(desc, dst_dma, 1005 980 req->assoclen + req->cryptlen - authsize, 1006 981 out_options); 1007 - 1008 - /* REG3 = assoclen */ 1009 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1010 982 } 1011 983 1012 984 static void init_gcm_job(struct aead_request *req, ··· 1018 996 unsigned int last; 1019 997 1020 998 init_aead_job(req, edesc, all_contig, encrypt); 999 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1021 1000 1022 1001 /* BUG This should not be specific to generic GCM. */ 1023 1002 last = 0; ··· 1045 1022 struct caam_aead_alg, aead); 1046 1023 unsigned int ivsize = crypto_aead_ivsize(aead); 1047 1024 struct caam_ctx *ctx = crypto_aead_ctx(aead); 1025 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 1048 1026 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 1049 1027 OP_ALG_AAI_CTR_MOD128); 1050 1028 const bool is_rfc3686 = alg->caam.rfc3686; ··· 1068 1044 ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; 1069 1045 1070 1046 init_aead_job(req, edesc, all_contig, encrypt); 1047 + 1048 + /* 1049 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports 1050 + * having DPOVRD as destination. 
1051 + */ 1052 + if (ctrlpriv->era < 3) 1053 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); 1054 + else 1055 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); 1071 1056 1072 1057 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) 1073 1058 append_load_as_imm(desc, req->iv, ivsize, ··· 3261 3228 struct caam_alg_entry caam; 3262 3229 }; 3263 3230 3264 - static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) 3231 + static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 3232 + bool uses_dkp) 3265 3233 { 3266 3234 dma_addr_t dma_addr; 3235 + struct caam_drv_private *priv; 3267 3236 3268 3237 ctx->jrdev = caam_jr_alloc(); 3269 3238 if (IS_ERR(ctx->jrdev)) { ··· 3273 3238 return PTR_ERR(ctx->jrdev); 3274 3239 } 3275 3240 3241 + priv = dev_get_drvdata(ctx->jrdev->parent); 3242 + if (priv->era >= 6 && uses_dkp) 3243 + ctx->dir = DMA_BIDIRECTIONAL; 3244 + else 3245 + ctx->dir = DMA_TO_DEVICE; 3246 + 3276 3247 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, 3277 3248 offsetof(struct caam_ctx, 3278 3249 sh_desc_enc_dma), 3279 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 3250 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 3280 3251 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 3281 3252 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n"); 3282 3253 caam_jr_free(ctx->jrdev); ··· 3310 3269 container_of(alg, struct caam_crypto_alg, crypto_alg); 3311 3270 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 3312 3271 3313 - return caam_init_common(ctx, &caam_alg->caam); 3272 + return caam_init_common(ctx, &caam_alg->caam, false); 3314 3273 } 3315 3274 3316 3275 static int caam_aead_init(struct crypto_aead *tfm) ··· 3320 3279 container_of(alg, struct caam_aead_alg, aead); 3321 3280 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 3322 3281 3323 - return caam_init_common(ctx, &caam_alg->caam); 3282 + return caam_init_common(ctx, &caam_alg->caam, 3283 + alg->setkey == aead_setkey); 3324 3284 } 
3325 3285 3326 3286 static void caam_exit_common(struct caam_ctx *ctx) 3327 3287 { 3328 3288 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, 3329 3289 offsetof(struct caam_ctx, sh_desc_enc_dma), 3330 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 3290 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 3331 3291 caam_jr_free(ctx->jrdev); 3332 3292 } 3333 3293
+110 -72
drivers/crypto/caam/caamalg_desc.c
··· 45 45 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor 46 46 * (non-protocol) with no (null) encryption. 47 47 * @desc: pointer to buffer used for descriptor construction 48 - * @adata: pointer to authentication transform definitions. Note that since a 49 - * split key is to be used, the size of the split key itself is 50 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 51 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 48 + * @adata: pointer to authentication transform definitions. 49 + * A split key is required for SEC Era < 6; the size of the split key 50 + * is specified in this case. Valid algorithm values - one of 51 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 52 + * with OP_ALG_AAI_HMAC_PRECOMP. 52 53 * @icvsize: integrity check value (ICV) size (truncated or full) 53 - * 54 - * Note: Requires an MDHA split key. 54 + * @era: SEC Era 55 55 */ 56 56 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 57 - unsigned int icvsize) 57 + unsigned int icvsize, int era) 58 58 { 59 59 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; 60 60 ··· 63 63 /* Skip if already shared */ 64 64 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 65 65 JUMP_COND_SHRD); 66 - if (adata->key_inline) 67 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, 68 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | 69 - KEY_ENC); 70 - else 71 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | 72 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 66 + if (era < 6) { 67 + if (adata->key_inline) 68 + append_key_as_imm(desc, adata->key_virt, 69 + adata->keylen_pad, adata->keylen, 70 + CLASS_2 | KEY_DEST_MDHA_SPLIT | 71 + KEY_ENC); 72 + else 73 + append_key(desc, adata->key_dma, adata->keylen, 74 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); 75 + } else { 76 + append_proto_dkp(desc, adata); 77 + } 73 78 set_jump_tgt_here(desc, key_jump_cmd); 74 79 75 80 /* assoclen + 
cryptlen = seqinlen */ ··· 126 121 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor 127 122 * (non-protocol) with no (null) decryption. 128 123 * @desc: pointer to buffer used for descriptor construction 129 - * @adata: pointer to authentication transform definitions. Note that since a 130 - * split key is to be used, the size of the split key itself is 131 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 132 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 124 + * @adata: pointer to authentication transform definitions. 125 + * A split key is required for SEC Era < 6; the size of the split key 126 + * is specified in this case. Valid algorithm values - one of 127 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 128 + * with OP_ALG_AAI_HMAC_PRECOMP. 133 129 * @icvsize: integrity check value (ICV) size (truncated or full) 134 - * 135 - * Note: Requires an MDHA split key. 130 + * @era: SEC Era 136 131 */ 137 132 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, 138 - unsigned int icvsize) 133 + unsigned int icvsize, int era) 139 134 { 140 135 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; 141 136 ··· 144 139 /* Skip if already shared */ 145 140 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 146 141 JUMP_COND_SHRD); 147 - if (adata->key_inline) 148 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, 149 - adata->keylen, CLASS_2 | 150 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 151 - else 152 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | 153 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 142 + if (era < 6) { 143 + if (adata->key_inline) 144 + append_key_as_imm(desc, adata->key_virt, 145 + adata->keylen_pad, adata->keylen, 146 + CLASS_2 | KEY_DEST_MDHA_SPLIT | 147 + KEY_ENC); 148 + else 149 + append_key(desc, adata->key_dma, adata->keylen, 150 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); 151 + } else { 152 + 
append_proto_dkp(desc, adata); 153 + } 154 154 set_jump_tgt_here(desc, key_jump_cmd); 155 155 156 156 /* Class 2 operation */ ··· 214 204 static void init_sh_desc_key_aead(u32 * const desc, 215 205 struct alginfo * const cdata, 216 206 struct alginfo * const adata, 217 - const bool is_rfc3686, u32 *nonce) 207 + const bool is_rfc3686, u32 *nonce, int era) 218 208 { 219 209 u32 *key_jump_cmd; 220 210 unsigned int enckeylen = cdata->keylen; ··· 234 224 if (is_rfc3686) 235 225 enckeylen -= CTR_RFC3686_NONCE_SIZE; 236 226 237 - if (adata->key_inline) 238 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, 239 - adata->keylen, CLASS_2 | 240 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 241 - else 242 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | 243 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 227 + if (era < 6) { 228 + if (adata->key_inline) 229 + append_key_as_imm(desc, adata->key_virt, 230 + adata->keylen_pad, adata->keylen, 231 + CLASS_2 | KEY_DEST_MDHA_SPLIT | 232 + KEY_ENC); 233 + else 234 + append_key(desc, adata->key_dma, adata->keylen, 235 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); 236 + } else { 237 + append_proto_dkp(desc, adata); 238 + } 244 239 245 240 if (cdata->key_inline) 246 241 append_key_as_imm(desc, cdata->key_virt, enckeylen, ··· 276 261 * @cdata: pointer to block cipher transform definitions 277 262 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 278 263 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 279 - * @adata: pointer to authentication transform definitions. Note that since a 280 - * split key is to be used, the size of the split key itself is 281 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 282 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 264 + * @adata: pointer to authentication transform definitions. 265 + * A split key is required for SEC Era < 6; the size of the split key 266 + * is specified in this case. 
Valid algorithm values - one of 267 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 268 + * with OP_ALG_AAI_HMAC_PRECOMP. 283 269 * @ivsize: initialization vector size 284 270 * @icvsize: integrity check value (ICV) size (truncated or full) 285 271 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 286 272 * @nonce: pointer to rfc3686 nonce 287 273 * @ctx1_iv_off: IV offset in CONTEXT1 register 288 274 * @is_qi: true when called from caam/qi 289 - * 290 - * Note: Requires an MDHA split key. 275 + * @era: SEC Era 291 276 */ 292 277 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, 293 278 struct alginfo *adata, unsigned int ivsize, 294 279 unsigned int icvsize, const bool is_rfc3686, 295 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi) 280 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, 281 + int era) 296 282 { 297 283 /* Note: Context registers are saved. */ 298 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 284 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 299 285 300 286 /* Class 2 operation */ 301 287 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ··· 322 306 } 323 307 324 308 /* Read and write assoclen bytes */ 325 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 326 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 309 + if (is_qi || era < 3) { 310 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 311 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 312 + } else { 313 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 314 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 315 + } 327 316 328 317 /* Skip assoc data */ 329 318 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ··· 371 350 * @cdata: pointer to block cipher transform definitions 372 351 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 373 352 * with 
OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 374 - * @adata: pointer to authentication transform definitions. Note that since a 375 - * split key is to be used, the size of the split key itself is 376 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 377 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 353 + * @adata: pointer to authentication transform definitions. 354 + * A split key is required for SEC Era < 6; the size of the split key 355 + * is specified in this case. Valid algorithm values - one of 356 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 357 + * with OP_ALG_AAI_HMAC_PRECOMP. 378 358 * @ivsize: initialization vector size 379 359 * @icvsize: integrity check value (ICV) size (truncated or full) 380 360 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 381 361 * @nonce: pointer to rfc3686 nonce 382 362 * @ctx1_iv_off: IV offset in CONTEXT1 register 383 363 * @is_qi: true when called from caam/qi 384 - * 385 - * Note: Requires an MDHA split key. 364 + * @era: SEC Era 386 365 */ 387 366 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, 388 367 struct alginfo *adata, unsigned int ivsize, 389 368 unsigned int icvsize, const bool geniv, 390 369 const bool is_rfc3686, u32 *nonce, 391 - const u32 ctx1_iv_off, const bool is_qi) 370 + const u32 ctx1_iv_off, const bool is_qi, int era) 392 371 { 393 372 /* Note: Context registers are saved. 
*/ 394 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 373 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 395 374 396 375 /* Class 2 operation */ 397 376 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | ··· 418 397 } 419 398 420 399 /* Read and write assoclen bytes */ 421 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 422 - if (geniv) 423 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); 424 - else 425 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 400 + if (is_qi || era < 3) { 401 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 402 + if (geniv) 403 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, 404 + ivsize); 405 + else 406 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 407 + CAAM_CMD_SZ); 408 + } else { 409 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 410 + if (geniv) 411 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, 412 + ivsize); 413 + else 414 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, 415 + CAAM_CMD_SZ); 416 + } 426 417 427 418 /* Skip assoc data */ 428 419 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ··· 489 456 * @cdata: pointer to block cipher transform definitions 490 457 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed 491 458 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. 492 - * @adata: pointer to authentication transform definitions. Note that since a 493 - * split key is to be used, the size of the split key itself is 494 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, 495 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP. 459 + * @adata: pointer to authentication transform definitions. 460 + * A split key is required for SEC Era < 6; the size of the split key 461 + * is specified in this case. 
Valid algorithm values - one of 462 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed 463 + * with OP_ALG_AAI_HMAC_PRECOMP. 496 464 * @ivsize: initialization vector size 497 465 * @icvsize: integrity check value (ICV) size (truncated or full) 498 466 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template 499 467 * @nonce: pointer to rfc3686 nonce 500 468 * @ctx1_iv_off: IV offset in CONTEXT1 register 501 469 * @is_qi: true when called from caam/qi 502 - * 503 - * Note: Requires an MDHA split key. 470 + * @era: SEC Era 504 471 */ 505 472 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, 506 473 struct alginfo *adata, unsigned int ivsize, 507 474 unsigned int icvsize, const bool is_rfc3686, 508 475 u32 *nonce, const u32 ctx1_iv_off, 509 - const bool is_qi) 476 + const bool is_qi, int era) 510 477 { 511 478 u32 geniv, moveiv; 512 479 513 480 /* Note: Context registers are saved. */ 514 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce); 481 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); 515 482 516 483 if (is_qi) { 517 484 u32 *wait_load_cmd; ··· 561 528 OP_ALG_ENCRYPT); 562 529 563 530 /* Read and write assoclen bytes */ 564 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 565 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 531 + if (is_qi || era < 3) { 532 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); 533 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); 534 + } else { 535 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 536 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); 537 + } 566 538 567 539 /* Skip assoc data */ 568 540 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); ··· 1113 1075 1114 1076 /* Load nonce into CONTEXT1 reg */ 1115 1077 if (is_rfc3686) { 1116 - u8 *nonce = cdata->key_virt + cdata->keylen; 1078 + const u8 *nonce = cdata->key_virt + cdata->keylen; 1117 
1079 1118 1080 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, 1119 1081 LDST_CLASS_IND_CCB | ··· 1178 1140 1179 1141 /* Load nonce into CONTEXT1 reg */ 1180 1142 if (is_rfc3686) { 1181 - u8 *nonce = cdata->key_virt + cdata->keylen; 1143 + const u8 *nonce = cdata->key_virt + cdata->keylen; 1182 1144 1183 1145 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, 1184 1146 LDST_CLASS_IND_CCB | ··· 1247 1209 1248 1210 /* Load Nonce into CONTEXT1 reg */ 1249 1211 if (is_rfc3686) { 1250 - u8 *nonce = cdata->key_virt + cdata->keylen; 1212 + const u8 *nonce = cdata->key_virt + cdata->keylen; 1251 1213 1252 1214 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, 1253 1215 LDST_CLASS_IND_CCB |
+5 -5
drivers/crypto/caam/caamalg_desc.h
··· 43 43 15 * CAAM_CMD_SZ) 44 44 45 45 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, 46 - unsigned int icvsize); 46 + unsigned int icvsize, int era); 47 47 48 48 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, 49 - unsigned int icvsize); 49 + unsigned int icvsize, int era); 50 50 51 51 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, 52 52 struct alginfo *adata, unsigned int ivsize, 53 53 unsigned int icvsize, const bool is_rfc3686, 54 54 u32 *nonce, const u32 ctx1_iv_off, 55 - const bool is_qi); 55 + const bool is_qi, int era); 56 56 57 57 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, 58 58 struct alginfo *adata, unsigned int ivsize, 59 59 unsigned int icvsize, const bool geniv, 60 60 const bool is_rfc3686, u32 *nonce, 61 - const u32 ctx1_iv_off, const bool is_qi); 61 + const u32 ctx1_iv_off, const bool is_qi, int era); 62 62 63 63 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, 64 64 struct alginfo *adata, unsigned int ivsize, 65 65 unsigned int icvsize, const bool is_rfc3686, 66 66 u32 *nonce, const u32 ctx1_iv_off, 67 - const bool is_qi); 67 + const bool is_qi, int era); 68 68 69 69 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, 70 70 unsigned int icvsize);
+48 -20
drivers/crypto/caam/caamalg_qi.c
··· 53 53 u32 sh_desc_givenc[DESC_MAX_USED_LEN]; 54 54 u8 key[CAAM_MAX_KEY_SIZE]; 55 55 dma_addr_t key_dma; 56 + enum dma_data_direction dir; 56 57 struct alginfo adata; 57 58 struct alginfo cdata; 58 59 unsigned int authsize; ··· 75 74 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == 76 75 OP_ALG_AAI_CTR_MOD128); 77 76 const bool is_rfc3686 = alg->caam.rfc3686; 77 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 78 78 79 79 if (!ctx->cdata.keylen || !ctx->authsize) 80 80 return 0; ··· 126 124 127 125 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 128 126 ivsize, ctx->authsize, is_rfc3686, nonce, 129 - ctx1_iv_off, true); 127 + ctx1_iv_off, true, ctrlpriv->era); 130 128 131 129 skip_enc: 132 130 /* aead_decrypt shared descriptor */ ··· 151 149 152 150 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, 153 151 ivsize, ctx->authsize, alg->caam.geniv, 154 - is_rfc3686, nonce, ctx1_iv_off, true); 152 + is_rfc3686, nonce, ctx1_iv_off, true, 153 + ctrlpriv->era); 155 154 156 155 if (!alg->caam.geniv) 157 156 goto skip_givenc; ··· 179 176 180 177 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, 181 178 ivsize, ctx->authsize, is_rfc3686, nonce, 182 - ctx1_iv_off, true); 179 + ctx1_iv_off, true, ctrlpriv->era); 183 180 184 181 skip_givenc: 185 182 return 0; ··· 200 197 { 201 198 struct caam_ctx *ctx = crypto_aead_ctx(aead); 202 199 struct device *jrdev = ctx->jrdev; 200 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 203 201 struct crypto_authenc_keys keys; 204 202 int ret = 0; 205 203 ··· 215 211 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); 216 212 #endif 217 213 214 + /* 215 + * If DKP is supported, use it in the shared descriptor to generate 216 + * the split key. 
217 + */ 218 + if (ctrlpriv->era >= 6) { 219 + ctx->adata.keylen = keys.authkeylen; 220 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 221 + OP_ALG_ALGSEL_MASK); 222 + 223 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) 224 + goto badkey; 225 + 226 + memcpy(ctx->key, keys.authkey, keys.authkeylen); 227 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, 228 + keys.enckeylen); 229 + dma_sync_single_for_device(jrdev, ctx->key_dma, 230 + ctx->adata.keylen_pad + 231 + keys.enckeylen, ctx->dir); 232 + goto skip_split_key; 233 + } 234 + 218 235 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, 219 236 keys.authkeylen, CAAM_MAX_KEY_SIZE - 220 237 keys.enckeylen); ··· 245 220 /* postpend encryption key to auth split key */ 246 221 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); 247 222 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + 248 - keys.enckeylen, DMA_TO_DEVICE); 223 + keys.enckeylen, ctx->dir); 249 224 #ifdef DEBUG 250 225 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ", 251 226 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 252 227 ctx->adata.keylen_pad + keys.enckeylen, 1); 253 228 #endif 254 229 230 + skip_split_key: 255 231 ctx->cdata.keylen = keys.enckeylen; 256 232 257 233 ret = aead_set_sh_desc(aead); ··· 298 272 const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); 299 273 int ret = 0; 300 274 301 - memcpy(ctx->key, key, keylen); 302 275 #ifdef DEBUG 303 276 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", 304 277 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ··· 320 295 keylen -= CTR_RFC3686_NONCE_SIZE; 321 296 } 322 297 323 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 324 298 ctx->cdata.keylen = keylen; 325 - ctx->cdata.key_virt = ctx->key; 299 + ctx->cdata.key_virt = key; 326 300 ctx->cdata.key_inline = true; 327 301 328 302 /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ ··· 380 356 
return -EINVAL; 381 357 } 382 358 383 - memcpy(ctx->key, key, keylen); 384 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); 385 359 ctx->cdata.keylen = keylen; 386 - ctx->cdata.key_virt = ctx->key; 360 + ctx->cdata.key_virt = key; 387 361 ctx->cdata.key_inline = true; 388 362 389 363 /* xts ablkcipher encrypt, decrypt shared descriptors */ ··· 690 668 qm_sg_ents = 1 + !!ivsize + mapped_src_nents + 691 669 (mapped_dst_nents > 1 ? mapped_dst_nents : 0); 692 670 if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) { 693 - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", 671 + dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", 694 672 qm_sg_ents, CAAM_QI_MAX_AEAD_SG); 695 673 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 696 674 iv_dma, ivsize, op_type, 0, 0); ··· 927 905 928 906 qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0; 929 907 if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { 930 - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", 908 + dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", 931 909 qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); 932 910 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 933 911 iv_dma, ivsize, op_type, 0, 0); ··· 1080 1058 } 1081 1059 1082 1060 if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) { 1083 - dev_err(qidev, "Insufficient S/G entries: %d > %lu\n", 1061 + dev_err(qidev, "Insufficient S/G entries: %d > %zu\n", 1084 1062 qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG); 1085 1063 caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 1086 1064 iv_dma, ivsize, GIVENCRYPT, 0, 0); ··· 2145 2123 struct caam_alg_entry caam; 2146 2124 }; 2147 2125 2148 - static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam) 2126 + static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, 2127 + bool uses_dkp) 2149 2128 { 2150 2129 struct caam_drv_private *priv; 2151 2130 ··· 2160 2137 return PTR_ERR(ctx->jrdev); 2161 2138 } 2162 2139 
2140 + priv = dev_get_drvdata(ctx->jrdev->parent); 2141 + if (priv->era >= 6 && uses_dkp) 2142 + ctx->dir = DMA_BIDIRECTIONAL; 2143 + else 2144 + ctx->dir = DMA_TO_DEVICE; 2145 + 2163 2146 ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key), 2164 - DMA_TO_DEVICE); 2147 + ctx->dir); 2165 2148 if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { 2166 2149 dev_err(ctx->jrdev, "unable to map key\n"); 2167 2150 caam_jr_free(ctx->jrdev); ··· 2178 2149 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; 2179 2150 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; 2180 2151 2181 - priv = dev_get_drvdata(ctx->jrdev->parent); 2182 2152 ctx->qidev = priv->qidev; 2183 2153 2184 2154 spin_lock_init(&ctx->lock); ··· 2195 2167 crypto_alg); 2196 2168 struct caam_ctx *ctx = crypto_tfm_ctx(tfm); 2197 2169 2198 - return caam_init_common(ctx, &caam_alg->caam); 2170 + return caam_init_common(ctx, &caam_alg->caam, false); 2199 2171 } 2200 2172 2201 2173 static int caam_aead_init(struct crypto_aead *tfm) ··· 2205 2177 aead); 2206 2178 struct caam_ctx *ctx = crypto_aead_ctx(tfm); 2207 2179 2208 - return caam_init_common(ctx, &caam_alg->caam); 2180 + return caam_init_common(ctx, &caam_alg->caam, 2181 + alg->setkey == aead_setkey); 2209 2182 } 2210 2183 2211 2184 static void caam_exit_common(struct caam_ctx *ctx) ··· 2215 2186 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); 2216 2187 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]); 2217 2188 2218 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), 2219 - DMA_TO_DEVICE); 2189 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir); 2220 2190 2221 2191 caam_jr_free(ctx->jrdev); 2222 2192 }
+50 -23
drivers/crypto/caam/caamhash.c
··· 107 107 dma_addr_t sh_desc_update_first_dma; 108 108 dma_addr_t sh_desc_fin_dma; 109 109 dma_addr_t sh_desc_digest_dma; 110 + enum dma_data_direction dir; 110 111 struct device *jrdev; 111 112 u8 key[CAAM_MAX_HASH_KEY_SIZE]; 112 113 int ctx_len; ··· 242 241 * read and write to seqout 243 242 */ 244 243 static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize, 245 - struct caam_hash_ctx *ctx, bool import_ctx) 244 + struct caam_hash_ctx *ctx, bool import_ctx, 245 + int era) 246 246 { 247 247 u32 op = ctx->adata.algtype; 248 248 u32 *skip_key_load; ··· 256 254 skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | 257 255 JUMP_COND_SHRD); 258 256 259 - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, 260 - ctx->adata.keylen, CLASS_2 | 261 - KEY_DEST_MDHA_SPLIT | KEY_ENC); 257 + if (era < 6) 258 + append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad, 259 + ctx->adata.keylen, CLASS_2 | 260 + KEY_DEST_MDHA_SPLIT | KEY_ENC); 261 + else 262 + append_proto_dkp(desc, &ctx->adata); 262 263 263 264 set_jump_tgt_here(desc, skip_key_load); 264 265 ··· 294 289 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 295 290 int digestsize = crypto_ahash_digestsize(ahash); 296 291 struct device *jrdev = ctx->jrdev; 292 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); 297 293 u32 *desc; 294 + 295 + ctx->adata.key_virt = ctx->key; 298 296 299 297 /* ahash_update shared descriptor */ 300 298 desc = ctx->sh_desc_update; 301 - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true); 299 + ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true, 300 + ctrlpriv->era); 302 301 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, 303 - desc_bytes(desc), DMA_TO_DEVICE); 302 + desc_bytes(desc), ctx->dir); 304 303 #ifdef DEBUG 305 304 print_hex_dump(KERN_ERR, 306 305 "ahash update shdesc@"__stringify(__LINE__)": ", ··· 313 304 314 305 /* ahash_update_first shared descriptor */ 315 306 desc = 
ctx->sh_desc_update_first; 316 - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false); 307 + ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false, 308 + ctrlpriv->era); 317 309 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, 318 - desc_bytes(desc), DMA_TO_DEVICE); 310 + desc_bytes(desc), ctx->dir); 319 311 #ifdef DEBUG 320 312 print_hex_dump(KERN_ERR, 321 313 "ahash update first shdesc@"__stringify(__LINE__)": ", ··· 325 315 326 316 /* ahash_final shared descriptor */ 327 317 desc = ctx->sh_desc_fin; 328 - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true); 318 + ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true, 319 + ctrlpriv->era); 329 320 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, 330 - desc_bytes(desc), DMA_TO_DEVICE); 321 + desc_bytes(desc), ctx->dir); 331 322 #ifdef DEBUG 332 323 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ", 333 324 DUMP_PREFIX_ADDRESS, 16, 4, desc, ··· 337 326 338 327 /* ahash_digest shared descriptor */ 339 328 desc = ctx->sh_desc_digest; 340 - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false); 329 + ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false, 330 + ctrlpriv->era); 341 331 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, 342 - desc_bytes(desc), DMA_TO_DEVICE); 332 + desc_bytes(desc), ctx->dir); 343 333 #ifdef DEBUG 344 334 print_hex_dump(KERN_ERR, 345 335 "ahash digest shdesc@"__stringify(__LINE__)": ", ··· 433 421 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 434 422 int blocksize = crypto_tfm_alg_blocksize(&ahash->base); 435 423 int digestsize = crypto_ahash_digestsize(ahash); 424 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); 436 425 int ret; 437 426 u8 *hashed_key = NULL; 438 427 ··· 454 441 key = hashed_key; 455 442 } 456 443 457 - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, 458 - CAAM_MAX_HASH_KEY_SIZE); 459 - 
if (ret) 460 - goto bad_free_key; 444 + /* 445 + * If DKP is supported, use it in the shared descriptor to generate 446 + * the split key. 447 + */ 448 + if (ctrlpriv->era >= 6) { 449 + ctx->adata.key_inline = true; 450 + ctx->adata.keylen = keylen; 451 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & 452 + OP_ALG_ALGSEL_MASK); 461 453 462 - #ifdef DEBUG 463 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", 464 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, 465 - ctx->adata.keylen_pad, 1); 466 - #endif 454 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) 455 + goto bad_free_key; 456 + 457 + memcpy(ctx->key, key, keylen); 458 + } else { 459 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, 460 + keylen, CAAM_MAX_HASH_KEY_SIZE); 461 + if (ret) 462 + goto bad_free_key; 463 + } 467 464 468 465 kfree(hashed_key); 469 466 return ahash_set_sh_desc(ahash); ··· 1738 1715 HASH_MSG_LEN + 64, 1739 1716 HASH_MSG_LEN + SHA512_DIGEST_SIZE }; 1740 1717 dma_addr_t dma_addr; 1718 + struct caam_drv_private *priv; 1741 1719 1742 1720 /* 1743 1721 * Get a Job ring from Job Ring driver to ensure in-order ··· 1750 1726 return PTR_ERR(ctx->jrdev); 1751 1727 } 1752 1728 1729 + priv = dev_get_drvdata(ctx->jrdev->parent); 1730 + ctx->dir = priv->era >= 6 ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; 1731 + 1753 1732 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, 1754 1733 offsetof(struct caam_hash_ctx, 1755 1734 sh_desc_update_dma), 1756 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 1735 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1757 1736 if (dma_mapping_error(ctx->jrdev, dma_addr)) { 1758 1737 dev_err(ctx->jrdev, "unable to map shared descriptors\n"); 1759 1738 caam_jr_free(ctx->jrdev); ··· 1791 1764 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, 1792 1765 offsetof(struct caam_hash_ctx, 1793 1766 sh_desc_update_dma), 1794 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 1767 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); 1795 1768 caam_jr_free(ctx->jrdev); 1796 1769 } 1797 1770
+3 -1
drivers/crypto/caam/ctrl.c
··· 611 611 goto iounmap_ctrl; 612 612 } 613 613 614 + ctrlpriv->era = caam_get_era(); 615 + 614 616 ret = of_platform_populate(nprop, caam_match, NULL, dev); 615 617 if (ret) { 616 618 dev_err(dev, "JR platform devices creation error\n"); ··· 744 742 745 743 /* Report "alive" for developer to see */ 746 744 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, 747 - caam_get_era()); 745 + ctrlpriv->era); 748 746 dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n", 749 747 ctrlpriv->total_jobrs, ctrlpriv->qi_present, 750 748 caam_dpaa2 ? "yes" : "no");
+29
drivers/crypto/caam/desc.h
··· 444 444 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT) 445 445 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT) 446 446 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT) 447 + #define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT) 448 + #define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT) 449 + #define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT) 450 + #define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT) 451 + #define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT) 452 + #define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT) 453 + #define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT) 454 + #define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT) 455 + #define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT) 456 + #define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT) 457 + #define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT) 458 + #define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT) 447 459 448 460 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */ 449 461 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT) ··· 1105 1093 /* MacSec protinfos */ 1106 1094 #define OP_PCL_MACSEC 0x0001 1107 1095 1096 + /* Derived Key Protocol (DKP) Protinfo */ 1097 + #define OP_PCL_DKP_SRC_SHIFT 14 1098 + #define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT) 1099 + #define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT) 1100 + #define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT) 1101 + #define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT) 1102 + #define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT) 1103 + #define OP_PCL_DKP_DST_SHIFT 12 1104 + #define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT) 1105 + #define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT) 1106 + #define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT) 1107 + #define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT) 1108 + #define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT) 1109 + #define OP_PCL_DKP_KEY_SHIFT 0 1110 + #define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT) 1111 + 
1108 1112 /* PKI unidirectional protocol protinfo bits */ 1109 1113 #define OP_PCL_PKPROT_TEST 0x0008 1110 1114 #define OP_PCL_PKPROT_DECRYPT 0x0004 ··· 1480 1452 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT) 1481 1453 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT) 1482 1454 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT) 1455 + #define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT) 1483 1456 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT) 1484 1457 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT) 1485 1458 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+46 -5
drivers/crypto/caam/desc_constr.h
··· 109 109 append_ptr(desc, ptr); 110 110 } 111 111 112 - static inline void append_data(u32 * const desc, void *data, int len) 112 + static inline void append_data(u32 * const desc, const void *data, int len) 113 113 { 114 114 u32 *offset = desc_end(desc); 115 115 ··· 172 172 append_cmd(desc, len); 173 173 } 174 174 175 - static inline void append_cmd_data(u32 * const desc, void *data, int len, 175 + static inline void append_cmd_data(u32 * const desc, const void *data, int len, 176 176 u32 command) 177 177 { 178 178 append_cmd(desc, command | IMMEDIATE | len); ··· 271 271 APPEND_SEQ_PTR_INTLEN(out, OUT) 272 272 273 273 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \ 274 - static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ 274 + static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ 275 275 unsigned int len, u32 options) \ 276 276 { \ 277 277 PRINT_POS; \ ··· 312 312 * from length of immediate data provided, e.g., split keys 313 313 */ 314 314 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \ 315 - static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \ 315 + static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \ 316 316 unsigned int data_len, \ 317 317 unsigned int len, u32 options) \ 318 318 { \ ··· 452 452 unsigned int keylen_pad; 453 453 union { 454 454 dma_addr_t key_dma; 455 - void *key_virt; 455 + const void *key_virt; 456 456 }; 457 457 bool key_inline; 458 458 }; ··· 494 494 } 495 495 496 496 return (rem_bytes >= 0) ? 0 : -1; 497 + } 498 + 499 + /** 500 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key 501 + * @desc: pointer to buffer used for descriptor construction 502 + * @adata: pointer to authentication transform definitions. 503 + * keylen should be the length of initial key, while keylen_pad 504 + * the length of the derived (split) key. 505 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, 506 + * SHA256, SHA384, SHA512}. 
507 + */ 508 + static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata) 509 + { 510 + u32 protid; 511 + 512 + /* 513 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*} 514 + * to OP_PCLID_DKP_{MD5, SHA*} 515 + */ 516 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) | 517 + (0x20 << OP_ALG_ALGSEL_SHIFT); 518 + 519 + if (adata->key_inline) { 520 + int words; 521 + 522 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | 523 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | 524 + adata->keylen); 525 + append_data(desc, adata->key_virt, adata->keylen); 526 + 527 + /* Reserve space in descriptor buffer for the derived key */ 528 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) - 529 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ; 530 + if (words) 531 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words); 532 + } else { 533 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid | 534 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR | 535 + adata->keylen); 536 + append_ptr(desc, adata->key_dma); 537 + } 497 538 } 498 539 499 540 #endif /* DESC_CONSTR_H */
+1
drivers/crypto/caam/intern.h
··· 84 84 u8 qi_present; /* Nonzero if QI present in device */ 85 85 int secvio_irq; /* Security violation interrupt number */ 86 86 int virt_en; /* Virtualization enabled in CAAM */ 87 + int era; /* CAAM Era (internal HW revision) */ 87 88 88 89 #define RNG4_MAX_HANDLES 2 89 90 /* RNG4 block */
-30
drivers/crypto/caam/key_gen.c
··· 11 11 #include "desc_constr.h" 12 12 #include "key_gen.h" 13 13 14 - /** 15 - * split_key_len - Compute MDHA split key length for a given algorithm 16 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 17 - * SHA224, SHA384, SHA512. 18 - * 19 - * Return: MDHA split key length 20 - */ 21 - static inline u32 split_key_len(u32 hash) 22 - { 23 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 24 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 25 - u32 idx; 26 - 27 - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; 28 - 29 - return (u32)(mdpadlen[idx] * 2); 30 - } 31 - 32 - /** 33 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm 34 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 35 - * SHA224, SHA384, SHA512. 36 - * 37 - * Return: MDHA split key pad length 38 - */ 39 - static inline u32 split_key_pad_len(u32 hash) 40 - { 41 - return ALIGN(split_key_len(hash), 16); 42 - } 43 - 44 14 void split_key_done(struct device *dev, u32 *desc, u32 err, 45 15 void *context) 46 16 {
+30
drivers/crypto/caam/key_gen.h
··· 6 6 * 7 7 */ 8 8 9 + /** 10 + * split_key_len - Compute MDHA split key length for a given algorithm 11 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 12 + * SHA224, SHA384, SHA512. 13 + * 14 + * Return: MDHA split key length 15 + */ 16 + static inline u32 split_key_len(u32 hash) 17 + { 18 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */ 19 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 }; 20 + u32 idx; 21 + 22 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT; 23 + 24 + return (u32)(mdpadlen[idx] * 2); 25 + } 26 + 27 + /** 28 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm 29 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1, 30 + * SHA224, SHA384, SHA512. 31 + * 32 + * Return: MDHA split key pad length 33 + */ 34 + static inline u32 split_key_pad_len(u32 hash) 35 + { 36 + return ALIGN(split_key_len(hash), 16); 37 + } 38 + 9 39 struct split_key_result { 10 40 struct completion completion; 11 41 int err;
+2 -1
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
··· 459 459 info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); 460 460 if (unlikely(!info->completion_addr)) { 461 461 dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); 462 - return -ENOMEM; 462 + ret = -ENOMEM; 463 + goto request_cleanup; 463 464 } 464 465 465 466 result = (union cpt_res_s *)info->completion_addr;
-1
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
··· 6 6 #include "nitrox_dev.h" 7 7 #include "nitrox_req.h" 8 8 #include "nitrox_csr.h" 9 - #include "nitrox_req.h" 10 9 11 10 /* SLC_STORE_INFO */ 12 11 #define MIN_UDD_LEN 16
-1
drivers/crypto/ccp/ccp-crypto-aes-galois.c
··· 21 21 #include <crypto/ctr.h> 22 22 #include <crypto/gcm.h> 23 23 #include <crypto/scatterwalk.h> 24 - #include <linux/delay.h> 25 24 26 25 #include "ccp-crypto.h" 27 26
+10
drivers/crypto/chelsio/Kconfig
··· 19 19 20 20 To compile this driver as a module, choose M here: the module 21 21 will be called chcr. 22 + 23 + config CHELSIO_IPSEC_INLINE 24 + bool "Chelsio IPSec XFRM Tx crypto offload" 25 + depends on CHELSIO_T4 26 + depends on CRYPTO_DEV_CHELSIO 27 + depends on XFRM_OFFLOAD 28 + depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD 29 + default n 30 + ---help--- 31 + Enable support for IPSec Tx Inline.
+1
drivers/crypto/chelsio/Makefile
··· 2 2 3 3 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o 4 4 chcr-objs := chcr_core.o chcr_algo.o 5 + chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
+369 -207
drivers/crypto/chelsio/chcr_algo.c
··· 73 73 74 74 #define IV AES_BLOCK_SIZE 75 75 76 + static unsigned int sgl_ent_len[] = { 77 + 0, 0, 16, 24, 40, 48, 64, 72, 88, 78 + 96, 112, 120, 136, 144, 160, 168, 184, 79 + 192, 208, 216, 232, 240, 256, 264, 280, 80 + 288, 304, 312, 328, 336, 352, 360, 376 81 + }; 82 + 83 + static unsigned int dsgl_ent_len[] = { 84 + 0, 32, 32, 48, 48, 64, 64, 80, 80, 85 + 112, 112, 128, 128, 144, 144, 160, 160, 86 + 192, 192, 208, 208, 224, 224, 240, 240, 87 + 272, 272, 288, 288, 304, 304, 320, 320 88 + }; 89 + 90 + static u32 round_constant[11] = { 91 + 0x01000000, 0x02000000, 0x04000000, 0x08000000, 92 + 0x10000000, 0x20000000, 0x40000000, 0x80000000, 93 + 0x1B000000, 0x36000000, 0x6C000000 94 + }; 95 + 96 + static int chcr_handle_cipher_resp(struct ablkcipher_request *req, 97 + unsigned char *input, int err); 98 + 76 99 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) 77 100 { 78 101 return ctx->crypto_ctx->aeadctx; ··· 129 106 static inline int is_ofld_imm(const struct sk_buff *skb) 130 107 { 131 108 return (skb->len <= SGE_MAX_WR_LEN); 132 - } 133 - 134 - /* 135 - * sgl_len - calculates the size of an SGL of the given capacity 136 - * @n: the number of SGL entries 137 - * Calculates the number of flits needed for a scatter/gather list that 138 - * can hold the given number of entries. 
139 - */ 140 - static inline unsigned int sgl_len(unsigned int n) 141 - { 142 - n--; 143 - return (3 * n) / 2 + (n & 1) + 2; 144 109 } 145 110 146 111 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, ··· 171 160 172 161 if (input == NULL) 173 162 goto out; 174 - reqctx = ahash_request_ctx(req); 175 163 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); 176 164 if (reqctx->is_sg_map) 177 165 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); ··· 193 183 } 194 184 out: 195 185 req->base.complete(&req->base, err); 186 + } 196 187 197 - } 198 - 199 - static inline void chcr_handle_aead_resp(struct aead_request *req, 200 - unsigned char *input, 201 - int err) 188 + static inline int get_aead_subtype(struct crypto_aead *aead) 202 189 { 203 - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 204 - struct crypto_aead *tfm = crypto_aead_reqtfm(req); 205 - struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); 206 - 207 - 208 - chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); 209 - if (reqctx->b0_dma) 210 - dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, 211 - reqctx->b0_len, DMA_BIDIRECTIONAL); 212 - if (reqctx->verify == VERIFY_SW) { 213 - chcr_verify_tag(req, input, &err); 214 - reqctx->verify = VERIFY_HW; 190 + struct aead_alg *alg = crypto_aead_alg(aead); 191 + struct chcr_alg_template *chcr_crypto_alg = 192 + container_of(alg, struct chcr_alg_template, alg.aead); 193 + return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; 215 194 } 216 - req->base.complete(&req->base, err); 217 195 218 - } 219 - static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) 196 + void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) 220 197 { 221 198 u8 temp[SHA512_DIGEST_SIZE]; 222 199 struct crypto_aead *tfm = crypto_aead_reqtfm(req); ··· 226 229 *err = -EBADMSG; 227 230 else 228 231 *err = 0; 232 + } 233 + 234 + static inline void chcr_handle_aead_resp(struct aead_request *req, 235 + unsigned char 
*input, 236 + int err) 237 + { 238 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 239 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 240 + struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); 241 + 242 + chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); 243 + if (reqctx->b0_dma) 244 + dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, 245 + reqctx->b0_len, DMA_BIDIRECTIONAL); 246 + if (reqctx->verify == VERIFY_SW) { 247 + chcr_verify_tag(req, input, &err); 248 + reqctx->verify = VERIFY_HW; 249 + } 250 + req->base.complete(&req->base, err); 229 251 } 230 252 231 253 /* ··· 574 558 skip = 0; 575 559 } 576 560 } 577 - if (walk->nents == 0) { 561 + WARN(!sg, "SG should not be null here\n"); 562 + if (sg && (walk->nents == 0)) { 578 563 small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); 579 564 sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); 580 565 walk->sgl->len0 = cpu_to_be32(sgmin); ··· 610 593 skip_len = 0; 611 594 } 612 595 } 613 - } 614 - 615 - static inline int get_aead_subtype(struct crypto_aead *aead) 616 - { 617 - struct aead_alg *alg = crypto_aead_alg(aead); 618 - struct chcr_alg_template *chcr_crypto_alg = 619 - container_of(alg, struct chcr_alg_template, alg.aead); 620 - return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; 621 596 } 622 597 623 598 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) ··· 684 675 if (srclen <= dstlen) 685 676 break; 686 677 less = min_t(unsigned int, sg_dma_len(dst) - offset - 687 - dstskip, CHCR_DST_SG_SIZE); 678 + dstskip, CHCR_DST_SG_SIZE); 688 679 dstlen += less; 689 680 offset += less; 690 681 if (offset == sg_dma_len(dst)) { ··· 695 686 dstskip = 0; 696 687 } 697 688 src = sg_next(src); 698 - srcskip = 0; 689 + srcskip = 0; 699 690 } 700 691 return min(srclen, dstlen); 701 692 } ··· 1017 1008 return bytes; 1018 1009 } 1019 1010 1020 - static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) 1011 + static int chcr_update_tweak(struct 
ablkcipher_request *req, u8 *iv, 1012 + u32 isfinal) 1021 1013 { 1022 1014 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); 1023 1015 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); ··· 1045 1035 for (i = 0; i < (round % 8); i++) 1046 1036 gf128mul_x_ble((le128 *)iv, (le128 *)iv); 1047 1037 1048 - crypto_cipher_decrypt_one(cipher, iv, iv); 1038 + if (!isfinal) 1039 + crypto_cipher_decrypt_one(cipher, iv, iv); 1049 1040 out: 1050 1041 return ret; 1051 1042 } ··· 1067 1056 CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / 1068 1057 AES_BLOCK_SIZE) + 1); 1069 1058 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) 1070 - ret = chcr_update_tweak(req, iv); 1059 + ret = chcr_update_tweak(req, iv, 0); 1071 1060 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { 1072 1061 if (reqctx->op) 1073 1062 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, ··· 1098 1087 ctr_add_iv(iv, req->info, (reqctx->processed / 1099 1088 AES_BLOCK_SIZE)); 1100 1089 else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) 1101 - ret = chcr_update_tweak(req, iv); 1090 + ret = chcr_update_tweak(req, iv, 1); 1102 1091 else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { 1103 1092 if (reqctx->op) 1104 1093 sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv, ··· 1111 1100 return ret; 1112 1101 1113 1102 } 1114 - 1115 1103 1116 1104 static int chcr_handle_cipher_resp(struct ablkcipher_request *req, 1117 1105 unsigned char *input, int err) ··· 1145 1135 bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, 1146 1136 SPACE_LEFT(ablkctx->enckey_len), 1147 1137 reqctx->src_ofst, reqctx->dst_ofst); 1148 - if ((bytes + reqctx->processed) >= req->nbytes) 1149 - bytes = req->nbytes - reqctx->processed; 1150 - else 1151 - bytes = ROUND_16(bytes); 1138 + if ((bytes + reqctx->processed) >= req->nbytes) 1139 + bytes = req->nbytes - reqctx->processed; 1140 + else 1141 + bytes = ROUND_16(bytes); 1152 1142 } else { 1153 1143 /*CTR mode counter overfloa*/ 1154 1144 bytes = req->nbytes - reqctx->processed; ··· 1249 
1239 MIN_CIPHER_SG, 1250 1240 SPACE_LEFT(ablkctx->enckey_len), 1251 1241 0, 0); 1252 - if ((bytes + reqctx->processed) >= req->nbytes) 1253 - bytes = req->nbytes - reqctx->processed; 1254 - else 1255 - bytes = ROUND_16(bytes); 1242 + if ((bytes + reqctx->processed) >= req->nbytes) 1243 + bytes = req->nbytes - reqctx->processed; 1244 + else 1245 + bytes = ROUND_16(bytes); 1256 1246 } else { 1257 1247 bytes = req->nbytes; 1258 1248 } 1259 1249 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == 1260 - CRYPTO_ALG_SUB_TYPE_CTR) { 1250 + CRYPTO_ALG_SUB_TYPE_CTR) { 1261 1251 bytes = adjust_ctr_overflow(req->info, bytes); 1262 1252 } 1263 1253 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == ··· 2024 2014 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 2025 2015 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2026 2016 int error = -EINVAL; 2027 - unsigned int dst_size; 2028 2017 unsigned int authsize = crypto_aead_authsize(tfm); 2029 2018 2030 - dst_size = req->assoclen + req->cryptlen + (op_type ? 2031 - -authsize : authsize); 2032 2019 /* validate key size */ 2033 2020 if (aeadctx->enckey_len == 0) 2034 2021 goto err; ··· 2090 2083 struct cpl_rx_phys_dsgl *phys_cpl; 2091 2084 struct ulptx_sgl *ulptx; 2092 2085 unsigned int transhdr_len; 2093 - unsigned int dst_size = 0, temp; 2086 + unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); 2094 2087 unsigned int kctx_len = 0, dnents; 2095 2088 unsigned int assoclen = req->assoclen; 2096 2089 unsigned int authsize = crypto_aead_authsize(tfm); ··· 2104 2097 return NULL; 2105 2098 2106 2099 reqctx->b0_dma = 0; 2107 - if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { 2100 + if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || 2101 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 2108 2102 null = 1; 2109 2103 assoclen = 0; 2110 2104 } 2111 - dst_size = assoclen + req->cryptlen + (op_type ? 
-authsize : 2112 - authsize); 2113 2105 error = chcr_aead_common_init(req, op_type); 2114 2106 if (error) 2115 2107 return ERR_PTR(error); 2116 - if (dst_size) { 2117 - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2118 - dnents += sg_nents_xlen(req->dst, req->cryptlen + 2119 - (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, 2120 - req->assoclen); 2121 - dnents += MIN_AUTH_SG; // For IV 2122 - } else { 2123 - dnents = 0; 2124 - } 2108 + dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2109 + dnents += sg_nents_xlen(req->dst, req->cryptlen + 2110 + (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, 2111 + req->assoclen); 2112 + dnents += MIN_AUTH_SG; // For IV 2125 2113 2126 2114 dst_size = get_space_for_phys_dsgl(dnents); 2127 2115 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) ··· 2164 2162 temp & 0xF, 2165 2163 null ? 0 : assoclen + IV + 1, 2166 2164 temp, temp); 2165 + if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || 2166 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) 2167 + temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; 2168 + else 2169 + temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; 2167 2170 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 2168 2171 (op_type == CHCR_ENCRYPT_OP) ? 
1 : 0, 2169 - CHCR_SCMD_CIPHER_MODE_AES_CBC, 2172 + temp, 2170 2173 actx->auth_mode, aeadctx->hmac_ctrl, 2171 2174 IV >> 1); 2172 2175 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 2173 2176 0, 0, dst_size); 2174 2177 2175 2178 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; 2176 - if (op_type == CHCR_ENCRYPT_OP) 2179 + if (op_type == CHCR_ENCRYPT_OP || 2180 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 2181 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) 2177 2182 memcpy(chcr_req->key_ctx.key, aeadctx->key, 2178 2183 aeadctx->enckey_len); 2179 2184 else ··· 2190 2181 memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 2191 2182 4), actx->h_iopad, kctx_len - 2192 2183 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4)); 2193 - memcpy(reqctx->iv, req->iv, IV); 2184 + if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 2185 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 2186 + memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); 2187 + memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, 2188 + CTR_RFC3686_IV_SIZE); 2189 + *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + 2190 + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); 2191 + } else { 2192 + memcpy(reqctx->iv, req->iv, IV); 2193 + } 2194 2194 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); 2195 2195 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); 2196 2196 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); ··· 2220 2202 return ERR_PTR(error); 2221 2203 } 2222 2204 2223 - static int chcr_aead_dma_map(struct device *dev, 2224 - struct aead_request *req, 2225 - unsigned short op_type) 2205 + int chcr_aead_dma_map(struct device *dev, 2206 + struct aead_request *req, 2207 + unsigned short op_type) 2226 2208 { 2227 2209 int error; 2228 2210 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); ··· 2264 2246 return -ENOMEM; 2265 2247 } 2266 2248 2267 - static void chcr_aead_dma_unmap(struct device *dev, 2268 - struct aead_request *req, 2269 - 
unsigned short op_type) 2249 + void chcr_aead_dma_unmap(struct device *dev, 2250 + struct aead_request *req, 2251 + unsigned short op_type) 2270 2252 { 2271 2253 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2272 2254 struct crypto_aead *tfm = crypto_aead_reqtfm(req); ··· 2291 2273 } 2292 2274 } 2293 2275 2294 - static inline void chcr_add_aead_src_ent(struct aead_request *req, 2295 - struct ulptx_sgl *ulptx, 2296 - unsigned int assoclen, 2297 - unsigned short op_type) 2276 + void chcr_add_aead_src_ent(struct aead_request *req, 2277 + struct ulptx_sgl *ulptx, 2278 + unsigned int assoclen, 2279 + unsigned short op_type) 2298 2280 { 2299 2281 struct ulptx_walk ulp_walk; 2300 2282 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); ··· 2326 2308 } 2327 2309 } 2328 2310 2329 - static inline void chcr_add_aead_dst_ent(struct aead_request *req, 2330 - struct cpl_rx_phys_dsgl *phys_cpl, 2331 - unsigned int assoclen, 2332 - unsigned short op_type, 2333 - unsigned short qid) 2311 + void chcr_add_aead_dst_ent(struct aead_request *req, 2312 + struct cpl_rx_phys_dsgl *phys_cpl, 2313 + unsigned int assoclen, 2314 + unsigned short op_type, 2315 + unsigned short qid) 2334 2316 { 2335 2317 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 2336 2318 struct crypto_aead *tfm = crypto_aead_reqtfm(req); ··· 2348 2330 dsgl_walk_end(&dsgl_walk, qid); 2349 2331 } 2350 2332 2351 - static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 2352 - struct ulptx_sgl *ulptx, 2353 - struct cipher_wr_param *wrparam) 2333 + void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 2334 + struct ulptx_sgl *ulptx, 2335 + struct cipher_wr_param *wrparam) 2354 2336 { 2355 2337 struct ulptx_walk ulp_walk; 2356 2338 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); ··· 2373 2355 } 2374 2356 } 2375 2357 2376 - static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, 2377 - struct cpl_rx_phys_dsgl *phys_cpl, 2378 - struct 
cipher_wr_param *wrparam, 2379 - unsigned short qid) 2358 + void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, 2359 + struct cpl_rx_phys_dsgl *phys_cpl, 2360 + struct cipher_wr_param *wrparam, 2361 + unsigned short qid) 2380 2362 { 2381 2363 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 2382 2364 struct dsgl_walk dsgl_walk; ··· 2391 2373 dsgl_walk_end(&dsgl_walk, qid); 2392 2374 } 2393 2375 2394 - static inline void chcr_add_hash_src_ent(struct ahash_request *req, 2395 - struct ulptx_sgl *ulptx, 2396 - struct hash_wr_param *param) 2376 + void chcr_add_hash_src_ent(struct ahash_request *req, 2377 + struct ulptx_sgl *ulptx, 2378 + struct hash_wr_param *param) 2397 2379 { 2398 2380 struct ulptx_walk ulp_walk; 2399 2381 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); ··· 2413 2395 ulptx_walk_add_page(&ulp_walk, param->bfr_len, 2414 2396 &reqctx->dma_addr); 2415 2397 ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, 2416 - 0); 2417 - // reqctx->srcsg = ulp_walk.last_sg; 2418 - // reqctx->src_ofst = ulp_walk.last_sg_len; 2419 - ulptx_walk_end(&ulp_walk); 2398 + 0); 2399 + ulptx_walk_end(&ulp_walk); 2420 2400 } 2421 2401 } 2422 2402 2423 - 2424 - static inline int chcr_hash_dma_map(struct device *dev, 2425 - struct ahash_request *req) 2403 + int chcr_hash_dma_map(struct device *dev, 2404 + struct ahash_request *req) 2426 2405 { 2427 2406 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 2428 2407 int error = 0; ··· 2429 2414 error = dma_map_sg(dev, req->src, sg_nents(req->src), 2430 2415 DMA_TO_DEVICE); 2431 2416 if (!error) 2432 - return error; 2417 + return -ENOMEM; 2433 2418 req_ctx->is_sg_map = 1; 2434 2419 return 0; 2435 2420 } 2436 2421 2437 - static inline void chcr_hash_dma_unmap(struct device *dev, 2438 - struct ahash_request *req) 2422 + void chcr_hash_dma_unmap(struct device *dev, 2423 + struct ahash_request *req) 2439 2424 { 2440 2425 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); 2441 
2426 ··· 2448 2433 2449 2434 } 2450 2435 2451 - 2452 - static int chcr_cipher_dma_map(struct device *dev, 2453 - struct ablkcipher_request *req) 2436 + int chcr_cipher_dma_map(struct device *dev, 2437 + struct ablkcipher_request *req) 2454 2438 { 2455 2439 int error; 2456 2440 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); ··· 2483 2469 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); 2484 2470 return -ENOMEM; 2485 2471 } 2486 - static void chcr_cipher_dma_unmap(struct device *dev, 2487 - struct ablkcipher_request *req) 2472 + 2473 + void chcr_cipher_dma_unmap(struct device *dev, 2474 + struct ablkcipher_request *req) 2488 2475 { 2489 2476 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); 2490 2477 ··· 2681 2666 sub_type = get_aead_subtype(tfm); 2682 2667 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) 2683 2668 assoclen -= 8; 2684 - dst_size = assoclen + req->cryptlen + (op_type ? -authsize : 2685 - authsize); 2686 2669 error = chcr_aead_common_init(req, op_type); 2687 2670 if (error) 2688 2671 return ERR_PTR(error); ··· 2690 2677 error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); 2691 2678 if (error) 2692 2679 goto err; 2693 - if (dst_size) { 2694 - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2695 - dnents += sg_nents_xlen(req->dst, req->cryptlen 2696 - + (op_type ? -authsize : authsize), 2697 - CHCR_DST_SG_SIZE, req->assoclen); 2698 - dnents += MIN_CCM_SG; // For IV and B0 2699 - } else { 2700 - dnents = 0; 2701 - } 2680 + dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2681 + dnents += sg_nents_xlen(req->dst, req->cryptlen 2682 + + (op_type ? 
-authsize : authsize), 2683 + CHCR_DST_SG_SIZE, req->assoclen); 2684 + dnents += MIN_CCM_SG; // For IV and B0 2702 2685 dst_size = get_space_for_phys_dsgl(dnents); 2703 2686 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2; 2704 2687 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); ··· 2789 2780 assoclen = req->assoclen - 8; 2790 2781 2791 2782 reqctx->b0_dma = 0; 2792 - dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize); 2793 2783 error = chcr_aead_common_init(req, op_type); 2794 - if (error) 2795 - return ERR_PTR(error); 2796 - if (dst_size) { 2797 - dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2798 - dnents += sg_nents_xlen(req->dst, 2799 - req->cryptlen + (op_type ? -authsize : authsize), 2784 + if (error) 2785 + return ERR_PTR(error); 2786 + dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); 2787 + dnents += sg_nents_xlen(req->dst, req->cryptlen + 2788 + (op_type ? -authsize : authsize), 2800 2789 CHCR_DST_SG_SIZE, req->assoclen); 2801 - dnents += MIN_GCM_SG; // For IV 2802 - } else { 2803 - dnents = 0; 2804 - } 2790 + dnents += MIN_GCM_SG; // For IV 2805 2791 dst_size = get_space_for_phys_dsgl(dnents); 2806 2792 kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) + 2807 2793 AEAD_H_SIZE; ··· 2833 2829 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 2834 2830 assoclen ? 1 : 0, assoclen, 2835 2831 assoclen + IV + 1, 0); 2836 - chcr_req->sec_cpl.cipherstop_lo_authinsert = 2832 + chcr_req->sec_cpl.cipherstop_lo_authinsert = 2837 2833 FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, 2838 2834 temp, temp); 2839 - chcr_req->sec_cpl.seqno_numivs = 2835 + chcr_req->sec_cpl.seqno_numivs = 2840 2836 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == 2841 2837 CHCR_ENCRYPT_OP) ? 
1 : 0, 2842 2838 CHCR_SCMD_CIPHER_MODE_AES_GCM, ··· 3216 3212 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); 3217 3213 /* it contains auth and cipher key both*/ 3218 3214 struct crypto_authenc_keys keys; 3219 - unsigned int bs; 3215 + unsigned int bs, subtype; 3220 3216 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; 3221 3217 int err = 0, i, key_ctx_len = 0; 3222 3218 unsigned char ck_size = 0; ··· 3245 3241 pr_err("chcr : Unsupported digest size\n"); 3246 3242 goto out; 3247 3243 } 3244 + subtype = get_aead_subtype(authenc); 3245 + if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 3246 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 3247 + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) 3248 + goto out; 3249 + memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen 3250 + - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); 3251 + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; 3252 + } 3248 3253 if (keys.enckeylen == AES_KEYSIZE_128) { 3249 3254 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 3250 3255 } else if (keys.enckeylen == AES_KEYSIZE_192) { ··· 3271 3258 */ 3272 3259 memcpy(aeadctx->key, keys.enckey, keys.enckeylen); 3273 3260 aeadctx->enckey_len = keys.enckeylen; 3274 - get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 3275 - aeadctx->enckey_len << 3); 3261 + if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || 3262 + subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { 3276 3263 3264 + get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 3265 + aeadctx->enckey_len << 3); 3266 + } 3277 3267 base_hash = chcr_alloc_shash(max_authsize); 3278 3268 if (IS_ERR(base_hash)) { 3279 3269 pr_err("chcr : Base driver cannot be loaded\n"); ··· 3349 3333 struct crypto_authenc_keys keys; 3350 3334 int err; 3351 3335 /* it contains auth and cipher key both*/ 3336 + unsigned int subtype; 3352 3337 int key_ctx_len = 0; 3353 3338 unsigned char ck_size = 0; 3354 3339 ··· 3367 3350 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); 3368 3351 goto out; 3369 3352 } 3353 + subtype = 
get_aead_subtype(authenc); 3354 + if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || 3355 + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { 3356 + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) 3357 + goto out; 3358 + memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen 3359 + - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); 3360 + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; 3361 + } 3370 3362 if (keys.enckeylen == AES_KEYSIZE_128) { 3371 3363 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 3372 3364 } else if (keys.enckeylen == AES_KEYSIZE_192) { ··· 3383 3357 } else if (keys.enckeylen == AES_KEYSIZE_256) { 3384 3358 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 3385 3359 } else { 3386 - pr_err("chcr : Unsupported cipher key\n"); 3360 + pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen); 3387 3361 goto out; 3388 3362 } 3389 3363 memcpy(aeadctx->key, keys.enckey, keys.enckeylen); 3390 3364 aeadctx->enckey_len = keys.enckeylen; 3391 - get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 3392 - aeadctx->enckey_len << 3); 3365 + if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || 3366 + subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { 3367 + get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 3368 + aeadctx->enckey_len << 3); 3369 + } 3393 3370 key_ctx_len = sizeof(struct _key_ctx) 3394 3371 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4); 3395 3372 ··· 3404 3375 aeadctx->enckey_len = 0; 3405 3376 return -EINVAL; 3406 3377 } 3407 - static int chcr_aead_encrypt(struct aead_request *req) 3408 - { 3409 - struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3410 - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3411 - 3412 - reqctx->verify = VERIFY_HW; 3413 - 3414 - switch (get_aead_subtype(tfm)) { 3415 - case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: 3416 - case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: 3417 - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, 3418 - create_authenc_wr); 3419 - case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: 3420 - case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: 3421 - return chcr_aead_op(req, 
CHCR_ENCRYPT_OP, 0, 3422 - create_aead_ccm_wr); 3423 - default: 3424 - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, 3425 - create_gcm_wr); 3426 - } 3427 - } 3428 - 3429 - static int chcr_aead_decrypt(struct aead_request *req) 3430 - { 3431 - struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3432 - struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3433 - struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3434 - int size; 3435 - 3436 - if (aeadctx->mayverify == VERIFY_SW) { 3437 - size = crypto_aead_maxauthsize(tfm); 3438 - reqctx->verify = VERIFY_SW; 3439 - } else { 3440 - size = 0; 3441 - reqctx->verify = VERIFY_HW; 3442 - } 3443 - 3444 - switch (get_aead_subtype(tfm)) { 3445 - case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC: 3446 - case CRYPTO_ALG_SUB_TYPE_AEAD_NULL: 3447 - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, 3448 - create_authenc_wr); 3449 - case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: 3450 - case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: 3451 - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, 3452 - create_aead_ccm_wr); 3453 - default: 3454 - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, 3455 - create_gcm_wr); 3456 - } 3457 - } 3458 3378 3459 3379 static int chcr_aead_op(struct aead_request *req, 3460 - unsigned short op_type, 3461 - int size, 3462 - create_wr_t create_wr_fn) 3380 + unsigned short op_type, 3381 + int size, 3382 + create_wr_t create_wr_fn) 3463 3383 { 3464 3384 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3465 3385 struct uld_ctx *u_ctx; ··· 3437 3459 chcr_send_wr(skb); 3438 3460 return -EINPROGRESS; 3439 3461 } 3462 + 3463 + static int chcr_aead_encrypt(struct aead_request *req) 3464 + { 3465 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3466 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3467 + 3468 + reqctx->verify = VERIFY_HW; 3469 + 3470 + switch (get_aead_subtype(tfm)) { 3471 + case CRYPTO_ALG_SUB_TYPE_CTR_SHA: 3472 + case CRYPTO_ALG_SUB_TYPE_CBC_SHA: 3473 + case CRYPTO_ALG_SUB_TYPE_CBC_NULL: 3474 + case 
CRYPTO_ALG_SUB_TYPE_CTR_NULL: 3475 + return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, 3476 + create_authenc_wr); 3477 + case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: 3478 + case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: 3479 + return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, 3480 + create_aead_ccm_wr); 3481 + default: 3482 + return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, 3483 + create_gcm_wr); 3484 + } 3485 + } 3486 + 3487 + static int chcr_aead_decrypt(struct aead_request *req) 3488 + { 3489 + struct crypto_aead *tfm = crypto_aead_reqtfm(req); 3490 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); 3491 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); 3492 + int size; 3493 + 3494 + if (aeadctx->mayverify == VERIFY_SW) { 3495 + size = crypto_aead_maxauthsize(tfm); 3496 + reqctx->verify = VERIFY_SW; 3497 + } else { 3498 + size = 0; 3499 + reqctx->verify = VERIFY_HW; 3500 + } 3501 + 3502 + switch (get_aead_subtype(tfm)) { 3503 + case CRYPTO_ALG_SUB_TYPE_CBC_SHA: 3504 + case CRYPTO_ALG_SUB_TYPE_CTR_SHA: 3505 + case CRYPTO_ALG_SUB_TYPE_CBC_NULL: 3506 + case CRYPTO_ALG_SUB_TYPE_CTR_NULL: 3507 + return chcr_aead_op(req, CHCR_DECRYPT_OP, size, 3508 + create_authenc_wr); 3509 + case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: 3510 + case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: 3511 + return chcr_aead_op(req, CHCR_DECRYPT_OP, size, 3512 + create_aead_ccm_wr); 3513 + default: 3514 + return chcr_aead_op(req, CHCR_DECRYPT_OP, size, 3515 + create_gcm_wr); 3516 + } 3517 + } 3518 + 3440 3519 static struct chcr_alg_template driver_algs[] = { 3441 3520 /* AES-CBC */ 3442 3521 { ··· 3777 3742 } 3778 3743 }, 3779 3744 { 3780 - .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, 3745 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 3781 3746 .is_registered = 0, 3782 3747 .alg.aead = { 3783 3748 .base = { ··· 3798 3763 } 3799 3764 }, 3800 3765 { 3801 - .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, 3766 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 3802 3767 
.is_registered = 0, 3803 3768 .alg.aead = { 3804 3769 .base = { ··· 3820 3785 } 3821 3786 }, 3822 3787 { 3823 - .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, 3788 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 3824 3789 .is_registered = 0, 3825 3790 .alg.aead = { 3826 3791 .base = { ··· 3840 3805 } 3841 3806 }, 3842 3807 { 3843 - .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, 3808 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 3844 3809 .is_registered = 0, 3845 3810 .alg.aead = { 3846 3811 .base = { ··· 3861 3826 } 3862 3827 }, 3863 3828 { 3864 - .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC, 3829 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, 3865 3830 .is_registered = 0, 3866 3831 .alg.aead = { 3867 3832 .base = { ··· 3882 3847 } 3883 3848 }, 3884 3849 { 3885 - .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL, 3850 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL, 3886 3851 .is_registered = 0, 3887 3852 .alg.aead = { 3888 3853 .base = { ··· 3902 3867 .setauthsize = chcr_authenc_null_setauthsize, 3903 3868 } 3904 3869 }, 3870 + { 3871 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 3872 + .is_registered = 0, 3873 + .alg.aead = { 3874 + .base = { 3875 + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", 3876 + .cra_driver_name = 3877 + "authenc-hmac-sha1-rfc3686-ctr-aes-chcr", 3878 + .cra_blocksize = 1, 3879 + .cra_priority = CHCR_AEAD_PRIORITY, 3880 + .cra_ctxsize = sizeof(struct chcr_context) + 3881 + sizeof(struct chcr_aead_ctx) + 3882 + sizeof(struct chcr_authenc_ctx), 3883 + 3884 + }, 3885 + .ivsize = CTR_RFC3686_IV_SIZE, 3886 + .maxauthsize = SHA1_DIGEST_SIZE, 3887 + .setkey = chcr_authenc_setkey, 3888 + .setauthsize = chcr_authenc_setauthsize, 3889 + } 3890 + }, 3891 + { 3892 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 3893 + .is_registered = 0, 3894 + .alg.aead = { 3895 + .base = { 3896 + 3897 + 
.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", 3898 + .cra_driver_name = 3899 + "authenc-hmac-sha256-rfc3686-ctr-aes-chcr", 3900 + .cra_blocksize = 1, 3901 + .cra_priority = CHCR_AEAD_PRIORITY, 3902 + .cra_ctxsize = sizeof(struct chcr_context) + 3903 + sizeof(struct chcr_aead_ctx) + 3904 + sizeof(struct chcr_authenc_ctx), 3905 + 3906 + }, 3907 + .ivsize = CTR_RFC3686_IV_SIZE, 3908 + .maxauthsize = SHA256_DIGEST_SIZE, 3909 + .setkey = chcr_authenc_setkey, 3910 + .setauthsize = chcr_authenc_setauthsize, 3911 + } 3912 + }, 3913 + { 3914 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 3915 + .is_registered = 0, 3916 + .alg.aead = { 3917 + .base = { 3918 + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", 3919 + .cra_driver_name = 3920 + "authenc-hmac-sha224-rfc3686-ctr-aes-chcr", 3921 + .cra_blocksize = 1, 3922 + .cra_priority = CHCR_AEAD_PRIORITY, 3923 + .cra_ctxsize = sizeof(struct chcr_context) + 3924 + sizeof(struct chcr_aead_ctx) + 3925 + sizeof(struct chcr_authenc_ctx), 3926 + }, 3927 + .ivsize = CTR_RFC3686_IV_SIZE, 3928 + .maxauthsize = SHA224_DIGEST_SIZE, 3929 + .setkey = chcr_authenc_setkey, 3930 + .setauthsize = chcr_authenc_setauthsize, 3931 + } 3932 + }, 3933 + { 3934 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 3935 + .is_registered = 0, 3936 + .alg.aead = { 3937 + .base = { 3938 + .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", 3939 + .cra_driver_name = 3940 + "authenc-hmac-sha384-rfc3686-ctr-aes-chcr", 3941 + .cra_blocksize = 1, 3942 + .cra_priority = CHCR_AEAD_PRIORITY, 3943 + .cra_ctxsize = sizeof(struct chcr_context) + 3944 + sizeof(struct chcr_aead_ctx) + 3945 + sizeof(struct chcr_authenc_ctx), 3946 + 3947 + }, 3948 + .ivsize = CTR_RFC3686_IV_SIZE, 3949 + .maxauthsize = SHA384_DIGEST_SIZE, 3950 + .setkey = chcr_authenc_setkey, 3951 + .setauthsize = chcr_authenc_setauthsize, 3952 + } 3953 + }, 3954 + { 3955 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 3956 + .is_registered = 0, 3957 
+ .alg.aead = { 3958 + .base = { 3959 + .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", 3960 + .cra_driver_name = 3961 + "authenc-hmac-sha512-rfc3686-ctr-aes-chcr", 3962 + .cra_blocksize = 1, 3963 + .cra_priority = CHCR_AEAD_PRIORITY, 3964 + .cra_ctxsize = sizeof(struct chcr_context) + 3965 + sizeof(struct chcr_aead_ctx) + 3966 + sizeof(struct chcr_authenc_ctx), 3967 + 3968 + }, 3969 + .ivsize = CTR_RFC3686_IV_SIZE, 3970 + .maxauthsize = SHA512_DIGEST_SIZE, 3971 + .setkey = chcr_authenc_setkey, 3972 + .setauthsize = chcr_authenc_setauthsize, 3973 + } 3974 + }, 3975 + { 3976 + .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL, 3977 + .is_registered = 0, 3978 + .alg.aead = { 3979 + .base = { 3980 + .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))", 3981 + .cra_driver_name = 3982 + "authenc-digest_null-rfc3686-ctr-aes-chcr", 3983 + .cra_blocksize = 1, 3984 + .cra_priority = CHCR_AEAD_PRIORITY, 3985 + .cra_ctxsize = sizeof(struct chcr_context) + 3986 + sizeof(struct chcr_aead_ctx) + 3987 + sizeof(struct chcr_authenc_ctx), 3988 + 3989 + }, 3990 + .ivsize = CTR_RFC3686_IV_SIZE, 3991 + .maxauthsize = 0, 3992 + .setkey = chcr_aead_digest_null_setkey, 3993 + .setauthsize = chcr_authenc_null_setauthsize, 3994 + } 3995 + }, 3996 + 3905 3997 }; 3906 3998 3907 3999 /*
-15
drivers/crypto/chelsio/chcr_algo.h
··· 226 226 #define SPACE_LEFT(len) \ 227 227 ((SGE_MAX_WR_LEN - WR_MIN_LEN - (len))) 228 228 229 - unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88, 230 - 96, 112, 120, 136, 144, 160, 168, 184, 231 - 192, 208, 216, 232, 240, 256, 264, 280, 232 - 288, 304, 312, 328, 336, 352, 360, 376}; 233 - unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80, 234 - 112, 112, 128, 128, 144, 144, 160, 160, 235 - 192, 192, 208, 208, 224, 224, 240, 240, 236 - 272, 272, 288, 288, 304, 304, 320, 320}; 237 - 238 229 struct algo_param { 239 230 unsigned int auth_mode; 240 231 unsigned int mk_size; ··· 394 403 bytes[3] = aes_sbox[bytes[3]]; 395 404 return *(u32 *)(&bytes[0]); 396 405 } 397 - 398 - static u32 round_constant[11] = { 399 - 0x01000000, 0x02000000, 0x04000000, 0x08000000, 400 - 0x10000000, 0x20000000, 0x40000000, 0x80000000, 401 - 0x1B000000, 0x36000000, 0x6C000000 402 - }; 403 406 404 407 #endif /* __CHCR_ALGO_H__ */
+14
drivers/crypto/chelsio/chcr_core.c
··· 48 48 .add = chcr_uld_add, 49 49 .state_change = chcr_uld_state_change, 50 50 .rx_handler = chcr_uld_rx_handler, 51 + #ifdef CONFIG_CHELSIO_IPSEC_INLINE 52 + .tx_handler = chcr_uld_tx_handler, 53 + #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 51 54 }; 52 55 53 56 struct uld_ctx *assign_chcr_device(void) ··· 167 164 goto out; 168 165 } 169 166 u_ctx->lldi = *lld; 167 + #ifdef CONFIG_CHELSIO_IPSEC_INLINE 168 + if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE) 169 + chcr_add_xfrmops(lld); 170 + #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 170 171 out: 171 172 return u_ctx; 172 173 } ··· 193 186 work_handlers[rpl->opcode](dev, pgl->va); 194 187 return 0; 195 188 } 189 + 190 + #ifdef CONFIG_CHELSIO_IPSEC_INLINE 191 + int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev) 192 + { 193 + return chcr_ipsec_xmit(skb, dev); 194 + } 195 + #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 196 196 197 197 static int chcr_uld_state_change(void *handle, enum cxgb4_state state) 198 198 {
+38
drivers/crypto/chelsio/chcr_core.h
··· 39 39 #include <crypto/algapi.h> 40 40 #include "t4_hw.h" 41 41 #include "cxgb4.h" 42 + #include "t4_msg.h" 42 43 #include "cxgb4_uld.h" 43 44 44 45 #define DRV_MODULE_NAME "chcr" ··· 90 89 struct chcr_dev *dev; 91 90 }; 92 91 92 + struct chcr_ipsec_req { 93 + struct ulp_txpkt ulptx; 94 + struct ulptx_idata sc_imm; 95 + struct cpl_tx_sec_pdu sec_cpl; 96 + struct _key_ctx key_ctx; 97 + }; 98 + 99 + struct chcr_ipsec_wr { 100 + struct fw_ulptx_wr wreq; 101 + struct chcr_ipsec_req req; 102 + }; 103 + 104 + struct ipsec_sa_entry { 105 + int hmac_ctrl; 106 + unsigned int enckey_len; 107 + unsigned int kctx_len; 108 + unsigned int authsize; 109 + __be32 key_ctx_hdr; 110 + char salt[MAX_SALT]; 111 + char key[2 * AES_MAX_KEY_SIZE]; 112 + }; 113 + 114 + /* 115 + * sgl_len - calculates the size of an SGL of the given capacity 116 + * @n: the number of SGL entries 117 + * Calculates the number of flits needed for a scatter/gather list that 118 + * can hold the given number of entries. 119 + */ 120 + static inline unsigned int sgl_len(unsigned int n) 121 + { 122 + n--; 123 + return (3 * n) / 2 + (n & 1) + 2; 124 + } 125 + 93 126 struct uld_ctx *assign_chcr_device(void); 94 127 int chcr_send_wr(struct sk_buff *skb); 95 128 int start_crypto(void); 96 129 int stop_crypto(void); 97 130 int chcr_uld_rx_handler(void *handle, const __be64 *rsp, 98 131 const struct pkt_gl *pgl); 132 + int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev); 99 133 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, 100 134 int err); 135 + int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev); 136 + void chcr_add_xfrmops(const struct cxgb4_lld_info *lld); 101 137 #endif /* __CHCR_CORE_H__ */
+29 -47
drivers/crypto/chelsio/chcr_crypto.h
··· 134 134 #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000 135 135 #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000 136 136 #define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000 137 - #define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000 137 + #define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000 138 138 #define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000 139 139 #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000 140 - #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000 140 + #define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000 141 141 #define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000 142 142 #define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000 143 143 #define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000 144 144 #define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000 145 + #define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000 146 + #define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000 145 147 #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\ 146 148 CRYPTO_ALG_SUB_TYPE_HASH_HMAC) 147 149 ··· 212 210 struct phys_sge_pairs *to; 213 211 }; 214 212 215 - 216 - 217 213 struct chcr_gcm_ctx { 218 214 u8 ghash_h[AEAD_H_SIZE]; 219 215 }; ··· 227 227 struct chcr_authenc_ctx authenc[0]; 228 228 }; 229 229 230 - 231 - 232 230 struct chcr_aead_ctx { 233 231 __be32 key_ctx_hdr; 234 232 unsigned int enckey_len; 235 233 struct crypto_aead *sw_cipher; 236 234 u8 salt[MAX_SALT]; 237 235 u8 key[CHCR_AES_MAX_KEY_LEN]; 236 + u8 nonce[4]; 238 237 u16 hmac_ctrl; 239 238 u16 mayverify; 240 239 struct __aead_ctx ctx[0]; 241 240 }; 242 - 243 - 244 241 245 242 struct hmac_ctx { 246 243 struct crypto_shash *base_hash; ··· 304 307 int size, 305 308 unsigned short op_type); 306 309 307 - static int chcr_aead_op(struct aead_request *req_base, 308 - unsigned short op_type, 309 - int size, 310 - create_wr_t create_wr_fn); 311 - static inline int get_aead_subtype(struct crypto_aead *aead); 312 - static int chcr_handle_cipher_resp(struct ablkcipher_request *req, 313 - unsigned char *input, int err); 314 - static void chcr_verify_tag(struct aead_request *req, 
u8 *input, int *err); 315 - static int chcr_aead_dma_map(struct device *dev, struct aead_request *req, 316 - unsigned short op_type); 317 - static void chcr_aead_dma_unmap(struct device *dev, struct aead_request 318 - *req, unsigned short op_type); 319 - static inline void chcr_add_aead_dst_ent(struct aead_request *req, 320 - struct cpl_rx_phys_dsgl *phys_cpl, 321 - unsigned int assoclen, 322 - unsigned short op_type, 323 - unsigned short qid); 324 - static inline void chcr_add_aead_src_ent(struct aead_request *req, 325 - struct ulptx_sgl *ulptx, 326 - unsigned int assoclen, 327 - unsigned short op_type); 328 - static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 329 - struct ulptx_sgl *ulptx, 330 - struct cipher_wr_param *wrparam); 331 - static int chcr_cipher_dma_map(struct device *dev, 332 - struct ablkcipher_request *req); 333 - static void chcr_cipher_dma_unmap(struct device *dev, 334 - struct ablkcipher_request *req); 335 - static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, 336 - struct cpl_rx_phys_dsgl *phys_cpl, 337 - struct cipher_wr_param *wrparam, 338 - unsigned short qid); 310 + void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); 311 + int chcr_aead_dma_map(struct device *dev, struct aead_request *req, 312 + unsigned short op_type); 313 + void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req, 314 + unsigned short op_type); 315 + void chcr_add_aead_dst_ent(struct aead_request *req, 316 + struct cpl_rx_phys_dsgl *phys_cpl, 317 + unsigned int assoclen, unsigned short op_type, 318 + unsigned short qid); 319 + void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, 320 + unsigned int assoclen, unsigned short op_type); 321 + void chcr_add_cipher_src_ent(struct ablkcipher_request *req, 322 + struct ulptx_sgl *ulptx, 323 + struct cipher_wr_param *wrparam); 324 + int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req); 325 + void 
chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req); 326 + void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, 327 + struct cpl_rx_phys_dsgl *phys_cpl, 328 + struct cipher_wr_param *wrparam, 329 + unsigned short qid); 339 330 int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip); 340 - static inline void chcr_add_hash_src_ent(struct ahash_request *req, 341 - struct ulptx_sgl *ulptx, 342 - struct hash_wr_param *param); 343 - static inline int chcr_hash_dma_map(struct device *dev, 344 - struct ahash_request *req); 345 - static inline void chcr_hash_dma_unmap(struct device *dev, 346 - struct ahash_request *req); 331 + void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx, 332 + struct hash_wr_param *param); 333 + int chcr_hash_dma_map(struct device *dev, struct ahash_request *req); 334 + void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req); 347 335 #endif /* __CHCR_CRYPTO_H__ */
+654
drivers/crypto/chelsio/chcr_ipsec.c
··· 1 + /* 2 + * This file is part of the Chelsio T6 Crypto driver for Linux. 3 + * 4 + * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved. 5 + * 6 + * This software is available to you under a choice of one of two 7 + * licenses. You may choose to be licensed under the terms of the GNU 8 + * General Public License (GPL) Version 2, available from the file 9 + * COPYING in the main directory of this source tree, or the 10 + * OpenIB.org BSD license below: 11 + * 12 + * Redistribution and use in source and binary forms, with or 13 + * without modification, are permitted provided that the following 14 + * conditions are met: 15 + * 16 + * - Redistributions of source code must retain the above 17 + * copyright notice, this list of conditions and the following 18 + * disclaimer. 19 + * 20 + * - Redistributions in binary form must reproduce the above 21 + * copyright notice, this list of conditions and the following 22 + * disclaimer in the documentation and/or other materials 23 + * provided with the distribution. 24 + * 25 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 + * SOFTWARE. 
33 + * 34 + * Written and Maintained by: 35 + * Atul Gupta (atul.gupta@chelsio.com) 36 + */ 37 + 38 + #define pr_fmt(fmt) "chcr:" fmt 39 + 40 + #include <linux/kernel.h> 41 + #include <linux/module.h> 42 + #include <linux/crypto.h> 43 + #include <linux/cryptohash.h> 44 + #include <linux/skbuff.h> 45 + #include <linux/rtnetlink.h> 46 + #include <linux/highmem.h> 47 + #include <linux/if_vlan.h> 48 + #include <linux/ip.h> 49 + #include <linux/netdevice.h> 50 + #include <net/esp.h> 51 + #include <net/xfrm.h> 52 + #include <crypto/aes.h> 53 + #include <crypto/algapi.h> 54 + #include <crypto/hash.h> 55 + #include <crypto/sha.h> 56 + #include <crypto/authenc.h> 57 + #include <crypto/internal/aead.h> 58 + #include <crypto/null.h> 59 + #include <crypto/internal/skcipher.h> 60 + #include <crypto/aead.h> 61 + #include <crypto/scatterwalk.h> 62 + #include <crypto/internal/hash.h> 63 + 64 + #include "chcr_core.h" 65 + #include "chcr_algo.h" 66 + #include "chcr_crypto.h" 67 + 68 + /* 69 + * Max Tx descriptor space we allow for an Ethernet packet to be inlined 70 + * into a WR. 
71 + */ 72 + #define MAX_IMM_TX_PKT_LEN 256 73 + #define GCM_ESP_IV_SIZE 8 74 + 75 + static int chcr_xfrm_add_state(struct xfrm_state *x); 76 + static void chcr_xfrm_del_state(struct xfrm_state *x); 77 + static void chcr_xfrm_free_state(struct xfrm_state *x); 78 + static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x); 79 + 80 + static const struct xfrmdev_ops chcr_xfrmdev_ops = { 81 + .xdo_dev_state_add = chcr_xfrm_add_state, 82 + .xdo_dev_state_delete = chcr_xfrm_del_state, 83 + .xdo_dev_state_free = chcr_xfrm_free_state, 84 + .xdo_dev_offload_ok = chcr_ipsec_offload_ok, 85 + }; 86 + 87 + /* Add offload xfrms to Chelsio Interface */ 88 + void chcr_add_xfrmops(const struct cxgb4_lld_info *lld) 89 + { 90 + struct net_device *netdev = NULL; 91 + int i; 92 + 93 + for (i = 0; i < lld->nports; i++) { 94 + netdev = lld->ports[i]; 95 + if (!netdev) 96 + continue; 97 + netdev->xfrmdev_ops = &chcr_xfrmdev_ops; 98 + netdev->hw_enc_features |= NETIF_F_HW_ESP; 99 + netdev->features |= NETIF_F_HW_ESP; 100 + rtnl_lock(); 101 + netdev_change_features(netdev); 102 + rtnl_unlock(); 103 + } 104 + } 105 + 106 + static inline int chcr_ipsec_setauthsize(struct xfrm_state *x, 107 + struct ipsec_sa_entry *sa_entry) 108 + { 109 + int hmac_ctrl; 110 + int authsize = x->aead->alg_icv_len / 8; 111 + 112 + sa_entry->authsize = authsize; 113 + 114 + switch (authsize) { 115 + case ICV_8: 116 + hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; 117 + break; 118 + case ICV_12: 119 + hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; 120 + break; 121 + case ICV_16: 122 + hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; 123 + break; 124 + default: 125 + return -EINVAL; 126 + } 127 + return hmac_ctrl; 128 + } 129 + 130 + static inline int chcr_ipsec_setkey(struct xfrm_state *x, 131 + struct ipsec_sa_entry *sa_entry) 132 + { 133 + struct crypto_cipher *cipher; 134 + int keylen = (x->aead->alg_key_len + 7) / 8; 135 + unsigned char *key = x->aead->alg_key; 136 + int ck_size, key_ctx_size = 0; 137 + 
unsigned char ghash_h[AEAD_H_SIZE]; 138 + int ret = 0; 139 + 140 + if (keylen > 3) { 141 + keylen -= 4; /* nonce/salt is present in the last 4 bytes */ 142 + memcpy(sa_entry->salt, key + keylen, 4); 143 + } 144 + 145 + if (keylen == AES_KEYSIZE_128) { 146 + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; 147 + } else if (keylen == AES_KEYSIZE_192) { 148 + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; 149 + } else if (keylen == AES_KEYSIZE_256) { 150 + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; 151 + } else { 152 + pr_err("GCM: Invalid key length %d\n", keylen); 153 + ret = -EINVAL; 154 + goto out; 155 + } 156 + 157 + memcpy(sa_entry->key, key, keylen); 158 + sa_entry->enckey_len = keylen; 159 + key_ctx_size = sizeof(struct _key_ctx) + 160 + ((DIV_ROUND_UP(keylen, 16)) << 4) + 161 + AEAD_H_SIZE; 162 + 163 + sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, 164 + CHCR_KEYCTX_MAC_KEY_SIZE_128, 165 + 0, 0, 166 + key_ctx_size >> 4); 167 + 168 + /* Calculate the H = CIPH(K, 0 repeated 16 times). 169 + * It will go in key context 170 + */ 171 + cipher = crypto_alloc_cipher("aes-generic", 0, 0); 172 + if (IS_ERR(cipher)) { 173 + sa_entry->enckey_len = 0; 174 + ret = -ENOMEM; 175 + goto out; 176 + } 177 + 178 + ret = crypto_cipher_setkey(cipher, key, keylen); 179 + if (ret) { 180 + sa_entry->enckey_len = 0; 181 + goto out1; 182 + } 183 + memset(ghash_h, 0, AEAD_H_SIZE); 184 + crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h); 185 + memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 186 + 16), ghash_h, AEAD_H_SIZE); 187 + sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) + 188 + AEAD_H_SIZE; 189 + out1: 190 + crypto_free_cipher(cipher); 191 + out: 192 + return ret; 193 + } 194 + 195 + /* 196 + * chcr_xfrm_add_state 197 + * returns 0 on success, negative error if failed to send message to FPGA 198 + * positive error if FPGA returned a bad response 199 + */ 200 + static int chcr_xfrm_add_state(struct xfrm_state *x) 201 + { 202 + struct ipsec_sa_entry 
*sa_entry; 203 + int res = 0; 204 + 205 + if (x->props.aalgo != SADB_AALG_NONE) { 206 + pr_debug("CHCR: Cannot offload authenticated xfrm states\n"); 207 + return -EINVAL; 208 + } 209 + if (x->props.calgo != SADB_X_CALG_NONE) { 210 + pr_debug("CHCR: Cannot offload compressed xfrm states\n"); 211 + return -EINVAL; 212 + } 213 + if (x->props.flags & XFRM_STATE_ESN) { 214 + pr_debug("CHCR: Cannot offload ESN xfrm states\n"); 215 + return -EINVAL; 216 + } 217 + if (x->props.family != AF_INET && 218 + x->props.family != AF_INET6) { 219 + pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n"); 220 + return -EINVAL; 221 + } 222 + if (x->props.mode != XFRM_MODE_TRANSPORT && 223 + x->props.mode != XFRM_MODE_TUNNEL) { 224 + pr_debug("CHCR: Only transport and tunnel xfrm offload\n"); 225 + return -EINVAL; 226 + } 227 + if (x->id.proto != IPPROTO_ESP) { 228 + pr_debug("CHCR: Only ESP xfrm state offloaded\n"); 229 + return -EINVAL; 230 + } 231 + if (x->encap) { 232 + pr_debug("CHCR: Encapsulated xfrm state not offloaded\n"); 233 + return -EINVAL; 234 + } 235 + if (!x->aead) { 236 + pr_debug("CHCR: Cannot offload xfrm states without aead\n"); 237 + return -EINVAL; 238 + } 239 + if (x->aead->alg_icv_len != 128 && 240 + x->aead->alg_icv_len != 96) { 241 + pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n"); 242 + return -EINVAL; 243 + } 244 + if ((x->aead->alg_key_len != 128 + 32) && 245 + (x->aead->alg_key_len != 256 + 32)) { 246 + pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n"); 247 + return -EINVAL; 248 + } 249 + if (x->tfcpad) { 250 + pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n"); 251 + return -EINVAL; 252 + } 253 + if (!x->geniv) { 254 + pr_debug("CHCR: Cannot offload xfrm states without geniv\n"); 255 + return -EINVAL; 256 + } 257 + if (strcmp(x->geniv, "seqiv")) { 258 + pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n"); 259 + return -EINVAL; 260 + } 261 
+ 262 + sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); 263 + if (!sa_entry) { 264 + res = -ENOMEM; 265 + goto out; 266 + } 267 + 268 + sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry); 269 + chcr_ipsec_setkey(x, sa_entry); 270 + x->xso.offload_handle = (unsigned long)sa_entry; 271 + try_module_get(THIS_MODULE); 272 + out: 273 + return res; 274 + } 275 + 276 + static void chcr_xfrm_del_state(struct xfrm_state *x) 277 + { 278 + /* do nothing */ 279 + if (!x->xso.offload_handle) 280 + return; 281 + } 282 + 283 + static void chcr_xfrm_free_state(struct xfrm_state *x) 284 + { 285 + struct ipsec_sa_entry *sa_entry; 286 + 287 + if (!x->xso.offload_handle) 288 + return; 289 + 290 + sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle; 291 + kfree(sa_entry); 292 + module_put(THIS_MODULE); 293 + } 294 + 295 + static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 296 + { 297 + /* Offload with IP options is not supported yet */ 298 + if (ip_hdr(skb)->ihl > 5) 299 + return false; 300 + 301 + return true; 302 + } 303 + 304 + static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len) 305 + { 306 + int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len; 307 + 308 + hdrlen += sizeof(struct cpl_tx_pkt); 309 + if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) 310 + return hdrlen; 311 + return 0; 312 + } 313 + 314 + static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb, 315 + unsigned int kctx_len) 316 + { 317 + unsigned int flits; 318 + int hdrlen = is_eth_imm(skb, kctx_len); 319 + 320 + /* If the skb is small enough, we can pump it out as a work request 321 + * with only immediate data. In that case we just have to have the 322 + * TX Packet header plus the skb data in the Work Request. 
323 + */ 324 + 325 + if (hdrlen) 326 + return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); 327 + 328 + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); 329 + 330 + /* Otherwise, we're going to have to construct a Scatter gather list 331 + * of the skb body and fragments. We also include the flits necessary 332 + * for the TX Packet Work Request and CPL. We always have a firmware 333 + * Write Header (incorporated as part of the cpl_tx_pkt_lso and 334 + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL 335 + * message or, if we're doing a Large Send Offload, an LSO CPL message 336 + * with an embedded TX Packet Write CPL message. 337 + */ 338 + flits += (sizeof(struct fw_ulptx_wr) + 339 + sizeof(struct chcr_ipsec_req) + 340 + kctx_len + 341 + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); 342 + return flits; 343 + } 344 + 345 + inline void *copy_cpltx_pktxt(struct sk_buff *skb, 346 + struct net_device *dev, 347 + void *pos) 348 + { 349 + struct adapter *adap; 350 + struct port_info *pi; 351 + struct sge_eth_txq *q; 352 + struct cpl_tx_pkt_core *cpl; 353 + u64 cntrl = 0; 354 + u32 ctrl0, qidx; 355 + 356 + pi = netdev_priv(dev); 357 + adap = pi->adapter; 358 + qidx = skb->queue_mapping; 359 + q = &adap->sge.ethtxq[qidx + pi->first_qset]; 360 + 361 + cpl = (struct cpl_tx_pkt_core *)pos; 362 + 363 + if (skb->ip_summed == CHECKSUM_PARTIAL) 364 + cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 365 + ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | 366 + TXPKT_PF_V(adap->pf); 367 + if (skb_vlan_tag_present(skb)) { 368 + q->vlan_ins++; 369 + cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb)); 370 + } 371 + 372 + cpl->ctrl0 = htonl(ctrl0); 373 + cpl->pack = htons(0); 374 + cpl->len = htons(skb->len); 375 + cpl->ctrl1 = cpu_to_be64(cntrl); 376 + 377 + pos += sizeof(struct cpl_tx_pkt_core); 378 + return pos; 379 + } 380 + 381 + inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, 382 + struct net_device *dev, 383 + void 
*pos, 384 + struct ipsec_sa_entry *sa_entry) 385 + { 386 + struct adapter *adap; 387 + struct port_info *pi; 388 + struct sge_eth_txq *q; 389 + unsigned int len, qidx; 390 + struct _key_ctx *key_ctx; 391 + int left, eoq, key_len; 392 + 393 + pi = netdev_priv(dev); 394 + adap = pi->adapter; 395 + qidx = skb->queue_mapping; 396 + q = &adap->sge.ethtxq[qidx + pi->first_qset]; 397 + len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core); 398 + key_len = sa_entry->kctx_len; 399 + 400 + /* end of queue, reset pos to start of queue */ 401 + eoq = (void *)q->q.stat - pos; 402 + left = eoq; 403 + if (!eoq) { 404 + pos = q->q.desc; 405 + left = 64 * q->q.size; 406 + } 407 + 408 + /* Copy the Key context header */ 409 + key_ctx = (struct _key_ctx *)pos; 410 + key_ctx->ctx_hdr = sa_entry->key_ctx_hdr; 411 + memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT); 412 + pos += sizeof(struct _key_ctx); 413 + left -= sizeof(struct _key_ctx); 414 + 415 + if (likely(len <= left)) { 416 + memcpy(key_ctx->key, sa_entry->key, key_len); 417 + pos += key_len; 418 + } else { 419 + if (key_len <= left) { 420 + memcpy(pos, sa_entry->key, key_len); 421 + pos += key_len; 422 + } else { 423 + memcpy(pos, sa_entry->key, left); 424 + memcpy(q->q.desc, sa_entry->key + left, 425 + key_len - left); 426 + pos = (u8 *)q->q.desc + (key_len - left); 427 + } 428 + } 429 + /* Copy CPL TX PKT XT */ 430 + pos = copy_cpltx_pktxt(skb, dev, pos); 431 + 432 + return pos; 433 + } 434 + 435 + inline void *chcr_crypto_wreq(struct sk_buff *skb, 436 + struct net_device *dev, 437 + void *pos, 438 + int credits, 439 + struct ipsec_sa_entry *sa_entry) 440 + { 441 + struct port_info *pi = netdev_priv(dev); 442 + struct adapter *adap = pi->adapter; 443 + unsigned int immdatalen = 0; 444 + unsigned int ivsize = GCM_ESP_IV_SIZE; 445 + struct chcr_ipsec_wr *wr; 446 + unsigned int flits; 447 + u32 wr_mid; 448 + int qidx = skb_get_queue_mapping(skb); 449 + struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset]; 450 + 
unsigned int kctx_len = sa_entry->kctx_len; 451 + int qid = q->q.cntxt_id; 452 + 453 + atomic_inc(&adap->chcr_stats.ipsec_cnt); 454 + 455 + flits = calc_tx_sec_flits(skb, kctx_len); 456 + 457 + if (is_eth_imm(skb, kctx_len)) 458 + immdatalen = skb->len; 459 + 460 + /* WR Header */ 461 + wr = (struct chcr_ipsec_wr *)pos; 462 + wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); 463 + wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); 464 + 465 + if (unlikely(credits < ETHTXQ_STOP_THRES)) { 466 + netif_tx_stop_queue(q->txq); 467 + q->q.stops++; 468 + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; 469 + } 470 + wr_mid |= FW_ULPTX_WR_DATA_F; 471 + wr->wreq.flowid_len16 = htonl(wr_mid); 472 + 473 + /* ULPTX */ 474 + wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid); 475 + wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1); 476 + 477 + /* Sub-command */ 478 + wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen); 479 + wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + 480 + sizeof(wr->req.key_ctx) + 481 + kctx_len + 482 + sizeof(struct cpl_tx_pkt_core) + 483 + immdatalen); 484 + 485 + /* CPL_SEC_PDU */ 486 + wr->req.sec_cpl.op_ivinsrtofst = htonl( 487 + CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | 488 + CPL_TX_SEC_PDU_CPLLEN_V(2) | 489 + CPL_TX_SEC_PDU_PLACEHOLDER_V(1) | 490 + CPL_TX_SEC_PDU_IVINSRTOFST_V( 491 + (skb_transport_offset(skb) + 492 + sizeof(struct ip_esp_hdr) + 1))); 493 + 494 + wr->req.sec_cpl.pldlen = htonl(skb->len); 495 + 496 + wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 497 + (skb_transport_offset(skb) + 1), 498 + (skb_transport_offset(skb) + 499 + sizeof(struct ip_esp_hdr)), 500 + (skb_transport_offset(skb) + 501 + sizeof(struct ip_esp_hdr) + 502 + GCM_ESP_IV_SIZE + 1), 0); 503 + 504 + wr->req.sec_cpl.cipherstop_lo_authinsert = 505 + FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) + 506 + sizeof(struct ip_esp_hdr) + 507 + GCM_ESP_IV_SIZE + 1, 508 + sa_entry->authsize, 509 + 
sa_entry->authsize); 510 + wr->req.sec_cpl.seqno_numivs = 511 + FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1, 512 + CHCR_SCMD_CIPHER_MODE_AES_GCM, 513 + CHCR_SCMD_AUTH_MODE_GHASH, 514 + sa_entry->hmac_ctrl, 515 + ivsize >> 1); 516 + wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 517 + 0, 0, 0); 518 + 519 + pos += sizeof(struct fw_ulptx_wr) + 520 + sizeof(struct ulp_txpkt) + 521 + sizeof(struct ulptx_idata) + 522 + sizeof(struct cpl_tx_sec_pdu); 523 + 524 + pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry); 525 + 526 + return pos; 527 + } 528 + 529 + /** 530 + * flits_to_desc - returns the num of Tx descriptors for the given flits 531 + * @n: the number of flits 532 + * 533 + * Returns the number of Tx descriptors needed for the supplied number 534 + * of flits. 535 + */ 536 + static inline unsigned int flits_to_desc(unsigned int n) 537 + { 538 + WARN_ON(n > SGE_MAX_WR_LEN / 8); 539 + return DIV_ROUND_UP(n, 8); 540 + } 541 + 542 + static inline unsigned int txq_avail(const struct sge_txq *q) 543 + { 544 + return q->size - 1 - q->in_use; 545 + } 546 + 547 + static void eth_txq_stop(struct sge_eth_txq *q) 548 + { 549 + netif_tx_stop_queue(q->txq); 550 + q->q.stops++; 551 + } 552 + 553 + static inline void txq_advance(struct sge_txq *q, unsigned int n) 554 + { 555 + q->in_use += n; 556 + q->pidx += n; 557 + if (q->pidx >= q->size) 558 + q->pidx -= q->size; 559 + } 560 + 561 + /* 562 + * chcr_ipsec_xmit called from ULD Tx handler 563 + */ 564 + int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev) 565 + { 566 + struct xfrm_state *x = xfrm_input_state(skb); 567 + struct ipsec_sa_entry *sa_entry; 568 + u64 *pos, *end, *before, *sgl; 569 + int qidx, left, credits; 570 + unsigned int flits = 0, ndesc, kctx_len; 571 + struct adapter *adap; 572 + struct sge_eth_txq *q; 573 + struct port_info *pi; 574 + dma_addr_t addr[MAX_SKB_FRAGS + 1]; 575 + bool immediate = false; 576 + 577 + if (!x->xso.offload_handle) 578 + return NETDEV_TX_BUSY; 579 + 580 
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle; 581 + kctx_len = sa_entry->kctx_len; 582 + 583 + if (skb->sp->len != 1) { 584 + out_free: dev_kfree_skb_any(skb); 585 + return NETDEV_TX_OK; 586 + } 587 + 588 + pi = netdev_priv(dev); 589 + adap = pi->adapter; 590 + qidx = skb->queue_mapping; 591 + q = &adap->sge.ethtxq[qidx + pi->first_qset]; 592 + 593 + cxgb4_reclaim_completed_tx(adap, &q->q, true); 594 + 595 + flits = calc_tx_sec_flits(skb, sa_entry->kctx_len); 596 + ndesc = flits_to_desc(flits); 597 + credits = txq_avail(&q->q) - ndesc; 598 + 599 + if (unlikely(credits < 0)) { 600 + eth_txq_stop(q); 601 + dev_err(adap->pdev_dev, 602 + "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n", 603 + dev->name, qidx, credits, ndesc, txq_avail(&q->q), 604 + flits); 605 + return NETDEV_TX_BUSY; 606 + } 607 + 608 + if (is_eth_imm(skb, kctx_len)) 609 + immediate = true; 610 + 611 + if (!immediate && 612 + unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { 613 + q->mapping_err++; 614 + goto out_free; 615 + } 616 + 617 + pos = (u64 *)&q->q.desc[q->q.pidx]; 618 + before = (u64 *)pos; 619 + end = (u64 *)pos + flits; 620 + /* Setup IPSec CPL */ 621 + pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos, 622 + credits, sa_entry); 623 + if (before > (u64 *)pos) { 624 + left = (u8 *)end - (u8 *)q->q.stat; 625 + end = (void *)q->q.desc + left; 626 + } 627 + if (pos == (u64 *)q->q.stat) { 628 + left = (u8 *)end - (u8 *)q->q.stat; 629 + end = (void *)q->q.desc + left; 630 + pos = (void *)q->q.desc; 631 + } 632 + 633 + sgl = (void *)pos; 634 + if (immediate) { 635 + cxgb4_inline_tx_skb(skb, &q->q, sgl); 636 + dev_consume_skb_any(skb); 637 + } else { 638 + int last_desc; 639 + 640 + cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 641 + 0, addr); 642 + skb_orphan(skb); 643 + 644 + last_desc = q->q.pidx + ndesc - 1; 645 + if (last_desc >= q->q.size) 646 + last_desc -= q->q.size; 647 + q->q.sdesc[last_desc].skb = skb; 648 + q->q.sdesc[last_desc].sgl = (struct 
ulptx_sgl *)sgl; 649 + } 650 + txq_advance(&q->q, ndesc); 651 + 652 + cxgb4_ring_tx_db(adap, &q->q, ndesc); 653 + return NETDEV_TX_OK; 654 + }
+62 -46
drivers/crypto/exynos-rng.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * exynos-rng.c - Random Number Generator driver for the Exynos 3 4 * ··· 7 6 * Loosely based on old driver from drivers/char/hw_random/exynos-rng.c: 8 7 * Copyright (C) 2012 Samsung Electronics 9 8 * Jonghwa Lee <jonghwa3.lee@samsung.com> 10 - * 11 - * This program is free software; you can redistribute it and/or modify 12 - * it under the terms of the GNU General Public License as published by 13 - * the Free Software Foundation; 14 - * 15 - * This program is distributed in the hope that it will be useful, 16 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 - * GNU General Public License for more details. 19 9 */ 20 10 21 11 #include <linux/clk.h> ··· 14 22 #include <linux/err.h> 15 23 #include <linux/io.h> 16 24 #include <linux/module.h> 25 + #include <linux/mutex.h> 26 + #include <linux/of_device.h> 17 27 #include <linux/platform_device.h> 18 28 19 29 #include <crypto/internal/rng.h> 20 30 21 31 #define EXYNOS_RNG_CONTROL 0x0 22 32 #define EXYNOS_RNG_STATUS 0x10 33 + 34 + #define EXYNOS_RNG_SEED_CONF 0x14 35 + #define EXYNOS_RNG_GEN_PRNG BIT(1) 36 + 23 37 #define EXYNOS_RNG_SEED_BASE 0x140 24 38 #define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4)) 25 39 #define EXYNOS_RNG_OUT_BASE 0x160 ··· 41 43 #define EXYNOS_RNG_SEED_REGS 5 42 44 #define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4) 43 45 46 + enum exynos_prng_type { 47 + EXYNOS_PRNG_UNKNOWN = 0, 48 + EXYNOS_PRNG_EXYNOS4, 49 + EXYNOS_PRNG_EXYNOS5, 50 + }; 51 + 44 52 /* 45 - * Driver re-seeds itself with generated random numbers to increase 46 - * the randomness. 53 + * Driver re-seeds itself with generated random numbers to hinder 54 + * backtracking of the original seed. 47 55 * 48 56 * Time for next re-seed in ms. 
49 57 */ 50 - #define EXYNOS_RNG_RESEED_TIME 100 58 + #define EXYNOS_RNG_RESEED_TIME 1000 59 + #define EXYNOS_RNG_RESEED_BYTES 65536 60 + 51 61 /* 52 62 * In polling mode, do not wait infinitely for the engine to finish the work. 53 63 */ ··· 69 63 /* Device associated memory */ 70 64 struct exynos_rng_dev { 71 65 struct device *dev; 66 + enum exynos_prng_type type; 72 67 void __iomem *mem; 73 68 struct clk *clk; 69 + struct mutex lock; 74 70 /* Generated numbers stored for seeding during resume */ 75 71 u8 seed_save[EXYNOS_RNG_SEED_SIZE]; 76 72 unsigned int seed_save_len; 77 73 /* Time of last seeding in jiffies */ 78 74 unsigned long last_seeding; 75 + /* Bytes generated since last seeding */ 76 + unsigned long bytes_seeding; 79 77 }; 80 78 81 79 static struct exynos_rng_dev *exynos_rng_dev; ··· 124 114 } 125 115 126 116 rng->last_seeding = jiffies; 117 + rng->bytes_seeding = 0; 127 118 128 119 return 0; 129 - } 130 - 131 - /* 132 - * Read from output registers and put the data under 'dst' array, 133 - * up to dlen bytes. 134 - * 135 - * Returns number of bytes actually stored in 'dst' (dlen 136 - * or EXYNOS_RNG_SEED_SIZE). 
137 - */ 138 - static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng, 139 - u8 *dst, unsigned int dlen) 140 - { 141 - unsigned int cnt = 0; 142 - int i, j; 143 - u32 val; 144 - 145 - for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) { 146 - val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j)); 147 - 148 - for (i = 0; i < 4; i++) { 149 - dst[cnt] = val & 0xff; 150 - val >>= 8; 151 - if (++cnt >= dlen) 152 - return cnt; 153 - } 154 - } 155 - 156 - return cnt; 157 120 } 158 121 159 122 /* ··· 143 160 { 144 161 int retry = EXYNOS_RNG_WAIT_RETRIES; 145 162 146 - exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START, 147 - EXYNOS_RNG_CONTROL); 163 + if (rng->type == EXYNOS_PRNG_EXYNOS4) { 164 + exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START, 165 + EXYNOS_RNG_CONTROL); 166 + } else if (rng->type == EXYNOS_PRNG_EXYNOS5) { 167 + exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG, 168 + EXYNOS_RNG_SEED_CONF); 169 + } 148 170 149 171 while (!(exynos_rng_readl(rng, 150 172 EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry) ··· 161 173 /* Clear status bit */ 162 174 exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE, 163 175 EXYNOS_RNG_STATUS); 164 - *read = exynos_rng_copy_random(rng, dst, dlen); 176 + *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE); 177 + memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read); 178 + rng->bytes_seeding += *read; 165 179 166 180 return 0; 167 181 } ··· 177 187 unsigned int read = 0; 178 188 u8 seed[EXYNOS_RNG_SEED_SIZE]; 179 189 180 - if (time_before(now, next_seeding)) 190 + if (time_before(now, next_seeding) && 191 + rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES) 181 192 return; 182 193 183 194 if (exynos_rng_get_random(rng, seed, sizeof(seed), &read)) 184 195 return; 185 196 186 197 exynos_rng_set_seed(rng, seed, read); 198 + 199 + /* Let others do some of their job. 
*/ 200 + mutex_unlock(&rng->lock); 201 + mutex_lock(&rng->lock); 187 202 } 188 203 189 204 static int exynos_rng_generate(struct crypto_rng *tfm, ··· 204 209 if (ret) 205 210 return ret; 206 211 212 + mutex_lock(&rng->lock); 207 213 do { 208 214 ret = exynos_rng_get_random(rng, dst, dlen, &read); 209 215 if (ret) ··· 215 219 216 220 exynos_rng_reseed(rng); 217 221 } while (dlen > 0); 222 + mutex_unlock(&rng->lock); 218 223 219 224 clk_disable_unprepare(rng->clk); 220 225 ··· 233 236 if (ret) 234 237 return ret; 235 238 239 + mutex_lock(&rng->lock); 236 240 ret = exynos_rng_set_seed(ctx->rng, seed, slen); 241 + mutex_unlock(&rng->lock); 237 242 238 243 clk_disable_unprepare(rng->clk); 239 244 ··· 258 259 .base = { 259 260 .cra_name = "stdrng", 260 261 .cra_driver_name = "exynos_rng", 261 - .cra_priority = 100, 262 + .cra_priority = 300, 262 263 .cra_ctxsize = sizeof(struct exynos_rng_ctx), 263 264 .cra_module = THIS_MODULE, 264 265 .cra_init = exynos_rng_kcapi_init, ··· 277 278 rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); 278 279 if (!rng) 279 280 return -ENOMEM; 281 + 282 + rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev); 283 + 284 + mutex_init(&rng->lock); 280 285 281 286 rng->dev = &pdev->dev; 282 287 rng->clk = devm_clk_get(&pdev->dev, "secss"); ··· 332 329 if (ret) 333 330 return ret; 334 331 332 + mutex_lock(&rng->lock); 333 + 335 334 /* Get new random numbers and store them for seeding on resume. 
*/ 336 335 exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save), 337 336 &(rng->seed_save_len)); 337 + 338 + mutex_unlock(&rng->lock); 339 + 338 340 dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n", 339 341 rng->seed_save_len); 340 342 ··· 362 354 if (ret) 363 355 return ret; 364 356 357 + mutex_lock(&rng->lock); 358 + 365 359 ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len); 360 + 361 + mutex_unlock(&rng->lock); 366 362 367 363 clk_disable_unprepare(rng->clk); 368 364 ··· 379 367 static const struct of_device_id exynos_rng_dt_match[] = { 380 368 { 381 369 .compatible = "samsung,exynos4-rng", 370 + .data = (const void *)EXYNOS_PRNG_EXYNOS4, 371 + }, { 372 + .compatible = "samsung,exynos5250-prng", 373 + .data = (const void *)EXYNOS_PRNG_EXYNOS5, 382 374 }, 383 375 { }, 384 376 }; ··· 402 386 403 387 MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver"); 404 388 MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>"); 405 - MODULE_LICENSE("GPL"); 389 + MODULE_LICENSE("GPL v2");
+1
drivers/crypto/hifn_795x.c
··· 2579 2579 for (i = 0; i < 3; ++i) 2580 2580 if (dev->bar[i]) 2581 2581 iounmap(dev->bar[i]); 2582 + kfree(dev); 2582 2583 2583 2584 err_out_free_regions: 2584 2585 pci_release_regions(pdev);
+244 -128
drivers/crypto/inside-secure/safexcel.c
··· 108 108 writel(EIP197_PE_ICE_x_CTRL_SW_RESET | 109 109 EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR | 110 110 EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR, 111 - priv->base + ctrl); 111 + EIP197_PE(priv) + ctrl); 112 112 113 113 /* Enable access to the program memory */ 114 - writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL); 114 + writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL); 115 115 116 116 /* Write the firmware */ 117 117 for (i = 0; i < fw->size / sizeof(u32); i++) ··· 119 119 priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32)); 120 120 121 121 /* Disable access to the program memory */ 122 - writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL); 122 + writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL); 123 123 124 124 /* Release engine from reset */ 125 - val = readl(priv->base + ctrl); 125 + val = readl(EIP197_PE(priv) + ctrl); 126 126 val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET; 127 - writel(val, priv->base + ctrl); 127 + writel(val, EIP197_PE(priv) + ctrl); 128 128 } 129 129 130 130 static int eip197_load_firmwares(struct safexcel_crypto_priv *priv) ··· 145 145 } 146 146 147 147 /* Clear the scratchpad memory */ 148 - val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL); 148 + val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); 149 149 val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER | 150 150 EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN | 151 151 EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS | 152 152 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; 153 - writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL); 153 + writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); 154 154 155 - memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0, 155 + memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, 156 156 EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); 157 157 158 158 eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, ··· 173 173 u32 hdw, cd_size_rnd, val; 174 174 int i; 175 175 176 - hdw = readl(priv->base + EIP197_HIA_OPTIONS); 176 + hdw = 
readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 177 177 hdw &= GENMASK(27, 25); 178 178 hdw >>= 25; 179 179 ··· 182 182 for (i = 0; i < priv->config.rings; i++) { 183 183 /* ring base address */ 184 184 writel(lower_32_bits(priv->ring[i].cdr.base_dma), 185 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); 185 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); 186 186 writel(upper_32_bits(priv->ring[i].cdr.base_dma), 187 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); 187 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); 188 188 189 189 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) | 190 190 priv->config.cd_size, 191 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE); 191 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); 192 192 writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) | 193 193 (EIP197_FETCH_COUNT * priv->config.cd_offset), 194 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG); 194 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); 195 195 196 196 /* Configure DMA tx control */ 197 197 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); 198 198 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); 199 - writel(val, 200 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG); 199 + writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG); 201 200 202 201 /* clear any pending interrupt */ 203 202 writel(GENMASK(5, 0), 204 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT); 203 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT); 205 204 } 206 205 207 206 return 0; ··· 211 212 u32 hdw, rd_size_rnd, val; 212 213 int i; 213 214 214 - hdw = readl(priv->base + EIP197_HIA_OPTIONS); 215 + hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 215 216 hdw &= GENMASK(27, 25); 216 217 hdw >>= 25; 217 218 ··· 220 221 for (i = 0; i < priv->config.rings; i++) { 221 222 /* ring base address */ 222 223 
writel(lower_32_bits(priv->ring[i].rdr.base_dma), 223 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); 224 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); 224 225 writel(upper_32_bits(priv->ring[i].rdr.base_dma), 225 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); 226 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); 226 227 227 228 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) | 228 229 priv->config.rd_size, 229 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE); 230 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE); 230 231 231 232 writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) | 232 233 (EIP197_FETCH_COUNT * priv->config.rd_offset), 233 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG); 234 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); 234 235 235 236 /* Configure DMA tx control */ 236 237 val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS); 237 238 val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS); 238 239 val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG; 239 240 writel(val, 240 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG); 241 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG); 241 242 242 243 /* clear any pending interrupt */ 243 244 writel(GENMASK(7, 0), 244 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT); 245 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT); 245 246 246 247 /* enable ring interrupt */ 247 - val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); 248 + val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); 248 249 val |= EIP197_RDR_IRQ(i); 249 - writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); 250 + writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i)); 250 251 } 251 252 252 253 return 0; ··· 258 259 int i, ret; 259 260 260 261 /* Determine endianess and configure byte swap */ 261 - version = readl(priv->base + EIP197_HIA_VERSION); 262 - 
val = readl(priv->base + EIP197_HIA_MST_CTRL); 262 + version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION); 263 + val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); 263 264 264 265 if ((version & 0xffff) == EIP197_HIA_VERSION_BE) 265 266 val |= EIP197_MST_CTRL_BYTE_SWAP; 266 267 else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE) 267 268 val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24); 268 269 269 - writel(val, priv->base + EIP197_HIA_MST_CTRL); 270 - 270 + writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); 271 271 272 272 /* Configure wr/rd cache values */ 273 273 writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) | 274 274 EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS), 275 - priv->base + EIP197_MST_CTRL); 275 + EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL); 276 276 277 277 /* Interrupts reset */ 278 278 279 279 /* Disable all global interrupts */ 280 - writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL); 280 + writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL); 281 281 282 282 /* Clear any pending interrupt */ 283 - writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK); 283 + writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); 284 284 285 285 /* Data Fetch Engine configuration */ 286 286 287 287 /* Reset all DFE threads */ 288 288 writel(EIP197_DxE_THR_CTRL_RESET_PE, 289 - priv->base + EIP197_HIA_DFE_THR_CTRL); 289 + EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); 290 290 291 - /* Reset HIA input interface arbiter */ 292 - writel(EIP197_HIA_RA_PE_CTRL_RESET, 293 - priv->base + EIP197_HIA_RA_PE_CTRL); 291 + if (priv->version == EIP197) { 292 + /* Reset HIA input interface arbiter */ 293 + writel(EIP197_HIA_RA_PE_CTRL_RESET, 294 + EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL); 295 + } 294 296 295 297 /* DMA transfer size to use */ 296 298 val = EIP197_HIA_DFE_CFG_DIS_DEBUG; ··· 299 299 val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7); 300 300 val |= 
EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS); 301 301 val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS); 302 - writel(val, priv->base + EIP197_HIA_DFE_CFG); 302 + writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG); 303 303 304 304 /* Leave the DFE threads reset state */ 305 - writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL); 305 + writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); 306 306 307 307 /* Configure the procesing engine thresholds */ 308 308 writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9), 309 - priv->base + EIP197_PE_IN_DBUF_THRES); 309 + EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES); 310 310 writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7), 311 - priv->base + EIP197_PE_IN_TBUF_THRES); 311 + EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES); 312 312 313 - /* enable HIA input interface arbiter and rings */ 314 - writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0), 315 - priv->base + EIP197_HIA_RA_PE_CTRL); 313 + if (priv->version == EIP197) { 314 + /* enable HIA input interface arbiter and rings */ 315 + writel(EIP197_HIA_RA_PE_CTRL_EN | 316 + GENMASK(priv->config.rings - 1, 0), 317 + EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL); 318 + } 316 319 317 320 /* Data Store Engine configuration */ 318 321 319 322 /* Reset all DSE threads */ 320 323 writel(EIP197_DxE_THR_CTRL_RESET_PE, 321 - priv->base + EIP197_HIA_DSE_THR_CTRL); 324 + EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); 322 325 323 326 /* Wait for all DSE threads to complete */ 324 - while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) & 327 + while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) & 325 328 GENMASK(15, 12)) != GENMASK(15, 12)) 326 329 ; 327 330 ··· 333 330 val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8); 334 331 val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS); 335 332 val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE; 336 - val |= 
EIP197_HIA_DSE_CFG_EN_SINGLE_WR; 337 - writel(val, priv->base + EIP197_HIA_DSE_CFG); 333 + /* FIXME: instability issues can occur for EIP97 but disabling it impact 334 + * performances. 335 + */ 336 + if (priv->version == EIP197) 337 + val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR; 338 + writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG); 338 339 339 340 /* Leave the DSE threads reset state */ 340 - writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL); 341 + writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); 341 342 342 343 /* Configure the procesing engine thresholds */ 343 344 writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8), 344 - priv->base + EIP197_PE_OUT_DBUF_THRES); 345 + EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES); 345 346 346 347 /* Processing Engine configuration */ 347 348 ··· 355 348 val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC; 356 349 val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1; 357 350 val |= EIP197_ALG_SHA2; 358 - writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN); 351 + writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN); 359 352 360 353 /* Command Descriptor Rings prepare */ 361 354 for (i = 0; i < priv->config.rings; i++) { 362 355 /* Clear interrupts for this ring */ 363 356 writel(GENMASK(31, 0), 364 - priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i)); 357 + EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i)); 365 358 366 359 /* Disable external triggering */ 367 - writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG); 360 + writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG); 368 361 369 362 /* Clear the pending prepared counter */ 370 363 writel(EIP197_xDR_PREP_CLR_COUNT, 371 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT); 364 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT); 372 365 373 366 /* Clear the pending processed counter */ 374 367 writel(EIP197_xDR_PROC_CLR_COUNT, 375 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT); 368 + EIP197_HIA_CDR(priv, 
i) + EIP197_HIA_xDR_PROC_COUNT); 376 369 377 370 writel(0, 378 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR); 371 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR); 379 372 writel(0, 380 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR); 373 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR); 381 374 382 375 writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2, 383 - priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE); 376 + EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); 384 377 } 385 378 386 379 /* Result Descriptor Ring prepare */ 387 380 for (i = 0; i < priv->config.rings; i++) { 388 381 /* Disable external triggering*/ 389 - writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG); 382 + writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG); 390 383 391 384 /* Clear the pending prepared counter */ 392 385 writel(EIP197_xDR_PREP_CLR_COUNT, 393 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT); 386 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT); 394 387 395 388 /* Clear the pending processed counter */ 396 389 writel(EIP197_xDR_PROC_CLR_COUNT, 397 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT); 390 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT); 398 391 399 392 writel(0, 400 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR); 393 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR); 401 394 writel(0, 402 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR); 395 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR); 403 396 404 397 /* Ring size */ 405 398 writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2, 406 - priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE); 399 + EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE); 407 400 } 408 401 409 402 /* Enable command descriptor rings */ 410 403 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), 411 - priv->base + EIP197_HIA_DFE_THR_CTRL); 404 + 
EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL); 412 405 413 406 /* Enable result descriptor rings */ 414 407 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), 415 - priv->base + EIP197_HIA_DSE_THR_CTRL); 408 + EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL); 416 409 417 410 /* Clear any HIA interrupt */ 418 - writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK); 411 + writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK); 419 412 420 - eip197_trc_cache_init(priv); 413 + if (priv->version == EIP197) { 414 + eip197_trc_cache_init(priv); 421 415 422 - ret = eip197_load_firmwares(priv); 423 - if (ret) 424 - return ret; 416 + ret = eip197_load_firmwares(priv); 417 + if (ret) 418 + return ret; 419 + } 425 420 426 421 safexcel_hw_setup_cdesc_rings(priv); 427 422 safexcel_hw_setup_rdesc_rings(priv); 428 423 429 424 return 0; 425 + } 426 + 427 + /* Called with ring's lock taken */ 428 + static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv, 429 + int ring, int reqs) 430 + { 431 + int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ); 432 + 433 + if (!coal) 434 + return 0; 435 + 436 + /* Configure when we want an interrupt */ 437 + writel(EIP197_HIA_RDR_THRESH_PKT_MODE | 438 + EIP197_HIA_RDR_THRESH_PROC_PKT(coal), 439 + EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH); 440 + 441 + return coal; 430 442 } 431 443 432 444 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring) ··· 455 429 struct safexcel_request *request; 456 430 int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; 457 431 458 - priv->ring[ring].need_dequeue = false; 432 + /* If a request wasn't properly dequeued because of a lack of resources, 433 + * proceeded it first, 434 + */ 435 + req = priv->ring[ring].req; 436 + backlog = priv->ring[ring].backlog; 437 + if (req) 438 + goto handle_req; 459 439 460 - do { 440 + while (true) { 461 441 spin_lock_bh(&priv->ring[ring].queue_lock); 462 442 backlog = 
crypto_get_backlog(&priv->ring[ring].queue); 463 443 req = crypto_dequeue_request(&priv->ring[ring].queue); 464 444 spin_unlock_bh(&priv->ring[ring].queue_lock); 465 445 466 - if (!req) 467 - goto finalize; 468 - 469 - request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); 470 - if (!request) { 471 - spin_lock_bh(&priv->ring[ring].queue_lock); 472 - crypto_enqueue_request(&priv->ring[ring].queue, req); 473 - spin_unlock_bh(&priv->ring[ring].queue_lock); 474 - 475 - priv->ring[ring].need_dequeue = true; 446 + if (!req) { 447 + priv->ring[ring].req = NULL; 448 + priv->ring[ring].backlog = NULL; 476 449 goto finalize; 477 450 } 451 + 452 + handle_req: 453 + request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req)); 454 + if (!request) 455 + goto request_failed; 478 456 479 457 ctx = crypto_tfm_ctx(req->tfm); 480 458 ret = ctx->send(req, ring, request, &commands, &results); 481 459 if (ret) { 482 460 kfree(request); 483 - req->complete(req, ret); 484 - priv->ring[ring].need_dequeue = true; 485 - goto finalize; 461 + goto request_failed; 486 462 } 487 463 488 464 if (backlog) ··· 496 468 497 469 cdesc += commands; 498 470 rdesc += results; 499 - } while (nreq++ < EIP197_MAX_BATCH_SZ); 471 + nreq++; 472 + } 473 + 474 + request_failed: 475 + /* Not enough resources to handle all the requests. Bail out and save 476 + * the request and the backlog for the next dequeue call (per-ring). 
477 + */ 478 + priv->ring[ring].req = req; 479 + priv->ring[ring].backlog = backlog; 500 480 501 481 finalize: 502 - if (nreq == EIP197_MAX_BATCH_SZ) 503 - priv->ring[ring].need_dequeue = true; 504 - else if (!nreq) 482 + if (!nreq) 505 483 return; 506 484 507 - spin_lock_bh(&priv->ring[ring].lock); 485 + spin_lock_bh(&priv->ring[ring].egress_lock); 508 486 509 - /* Configure when we want an interrupt */ 510 - writel(EIP197_HIA_RDR_THRESH_PKT_MODE | 511 - EIP197_HIA_RDR_THRESH_PROC_PKT(nreq), 512 - priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH); 487 + if (!priv->ring[ring].busy) { 488 + nreq -= safexcel_try_push_requests(priv, ring, nreq); 489 + if (nreq) 490 + priv->ring[ring].busy = true; 491 + } 492 + 493 + priv->ring[ring].requests_left += nreq; 494 + 495 + spin_unlock_bh(&priv->ring[ring].egress_lock); 513 496 514 497 /* let the RDR know we have pending descriptors */ 515 498 writel((rdesc * priv->config.rd_offset) << 2, 516 - priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT); 499 + EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); 517 500 518 501 /* let the CDR know we have pending descriptors */ 519 502 writel((cdesc * priv->config.cd_offset) << 2, 520 - priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT); 521 - 522 - spin_unlock_bh(&priv->ring[ring].lock); 503 + EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT); 523 504 } 524 505 525 506 void safexcel_free_context(struct safexcel_crypto_priv *priv, ··· 577 540 } 578 541 579 542 int safexcel_invalidate_cache(struct crypto_async_request *async, 580 - struct safexcel_context *ctx, 581 543 struct safexcel_crypto_priv *priv, 582 544 dma_addr_t ctxr_dma, int ring, 583 545 struct safexcel_request *request) ··· 623 587 { 624 588 struct safexcel_request *sreq; 625 589 struct safexcel_context *ctx; 626 - int ret, i, nreq, ndesc = 0; 590 + int ret, i, nreq, ndesc, tot_descs, done; 627 591 bool should_complete; 628 592 629 - nreq = readl(priv->base + EIP197_HIA_RDR(ring) 
+ EIP197_HIA_xDR_PROC_COUNT); 630 - nreq >>= 24; 631 - nreq &= GENMASK(6, 0); 593 + handle_results: 594 + tot_descs = 0; 595 + 596 + nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); 597 + nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET; 598 + nreq &= EIP197_xDR_PROC_xD_PKT_MASK; 632 599 if (!nreq) 633 - return; 600 + goto requests_left; 634 601 635 602 for (i = 0; i < nreq; i++) { 636 603 spin_lock_bh(&priv->ring[ring].egress_lock); ··· 648 609 if (ndesc < 0) { 649 610 kfree(sreq); 650 611 dev_err(priv->dev, "failed to handle result (%d)", ndesc); 651 - return; 612 + goto acknowledge; 652 613 } 653 - 654 - writel(EIP197_xDR_PROC_xD_PKT(1) | 655 - EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset), 656 - priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT); 657 614 658 615 if (should_complete) { 659 616 local_bh_disable(); ··· 658 623 } 659 624 660 625 kfree(sreq); 626 + tot_descs += ndesc; 661 627 } 628 + 629 + acknowledge: 630 + if (i) { 631 + writel(EIP197_xDR_PROC_xD_PKT(i) | 632 + EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset), 633 + EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); 634 + } 635 + 636 + /* If the number of requests overflowed the counter, try to proceed more 637 + * requests. 
638 + */ 639 + if (nreq == EIP197_xDR_PROC_xD_PKT_MASK) 640 + goto handle_results; 641 + 642 + requests_left: 643 + spin_lock_bh(&priv->ring[ring].egress_lock); 644 + 645 + done = safexcel_try_push_requests(priv, ring, 646 + priv->ring[ring].requests_left); 647 + 648 + priv->ring[ring].requests_left -= done; 649 + if (!done && !priv->ring[ring].requests_left) 650 + priv->ring[ring].busy = false; 651 + 652 + spin_unlock_bh(&priv->ring[ring].egress_lock); 662 653 } 663 654 664 - static void safexcel_handle_result_work(struct work_struct *work) 655 + static void safexcel_dequeue_work(struct work_struct *work) 665 656 { 666 657 struct safexcel_work_data *data = 667 658 container_of(work, struct safexcel_work_data, work); 668 - struct safexcel_crypto_priv *priv = data->priv; 669 659 670 - safexcel_handle_result_descriptor(priv, data->ring); 671 - 672 - if (priv->ring[data->ring].need_dequeue) 673 - safexcel_dequeue(data->priv, data->ring); 660 + safexcel_dequeue(data->priv, data->ring); 674 661 } 675 662 676 663 struct safexcel_ring_irq_data { ··· 704 647 { 705 648 struct safexcel_ring_irq_data *irq_data = data; 706 649 struct safexcel_crypto_priv *priv = irq_data->priv; 707 - int ring = irq_data->ring; 650 + int ring = irq_data->ring, rc = IRQ_NONE; 708 651 u32 status, stat; 709 652 710 - status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring)); 653 + status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring)); 711 654 if (!status) 712 - return IRQ_NONE; 655 + return rc; 713 656 714 657 /* RDR interrupts */ 715 658 if (status & EIP197_RDR_IRQ(ring)) { 716 - stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT); 659 + stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT); 717 660 718 661 if (unlikely(stat & EIP197_xDR_ERR)) { 719 662 /* ··· 723 666 */ 724 667 dev_err(priv->dev, "RDR: fatal error."); 725 668 } else if (likely(stat & EIP197_xDR_THRESH)) { 726 - queue_work(priv->ring[ring].workqueue, 
&priv->ring[ring].work_data.work); 669 + rc = IRQ_WAKE_THREAD; 727 670 } 728 671 729 672 /* ACK the interrupts */ 730 673 writel(stat & 0xff, 731 - priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT); 674 + EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT); 732 675 } 733 676 734 677 /* ACK the interrupts */ 735 - writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring)); 678 + writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring)); 679 + 680 + return rc; 681 + } 682 + 683 + static irqreturn_t safexcel_irq_ring_thread(int irq, void *data) 684 + { 685 + struct safexcel_ring_irq_data *irq_data = data; 686 + struct safexcel_crypto_priv *priv = irq_data->priv; 687 + int ring = irq_data->ring; 688 + 689 + safexcel_handle_result_descriptor(priv, ring); 690 + 691 + queue_work(priv->ring[ring].workqueue, 692 + &priv->ring[ring].work_data.work); 736 693 737 694 return IRQ_HANDLED; 738 695 } 739 696 740 697 static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name, 741 698 irq_handler_t handler, 699 + irq_handler_t threaded_handler, 742 700 struct safexcel_ring_irq_data *ring_irq_priv) 743 701 { 744 702 int ret, irq = platform_get_irq_byname(pdev, name); ··· 763 691 return irq; 764 692 } 765 693 766 - ret = devm_request_irq(&pdev->dev, irq, handler, 0, 767 - dev_name(&pdev->dev), ring_irq_priv); 694 + ret = devm_request_threaded_irq(&pdev->dev, irq, handler, 695 + threaded_handler, IRQF_ONESHOT, 696 + dev_name(&pdev->dev), ring_irq_priv); 768 697 if (ret) { 769 698 dev_err(&pdev->dev, "unable to request IRQ %d\n", irq); 770 699 return ret; ··· 828 755 { 829 756 u32 val, mask; 830 757 831 - val = readl(priv->base + EIP197_HIA_OPTIONS); 758 + val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 832 759 val = (val & GENMASK(27, 25)) >> 25; 833 760 mask = BIT(val) - 1; 834 761 835 - val = readl(priv->base + EIP197_HIA_OPTIONS); 762 + val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS); 836 763 priv->config.rings = min_t(u32, val 
& GENMASK(3, 0), max_rings); 837 764 838 765 priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32)); ··· 840 767 841 768 priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32)); 842 769 priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask; 770 + } 771 + 772 + static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) 773 + { 774 + struct safexcel_register_offsets *offsets = &priv->offsets; 775 + 776 + if (priv->version == EIP197) { 777 + offsets->hia_aic = EIP197_HIA_AIC_BASE; 778 + offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE; 779 + offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE; 780 + offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE; 781 + offsets->hia_dfe = EIP197_HIA_DFE_BASE; 782 + offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE; 783 + offsets->hia_dse = EIP197_HIA_DSE_BASE; 784 + offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE; 785 + offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE; 786 + offsets->pe = EIP197_PE_BASE; 787 + } else { 788 + offsets->hia_aic = EIP97_HIA_AIC_BASE; 789 + offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE; 790 + offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE; 791 + offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE; 792 + offsets->hia_dfe = EIP97_HIA_DFE_BASE; 793 + offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE; 794 + offsets->hia_dse = EIP97_HIA_DSE_BASE; 795 + offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE; 796 + offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE; 797 + offsets->pe = EIP97_PE_BASE; 798 + } 843 799 } 844 800 845 801 static int safexcel_probe(struct platform_device *pdev) ··· 883 781 return -ENOMEM; 884 782 885 783 priv->dev = dev; 784 + priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); 785 + 786 + safexcel_init_register_offsets(priv); 886 787 887 788 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 888 789 priv->base = devm_ioremap_resource(dev, res); ··· 944 839 945 840 snprintf(irq_name, 6, "ring%d", i); 946 841 irq = safexcel_request_ring_irq(pdev, 
irq_name, safexcel_irq_ring, 842 + safexcel_irq_ring_thread, 947 843 ring_irq); 948 844 if (irq < 0) { 949 845 ret = irq; ··· 953 847 954 848 priv->ring[i].work_data.priv = priv; 955 849 priv->ring[i].work_data.ring = i; 956 - INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work); 850 + INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work); 957 851 958 852 snprintf(wq_name, 9, "wq_ring%d", i); 959 853 priv->ring[i].workqueue = create_singlethread_workqueue(wq_name); ··· 961 855 ret = -ENOMEM; 962 856 goto err_clk; 963 857 } 858 + 859 + priv->ring[i].requests_left = 0; 860 + priv->ring[i].busy = false; 964 861 965 862 crypto_init_queue(&priv->ring[i].queue, 966 863 EIP197_DEFAULT_RING_SIZE); ··· 1012 903 } 1013 904 1014 905 static const struct of_device_id safexcel_of_match_table[] = { 1015 - { .compatible = "inside-secure,safexcel-eip197" }, 906 + { 907 + .compatible = "inside-secure,safexcel-eip97", 908 + .data = (void *)EIP97, 909 + }, 910 + { 911 + .compatible = "inside-secure,safexcel-eip197", 912 + .data = (void *)EIP197, 913 + }, 1016 914 {}, 1017 915 }; 1018 916
+123 -50
drivers/crypto/inside-secure/safexcel.h
··· 19 19 #define EIP197_HIA_VERSION_BE 0x35ca 20 20 21 21 /* Static configuration */ 22 - #define EIP197_DEFAULT_RING_SIZE 64 22 + #define EIP197_DEFAULT_RING_SIZE 400 23 23 #define EIP197_MAX_TOKENS 5 24 24 #define EIP197_MAX_RINGS 4 25 25 #define EIP197_FETCH_COUNT 1 26 - #define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE 26 + #define EIP197_MAX_BATCH_SZ 64 27 27 28 28 #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \ 29 29 GFP_KERNEL : GFP_ATOMIC) 30 30 31 + /* Register base offsets */ 32 + #define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic) 33 + #define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g) 34 + #define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r) 35 + #define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr) 36 + #define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe) 37 + #define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr) 38 + #define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse) 39 + #define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr) 40 + #define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg) 41 + #define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe) 42 + 43 + /* EIP197 base offsets */ 44 + #define EIP197_HIA_AIC_BASE 0x90000 45 + #define EIP197_HIA_AIC_G_BASE 0x90000 46 + #define EIP197_HIA_AIC_R_BASE 0x90800 47 + #define EIP197_HIA_AIC_xDR_BASE 0x80000 48 + #define EIP197_HIA_DFE_BASE 0x8c000 49 + #define EIP197_HIA_DFE_THR_BASE 0x8c040 50 + #define EIP197_HIA_DSE_BASE 0x8d000 51 + #define EIP197_HIA_DSE_THR_BASE 0x8d040 52 + #define EIP197_HIA_GEN_CFG_BASE 0xf0000 53 + #define EIP197_PE_BASE 0xa0000 54 + 55 + /* EIP97 base offsets */ 56 + #define EIP97_HIA_AIC_BASE 0x0 57 + #define EIP97_HIA_AIC_G_BASE 0x0 58 + #define EIP97_HIA_AIC_R_BASE 0x0 59 + #define EIP97_HIA_AIC_xDR_BASE 0x0 60 + #define EIP97_HIA_DFE_BASE 0xf000 61 + 
#define EIP97_HIA_DFE_THR_BASE 0xf200 62 + #define EIP97_HIA_DSE_BASE 0xf400 63 + #define EIP97_HIA_DSE_THR_BASE 0xf600 64 + #define EIP97_HIA_GEN_CFG_BASE 0x10000 65 + #define EIP97_PE_BASE 0x10000 66 + 31 67 /* CDR/RDR register offsets */ 32 - #define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000) 33 - #define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r)) 34 - #define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800) 35 - #define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0 36 - #define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4 37 - #define EIP197_HIA_xDR_RING_SIZE 0x18 38 - #define EIP197_HIA_xDR_DESC_SIZE 0x1c 39 - #define EIP197_HIA_xDR_CFG 0x20 40 - #define EIP197_HIA_xDR_DMA_CFG 0x24 41 - #define EIP197_HIA_xDR_THRESH 0x28 42 - #define EIP197_HIA_xDR_PREP_COUNT 0x2c 43 - #define EIP197_HIA_xDR_PROC_COUNT 0x30 44 - #define EIP197_HIA_xDR_PREP_PNTR 0x34 45 - #define EIP197_HIA_xDR_PROC_PNTR 0x38 46 - #define EIP197_HIA_xDR_STAT 0x3c 68 + #define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000) 69 + #define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r)) 70 + #define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800) 71 + #define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000 72 + #define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004 73 + #define EIP197_HIA_xDR_RING_SIZE 0x0018 74 + #define EIP197_HIA_xDR_DESC_SIZE 0x001c 75 + #define EIP197_HIA_xDR_CFG 0x0020 76 + #define EIP197_HIA_xDR_DMA_CFG 0x0024 77 + #define EIP197_HIA_xDR_THRESH 0x0028 78 + #define EIP197_HIA_xDR_PREP_COUNT 0x002c 79 + #define EIP197_HIA_xDR_PROC_COUNT 0x0030 80 + #define EIP197_HIA_xDR_PREP_PNTR 0x0034 81 + #define EIP197_HIA_xDR_PROC_PNTR 0x0038 82 + #define EIP197_HIA_xDR_STAT 0x003c 47 83 48 84 /* register offsets */ 49 - #define EIP197_HIA_DFE_CFG 0x8c000 50 - #define EIP197_HIA_DFE_THR_CTRL 0x8c040 51 - #define EIP197_HIA_DFE_THR_STAT 0x8c044 52 - #define EIP197_HIA_DSE_CFG 0x8d000 53 - #define EIP197_HIA_DSE_THR_CTRL 0x8d040 54 - #define EIP197_HIA_DSE_THR_STAT 0x8d044 55 
- #define EIP197_HIA_RA_PE_CTRL 0x90010 56 - #define EIP197_HIA_RA_PE_STAT 0x90014 85 + #define EIP197_HIA_DFE_CFG 0x0000 86 + #define EIP197_HIA_DFE_THR_CTRL 0x0000 87 + #define EIP197_HIA_DFE_THR_STAT 0x0004 88 + #define EIP197_HIA_DSE_CFG 0x0000 89 + #define EIP197_HIA_DSE_THR_CTRL 0x0000 90 + #define EIP197_HIA_DSE_THR_STAT 0x0004 91 + #define EIP197_HIA_RA_PE_CTRL 0x0010 92 + #define EIP197_HIA_RA_PE_STAT 0x0014 57 93 #define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000) 58 - #define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r)) 59 - #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r)) 60 - #define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r)) 61 - #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r)) 62 - #define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808 63 - #define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810 64 - #define EIP197_HIA_AIC_G_ACK 0x9f810 65 - #define EIP197_HIA_MST_CTRL 0x9fff4 66 - #define EIP197_HIA_OPTIONS 0x9fff8 67 - #define EIP197_HIA_VERSION 0x9fffc 68 - #define EIP197_PE_IN_DBUF_THRES 0xa0000 69 - #define EIP197_PE_IN_TBUF_THRES 0xa0100 70 - #define EIP197_PE_ICE_SCRATCH_RAM 0xa0800 71 - #define EIP197_PE_ICE_PUE_CTRL 0xa0c80 72 - #define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04 73 - #define EIP197_PE_ICE_FPP_CTRL 0xa0d80 74 - #define EIP197_PE_ICE_RAM_CTRL 0xa0ff0 75 - #define EIP197_PE_EIP96_FUNCTION_EN 0xa1004 76 - #define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008 77 - #define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c 78 - #define EIP197_PE_OUT_DBUF_THRES 0xa1c00 79 - #define EIP197_PE_OUT_TBUF_THRES 0xa1d00 94 + #define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r)) 95 + #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r)) 96 + #define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r)) 97 + #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r)) 98 + #define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808 99 + #define 
EIP197_HIA_AIC_G_ENABLED_STAT 0xf810 100 + #define EIP197_HIA_AIC_G_ACK 0xf810 101 + #define EIP197_HIA_MST_CTRL 0xfff4 102 + #define EIP197_HIA_OPTIONS 0xfff8 103 + #define EIP197_HIA_VERSION 0xfffc 104 + #define EIP197_PE_IN_DBUF_THRES 0x0000 105 + #define EIP197_PE_IN_TBUF_THRES 0x0100 106 + #define EIP197_PE_ICE_SCRATCH_RAM 0x0800 107 + #define EIP197_PE_ICE_PUE_CTRL 0x0c80 108 + #define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04 109 + #define EIP197_PE_ICE_FPP_CTRL 0x0d80 110 + #define EIP197_PE_ICE_RAM_CTRL 0x0ff0 111 + #define EIP197_PE_EIP96_FUNCTION_EN 0x1004 112 + #define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008 113 + #define EIP197_PE_EIP96_CONTEXT_STAT 0x100c 114 + #define EIP197_PE_OUT_DBUF_THRES 0x1c00 115 + #define EIP197_PE_OUT_TBUF_THRES 0x1d00 116 + #define EIP197_MST_CTRL 0xfff4 117 + 118 + /* EIP197-specific registers, no indirection */ 80 119 #define EIP197_CLASSIFICATION_RAMS 0xe0000 81 120 #define EIP197_TRC_CTRL 0xf0800 82 121 #define EIP197_TRC_LASTRES 0xf0804 ··· 129 90 #define EIP197_TRC_ECCDATASTAT 0xf083c 130 91 #define EIP197_TRC_ECCDATA 0xf0840 131 92 #define EIP197_CS_RAM_CTRL 0xf7ff0 132 - #define EIP197_MST_CTRL 0xffff4 133 93 134 94 /* EIP197_HIA_xDR_DESC_SIZE */ 135 95 #define EIP197_xDR_DESC_MODE_64BIT BIT(31) ··· 155 117 #define EIP197_xDR_PREP_CLR_COUNT BIT(31) 156 118 157 119 /* EIP197_HIA_xDR_PROC_COUNT */ 120 + #define EIP197_xDR_PROC_xD_PKT_OFFSET 24 121 + #define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0) 158 122 #define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2) 159 123 #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24) 160 124 #define EIP197_xDR_PROC_CLR_COUNT BIT(31) ··· 503 463 int ring; 504 464 }; 505 465 466 + enum safexcel_eip_version { 467 + EIP97, 468 + EIP197, 469 + }; 470 + 471 + struct safexcel_register_offsets { 472 + u32 hia_aic; 473 + u32 hia_aic_g; 474 + u32 hia_aic_r; 475 + u32 hia_aic_xdr; 476 + u32 hia_dfe; 477 + u32 hia_dfe_thr; 478 + u32 hia_dse; 479 + u32 hia_dse_thr; 480 + u32 hia_gen_cfg; 481 + u32 pe; 482 + }; 483 + 506 
484 struct safexcel_crypto_priv { 507 485 void __iomem *base; 508 486 struct device *dev; 509 487 struct clk *clk; 510 488 struct safexcel_config config; 489 + 490 + enum safexcel_eip_version version; 491 + struct safexcel_register_offsets offsets; 511 492 512 493 /* context DMA pool */ 513 494 struct dma_pool *context_pool; ··· 550 489 /* queue */ 551 490 struct crypto_queue queue; 552 491 spinlock_t queue_lock; 553 - bool need_dequeue; 492 + 493 + /* Number of requests in the engine that needs the threshold 494 + * interrupt to be set up. 495 + */ 496 + int requests_left; 497 + 498 + /* The ring is currently handling at least one request */ 499 + bool busy; 500 + 501 + /* Store for current requests when bailing out of the dequeueing 502 + * function when no enough resources are available. 503 + */ 504 + struct crypto_async_request *req; 505 + struct crypto_async_request *backlog; 554 506 } ring[EIP197_MAX_RINGS]; 555 507 }; 556 508 ··· 613 539 struct crypto_async_request *req, 614 540 int result_sz); 615 541 int safexcel_invalidate_cache(struct crypto_async_request *async, 616 - struct safexcel_context *ctx, 617 542 struct safexcel_crypto_priv *priv, 618 543 dma_addr_t ctxr_dma, int ring, 619 544 struct safexcel_request *request);
+34 -19
drivers/crypto/inside-secure/safexcel_cipher.c
··· 27 27 struct safexcel_context base; 28 28 struct safexcel_crypto_priv *priv; 29 29 30 - enum safexcel_cipher_direction direction; 31 30 u32 mode; 32 31 33 32 __le32 key[8]; ··· 34 35 }; 35 36 36 37 struct safexcel_cipher_req { 38 + enum safexcel_cipher_direction direction; 37 39 bool needs_inv; 38 40 }; 39 41 ··· 69 69 { 70 70 struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); 71 71 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 72 + struct safexcel_crypto_priv *priv = ctx->priv; 72 73 struct crypto_aes_ctx aes; 73 74 int ret, i; 74 75 ··· 79 78 return ret; 80 79 } 81 80 82 - for (i = 0; i < len / sizeof(u32); i++) { 83 - if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { 84 - ctx->base.needs_inv = true; 85 - break; 81 + if (priv->version == EIP197 && ctx->base.ctxr_dma) { 82 + for (i = 0; i < len / sizeof(u32); i++) { 83 + if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) { 84 + ctx->base.needs_inv = true; 85 + break; 86 + } 86 87 } 87 88 } 88 89 ··· 98 95 } 99 96 100 97 static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, 98 + struct crypto_async_request *async, 101 99 struct safexcel_command_desc *cdesc) 102 100 { 103 101 struct safexcel_crypto_priv *priv = ctx->priv; 102 + struct skcipher_request *req = skcipher_request_cast(async); 103 + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); 104 104 int ctrl_size; 105 105 106 - if (ctx->direction == SAFEXCEL_ENCRYPT) 106 + if (sreq->direction == SAFEXCEL_ENCRYPT) 107 107 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT; 108 108 else 109 109 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN; ··· 249 243 n_cdesc++; 250 244 251 245 if (n_cdesc == 1) { 252 - safexcel_context_control(ctx, cdesc); 246 + safexcel_context_control(ctx, async, cdesc); 253 247 safexcel_cipher_token(ctx, async, cdesc, req->cryptlen); 254 248 } 255 249 ··· 359 353 if (enq_ret != -EINPROGRESS) 360 354 *ret = enq_ret; 361 355 362 - if (!priv->ring[ring].need_dequeue) 363 - 
safexcel_dequeue(priv, ring); 356 + queue_work(priv->ring[ring].workqueue, 357 + &priv->ring[ring].work_data.work); 364 358 365 359 *should_complete = false; 366 360 ··· 396 390 struct safexcel_crypto_priv *priv = ctx->priv; 397 391 int ret; 398 392 399 - ret = safexcel_invalidate_cache(async, &ctx->base, priv, 393 + ret = safexcel_invalidate_cache(async, priv, 400 394 ctx->base.ctxr_dma, ring, request); 401 395 if (unlikely(ret)) 402 396 return ret; ··· 412 406 int *commands, int *results) 413 407 { 414 408 struct skcipher_request *req = skcipher_request_cast(async); 409 + struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); 415 410 struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); 411 + struct safexcel_crypto_priv *priv = ctx->priv; 416 412 int ret; 413 + 414 + BUG_ON(priv->version == EIP97 && sreq->needs_inv); 417 415 418 416 if (sreq->needs_inv) 419 417 ret = safexcel_cipher_send_inv(async, ring, request, ··· 453 443 crypto_enqueue_request(&priv->ring[ring].queue, &req->base); 454 444 spin_unlock_bh(&priv->ring[ring].queue_lock); 455 445 456 - if (!priv->ring[ring].need_dequeue) 457 - safexcel_dequeue(priv, ring); 446 + queue_work(priv->ring[ring].workqueue, 447 + &priv->ring[ring].work_data.work); 458 448 459 449 wait_for_completion_interruptible(&result.completion); 460 450 ··· 477 467 int ret, ring; 478 468 479 469 sreq->needs_inv = false; 480 - ctx->direction = dir; 470 + sreq->direction = dir; 481 471 ctx->mode = mode; 482 472 483 473 if (ctx->base.ctxr) { 484 - if (ctx->base.needs_inv) { 474 + if (priv->version == EIP197 && ctx->base.needs_inv) { 485 475 sreq->needs_inv = true; 486 476 ctx->base.needs_inv = false; 487 477 } ··· 500 490 ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base); 501 491 spin_unlock_bh(&priv->ring[ring].queue_lock); 502 492 503 - if (!priv->ring[ring].need_dequeue) 504 - safexcel_dequeue(priv, ring); 493 + queue_work(priv->ring[ring].workqueue, 494 + &priv->ring[ring].work_data.work); 505 495 
506 496 return ret; 507 497 } ··· 549 539 550 540 memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32)); 551 541 552 - ret = safexcel_cipher_exit_inv(tfm); 553 - if (ret) 554 - dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); 542 + if (priv->version == EIP197) { 543 + ret = safexcel_cipher_exit_inv(tfm); 544 + if (ret) 545 + dev_warn(priv->dev, "cipher: invalidation error %d\n", ret); 546 + } else { 547 + dma_pool_free(priv->context_pool, ctx->base.ctxr, 548 + ctx->base.ctxr_dma); 549 + } 555 550 } 556 551 557 552 struct safexcel_alg_template safexcel_alg_ecb_aes = {
+83 -40
drivers/crypto/inside-secure/safexcel_hash.c
··· 14 14 #include <linux/dma-mapping.h> 15 15 #include <linux/dmapool.h> 16 16 17 - 18 17 #include "safexcel.h" 19 18 20 19 struct safexcel_ahash_ctx { ··· 32 33 bool finish; 33 34 bool hmac; 34 35 bool needs_inv; 36 + 37 + int nents; 35 38 36 39 u8 state_sz; /* expected sate size, only set once */ 37 40 u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); ··· 153 152 memcpy(areq->result, sreq->state, 154 153 crypto_ahash_digestsize(ahash)); 155 154 156 - dma_unmap_sg(priv->dev, areq->src, 157 - sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); 155 + if (sreq->nents) { 156 + dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE); 157 + sreq->nents = 0; 158 + } 158 159 159 160 safexcel_free_context(priv, async, sreq->state_sz); 160 161 ··· 181 178 struct safexcel_command_desc *cdesc, *first_cdesc = NULL; 182 179 struct safexcel_result_desc *rdesc; 183 180 struct scatterlist *sg; 184 - int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; 181 + int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0; 185 182 186 183 queued = len = req->len - req->processed; 187 184 if (queued < crypto_ahash_blocksize(ahash)) ··· 189 186 else 190 187 cache_len = queued - areq->nbytes; 191 188 192 - /* 193 - * If this is not the last request and the queued data does not fit 194 - * into full blocks, cache it for the next send() call. 195 - */ 196 - extra = queued & (crypto_ahash_blocksize(ahash) - 1); 197 - if (!req->last_req && extra) { 198 - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 199 - req->cache_next, extra, areq->nbytes - extra); 189 + if (!req->last_req) { 190 + /* If this is not the last request and the queued data does not 191 + * fit into full blocks, cache it for the next send() call. 192 + */ 193 + extra = queued & (crypto_ahash_blocksize(ahash) - 1); 194 + if (!extra) 195 + /* If this is not the last request and the queued data 196 + * is a multiple of a block, cache the last one for now. 
197 + */ 198 + extra = queued - crypto_ahash_blocksize(ahash); 200 199 201 - queued -= extra; 202 - len -= extra; 200 + if (extra) { 201 + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 202 + req->cache_next, extra, 203 + areq->nbytes - extra); 204 + 205 + queued -= extra; 206 + len -= extra; 207 + 208 + if (!queued) { 209 + *commands = 0; 210 + *results = 0; 211 + return 0; 212 + } 213 + } 203 214 } 204 215 205 216 spin_lock_bh(&priv->ring[ring].egress_lock); ··· 251 234 } 252 235 253 236 /* Now handle the current ahash request buffer(s) */ 254 - nents = dma_map_sg(priv->dev, areq->src, 255 - sg_nents_for_len(areq->src, areq->nbytes), 256 - DMA_TO_DEVICE); 257 - if (!nents) { 237 + req->nents = dma_map_sg(priv->dev, areq->src, 238 + sg_nents_for_len(areq->src, areq->nbytes), 239 + DMA_TO_DEVICE); 240 + if (!req->nents) { 258 241 ret = -ENOMEM; 259 242 goto cdesc_rollback; 260 243 } 261 244 262 - for_each_sg(areq->src, sg, nents, i) { 245 + for_each_sg(areq->src, sg, req->nents, i) { 263 246 int sglen = sg_dma_len(sg); 264 247 265 248 /* Do not overflow the request */ ··· 399 382 if (enq_ret != -EINPROGRESS) 400 383 *ret = enq_ret; 401 384 402 - if (!priv->ring[ring].need_dequeue) 403 - safexcel_dequeue(priv, ring); 385 + queue_work(priv->ring[ring].workqueue, 386 + &priv->ring[ring].work_data.work); 404 387 405 388 *should_complete = false; 406 389 ··· 414 397 struct ahash_request *areq = ahash_request_cast(async); 415 398 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 416 399 int err; 400 + 401 + BUG_ON(priv->version == EIP97 && req->needs_inv); 417 402 418 403 if (req->needs_inv) { 419 404 req->needs_inv = false; ··· 437 418 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); 438 419 int ret; 439 420 440 - ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, 421 + ret = safexcel_invalidate_cache(async, ctx->priv, 441 422 ctx->base.ctxr_dma, ring, request); 442 423 if (unlikely(ret)) 443 424 return ret; ··· 490 
471 crypto_enqueue_request(&priv->ring[ring].queue, &req->base); 491 472 spin_unlock_bh(&priv->ring[ring].queue_lock); 492 473 493 - if (!priv->ring[ring].need_dequeue) 494 - safexcel_dequeue(priv, ring); 474 + queue_work(priv->ring[ring].workqueue, 475 + &priv->ring[ring].work_data.work); 495 476 496 477 wait_for_completion_interruptible(&result.completion); 497 478 ··· 504 485 return 0; 505 486 } 506 487 488 + /* safexcel_ahash_cache: cache data until at least one request can be sent to 489 + * the engine, aka. when there is at least 1 block size in the pipe. 490 + */ 507 491 static int safexcel_ahash_cache(struct ahash_request *areq) 508 492 { 509 493 struct safexcel_ahash_req *req = ahash_request_ctx(areq); 510 494 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 511 495 int queued, cache_len; 512 496 497 + /* cache_len: everyting accepted by the driver but not sent yet, 498 + * tot sz handled by update() - last req sz - tot sz handled by send() 499 + */ 513 500 cache_len = req->len - areq->nbytes - req->processed; 501 + /* queued: everything accepted by the driver which will be handled by 502 + * the next send() calls. 
503 + * tot sz handled by update() - tot sz handled by send() 504 + */ 514 505 queued = req->len - req->processed; 515 506 516 507 /* ··· 534 505 return areq->nbytes; 535 506 } 536 507 537 - /* We could'nt cache all the data */ 508 + /* We couldn't cache all the data */ 538 509 return -E2BIG; 539 510 } 540 511 ··· 547 518 548 519 req->needs_inv = false; 549 520 550 - if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) 551 - ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); 552 - 553 521 if (ctx->base.ctxr) { 522 + if (priv->version == EIP197 && 523 + !ctx->base.needs_inv && req->processed && 524 + ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) 525 + /* We're still setting needs_inv here, even though it is 526 + * cleared right away, because the needs_inv flag can be 527 + * set in other functions and we want to keep the same 528 + * logic. 529 + */ 530 + ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); 531 + 554 532 if (ctx->base.needs_inv) { 555 533 ctx->base.needs_inv = false; 556 534 req->needs_inv = true; ··· 577 541 ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base); 578 542 spin_unlock_bh(&priv->ring[ring].queue_lock); 579 543 580 - if (!priv->ring[ring].need_dequeue) 581 - safexcel_dequeue(priv, ring); 544 + queue_work(priv->ring[ring].workqueue, 545 + &priv->ring[ring].work_data.work); 582 546 583 547 return ret; 584 548 } ··· 661 625 export->processed = req->processed; 662 626 663 627 memcpy(export->state, req->state, req->state_sz); 664 - memset(export->cache, 0, crypto_ahash_blocksize(ahash)); 665 628 memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash)); 666 629 667 630 return 0; ··· 742 707 if (!ctx->base.ctxr) 743 708 return; 744 709 745 - ret = safexcel_ahash_exit_inv(tfm); 746 - if (ret) 747 - dev_warn(priv->dev, "hash: invalidation error %d\n", ret); 710 + if (priv->version == EIP197) { 711 + ret = safexcel_ahash_exit_inv(tfm); 712 + if (ret) 713 + dev_warn(priv->dev, "hash: 
invalidation error %d\n", ret); 714 + } else { 715 + dma_pool_free(priv->context_pool, ctx->base.ctxr, 716 + ctx->base.ctxr_dma); 717 + } 748 718 } 749 719 750 720 struct safexcel_alg_template safexcel_alg_sha1 = { ··· 888 848 req->last_req = true; 889 849 890 850 ret = crypto_ahash_update(areq); 891 - if (ret && ret != -EINPROGRESS) 851 + if (ret && ret != -EINPROGRESS && ret != -EBUSY) 892 852 return ret; 893 853 894 854 wait_for_completion_interruptible(&result.completion); ··· 953 913 unsigned int keylen) 954 914 { 955 915 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 916 + struct safexcel_crypto_priv *priv = ctx->priv; 956 917 struct safexcel_ahash_export_state istate, ostate; 957 918 int ret, i; 958 919 ··· 961 920 if (ret) 962 921 return ret; 963 922 964 - for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { 965 - if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 966 - ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 967 - ctx->base.needs_inv = true; 968 - break; 923 + if (priv->version == EIP197 && ctx->base.ctxr) { 924 + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { 925 + if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 926 + ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 927 + ctx->base.needs_inv = true; 928 + break; 929 + } 969 930 } 970 931 } 971 932
+3 -4
drivers/crypto/ixp4xx_crypto.c
··· 260 260 { 261 261 struct device *dev = &pdev->dev; 262 262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 263 - crypt_virt = dma_alloc_coherent(dev, 264 - NPE_QLEN * sizeof(struct crypt_ctl), 265 - &crypt_phys, GFP_ATOMIC); 263 + crypt_virt = dma_zalloc_coherent(dev, 264 + NPE_QLEN * sizeof(struct crypt_ctl), 265 + &crypt_phys, GFP_ATOMIC); 266 266 if (!crypt_virt) 267 267 return -ENOMEM; 268 - memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl)); 269 268 return 0; 270 269 } 271 270
+12 -7
drivers/crypto/marvell/cesa.c
··· 15 15 */ 16 16 17 17 #include <linux/delay.h> 18 + #include <linux/dma-mapping.h> 18 19 #include <linux/genalloc.h> 19 20 #include <linux/interrupt.h> 20 21 #include <linux/io.h> ··· 411 410 if (IS_ERR(engine->sram)) 412 411 return PTR_ERR(engine->sram); 413 412 414 - engine->sram_dma = phys_to_dma(cesa->dev, 415 - (phys_addr_t)res->start); 413 + engine->sram_dma = dma_map_resource(cesa->dev, res->start, 414 + cesa->sram_size, 415 + DMA_BIDIRECTIONAL, 0); 416 + if (dma_mapping_error(cesa->dev, engine->sram_dma)) 417 + return -ENOMEM; 416 418 417 419 return 0; 418 420 } ··· 425 421 struct mv_cesa_dev *cesa = platform_get_drvdata(pdev); 426 422 struct mv_cesa_engine *engine = &cesa->engines[idx]; 427 423 428 - if (!engine->pool) 429 - return; 430 - 431 - gen_pool_free(engine->pool, (unsigned long)engine->sram, 432 - cesa->sram_size); 424 + if (engine->pool) 425 + gen_pool_free(engine->pool, (unsigned long)engine->sram, 426 + cesa->sram_size); 427 + else 428 + dma_unmap_resource(cesa->dev, engine->sram_dma, 429 + cesa->sram_size, DMA_BIDIRECTIONAL, 0); 433 430 } 434 431 435 432 static int mv_cesa_probe(struct platform_device *pdev)
+2 -2
drivers/crypto/nx/nx-842-powernv.c
··· 743 743 } 744 744 745 745 if (!per_cpu(cpu_txwin, i)) { 746 - /* shoudn't happen, Each chip will have NX engine */ 747 - pr_err("NX engine is not availavle for CPU %d\n", i); 746 + /* shouldn't happen, Each chip will have NX engine */ 747 + pr_err("NX engine is not available for CPU %d\n", i); 748 748 return -EINVAL; 749 749 } 750 750 }
+17 -10
drivers/crypto/picoxcell_crypto.c
··· 1618 1618 1619 1619 static int spacc_probe(struct platform_device *pdev) 1620 1620 { 1621 - int i, err, ret = -EINVAL; 1621 + int i, err, ret; 1622 1622 struct resource *mem, *irq; 1623 1623 struct device_node *np = pdev->dev.of_node; 1624 1624 struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), ··· 1679 1679 engine->clk = clk_get(&pdev->dev, "ref"); 1680 1680 if (IS_ERR(engine->clk)) { 1681 1681 dev_info(&pdev->dev, "clk unavailable\n"); 1682 - device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); 1683 1682 return PTR_ERR(engine->clk); 1684 1683 } 1685 1684 1686 1685 if (clk_prepare_enable(engine->clk)) { 1687 1686 dev_info(&pdev->dev, "unable to prepare/enable clk\n"); 1688 - clk_put(engine->clk); 1689 - return -EIO; 1687 + ret = -EIO; 1688 + goto err_clk_put; 1690 1689 } 1691 1690 1692 - err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); 1693 - if (err) { 1694 - clk_disable_unprepare(engine->clk); 1695 - clk_put(engine->clk); 1696 - return err; 1697 - } 1691 + ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); 1692 + if (ret) 1693 + goto err_clk_disable; 1698 1694 1699 1695 1700 1696 /* ··· 1721 1725 1722 1726 platform_set_drvdata(pdev, engine); 1723 1727 1728 + ret = -EINVAL; 1724 1729 INIT_LIST_HEAD(&engine->registered_algs); 1725 1730 for (i = 0; i < engine->num_algs; ++i) { 1726 1731 engine->algs[i].engine = engine; ··· 1755 1758 dev_dbg(engine->dev, "registered alg \"%s\"\n", 1756 1759 engine->aeads[i].alg.base.cra_name); 1757 1760 } 1761 + 1762 + if (!ret) 1763 + return 0; 1764 + 1765 + del_timer_sync(&engine->packet_timeout); 1766 + device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); 1767 + err_clk_disable: 1768 + clk_disable_unprepare(engine->clk); 1769 + err_clk_put: 1770 + clk_put(engine->clk); 1758 1771 1759 1772 return ret; 1760 1773 }
+67 -66
drivers/crypto/qat/qat_common/qat_hal.c
··· 117 117 118 118 #define CSR_RETRY_TIMES 500 119 119 static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle, 120 - unsigned char ae, unsigned int csr, 121 - unsigned int *value) 120 + unsigned char ae, unsigned int csr) 122 121 { 123 122 unsigned int iterations = CSR_RETRY_TIMES; 123 + int value; 124 124 125 125 do { 126 - *value = GET_AE_CSR(handle, ae, csr); 126 + value = GET_AE_CSR(handle, ae, csr); 127 127 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS)) 128 - return 0; 128 + return value; 129 129 } while (iterations--); 130 130 131 131 pr_err("QAT: Read CSR timeout\n"); 132 - return -EFAULT; 132 + return 0; 133 133 } 134 134 135 135 static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle, ··· 154 154 { 155 155 unsigned int cur_ctx; 156 156 157 - qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); 157 + cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); 158 158 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); 159 - qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events); 159 + *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT); 160 160 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); 161 161 } 162 162 ··· 169 169 int times = MAX_RETRY_TIMES; 170 170 int elapsed_cycles = 0; 171 171 172 - qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt); 172 + base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); 173 173 base_cnt &= 0xffff; 174 174 while ((int)cycles > elapsed_cycles && times--) { 175 175 if (chk_inactive) 176 - qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr); 176 + csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); 177 177 178 - qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt); 178 + cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); 179 179 cur_cnt &= 0xffff; 180 180 elapsed_cycles = cur_cnt - base_cnt; 181 181 ··· 207 207 } 208 208 209 209 /* Sets the accelaration engine context mode to either four or eight */ 210 - 
qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); 210 + csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 211 211 csr = IGNORE_W1C_MASK & csr; 212 212 new_csr = (mode == 4) ? 213 213 SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) : ··· 221 221 { 222 222 unsigned int csr, new_csr; 223 223 224 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); 224 + csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 225 225 csr &= IGNORE_W1C_MASK; 226 226 227 227 new_csr = (mode) ? ··· 240 240 { 241 241 unsigned int csr, new_csr; 242 242 243 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr); 243 + csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 244 244 csr &= IGNORE_W1C_MASK; 245 245 switch (lm_type) { 246 246 case ICP_LMEM0: ··· 328 328 { 329 329 unsigned int ctx, cur_ctx; 330 330 331 - qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); 331 + cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); 332 332 333 333 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { 334 334 if (!(ctx_mask & (1 << ctx))) ··· 340 340 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); 341 341 } 342 342 343 - static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, 343 + static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle, 344 344 unsigned char ae, unsigned char ctx, 345 - unsigned int ae_csr, unsigned int *csr_val) 345 + unsigned int ae_csr) 346 346 { 347 - unsigned int cur_ctx; 347 + unsigned int cur_ctx, csr_val; 348 348 349 - qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); 349 + cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); 350 350 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx); 351 - qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val); 351 + csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr); 352 352 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx); 353 + 354 + return csr_val; 353 355 } 354 356 355 357 static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle, ··· 360 358 { 361 359 unsigned int ctx, 
cur_ctx; 362 360 363 - qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); 361 + cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); 364 362 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { 365 363 if (!(ctx_mask & (1 << ctx))) 366 364 continue; ··· 376 374 { 377 375 unsigned int ctx, cur_ctx; 378 376 379 - qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx); 377 + cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER); 380 378 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) { 381 379 if (!(ctx_mask & (1 << ctx))) 382 380 continue; ··· 394 392 int times = MAX_RETRY_TIMES; 395 393 396 394 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 397 - qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, 398 - (unsigned int *)&base_cnt); 395 + base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); 399 396 base_cnt &= 0xffff; 400 397 401 398 do { 402 - qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, 403 - (unsigned int *)&cur_cnt); 399 + cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT); 404 400 cur_cnt &= 0xffff; 405 401 } while (times-- && (cur_cnt == base_cnt)); 406 402 ··· 416 416 { 417 417 unsigned int enable = 0, active = 0; 418 418 419 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable); 420 - qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active); 419 + enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 420 + active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); 421 421 if ((enable & (0xff << CE_ENABLE_BITPOS)) || 422 422 (active & (1 << ACS_ABO_BITPOS))) 423 423 return 1; ··· 540 540 { 541 541 unsigned int ctx; 542 542 543 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); 543 + ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 544 544 ctx &= IGNORE_W1C_MASK & 545 545 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS)); 546 546 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx); ··· 583 583 unsigned int ustore_addr; 584 584 unsigned int i; 585 585 586 - qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); 586 + 
ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); 587 587 uaddr |= UA_ECS; 588 588 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); 589 589 for (i = 0; i < words_num; i++) { ··· 604 604 { 605 605 unsigned int ctx; 606 606 607 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx); 607 + ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 608 608 ctx &= IGNORE_W1C_MASK; 609 609 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF; 610 610 ctx |= (ctx_mask << CE_ENABLE_BITPOS); ··· 636 636 int ret = 0; 637 637 638 638 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 639 - qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); 639 + csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL); 640 640 csr_val &= ~(1 << MMC_SHARE_CS_BITPOS); 641 641 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val); 642 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val); 642 + csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 643 643 csr_val &= IGNORE_W1C_MASK; 644 644 csr_val |= CE_NN_MODE; 645 645 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val); ··· 648 648 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT, 649 649 handle->hal_handle->upc_mask & 650 650 INIT_PC_VALUE); 651 - qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); 651 + savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); 652 652 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0); 653 653 qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY); 654 654 qat_hal_wr_indr_csr(handle, ae, ctx_mask, ··· 760 760 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { 761 761 unsigned int csr_val = 0; 762 762 763 - qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val); 763 + csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE); 764 764 csr_val |= 0x1; 765 765 qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val); 766 766 } ··· 826 826 unsigned int i, uwrd_lo, uwrd_hi; 827 827 unsigned int ustore_addr, misc_control; 828 828 829 - 
qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control); 829 + misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL); 830 830 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, 831 831 misc_control & 0xfffffffb); 832 - qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); 832 + ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); 833 833 uaddr |= UA_ECS; 834 834 for (i = 0; i < words_num; i++) { 835 835 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); 836 836 uaddr++; 837 - qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo); 838 - qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi); 837 + uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER); 838 + uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER); 839 839 uword[i] = uwrd_hi; 840 840 uword[i] = (uword[i] << 0x20) | uwrd_lo; 841 841 } ··· 849 849 { 850 850 unsigned int i, ustore_addr; 851 851 852 - qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); 852 + ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); 853 853 uaddr |= UA_ECS; 854 854 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); 855 855 for (i = 0; i < words_num; i++) { ··· 890 890 return -EINVAL; 891 891 } 892 892 /* save current context */ 893 - qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0); 894 - qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1); 895 - qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, 896 - &ind_lm_addr_byte0); 897 - qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, 898 - &ind_lm_addr_byte1); 893 + ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT); 894 + ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT); 895 + ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx, 896 + INDIRECT_LM_ADDR_0_BYTE_INDEX); 897 + ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx, 898 + INDIRECT_LM_ADDR_1_BYTE_INDEX); 899 899 if (inst_num <= 
MAX_EXEC_INST) 900 900 qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords); 901 901 qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events); 902 - qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc); 902 + savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT); 903 903 savpc = (savpc & handle->hal_handle->upc_mask) >> 0; 904 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); 904 + ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 905 905 ctx_enables &= IGNORE_W1C_MASK; 906 - qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc); 907 - qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); 908 - qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl); 909 - qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, 910 - &ind_cnt_sig); 911 - qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig); 912 - qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig); 906 + savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE); 907 + savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); 908 + ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL); 909 + ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx, 910 + FUTURE_COUNT_SIGNAL_INDIRECT); 911 + ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx, 912 + CTX_SIG_EVENTS_INDIRECT); 913 + act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE); 913 914 /* execute micro codes */ 914 915 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); 915 916 qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst); ··· 928 927 if (endpc) { 929 928 unsigned int ctx_status; 930 929 931 - qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, 932 - &ctx_status); 930 + ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx, 931 + CTX_STS_INDIRECT); 933 932 *endpc = ctx_status & handle->hal_handle->upc_mask; 934 933 } 935 934 /* retore to saved context */ ··· 939 938 qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events); 940 939 qat_hal_wr_indr_csr(handle, ae, (1 << 
ctx), CTX_STS_INDIRECT, 941 940 handle->hal_handle->upc_mask & savpc); 942 - qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val); 941 + csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL); 943 942 newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS); 944 943 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val); 945 944 qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc); ··· 987 986 insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10); 988 987 break; 989 988 } 990 - qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx); 991 - qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl); 992 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); 989 + savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS); 990 + ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL); 991 + ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 993 992 ctx_enables &= IGNORE_W1C_MASK; 994 993 if (ctx != (savctx & ACS_ACNO)) 995 994 qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 996 995 ctx & ACS_ACNO); 997 996 qat_hal_get_uwords(handle, ae, 0, 1, &savuword); 998 997 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables); 999 - qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr); 998 + ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS); 1000 999 uaddr = UA_ECS; 1001 1000 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr); 1002 1001 insts = qat_hal_set_uword_ecc(insts); ··· 1012 1011 * the instruction should have been executed 1013 1012 * prior to clearing the ECS in putUwords 1014 1013 */ 1015 - qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data); 1014 + *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT); 1016 1015 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr); 1017 1016 qat_hal_wr_uwords(handle, ae, 0, 1, &savuword); 1018 1017 if (ctx != (savctx & ACS_ACNO)) ··· 1189 1188 unsigned short mask; 1190 1189 unsigned short dr_offset = 0x10; 1191 1190 1192 - status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); 1191 + 
status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 1193 1192 if (CE_INUSE_CONTEXTS & ctx_enables) { 1194 1193 if (ctx & 0x1) { 1195 1194 pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx); ··· 1239 1238 const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1; 1240 1239 const unsigned short gprnum = 0, dly = num_inst * 0x5; 1241 1240 1242 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); 1241 + ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 1243 1242 if (CE_INUSE_CONTEXTS & ctx_enables) { 1244 1243 if (ctx & 0x1) { 1245 1244 pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx); ··· 1283 1282 unsigned int ctx_enables; 1284 1283 int stat = 0; 1285 1284 1286 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); 1285 + ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 1287 1286 ctx_enables &= IGNORE_W1C_MASK; 1288 1287 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE); 1289 1288 ··· 1300 1299 { 1301 1300 unsigned int ctx_enables; 1302 1301 1303 - qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables); 1302 + ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES); 1304 1303 if (ctx_enables & CE_INUSE_CONTEXTS) { 1305 1304 /* 4-ctx mode */ 1306 1305 *relreg = absreg_num & 0x1F;
+11 -15
drivers/crypto/s5p-sss.c
··· 1 - /* 2 - * Cryptographic API. 3 - * 4 - * Support for Samsung S5PV210 and Exynos HW acceleration. 5 - * 6 - * Copyright (C) 2011 NetUP Inc. All rights reserved. 7 - * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved. 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License version 2 as published 11 - * by the Free Software Foundation. 12 - * 13 - * Hash part based on omap-sham.c driver. 14 - */ 1 + // SPDX-License-Identifier: GPL-2.0 2 + // 3 + // Cryptographic API. 4 + // 5 + // Support for Samsung S5PV210 and Exynos HW acceleration. 6 + // 7 + // Copyright (C) 2011 NetUP Inc. All rights reserved. 8 + // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved. 9 + // 10 + // Hash part based on omap-sham.c driver. 15 11 16 12 #include <linux/clk.h> 17 13 #include <linux/crypto.h> ··· 1457 1461 &dd->hash_flags)) { 1458 1462 /* hash or semi-hash ready */ 1459 1463 clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags); 1460 - goto finish; 1464 + goto finish; 1461 1465 } 1462 1466 } 1463 1467
+11 -2
drivers/crypto/stm32/Kconfig
··· 1 - config CRC_DEV_STM32 1 + config CRYPTO_DEV_STM32_CRC 2 2 tristate "Support for STM32 crc accelerators" 3 3 depends on ARCH_STM32 4 4 select CRYPTO_HASH ··· 6 6 This enables support for the CRC32 hw accelerator which can be found 7 7 on STMicroelectronics STM32 SOC. 8 8 9 - config HASH_DEV_STM32 9 + config CRYPTO_DEV_STM32_HASH 10 10 tristate "Support for STM32 hash accelerators" 11 11 depends on ARCH_STM32 12 12 depends on HAS_DMA ··· 18 18 help 19 19 This enables support for the HASH hw accelerator which can be found 20 20 on STMicroelectronics STM32 SOC. 21 + 22 + config CRYPTO_DEV_STM32_CRYP 23 + tristate "Support for STM32 cryp accelerators" 24 + depends on ARCH_STM32 25 + select CRYPTO_HASH 26 + select CRYPTO_ENGINE 27 + help 28 + This enables support for the CRYP (AES/DES/TDES) hw accelerator which 29 + can be found on STMicroelectronics STM32 SOC.
+3 -2
drivers/crypto/stm32/Makefile
··· 1 - obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o 2 - obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o 1 + obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o 2 + obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o 3 + obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
+1170
drivers/crypto/stm32/stm32-cryp.c
··· 1 + /* 2 + * Copyright (C) STMicroelectronics SA 2017 3 + * Author: Fabien Dessenne <fabien.dessenne@st.com> 4 + * License terms: GNU General Public License (GPL), version 2 5 + */ 6 + 7 + #include <linux/clk.h> 8 + #include <linux/delay.h> 9 + #include <linux/interrupt.h> 10 + #include <linux/iopoll.h> 11 + #include <linux/module.h> 12 + #include <linux/of_device.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/reset.h> 15 + 16 + #include <crypto/aes.h> 17 + #include <crypto/des.h> 18 + #include <crypto/engine.h> 19 + #include <crypto/scatterwalk.h> 20 + 21 + #define DRIVER_NAME "stm32-cryp" 22 + 23 + /* Bit [0] encrypt / decrypt */ 24 + #define FLG_ENCRYPT BIT(0) 25 + /* Bit [8..1] algo & operation mode */ 26 + #define FLG_AES BIT(1) 27 + #define FLG_DES BIT(2) 28 + #define FLG_TDES BIT(3) 29 + #define FLG_ECB BIT(4) 30 + #define FLG_CBC BIT(5) 31 + #define FLG_CTR BIT(6) 32 + /* Mode mask = bits [15..0] */ 33 + #define FLG_MODE_MASK GENMASK(15, 0) 34 + 35 + /* Registers */ 36 + #define CRYP_CR 0x00000000 37 + #define CRYP_SR 0x00000004 38 + #define CRYP_DIN 0x00000008 39 + #define CRYP_DOUT 0x0000000C 40 + #define CRYP_DMACR 0x00000010 41 + #define CRYP_IMSCR 0x00000014 42 + #define CRYP_RISR 0x00000018 43 + #define CRYP_MISR 0x0000001C 44 + #define CRYP_K0LR 0x00000020 45 + #define CRYP_K0RR 0x00000024 46 + #define CRYP_K1LR 0x00000028 47 + #define CRYP_K1RR 0x0000002C 48 + #define CRYP_K2LR 0x00000030 49 + #define CRYP_K2RR 0x00000034 50 + #define CRYP_K3LR 0x00000038 51 + #define CRYP_K3RR 0x0000003C 52 + #define CRYP_IV0LR 0x00000040 53 + #define CRYP_IV0RR 0x00000044 54 + #define CRYP_IV1LR 0x00000048 55 + #define CRYP_IV1RR 0x0000004C 56 + 57 + /* Registers values */ 58 + #define CR_DEC_NOT_ENC 0x00000004 59 + #define CR_TDES_ECB 0x00000000 60 + #define CR_TDES_CBC 0x00000008 61 + #define CR_DES_ECB 0x00000010 62 + #define CR_DES_CBC 0x00000018 63 + #define CR_AES_ECB 0x00000020 64 + #define CR_AES_CBC 0x00000028 65 + #define CR_AES_CTR 
0x00000030 66 + #define CR_AES_KP 0x00000038 67 + #define CR_AES_UNKNOWN 0xFFFFFFFF 68 + #define CR_ALGO_MASK 0x00080038 69 + #define CR_DATA32 0x00000000 70 + #define CR_DATA16 0x00000040 71 + #define CR_DATA8 0x00000080 72 + #define CR_DATA1 0x000000C0 73 + #define CR_KEY128 0x00000000 74 + #define CR_KEY192 0x00000100 75 + #define CR_KEY256 0x00000200 76 + #define CR_FFLUSH 0x00004000 77 + #define CR_CRYPEN 0x00008000 78 + 79 + #define SR_BUSY 0x00000010 80 + #define SR_OFNE 0x00000004 81 + 82 + #define IMSCR_IN BIT(0) 83 + #define IMSCR_OUT BIT(1) 84 + 85 + #define MISR_IN BIT(0) 86 + #define MISR_OUT BIT(1) 87 + 88 + /* Misc */ 89 + #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) 90 + #define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) 91 + #define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) 92 + 93 + struct stm32_cryp_ctx { 94 + struct stm32_cryp *cryp; 95 + int keylen; 96 + u32 key[AES_KEYSIZE_256 / sizeof(u32)]; 97 + unsigned long flags; 98 + }; 99 + 100 + struct stm32_cryp_reqctx { 101 + unsigned long mode; 102 + }; 103 + 104 + struct stm32_cryp { 105 + struct list_head list; 106 + struct device *dev; 107 + void __iomem *regs; 108 + struct clk *clk; 109 + unsigned long flags; 110 + u32 irq_status; 111 + struct stm32_cryp_ctx *ctx; 112 + 113 + struct crypto_engine *engine; 114 + 115 + struct mutex lock; /* protects req */ 116 + struct ablkcipher_request *req; 117 + 118 + size_t hw_blocksize; 119 + 120 + size_t total_in; 121 + size_t total_in_save; 122 + size_t total_out; 123 + size_t total_out_save; 124 + 125 + struct scatterlist *in_sg; 126 + struct scatterlist *out_sg; 127 + struct scatterlist *out_sg_save; 128 + 129 + struct scatterlist in_sgl; 130 + struct scatterlist out_sgl; 131 + bool sgs_copied; 132 + 133 + int in_sg_len; 134 + int out_sg_len; 135 + 136 + struct scatter_walk in_walk; 137 + struct scatter_walk out_walk; 138 + 139 + u32 last_ctr[4]; 140 + }; 141 + 142 + struct stm32_cryp_list { 143 + struct list_head 
dev_list; 144 + spinlock_t lock; /* protect dev_list */ 145 + }; 146 + 147 + static struct stm32_cryp_list cryp_list = { 148 + .dev_list = LIST_HEAD_INIT(cryp_list.dev_list), 149 + .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock), 150 + }; 151 + 152 + static inline bool is_aes(struct stm32_cryp *cryp) 153 + { 154 + return cryp->flags & FLG_AES; 155 + } 156 + 157 + static inline bool is_des(struct stm32_cryp *cryp) 158 + { 159 + return cryp->flags & FLG_DES; 160 + } 161 + 162 + static inline bool is_tdes(struct stm32_cryp *cryp) 163 + { 164 + return cryp->flags & FLG_TDES; 165 + } 166 + 167 + static inline bool is_ecb(struct stm32_cryp *cryp) 168 + { 169 + return cryp->flags & FLG_ECB; 170 + } 171 + 172 + static inline bool is_cbc(struct stm32_cryp *cryp) 173 + { 174 + return cryp->flags & FLG_CBC; 175 + } 176 + 177 + static inline bool is_ctr(struct stm32_cryp *cryp) 178 + { 179 + return cryp->flags & FLG_CTR; 180 + } 181 + 182 + static inline bool is_encrypt(struct stm32_cryp *cryp) 183 + { 184 + return cryp->flags & FLG_ENCRYPT; 185 + } 186 + 187 + static inline bool is_decrypt(struct stm32_cryp *cryp) 188 + { 189 + return !is_encrypt(cryp); 190 + } 191 + 192 + static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst) 193 + { 194 + return readl_relaxed(cryp->regs + ofst); 195 + } 196 + 197 + static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val) 198 + { 199 + writel_relaxed(val, cryp->regs + ofst); 200 + } 201 + 202 + static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp) 203 + { 204 + u32 status; 205 + 206 + return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status, 207 + !(status & SR_BUSY), 10, 100000); 208 + } 209 + 210 + static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) 211 + { 212 + struct stm32_cryp *tmp, *cryp = NULL; 213 + 214 + spin_lock_bh(&cryp_list.lock); 215 + if (!ctx->cryp) { 216 + list_for_each_entry(tmp, &cryp_list.dev_list, list) { 217 + cryp = tmp; 218 + break; 219 + } 220 + 
ctx->cryp = cryp; 221 + } else { 222 + cryp = ctx->cryp; 223 + } 224 + 225 + spin_unlock_bh(&cryp_list.lock); 226 + 227 + return cryp; 228 + } 229 + 230 + static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total, 231 + size_t align) 232 + { 233 + int len = 0; 234 + 235 + if (!total) 236 + return 0; 237 + 238 + if (!IS_ALIGNED(total, align)) 239 + return -EINVAL; 240 + 241 + while (sg) { 242 + if (!IS_ALIGNED(sg->offset, sizeof(u32))) 243 + return -EINVAL; 244 + 245 + if (!IS_ALIGNED(sg->length, align)) 246 + return -EINVAL; 247 + 248 + len += sg->length; 249 + sg = sg_next(sg); 250 + } 251 + 252 + if (len != total) 253 + return -EINVAL; 254 + 255 + return 0; 256 + } 257 + 258 + static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp) 259 + { 260 + int ret; 261 + 262 + ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in, 263 + cryp->hw_blocksize); 264 + if (ret) 265 + return ret; 266 + 267 + ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out, 268 + cryp->hw_blocksize); 269 + 270 + return ret; 271 + } 272 + 273 + static void sg_copy_buf(void *buf, struct scatterlist *sg, 274 + unsigned int start, unsigned int nbytes, int out) 275 + { 276 + struct scatter_walk walk; 277 + 278 + if (!nbytes) 279 + return; 280 + 281 + scatterwalk_start(&walk, sg); 282 + scatterwalk_advance(&walk, start); 283 + scatterwalk_copychunks(buf, &walk, nbytes, out); 284 + scatterwalk_done(&walk, out, 0); 285 + } 286 + 287 + static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp) 288 + { 289 + void *buf_in, *buf_out; 290 + int pages, total_in, total_out; 291 + 292 + if (!stm32_cryp_check_io_aligned(cryp)) { 293 + cryp->sgs_copied = 0; 294 + return 0; 295 + } 296 + 297 + total_in = ALIGN(cryp->total_in, cryp->hw_blocksize); 298 + pages = total_in ? get_order(total_in) : 1; 299 + buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); 300 + 301 + total_out = ALIGN(cryp->total_out, cryp->hw_blocksize); 302 + pages = total_out ? 
get_order(total_out) : 1; 303 + buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); 304 + 305 + if (!buf_in || !buf_out) { 306 + dev_err(cryp->dev, "Can't allocate pages when unaligned\n"); 307 + cryp->sgs_copied = 0; 308 + return -EFAULT; 309 + } 310 + 311 + sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0); 312 + 313 + sg_init_one(&cryp->in_sgl, buf_in, total_in); 314 + cryp->in_sg = &cryp->in_sgl; 315 + cryp->in_sg_len = 1; 316 + 317 + sg_init_one(&cryp->out_sgl, buf_out, total_out); 318 + cryp->out_sg_save = cryp->out_sg; 319 + cryp->out_sg = &cryp->out_sgl; 320 + cryp->out_sg_len = 1; 321 + 322 + cryp->sgs_copied = 1; 323 + 324 + return 0; 325 + } 326 + 327 + static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv) 328 + { 329 + if (!iv) 330 + return; 331 + 332 + stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++)); 333 + stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++)); 334 + 335 + if (is_aes(cryp)) { 336 + stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++)); 337 + stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++)); 338 + } 339 + } 340 + 341 + static void stm32_cryp_hw_write_key(struct stm32_cryp *c) 342 + { 343 + unsigned int i; 344 + int r_id; 345 + 346 + if (is_des(c)) { 347 + stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0])); 348 + stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1])); 349 + } else { 350 + r_id = CRYP_K3RR; 351 + for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) 352 + stm32_cryp_write(c, r_id, 353 + cpu_to_be32(c->ctx->key[i - 1])); 354 + } 355 + } 356 + 357 + static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp) 358 + { 359 + if (is_aes(cryp) && is_ecb(cryp)) 360 + return CR_AES_ECB; 361 + 362 + if (is_aes(cryp) && is_cbc(cryp)) 363 + return CR_AES_CBC; 364 + 365 + if (is_aes(cryp) && is_ctr(cryp)) 366 + return CR_AES_CTR; 367 + 368 + if (is_des(cryp) && is_ecb(cryp)) 369 + return CR_DES_ECB; 370 + 371 + if (is_des(cryp) && is_cbc(cryp)) 372 + return CR_DES_CBC; 373 + 
374 + if (is_tdes(cryp) && is_ecb(cryp)) 375 + return CR_TDES_ECB; 376 + 377 + if (is_tdes(cryp) && is_cbc(cryp)) 378 + return CR_TDES_CBC; 379 + 380 + dev_err(cryp->dev, "Unknown mode\n"); 381 + return CR_AES_UNKNOWN; 382 + } 383 + 384 + static int stm32_cryp_hw_init(struct stm32_cryp *cryp) 385 + { 386 + int ret; 387 + u32 cfg, hw_mode; 388 + 389 + /* Disable interrupt */ 390 + stm32_cryp_write(cryp, CRYP_IMSCR, 0); 391 + 392 + /* Set key */ 393 + stm32_cryp_hw_write_key(cryp); 394 + 395 + /* Set configuration */ 396 + cfg = CR_DATA8 | CR_FFLUSH; 397 + 398 + switch (cryp->ctx->keylen) { 399 + case AES_KEYSIZE_128: 400 + cfg |= CR_KEY128; 401 + break; 402 + 403 + case AES_KEYSIZE_192: 404 + cfg |= CR_KEY192; 405 + break; 406 + 407 + default: 408 + case AES_KEYSIZE_256: 409 + cfg |= CR_KEY256; 410 + break; 411 + } 412 + 413 + hw_mode = stm32_cryp_get_hw_mode(cryp); 414 + if (hw_mode == CR_AES_UNKNOWN) 415 + return -EINVAL; 416 + 417 + /* AES ECB/CBC decrypt: run key preparation first */ 418 + if (is_decrypt(cryp) && 419 + ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) { 420 + stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN); 421 + 422 + /* Wait for end of processing */ 423 + ret = stm32_cryp_wait_busy(cryp); 424 + if (ret) { 425 + dev_err(cryp->dev, "Timeout (key preparation)\n"); 426 + return ret; 427 + } 428 + } 429 + 430 + cfg |= hw_mode; 431 + 432 + if (is_decrypt(cryp)) 433 + cfg |= CR_DEC_NOT_ENC; 434 + 435 + /* Apply config and flush (valid when CRYPEN = 0) */ 436 + stm32_cryp_write(cryp, CRYP_CR, cfg); 437 + 438 + switch (hw_mode) { 439 + case CR_DES_CBC: 440 + case CR_TDES_CBC: 441 + case CR_AES_CBC: 442 + case CR_AES_CTR: 443 + stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info); 444 + break; 445 + 446 + default: 447 + break; 448 + } 449 + 450 + /* Enable now */ 451 + cfg |= CR_CRYPEN; 452 + 453 + stm32_cryp_write(cryp, CRYP_CR, cfg); 454 + 455 + return 0; 456 + } 457 + 458 + static void stm32_cryp_finish_req(struct stm32_cryp *cryp) 
459 + { 460 + int err = 0; 461 + 462 + if (cryp->sgs_copied) { 463 + void *buf_in, *buf_out; 464 + int pages, len; 465 + 466 + buf_in = sg_virt(&cryp->in_sgl); 467 + buf_out = sg_virt(&cryp->out_sgl); 468 + 469 + sg_copy_buf(buf_out, cryp->out_sg_save, 0, 470 + cryp->total_out_save, 1); 471 + 472 + len = ALIGN(cryp->total_in_save, cryp->hw_blocksize); 473 + pages = len ? get_order(len) : 1; 474 + free_pages((unsigned long)buf_in, pages); 475 + 476 + len = ALIGN(cryp->total_out_save, cryp->hw_blocksize); 477 + pages = len ? get_order(len) : 1; 478 + free_pages((unsigned long)buf_out, pages); 479 + } 480 + 481 + crypto_finalize_cipher_request(cryp->engine, cryp->req, err); 482 + cryp->req = NULL; 483 + 484 + memset(cryp->ctx->key, 0, cryp->ctx->keylen); 485 + 486 + mutex_unlock(&cryp->lock); 487 + } 488 + 489 + static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) 490 + { 491 + /* Enable interrupt and let the IRQ handler do everything */ 492 + stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT); 493 + 494 + return 0; 495 + } 496 + 497 + static int stm32_cryp_cra_init(struct crypto_tfm *tfm) 498 + { 499 + tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx); 500 + 501 + return 0; 502 + } 503 + 504 + static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode) 505 + { 506 + struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( 507 + crypto_ablkcipher_reqtfm(req)); 508 + struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req); 509 + struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx); 510 + 511 + if (!cryp) 512 + return -ENODEV; 513 + 514 + rctx->mode = mode; 515 + 516 + return crypto_transfer_cipher_request_to_engine(cryp->engine, req); 517 + } 518 + 519 + static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 520 + unsigned int keylen) 521 + { 522 + struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm); 523 + 524 + memcpy(ctx->key, key, keylen); 525 + ctx->keylen = keylen; 526 + 527 + return 0; 528 + } 529 + 
530 + static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 531 + unsigned int keylen) 532 + { 533 + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && 534 + keylen != AES_KEYSIZE_256) 535 + return -EINVAL; 536 + else 537 + return stm32_cryp_setkey(tfm, key, keylen); 538 + } 539 + 540 + static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 541 + unsigned int keylen) 542 + { 543 + if (keylen != DES_KEY_SIZE) 544 + return -EINVAL; 545 + else 546 + return stm32_cryp_setkey(tfm, key, keylen); 547 + } 548 + 549 + static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, 550 + unsigned int keylen) 551 + { 552 + if (keylen != (3 * DES_KEY_SIZE)) 553 + return -EINVAL; 554 + else 555 + return stm32_cryp_setkey(tfm, key, keylen); 556 + } 557 + 558 + static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req) 559 + { 560 + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); 561 + } 562 + 563 + static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req) 564 + { 565 + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); 566 + } 567 + 568 + static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req) 569 + { 570 + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); 571 + } 572 + 573 + static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req) 574 + { 575 + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); 576 + } 577 + 578 + static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req) 579 + { 580 + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); 581 + } 582 + 583 + static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req) 584 + { 585 + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); 586 + } 587 + 588 + static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req) 589 + { 590 + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); 591 + } 592 + 593 + static int 
stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req) 594 + { 595 + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); 596 + } 597 + 598 + static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req) 599 + { 600 + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); 601 + } 602 + 603 + static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req) 604 + { 605 + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); 606 + } 607 + 608 + static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req) 609 + { 610 + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); 611 + } 612 + 613 + static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req) 614 + { 615 + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); 616 + } 617 + 618 + static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req) 619 + { 620 + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); 621 + } 622 + 623 + static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req) 624 + { 625 + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); 626 + } 627 + 628 + static int stm32_cryp_prepare_req(struct crypto_engine *engine, 629 + struct ablkcipher_request *req) 630 + { 631 + struct stm32_cryp_ctx *ctx; 632 + struct stm32_cryp *cryp; 633 + struct stm32_cryp_reqctx *rctx; 634 + int ret; 635 + 636 + if (!req) 637 + return -EINVAL; 638 + 639 + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 640 + 641 + cryp = ctx->cryp; 642 + 643 + if (!cryp) 644 + return -ENODEV; 645 + 646 + mutex_lock(&cryp->lock); 647 + 648 + rctx = ablkcipher_request_ctx(req); 649 + rctx->mode &= FLG_MODE_MASK; 650 + 651 + ctx->cryp = cryp; 652 + 653 + cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode; 654 + cryp->hw_blocksize = is_aes(cryp) ? 
AES_BLOCK_SIZE : DES_BLOCK_SIZE; 655 + cryp->ctx = ctx; 656 + 657 + cryp->req = req; 658 + cryp->total_in = req->nbytes; 659 + cryp->total_out = cryp->total_in; 660 + 661 + cryp->total_in_save = cryp->total_in; 662 + cryp->total_out_save = cryp->total_out; 663 + 664 + cryp->in_sg = req->src; 665 + cryp->out_sg = req->dst; 666 + cryp->out_sg_save = cryp->out_sg; 667 + 668 + cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); 669 + if (cryp->in_sg_len < 0) { 670 + dev_err(cryp->dev, "Cannot get in_sg_len\n"); 671 + ret = cryp->in_sg_len; 672 + goto out; 673 + } 674 + 675 + cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); 676 + if (cryp->out_sg_len < 0) { 677 + dev_err(cryp->dev, "Cannot get out_sg_len\n"); 678 + ret = cryp->out_sg_len; 679 + goto out; 680 + } 681 + 682 + ret = stm32_cryp_copy_sgs(cryp); 683 + if (ret) 684 + goto out; 685 + 686 + scatterwalk_start(&cryp->in_walk, cryp->in_sg); 687 + scatterwalk_start(&cryp->out_walk, cryp->out_sg); 688 + 689 + ret = stm32_cryp_hw_init(cryp); 690 + out: 691 + if (ret) 692 + mutex_unlock(&cryp->lock); 693 + 694 + return ret; 695 + } 696 + 697 + static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine, 698 + struct ablkcipher_request *req) 699 + { 700 + return stm32_cryp_prepare_req(engine, req); 701 + } 702 + 703 + static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, 704 + struct ablkcipher_request *req) 705 + { 706 + struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx( 707 + crypto_ablkcipher_reqtfm(req)); 708 + struct stm32_cryp *cryp = ctx->cryp; 709 + 710 + if (!cryp) 711 + return -ENODEV; 712 + 713 + return stm32_cryp_cpu_start(cryp); 714 + } 715 + 716 + static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, 717 + unsigned int n) 718 + { 719 + scatterwalk_advance(&cryp->out_walk, n); 720 + 721 + if (unlikely(cryp->out_sg->length == _walked_out)) { 722 + cryp->out_sg = sg_next(cryp->out_sg); 723 + if (cryp->out_sg) { 724 + 
scatterwalk_start(&cryp->out_walk, cryp->out_sg); 725 + return (sg_virt(cryp->out_sg) + _walked_out); 726 + } 727 + } 728 + 729 + return (u32 *)((u8 *)dst + n); 730 + } 731 + 732 + static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src, 733 + unsigned int n) 734 + { 735 + scatterwalk_advance(&cryp->in_walk, n); 736 + 737 + if (unlikely(cryp->in_sg->length == _walked_in)) { 738 + cryp->in_sg = sg_next(cryp->in_sg); 739 + if (cryp->in_sg) { 740 + scatterwalk_start(&cryp->in_walk, cryp->in_sg); 741 + return (sg_virt(cryp->in_sg) + _walked_in); 742 + } 743 + } 744 + 745 + return (u32 *)((u8 *)src + n); 746 + } 747 + 748 + static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) 749 + { 750 + u32 cr; 751 + 752 + if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) { 753 + cryp->last_ctr[3] = 0; 754 + cryp->last_ctr[2]++; 755 + if (!cryp->last_ctr[2]) { 756 + cryp->last_ctr[1]++; 757 + if (!cryp->last_ctr[1]) 758 + cryp->last_ctr[0]++; 759 + } 760 + 761 + cr = stm32_cryp_read(cryp, CRYP_CR); 762 + stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); 763 + 764 + stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr); 765 + 766 + stm32_cryp_write(cryp, CRYP_CR, cr); 767 + } 768 + 769 + cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR); 770 + cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR); 771 + cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR); 772 + cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR); 773 + } 774 + 775 + static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp) 776 + { 777 + unsigned int i, j; 778 + u32 d32, *dst; 779 + u8 *d8; 780 + 781 + dst = sg_virt(cryp->out_sg) + _walked_out; 782 + 783 + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { 784 + if (likely(cryp->total_out >= sizeof(u32))) { 785 + /* Read a full u32 */ 786 + *dst = stm32_cryp_read(cryp, CRYP_DOUT); 787 + 788 + dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); 789 + cryp->total_out -= sizeof(u32); 790 + } else if (!cryp->total_out) { 791 + /* Empty 
fifo out (data from input padding) */ 792 + d32 = stm32_cryp_read(cryp, CRYP_DOUT); 793 + } else { 794 + /* Read less than an u32 */ 795 + d32 = stm32_cryp_read(cryp, CRYP_DOUT); 796 + d8 = (u8 *)&d32; 797 + 798 + for (j = 0; j < cryp->total_out; j++) { 799 + *((u8 *)dst) = *(d8++); 800 + dst = stm32_cryp_next_out(cryp, dst, 1); 801 + } 802 + cryp->total_out = 0; 803 + } 804 + } 805 + 806 + return !cryp->total_out || !cryp->total_in; 807 + } 808 + 809 + static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) 810 + { 811 + unsigned int i, j; 812 + u32 *src; 813 + u8 d8[4]; 814 + 815 + src = sg_virt(cryp->in_sg) + _walked_in; 816 + 817 + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { 818 + if (likely(cryp->total_in >= sizeof(u32))) { 819 + /* Write a full u32 */ 820 + stm32_cryp_write(cryp, CRYP_DIN, *src); 821 + 822 + src = stm32_cryp_next_in(cryp, src, sizeof(u32)); 823 + cryp->total_in -= sizeof(u32); 824 + } else if (!cryp->total_in) { 825 + /* Write padding data */ 826 + stm32_cryp_write(cryp, CRYP_DIN, 0); 827 + } else { 828 + /* Write less than an u32 */ 829 + memset(d8, 0, sizeof(u32)); 830 + for (j = 0; j < cryp->total_in; j++) { 831 + d8[j] = *((u8 *)src); 832 + src = stm32_cryp_next_in(cryp, src, 1); 833 + } 834 + 835 + stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); 836 + cryp->total_in = 0; 837 + } 838 + } 839 + } 840 + 841 + static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) 842 + { 843 + if (unlikely(!cryp->total_in)) { 844 + dev_warn(cryp->dev, "No more data to process\n"); 845 + return; 846 + } 847 + 848 + if (is_aes(cryp) && is_ctr(cryp)) 849 + stm32_cryp_check_ctr_counter(cryp); 850 + 851 + stm32_cryp_irq_write_block(cryp); 852 + } 853 + 854 + static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) 855 + { 856 + struct stm32_cryp *cryp = arg; 857 + 858 + if (cryp->irq_status & MISR_OUT) 859 + /* Output FIFO IRQ: read data */ 860 + if (unlikely(stm32_cryp_irq_read_data(cryp))) { 861 + /* All bytes processed, finish 
*/ 862 + stm32_cryp_write(cryp, CRYP_IMSCR, 0); 863 + stm32_cryp_finish_req(cryp); 864 + return IRQ_HANDLED; 865 + } 866 + 867 + if (cryp->irq_status & MISR_IN) { 868 + /* Input FIFO IRQ: write data */ 869 + stm32_cryp_irq_write_data(cryp); 870 + } 871 + 872 + return IRQ_HANDLED; 873 + } 874 + 875 + static irqreturn_t stm32_cryp_irq(int irq, void *arg) 876 + { 877 + struct stm32_cryp *cryp = arg; 878 + 879 + cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR); 880 + 881 + return IRQ_WAKE_THREAD; 882 + } 883 + 884 + static struct crypto_alg crypto_algs[] = { 885 + { 886 + .cra_name = "ecb(aes)", 887 + .cra_driver_name = "stm32-ecb-aes", 888 + .cra_priority = 200, 889 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 890 + CRYPTO_ALG_ASYNC, 891 + .cra_blocksize = AES_BLOCK_SIZE, 892 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 893 + .cra_alignmask = 0xf, 894 + .cra_type = &crypto_ablkcipher_type, 895 + .cra_module = THIS_MODULE, 896 + .cra_init = stm32_cryp_cra_init, 897 + .cra_ablkcipher = { 898 + .min_keysize = AES_MIN_KEY_SIZE, 899 + .max_keysize = AES_MAX_KEY_SIZE, 900 + .setkey = stm32_cryp_aes_setkey, 901 + .encrypt = stm32_cryp_aes_ecb_encrypt, 902 + .decrypt = stm32_cryp_aes_ecb_decrypt, 903 + } 904 + }, 905 + { 906 + .cra_name = "cbc(aes)", 907 + .cra_driver_name = "stm32-cbc-aes", 908 + .cra_priority = 200, 909 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 910 + CRYPTO_ALG_ASYNC, 911 + .cra_blocksize = AES_BLOCK_SIZE, 912 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 913 + .cra_alignmask = 0xf, 914 + .cra_type = &crypto_ablkcipher_type, 915 + .cra_module = THIS_MODULE, 916 + .cra_init = stm32_cryp_cra_init, 917 + .cra_ablkcipher = { 918 + .min_keysize = AES_MIN_KEY_SIZE, 919 + .max_keysize = AES_MAX_KEY_SIZE, 920 + .ivsize = AES_BLOCK_SIZE, 921 + .setkey = stm32_cryp_aes_setkey, 922 + .encrypt = stm32_cryp_aes_cbc_encrypt, 923 + .decrypt = stm32_cryp_aes_cbc_decrypt, 924 + } 925 + }, 926 + { 927 + .cra_name = "ctr(aes)", 928 + .cra_driver_name = 
"stm32-ctr-aes", 929 + .cra_priority = 200, 930 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 931 + CRYPTO_ALG_ASYNC, 932 + .cra_blocksize = 1, 933 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 934 + .cra_alignmask = 0xf, 935 + .cra_type = &crypto_ablkcipher_type, 936 + .cra_module = THIS_MODULE, 937 + .cra_init = stm32_cryp_cra_init, 938 + .cra_ablkcipher = { 939 + .min_keysize = AES_MIN_KEY_SIZE, 940 + .max_keysize = AES_MAX_KEY_SIZE, 941 + .ivsize = AES_BLOCK_SIZE, 942 + .setkey = stm32_cryp_aes_setkey, 943 + .encrypt = stm32_cryp_aes_ctr_encrypt, 944 + .decrypt = stm32_cryp_aes_ctr_decrypt, 945 + } 946 + }, 947 + { 948 + .cra_name = "ecb(des)", 949 + .cra_driver_name = "stm32-ecb-des", 950 + .cra_priority = 200, 951 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 952 + CRYPTO_ALG_ASYNC, 953 + .cra_blocksize = DES_BLOCK_SIZE, 954 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 955 + .cra_alignmask = 0xf, 956 + .cra_type = &crypto_ablkcipher_type, 957 + .cra_module = THIS_MODULE, 958 + .cra_init = stm32_cryp_cra_init, 959 + .cra_ablkcipher = { 960 + .min_keysize = DES_BLOCK_SIZE, 961 + .max_keysize = DES_BLOCK_SIZE, 962 + .setkey = stm32_cryp_des_setkey, 963 + .encrypt = stm32_cryp_des_ecb_encrypt, 964 + .decrypt = stm32_cryp_des_ecb_decrypt, 965 + } 966 + }, 967 + { 968 + .cra_name = "cbc(des)", 969 + .cra_driver_name = "stm32-cbc-des", 970 + .cra_priority = 200, 971 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 972 + CRYPTO_ALG_ASYNC, 973 + .cra_blocksize = DES_BLOCK_SIZE, 974 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 975 + .cra_alignmask = 0xf, 976 + .cra_type = &crypto_ablkcipher_type, 977 + .cra_module = THIS_MODULE, 978 + .cra_init = stm32_cryp_cra_init, 979 + .cra_ablkcipher = { 980 + .min_keysize = DES_BLOCK_SIZE, 981 + .max_keysize = DES_BLOCK_SIZE, 982 + .ivsize = DES_BLOCK_SIZE, 983 + .setkey = stm32_cryp_des_setkey, 984 + .encrypt = stm32_cryp_des_cbc_encrypt, 985 + .decrypt = stm32_cryp_des_cbc_decrypt, 986 + } 987 + }, 988 + { 989 + .cra_name = 
"ecb(des3_ede)", 990 + .cra_driver_name = "stm32-ecb-des3", 991 + .cra_priority = 200, 992 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 993 + CRYPTO_ALG_ASYNC, 994 + .cra_blocksize = DES_BLOCK_SIZE, 995 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 996 + .cra_alignmask = 0xf, 997 + .cra_type = &crypto_ablkcipher_type, 998 + .cra_module = THIS_MODULE, 999 + .cra_init = stm32_cryp_cra_init, 1000 + .cra_ablkcipher = { 1001 + .min_keysize = 3 * DES_BLOCK_SIZE, 1002 + .max_keysize = 3 * DES_BLOCK_SIZE, 1003 + .setkey = stm32_cryp_tdes_setkey, 1004 + .encrypt = stm32_cryp_tdes_ecb_encrypt, 1005 + .decrypt = stm32_cryp_tdes_ecb_decrypt, 1006 + } 1007 + }, 1008 + { 1009 + .cra_name = "cbc(des3_ede)", 1010 + .cra_driver_name = "stm32-cbc-des3", 1011 + .cra_priority = 200, 1012 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1013 + CRYPTO_ALG_ASYNC, 1014 + .cra_blocksize = DES_BLOCK_SIZE, 1015 + .cra_ctxsize = sizeof(struct stm32_cryp_ctx), 1016 + .cra_alignmask = 0xf, 1017 + .cra_type = &crypto_ablkcipher_type, 1018 + .cra_module = THIS_MODULE, 1019 + .cra_init = stm32_cryp_cra_init, 1020 + .cra_ablkcipher = { 1021 + .min_keysize = 3 * DES_BLOCK_SIZE, 1022 + .max_keysize = 3 * DES_BLOCK_SIZE, 1023 + .ivsize = DES_BLOCK_SIZE, 1024 + .setkey = stm32_cryp_tdes_setkey, 1025 + .encrypt = stm32_cryp_tdes_cbc_encrypt, 1026 + .decrypt = stm32_cryp_tdes_cbc_decrypt, 1027 + } 1028 + }, 1029 + }; 1030 + 1031 + static const struct of_device_id stm32_dt_ids[] = { 1032 + { .compatible = "st,stm32f756-cryp", }, 1033 + {}, 1034 + }; 1035 + MODULE_DEVICE_TABLE(of, stm32_dt_ids); 1036 + 1037 + static int stm32_cryp_probe(struct platform_device *pdev) 1038 + { 1039 + struct device *dev = &pdev->dev; 1040 + struct stm32_cryp *cryp; 1041 + struct resource *res; 1042 + struct reset_control *rst; 1043 + int irq, ret; 1044 + 1045 + cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL); 1046 + if (!cryp) 1047 + return -ENOMEM; 1048 + 1049 + cryp->dev = dev; 1050 + 1051 + mutex_init(&cryp->lock); 1052 + 
1053 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1054 + cryp->regs = devm_ioremap_resource(dev, res); 1055 + if (IS_ERR(cryp->regs)) 1056 + return PTR_ERR(cryp->regs); 1057 + 1058 + irq = platform_get_irq(pdev, 0); 1059 + if (irq < 0) { 1060 + dev_err(dev, "Cannot get IRQ resource\n"); 1061 + return irq; 1062 + } 1063 + 1064 + ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, 1065 + stm32_cryp_irq_thread, IRQF_ONESHOT, 1066 + dev_name(dev), cryp); 1067 + if (ret) { 1068 + dev_err(dev, "Cannot grab IRQ\n"); 1069 + return ret; 1070 + } 1071 + 1072 + cryp->clk = devm_clk_get(dev, NULL); 1073 + if (IS_ERR(cryp->clk)) { 1074 + dev_err(dev, "Could not get clock\n"); 1075 + return PTR_ERR(cryp->clk); 1076 + } 1077 + 1078 + ret = clk_prepare_enable(cryp->clk); 1079 + if (ret) { 1080 + dev_err(cryp->dev, "Failed to enable clock\n"); 1081 + return ret; 1082 + } 1083 + 1084 + rst = devm_reset_control_get(dev, NULL); 1085 + if (!IS_ERR(rst)) { 1086 + reset_control_assert(rst); 1087 + udelay(2); 1088 + reset_control_deassert(rst); 1089 + } 1090 + 1091 + platform_set_drvdata(pdev, cryp); 1092 + 1093 + spin_lock(&cryp_list.lock); 1094 + list_add(&cryp->list, &cryp_list.dev_list); 1095 + spin_unlock(&cryp_list.lock); 1096 + 1097 + /* Initialize crypto engine */ 1098 + cryp->engine = crypto_engine_alloc_init(dev, 1); 1099 + if (!cryp->engine) { 1100 + dev_err(dev, "Could not init crypto engine\n"); 1101 + ret = -ENOMEM; 1102 + goto err_engine1; 1103 + } 1104 + 1105 + cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req; 1106 + cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req; 1107 + 1108 + ret = crypto_engine_start(cryp->engine); 1109 + if (ret) { 1110 + dev_err(dev, "Could not start crypto engine\n"); 1111 + goto err_engine2; 1112 + } 1113 + 1114 + ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs)); 1115 + if (ret) { 1116 + dev_err(dev, "Could not register algs\n"); 1117 + goto err_algs; 1118 + } 1119 + 1120 + 
dev_info(dev, "Initialized\n"); 1121 + 1122 + return 0; 1123 + 1124 + err_algs: 1125 + err_engine2: 1126 + crypto_engine_exit(cryp->engine); 1127 + err_engine1: 1128 + spin_lock(&cryp_list.lock); 1129 + list_del(&cryp->list); 1130 + spin_unlock(&cryp_list.lock); 1131 + 1132 + clk_disable_unprepare(cryp->clk); 1133 + 1134 + return ret; 1135 + } 1136 + 1137 + static int stm32_cryp_remove(struct platform_device *pdev) 1138 + { 1139 + struct stm32_cryp *cryp = platform_get_drvdata(pdev); 1140 + 1141 + if (!cryp) 1142 + return -ENODEV; 1143 + 1144 + crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs)); 1145 + 1146 + crypto_engine_exit(cryp->engine); 1147 + 1148 + spin_lock(&cryp_list.lock); 1149 + list_del(&cryp->list); 1150 + spin_unlock(&cryp_list.lock); 1151 + 1152 + clk_disable_unprepare(cryp->clk); 1153 + 1154 + return 0; 1155 + } 1156 + 1157 + static struct platform_driver stm32_cryp_driver = { 1158 + .probe = stm32_cryp_probe, 1159 + .remove = stm32_cryp_remove, 1160 + .driver = { 1161 + .name = DRIVER_NAME, 1162 + .of_match_table = stm32_dt_ids, 1163 + }, 1164 + }; 1165 + 1166 + module_platform_driver(stm32_cryp_driver); 1167 + 1168 + MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>"); 1169 + MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver"); 1170 + MODULE_LICENSE("GPL");
+2
drivers/crypto/stm32/stm32_crc32.c
··· 208 208 .cra_name = "crc32", 209 209 .cra_driver_name = DRIVER_NAME, 210 210 .cra_priority = 200, 211 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 211 212 .cra_blocksize = CHKSUM_BLOCK_SIZE, 212 213 .cra_alignmask = 3, 213 214 .cra_ctxsize = sizeof(struct stm32_crc_ctx), ··· 230 229 .cra_name = "crc32c", 231 230 .cra_driver_name = DRIVER_NAME, 232 231 .cra_priority = 200, 232 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 233 233 .cra_blocksize = CHKSUM_BLOCK_SIZE, 234 234 .cra_alignmask = 3, 235 235 .cra_ctxsize = sizeof(struct stm32_crc_ctx),
+23
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 58 58 extern struct list_head adapter_list; 59 59 extern struct mutex uld_mutex; 60 60 61 + /* Suspend an Ethernet Tx queue with fewer available descriptors than this. 62 + * This is the same as calc_tx_descs() for a TSO packet with 63 + * nr_frags == MAX_SKB_FRAGS. 64 + */ 65 + #define ETHTXQ_STOP_THRES \ 66 + (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) 67 + 61 68 enum { 62 69 MAX_NPORTS = 4, /* max # of ports */ 63 70 SERNUM_LEN = 24, /* Serial # length */ ··· 570 563 571 564 enum { 572 565 ULP_CRYPTO_LOOKASIDE = 1 << 0, 566 + ULP_CRYPTO_IPSEC_INLINE = 1 << 1, 573 567 }; 574 568 575 569 struct rx_sw_desc; ··· 973 965 974 966 enum { 975 967 SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */ 968 + }; 969 + 970 + struct tx_sw_desc { /* SW state per Tx descriptor */ 971 + struct sk_buff *skb; 972 + struct ulptx_sgl *sgl; 976 973 }; 977 974 978 975 /* Support for "sched_queue" command to allow one or more NIC TX Queues ··· 1712 1699 void free_tx_desc(struct adapter *adap, struct sge_txq *q, 1713 1700 unsigned int n, bool unmap); 1714 1701 void free_txq(struct adapter *adap, struct sge_txq *q); 1702 + void cxgb4_reclaim_completed_tx(struct adapter *adap, 1703 + struct sge_txq *q, bool unmap); 1704 + int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb, 1705 + dma_addr_t *addr); 1706 + void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, 1707 + void *pos); 1708 + void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, 1709 + struct ulptx_sgl *sgl, u64 *end, unsigned int start, 1710 + const dma_addr_t *addr); 1711 + void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); 1715 1712 #endif /* __CXGB4_H__ */
+2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
··· 3096 3096 atomic_read(&adap->chcr_stats.error)); 3097 3097 seq_printf(seq, "Fallback: %10u \n", 3098 3098 atomic_read(&adap->chcr_stats.fallback)); 3099 + seq_printf(seq, "IPSec PDU: %10u\n", 3100 + atomic_read(&adap->chcr_stats.ipsec_cnt)); 3099 3101 return 0; 3100 3102 } 3101 3103
+1 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 4096 4096 } else { 4097 4097 adap->vres.ncrypto_fc = val[0]; 4098 4098 } 4099 - adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; 4099 + adap->params.crypto = ntohs(caps_cmd.cryptocaps); 4100 4100 adap->num_uld += 1; 4101 4101 } 4102 4102 #undef FW_PARAM_PFVF
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
··· 637 637 lld->nchan = adap->params.nports; 638 638 lld->nports = adap->params.nports; 639 639 lld->wr_cred = adap->params.ofldq_wr_cred; 640 + lld->crypto = adap->params.crypto; 640 641 lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); 641 642 lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A); 642 643 lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
··· 297 297 atomic_t complete; 298 298 atomic_t error; 299 299 atomic_t fallback; 300 + atomic_t ipsec_cnt; 300 301 }; 301 302 302 303 #define OCQ_WIN_OFFSET(pdev, vres) \ ··· 323 322 unsigned char wr_cred; /* WR 16-byte credits */ 324 323 unsigned char adapter_type; /* type of adapter */ 325 324 unsigned char fw_api_ver; /* FW API version */ 325 + unsigned char crypto; /* crypto support */ 326 326 unsigned int fw_vers; /* FW version */ 327 327 unsigned int iscsi_iolen; /* iSCSI max I/O length */ 328 328 unsigned int cclk_ps; /* Core clock period in psec */ ··· 372 370 struct t4_lro_mgr *lro_mgr, 373 371 struct napi_struct *napi); 374 372 void (*lro_flush)(struct t4_lro_mgr *); 373 + int (*tx_handler)(struct sk_buff *skb, struct net_device *dev); 375 374 }; 376 375 377 376 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+51 -51
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 41 41 #include <linux/jiffies.h> 42 42 #include <linux/prefetch.h> 43 43 #include <linux/export.h> 44 + #include <net/xfrm.h> 44 45 #include <net/ipv6.h> 45 46 #include <net/tcp.h> 46 47 #include <net/busy_poll.h> ··· 54 53 #include "t4_msg.h" 55 54 #include "t4fw_api.h" 56 55 #include "cxgb4_ptp.h" 56 + #include "cxgb4_uld.h" 57 57 58 58 /* 59 59 * Rx buffer size. We use largish buffers if possible but settle for single ··· 112 110 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) 113 111 114 112 /* 115 - * Suspend an Ethernet Tx queue with fewer available descriptors than this. 116 - * This is the same as calc_tx_descs() for a TSO packet with 117 - * nr_frags == MAX_SKB_FRAGS. 118 - */ 119 - #define ETHTXQ_STOP_THRES \ 120 - (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) 121 - 122 - /* 123 113 * Suspension threshold for non-Ethernet Tx queues. We require enough room 124 114 * for a full sized WR. 125 115 */ ··· 127 133 * Max size of a WR sent through a control Tx queue. 
128 134 */ 129 135 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN 130 - 131 - struct tx_sw_desc { /* SW state per Tx descriptor */ 132 - struct sk_buff *skb; 133 - struct ulptx_sgl *sgl; 134 - }; 135 136 136 137 struct rx_sw_desc { /* SW state per Rx descriptor */ 137 138 struct page *page; ··· 237 248 return fl->avail - fl->pend_cred <= s->fl_starve_thres; 238 249 } 239 250 240 - static int map_skb(struct device *dev, const struct sk_buff *skb, 241 - dma_addr_t *addr) 251 + int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb, 252 + dma_addr_t *addr) 242 253 { 243 254 const skb_frag_t *fp, *end; 244 255 const struct skb_shared_info *si; ··· 266 277 out_err: 267 278 return -ENOMEM; 268 279 } 280 + EXPORT_SYMBOL(cxgb4_map_skb); 269 281 270 282 #ifdef CONFIG_NEED_DMA_MAP_STATE 271 283 static void unmap_skb(struct device *dev, const struct sk_buff *skb, ··· 401 411 } 402 412 403 413 /** 404 - * reclaim_completed_tx - reclaims completed Tx descriptors 414 + * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors 405 415 * @adap: the adapter 406 416 * @q: the Tx queue to reclaim completed descriptors from 407 417 * @unmap: whether the buffers should be unmapped for DMA ··· 410 420 * and frees the associated buffers if possible. Called with the Tx 411 421 * queue locked. 
412 422 */ 413 - static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, 423 + inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, 414 424 bool unmap) 415 425 { 416 426 int avail = reclaimable(q); ··· 427 437 q->in_use -= avail; 428 438 } 429 439 } 440 + EXPORT_SYMBOL(cxgb4_reclaim_completed_tx); 430 441 431 442 static inline int get_buf_size(struct adapter *adapter, 432 443 const struct rx_sw_desc *d) ··· 824 833 } 825 834 826 835 /** 827 - * write_sgl - populate a scatter/gather list for a packet 836 + * cxgb4_write_sgl - populate a scatter/gather list for a packet 828 837 * @skb: the packet 829 838 * @q: the Tx queue we are writing into 830 839 * @sgl: starting location for writing the SGL ··· 840 849 * right after the end of the SGL but does not account for any potential 841 850 * wrap around, i.e., @end > @sgl. 842 851 */ 843 - static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, 844 - struct ulptx_sgl *sgl, u64 *end, unsigned int start, 845 - const dma_addr_t *addr) 852 + void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, 853 + struct ulptx_sgl *sgl, u64 *end, unsigned int start, 854 + const dma_addr_t *addr) 846 855 { 847 856 unsigned int i, len; 848 857 struct ulptx_sge_pair *to; ··· 894 903 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ 895 904 *end = 0; 896 905 } 906 + EXPORT_SYMBOL(cxgb4_write_sgl); 897 907 898 908 /* This function copies 64 byte coalesced work request to 899 909 * memory mapped BAR2 space. For coalesced WR SGE fetches ··· 913 921 } 914 922 915 923 /** 916 - * ring_tx_db - check and potentially ring a Tx queue's doorbell 924 + * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell 917 925 * @adap: the adapter 918 926 * @q: the Tx queue 919 927 * @n: number of new descriptors to give to HW 920 928 * 921 929 * Ring the doorbel for a Tx queue. 
922 930 */ 923 - static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 931 + inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 924 932 { 925 933 /* Make sure that all writes to the TX Descriptors are committed 926 934 * before we tell the hardware about them. ··· 987 995 wmb(); 988 996 } 989 997 } 998 + EXPORT_SYMBOL(cxgb4_ring_tx_db); 990 999 991 1000 /** 992 - * inline_tx_skb - inline a packet's data into Tx descriptors 1001 + * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors 993 1002 * @skb: the packet 994 1003 * @q: the Tx queue where the packet will be inlined 995 1004 * @pos: starting position in the Tx queue where to inline the packet ··· 1000 1007 * Most of the complexity of this operation is dealing with wrap arounds 1001 1008 * in the middle of the packet we want to inline. 1002 1009 */ 1003 - static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, 1004 - void *pos) 1010 + void cxgb4_inline_tx_skb(const struct sk_buff *skb, 1011 + const struct sge_txq *q, void *pos) 1005 1012 { 1006 1013 u64 *p; 1007 1014 int left = (void *)q->stat - pos; ··· 1023 1030 if ((uintptr_t)p & 8) 1024 1031 *p = 0; 1025 1032 } 1033 + EXPORT_SYMBOL(cxgb4_inline_tx_skb); 1026 1034 1027 1035 static void *inline_tx_skb_header(const struct sk_buff *skb, 1028 1036 const struct sge_txq *q, void *pos, ··· 1193 1199 1194 1200 pi = netdev_priv(dev); 1195 1201 adap = pi->adapter; 1202 + ssi = skb_shinfo(skb); 1203 + #ifdef CONFIG_CHELSIO_IPSEC_INLINE 1204 + if (xfrm_offload(skb) && !ssi->gso_size) 1205 + return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev); 1206 + #endif /* CHELSIO_IPSEC_INLINE */ 1207 + 1196 1208 qidx = skb_get_queue_mapping(skb); 1197 1209 if (ptp_enabled) { 1198 1210 spin_lock(&adap->ptp_lock); ··· 1215 1215 } 1216 1216 skb_tx_timestamp(skb); 1217 1217 1218 - reclaim_completed_tx(adap, &q->q, true); 1218 + cxgb4_reclaim_completed_tx(adap, &q->q, true); 1219 1219 cntrl = 
TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; 1220 1220 1221 1221 #ifdef CONFIG_CHELSIO_T4_FCOE ··· 1245 1245 immediate = true; 1246 1246 1247 1247 if (!immediate && 1248 - unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { 1248 + unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { 1249 1249 q->mapping_err++; 1250 1250 if (ptp_enabled) 1251 1251 spin_unlock(&adap->ptp_lock); ··· 1264 1264 end = (u64 *)wr + flits; 1265 1265 1266 1266 len = immediate ? skb->len : 0; 1267 - ssi = skb_shinfo(skb); 1268 1267 if (ssi->gso_size) { 1269 1268 struct cpl_tx_pkt_lso *lso = (void *)wr; 1270 1269 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; ··· 1340 1341 cpl->ctrl1 = cpu_to_be64(cntrl); 1341 1342 1342 1343 if (immediate) { 1343 - inline_tx_skb(skb, &q->q, cpl + 1); 1344 + cxgb4_inline_tx_skb(skb, &q->q, cpl + 1); 1344 1345 dev_consume_skb_any(skb); 1345 1346 } else { 1346 1347 int last_desc; 1347 1348 1348 - write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, 1349 - addr); 1349 + cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), 1350 + end, 0, addr); 1350 1351 skb_orphan(skb); 1351 1352 1352 1353 last_desc = q->q.pidx + ndesc - 1; ··· 1358 1359 1359 1360 txq_advance(&q->q, ndesc); 1360 1361 1361 - ring_tx_db(adap, &q->q, ndesc); 1362 + cxgb4_ring_tx_db(adap, &q->q, ndesc); 1362 1363 if (ptp_enabled) 1363 1364 spin_unlock(&adap->ptp_lock); 1364 1365 return NETDEV_TX_OK; ··· 1368 1369 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs 1369 1370 * @q: the SGE control Tx queue 1370 1371 * 1371 - * This is a variant of reclaim_completed_tx() that is used for Tx queues 1372 - * that send only immediate data (presently just the control queues) and 1373 - * thus do not have any sk_buffs to release. 1372 + * This is a variant of cxgb4_reclaim_completed_tx() that is used 1373 + * for Tx queues that send only immediate data (presently just 1374 + * the control queues) and thus do not have any sk_buffs to release. 
1374 1375 */ 1375 1376 static inline void reclaim_completed_tx_imm(struct sge_txq *q) 1376 1377 { ··· 1445 1446 } 1446 1447 1447 1448 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; 1448 - inline_tx_skb(skb, &q->q, wr); 1449 + cxgb4_inline_tx_skb(skb, &q->q, wr); 1449 1450 1450 1451 txq_advance(&q->q, ndesc); 1451 1452 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) 1452 1453 ctrlq_check_stop(q, wr); 1453 1454 1454 - ring_tx_db(q->adap, &q->q, ndesc); 1455 + cxgb4_ring_tx_db(q->adap, &q->q, ndesc); 1455 1456 spin_unlock(&q->sendq.lock); 1456 1457 1457 1458 kfree_skb(skb); ··· 1486 1487 txq_advance(&q->q, ndesc); 1487 1488 spin_unlock(&q->sendq.lock); 1488 1489 1489 - inline_tx_skb(skb, &q->q, wr); 1490 + cxgb4_inline_tx_skb(skb, &q->q, wr); 1490 1491 kfree_skb(skb); 1491 1492 1492 1493 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { ··· 1499 1500 } 1500 1501 } 1501 1502 if (written > 16) { 1502 - ring_tx_db(q->adap, &q->q, written); 1503 + cxgb4_ring_tx_db(q->adap, &q->q, written); 1503 1504 written = 0; 1504 1505 } 1505 1506 spin_lock(&q->sendq.lock); 1506 1507 } 1507 1508 q->full = 0; 1508 - ringdb: if (written) 1509 - ring_tx_db(q->adap, &q->q, written); 1509 + ringdb: 1510 + if (written) 1511 + cxgb4_ring_tx_db(q->adap, &q->q, written); 1510 1512 spin_unlock(&q->sendq.lock); 1511 1513 } 1512 1514 ··· 1650 1650 */ 1651 1651 spin_unlock(&q->sendq.lock); 1652 1652 1653 - reclaim_completed_tx(q->adap, &q->q, false); 1653 + cxgb4_reclaim_completed_tx(q->adap, &q->q, false); 1654 1654 1655 1655 flits = skb->priority; /* previously saved */ 1656 1656 ndesc = flits_to_desc(flits); ··· 1661 1661 1662 1662 pos = (u64 *)&q->q.desc[q->q.pidx]; 1663 1663 if (is_ofld_imm(skb)) 1664 - inline_tx_skb(skb, &q->q, pos); 1665 - else if (map_skb(q->adap->pdev_dev, skb, 1666 - (dma_addr_t *)skb->head)) { 1664 + cxgb4_inline_tx_skb(skb, &q->q, pos); 1665 + else if (cxgb4_map_skb(q->adap->pdev_dev, skb, 1666 + (dma_addr_t *)skb->head)) { 1667 1667 txq_stop_maperr(q); 1668 1668 
spin_lock(&q->sendq.lock); 1669 1669 break; ··· 1694 1694 pos = (void *)txq->desc; 1695 1695 } 1696 1696 1697 - write_sgl(skb, &q->q, (void *)pos, 1698 - end, hdr_len, 1699 - (dma_addr_t *)skb->head); 1697 + cxgb4_write_sgl(skb, &q->q, (void *)pos, 1698 + end, hdr_len, 1699 + (dma_addr_t *)skb->head); 1700 1700 #ifdef CONFIG_NEED_DMA_MAP_STATE 1701 1701 skb->dev = q->adap->port[0]; 1702 1702 skb->destructor = deferred_unmap_destructor; ··· 1710 1710 txq_advance(&q->q, ndesc); 1711 1711 written += ndesc; 1712 1712 if (unlikely(written > 32)) { 1713 - ring_tx_db(q->adap, &q->q, written); 1713 + cxgb4_ring_tx_db(q->adap, &q->q, written); 1714 1714 written = 0; 1715 1715 } 1716 1716 ··· 1725 1725 kfree_skb(skb); 1726 1726 } 1727 1727 if (likely(written)) 1728 - ring_tx_db(q->adap, &q->q, written); 1728 + cxgb4_ring_tx_db(q->adap, &q->q, written); 1729 1729 1730 1730 /*Indicate that no thread is processing the Pending Send Queue 1731 1731 * currently.
+7
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
··· 513 513 u64 cookie; 514 514 }; 515 515 516 + #define FW_ULPTX_WR_DATA_S 28 517 + #define FW_ULPTX_WR_DATA_M 0x1 518 + #define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S) 519 + #define FW_ULPTX_WR_DATA_G(x) \ 520 + (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M) 521 + #define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U) 522 + 516 523 struct fw_tp_wr { 517 524 __be32 op_to_immdlen; 518 525 __be32 flowid_len16;
+1
drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
··· 120 120 .cra_name = "adler32", 121 121 .cra_driver_name = "adler32-zlib", 122 122 .cra_priority = 100, 123 + .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, 123 124 .cra_blocksize = CHKSUM_BLOCK_SIZE, 124 125 .cra_ctxsize = sizeof(u32), 125 126 .cra_module = THIS_MODULE,
+9 -1
include/crypto/aead.h
··· 327 327 */ 328 328 static inline int crypto_aead_encrypt(struct aead_request *req) 329 329 { 330 - return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); 330 + struct crypto_aead *aead = crypto_aead_reqtfm(req); 331 + 332 + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 333 + return -ENOKEY; 334 + 335 + return crypto_aead_alg(aead)->encrypt(req); 331 336 } 332 337 333 338 /** ··· 360 355 static inline int crypto_aead_decrypt(struct aead_request *req) 361 356 { 362 357 struct crypto_aead *aead = crypto_aead_reqtfm(req); 358 + 359 + if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY) 360 + return -ENOKEY; 363 361 364 362 if (req->cryptlen < crypto_aead_authsize(aead)) 365 363 return -EINVAL;
+2 -1
include/crypto/chacha20.h
··· 13 13 #define CHACHA20_IV_SIZE 16 14 14 #define CHACHA20_KEY_SIZE 32 15 15 #define CHACHA20_BLOCK_SIZE 64 16 + #define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32)) 16 17 17 18 struct chacha20_ctx { 18 19 u32 key[8]; 19 20 }; 20 21 21 - void chacha20_block(u32 *state, void *stream); 22 + void chacha20_block(u32 *state, u32 *stream); 22 23 void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv); 23 24 int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, 24 25 unsigned int keysize);
+28 -18
include/crypto/hash.h
··· 71 71 72 72 /** 73 73 * struct ahash_alg - asynchronous message digest definition 74 - * @init: Initialize the transformation context. Intended only to initialize the 74 + * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the 75 75 * state of the HASH transformation at the beginning. This shall fill in 76 76 * the internal structures used during the entire duration of the whole 77 77 * transformation. No data processing happens at this point. 78 - * Note: mandatory. 79 - * @update: Push a chunk of data into the driver for transformation. This 78 + * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This 80 79 * function actually pushes blocks of data from upper layers into the 81 80 * driver, which then passes those to the hardware as seen fit. This 82 81 * function must not finalize the HASH transformation by calculating the ··· 84 85 * context, as this function may be called in parallel with the same 85 86 * transformation object. Data processing can happen synchronously 86 87 * [SHASH] or asynchronously [AHASH] at this point. 87 - * Note: mandatory. 88 - * @final: Retrieve result from the driver. This function finalizes the 88 + * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the 89 89 * transformation and retrieves the resulting hash from the driver and 90 90 * pushes it back to upper layers. No data processing happens at this 91 91 * point unless hardware requires it to finish the transformation 92 92 * (then the data buffered by the device driver is processed). 93 - * Note: mandatory. 94 - * @finup: Combination of @update and @final. This function is effectively a 93 + * @finup: **[optional]** Combination of @update and @final. This function is effectively a 95 94 * combination of @update and @final calls issued in sequence. 
As some 96 95 * hardware cannot do @update and @final separately, this callback was 97 96 * added to allow such hardware to be used at least by IPsec. Data 98 97 * processing can happen synchronously [SHASH] or asynchronously [AHASH] 99 98 * at this point. 100 - * Note: optional. 101 99 * @digest: Combination of @init and @update and @final. This function 102 100 * effectively behaves as the entire chain of operations, @init, 103 101 * @update and @final issued in sequence. Just like @finup, this was ··· 206 210 unsigned int keylen); 207 211 208 212 unsigned int reqsize; 209 - bool has_setkey; 210 213 struct crypto_tfm base; 211 214 }; 212 215 ··· 405 410 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 406 411 unsigned int keylen); 407 412 408 - static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm) 409 - { 410 - return tfm->has_setkey; 411 - } 412 - 413 413 /** 414 414 * crypto_ahash_finup() - update and finalize message digest 415 415 * @req: reference to the ahash_request handle that holds all information ··· 477 487 */ 478 488 static inline int crypto_ahash_import(struct ahash_request *req, const void *in) 479 489 { 480 - return crypto_ahash_reqtfm(req)->import(req, in); 490 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 491 + 492 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 493 + return -ENOKEY; 494 + 495 + return tfm->import(req, in); 481 496 } 482 497 483 498 /** ··· 498 503 */ 499 504 static inline int crypto_ahash_init(struct ahash_request *req) 500 505 { 501 - return crypto_ahash_reqtfm(req)->init(req); 506 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 507 + 508 + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 509 + return -ENOKEY; 510 + 511 + return tfm->init(req); 502 512 } 503 513 504 514 /** ··· 855 855 */ 856 856 static inline int crypto_shash_import(struct shash_desc *desc, const void *in) 857 857 { 858 - return crypto_shash_alg(desc->tfm)->import(desc, in); 858 + struct 
crypto_shash *tfm = desc->tfm; 859 + 860 + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 861 + return -ENOKEY; 862 + 863 + return crypto_shash_alg(tfm)->import(desc, in); 859 864 } 860 865 861 866 /** ··· 876 871 */ 877 872 static inline int crypto_shash_init(struct shash_desc *desc) 878 873 { 879 - return crypto_shash_alg(desc->tfm)->init(desc); 874 + struct crypto_shash *tfm = desc->tfm; 875 + 876 + if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 877 + return -ENOKEY; 878 + 879 + return crypto_shash_alg(tfm)->init(desc); 880 880 } 881 881 882 882 /**
+2
include/crypto/internal/hash.h
··· 90 90 return alg->setkey != shash_no_setkey; 91 91 } 92 92 93 + bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg); 94 + 93 95 int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, 94 96 struct hash_alg_common *alg, 95 97 struct crypto_instance *inst);
-11
include/crypto/internal/scompress.h
··· 28 28 * @free_ctx: Function frees context allocated with alloc_ctx 29 29 * @compress: Function performs a compress operation 30 30 * @decompress: Function performs a de-compress operation 31 - * @init: Initialize the cryptographic transformation object. 32 - * This function is used to initialize the cryptographic 33 - * transformation object. This function is called only once at 34 - * the instantiation time, right after the transformation context 35 - * was allocated. In case the cryptographic hardware has some 36 - * special requirements which need to be handled by software, this 37 - * function shall check for the precise requirement of the 38 - * transformation and put any software fallbacks in place. 39 - * @exit: Deinitialize the cryptographic transformation object. This is a 40 - * counterpart to @init, used to remove various changes set in 41 - * @init. 42 31 * @base: Common crypto API algorithm data structure 43 32 */ 44 33 struct scomp_alg {
-10
include/crypto/null.h
··· 12 12 struct crypto_skcipher *crypto_get_default_null_skcipher(void); 13 13 void crypto_put_default_null_skcipher(void); 14 14 15 - static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void) 16 - { 17 - return crypto_get_default_null_skcipher(); 18 - } 19 - 20 - static inline void crypto_put_default_null_skcipher2(void) 21 - { 22 - crypto_put_default_null_skcipher(); 23 - } 24 - 25 15 #endif
-2
include/crypto/poly1305.h
··· 31 31 }; 32 32 33 33 int crypto_poly1305_init(struct shash_desc *desc); 34 - int crypto_poly1305_setkey(struct crypto_shash *tfm, 35 - const u8 *key, unsigned int keylen); 36 34 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, 37 35 const u8 *src, unsigned int srclen); 38 36 int crypto_poly1305_update(struct shash_desc *desc,
+27
include/crypto/salsa20.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Common values for the Salsa20 algorithm 4 + */ 5 + 6 + #ifndef _CRYPTO_SALSA20_H 7 + #define _CRYPTO_SALSA20_H 8 + 9 + #include <linux/types.h> 10 + 11 + #define SALSA20_IV_SIZE 8 12 + #define SALSA20_MIN_KEY_SIZE 16 13 + #define SALSA20_MAX_KEY_SIZE 32 14 + #define SALSA20_BLOCK_SIZE 64 15 + 16 + struct crypto_skcipher; 17 + 18 + struct salsa20_ctx { 19 + u32 initial_state[16]; 20 + }; 21 + 22 + void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx, 23 + const u8 *iv); 24 + int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key, 25 + unsigned int keysize); 26 + 27 + #endif /* _CRYPTO_SALSA20_H */
+5 -1
include/crypto/sha3.h
··· 19 19 20 20 struct sha3_state { 21 21 u64 st[25]; 22 - unsigned int md_len; 23 22 unsigned int rsiz; 24 23 unsigned int rsizw; 25 24 26 25 unsigned int partial; 27 26 u8 buf[SHA3_224_BLOCK_SIZE]; 28 27 }; 28 + 29 + int crypto_sha3_init(struct shash_desc *desc); 30 + int crypto_sha3_update(struct shash_desc *desc, const u8 *data, 31 + unsigned int len); 32 + int crypto_sha3_final(struct shash_desc *desc, u8 *out); 29 33 30 34 #endif
+6 -5
include/crypto/skcipher.h
··· 401 401 return tfm->setkey(tfm, key, keylen); 402 402 } 403 403 404 - static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm) 405 - { 406 - return tfm->keysize; 407 - } 408 - 409 404 static inline unsigned int crypto_skcipher_default_keysize( 410 405 struct crypto_skcipher *tfm) 411 406 { ··· 437 442 { 438 443 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 439 444 445 + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 446 + return -ENOKEY; 447 + 440 448 return tfm->encrypt(req); 441 449 } 442 450 ··· 457 459 static inline int crypto_skcipher_decrypt(struct skcipher_request *req) 458 460 { 459 461 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 462 + 463 + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) 464 + return -ENOKEY; 460 465 461 466 return tfm->decrypt(req); 462 467 }
+9 -1
include/linux/crypto.h
··· 107 107 #define CRYPTO_ALG_INTERNAL 0x00002000 108 108 109 109 /* 110 + * Set if the algorithm has a ->setkey() method but can be used without 111 + * calling it first, i.e. there is a default key. 112 + */ 113 + #define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 114 + 115 + /* 110 116 * Transform masks and values (for crt_flags). 111 117 */ 118 + #define CRYPTO_TFM_NEED_KEY 0x00000001 119 + 112 120 #define CRYPTO_TFM_REQ_MASK 0x000fff00 113 121 #define CRYPTO_TFM_RES_MASK 0xfff00000 114 122 ··· 455 447 unsigned int cra_alignmask; 456 448 457 449 int cra_priority; 458 - atomic_t cra_refcnt; 450 + refcount_t cra_refcnt; 459 451 460 452 char cra_name[CRYPTO_MAX_ALG_NAME]; 461 453 char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+1
kernel/padata.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * padata.c - generic interface to process data streams in parallel 3 4 *
+33 -38
lib/chacha20.c
··· 16 16 #include <asm/unaligned.h> 17 17 #include <crypto/chacha20.h> 18 18 19 - static inline u32 rotl32(u32 v, u8 n) 20 - { 21 - return (v << n) | (v >> (sizeof(v) * 8 - n)); 22 - } 23 - 24 - extern void chacha20_block(u32 *state, void *stream) 19 + void chacha20_block(u32 *state, u32 *stream) 25 20 { 26 21 u32 x[16], *out = stream; 27 22 int i; ··· 25 30 x[i] = state[i]; 26 31 27 32 for (i = 0; i < 20; i += 2) { 28 - x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16); 29 - x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16); 30 - x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16); 31 - x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16); 33 + x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); 34 + x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); 35 + x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); 36 + x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16); 32 37 33 - x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12); 34 - x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12); 35 - x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12); 36 - x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12); 38 + x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12); 39 + x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12); 40 + x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12); 41 + x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12); 37 42 38 - x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8); 39 - x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8); 40 - x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8); 41 - x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8); 43 + x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8); 44 + x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8); 45 + x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8); 46 + x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8); 42 47 43 - x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7); 44 - x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7); 45 - x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7); 46 - x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7); 48 + x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7); 49 + x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7); 
50 + x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7); 51 + x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7); 47 52 48 - x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16); 49 - x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16); 50 - x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16); 51 - x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16); 53 + x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16); 54 + x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16); 55 + x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16); 56 + x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16); 52 57 53 - x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12); 54 - x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12); 55 - x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12); 56 - x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12); 58 + x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12); 59 + x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12); 60 + x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12); 61 + x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12); 57 62 58 - x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8); 59 - x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8); 60 - x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8); 61 - x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8); 63 + x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8); 64 + x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8); 65 + x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8); 66 + x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8); 62 67 63 - x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7); 64 - x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7); 65 - x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7); 66 - x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7); 68 + x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7); 69 + x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7); 70 + x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); 71 + x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); 67 72 } 68 73 69 74 for (i = 0; i < ARRAY_SIZE(x); i++)